Code example #1
File: stategraph.py Project: beardhatcode/eco
    def __init__(self, start_symbol, grammar, lr_type=0):
        self.grammar = grammar
        self.start_symbol = start_symbol
        self.state_sets = []
        self.edges = {}
        self.ids = {}
        self.todo = []
        self.done = set()
        self.maybe_compatible = {}

        self.goto_time = 0
        self.add_time = 0
        self.closure_time = 0
        self.closure_count = 0
        self.addcount = 0
        self.weakly = 0
        self.weakly_count = 0
        self.mergetime = 0

        helper = Helper(grammar)
        self.helper = helper
        if lr_type == LR0:
            self.closure = helper.closure_0
            self.goto = helper.goto_0
            self.start_set = StateSet(
                [LR0Element(Production(None, [self.start_symbol]), 0)])
        elif lr_type == LR1 or lr_type == LALR:
            self.closure = helper.closure_1
            self.goto = helper.goto_1
            self.start_set = StateSet()
            self.start_set.add(
                LR0Element(Production(None, [self.start_symbol]), 0),
                set([FinishSymbol()]))
Code example #2
class TestProduction2Beta(test_spectrum_data.TestSpectrumData2Beta):
    def setUp(self):
        self._spectrum = Production(path)
        self._spectrum.set_parameters()
    def test_n_events(self):
        pass
    def test_label(self):
        self.assertEqual(self._spectrum._label, "0#nu#beta#beta")
    def tearDown(self):
        pass
Code example #3
class TestProductionBackg(test_spectrum_data.TestSpectrumDataBackg):
    def setUp(self):
        path = "TeLoadedK42_r10_s0_p1.ntuple.root"
        self._spectrum = Production(path)
        self._spectrum.set_parameters()
    def test_n_events(self):
        pass
    def test_label(self):
        self.assertEqual(self._spectrum._label, "K42")
    def tearDown(self):
        pass
Code example #4
File: autoterm.py Project: alheart/python
 def on_startBtn_clicked(self):
     """
     Slot documentation goes here.
     """
     port="com5"
     baudrate=115200
     parity="N"
     rtscts=False
     xonxoff=False
     miniterm = Miniterm(port, baudrate, parity, rtscts, xonxoff)
     production = Production(miniterm, self.msg, self.process)
     miniterm.start()
     production.start()
Code example #5
 def on_startBtn_clicked(self):
     """
     Slot documentation goes here.
     """
     port = "com5"
     baudrate = 115200
     parity = "N"
     rtscts = False
     xonxoff = False
     miniterm = Miniterm(port, baudrate, parity, rtscts, xonxoff)
     production = Production(miniterm, self.msg, self.process)
     miniterm.start()
     production.start()
Code example #6
File: helpers.py Project: 10183308/eco-1
 def closure_0(self, state_set):
     result = set()
     # 1) Add state_set to its own closure
     for element in state_set.elements:
         result.add(element)
     # 2) If there exists an LR-element with a Nonterminal as its next symbol
     #    add all productions with this symbol on the left side to the closure
     temp = result
     while 1:
         newelements = set()
         # closure of temp
         for state in temp:
             symbol = state.next_symbol()
             if isinstance(symbol, Nonterminal):
                 alternatives = self.grammar[symbol].alternatives
                 for a in alternatives:
                     # create epsilon symbol if alternative is empty
                     if a == []:
                         a = [epsilon]
                     p = Production(symbol, a)
                     s = State(p, 0)
                     if a == [epsilon]:
                         s.d = 1
                     newelements.add(s)
         # add new elements to result
         temp = newelements.difference(
             result)  # remove elements already in result
         result.update(temp)
         if len(temp) == 0:  # no new elements were added
             break
     return StateSet(result)
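The numbered comments in this example describe the textbook LR closure: start from the kernel items and keep adding fresh items for every nonterminal that appears directly after the dot, until a pass adds nothing new. A minimal self-contained sketch of that fixpoint iteration on a hypothetical toy grammar (nothing below is taken from the eco sources):

# Minimal sketch of the LR closure fixpoint; grammar and names are
# hypothetical, not from the eco project.
toy_grammar = {
    "E": [["E", "+", "T"], ["T"]],
    "T": [["int"]],
}

def closure_items(items, grammar):
    # Keep adding (lhs, rhs, dot) items for every nonterminal that appears
    # directly after the dot, until a pass adds nothing new.
    result = set(items)
    todo = set(items)
    while todo:
        new = set()
        for lhs, rhs, dot in todo:
            if dot < len(rhs) and rhs[dot] in grammar:  # next symbol is a nonterminal
                for alt in grammar[rhs[dot]]:
                    item = (rhs[dot], tuple(alt), 0)
                    if item not in result:
                        new.add(item)
        result |= new
        todo = new
    return result

print(closure_items({("S", ("E",), 0)}, toy_grammar))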
Code example #7
File: helpers.py Project: 10183308/eco-1
def closure_1(grammar, state_set):
    assert False
    result = StateSet()
    # Step 1
    for state in state_set.elements:
        result.add(state)
    # Step 2
    for state in result:
        symbol = state.next_symbol()
        if isinstance(symbol, Nonterminal):
            f = set()
            for l in state.lookahead:
                betaL = []
                betaL.extend(state.remaining_symbols())
                betaL.append(l)
                f |= old2_first(grammar, betaL)

            alternatives = grammar[symbol].alternatives
            for a in alternatives:
                # create epsilon symbol if alternative is empty
                if a == []:
                    a = [Epsilon()]
                p = Production(symbol, a)
                s = LR1Element(p, 0, f)
                if a == [epsilon]:
                    s.d = 1
                result.add(s)
    # merge states that only differ in their lookahead
    result.merge()
    return result
Code example #8
 def testAppend(self):
     hist_label = self._spectrum._label + "-Test"
     append = False
     always_remake = True
     hist_path = os.environ.get("MAJORAT_TEST")
     self._spectrum.make_histogram(hist_label, append, always_remake)
     self._spectrum.write_histograms(hist_path+"/")
     histogram = self._spectrum.get_histogram(hist_label)
     self.assertEqual(histogram.GetEntries(), 10001)
     append = True
     always_remake = False
     self._spectrum2 = Production(path2)
     self._spectrum2.set_parameters()
     bin_width = "default"
     self._spectrum2.make_histogram(hist_label, append, always_remake, 
                                    bin_width, hist_path+"/")
     histogram2 = self._spectrum2.get_histogram(hist_label)
     self.assertNotEqual(histogram.GetEntries(), histogram2.GetEntries())
     self.assertEqual(histogram2.GetEntries(), 20003)
Code example #9
 def build(self, graph, precedences=[]):
     start_production = Production(None, [graph.start_symbol])
     symbols = graph.get_symbols()
     symbols.add(FinishSymbol())
     for i in range(len(graph.state_sets)):
         # accept, reduce
         state_set = graph.get_state_set(i)
         for state in state_set.elements:
             if state.isfinal():
                 if state.p == start_production:
                     self.table[(i, FinishSymbol())] = Accept()
                 else:
                     if self.lr_type in [LR1, LALR]:
                         lookahead = state_set.lookaheads[state]
                     else:
                         lookahead = symbols
                     for s in lookahead:
                         newaction = Reduce(state.p)
                         if self.table.has_key((i, s)):
                             oldaction = self.table[(i, s)]
                             newaction = self.resolve_conflict(
                                 i, s, oldaction, newaction, precedences)
                         if newaction:
                             self.table[(i, s)] = newaction
                         else:
                             del self.table[(i, s)]
         # shift, goto
         for s in symbols:
             dest = graph.follow(i, s)
             if dest:
                 if isinstance(s, Terminal):
                     action = Shift(dest)
                 if isinstance(s, Nonterminal):
                     action = Goto(dest)
                 if self.table.has_key((i, s)):
                     action = self.resolve_conflict(i, s,
                                                    self.table[(i, s)],
                                                    action, precedences)
                 if action:
                     self.table[(i, s)] = action
                 else:
                     del self.table[(i, s)]
Code example #10
File: helpers.py Project: 10183308/eco-1
def closure_0(grammar, state_set):
    result = StateSet()
    # 1) Add state_set to its own closure
    for state in state_set.elements:
        result.add(state)
    # 2) If there exists an LR-element with a Nonterminal as its next symbol
    #    add all productions with this symbol on the left side to the closure
    for state in result:
        symbol = state.next_symbol()
        if isinstance(symbol, Nonterminal):
            alternatives = grammar[symbol].alternatives
            for a in alternatives:
                # create epsilon symbol if alternative is empty
                if a == []:
                    a = [epsilon]
                p = Production(symbol, a)
                s = State(p, 0)
                if a == [epsilon]:
                    s.d = 1
                result.add(s)
    return result
Code example #11
class TestProduction(unittest.TestCase):
    def setUp(self):
        self._spectrum = Production(path)
        self._spectrum.set_parameters()
    def test_majorat_name(self):
        majorat_name = "RAT4-5_prod_decay0_2beta_Te130_0_1_0-0_3-5"
        self.assertEqual(self._spectrum._majorat_name, majorat_name)
    def test_extension(self):
        self.assertEqual(self._spectrum._ext, ".ntuple.root")
    def testMakeHistogram(self):
        hist_label = self._spectrum._label + "-Test"
        append = False
        always_remake = True
        self._spectrum.make_histogram(hist_label, append, always_remake)
        histogram = self._spectrum.get_histogram(hist_label)
        self.assertEqual(histogram.GetEntries(), 10001)
    def testAppend(self):
        hist_label = self._spectrum._label + "-Test"
        append = False
        always_remake = True
        hist_path = os.environ.get("MAJORAT_TEST")
        self._spectrum.make_histogram(hist_label, append, always_remake)
        self._spectrum.write_histograms(hist_path+"/")
        histogram = self._spectrum.get_histogram(hist_label)
        self.assertEqual(histogram.GetEntries(), 10001)
        append = True
        always_remake = False
        self._spectrum2 = Production(path2)
        self._spectrum2.set_parameters()
        bin_width = "default"
        self._spectrum2.make_histogram(hist_label, append, always_remake, 
                                       bin_width, hist_path+"/")
        histogram2 = self._spectrum2.get_histogram(hist_label)
        self.assertNotEqual(histogram.GetEntries(), histogram2.GetEntries())
        self.assertEqual(histogram2.GetEntries(), 20003)
    def tearDown(self):
        pass
Code example #12
File: helpers.py Project: Britefury/eco
 def closure_1(self, state_set):
     la_dict = {}
     result = set()
     working_set = set()
     # Step 1
     for element in state_set.elements:
         la_dict[element] = state_set.get_lookahead(element)
         result.add(element)
         working_set.add(element)
     # Step 2
     i = 0
     temp = working_set
     while 1:
         newelements = set()
         for state in temp:
             if state.isfinal():
                 continue
             symbol = state.next_symbol()
             if isinstance(symbol, Nonterminal):
                 f = set()
                 for l in la_dict[state]:
                     betaL = []
                     betaL.extend(state.remaining_symbols())
                     betaL.append(l)
                     f |= self.first(betaL)
                 alternatives = self.grammar[symbol].alternatives
                 i = -1
                 for a in alternatives:
                     i = i + 1
                     # create epsilon symbol if alternative is empty
                     if a == []:
                         a = [Epsilon()]
                     p = Production(symbol, a, self.grammar[symbol].annotations[i], self.grammar[symbol].precs[i])
                     if self.grammar[symbol].inserts.has_key(i):
                         insert = self.grammar[symbol].inserts[i]
                         p.inserts[insert[0]] = insert[1]
                     s = LR0Element(p, 0)
                     if a == [epsilon]:
                         s.d = 1
                     # NEW ELEMENT:
                     # 1. completely new (+lookahead): add to result
                     # 2. new lookahead: update lookahead in la_dict
                     # -> add to new working set
                     # 3. already known: ignore
                     if s in result:
                         if f.issubset(la_dict[s]):   # lookahead in combination with state already known
                             continue
                         else:
                             la_dict[s] |= f   # new lookahead
                     else:
                         la_dict[s] = set(f)        # completely new
                     result.add(s)
                     newelements.add(s)
         temp = newelements
         if len(temp) == 0:
             break
         i += 1
     # add lookaheads
     final_result = StateSet()
     for element in result:
         final_result.add(element, la_dict[element])
     return final_result
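The "NEW ELEMENT" comment block is the heart of this variant: an item only re-enters the working set when it is completely new or brings a genuinely new lookahead, which is what makes the loop terminate. A minimal sketch of just that merge step (the helper name and data layout are hypothetical, not from the eco sources):

# Hypothetical sketch of the three "NEW ELEMENT" cases above.
def merge_item(item, lookahead, result, la_dict, newelements):
    if item in result:
        if lookahead.issubset(la_dict[item]):
            return                          # 3. already known: ignore
        la_dict[item] |= lookahead          # 2. known item, new lookahead
    else:
        la_dict[item] = set(lookahead)      # 1. completely new item
    result.add(item)
    newelements.add(item)                   # revisit on the next pass

result, la_dict, newelements = set(), {}, set()
merge_item("A", {"x"}, result, la_dict, newelements)  # case 1
merge_item("A", {"x"}, result, la_dict, newelements)  # case 3: no change
merge_item("A", {"y"}, result, la_dict, newelements)  # case 2: lookahead grows
print(la_dict["A"])  # now contains both 'x' and 'y'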
Code example #13
from terminals import Terminals
from nonterminals import NonTerminals
from production import Production

"""
Calculator grammar:
    goal -> expr eof
    expr -> expr + term | expr - term | term
    term -> term * factor | term / factor | factor
    factor -> literal | ( expr )
"""
DEFAULT_GRAMMAR = (
    Production(
        NonTerminals.GOAL,
        NonTerminals.EXPR,
        Terminals.EOF,
        eval_fn=lambda c: c[0].evaluate(),
    ),
    Production(
        NonTerminals.EXPR,
        NonTerminals.EXPR,
        Terminals.PLUS,
        NonTerminals.TERM,
        eval_fn=lambda c: c[0].evaluate() + c[2].evaluate(),
    ),
    Production(
        NonTerminals.EXPR,
        NonTerminals.EXPR,
        Terminals.MINUS,
        NonTerminals.TERM,
        eval_fn=lambda c: c[0].evaluate() - c[2].evaluate(),
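Each Production above pairs one grammar rule with an eval_fn that receives the reduced children c and recurses via evaluate(); that is how the whole parse tree collapses to a number. A minimal sketch of the pattern, where Node and Literal are hypothetical stand-ins for the project's parse-tree nodes:

# Hypothetical sketch of how eval_fn-style semantic actions compose;
# Node and Literal are illustrative, not from this project.
class Literal(object):
    def __init__(self, value):
        self.value = value
    def evaluate(self):
        return self.value

class Node(object):
    def __init__(self, children, eval_fn):
        self.children = children
        self.eval_fn = eval_fn
    def evaluate(self):
        return self.eval_fn(self.children)

# expr -> expr + term, evaluated as c[0].evaluate() + c[2].evaluate()
plus = Node([Literal(1), "+", Literal(2)],
            lambda c: c[0].evaluate() + c[2].evaluate())
print(plus.evaluate())  # 3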
Code example #14
File: testwindow.py Project: sweta-12/pythontkinter
 def production_window(self):
     # self.master.withdraw()
     self.window = Production(self.master, self.userdata)
     self.test_master.destroy()
Code example #15
File: spectral_plot.py Project: ashleyrback/MajoRat
    while start >= end:
        yield start
        start -= step

if __name__ == "__main__":
    stack = THStack("SNO+ spectral plot", "SNO+ spectral plot")
    legend = TLegend(0.0, 0.0, 1.0, 1.0)
    legend.SetFillColor(0)
    gStyle.SetOptStat("")

    roi = roi_utils.RoIUtils("reconstructed_energy")
    livetime = defaults.ll_analysis.get("livetime")
    print "spectral_plot.__main__: livetime =", livetime

    two_nu = Production\
        (os.environ.get("MAJORAT_DATA")+\
             "/hist_RAT4-5_prod_decay0_2beta_Te130_0_4_0-0_3-5.ntuple.root")
    total_events = 3.8e6
    hist_label = two_nu._label + "-reco_pos"
    two_nu.scale_by_events(total_events*livetime, hist_label)
    two_nu_hist = two_nu.get_histogram(hist_label)
    two_nu_hist.Draw()
    two_nu_hist.SetLineWidth(2)
    two_nu_hist.SetLineColor(2)
    stack.Add(two_nu_hist)
    legend.AddEntry(two_nu_hist, two_nu_hist.GetTitle(), "l")

    solar = Production\
        (os.environ.get("MAJORAT_DATA")+\
             "/hist_RAT4-5_prod_solar_B8.ntuple.root")
    hist_label = solar._label + "-reco_pos"
Code example #16
                       action="store_true")
    args = parser.parse_args()

    # make list of files
    file_list = []
    if args.is_directory:
        for root, dirs, files in os.walk(args.ntuple_path):
            for file in files:
                match = re.search(r".ntuple.root$", file)
                if match:
                    file_list.append(os.path.join(root, file))
    else:
        file_list.append(args.ntuple_path)
    first_file = True
    for file in file_list:
        spectrum = Production(file)
        spectrum.set_parameters()
        # make list of histogram labels
        hist_labels = [spectrum._label, 
                       spectrum._label+"-no_fv_cut"] # produced by default
        if args.reco_pos:
            hist_labels.append(spectrum._label+"-reco_pos")
        if args.nhit_energy:
            hist_labels.append(spectrum._label+"-nhit_energy")
            hist_labels.append(spectrum._label+"-reco_pos-nhit_energy")
        if args.zero_energy:
            zero_energy_labels = []
            for hist_label in hist_labels:
                hist_label += "-zero_energy"
                zero_energy_labels.append(hist_label)
            hist_labels += zero_energy_labels
Code example #17
def simulation(n_rounds, n_words, n_dimensions, seed, n_exemplars, n_continuers, similarity_bias_word,
               similarity_bias_segment, noise, anti_ambiguity_bias, continuer_G, word_ratio, wedel_start, n_run):
    """
    Run a simulation of n_rounds rounds with the specified parameters.
    :param n_rounds: int; the number of rounds of the simulation run
    :param n_words: int; the number of v_words contained in the lexicon
    :param n_dimensions: int; the number of dimensions of the exemplars
    :param seed: int; a seed used to generate the agents' lexicons
    :param n_exemplars: int; the number of exemplars of a word
    :param n_continuers: int; the number of continuer v_words in the lexicon
    :param similarity_bias_word: boolean; whether the word similarity bias should be applied to the signals
    :param similarity_bias_segment: boolean; whether the segment similarity bias should be applied to the signals
    :param noise: boolean; whether noise should be added to the signals
    :param anti_ambiguity_bias: boolean; whether the anti-ambiguity bias should be applied to storing signals
    :param continuer_G: int; the constant used to determine the strength of the noise bias
    :param word_ratio: float; the relative contribution of the word similarity bias in case of continuer v_words
    :param wedel_start: boolean; whether the means for initialising the lexicon are based on the one used in Wedel's
    model
    :param n_run: int; the current run number
    :return: list; the first agent's lexicon: a list of v_words, where each word is a list of exemplars, each of
                   which is a list of n_dimensions floats
             list; the second agent's lexicon, with the same structure
             list; the indices of the continuers in the first agent's lexicon
             list; the indices of the continuers in the lexicon of the second agent
             dataframe; a pandas dataframe containing the starting conditions (and, every 500 rounds, the
                   intermediate lexicon states)
             int; store_count and store_count_2, counting how often each agent stored a perceived signal
             list; probability_storages and probability_storages2, the storage probabilities recorded for each agent
    """

    # Initialise agents

    # If the seed is defined use that seed to initialise the agents
    if seed:
        seed_value = seed
    else:
        # Else generate a random seed to use for the simulation
        seed_value = random.randrange(sys.maxsize)

    lexicon_start, v_words, continuer_words, indices_continuer = Agent(n_words, n_dimensions, seed_value, n_exemplars,
                                                                       n_continuers, wedel_start, n_run).\
        generate_lexicon()
    lexicon2_start, v_words2, continuer_words2, indices_continuer2 = Agent(n_words, n_dimensions, seed_value,
                                                                           n_exemplars, n_continuers, wedel_start,
                                                                           n_run).generate_lexicon()
    # print("Lexicon start: ", lexicon_start)
    # print("Continuer_words: ", continuer_words)
    #
    # print("Lexicon start 2: ", lexicon2_start)
    # print("Continuer_words 2: ", continuer_words2)

    # Store the state of the lexicons at the beginning for both agents
    start = pd.DataFrame(columns=["Simulation_run", "Agent", "Word", "Centroid", "Average_distance", "Lexicon",
                                  "Continuer_indices", "Similarity_bias_word", "Similarity_bias_segment", "Noise",
                                  "Anti_ambiguity_bias", "N_words", "N_dimensions", "Seed", "N_exemplars",
                                  "N_continuers", "N_rounds", "State", "Exemplars", "Store", "Probability_storages"])
    start.loc[len(start)] = [None, 1, None, None, None, lexicon_start, indices_continuer, similarity_bias_word,
                             similarity_bias_segment, noise, anti_ambiguity_bias, n_words, n_dimensions, seed,
                             n_exemplars, n_continuers, n_rounds, "Start", None, None, None]
    start.loc[len(start)] = [None, 2, None, None, None, lexicon2_start, indices_continuer2, similarity_bias_word,
                             similarity_bias_segment, noise, anti_ambiguity_bias, n_words, n_dimensions, seed,
                             n_exemplars, n_continuers, n_rounds, "Start", None, None, None]

    # Make a copy of the lexicon for the agents to use in conversation
    lexicon = copy.deepcopy(lexicon_start)
    lexicon2 = copy.deepcopy(lexicon2_start)

    # Start the simulation: i counts the number of runs. One run consists of one production and perception step
    i = 0

    # Store_count(2) counts how often the signal is stored when perceived for both agents
    store_count = 0
    store_count_2 = 0

    # Probability_storage(2) stores the probabilities for which a signal can be stored for both agents
    probability_storages = []
    probability_storages2 = []

    while i < n_rounds:
        print("Run number: ", i)

        # Assign the roles to the agents so they change role every round
        if (i % 2) == 0:
            producer_lex = lexicon
            producer_v_words = v_words
            producer_continuer_words = continuer_words
            perceiver_lex = lexicon2
            perceiver_v_words = v_words2
            perceiver_continuer_words = continuer_words2
        else:
            producer_lex = lexicon2
            producer_v_words = v_words2
            producer_continuer_words = continuer_words2
            perceiver_lex = lexicon
            perceiver_v_words = v_words
            perceiver_continuer_words = continuer_words

        # print("Producer lex: ", producer_lex)
        # print("Perceiver lex: ", perceiver_lex)

        # One agent starts producing something: first the exemplars are selected for every word category
        targets, total_activations = Production(producer_lex, producer_v_words,
                                                producer_continuer_words, n_words, n_dimensions,
                                                n_exemplars, n_continuers, similarity_bias_word,
                                                similarity_bias_segment, noise, continuer_G,
                                                word_ratio).select_exemplar()
        # print("Chosen target exemplars: ", targets)

        # Then the biases are added to the selected exemplars
        target_exemplars = Production(producer_lex, producer_v_words, producer_continuer_words, n_words, n_dimensions,
                                      n_exemplars, n_continuers, similarity_bias_word, similarity_bias_segment, noise,
                                      continuer_G, word_ratio).add_biases(targets, total_activations)
        # print("Bias added to targets: ", target_exemplars)

        # The other agent perceives the produced signals

        # First shuffle the signals so they are not always presented in the same word order
        random.shuffle(target_exemplars)

        for signal in target_exemplars:
            # First the similarity of the signal to every word category is calculated and a best fitting word category
            # is chosen accordingly
            index_max_sim, total_similarities = Perception(perceiver_lex, perceiver_v_words, perceiver_continuer_words,
                                                           n_words, n_dimensions, n_exemplars, n_continuers,
                                                           anti_ambiguity_bias).similarity(signal)
            # print("Word category signal: ", index_max_sim)
            # print("Total similarities: ", total_similarities)

            # Then the anti-ambiguity bias is added and the signal is stored (or not) depending on its probability of
            # being stored. This probability is based on how well the signal fits within the chosen word category
            # relative to the other word categories

            # The signal is stored in the lexicon of the agent being the perceiver this round
            if (i % 2) == 0:
                if anti_ambiguity_bias:
                    lexicon2, store, probability_storage = Perception(perceiver_lex, perceiver_v_words,
                                                                      perceiver_continuer_words, n_words, n_dimensions,
                                                                      n_exemplars, n_continuers, anti_ambiguity_bias). \
                        add_anti_ambiguity_bias(index_max_sim, total_similarities, signal)

                    probability_storages2.append(probability_storage)

                    if store is True:
                        store_count_2 += 1

                # If there's no anti-ambiguity bias, the signal is always stored
                # in the best fitting word category
                else:
                    lexicon2[index_max_sim][0].insert(0, signal)
                # Only the first 100 exemplars of a word are used
                lexicon2[index_max_sim][0] = lexicon2[index_max_sim][0][:100]
                # print("Stored signal: ", signal)
                # print("Lexicon word: ", lexicon2[index_max_sim])
            else:
                if anti_ambiguity_bias:
                    lexicon, store, probability_storage = Perception(perceiver_lex, perceiver_v_words,
                                                                     perceiver_continuer_words, n_words, n_dimensions,
                                                                     n_exemplars, n_continuers, anti_ambiguity_bias). \
                        add_anti_ambiguity_bias(index_max_sim, total_similarities, signal)

                    if store is True:
                        store_count += 1

                    probability_storages.append(probability_storage)

                # If there's no anti-ambiguity bias, the signal is always stored
                # in the best fitting word category
                else:
                    lexicon[index_max_sim][0].insert(0, signal)

                # Only the first 100 exemplars of a word are used
                lexicon[index_max_sim][0] = lexicon[index_max_sim][0][:100]

                # print("Stored signal: ", signal)
                # print("Lexicon word: ", lexicon[index_max_sim])

        # After every 500 rounds, store the agents' lexicons
        if i % 500 == 0 and i > 0:
            # Make a copy of the lexicon, and probability storages to store the intermediate results
            lexicon_middle = copy.deepcopy(lexicon)
            lexicon2_middle = copy.deepcopy(lexicon2)

            probability_storages_middle = copy.deepcopy(probability_storages)
            probability_storages2_middle = copy.deepcopy(probability_storages2)

            start.loc[len(start)] = [None, 1, None, None, None, lexicon_middle, indices_continuer, similarity_bias_word,
                                     similarity_bias_segment, noise, anti_ambiguity_bias, n_words, n_dimensions, seed,
                                     n_exemplars, n_continuers, i, "Middle", None, store_count,
                                     probability_storages_middle]
            start.loc[len(start)] = [None, 2, None, None, None, lexicon2_middle, indices_continuer2,
                                     similarity_bias_word, similarity_bias_segment, noise, anti_ambiguity_bias, n_words,
                                     n_dimensions, seed, n_exemplars, n_continuers, i, "Middle", None, store_count_2,
                                     probability_storages2_middle]

        i += 1

    return lexicon, lexicon2, indices_continuer, indices_continuer2, start, store_count, store_count_2, \
           probability_storages, probability_storages2
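The docstring fixes the call signature, so a run reduces to a single call; a minimal invocation sketch, with every parameter value chosen purely for illustration:

# Hypothetical invocation of simulation(); all values are illustrative only.
(lexicon, lexicon2, indices_continuer, indices_continuer2, start,
 store_count, store_count_2, probability_storages,
 probability_storages2) = simulation(
    n_rounds=1000, n_words=50, n_dimensions=2, seed=42, n_exemplars=100,
    n_continuers=5, similarity_bias_word=True, similarity_bias_segment=True,
    noise=True, anti_ambiguity_bias=True, continuer_G=1, word_ratio=0.5,
    wedel_start=False, n_run=0)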
Code example #18
File: helpers.py Project: 10183308/eco-1
 def closure_1(self, state_set):
     la_dict = {}
     result = set()
     working_set = set()
     # Step 1
     for element in state_set.elements:
         la_dict[element] = state_set.get_lookahead(element)
         result.add(element)
         working_set.add(element)
     # Step 2
     i = 0
     temp = working_set
     while 1:
         newelements = set()
         for state in temp:
             if state.isfinal():
                 continue
             symbol = state.next_symbol()
             if isinstance(symbol, Nonterminal):
                 f = set()
                 for l in la_dict[state]:
                     betaL = []
                     betaL.extend(state.remaining_symbols())
                     betaL.append(l)
                     f |= self.first(betaL)
                 alternatives = self.grammar[symbol].alternatives
                 i = -1
                 for a in alternatives:
                     i = i + 1
                     # create epsilon symbol if alternative is empty
                     if a == []:
                         a = [Epsilon()]
                     p = Production(symbol, a,
                                    self.grammar[symbol].annotations[i],
                                    self.grammar[symbol].precs[i])
                     if self.grammar[symbol].inserts.has_key(i):
                         insert = self.grammar[symbol].inserts[i]
                         p.inserts[insert[0]] = insert[1]
                     s = LR0Element(p, 0)
                     if a == [epsilon]:
                         s.d = 1
                     # NEW ELEMENT:
                     # 1. completely new (+lookahead): add to result
                     # 2. new lookahead: update lookahead in la_dict
                     # -> add to new working set
                     # 3. already known: ignore
                     if s in result:
                         if f.issubset(
                                 la_dict[s]
                         ):  # lookahead in combination with state already known
                             continue
                         else:
                             la_dict[s] |= f  # new lookahead
                     else:
                         la_dict[s] = set(f)  # completely new
                     result.add(s)
                     newelements.add(s)
         temp = newelements
         if len(temp) == 0:
             break
         i += 1
     # add lookaheads
     final_result = StateSet()
     for element in result:
         final_result.add(element, la_dict[element])
     return final_result
Code example #19
 def setUp(self):
     self._spectrum = Production(path)
     self._spectrum.set_parameters()
Code example #20
File: test.py Project: minas69/vzhuh-intepreter
from bcolors import Color
from lexer import Template, tokenize
from lr_parser import *
from mu_interpreter import Interpreter
from parser_generator import *
from production import Production

NT = {'GOAL', 'EXP', 'PRIM'}
T = {'(', 'op', 'int', ')', '$'}

P = [
    Production('GOAL', 'EXP',             lambda p: p[0]),
    Production('EXP', '( op PRIM PRIM )', lambda p: ('expr', p[1], p[2], p[3])),
    Production('PRIM', 'EXP',             lambda p: p[0]),
    Production('PRIM', 'int',             lambda p: ('int', p[0]))
]

templates = [
        Template('(', '\('),
        Template(')', '\)'),
        Template('int', '[1-9][0-9]*', lambda a: int(a)),
        Template('space', ' +', lambda a: None),
        Template('newline', '\n', lambda a: None),
        Template('op', 's', after=' |\n')
    ]


def main():
    print(*P, sep='\n', end='\n\n')

    generator = ParserGenerator(P, T, NT)
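Each Production in P pairs a rule string with a callback that folds the matched children p into a tuple-shaped AST. Applied by hand to illustrative children, the EXP rule behaves like this (the child values shown are assumptions, not actual parser output):

# Applying the EXP callback by hand; the children list is illustrative.
exp_fn = lambda p: ('expr', p[1], p[2], p[3])
print(exp_fn(['(', 's', ('int', 1), ('int', 2), ')']))
# -> ('expr', 's', ('int', 1), ('int', 2))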
Code example #21
 def setUp(self):
     path = "TeLoadedK42_r10_s0_p1.ntuple.root"
     self._spectrum = Production(path)
     self._spectrum.set_parameters()
Code example #22
from production import Production
from vzhuh_interpreter import Interpreter

NT = {
    'GOAL', 'PRG', 'VAR_DECS', 'VAR_DEC', 'STATEMENTS', 'VARS', 'OPS', 'OP',
    'IF', 'IF_ELSE', 'BODY', 'FUNC', 'ARGS', 'ASSIGN', 'EXP', 'OR_EXP',
    'AND_EXP', 'TERM', 'P_TERM', 'OPERAND'
}

T = {
    'var', 'type', 'begin', 'end', 'if', 'else', 'then', 'true', 'false', '!',
    '&', '|', ':', ';', ',', '(', ')', '=', 'ident', 'str', '$'
}

P = [
    Production('GOAL', 'PRG', lambda p: p[0]),
    Production('PRG', 'var VAR_DECS STATEMENTS', lambda p:
               ('prg', ('dec', p[1]), p[2])),
    Production('VAR_DECS', 'VAR_DEC VAR_DECS', lambda p: [p[0]] + p[1]),
    Production('VAR_DECS', 'VAR_DEC', lambda p: [p[0]]),
    Production('VAR_DEC', 'VARS : type ;', lambda p: (p[2], p[0])),
    Production('VARS', 'ident , VARS', lambda p: [p[0]] + p[2]),
    Production('VARS', 'ident', lambda p: [p[0]]),
    Production('STATEMENTS', 'begin OPS end', lambda p: ('stmts', p[1])),
    Production('OPS', 'OP OPS', lambda p: [p[0]] + p[1]),
    Production('OPS', 'OP', lambda p: [p[0]]),
    Production('OP', 'FUNC ;', lambda p: p[0]),
    Production('OP', 'ASSIGN ;', lambda p: p[0]),
    Production('OP', 'IF', lambda p: p[0]),
    Production('OP', 'IF_ELSE', lambda p: p[0]),
    Production('IF', 'if EXP then BODY', lambda p: ('if', p[1], p[3])),