def test_reopen(trie):
    """Train a trie, then close and reopen the persistent store twice."""
    for ngram in ([LE, PETIT, CHAT], [LE, PETIT, CHIEN],
                  [LE, PETIT, RAT], [LE, GROS, RAT]):
        trie.add_ngram(ngram)
    assert trie.dirty
    # remember how to rebuild the trie before dropping the object
    trie_class = trie.__class__
    trie_path = trie.path
    print("Will reopen with class:%s and path:%s" % (trie_class, trie_path))
    # first close/reopen cycle: the trie is still dirty on disk
    trie.close()
    del trie
    trie = trie_class(trie_path)
    assert trie.dirty
    assert trie.query_count([LE, PETIT]) == 3
    assert float_equal(trie.query_entropy([LE, PETIT]), 1.584962500721156)
    assert float_equal(trie.query_autonomy([LE, PETIT]), 1.0)
    assert trie.query_count([]) == 4
    # querying forced a recomputation, so the trie is clean now
    assert not trie.dirty
    # second close/reopen cycle: must come back clean
    trie.close()
    trie = trie_class(trie_path)
    assert not trie.dirty
    assert float_equal(trie.query_autonomy([LE, PETIT]), 1.0)
def test_reopen(trie):
    """Check that a persistent trie survives being closed and reopened."""
    trie.add_ngram([LE, PETIT, CHAT])
    trie.add_ngram([LE, PETIT, CHIEN])
    trie.add_ngram([LE, PETIT, RAT])
    trie.add_ngram([LE, GROS, RAT])
    assert trie.dirty
    # keep enough information to rebuild the trie after deleting it
    klass = trie.__class__
    path = trie.path
    print("Will reopen with class:%s and path:%s" % (klass, path))
    # drop and reconstruct: state is expected to be dirty until queried
    trie.close()
    del trie
    trie = klass(path)
    assert trie.dirty
    assert trie.query_count([LE, PETIT]) == 3
    assert float_equal(trie.query_entropy([LE, PETIT]), 1.584962500721156)
    assert float_equal(trie.query_autonomy([LE, PETIT]), 1.0)
    assert trie.query_count([]) == 4
    assert not trie.dirty
    # a second reopen should find an already-clean trie
    trie.close()
    trie = klass(path)
    assert not trie.dirty
    assert float_equal(trie.query_autonomy([LE, PETIT]), 1.0)
def test_with_tuple(storage):
    """Sentences and queries given as tuples behave like lists."""
    for sentence in (("le", "petit", "chat"), ("le", "petit", "chien")):
        storage.add_sentence(sentence)
    storage.add_sentence(("pour", "le", "petit"), freq=2)
    assert storage.query_count(("le", "petit")) == 4
    assert storage.query_count(("pour",)) == 2
    assert float_equal(storage.query_entropy(("le", "petit")), 1.75)
    assert float_equal(storage.query_autonomy(("le", "petit")), 1.89582)
def test_with_tuple(storage):
    """The storage API must accept tuples, not only lists."""
    bigram = ('le', 'petit')
    storage.add_sentence(('le', 'petit', 'chat'))
    storage.add_sentence(('le', 'petit', 'chien'))
    storage.add_sentence(('pour', 'le', 'petit'), freq=2)
    assert storage.query_count(bigram) == 4
    assert storage.query_count(('pour',)) == 2
    assert float_equal(storage.query_entropy(bigram), 1.75)
    assert float_equal(storage.query_autonomy(bigram), 1.89582)
def test_with_tuple(storage):
    """Tuple arguments are accepted everywhere a list would be."""
    chat = ('le', 'petit', 'chat')
    chien = ('le', 'petit', 'chien')
    storage.add_sentence(chat)
    storage.add_sentence(chien)
    storage.add_sentence(('pour', 'le', 'petit'), freq=2)
    # queries may also be tuples
    assert storage.query_count(('le', 'petit')) == 4
    assert storage.query_count(('pour',)) == 2
    assert float_equal(storage.query_entropy(('le', 'petit')), 1.75)
    assert float_equal(storage.query_autonomy(('le', 'petit')), 1.89582)
def test_basic_trie(trie):
    """Sanity checks on a tiny four-ngram corpus."""
    for ngram in ([LE, PETIT, CHAT], [LE, PETIT, CHIEN],
                  [LE, PETIT, RAT], [LE, GROS, RAT]):
        trie.add_ngram(ngram)
    assert trie.query_count([LE, PETIT]) == 3
    assert float_equal(trie.query_entropy([LE, PETIT]), 1.584962500721156)
    assert float_equal(trie.query_autonomy([LE, PETIT]), 1.0)
    # the empty query addresses the root: total count, zero entropy
    assert trie.query_count([]) == 4
    assert trie.query_entropy([]) == 0.0
    # autonomy is undefined (NaN) at the root
    assert isnan(trie.query_autonomy([]))
def test_basic_trie(trie):
    """Minimal checks on a simple four-ngram example."""
    prefix = [LE, PETIT]
    trie.add_ngram([LE, PETIT, CHAT])
    trie.add_ngram([LE, PETIT, CHIEN])
    trie.add_ngram([LE, PETIT, RAT])
    trie.add_ngram([LE, GROS, RAT])
    assert trie.query_count(prefix) == 3
    assert float_equal(trie.query_entropy(prefix), 1.584962500721156)
    assert float_equal(trie.query_autonomy(prefix), 1.0)
    # querying the empty ngram gives the total count
    assert trie.query_count([]) == 4
    # the "root" node has zero entropy ...
    assert trie.query_entropy([]) == 0.0
    # ... and an undefined autonomy
    assert isnan(trie.query_autonomy([]))
def test_basic(storage):
    """Counts and entropies are averaged over both trie directions.

    Forward ngrams that begin with « le petit »:
      - le petit chat
      - le petit chien
      - le petit $ * 2
    Backward ngrams that begin with « petit le »:
      - petit le ^ * 2
      - petit le pour * 2
    So the count is the mean of 4 and 4, and the entropy is the mean of
    2 and 1.5 (the None markers are counted separately).
    """
    for sentence in (["le", "petit", "chat"], ["le", "petit", "chien"]):
        storage.add_sentence(sentence)
    storage.add_sentence(["pour", "le", "petit"], freq=2)
    assert storage.query_count(["le", "petit"]) == 4
    assert storage.query_count(["pour"]) == 2
    # counts must stay plain integers
    assert isinstance(storage.query_count(["pour"]), int)
    assert float_equal(storage.query_entropy(["le", "petit"]), 1.75)
    assert float_equal(storage.query_autonomy(["le", "petit"]), 1.89582)
def test_add_ngram_negativ_freq(trie):
    """Adding an ngram with a negative or null freq must be rejected."""
    for ngram in ([LE, PETIT, CHAT], [LE, PETIT, CHIEN],
                  [LE, PETIT, RAT], [LE, GROS, RAT]):
        trie.add_ngram(ngram)
    # removing an n-gram (freq <= 0) is rejected for now
    for bad_freq in (-1, 0):
        with pytest.raises(ValueError):
            trie.add_ngram([LE, PETIT, CHAT], bad_freq)
    return
    # Dead code below: intended behaviour for a future release, see #18.
    assert trie.query_count([LE, PETIT]) == 2
    assert float_equal(trie.query_entropy([LE, PETIT]), 1.0)
    assert float_equal(trie.query_autonomy([LE, PETIT]), 1.0)
    # removing more than reasonable
    trie.add_ngram([LE, PETIT, CHAT], -10)
    assert trie.query_count([LE, PETIT]) == 0
def test_add_ngram_negativ_freq(trie):
    """add_ngram must raise ValueError on null and negative frequencies."""
    chat = [LE, PETIT, CHAT]
    trie.add_ngram(chat)
    trie.add_ngram([LE, PETIT, CHIEN])
    trie.add_ngram([LE, PETIT, RAT])
    trie.add_ngram([LE, GROS, RAT])
    # removing an n-gram is not allowed (yet)
    with pytest.raises(ValueError):
        trie.add_ngram(chat, -1)
    with pytest.raises(ValueError):
        trie.add_ngram(chat, 0)
    return
    # Dead code below is kept for a future release, see #18.
    assert trie.query_count([LE, PETIT]) == 2
    assert float_equal(trie.query_entropy([LE, PETIT]), 1.0)
    assert float_equal(trie.query_autonomy([LE, PETIT]), 1.0)
    # removing more than reasonable
    trie.add_ngram(chat, -10)
    assert trie.query_count([LE, PETIT]) == 0
def test_basic(storage):
    """Check counts, entropy and autonomy on a small two-direction corpus.

    Forward ngrams that begin with « le petit »:
      - le petit chat
      - le petit chien
      - le petit $ * 2
    Backward ngrams that begin with « petit le »:
      - petit le ^ * 2
      - petit le pour * 2
    The count is the mean of 4 and 4; the entropy is the mean of 2 and
    1.5 (the None markers are counted separately).
    """
    storage.add_sentence(['le', 'petit', 'chat'])
    storage.add_sentence(['le', 'petit', 'chien'])
    storage.add_sentence(['pour', 'le', 'petit'], freq=2)
    le_petit = ['le', 'petit']
    assert storage.query_count(le_petit) == 4
    count_pour = storage.query_count(['pour'])
    assert count_pour == 2
    # the count must be an int, not a float
    assert isinstance(count_pour, int)
    assert float_equal(storage.query_entropy(le_petit), 1.75)
    assert float_equal(storage.query_autonomy(le_petit), 1.89582)
def run(self):
    """Check several things about the requested line to try and analyze
    why a line wouldn't be used at its full potential.

    Returns a BotErrorMessage when the line cannot be found, otherwise a
    dict of answer slots: "fetched-utilization", "precision-adverb" and
    "machine-utilization-explanation".
    """
    line = self._get_relevant_line()
    if line is None:
        line_name = self._get_line_name()
        print("Line "+str(line_name)+" not found")
        return BotErrorMessage("I <b>couldn't find the line "+str(line_name)+
            "</b>. I'm afraid it doesn't exist.")
    print("Found line "+str(line.getName()))
    # Check if the user believes the line is not used as it really is.
    # Results of this checking will be used to display or not a "actually"
    # in the bots answer.
    print("checking user's beliefs")
    user_utilization_ratio = self.context.get_slot_value("utilization")
    try:
        user_utilization_ratio = float(user_utilization_ratio)
    except Exception:
        # slot missing or not numeric: nothing to compare against
        user_utilization_ratio = None
    precision_adverb = None
    # user value is a ratio (0..1?) while the real utilization seems to be
    # a percentage, hence the *100 — TODO confirm against the slot format
    if (
            user_utilization_ratio is not None
            and not float_equal(user_utilization_ratio*100,
                                self._get_real_utilization(line))):
        precision_adverb = "actually"
    # Check 0: is the line saturated?
    print("doing check #0")
    if float_equal(self._get_real_utilization(line), 100.0):
        print("Line "+str(self._get_line_name())+" is saturated")
        return {
            "fetched-utilization": 100.0,
            "precision-adverb": precision_adverb,
            "machine-utilization-explanation":
                "which means it is <b>saturated</b>. The point is to "+
                "fulfill a maximum number of orders, right?",
        }
    # Check 1: is all demand planned?
    print("doing check #1")
    nb_orders = Phi.getNumOrders()
    # horizon = end date of the last time bucket
    horizon_date = \
        Phi.getTimeBucket(Phi.getNumberOfTimeBuckets()-1).getEndDate()
    nb_orders_planned = 0
    orders = []
    unplanned_orders = []
    nb_orders_forbidden = 0
    for i in range(nb_orders):
        current_order = Phi.getOrder(i)
        orders.append(current_order)
        completion_date = current_order.getCompletionDate()
        if completion_date < horizon_date:
            # QUESTION: should the horizon date be included?
            nb_orders_planned += 1
        else:
            unplanned_orders.append(current_order)
        if current_order.isForbidden():
            nb_orders_forbidden += 1
    # everything that is not forbidden got planned: nothing left to do
    if nb_orders - nb_orders_forbidden == nb_orders_planned:
        return {
            "fetched-utilization": self._get_real_utilization(line),
            "precision-adverb": precision_adverb,
            "machine-utilization-explanation":
                "because <b>all the orders are planned</b>: "+
                str(nb_orders_planned)+" out of "+str(nb_orders)+" are "+
                "planned (where "+str(nb_orders_forbidden)+" orders are "+
                "forbidden)",
        }
    # Check 2: is there a part of the unplanned demand that actually goes
    # through this line?
    print("doing check #2")
    nb_buckets = Phi.getNumberOfTimeBuckets()  # NOTE(review): unused below
    unplanned_goes_through_line = False
    for order in unplanned_orders:
        current_product_family = order.getFinalPF()
        # QUESTION: what to do if this returns `None`?
        if (
                current_product_family is not None
                and line.isPFUsed(current_product_family) != 0):
            unplanned_goes_through_line = True
            break
    if not unplanned_goes_through_line:
        return {
            "fetched-utilization": self._get_real_utilization(line),
            "precision-adverb": precision_adverb,
            "machine-utilization-explanation":
                "because it seems there is <b>no unplanned orders "+
                "that need to go through this production line</b>",
        }
    # Check 3: is it possible to plan the unplanned orders within the
    # horizon (are they already late)?
    print("doing check #3")
    # NOTE(review): the flag name says "late" but the test matches the
    # answer text: due date after the horizon, so the solver skips them
    orders_already_late = False
    for order in orders:
        due_date = order.getDueDate()
        if due_date is not None and due_date > horizon_date:
            orders_already_late = True
            break
    if orders_already_late:
        return {
            "fetched-utilization": self._get_real_utilization(line),
            "precision-adverb": precision_adverb,
            "machine-utilization-explanation":
                "because <b>some orders have their due date after the time "+
                "horizon</b> (the end time of the last time bucket), "+
                "therefore the solver decided not to plan them",
        }
    # Check 4: are there other lines which are saturated over the horizon?
    print("doing check #4")
    other_saturated_line = None
    nb_lines = Phi.getNumberOfLines()
    # TODO: in the API you also have another function named `getNumLines()`
    # which seems to do the same thing, you might want to look into this.
    for i in range(nb_lines):
        other_line = Phi.getLine(i)
        if (
                other_line is not None
                and self._is_line_saturated(other_line)):
            # Check whether an unplanned goes through this line
            for order in unplanned_orders:
                current_product_family = order.getFinalPF()
                # QUESTION: what to do if this returns `None`?
                if (
                        current_product_family is not None
                        and line.isPFUsed(current_product_family) != 0
                        and other_line.isPFUsed(current_product_family) != 0):
                    # current unplanned order should go through both lines
                    other_saturated_line = other_line
                    break
        if other_saturated_line is not None:
            break
    if other_saturated_line is not None:
        return {
            "fetched-utilization": self._get_real_utilization(line),
            "precision-adverb": precision_adverb,
            "machine-utilization-explanation":
                "because <b>production line "+
                str(other_saturated_line.getName())+" is saturated</b> "+
                "which prevents some of the unplanned commands that go "+
                "through line "+str(line.getName())+" to be completed. "+
                "The solver does not plan such commands completely",
        }
    # Check 5: are there any limiting flow constraints on the line?
    # NOTE(review): not implemented yet, only logged
    print("doing check #5")
    # Check 6: are there any stock max constraints on some successive lines?
    # NOTE(review): not implemented yet, only logged
    print("doing check #6")
    # no check matched: admit we don't know
    return {
        "fetched-utilization": self._get_real_utilization(line),
        "precision-adverb": precision_adverb,
        "machine-utilization-explanation":
            "but I <b>couldn't find out why</b>.\nIf after analyzing the "+
            "plan, you still can't understand why this is the case, don't "+
            "hesitate to <b>call a consultant</b>",
    }
def test_lexer_input_float_scientific_enumeration():
    """A float literal in scientific notation lexes to its numeric value."""
    tokens = lexer('4.2e5')
    assert float_equal(4.2 * 10**5, tokens[0])
def test_lexer_input_float():
    """A plain float literal lexes to its numeric value."""
    tokens = lexer('4.2')
    assert float_equal(4.2, tokens[0])
def test_lexer_input_scientific_enumeration():
    """An integer in scientific notation lexes to its numeric value."""
    tokens = lexer('1e4')
    assert float_equal(1 * 10**4, tokens[0])