def aa_substring(string=None):
    print('Testing DFA, Detect if \'aa\' is a substring of w')
    sigma = ['a', 'b']
    Q = ['q1', 'q2', 'q3']
    init_state = 'q1'
    delta = dict()
    delta[('q1', 'a')] = 'q2'
    delta[('q1', 'b')] = 'q1'
    delta[('q2', 'a')] = 'q3'
    delta[('q2', 'b')] = 'q1'
    delta[('q3', 'a')] = 'q3'
    delta[('q3', 'b')] = 'q3'
    final = ['q3']
    my_auto = dfa.DFA(sigma, Q, init_state, delta, final)
    if string is None:
        inputs = ['aabbbb', 'bbbbbbb', 'bababaabbba', 'abababababa']
        for inp in inputs:
            if my_auto.consume_input(inp):
                print('{} was accepted'.format(inp))
            else:
                print('{} was not accepted'.format(inp))
    else:
        if my_auto.consume_input(string):
            print('{} was accepted'.format(string))
        else:
            print('{} was not accepted'.format(string))
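Several of these examples construct the automaton with a dfa.DFA(sigma, Q, init_state, delta, final) constructor and drive it with consume_input, but the dfa module itself is not shown. A minimal sketch of the class interface those calls assume (the names come from the usage above; the body is an assumption, not the original implementation):

class DFA:
    """Minimal DFA: sigma alphabet, Q states, delta dict of (state, symbol) -> state."""

    def __init__(self, sigma, Q, init_state, delta, final):
        self.sigma = sigma
        self.Q = Q
        self.init_state = init_state
        self.delta = delta
        self.final = final

    def consume_input(self, w):
        """Run the DFA on string w; return True iff it halts in a final state."""
        state = self.init_state
        for ch in w:
            state = self.delta[(state, ch)]  # KeyError on an undefined transition
        return state in self.final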
def toDFA(self):
    d = dfa.DFA(self.sigma)
    q = queue.Queue()
    mp = dict()
    f = frozenset(self.init_state)  # init_state is assumed to be an iterable of NFA states
    q.put(f)
    cnt = 0
    mp[f] = cnt
    d.add_node(cnt)
    cnt += 1
    d.set_init_state(0)
    while not q.empty():
        u = q.get()
        # print(u)
        for w in self.sigma:
            v = self.closure(u, w)
            if len(v) == 0:
                continue
            v = frozenset(v)
            print(u, v)  # debug trace of subset transitions
            if v not in mp:
                mp[v] = cnt
                d.add_node(cnt)
                for flabel in self.final_state:
                    if flabel in v:
                        d.final_state.append(cnt)
                        break
                cnt += 1
                q.put(v)
            d.add_edge(mp[u], mp[v], w)
    return d
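The subset construction keys its visited map mp by frozenset because ordinary sets are unhashable and cannot serve as dict keys. The closure(u, w) helper is not shown; for an epsilon-NFA it is typically "move on w, then saturate with epsilon transitions". A hedged sketch of that standard operation (self.delta as a dict of (state, symbol) -> set, and '' as the epsilon symbol, are both assumptions):

def closure(self, states, symbol):
    """Move every state in `states` on `symbol`, then take the epsilon-closure."""
    moved = set()
    for s in states:
        moved |= self.delta.get((s, symbol), set())
    # epsilon-closure: keep following epsilon edges until nothing new appears
    stack = list(moved)
    while stack:
        s = stack.pop()
        for t in self.delta.get((s, ''), set()):
            if t not in moved:
                moved.add(t)
                stack.append(t)
    return moved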
def __init__(self, ip, name, position, sensors):
    """ -> String -> "in" / "out" -> [String] -> Station() """
    self.ip = ip
    self.name = name
    self.sensors = Sensors(sensors)
    self.position = position
    self.dfa = df.DFA()
def multof3(string=None):
    print('Testing DFA, Detect if number of 0\'s is a multiple of 3')
    sigma = ['1', '0']
    Q = ['r1', 'r2', 'r3']
    init_state = 'r1'
    delta = dict()
    delta[('r1', '0')] = 'r2'
    delta[('r1', '1')] = 'r1'
    delta[('r2', '0')] = 'r3'
    delta[('r2', '1')] = 'r2'
    delta[('r3', '0')] = 'r1'
    delta[('r3', '1')] = 'r3'
    final = ['r1']
    a = '{} was accepted by the DFA'
    n = '{} was not accepted by the DFA'
    my_dfa = dfa.DFA(sigma, Q, init_state, delta, final)
    if string is None:
        inputs = ['100100', '1111', '10111010', '0000', '000']
        for inp in inputs:
            if my_dfa.consume_input(inp):
                print(a.format(inp))
            else:
                print(n.format(inp))
    else:
        if my_dfa.consume_input(string):
            print(a.format(string))
        else:
            print(n.format(string))
    print('\n')
def multof2(string=None):
    print('Testing DFA, Detect if number of 0\'s is even (multiple of 2)')
    sigma = ['1', '0']
    Q = ['q1', 'q2']
    init_state = 'q1'
    delta = dict()
    delta[('q1', '1')] = 'q1'
    delta[('q1', '0')] = 'q2'
    delta[('q2', '0')] = 'q1'
    delta[('q2', '1')] = 'q2'
    final = ['q1']
    a = '{} was accepted by the DFA'
    n = '{} was not accepted by the DFA'
    my_dfa = dfa.DFA(sigma, Q, init_state, delta, final)
    if string is None:
        inputs = ['100100', '1111', '10111010', '0000', '000']
        results = {}
        for inp in inputs:
            try:
                results[inp] = my_dfa.consume_input(inp)
                if results[inp]:
                    print(a.format(inp))
                else:
                    print(n.format(inp))
            except util.stateViolation as e:
                print(e)
    else:
        if my_dfa.consume_input(string):
            print(a.format(string))
        else:
            print(n.format(string))
    print('\n')
def determinize(self):
    # based on:
    # Subset Construction presented by Ullman (Automata course)
    # extended with closure operation to work with epsilon transitions
    # if not self.is_complete():
    #     self.set_error_state()
    closure = None
    if not self.is_epsilon_free():
        closure = self.closure()
    dfa_accept = set()
    dfa_reject = set()
    if closure is None:
        start = frozenset(self.start)
    else:
        start = frozenset({x for s in self.start for x in closure[s]})
    dfa_states = {start}
    tr = collections.defaultdict(lambda: None)
    queue = collections.deque([start])
    if start & self.accept:
        dfa_accept.add(start)
    elif start & self.reject:
        dfa_reject.add(start)
    while queue:
        curr_state = queue.popleft()
        for a in self.alphabet:
            next_direct = {x for s in curr_state for x in self.delta(s, a)}
            if closure is None:
                next_state = frozenset(next_direct)
            else:
                next_state = frozenset({x for s in next_direct for x in closure[s]})
            if next_state:
                if next_state not in dfa_states:
                    dfa_states.add(next_state)
                    queue.append(next_state)
                tr[curr_state, a] = next_state
                if next_state & self.accept:
                    dfa_accept.add(next_state)
                elif next_state & self.reject:
                    dfa_reject.add(next_state)
    # dfa_delta = lambda s, a: tr.get((s, a), None)
    dfa_delta = lambda q, a: tr[q, a]
    return dfa.DFA(self.alphabet, dfa_states, start, dfa_accept,
                   dfa_reject, dfa_delta, tr)
def setUp(self):
    self.alphabets = set(['0', '1'])
    self.states = set(['a', 'b'])
    self.initialState = 'a'
    self.endState = {'b'}
    self.transitionRule = {
        ('a', '0'): 'b',
        ('a', '1'): 'a',
        ('b', '0'): 'b',
        ('b', '1'): 'a'
    }
    self.transitionRuleIncomplete = {
        ('a', '0'): 'b',
        ('a', '1'): 'a',
        ('b', '0'): 'b',
    }
    self.dfaTrue = dfa.DFA(self.alphabets, self.states, self.initialState,
                           self.endState, self.transitionRule)
    self.dfaFalse = dfa.DFA(self.alphabets, self.states, self.initialState,
                            self.endState, self.transitionRuleIncomplete)
def train(total_loss, global_step):
    """Train CIFAR-10 model.

    Create an optimizer and apply to all trainable variables. Add moving
    average for all trainable variables.

    Args:
        total_loss: Total loss from loss().
        global_step: Integer Variable counting the number of training steps
            processed.
    Returns:
        train_op: op for training.
    """
    # Variables that affect learning rate.
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size

    # Decay the learning rate exponentially based on the number of steps.
    lr = INITIAL_LEARNING_RATE
    tf.summary.scalar('learning_rate', lr)

    # Generate moving averages of all losses and associated summaries.
    # loss_averages_op = _add_loss_summaries(total_loss)
    losses = tf.get_collection('losses')
    for l in losses + [total_loss]:
        tf.summary.scalar(l.op.name + ' (raw)', l)

    # Compute gradients.
    # with tf.control_dependencies([loss_averages_op]):
    opt = dfa.DFA(lr, stddev=1.0)  # Direct Feedback Alignment optimizer
    grads = opt.compute_gradients(total_loss)  # Call custom compute_gradients method
    maxnorm = [(tf.clip_by_norm(grad, 4), var) for grad, var in grads]

    # Apply gradients. Uses the _apply_dense function from TensorFlow's
    # stochastic gradient descent; see dfa.py.
    apply_gradient_op = opt.apply_gradients(maxnorm, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)

    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            tf.summary.histogram(var.op.name + '/gradients', grad)

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')

    return train_op
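In this snippet dfa.DFA is not an automaton but a custom optimizer implementing Direct Feedback Alignment (Nøkland, 2016), which replaces backpropagated error signals with fixed random projections of the output error. A minimal NumPy sketch of the DFA update for one hidden layer (all names and sizes here are illustrative, not taken from dfa.py):

import numpy as np

rng = np.random.default_rng(0)

# Tiny 2-layer net: x -> tanh(W1 x) -> softmax(W2 h)
W1 = rng.normal(0, 0.2, (200, 784))
W2 = rng.normal(0, 0.2, (10, 200))
B1 = rng.normal(0, 1.0, (200, 10))  # fixed random feedback matrix (cf. stddev=1.0 above)

def dfa_step(x, y_onehot, lr=0.5):
    # Forward pass
    a1 = W1 @ x
    h = np.tanh(a1)
    logits = W2 @ h
    p = np.exp(logits - logits.max())
    p /= p.sum()
    e = p - y_onehot                  # output error
    # DFA: the hidden delta uses B1 @ e instead of backprop's W2.T @ e
    d1 = (B1 @ e) * (1 - h ** 2)      # elementwise tanh derivative
    W2_grad = np.outer(e, h)
    W1_grad = np.outer(d1, x)
    return W1 - lr * W1_grad, W2 - lr * W2_grad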
def get(self, query):
    tokens = process_tokens(query)
    dfa_ = dfa.DFA()
    for token in tokens:
        print(token)
        print(dfa_.state)
        dfa_.next(token)
        print(dfa_.state)
    ret = {
        'state': dfa_.state.get_name(),  # For testing
        'suggestions': dfa_.state.get_suggestions()
    }
    return jsonify(ret), 200
def get(self, query):
    calls = []
    tokens = process_tokens(query)
    dfa_ = dfa.DFA()
    for token in tokens:
        dfa_.next(token)
    for call in dfa_.get_calls():
        print(call)
    for call in dfa_.translate_calls():
        calls.append(create_api_call(call[0], call[1], call[2]))
    if len(calls) != 0:
        return jsonify({'apiCalls': calls}), 200
    return jsonify({'error': 'Please try again'}), 400
def toDFA(self):
    exp = self.exp
    operators = ('*', '+', '|')
    alphabet = list(set(exp) - set(operators))
    count = 0
    key = str(count)
    # mapping from new keys to the aggregated states
    states = dict()
    states[key] = self.findEpsilon([self.startState])
    # record relations between keys
    paths = dict()
    # store the keys to be processed
    queue = deque()
    queue.append(key)
    while len(queue):
        newkey = queue.popleft()
        for c in alphabet:
            newstate = self.findCondition(states[newkey], c)
            newstate = self.findEpsilon(newstate)
            key = self.getKey(states, newstate)
            if key == 0:
                count += 1
                key = str(count)
                states[key] = newstate
                queue.append(key)
            if newkey not in paths:
                paths[newkey] = dict()
            paths[newkey][c] = key
    sStates = list()
    fStates = list()
    for key in states.keys():
        if self.startState in states[key]:
            sStates.append(key)
        if self.finalState in states[key]:
            fStates.append(key)
    # print(states)
    states = list(states.keys())
    d = dfa.DFA()
    d.setStates(states, paths, sStates, fStates, alphabet)
    return d
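getKey is not shown; from its call sites it looks up an existing key whose aggregated state set equals newstate and returns 0 when there is no match. A sketch consistent with that usage (an assumption, not the original implementation):

def getKey(self, states, newstate):
    """Return the existing key mapped to `newstate`, or 0 if it is new.

    The integer 0 works as a not-found sentinel because the real keys are
    strings ('0', '1', ...), and 0 != '0' in Python.
    """
    for key, aggregated in states.items():
        if aggregated == newstate:
            return key
    return 0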
def main():
    try:
        fileName = sys.argv[1]
    except IndexError:
        fileName = input('Enter the program Name: ')
    userParser = parser.Parser(fileName)
    userParser.format()
    try:
        userDfa = dfa.DFA(userParser.dfaValues[0], userParser.dfaValues[1],
                          userParser.dfaValues[2], userParser.dfaValues[3],
                          userParser.dfaValues[4])
    except:
        exit('Syntax Error Occurred')
    print('Your DFA has been loaded to the shell.')
    string = takeInput()
    while len(string) >= 0:  # len() is never negative, so this loops until the process exits
        try:
            print(userDfa.start(string))
        except exceptionCollection.StringError:
            print('Please enter the valid string')
        string = takeInput()
def to_dfa(self):
    '''
    Convert the NFA to a DFA.
    :return: a dfa.DFA object

    The DFA's states are numbered with natural numbers;
    its input alphabet is the same as the NFA's.
    '''
    DFA_index = 0  # number the DFA states with natural numbers
    # DFA_VN = dict()  # the DFA's state set
    DFA_f = dict()  # the DFA's transition function
    DFA_S = self.epsilon_closure(self.S)  # the DFA's initial state
    DFA_T = []  # the DFA's set of accepting states
    self.C.append(DFA_S)
    while True:
        T = self.C[DFA_index]
        # DFA_VN[DFA_index] = T
        if not T.isdisjoint(self.T):
            # if T contains any NFA accepting state, T is a DFA accepting state
            DFA_T.append(DFA_index)
        DFA_f[DFA_index] = dict()
        for ch in self.VT:
            U = self.epsilon_closure(self.move(T, ch))
            if len(U) == 0:
                # empty set: move on to the next input symbol
                continue
            if U not in self.C:
                self.C.append(U)
            # using the current contents of self.C, map each subset of NFA
            # states to the natural number that represents it
            self.set_dfa_f(DFA_index, ch, U, DFA_f)
        if DFA_index + 1 == len(self.C):
            # no new DFA state left to expand: the conversion is finished
            break
        DFA_index += 1  # move on to the next DFA state
    new_dfa = dfa.DFA([i for i in range(len(self.C))], self.VT,
                      DFA_f, 0, DFA_T)  # the newly generated DFA
    return new_dfa
def import_rules(path):
    '''
    Import DFAs from a rule file.

    :param path: string, path of the rule file
    :return: dict of the DFAs imported, keyed by name, plus their ordering
    '''
    with open(path, 'r') as fr:
        data = json.load(fr)
    rules = dict()
    # 'order' is a list which defines the order of the rules
    order = data['names']
    rules['order'] = order
    for name in order:
        d = dfa.DFA()
        d.decode(data[name])
        rules[name] = d
    return rules
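The rule file this reads is JSON with a "names" list and one encoded DFA per name; the per-rule encoding depends on DFA.decode, which is not shown. A hypothetical file illustrating only the outer shape (the rule names and field contents are made up):

# rules.json (hypothetical):
# {
#     "names": ["no_double_a", "even_zeros"],
#     "no_double_a": { ... whatever DFA.decode expects ... },
#     "even_zeros": { ... }
# }
rules = import_rules('rules.json')
for name in rules['order']:
    print(name, rules[name])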
def create_new():
    structure_type = ''
    while True:
        structure_type = input('Create: (D)FA, (N)FA, R(E), (R)G or (q)uit ')
        if structure_type == 'q':
            return
        if structure_type not in structure_letters:
            print('Invalid structure selected... ')
        else:
            break

    # ToDo
    # check for consistency issues (e.g. function contains a state not in
    # states)
    #
    # discover how to convert the transition function to required format

    if structure_type == 'D':
        print('Creating Deterministic Finite Automaton')
        # str.split('') raises ValueError; split on whitespace instead
        states = input('Insert the states: ').split()
        input_symbols = input('Insert the input symbols: ').split()
        transition_function = input('Insert the transition function: ')
        initial_state = input('Insert the initial state: ')
        accept_states = input('Insert the accept states: ').split()
        d = dfa.DFA(states, input_symbols, transition_function,
                    initial_state, accept_states)
        print(d)
        loaded_structures.append(d)
        return d
    elif structure_type == 'E':
        print('Creating Regular Expression')
        expression = input('Insert the base expression: ')
        r = regex.RE(expression)
        print(r)
        loaded_structures.append(r)
        return r
    elif structure_type == 'G':
        print('Creating Regular Grammar')
        nonterminals = input('Insert the nonterminal symbols: ').split()
        terminals = input('Insert the terminal symbols: ').split()
        productions = input('Insert the grammar productions: ')
        start_symbol = input('Insert the start symbol: ')
        g = rg.RG(nonterminals, terminals, productions, start_symbol)
        print(g)
        loaded_structures.append(g)
        return g
    elif structure_type == 'N':
        print('Creating Nondeterministic Finite Automaton')
        states = input('Insert the states: ').split()
        input_symbols = input('Insert the input symbols: ').split()
        transition_function = input('Insert the transition function: ')
        initial_state = input('Insert the initial state: ')
        accept_states = input('Insert the accept states: ').split()
        n = nfa.NFA(states, input_symbols, transition_function,
                    initial_state, accept_states)
        print(n)
        loaded_structures.append(n)
        return n
def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    # MNIST images are 28x28 pixels, and have one color channel
    input_layer = tf.reshape(features["x"], [-1, 784])

    # First dense layer: 200 nodes
    with tf.variable_scope('dense1') as scope:
        dim = input_layer.get_shape()[1].value
        weights = tf.get_variable(name='weights', shape=[dim, 200],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(
                                      mean=0.0, stddev=0.2, dtype=tf.float32))
        biases = tf.get_variable(name='biases', shape=[200], dtype=tf.float32,
                                 initializer=tf.constant_initializer(
                                     0.0, dtype=tf.float32))
        pre_activation = tf.add(tf.matmul(input_layer, weights), biases,
                                name='pre_activation')
        dense1 = tf.nn.tanh(pre_activation, name='activations')

    # Second dense layer: 100 nodes
    with tf.variable_scope('dense2') as scope:
        dim = dense1.get_shape()[1].value
        weights = tf.get_variable(name='weights', shape=[dim, 100],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(
                                      mean=0.0, stddev=0.2, dtype=tf.float32))
        biases = tf.get_variable(name='biases', shape=[100], dtype=tf.float32,
                                 initializer=tf.constant_initializer(
                                     0.0, dtype=tf.float32))
        pre_activation = tf.add(tf.matmul(dense1, weights), biases,
                                name='pre_activation')
        dense2 = tf.nn.tanh(pre_activation, name='activations')

    # Logits layer
    with tf.variable_scope('logits') as scope:
        dim = dense2.get_shape()[1].value
        weights = tf.get_variable(name='weights', shape=[dim, 10],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(
                                      mean=0.0, stddev=0.2, dtype=tf.float32))
        biases = tf.get_variable(name='biases', shape=[10], dtype=tf.float32,
                                 initializer=tf.constant_initializer(
                                     0.0, dtype=tf.float32))
        pre_activation = tf.add(tf.matmul(dense2, weights), biases,
                                name='pre_activation')
        logits = tf.add(pre_activation, 0.0, name='activations')

    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = DFA.DFA(learning_rate=0.5)
        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(labels=labels,
                                        predictions=predictions["classes"])
    }
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                      eval_metric_ops=eval_metric_ops)
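A model_fn like this is typically wired into a tf.estimator.Estimator. A minimal TF1-style training harness, for context (the model_dir path and batch settings are illustrative, not from the original project):

import numpy as np
import tensorflow as tf

# Assumes cnn_model_fn from above is in scope.
(train_x, train_y), _ = tf.keras.datasets.mnist.load_data()
train_x = train_x.reshape(-1, 784).astype(np.float32) / 255.0
train_y = train_y.astype(np.int32)

classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,
                                    model_dir='/tmp/mnist_dfa_model')
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": train_x}, y=train_y, batch_size=100,
    num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=2000)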
def get(self):
    dfa_ = dfa.DFA()
    return jsonify({'all_words': dfa_.all_words}), 200
    3: {'a': 3, 'b': 5},
    4: {'a': 6, 'b': 2},
    5: {'a': 3, 'b': 0},
    6: {'a': 3, 'b': 1},
}
S = 0
T = {4, 5, 6}
new_dfa = dfa.DFA(VN, VT, f, S, T)
new_dfa.minimize()
print(new_dfa.VN)
print(new_dfa.VT)
for k, v in new_dfa.f.items():
    print(k, v)
print(new_dfa.S)
print(new_dfa.T)
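(The snippet above is truncated: the definitions of VN, VT, and the table entries for states 0-2 are missing.) The minimize() implementation is not shown; DFA minimization is standardly done by partition refinement, repeatedly splitting groups of states that behave differently on some symbol. A generic Moore-style sketch, independent of this dfa module (all names here are illustrative):

def minimize_partitions(VN, VT, f, T):
    """Moore-style partition refinement over a complete transition table f.

    Returns a list of sets; each set is one state of the minimal DFA.
    """
    parts = [set(VN) - set(T), set(T)]
    parts = [g for g in parts if g]  # drop an empty group if all/no states accept
    changed = True
    while changed:
        changed = False
        new_parts = []
        for group in parts:
            buckets = {}
            for s in group:
                # signature: for each symbol, which current group f leads to
                sig = tuple(next(i for i, g in enumerate(parts) if f[s][a] in g)
                            for a in VT)
                buckets.setdefault(sig, set()).add(s)
            new_parts.extend(buckets.values())
            if len(buckets) > 1:
                changed = True
        parts = new_parts
    return parts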
def convert_to_dfa(self):
    dfa_state_size = 1
    final_states = []
    dfa_final_states = []
    dfa_initial_state = 'q0'
    delta_prime_func = dict()
    # dfa_states just stores the names of the states, for example:
    # ['q0', 'q1', ...]
    dfa_states = [dfa_initial_state]
    # Since a state of the new DFA may be made up of several states of the
    # NFA, which would be awkward to store as a flat list, a dictionary maps
    # each DFA state to its set of NFA states, for example:
    # {'q0': ['Q1', 'Q2'], 'q1': ['Q0'], ...}
    # Here the q's are DFA states and the Q's are the equivalent NFA states.
    dfa_states_name = dict()
    dfa_delta_func = dict()
    dfa_moves = []
    dfa_states_name[dfa_initial_state] = [self.initial_state]
    dfa_delta_func[dfa_initial_state] = dict()

    # First, find the new machine's final states, which depend on the NFA's
    # final states.
    for state in self.states:
        lambda_closure = self.lambda_closure(state)
        for s in lambda_closure:
            if s in self.final_states:
                final_states.append(state)

    # Then use lambda_closure to produce the delta-prime transition function
    # of the NFA, which helps produce the resulting DFA machine.
    for state in self.states:
        lambda_closure = self.lambda_closure(state)
        delta_prime_func[state] = dict()
        for alpha in self.alphabet:
            res = set()
            for s in lambda_closure:
                # The NFA may have no transition for a state on the given
                # input, so catch the resulting KeyError.
                try:
                    tmp = self.delta_func[s][alpha]
                    for i in tmp:
                        res = res.union(self.lambda_closure(i))
                except KeyError:
                    pass
            delta_prime_func[state][alpha] = res

    # Compute the new machine's delta transition function until there is no
    # new state (dfa_states grows while it is being iterated).
    for dfa_state in dfa_states:
        for alpha in self.alphabet:
            res = set()
            for state in dfa_states_name[dfa_state]:
                res = res.union(delta_prime_func[state][alpha])
            found = False
            res = list(res)
            next_state = ''
            for name, state in dfa_states_name.items():
                if sorted(state) == sorted(res):
                    found = True
                    next_state = name
            # If the resulting state is not yet in dfa_states, record it as
            # a new state.
            if not found:
                new_state_name = 'q' + str(dfa_state_size)
                dfa_states.append(new_state_name)
                dfa_states_name[new_state_name] = res
                dfa_delta_func[new_state_name] = dict()
                next_state = new_state_name
                dfa_state_size += 1
            dfa_delta_func[dfa_state][alpha] = next_state

    # Compute the final states based on the new names.
    for name, state in dfa_states_name.items():
        for s in state:
            if s in final_states and name not in dfa_final_states:
                dfa_final_states.append(name)

    # Produce the moves in order to call the DFA class.
    for current_state in dfa_states_name:
        for alphabet in self.alphabet:
            dfa_moves.append([current_state, alphabet,
                              dfa_delta_func[current_state][alphabet]])

    return dfa.DFA(self.alphabet, dfa_states, dfa_initial_state,
                   dfa_final_states, dfa_moves)
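lambda_closure is referenced but not defined here; it is the standard epsilon-closure, the set of NFA states reachable from a state through lambda (epsilon) transitions alone. A hedged sketch, assuming epsilon moves are stored in self.delta_func under a '' key (that key, and the dict-of-dicts layout, are assumptions):

def lambda_closure(self, state):
    """All states reachable from `state` via lambda transitions, inclusive."""
    closure = {state}
    stack = [state]
    while stack:
        s = stack.pop()
        # '' as the lambda symbol is an assumption about self.delta_func
        for t in self.delta_func.get(s, {}).get('', []):
            if t not in closure:
                closure.add(t)
                stack.append(t)
    return closure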
import dfa

"""
Initializing a DFA.
"""
d = dfa.DFA()

"""
Add a set of symbols to the alphabet of the DFA.
You can also add individual symbols.
"""
d.add_alphabet(1, 0)

"""
Add a set of states to the DFA.
You can also add individual states.
"""
d.add_states('A', 'BEF', 'CF', 'DF', 'F', 'B', 'C', 'D')

"""
Add an accepting state. If the state is already in the DFA,
the state is marked as an accepting state.
"""
d.add_accepting_state('A')
d.add_accepting_state('BEF')
d.add_accepting_state('DF')
d.add_accepting_state('F')
d.add_accepting_state('D')
d.add_accepting_state('CF')

"""
By default, PyAutomata sets the start state to a state called 'q0'.
You can change this by calling the following method.
"""
d.start_state('A')

"""
By default, PyAutomata does not include a rejecting state in each DFA.