class Queue:
    def __init__(self):
        self.stack = Stack()

    def enqueue(self, value):
        self.stack.push(value)
        return True

    def dequeue(self):
        stack1 = Stack()
        stack2 = Stack()
        # reverse the backing stack through two auxiliary stacks so the
        # first-enqueued element is exposed at the bottom
        for _ in range(self.stack.size() - 1):
            stack1.push(self.stack.pop())
        for _ in range(stack1.size()):
            stack2.push(stack1.pop())
        value = self.stack.pop()  # pop the first (oldest) element
        self.stack = stack2
        return value

    def is_empty(self) -> bool:
        return self.stack.size() == 0

    def size(self) -> int:
        return self.stack.size()
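# None of the snippets in this file define the Stack they use; this is a minimal
# list-backed sketch of the interface they appear to assume (push/pop/top/size,
# is_empty/isEmpty/empty, getFullArray), not the actual utils.stack implementation.
class Stack:
    def __init__(self):
        self._items = []

    def push(self, item):
        self._items.append(item)

    def pop(self):
        return self._items.pop()

    def top(self):
        return self._items[-1]

    def size(self):
        return len(self._items)

    def is_empty(self):
        return len(self._items) == 0

    # some snippets use camelCase or a different verb for the same check
    isEmpty = is_empty
    empty = is_empty

    def getFullArray(self):
        # bottom-to-top snapshot of the stack contents
        return list(self._items)


# Example use of the Queue above (assuming this Stack sketch):
#   q = Queue()
#   q.enqueue("a"); q.enqueue("b")
#   q.dequeue()  # -> "a": FIFO order despite the LIFO backing store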
def isValid(self, s):
    """
    :type s: str
    :rtype: bool
    """
    # valid parentheses: every closing bracket must match the most recent
    # unmatched opening bracket
    # an empty string is trivially valid
    if len(s) == 0:
        return True
    # use a stack to track the currently open brackets
    stack = Stack()
    for i in s:
        if i in ['{', '[', '(']:
            stack.push(i)
        elif i in ['}', ']', ')']:
            if stack.size() == 0:
                return False
            chFromstack = stack.pop()
            # the popped opener must pair with the current closer
            if not ((chFromstack == '{' and i == '}') or
                    (chFromstack == '[' and i == ']') or
                    (chFromstack == '(' and i == ')')):
                return False
    return stack.isEmpty()
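# Example use (hypothetical driver; isValid is written as an instance method, so it
# is assumed to live on some solution class, here called obj):
#   obj.isValid("{[()]}")  # -> True
#   obj.isValid("([)]")    # -> False (closer does not match the last opener)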
def reverse_string(s):
    # push every character onto a stack, then pop them all back off to
    # collect the characters in reverse order
    # note: returns the characters as a list, not a joined string
    temp = Stack()
    reversed_chars = []
    for ch in s:
        temp.push(ch)
    while temp.size() > 0:
        reversed_chars.append(temp.pop())
    return reversed_chars
def _parseRunner(root, genids):
    ps = Stack()
    # ps.push(Parsers.ParserAHD())
    ps.push(ParseColumn.ParseColumn())
    # ps.push(Parsers.ParserFitness())
    ps.push(Parsers.ParserTestAcc())
    ps.push(Parsers.ParserTestFscore())
    ps.push(Parsers.ParserTrainAcc())
    ps.push(Parsers.ParserTrainFscore())
    while not ps.isEmpty():
        ps.pop().parse(root, genids)
def baseConverter(decNumber, base):
    # convert a positive decimal integer to its string representation in the
    # given base (2-16) by pushing remainders onto a stack and popping them
    # back off in most-significant-digit-first order
    digits = "0123456789ABCDEF"
    remstack = Stack()
    while decNumber > 0:
        rem = decNumber % base
        remstack.push(rem)
        decNumber = decNumber // base
    newString = ""
    while not remstack.isEmpty():
        newString = newString + digits[remstack.pop()]
    return newString
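# Example usage (note the function returns "" for 0, since the loop never runs):
#   baseConverter(42, 2)    # -> "101010"
#   baseConverter(233, 16)  # -> "E9"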
def preorder_visit_no_recruse(node):
    # iterative pre-order traversal: visit the node, descend left, and keep
    # pending right subtrees on a stack
    current = node
    s = Stack()
    while current or not s.is_empty():
        if current:
            current.echo()
            if current.right:
                s.push(current.right)
            if current.left:
                current = current.left
            else:
                current = None
        else:
            current = s.pop()
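# Minimal binary-tree node sketch (hypothetical; the real node class with .echo(),
# .left and .right is not shown in this file), used to exercise the traversals here:
class TreeNode:
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

    def echo(self):
        print(self.value)


# Example: preorder_visit_no_recruse(TreeNode(2, TreeNode(1), TreeNode(3)))
# prints 2, 1, 3.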
class QueueViaStacks:
    def __init__(self):
        self.first_stack = Stack()
        self.second_stack = Stack()

    def __processStacks(self):
        # when the outgoing stack is empty, move everything over from the
        # incoming stack so the oldest item ends up on top
        if self.second_stack.isEmpty():
            while not self.first_stack.isEmpty():
                item = self.first_stack.pop()
                self.second_stack.push(item)

    def push(self, item):
        self.__processStacks()
        self.first_stack.push(item)

    def pop(self):
        self.__processStacks()
        return self.second_stack.pop()
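# Example use of the two-stack queue above (assuming the Stack sketch near the top
# of this file):
#   q = QueueViaStacks()
#   q.push(1); q.push(2); q.push(3)
#   q.pop()  # -> 1, the oldest item: second_stack serves items in FIFO order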
def inorder_no_recruse(node):
    # iterative in-order traversal with a single stack: descend left while
    # pushing nodes, then visit the popped node and move into its right subtree
    current = node
    s = Stack()
    while current or not s.is_empty():
        if current:
            s.push(current)
            current = current.left
        else:
            current = s.pop()
            current.echo()
            current = current.right
def _compute_block_id(self, block):
    """
    For every block build a Cartesian tree using a stack-based approach.
    During the build process encode stack pushes as *1* and stack pops as *0*.
    The generated 2b-bit number is the id of the block.

    @param block (List[int]): An array of integer numbers.
    @return code (int): A 2b-bit integer giving the id of the block,
        where b is the size of the block.
    """
    binary_code = [0] * (2 * len(block))
    idx = 0
    S = Stack()
    for i in range(len(block)):
        while (not S.is_empty()) and (S.top() > block[i]):
            S.pop()
            idx += 1
        S.push(block[i])
        binary_code[idx] = 1
        idx += 1
    code = "".join(str(bit) for bit in binary_code)
    return int(code, 2)
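# Worked example (assuming a Stack with push/pop/top/is_empty): for block = [2, 1, 3]
# the push/pop sequence is encoded as the bits 101100, so the block id is 0b101100 = 44.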
class ParseColumn(object):
    __root__ = None
    __currentdataset__ = None
    __currentdir__ = None
    __currentinfile__ = None
    __currentoutfile__ = None
    __rdfile__ = None  # file under reading
    __wrtfile__ = None  # file under writing
    __datasets__ = None
    __dirs__ = None
    __files__ = None
    __genidset__ = None

    def __init__(self):
        pass

    def setRoot(self, r_path):
        self.__root__ = r_path

    def setGenids(self, genids):
        self.__genidset__ = set()
        for gid in genids:
            self.__genidset__.add(gid)

    # dataset
    def readDatasets(self):
        self.__datasets__ = Stack()
        flist = os.listdir(self.__root__)
        for fname in flist:
            fpath = os.path.join(self.__root__, fname)
            if (os.path.isdir(fpath)) and ('-v' in fname):
                self.__datasets__.push(fpath)

    def hasNextDataset(self):
        return not self.__datasets__.isEmpty()

    def nextDataset(self):
        if not self.__datasets__.isEmpty():
            self.__currentdataset__ = self.__datasets__.pop()

    # dir
    def readDirs(self):
        self.__dirs__ = Stack()
        flist = os.listdir(self.__currentdataset__)
        for fname in flist:
            fpath = os.path.join(self.__currentdataset__, fname)
            if (os.path.isdir(fpath)) and ('hamm' in fname):
                self.__dirs__.push(fpath)

    def hasNextDir(self):
        return not self.__dirs__.isEmpty()

    def nextDir(self):
        if not self.__dirs__.isEmpty():
            self.__currentdir__ = self.__dirs__.pop()

    # file
    def readFiles(self):
        self.__files__ = Stack()
        flist = os.listdir(self.__currentdir__)
        for fname in flist:
            fpath = os.path.join(self.__currentdir__, fname)
            if 'Gen' in fname and int(fname.split('.')[1]) in self.__genidset__:
                self.__files__.push(fpath)

    def hasNextFile(self):
        return not self.__files__.isEmpty()

    def nextFile(self):
        if not self.__files__.isEmpty():
            self.__currentinfile__ = self.__files__.pop()

    def openFileReader(self):
        self.__rdfile__ = open(self.__currentinfile__, 'r')

    def openFileWriter(self):
        self.__wrtfile__ = open(self.__currentoutfile__, 'wb+')

    def writeLine(self, line):
        self.__wrtfile__.writelines(line + '\n')
        self.__wrtfile__.flush()

    def closeReader(self):
        self.__rdfile__.close()
        self.__rdfile__ = None

    def closeWriter(self):
        self.__wrtfile__.flush()
        self.__wrtfile__.close()
        self.__wrtfile__ = None

    def setInFile(self, genid):
        self.__currentinfile__ = os.path.join(self.__currentdir__,
                                              "Gen." + str(genid) + ".gpecoc")

    def setOutFile(self):
        if '\\' in self.__currentdataset__:
            datasetName = self.__currentdataset__.split("\\")[-1].split('-')[0]
        else:
            datasetName = self.__currentdataset__.split("/")[-1].split('-')[0]
        fpath = os.path.join(self.__root__, 'a_s' + Configs.version)
        fpath = os.path.join(fpath, 'a_Column')
        check_folder(fpath)
        self.__currentoutfile__ = os.path.join(fpath, datasetName)
        del_dir_tree(self.__currentoutfile__)

    def parse_column(self):
        nextgenid = -1
        string = ''
        while True:
            nextgenid = nextgenid + 1
            if nextgenid >= Configs.generations:
                break
            if (nextgenid + 1) not in self.__genidset__:
                continue
            self.setInFile(nextgenid)
            self.openFileReader()
            reader = self.__rdfile__
            # origin column
            reader.readline()
            line = reader.readline()
            origin_column = (len(line) - 2) / 3
            # add columns
            add_columns = 0
            line = reader.readline()
            while "2:" not in line:
                if "Add one column:" in line:
                    add_columns += 1
                line = reader.readline()
            column = origin_column + add_columns
            string = string + '%d\t' % column
            self.closeReader()
        return string

    def parse(self, root, genids):
        self.setGenids(genids)
        self.setRoot(root)
        self.readDatasets()
        while (self.hasNextDataset()):
            self.nextDataset()
            self.readDirs()
            self.setOutFile()
            self.openFileWriter()
            while (self.hasNextDir()):
                self.nextDir()
                line = self.parse_column()
                self.writeLine(line)
            self.closeWriter()
def parse(rule):
    tokens = tokenizer.tokenize(rule)
    primary_stack = Stack()
    args_stack = Stack()
    head = None
    body = None
    idx = 0
    while idx < len(tokens):
        token = tokens[idx]
        # primary_stack.show()
        # open parenthesis always has the highest precedence
        if token["type"] == tokenizer.TOKEN_OPEN_PARA:
            primary_stack.push(token)
        elif token["type"] == tokenizer.TOKEN_OPRT:
            # Since left has higher precedence than right, when we see an operator
            # token we must make sure all previously pushed operators on the stack
            # are fully parsed before we push any new operator.
            # So, we go through previous operators and try to parse them if we have
            # enough information now.
            while not primary_stack.empty():
                lastOprt = primary_stack.top()
                # Two unary operators must never come immediately after each other
                # without a binary operator between them. They can nest inside one
                # another, but they cannot appear at the same level immediately
                # after each other; if that happens it is a syntax error.
                if (isUnaryOperator(lastOprt["value"])) and (isUnaryOperator(token["value"])):
                    syntax_err("Syntax error near operator " + token["value"])
                # If a binary or unary operator is already on top of the stack and
                # another binary operator shows up, we must first finish parsing the
                # operator on the stack and then deal with the new operator.
                elif (isOperator(lastOprt["value"])) and (isBinaryOperator(token["value"])):
                    if not args_stack.empty():
                        primary_stack.pop()
                        parse_operator(lastOprt, args_stack)
                # If the top of the stack is occupied by a comma and/or parenthesis,
                # we are still parsing the arguments of an operator. In that case we
                # cannot empty the stack, because the current parsing is not done yet.
                # We break the loop and continue with the next tokens.
                else:
                    break
            primary_stack.push(token)
        elif token["type"] == tokenizer.TOKEN_IDENTIFIER:
            nxt = tokens[idx + 1] if idx + 1 != len(tokens) else None
            if (nxt != None) and (nxt["type"] == tokenizer.TOKEN_OPEN_PARA):
                idx, args = parse_predicate_arguments(idx + 2, tokens)
                #formula = Formula()
                #formula.setPredicate(token["value"])
                pred = token["value"]
                #formula.setArgs(args)
                #args_stack.push(LeafNode(formula.getPredicate(), formula))
                if pred == "MATH":
                    assert len(args) == 4, "Expected four parameters for function MATH"
                    args_stack.push(Node(Node.Math, args[0], args[1:]))
                elif pred == "COMP":
                    assert len(args) == 3, "Expected three parameters for function COMP"
                    args_stack.push(Node(Node.Comp, args[0], args[1:]))
                else:
                    args_stack.push(Node(Node.Atom, pred, args))
            else:
                #formula = Formula()
                #formula.setPredicate(token["value"])
                pred = token["value"]
                #formula.setArgs([])
                #args_stack.push(LeafNode(formula.getPredicate(), formula))
                args = []
                args_stack.push(Node(Node.Atom, pred, args))
        elif token["type"] == tokenizer.TOKEN_CLOSE_PARA:
            while True:
                if args_stack.empty():
                    syntax_err("Expected operand or '('")
                oprt = primary_stack.pop()
                if oprt["value"] == '(':
                    break
                parse_operator(oprt, args_stack)
        elif token["type"] == tokenizer.TOKEN_ENTAILMENT_SIGN:
            # In principle, it is possible that the rule has no head.
            # I am not sure whether such rules are useful, but they can
            # exist in theory.
            if args_stack.empty():
                print("No head in the rule")
            else:
                # Before parsing the body of the rule we must make sure all
                # operators in the head are dealt with. So, we go through the
                # operator stack and process all of them.
                while not primary_stack.empty():
                    oprt = primary_stack.pop()
                    parse_operator(oprt, args_stack, True)
                # Pop the head from the operand stack
                head = args_stack.pop()
                head.returnSttt = head.substitutetable
                #if type(head) != list:
                #    head = head
        idx += 1
    while not primary_stack.empty():
        oprt = primary_stack.pop()
        if oprt["value"] == '(':
            syntax_err("Missing ')' in rule ")
        parse_operator(oprt, args_stack)
    body = args_stack.pop()
    if type(body) != list:
        body = [body]
    body = list(reversed(body))
    # By default we only look at the current time point, namely no window
    #registerScopes(body, {"winType": "time_win", "winSize": 0, "winSizeUnit": 1})
    #body = optimize(body)  # Get rid of window operators
    #print_rule(body)
    #print(body.getChildren()[1].getChildren()[0].getChildren()[0].getChildren()[0].getChildren()[0].getFormula().getPredicate())
    #print(body.getChildren()[0].getChildren()[1].getOperator().getParams())
    #print(head.getChildren()[0].getFormula().getArgs())
    return {"head": head, "body": body}
def test(self):
    s1 = Stack()
    s1.push(7)
    s1.push(10)
    s1.push(5)
    s2 = Stack()
    s2.push(1)
    s2.push(3)
    s2.push(8)
    s2.push(12)
    s = sortStack(s1, s2)
    self.assertListEqual(s.getFullArray(), [1, 3, 5, 7, 8, 10, 12])

    s3 = Stack()
    s3.push(7)
    s3.push(10)
    s3.push(5)
    s4 = Stack()
    ss = sortStack(s3, s4)
    self.assertListEqual(ss.getFullArray(), [5, 7, 10])
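# The sortStack helper exercised by the test above is not defined in this file; this
# is a sketch of what the test appears to expect, assuming the Stack sketch near the
# top of this file (where getFullArray returns the contents bottom-to-top):
def sortStack(stack_a, stack_b):
    # drain both input stacks into a plain list
    items = []
    while not stack_a.isEmpty():
        items.append(stack_a.pop())
    while not stack_b.isEmpty():
        items.append(stack_b.pop())
    # rebuild a single stack in ascending order, smallest at the bottom,
    # so getFullArray() reads back sorted
    result = Stack()
    for value in sorted(items):
        result.push(value)
    return result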
finally:
    sys.stdout = std


if __name__ == "__main__":
    datasets = ["vertebral", "zoo"]
    datasets = ["zoo"]
    # init queue
    experiments = list()
    for dataName in datasets:
        s = Stack()
        for i in xrange(10):
            aimFolder = "hamm" + str(10 - i)
            s.push((dataName, aimFolder))
        experiments.append(s)
    # init p_list
    p_list = list()
    for exp in experiments:
        (dataName, aimFolder) = exp.pop()
        p = mul.Process(target=_agp_main_runner, args=(dataName, aimFolder))
        print dataName + " " + aimFolder + " begin"
        p.start()
        p_list.append(p)
    while True:
        time.sleep(5)
        index_remove = list()
        for i in xrange(len(p_list)):
class Parser(object):
    __metaclass__ = ABCMeta

    __root__ = None
    __currentdataset__ = None
    __currentdir__ = None
    __currentinfile__ = None
    __currentoutfile__ = None
    __rdfile__ = None  # file under reading
    __wrtfile__ = None  # file under writing
    __datasets__ = None
    __dirs__ = None
    __files__ = None
    __genidset__ = None

    def __init__(self):
        pass

    def setRoot(self, r_path):
        self.__root__ = r_path

    def setGenids(self, genids):
        self.__genidset__ = set()
        for gid in genids:
            self.__genidset__.add(gid)

    # dataset
    def readDatasets(self):
        self.__datasets__ = Stack()
        flist = os.listdir(self.__root__)
        for fname in flist:
            fpath = os.path.join(self.__root__, fname)
            if (os.path.isdir(fpath)) and ('-v' in fname):
                self.__datasets__.push(fpath)

    def hasNextDataset(self):
        return not self.__datasets__.isEmpty()

    def nextDataset(self):
        if not self.__datasets__.isEmpty():
            self.__currentdataset__ = self.__datasets__.pop()

    # dir
    def readDirs(self):
        self.__dirs__ = Stack()
        flist = os.listdir(self.__currentdataset__)
        for fname in flist:
            fpath = os.path.join(self.__currentdataset__, fname)
            if (os.path.isdir(fpath)) and ('hamm' in fname):
                self.__dirs__.push(fpath)

    def hasNextDir(self):
        return not self.__dirs__.isEmpty()

    def nextDir(self):
        if not self.__dirs__.isEmpty():
            self.__currentdir__ = self.__dirs__.pop()

    # file
    def readFiles(self):
        self.__files__ = Stack()
        flist = os.listdir(self.__currentdir__)
        for fname in flist:
            fpath = os.path.join(self.__currentdir__, fname)
            if 'Gen' in fname and int(fname.split('.')[1]) in self.__genidset__:
                self.__files__.push(fpath)

    def hasNextFile(self):
        return not self.__files__.isEmpty()

    def nextFile(self):
        if not self.__files__.isEmpty():
            self.__currentinfile__ = self.__files__.pop()

    @abstractmethod
    def setInFile(self):
        pass

    @abstractmethod
    def parseFile(self):
        pass

    @abstractmethod
    def setOutFile(self):
        pass

    def openFileReader(self):
        self.__rdfile__ = open(self.__currentinfile__, 'r')

    def openFileWriter(self):
        self.__wrtfile__ = open(self.__currentoutfile__, 'wb+')

    def writeLine(self, line):
        self.__wrtfile__.writelines(line + '\n')
        self.__wrtfile__.flush()

    def closeReader(self):
        self.__rdfile__.close()
        self.__rdfile__ = None

    def closeWriter(self):
        self.__wrtfile__.flush()
        self.__wrtfile__.close()
        self.__wrtfile__ = None

    def parse(self, root, genids):
        self.setGenids(genids)
        self.setRoot(root)
        self.readDatasets()
        while (self.hasNextDataset()):
            self.nextDataset()
            self.readDirs()
            self.setOutFile()
            self.openFileWriter()
            while (self.hasNextDir()):
                self.nextDir()
                self.setInFile()
                self.openFileReader()
                line = self.parseFile()
                self.writeLine(line)
                self.closeReader()
            self.closeWriter()
import os

from database.database import Database
from utils.stack import Stack
from utils.functions import add_occurences, check_file_format

# create a stack to hold directories visited and add the current directory
stack = Stack()
# push the current working directory onto the stack
stack.push(os.getcwd())


class Navigator:
    def __init__(self):
        # holds all of the working directories
        self.dirs = []
        # holds all of the working files
        self.files = []
        # stops the clear feature so the traceback can be viewed
        self.debug = False
        self.menu_choices = [{
            'command': 'previous_directory',
            'hotkey': 'b',
            'menu_action': self.previous_dir
        }, {
            'command': 'view_all_files',
            'hotkey': 'v',
            'menu_action': self.list_directory,
            'menu_action_arg': 'list_files'