def dfs_post_nonrec(tree, proc):
    '''
    Non-recursive post-order traversal
    :param tree: root node of the binary tree
    :param proc: callable applied to each node's data
    :return:
    '''
    stack = LifoQueue()
    node = tree
    while node is not None or not stack.empty():
        while node is not None:
            # Note: during this descent, go left whenever possible; otherwise go right
            stack.put(node)
            if node.left is not None:
                node = node.left
            else:
                node = node.right
        node = stack.get()
        # A property of this algorithm: while a node is being processed,
        # the stack holds exactly its ancestors
        proc(node.data)
        #### From here on this differs from in-order traversal ####
        if stack.empty():
            # An empty stack means the current node has no ancestors, i.e. it is the root.
            # Once the root has been processed, simply break out of the loop.
            break
        tmp = stack.get()  # a LifoQueue-based stack has no peek/top method, so emulate one...
        stack.put(tmp)
        if node is tmp.left:  # was the node just processed a left or a right child?
            node = tmp.right  # left child done, so the right subtree is still pending
        else:
            node = None  # right child done, so the subtree rooted at tmp is fully processed
def inToPost(expr):
    ''' takes as input an infix expression and outputs a postfix expression '''
    result = []
    stack = LifoQueue(maxsize=len(expr))
    # loop iterates over every character in the expression
    for char in expr:
        # non variables are put on a stack where their precedence will determine
        # the order of transfer onto the result queue
        if (Helper.isOp(char) or char == '!'):
            if (stack.empty() or Helper.peek(stack) == '('):
                stack.put(char)
            elif (Helper.precedence(Helper.peek(stack)) > Helper.precedence(char)):
                result.append(stack.get())
                stack.put(char)
            elif (Helper.precedence(Helper.peek(stack)) == Helper.precedence(char)):
                result.append(char)
            else:
                stack.put(char)
        elif (char == '('):
            stack.put(char)
        # consecutively adding operators between parentheses onto result
        elif (char == ')'):
            while (not Helper.peek(stack) == '('):
                result.append(stack.get())
            stack.get()
        # variables are directly put on result
        else:
            result.append(char)
    # remaining characters on the stack are consecutively added to result
    while (not stack.empty()):
        result.append(stack.get())
    return result
def lifo_queue_usage():
    from Queue import LifoQueue
    lifo_queue = LifoQueue()
    lifo_queue.put(1)
    lifo_queue.put(2)
    print lifo_queue.get()  # 2 -- last in, first out
    print lifo_queue.get()  # 1
def graham_scan(points): """ :param points: numpy array of 2-dimensional points :return: Convex hull as another numpy array of points """ ch = LifoQueue() leftmost = points[np.argmin(points[:, 0])] # finding the leftmost point... definitely in CH dtype = [('x', np.float64), ('y', np.float64), ('slope', np.float64)] # preparing a nicer object for sorting cpts = np.zeros(len(points) - 1, dtype=dtype) cpts[:]['x'] = points[1:, 0] cpts[:]['y'] = points[1:, 1] cpts[:]['slope'] = (cpts[:]['y'] - leftmost[1]) / (cpts[:]['x'] - leftmost[0]) # angle <-> slope from leftmost sorted_pts = np.sort(cpts, order=['slope', 'x']) # sort by angle (slope), then distance from leftmost # shows which points are colinear mask = np.zeros(len(sorted_pts), dtype=bool) # getting rid of points with same angle from leftmost mask = np.logical_not(mask) for i in range(len(sorted_pts[1:])): mask[i - 1] = not sorted_pts[i - 1]['slope'] == sorted_pts[i]['slope'] # only keep farthest away sorted_pts = sorted_pts[mask] sorted_pts[:] = sorted_pts[::-1] # sort greatest slope to lowest (move clockwise) pts = np.zeros((len(sorted_pts) + 1, 2)) # putting leftmost back into a new array object pts[1:, 0] = sorted_pts[:]['x'] pts[1:, 1] = sorted_pts[:]['y'] pts[0] = leftmost ch.put(pts[0]) # leftmost and the point with the highest slope are in the CH for sure ch.put(pts[1]) for i, pt in enumerate(pts): if i < 2: continue else: last = ch.get() second_to_last = ch.get() side = which_side(second_to_last, pts[i], last) # Less than 0 => on the left, o/w on the right while side > 0: # if last point put in on right side, it must have been wrong to be in CH last = second_to_last second_to_last = ch.get() side = which_side(second_to_last, pts[i], last) ch.put(second_to_last) ch.put(last) ch.put(pt) return np.array([ch.get() for i in range(ch.qsize())]) # Put the queue into an array
class DummyMessageHandler(MessageHandler): # TODO(steffen): locking def __init__(self): MessageHandler.__init__(self) self._messages = LifoQueue() self._devices = [] def register(self, device): self._devices.append(device) def read_message(self): return self._messages.get() def write_message_from_device(self, message): self._messages.put(message) def write_message(self, message): for d in self._devices: d.handle_message(message) def has_messages(self): for d in self._devices: d.loop() return not self._messages.empty() def stop(self): pass
def _visit(self, node, pre_action=None, post_action=None): """Explore the connected component,""" self.time = self.time + 1 self.dd[node] = self.time self.color[node] = "GREY" Q = LifoQueue() Q.put(node) # node is GREY if pre_action: # when Q.put pre_action(node) while not Q.empty(): source = Q.get() # GREY node is processed for edge in self.graph.iteroutedges(source): if self.color[edge.target] == "WHITE": self.parent[edge.target] = source self.dag.add_edge(edge) self.time = self.time + 1 self.dd[edge.target] = self.time self.color[edge.target] = "GREY" Q.put(edge.target) # target is GREY if pre_action: # when Q.put pre_action(edge.target) self.time = self.time + 1 self.ff[source] = self.time self.color[source] = "BLACK" if post_action: # source became BLACK post_action(source)
class MyQueue():
    def __init__(self):
        self.stackNewest = LifoQueue()
        self.stackOldest = LifoQueue()

    def shiftStacks(self):
        # Only move elements when the "oldest" stack is exhausted, so each
        # element is moved at most once (amortized O(1) per operation).
        if self.stackOldest.empty():
            while not self.stackNewest.empty():
                self.stackOldest.put(self.stackNewest.get())

    def put(self, item):
        self.stackNewest.put(item)

    def get(self):
        self.shiftStacks()
        return self.stackOldest.get()
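# A short usage sketch for MyQueue above: two LIFO stacks combined to give
# first-in, first-out behaviour. The demo function name is illustrative only.
def _demo_my_queue():
    q = MyQueue()
    q.put(1)
    q.put(2)
    q.put(3)
    print(q.get())  # 1
    q.put(4)
    print(q.get())  # 2
    print(q.get())  # 3
    print(q.get())  # 4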
class PooledIncomingQueue(IncomingQueue): def init_queues(self, n=5, buffsize=0, maxsize=1000*1000*1000): maxsize = maxsize / n self.write_executor = ThreadPoolExecutor(poolsize=1, queuesize=100) self.rqfile = FileDequeue(self.qdir, reader=FPSortingQueueFileReader) #self.rqfile = DummyFileDequeue(self.qdir) self.qfiles = [FileEnqueue(self.qdir, suffix=str(i), maxsize=maxsize, buffer=buffsize, executor=self.write_executor) for i in range(n)] self.avail = LifoQueue() for q in self.qfiles: self.avail.put(q) def shutdown(self): super(PooledIncomingQueue, self).shutdown() self.write_executor.shutdown() def add(self, curis): processed = 0 t0 = time.time() enq = self.avail.get() t = time.time() - t0 if t > 0.1: logging.warn('self.avail.get() %.4f', t) try: enq.queue(curis) self.addedcount += len(curis) processed += len(curis) return dict(processed=processed) finally: t0 = time.time() self.avail.put(enq) t = time.time() - t0 if t > 0.1: logging.warn('slow self.avail.put() %.4f', t)
def getPath(startingPrime, finalPrime): # print(type(startingPrime)) # print("starting Prime: " + str(startingPrime)) # print(type(finalPrime)) # print("final Prime: " + str(finalPrime)) # your code here #depth limit is 5 #declare stack closedList.clear() stack = LifoQueue() #push <startingPrime (currentPrime), 0 (depth)> into the stack stack.put((startingPrime, 0)) outputString = "" #while stack is not empty while (not stack.empty()): #pop a from stack a = stack.get() #if a.currentPrime == finalPrime if (a[0] == finalPrime): break #else if a.depth >= 5 elif (a[1] >= 5): continue #find all neighbor of currentPrime neighbor = getPossibleActions(a[0]) for i in range(0, len(neighbor)): #set the parent of the neighbor to currentPrime closedList[str(neighbor[i])] = a[0] #push all neighbor as <neighbor,a.depth + 1> into the stack stack.put((neighbor[i], a[1] + 1)) #if(currentPRime != finalPrime) if (a[0] != finalPrime): #unsolvable outputString = 'UNSOLVABLE' else: current = a[0] outputString = "" outputString = str(current) + " " + outputString while (current != startingPrime): current = closedList[str(current)] outputString = str(current) + " " + outputString # outputString = startingPrime + " " + outputString # file = open('output.txt','w') # print >> file,outputString # file.close() sys.stdout.write(outputString + "\n") return
class EulerianCycleDFS: """Finding an Eulerian cycle in a multigraph. Attributes ---------- graph : input graph eulerian_cycle : list of nodes (length |E|+1) _graph_copy : graph, private _stack : LIFO queue, private Notes ----- Based on the description from: http://eduinf.waw.pl./inf/alg/001_search/0135.php """ def __init__(self, graph): """The algorithm initialization.""" self.graph = graph if not self._is_eulerian(): raise ValueError("the graph is not eulerian") self.eulerian_cycle = list() self._graph_copy = self.graph.copy() self._stack = LifoQueue() import sys recursionlimit = sys.getrecursionlimit() sys.setrecursionlimit(max(self.graph.v() * 2, recursionlimit)) def run(self, source=None): """Executable pseudocode.""" if source is None: # get first random node source = self.graph.iternodes().next() self._visit(source) while not self._stack.empty(): self.eulerian_cycle.append(self._stack.get()) #del self._stack #del self._graph_copy def _visit(self, source): """Visiting node.""" while self._graph_copy.outdegree(source) > 0: edge = self._graph_copy.iteroutedges(source).next() self._graph_copy.del_edge(edge) self._visit(edge.target) self._stack.put(source) def _is_eulerian(self): """Test if the graph is eulerian.""" if self.graph.is_directed(): # We assume that the graph is strongly connected. for node in self.graph.iternodes(): if self.graph.indegree(node) != self.graph.outdegree(node): return False else: # We assume that the graph is connected for node in self.graph.iternodes(): if self.graph.degree(node) % 2 == 1: return False return True
def get_max_flow(directed_graph, source, sink):
    residual = {edge: edge.capacity
                for edges in directed_graph.values()
                for edge in edges}
    flow_paths = []

    def flow_path(path):
        max_flow = float("inf")
        for edge in path:
            max_flow = min(max_flow, residual[edge])
        for edge in path:
            residual[edge] -= max_flow
        flow_paths.append((max_flow, path))

    # Note: despite the name, a LifoQueue makes this a depth-first search over paths.
    bfs_queue = LifoQueue()
    bfs_queue.put([])
    while not bfs_queue.empty():
        path = bfs_queue.get()
        for edge in directed_graph[source if not path else path[-1].to_node]:
            if residual[edge] > 0:
                new_path = path[:]
                new_path.append(edge)
                if edge.to_node == sink:
                    flow_path(new_path)
                else:
                    bfs_queue.put(new_path)
    return flow_paths
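# A minimal smoke test for get_max_flow above. The Edge type and the
# dict-of-lists graph shape are assumptions inferred from the attribute
# accesses (edge.capacity, edge.to_node) in the function, not part of the
# original source.
from collections import namedtuple

Edge = namedtuple('Edge', ['to_node', 'capacity'])

def _demo_get_max_flow():
    graph = {
        's': [Edge('a', 3), Edge('b', 2)],
        'a': [Edge('t', 2)],
        'b': [Edge('t', 3)],
        't': [],
    }
    for flow, path in get_max_flow(graph, 's', 't'):
        print(flow, [e.to_node for e in path])
    # Expected: two augmenting paths, each carrying 2 units of flow.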
def __queue_speech_segment(self, speech_segment): if self.__ignore_sample: return self.__queue_lock.acquire(True) if self.__speech_segments.qsize() == self.__max_queue_size: temp_queue = LifoQueue() while not self.__speech_segments.empty(): temp_queue.put(self.__speech_segments.get()) # Discard the oldest data temp_queue.get() while not temp_queue.empty(): self.__speech_segments.put(temp_queue.get()) self.__speech_segments.put(speech_segment) self.__queue_lock.release()
def _find_path_dfs(self): """Finding augmenting paths in the residual network.""" parent = dict((node, None) for node in self.residual.iternodes()) # Capacity of found path to node. capacity = {self.source: float("inf")} Q = LifoQueue() Q.put(self.source) while not Q.empty(): node = Q.get() for edge in self.residual.iteroutedges(node): cap = edge.weight - self.flow[edge.source][edge.target] if cap > 0 and parent[edge.target] is None: parent[edge.target] = edge.source capacity[edge.target] = min(capacity[edge.source], cap) if edge.target != self.sink: Q.put(edge.target) else: # Backtrack search and write flow. target = self.sink while target != self.source: node = parent[target] self.flow[node][target] += capacity[self.sink] self.flow[target][node] -= capacity[self.sink] target = node return capacity[self.sink] return 0
class LiveviewStreamThread(threading.Thread): def __init__(self, url): # Direct class call `threading.Thread` instead of `super()` for python2 capability threading.Thread.__init__(self) self.lv_url = url self._lilo_head_pool = LifoQueue() self._lilo_jpeg_pool = LifoQueue() self.header = None self.frameinfo = [] def run(self): sess = urlopen(self.lv_url) while True: header = sess.read(8) ch = common_header(header) data = sess.read(128) payload = payload_header(data, payload_type=ch['payload_type']) if ch['payload_type'] == 1: data_img = sess.read(payload['jpeg_data_size']) assert len(data_img) == payload['jpeg_data_size'] self._lilo_head_pool.put(header) self._lilo_jpeg_pool.put(data_img) elif ch['payload_type'] == 2: self.frameinfo = [] for x in range(payload['frame_count']): data_img = sess.read(payload['frame_size']) self.frameinfo.append(payload_frameinfo(data_img)) sess.read(payload['padding_size']) def get_header(self): if not self.header: try: self.header = self._lilo_head_pool.get_nowait() except Exception as e: self.header = None return self.header def get_latest_view(self): # note this is a blocking call data_img = self._lilo_jpeg_pool.get() # retrive next header try: self.header = self._lilo_head_pool.get_nowait() except Exception as e: self.header = None return data_img def get_frameinfo(self): return self.frameinfo
class camera_receiver(object):
    def __init__(self, name, topic, msgtype, verbose=False):
        self.topic = topic
        self.msgtype = msgtype
        self.name = name
        self.bridge = CvBridge()
        self.convertor = self.convertor_query()
        self.verbose = verbose
        self.subthread = None
        self.ros_image = LifoQueue(maxsize=5)

    def callback(self, image):
        if self.ros_image.full():
            # Queue.empty() only reports emptiness; call get() to actually
            # drop an element and make room for the newest frame.
            self.ros_image.get()
        self.ros_image.put(image)

    def __call__(self):
        def monitor(threadname):
            # disable_signals could be set to True in the future
            #rospy.init_node(self.name, anonymous=True)
            rospy.Subscriber(self.topic, self.msgtype, self.callback)
        #monitor()
        #rospy.spin()
        self.subthread = thread.start_new_thread(monitor, (self.name, ))

    def convertor_query(self):
        if self.msgtype == Image:
            def bridge(image, **args):
                return self.bridge.imgmsg_to_cv2(image, **args)
            return bridge
        elif self.msgtype == CompressedImage:
            def bridge(image, **args):
                img_array_1d = np.fromstring(image.data, np.uint8)
                cv_img = cv2.imdecode(img_array_1d, 1)  # cv2.CV_LOAD_IMAGE_COLOR
                return cv_img
            return bridge
        else:
            print(
                "this receiver only supports sensor_msgs.msg.Image and sensor_msgs.msg.CompressedImage"
            )
            self.__del__()

    def spit(self, **args):
        # desired_encoding="passthrough"
        image = self.ros_image.get()
        if self.verbose:
            print('received image of type: "%s"' % getattr(image, 'format', self.msgtype))
        screen = self.convertor(image, **args)
        return screen

    def __del__(self):
        print("the camera_receiver {} was deleted".format(self.name))
class HierholzerWithEdges: """Finding an Eulerian cycle in a multigraph. Attributes ---------- graph : input graph eulerian_cycle : list of edges (length |E|) _graph_copy : graph, private _stack : LIFO queue, private Notes ----- Based on the description from: https://en.wikipedia.org/wiki/Eulerian_path """ def __init__(self, graph): """The algorithm initialization.""" self.graph = graph if not self._is_eulerian(): raise ValueError("the graph is not eulerian") self.eulerian_cycle = list() self._graph_copy = self.graph.copy() self._stack = LifoQueue() def run(self, source=None): """Executable pseudocode.""" if source is None: # get first random node source = self.graph.iternodes().next() while True: if self._graph_copy.outdegree(source) > 0: edge = self._graph_copy.iteroutedges(source).next() self._stack.put(edge) self._graph_copy.del_edge(edge) source = edge.target else: edge = self._stack.get() source = edge.source self.eulerian_cycle.append(edge) if self._stack.empty(): break self.eulerian_cycle.reverse() #del self._stack #del self._graph_copy def _is_eulerian(self): """Test if the graph is eulerian.""" if self.graph.is_directed(): # We assume that the graph is strongly connected. for node in self.graph.iternodes(): if self.graph.indegree(node) != self.graph.outdegree(node): return False else: # We assume that the graph is connected. for node in self.graph.iternodes(): if self.graph.degree(node) % 2 == 1: return False return True
class EulerianCycleDFS: """Finding an Eulerian cycle in a multigraph, complexity O(E). Attributes ---------- graph : input graph eulerian_cycle : list of nodes (length |E|+1) _graph_copy : graph, private _stack : LIFO queue, private Notes ----- Based on the description from: http://eduinf.waw.pl./inf/alg/001_search/0135.php """ def __init__(self, graph): """The algorithm initialization.""" self.graph = graph if not self._is_eulerian(): raise ValueError("the graph is not eulerian") self.eulerian_cycle = list() self._graph_copy = self.graph.copy() self._stack = LifoQueue() import sys recursionlimit = sys.getrecursionlimit() sys.setrecursionlimit(max(self.graph.v()**2, recursionlimit)) def run(self, source=None): """Executable pseudocode.""" if source is None: # get first random node source = next(self.graph.iternodes()) self._visit(source) while not self._stack.empty(): self.eulerian_cycle.append(self._stack.get()) #del self._stack #del self._graph_copy def _visit(self, source): """Visiting node.""" while self._graph_copy.outdegree(source) > 0: edge = next(self._graph_copy.iteroutedges(source)) self._graph_copy.del_edge(edge) self._visit(edge.target) self._stack.put(source) def _is_eulerian(self): """Test if the graph is eulerian.""" if self.graph.is_directed(): # We assume that the graph is strongly connected. for node in self.graph.iternodes(): if self.graph.indegree(node) != self.graph.outdegree(node): return False else: # We assume that the graph is connected for node in self.graph.iternodes(): if self.graph.degree(node) % 2 == 1: return False return True
def Plan(self, start_config, goal_config): start_time = time.time() if self.visualize and hasattr(self.planning_env, "InitializePlot"): self.planning_env.InitializePlot(goal_config) plan = [] # TODO: Here you will implement the breadth first planner # The return path should be a numpy array # of dimension k x n where k is the number of waypoints # and n is the dimension of the robots configuration space q = LifoQueue() start_id = self.planning_env.discrete_env.ConfigurationToNodeId(start_config) goal_id = self.planning_env.discrete_env.ConfigurationToNodeId(goal_config) found = False q.put(start_id) explored =[start_id] backtrack = {} backtrack[start_id] = None n= 0 while (q.qsize()>0) and not found: current = q.get() successors = self.planning_env.GetSuccessors(current) for successor in successors: if not successor in backtrack: n = n+1 q.put(successor) #explored.append(successor) backtrack[successor] = current if self.visualize: s = self.planning_env.discrete_env.NodeIdToConfiguration(successor) c = self.planning_env.discrete_env.NodeIdToConfiguration(current) self.planning_env.PlotEdge(c,s) if successor == goal_id: found = True break # Shortest Path path = [] path.append(self.planning_env.discrete_env.NodeIdToConfiguration(goal_id)) element = backtrack[goal_id] while element is not None: path.append(self.planning_env.discrete_env.NodeIdToConfiguration(element)) element = backtrack[element] plan = path[::-1] if self.visualize: for i in range(len(path) - 1): self.planning_env.PlotRedEdge(path[i],path[i+1]) print "number of nodes" print n print "time (in seconds):" print time.time()- start_time path_length = 0 for i in range(len(path) - 1): path_length = path_length + self.planning_env.ComputeDistance(self.planning_env.discrete_env.ConfigurationToNodeId(path[i]), self.planning_env.discrete_env.ConfigurationToNodeId(path[i+1])) print "path path_length" print path_length return plan
def pushGoals(self, mapNode, start, marker_container, isreverted, isPathOnService): # x=round(int(target['x'])/float(self.RESOLUTION*0.5),0) # y=round(int(target['y'])/float(self.RESOLUTION*0.5),0) revert = [] x = start['x'] y = start['y'] # goalQueue=LifoQueue() goalQueue = Queue() goalLifo = LifoQueue() goalQueue.put(self.createGoal(x, y)) try: prev = mapNode[str(int(x)) + '_' + str(int(y))] # FIXME TO CHECK NONE VALUE while prev != None: # x=round(int(prev.split('_')[0])/float(self.RESOLUTION*0.5),0) # y=round(int(prev.split('_')[1])/float(self.RESOLUTION*0.5),0) print('GOAL -->' + prev) x = int(prev.split('_')[0]) y = int(prev.split('_')[1]) currentgoal = self.createGoal(x, y) self.createGoalMarker(currentgoal, marker_container, x, y) # rospy.sleep(0.01) goalQueue.put(currentgoal) prev = mapNode[str(x) + '_' + str(y)] except KeyError as e: print('end reverse path') self.pub_marker.publish(marker_container) if (isreverted): while not goalQueue.empty(): goalLifo.put(goalQueue.get()) while not goalLifo.empty(): goalQueue.put(goalLifo.get()) if isPathOnService: ### TODO ### call here the local planner service (self.local_planner_service) ### goalQueue: queue of goal to acheive (Posestamped ros message) ### ### self.local_planner_service: service to call the local planner ( TODO need to be created on the ShortPathMng constructor) # # # # # TODO # # # # ### print('') else: while not goalQueue.empty(): self.pub_goal.publish(goalQueue.get()) rospy.sleep(2)
def dfSearch(start, actions, goalTest, depthLimit=False):
    """Depth-First Search"""
    queue = LifoQueue()
    queue.put(start)
    while True:
        if queue.empty():
            # The search space is exhausted without satisfying goalTest.
            return None
        node = queue.get()
        if goalTest(node):
            return node
        if (node.depth <= depthLimit) or (depthLimit is False):
            queue = node.expand(queue, actions)
class MenuAction(object): def __init__(self): self.undo_commands = LifoQueue() self.commands = defaultdict(Actions) def set_command(self, item, activate, deactivate): self.commands[item] = Actions(activate, deactivate) def activate(self, item): action = self.commands[item].activate action.execute() self.undo_commands.put(action) def deactivate(self, item): action = self.commands[item].deactivate action.execute() self.undo_commands.put(action) def undo(self): if not self.undo_commands.empty(): self.undo_commands.get().undo()
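# A minimal sketch showing MenuAction above in use. The LightOnCommand and
# LightOffCommand classes are illustrative assumptions; MenuAction only
# requires objects with an execute()/undo() interface, and Actions is assumed
# to be a two-field (activate, deactivate) namedtuple as implied by its usage.
class LightOnCommand(object):
    def execute(self):
        print("light on")

    def undo(self):
        print("light off")

class LightOffCommand(object):
    def execute(self):
        print("light off")

    def undo(self):
        print("light on")

def _demo_menu_action():
    menu = MenuAction()
    menu.set_command("light", LightOnCommand(), LightOffCommand())
    menu.activate("light")    # light on
    menu.deactivate("light")  # light off
    menu.undo()               # light on -- undoes the most recent command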
class stack():
    def __init__(self):
        self.s = LifoQueue()

    def push(self, x):
        self.s.put(x)

    def pop(self):
        return self.s.get()

    def empty(self):
        return self.s.empty()
def stackDFS(Graph, vroot):
    """ Depth First Search: stack version """
    Stack = LifoQueue()
    Stack.put(vroot)
    while not Stack.empty():
        iV = Stack.get()
        print ("Visit :", iV)
        Graph.setVisited(iV)
        for jV in Graph.VertexList:
            if Graph.Edges[iV, jV] and not Graph.Visited[jV]:
                Stack.put(jV)
def evalExpr(row, postFix, var): ''' evaluates a post fix boolean expression ''' stack = LifoQueue(len(postFix)) for char in postFix: # operators removes the top two variables of the stack and output their result onto the stack if (Helper.isOp(char)): operand2 = stack.get() if (operand2 in var): operand2 = row[var.index(operand2)] operand1 = stack.get() if (operand1 in var): operand1 = row[var.index(operand1)] stack.put(Helper.evaluate(operand1, char, operand2)) # ! removes the top of the stack and puts its boolean opposite on the stack elif (char == '!'): operand = stack.get() if (operand in var): operand = row[var.index(operand)] stack.put(Helper.notGate(operand)) # variables are directly added to the stack else: stack.put(char) return stack.get()
def depthLimited(self, depth):
    leaves = LifoQueue()
    leaves.put(self.start)
    while True:
        if leaves.empty():
            return None
        actual = leaves.get()
        if actual.goalState():
            return actual
        elif actual.depth != depth:  # compare values, not identity ("is not" is unreliable for ints)
            succ = actual.succ()
            while not succ.empty():
                leaves.put(succ.get())
def isValid(self, A):
    stack = LifoQueue()
    brackets = {'(': ')', '[': ']', '{': '}'}
    for x in A:
        if x in brackets.keys():
            stack.put(x)
        else:
            if stack.empty():
                return 0
            if x != brackets[stack.get()]:
                return 0
    if stack.empty():
        return 1
    return 0
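# A short check of isValid above, assuming it is defined on a LeetCode-style
# Solution class (the class name is an assumption, not in the original).
def _demo_is_valid():
    sol = Solution()
    print(sol.isValid("([{}])"))  # 1 -- balanced
    print(sol.isValid("([)]"))    # 0 -- mismatched nesting
    print(sol.isValid("((("))     # 0 -- unclosed brackets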
def Plan(self, start_config, goal_config): start_time = time.time() plan = [] # TODO: Here you will implement the depth first planner # The return path should be a numpy array # of dimension k x n where k is the number of waypoints # and n is the dimension of the robots configuration space plan.append(start_config) plan.append(goal_config) nodes_stack = LifoQueue() start_id=self.planning_env.discrete_env.ConfigurationToNodeId(start_config) nodes_stack.put(start_id) end_id = self.planning_env.discrete_env.ConfigurationToNodeId(goal_config) curr_id=start_id seen=[start_id] while curr_id!=end_id: successors = self.planning_env.GetSuccessors(curr_id) for i in range(0,numpy.size(successors,0)): # print "succesor"+str(self.planning_env.discrete_env.NodeIdToConfiguration(successors[i])) # print "grid"+str(self.planning_env.discrete_env.NodeIdToGridCoord(successors[i])) # print "from depth"+str(successors) # print "from depth single"+str(successors[i]) #print seen if successors[i] in seen: continue else: nodes_stack.put(successors[i]) # print nodes_stack node_id=nodes_stack.get() self.nodes[node_id]=curr_id curr_id=node_id seen.append(curr_id) plan_id=curr_id while plan_id != start_id: successor_id=self.nodes.get(plan_id) plan.insert(1,self.planning_env.discrete_env.NodeIdToConfiguration(successor_id)) plan_id=successor_id #raw_input('Press any key to begin planning') plan_time = time.time()-start_time number_of_nodes = numpy.size(plan,0) plan_length = self.Plan_Length(plan) print '---- DFS Stats ----' print 'Plan Time: ' + str(plan_time) print 'Plan Length: ' + str(plan_length) print 'Num of Nodes: ' + str(number_of_nodes) return plan
def DFS(no): global tempo global listaAdjacencia global cores global distancias global oredenacao cores[no] = CINZA pilha = LifoQueue() pilha.put(no) tempo += 1 distancias[no][INICIO] = tempo while not pilha.empty(): no = pilha.get() pilha.put(no) adjacenteBranco = False for adjacente in listaAdjacencia[no]: if cores[adjacente] == BRANCO: adjacenteBranco = True pilha.put(adjacente) tempo += 1 distancias[adjacente][INICIO] = tempo cores[adjacente] = CINZA break if not adjacenteBranco: pilha.get() tempo += 1 distancias[no][FINAL] = tempo cores[no] = PRETO oredenacao.appendleft(no)
class NodeLIFOQueue(object):
    def __init__(self):
        self.nodes = LifoQueue()
        self.priorities = []

    def append(self, node, priority=1.0):
        self.nodes.put(node)
        self.priorities.append(priority)

    def sample(self):
        return self.nodes.get()

    def __len__(self):
        return self.nodes.qsize()
def visit_in_order_iterative(self, current_node):
    stack = LifoQueue()
    done = False
    while not done:
        if current_node:
            stack.put(current_node)
            current_node = current_node.left_child
        else:
            if stack.empty():
                done = True
            else:
                current_node = stack.get()
                self.return_array.append(current_node.value)
                current_node = current_node.right_child
class BSTIterator(object): """ the smallest value of current node is at leftest of current node, in order to reach smallest value, we have to search to leftest, however after return smallest value, we have to return to its father node, since there is no pointer to a node's father, we have to cache a node's father node when searching, this is why stack is used. when a node is pop from stack, if the node have a right node the next smallest node is at right sub tree, the right node and it left node, and left node's left node should be put to stack and waiting for pop """ def __init__(self, root): """ :type root: TreeNode """ self.stack = LifoQueue() self.push(root) def next(self): """ @return the next smallest number :rtype: int """ if not self.stack.empty(): node = self.stack.get() # every time to pop a node from stack, # push right node and its leftest node to stack self.push(node.right) return node.val def hasNext(self): """ @return whether we have a next smallest number :rtype: bool """ if not self.stack.empty(): return True return False def push(self, node): """ push node and its left side node to stack recursively :param node: :return: """ if node is None: return self.stack.put(node) self.push(node.left)
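# A small walkthrough of BSTIterator above. The TreeNode class is an
# assumption (val/left/right attributes), mirroring the usual LeetCode shape.
class TreeNode(object):
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def _demo_bst_iterator():
    #       7
    #      / \
    #     3   15
    #         / \
    #        9   20
    root = TreeNode(7, TreeNode(3), TreeNode(15, TreeNode(9), TreeNode(20)))
    it = BSTIterator(root)
    while it.hasNext():
        print(it.next())  # 3, 7, 9, 15, 20 -- ascending order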
def __dereference_stack(self, items): stack = LifoQueue() stack.put(items) ref_list = [] while stack.qsize(): item = stack.get() if isinstance(item, list): for index, im in enumerate(item): if isinstance(im, DBRef) and (not im.database or im.database == self.__collection.database.name): ref_list.append(( item, index, im.collection, im.id )) else: stack.put(im) elif isinstance(item, dict): for k, v in item.iteritems(): if isinstance(v, DBRef) and (not v.database or v.database == self.__collection.database.name): ref_list.append(( item, k, v.collection, v.id )) else: stack.put(v) elif hasattr(item, "__iter__"): for im in item: stack.put(im) if ref_list: ref_query = {} for _, _, coll_name, _id in ref_list: if coll_name not in ref_query: ref_query[coll_name] = set() ref_query[coll_name].add(_id) ref_result = {} for coll_name, id_list in ref_query.iteritems(): _ims = list(self.__collection.database[coll_name].find({"_id": { "$in": list(id_list) }})) for _im in _ims: ref_result[(coll_name, _im["_id"])] = _im for owner, key, coll_name, _id in ref_list: if (coll_name, _id) in ref_result: owner[key] = ref_result[(coll_name, _id)]
class Stack(object):
    """docstring for stack"""

    def __init__(self, maxsize):
        if maxsize > 65535:
            self.throw('StackOverflowError')
        self.queue = LifoQueue(maxsize)

    # Pop the top element off the stack and return it
    def pop(self):
        isEmpty = self.queue.empty()
        if isEmpty:
            self.throw('this stack is null ')
        else:
            return self.queue.get()

    # Push an element onto the top of the stack
    def push(self, value):
        isFull = self.queue.full()
        if isFull:
            self.throw('this stack is full')
        else:
            self.queue.put(value)

    def throw(self, msg):
        raise BaseException(msg)

    # Return the underlying queue's contents
    def list(self):
        return self.queue.queue

    # Empty the queue, one element at a time
    def clear(self):
        isEmpty = self.queue.empty()
        if not isEmpty:
            self.queue.get()
            self.clear()
def cloneGraph(self, node): if not node: return None nodeCopy = UndirectedGraphNode(node.label) visited = {node: nodeCopy} stack = Stack() stack.put(node) while not stack.empty(): node = stack.get() for neighbor in node.neighbors: if neighbor not in visited: visited[neighbor] = UndirectedGraphNode(neighbor.label) stack.put(neighbor) visited[node].neighbors.append(visited[neighbor]) return nodeCopy
class PooledEnqueue(object): def __init__(self, qdir, n=5, maxsize=1000*1000*1000, **qargs): maxsize = maxsize / n self.qdir = qdir self.write_executor = ThreadPoolExecutor(poolsize=1, queuesize=100) self.queues = [FileEnqueue(self.qdir, suffix=str(i), maxsize=maxsize, executor=self.write_executor, **qargs) for i in range(n)] self.avail = LifoQueue() for q in self.queues: self.avail.put(q) self.addedcount = 0 def get_status(self): qq = [q.get_status() for q in self.queues] r = dict( buffered=sum(s['buffered'] for s in qq), pending=sum(s['pending'] for s in qq), queues=qq) return r def _flush(self): for q in self.queues: q._flush() def close(self): for q in self.queues: q.close() self.write_executor.shutdown() def queue(self, curis): t0 = time.time() enq = self.avail.get() t = time.time() - t0 if t > 0.1: logging.warn('self.avail.get() %.4f', t) try: enq.queue(curis) self.addedcount += len(curis) finally: t0 = time.time() self.avail.put(enq) t = time.time() - t0 if t > 0.1: logging.warn('slow self.avail.put() %.4f', t)
def dfs_pre_nonrec(tree, proc):
    '''
    Non-recursive pre-order traversal
    :param tree: root node of the binary tree
    :param proc: callable applied to each node's data
    :return:
    '''
    stack = LifoQueue()
    node = tree
    stack.put(node)
    while not stack.empty():
        node = stack.get()
        proc(node.data)
        # Push the right child before the left one so the left subtree is processed first.
        if node.right is not None:
            stack.put(node.right)
        if node.left is not None:
            stack.put(node.left)
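# A tiny demonstration of dfs_pre_nonrec and dfs_post_nonrec above. The Node
# class (data/left/right attributes) is an assumption inferred from the
# traversal code, not part of the original source.
class Node(object):
    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

def show(x):
    print(x)

def _demo_traversals():
    #     1
    #    / \
    #   2   3
    #  / \
    # 4   5
    tree = Node(1, Node(2, Node(4), Node(5)), Node(3))
    dfs_pre_nonrec(tree, show)   # 1 2 4 5 3
    dfs_post_nonrec(tree, show)  # 4 5 2 3 1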
def __init__(self, g, s):
    queue = LifoQueue()
    self.marked = {}
    self.edgeTo = {}
    self.s = s
    for v in range(1, g.vertices() + 1):
        self.marked[v] = False
        self.edgeTo[v] = -1
    self.marked[s] = True
    queue.put(s)
    while not queue.empty():
        v = queue.get()
        for w in g.adj(v):
            if not self.marked[w]:
                queue.put(w)
                self.marked[w] = True
                self.edgeTo[w] = v
class Player(Fighter): """A Player character, inherits from Fighter Returns: A player object Functions: update, calcNewPos Attributes: """ def __init__(self, name, imagelist, colour, screenwidth, screenheight, *groups): super(Player, self).__init__(name, imagelist, colour, screenwidth, screenheight, *groups) self.directionqueue = LifoQueue() self.directiondict = {"up": False, "down": False, "left": False, "right": False} self.hp = 10 def handlekeyevent(self, keyevent): """ Handle input and set direction or attacking based on rules :param keyevent: (dict) Keyed on 'action' (e.g. 'keydown') and 'key' (e.g. 'up', 'fire') :return: """ if keyevent["action"] == "keydown": if keyevent["key"] in self.directiondict: self.directiondict[keyevent["key"]] = True self.directionqueue.put(keyevent["key"]) self.direction = keyevent["key"] self.moving = True elif keyevent["key"] == "fire": self.attacking = True elif keyevent["action"] == "keyup": if keyevent["key"] in self.directiondict: self.directiondict[keyevent["key"]] = False elif keyevent["key"] == "fire": self.attacking = False if keyevent["key"] in self.directiondict and self.moving: if not self.directiondict[self.direction]: while not self.directionqueue.empty(): self.direction = self.directionqueue.get() if self.directiondict[self.direction]: break if self.directionqueue.empty(): self.moving = False for direction, active in self.directiondict.iteritems(): if active: self.direction = direction self.moving = True
def BFS(graph, src, sink, augPath):
    visited = [False] * len(graph)
    # Note: a LifoQueue gives LIFO order, so this actually explores depth-first;
    # a plain FIFO Queue would make it a true breadth-first search.
    queue = LifoQueue()
    queue.put(src)
    visited[src] = True
    while not queue.empty():
        node = queue.get()
        for ind, val in enumerate(graph[node]):
            if not visited[ind] and val > 0:
                queue.put(ind)
                visited[ind] = True
                augPath[ind] = node
    if visited[sink]:
        return True
    else:
        return False
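# A quick check of BFS above on a small capacity matrix, where graph[u][v]
# holds the remaining capacity from u to v and augPath records each node's
# predecessor. The demo function name is illustrative only.
def _demo_bfs_path():
    graph = [
        [0, 3, 2, 0],
        [0, 0, 0, 2],
        [0, 0, 0, 3],
        [0, 0, 0, 0],
    ]
    augPath = [-1] * len(graph)
    if BFS(graph, 0, 3, augPath):
        # Walk predecessors back from the sink to recover the path that was found.
        node, path = 3, []
        while node != -1:
            path.append(node)
            node = augPath[node]
        print(path[::-1])  # e.g. [0, 2, 3] with the LIFO traversal order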
class ViewManager(object): def __init__(self): self.main_frame = None self.current_view = None self.last_view_stack = LifoQueue() self.header = None self.footer = None def start(self): self._loop = urwid.MainLoop(self.main_frame, self.get_pallette(), unhandled_input=self.on_keypress) self._loop.run() def change_view(self, view): self.last_view_stack.put(self.current_view) self._update_view(view) def close_current_view(self): view = self.last_view_stack.get() self._update_view(view) def initialize_frame(self, view): self.header = urwid.AttrMap(urwid.Text("Press any key", wrap='clip'), 'header') self.footer = SearchBar() self.main_frame = urwid.Frame(view, self.header, self.footer.control, focus_part='body') def _update_view(self, view): self.current_view = view self.main_frame.contents['body'] = ( view, None ) self._loop.draw_screen() def on_keypress(self, input): self.current_view.on_keypress(input) def get_pallette(self): palette = [('header', 'black', 'dark green', 'standout'), ('footer', 'black', 'dark green', 'standout'), ('normal', 'white', 'black'), ('reveal focus', 'white', 'dark blue', 'standout'), ('filename', 'light blue', 'black'), ('diff', 'black', 'dark green', 'standout'), ('added', 'dark green', 'black'), ('deleted', 'dark red', 'black')] return palette
def find_path(self): # use DFS """Finding augmenting paths in the residual network.""" parent = dict((node, None) for node in self.residual.iternodes()) # Capacity of found path to node. capacity = {self.source: float("inf")} Q = LifoQueue() Q.put(self.source) while not Q.empty(): node = Q.get() for edge in self.residual.iteroutedges(node): cap = edge.weight - self.flow[edge.source][edge.target] if cap > 0 and parent[edge.target] is None: parent[edge.target] = edge.source capacity[edge.target] = min(capacity[edge.source], cap) if edge.target != self.sink: Q.put(edge.target) else: return capacity[self.sink], parent return 0, parent
def iterative(path, path_data): stack=LifoQueue() while 1: if not (type(path_data) == dict and path_data): changes.append(self.store_one(path, path_data)) else: for node in path_data: node_path = path + '/' + node node_data = path_data[node] change = self.store_one(node_path, node_data) changes.append(change) if type(node_data) == type(dict()): stack.put([node_path, node_data]) if stack.qsize(): path,path_data=stack.get() continue; break;
def validTree(self, n, edges):
    if edges == [] and n == 1:
        return True
    elif edges == []:
        return False
    visited = [-1 for i in range(0, n)]
    father = [-1 for i in range(0, n)]
    adjl = {i: [] for i in range(0, n)}
    q = LifoQueue()
    for edge in edges:
        i = max(edge)
        j = min(edge)
        adjl[i].append(j)
        adjl[j].append(i)
    q.put(0)
    visited[0] = 0  # was "visited[0] == 0", a comparison with no effect
    while q.empty() == False:
        u = q.get()
        #print u
        for i in adjl[u]:
            if visited[i] == 1 and father[u] != i:
                # A non-tree edge closes a cycle.
                return False
            if visited[i] == -1:
                visited[i] = 0
                father[i] = u
                q.put(i)
        visited[u] = 1
    # A tree must also be connected: every node has to be reachable from node 0.
    return -1 not in visited
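# Quick sanity checks for validTree above, assuming it is defined on a
# LeetCode-style Solution class (the class name is an assumption).
def _demo_valid_tree():
    sol = Solution()
    print(sol.validTree(5, [[0, 1], [0, 2], [0, 3], [1, 4]]))  # True  -- a tree
    print(sol.validTree(3, [[0, 1], [1, 2], [2, 0]]))          # False -- contains a cycle
    print(sol.validTree(4, [[0, 1], [2, 3]]))                  # False -- disconnected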
def inspect_cass_log(config):
    cass_log_file = get_cass_log_file(config)
    if not cass_log_file or not os.path.exists(cass_log_file):
        return None
    reader = BackwardsFileReader(cass_log_file)
    lifo = LifoQueue()
    last_line = reader.readline()
    lifo.put(last_line)
    while not re.match('^ERROR', last_line):
        last_line = reader.readline()
        if re.match('^\t', last_line):
            lifo.put(last_line)
        if re.match('^ERROR', last_line):
            lifo.put(last_line)
        if not last_line:
            break
    ret_str = ""
    while not lifo.empty():
        ret_str += lifo.get()
    return ret_str
def process_transaction(self, transaction_id): stack = LifoQueue() tasks = self.storage.get_tasks(transaction_id) logger.debug(tasks) for i, task in enumerate(tasks): try: task = Task(task) task.run() self.storage.set_task_processed(transaction_id, i, True) stack.put(task) except: logger.critical(format_exc()) self.storage.set_task_processed(transaction_id, i, False) while stack.qsize(): task = stack.get() task.reverse() return { 'error': True, 'processed': i, } return { 'success': True }
class TileProvider( QObject ): THREAD_HEARTBEAT = 0.2 Tile = collections.namedtuple('Tile', 'id qimg rectF progress tiling') changed = pyqtSignal( QRectF ) '''TileProvider __init__ Keyword Arguments: cache_size -- maximal number of encountered stacks to cache, i.e. slices if the imagesources draw from slicesources (default 10) request_queue_size -- maximal number of request to queue up (default 100000) n_threads -- maximal number of request threads; this determines the maximal number of simultaneously running requests to the pixelpipeline (default: 2) layerIdChange_means_dirty -- layerId changes invalidate the cache; by default only stackId changes do that (default False) parent -- QObject ''' def __init__( self, tiling, stackedImageSources, cache_size = 10, request_queue_size = 100000, n_threads = 2, layerIdChange_means_dirty=False, parent=None ): QObject.__init__( self, parent = parent ) self.tiling = tiling self._sims = stackedImageSources self._cache_size = cache_size self._request_queue_size = request_queue_size self._n_threads = n_threads self._layerIdChange_means_dirty = layerIdChange_means_dirty self._current_stack_id = self._sims.stackId self._cache = _TilesCache(self._current_stack_id, self._sims, maxstacks=self._cache_size) self._dirtyLayerQueue = LifoQueue(self._request_queue_size) self._sims.layerDirty.connect(self._onLayerDirty) self._sims.visibleChanged.connect(self._onVisibleChanged) self._sims.opacityChanged.connect(self._onOpacityChanged) self._sims.sizeChanged.connect(self._onSizeChanged) self._sims.orderChanged.connect(self._onOrderChanged) self._sims.stackIdChanged.connect(self._onStackIdChanged) if self._layerIdChange_means_dirty: self._sims.layerIdChanged.connect(self._onLayerIdChanged) self._keepRendering = True self._dirtyLayerThreads = [Thread(target=self._dirtyLayersWorker) for i in range(self._n_threads)] for thread in self._dirtyLayerThreads: thread.daemon = True [ thread.start() for thread in self._dirtyLayerThreads ] def getTiles( self, rectF ): '''Get tiles in rect and request a refresh. Returns tiles intersectinf with rectF immediatelly and requests a refresh of these tiles. Next time you call this function the tiles may be already (partially) updated. If you want to wait until the rendering is fully complete, call join(). ''' self.requestRefresh( rectF ) tile_nos = self.tiling.intersectedF( rectF ) stack_id = self._current_stack_id for tile_no in tile_nos: qimg, progress = self._cache.tile(stack_id, tile_no) t = TileProvider.Tile(tile_no, qimg, QRectF(self.tiling.imageRects[tile_no]), progress, self.tiling) yield t def requestRefresh( self, rectF ): '''Requests tiles to be refreshed. Returns immediatelly. Call join() to wait for the end of the rendering. ''' tile_nos = self.tiling.intersectedF( rectF ) for tile_no in tile_nos: stack_id = self._current_stack_id self._refreshTile( stack_id, tile_no ) def join( self ): '''Wait until all refresh request are processed. Blocks until no refresh request pending anymore and all rendering finished. ''' return self._dirtyLayerQueue.join() def notifyThreadsToStop( self ): '''Signals render threads to stop. Call this method at the end of the lifetime of a TileProvider instance. Otherwise the garbage collector will not clean up the instance (even if you call del). ''' self._keepRendering = False def threadsAreNotifiedToStop( self ): '''Check if NotifyThreadsToStop() was called at least once.''' return not self._keepRendering def joinThreads( self, timeout=None ): '''Wait until all threads terminated. 
Without calling notifyThreadsToStop, threads will never terminate. Arguments: timeout -- timeout in seconds as a floating point number ''' for thread in self._dirtyLayerThreads: thread.join( timeout ) def aliveThreads( self ): '''Return a map of thread identifiers and their alive status. All threads are alive until notifyThreadsToStop() is called. After that, they start dying. Call joinThreads() to wait for the last thread to die. ''' at = {} for thread in self._dirtyLayerThreads: if thread.ident: at[thread.ident] = thread.isAlive() return at def _dirtyLayersWorker( self ): while self._keepRendering: try: ims, tile_nr, stack_id, image_req, timestamp, cache = self._dirtyLayerQueue.get(True, self.THREAD_HEARTBEAT) except (Empty, TypeError): #the TypeError occurs when the self._dirtyLayerQueue #is already None when the thread is being shut down #on program exit. #This avoids a lot of warnings. continue try: if timestamp > cache.layerTimestamp( stack_id, ims, tile_nr ): img = image_req.wait() cache.updateTileIfNecessary( stack_id, ims, tile_nr, timestamp, img ) if stack_id == self._current_stack_id and cache is self._cache: self.changed.emit(QRectF(self.tiling.imageRects[tile_nr])) except KeyError: pass finally: self._dirtyLayerQueue.task_done() def _refreshTile( self, stack_id, tile_no ): try: if self._cache.tileDirty( stack_id, tile_no ): self._cache.setTileDirty(stack_id, tile_no, False) img = self._renderTile( stack_id, tile_no ) self._cache.setTile( stack_id, tile_no, img, self._sims.viewVisible(), self._sims.viewOccluded() ) # refresh dirty layer tiles for ims in self._sims.viewImageSources(): if self._cache.layerDirty(stack_id, ims, tile_no) and not self._sims.isOccluded(ims) and self._sims.isVisible(ims): req = (ims, tile_no, stack_id, ims.request(self.tiling.imageRects[tile_no]), time.time(), self._cache) try: self._dirtyLayerQueue.put_nowait( req ) except Full: warnings.warn("Request queue full. Dropping tile refresh request. 
Increase queue size!") except KeyError: pass def _renderTile( self, stack_id, tile_nr ): qimg = QImage(self.tiling.imageRects[tile_nr].size(), QImage.Format_ARGB32_Premultiplied) qimg.fill(Qt.white) p = QPainter(qimg) for i, v in enumerate(reversed(self._sims)): visible, layerOpacity, layerImageSource = v if not visible: continue patch = self._cache.layer(stack_id, layerImageSource, tile_nr ) if patch is not None: p.setOpacity(layerOpacity) p.drawImage(0,0, patch) p.end() return qimg def _onLayerDirty(self, dirtyImgSrc, rect ): if dirtyImgSrc in self._sims.viewImageSources(): visibleAndNotOccluded = self._sims.isVisible( dirtyImgSrc ) and not self._sims.isOccluded( dirtyImgSrc ) for tile_no in xrange(len(self.tiling)): #and invalid rect means everything is dirty if not rect.isValid() or self.tiling.tileRects[tile_no].intersected( rect ): for ims in self._sims.viewImageSources(): self._cache.setLayerDirtyAll(ims, tile_no, True) if visibleAndNotOccluded: self._cache.setTileDirtyAll(tile_no, True) if visibleAndNotOccluded: self.changed.emit( QRectF(rect) ) def _onStackIdChanged( self, oldId, newId ): if newId in self._cache: self._cache.touchStack( newId ) else: self._cache.addStack( newId ) self._current_stack_id = newId self.changed.emit(QRectF()) def _onLayerIdChanged( self, ims, oldId, newId ): if self._layerIdChange_means_dirty: self._onLayerDirty( ims, QRect() ) def _onVisibleChanged(self, ims, visible): for tile_no in xrange(len(self.tiling)): self._cache.setTileDirtyAll(tile_no, True) if not self._sims.isOccluded( ims ): self.changed.emit(QRectF()) def _onOpacityChanged(self, ims, opacity): for tile_no in xrange(len(self.tiling)): self._cache.setTileDirtyAll(tile_no, True) if self._sims.isVisible( ims ) and not self._sims.isOccluded( ims ): self.changed.emit(QRectF()) def _onSizeChanged(self): self._cache = _TilesCache(self._current_stack_id, self._sims, maxstacks=self._cache_size) self._dirtyLayerQueue = LifoQueue(self._request_queue_size) self.changed.emit(QRectF()) def _onOrderChanged(self): for tile_no in xrange(len(self.tiling)): self._cache.setTileDirtyAll(tile_no, True) self.changed.emit(QRectF())
from Queue import LifoQueue

q = LifoQueue()

for i in range(5):
    q.put(i)
    print "put: ", i

while True:
    if q.empty():
        break
    else:
        i = q.get(timeout=1)
        print "get :", i
class ConnectionPool(object): """Thread-safe connection pool implementation. ``max_connections`` Maximum number of connections. ``block_timeout`` Maximum number of seconds to wait. """ def __init__(self, max_connections=10, block_timeout=5, **kwds): self.max_connections = max_connections self.block_timeout = block_timeout # kwds is being sent directly to dbapi, so we pop self._pool_id = kwds.pop('pool_id') self._logobj = kwds.pop('logobj') self.kwds = kwds # make sure max_connections is valid is_valid = isinstance(max_connections, int) and \ max_connections > 0 if not is_valid: raise ValueError('max_connections must be a positive int') # if process id is changed, we will close all connections, # and reinstantiate this object self._pid = os.getpid() # this is where we define the pool, and fill it with None values self._pool = LifoQueue(max_connections) while True: try: self._pool.put_nowait(None) except Full: break # open connections self._connections = [] def _checkpid(self): """Closes all connections and reinstantiates the object if pid is changed. """ if self._pid == os.getpid(): return self.disconnect() self.reinstantiate() def _make_connection(self): """Creates a fresh connection. """ connection = psycopg2.connect(**self.kwds) if not hasattr(connection, 'pool_id'): connection.pool_id = self._pool_id # we don't need transaction, so let's turn autocommit on connection.autocommit = True # pass the logger object if needed if isinstance(connection, LoggingConnection): connection.initialize(self._logobj) _register_hstore(connection, unicode=True) # encoding connection.set_client_encoding('UTF8') # timezone cursor = connection.cursor() cursor.execute("set timezone to 'UTC'") cursor.close() self._connections.append(connection) return connection def get_connection(self): """Returns a psycopg2 connection for the given shard_id. Raises `ConnectionError` if necessary. """ self._checkpid() connection = None try: # wait for a connection connection = self._pool.get(block=True, timeout=self.block_timeout) except Empty: # timeout raise ConnectionError('no connection available') # create a new connection if it is not initialized yet if connection is None: connection = self._make_connection() return connection def put_connection(self, connection): """Sends connection back into the pool. """ self._checkpid() try: self._pool.put_nowait(connection) except Full: # reinstantiate may have caused this. # this connection is useless now. pass def disconnect(self): """Closes every connection in every pool. """ for connection in self._connections: connection.close() self._connections = [] def reinstantiate(self): """Reinstantiates connection pools. Make sure you have closed every connection before calling this method. """ # let's add these back to kwds self.kwds['pool_id'] = self._pool_id self.kwds['logobj'] = self._logobj self.__init__(max_connections=self.max_connections, block_timeout=self.block_timeout, **self.kwds)
class SaveManager(QObject): start_save = pyqtSignal() report_error = pyqtSignal(object) save_done = pyqtSignal() def __init__(self, parent): QObject.__init__(self, parent) self.count = 0 self.last_saved = -1 self.requests = LifoQueue() t = Thread(name='save-thread', target=self.run) t.daemon = True t.start() self.status_widget = w = SaveWidget(parent) self.start_save.connect(w.start, type=Qt.QueuedConnection) self.save_done.connect(w.stop, type=Qt.QueuedConnection) def schedule(self, tdir, container): self.count += 1 self.requests.put((self.count, tdir, container)) def run(self): while True: x = self.requests.get() if x is None: self.requests.task_done() self.__empty_queue() break try: count, tdir, container = x self.process_save(count, tdir, container) except: import traceback traceback.print_exc() finally: self.requests.task_done() def __empty_queue(self): ' Only to be used during shutdown ' while True: try: self.requests.get_nowait() except Empty: break else: self.requests.task_done() def process_save(self, count, tdir, container): if count <= self.last_saved: shutil.rmtree(tdir, ignore_errors=True) return self.last_saved = count self.start_save.emit() try: self.do_save(tdir, container) except: import traceback self.report_error.emit(traceback.format_exc()) self.save_done.emit() def do_save(self, tdir, container): try: save_container(container, container.path_to_ebook) finally: shutil.rmtree(tdir, ignore_errors=True) @property def has_tasks(self): return bool(self.requests.unfinished_tasks) def wait(self, timeout=30): if timeout is None: self.requests.join() else: try: join_with_timeout(self.requests, timeout) except RuntimeError: return False return True def shutdown(self): self.requests.put(None)
class Gazzle(object): def __init__(self, *args, **kwargs): self.sockets = [] mongo_client = MongoClient('localhost', 27017) self.mongo = mongo_client['gazzle'] # self.mongo.drop_collection('pages') self.pages = self.mongo['pages'] self._init_whoosh() self.pageset = {} self.crawl_thread_count = kwargs.get('crawl_threads', 3) self.pending_crawls = 0 self.pending_lock = threading.RLock() self.frontier = Queue() self.crawlCount = 0 self.crawling = False self.crawl_cond = threading.Condition() self.crawl_lock = threading.RLock() self._init_crawl() self.index_set = set() self.index_q = LifoQueue() self.index_altq = LifoQueue() self.index_alt_switchoff = False self.indexing = False self.index_cond = threading.Condition() self.index_lock = threading.RLock() self._init_index() self._index_size() self.crosssite_crawl = False self.pagerank_cond = threading.Condition() self._start_thread(target = self._crawl, count = self.crawl_thread_count) self._start_thread(target = self._index, count = 1) # index writer doesn't support multithreading self._start_thread(target = self._pagerank, count = 1) self._start_thread(target = self._assert_thread, count=1) def _init_crawl(self): self.pageset = {} self.frontier = Queue() for page in self.pages.find(): self.pageset[page['url']] = page['page_id'] for page in self.pages.find({'crawled': False}): self.frontier.put(page['page_id']) self.crawlCount = self.pages.find({'crawled': True}).count() print('Added %d pages to page set' % len(self.pageset)) print('Added %d pages to frontier' % self.frontier.qsize()) print('Crawl count set to %d' % self.crawlCount) def _init_index(self): self.index_set = set() self.index_q = LifoQueue() for page in self.pages.find({'indexed': True}): self.index_set.add(page['page_id']) for page in self.pages.find({'crawled':True, 'indexed': False}): self.index_q.put(page['page_id']) print('Added %d pages to index set' % len(self.index_set)) print('Added %d pages to index queue' % self.index_q.qsize()) def _init_whoosh(self, clear = False): schema = Schema(page_id=STORED, title=TEXT(stored=True), content=TEXT, url=ID(stored=True)) if not os.path.exists("index"): os.mkdir("index") clear = True if clear: self.index = create_in('index', schema) else: self.index = open_dir("index") def _assert_thread(self): while True: a = self.pages.find_one({'crawled': True, 'title': {'$exists': False}}) assert a == None, 'Found inconsistent page in db ID: %d URL: %s' % (a['page_id'], a['url']) time.sleep(1) def _pagerank(self): while True: with self.pagerank_cond: self.pagerank_cond.wait() pages = self.pages.find({'crawled': True, 'indexed': True}, { '_id':False, 'content': False, 'links.url': False }) RANK_SCALE = 1 ALPHA = 0.25 page_count = pages.count() id_to_ind = {} ind_to_id = [] for page in pages: ind = len(id_to_ind) ind_to_id.append(page['page_id']) id_to_ind[page['page_id']] = ind pages.rewind() pmat = [] for page in pages: row = [0.0] * page_count link_count = 0 for link in page['links']: if link['page_id'] in id_to_ind: ind = id_to_ind[link['page_id']] row[ind] += RANK_SCALE link_count += 1 alph = ALPHA * RANK_SCALE / page_count for ind in range(page_count): if link_count == 0: row[ind] += 1 / page_count else: row[ind] *= (1 - alph) / link_count row[ind] += alph / page_count pmat.append(row) page_rank = [0] * page_count page_rank[0] = 1 for d in range(30): page_rank = dot(page_rank, pmat) result = [{"page_id": ind_to_id[x], "rank": self._format_rank(page_rank[x])} for x in range(page_count)] self._send_to_all({ 'action': 'page rank', 'pages': result 
}) for ind in range(page_count): self.pages.update({"page_id": ind_to_id[ind]}, {"$set": {"rank": page_rank[ind]}}, upsert=False) def _index(self): _ = { 'lock': threading.RLock(), 'writer': None, 'need_commit': [], } def flush(_): while True: if len(_['need_commit']) != 0 and _['writer'] != None: _['lock'].acquire() _['writer'].commit() _['writer'] = None need_tmp = _['need_commit'] _['need_commit'] = [] _['lock'].release() self._send_to_all({ 'action': 'index commit', 'pages': map(lambda x: {'page_id': x}, need_tmp) }) self.pages.update({'page_id' : {'$in': need_tmp}}, {'$set': {'indexed': True}}, multi = True, upsert = False) with self.pagerank_cond: self.pagerank_cond.notify() self._send_to_all({ 'action': 'index size', 'value': self.index_size }) time.sleep(5) self._start_thread(target = flush, kwargs={'_':_}) while True: with self.index_cond: with self.pending_lock: pending = self.pending_crawls != 0 while not self.indexing or pending: self.index_cond.wait() with self.pending_lock: pending = self.pending_crawls != 0 try: item_index = self.index_altq.get(False) if self.index_alt_switchoff: self.indexing = False except: item_index = self.index_q.get(True) if item_index in self.index_set: continue item = self.pages.find_one({'page_id': item_index}) _['lock'].acquire() if _['writer'] == None: _['writer'] = self.index.writer() assert item.get('title') != None , 'Uncrawled page in index queue, ID: %d, URL: %s' %(item['page_id'], item['url']) _['writer'].add_document(page_id=item_index, title=item['title'], content=item['content'], url=item['url']) _['need_commit'].append(item_index) _['lock'].release() self.index_set.add(item_index) self._send_to_all({ 'action': 'index page', 'page': {'page_id': item_index} }) def _crawl(self): with self.pending_lock: self.pending_crawls += 1 while True: with self.pending_lock: self.pending_crawls -= 1 with self.crawl_cond: while not self.crawling: if self.indexing: with self.index_cond: self.index_cond.notify() self.crawl_cond.wait() with self.pending_lock: self.pending_crawls += 1 item_index = self.frontier.get(True) item = self.pages.find_one({'page_id': item_index}) page = urllib2.urlopen(item['url']) soup = BeautifulSoup(page.read()) title = soup.title.text #.replace(' - Wikipedia, the free encyclopedia', '') if len(title) > 12: title = title[:12] + '...' 
body = soup.body.text links = map(lambda link: self.extract_anchor_link(link, item['url']), soup.find_all("a")) links = filter(lambda link: link != '' and link != None, links) with self.crawl_lock: # links = filter(lambda link: link not in self.pageset, links) print("%s Crawling %s found %d links" % (threading.current_thread().name, item['url'], len(links))) result_links = [] for link in links: if link not in self.pageset: page_id = len(self.pageset) self.pages.insert({ 'page_id': page_id, 'url': link, 'crawled': False, 'indexed': False }) self.pageset[link] = page_id self.frontier.put(page_id) else: page_id = self.pageset[link] result_links.append({'url': link, 'page_id': page_id}) self.crawlCount += 1 self.index_q.put(item_index) self.pages.update({'page_id': item_index}, { '$push': {'links': {'$each': result_links}}, '$set': {'title': unicode(title), 'content': unicode(body), 'crawled': True} }) self._send_to_all(json.dumps([ { 'action': 'crawl page', 'page': {'page_id': item_index, 'url': item['url'], 'link_count': len(links), 'title': title} }, { 'action': 'frontier size', 'value': self.frontier.qsize() }, { 'action': 'crawl size', 'value': self.crawlCount }, ])) def extract_anchor_link(self, link, url): href = link.get('href', '') m = re.match('([^?]+)[?].*', unicode(href)) if m != None: href = m.group(1) if href == '': return '' if 'https://' in href: href = href.replace('https://', 'http://') if re.match('#.*', href) != None: return '' elif re.match('//.*', href): return 'http:' + href elif re.match('/.*', href): m = re.match('(http://[0-9a-zA-Z.]+)/*', url) # print("link %s %s going to %s" % (href, "", "")) return m.group(1) + href elif self.crosssite_crawl: return href return '' def search(self, socket, query, rank_part=0): def sort_results(results): scores = {} max_score = 0 max_rank = 0 for res in results: scores[res.fields()['page_id']] = res.score if res.score > max_score: max_score = res.score page_ids = map(lambda x: x.fields()['page_id'], results) pages = self.pages.find({"page_id": {"$in": page_ids}}, {"title": True, "page_id":True, "rank":True, "url": True}) pages = map(lambda x: dict(x), pages) for page in pages: if 'rank' not in page: page['rank'] = 0 if page['rank'] > max_rank: max_rank = page['rank'] for page in pages: del page['_id'] rank = 1 - page['rank'] / float(max_rank) score = scores[page['page_id']] / float(max_score) final_score = rank * (rank_part / 100.0) + score * (1 - rank_part / 100.0) page['score'] = final_score pages.sort(key = lambda x: x['score']) return pages with self.index.searcher() as searcher: parser = QueryParser("content", self.index.schema) parsed_query = parser.parse(query) results = searcher.search(parsed_query) if len(results) > 0: print("found some") print(len(results)) results = sort_results(results) else: results = [] # results = map(lambda x: dict(x), results) print(results) socket.write_message(json.dumps({ 'action': 'search results', 'results' : results })) def clear_index(self): self._init_whoosh(clear = True) self.pages.update({'indexed': True}, {'$set': {'indexed': False}}, multi = True, upsert = False) self._init_index() self._send_to_all({ 'action': 'index clear' }) def clear_frontier(self): self.pages.remove({'crawled': False}) self._init_crawl() self._send_to_all({ 'action': 'init', 'frontier_size': 0 }) def clear_all(self): self.mongo.drop_collection('pages') self._init_whoosh(clear = True) self._init_index() self._init_crawl() self.indexing = False self.crawling = False self.index_size = 0 self.crosssite_crawl = False 
self._send_to_all(json.dumps({ 'action': 'init', 'pages': [], 'frontier_size': 0, 'crawl_size': 0, 'index_size': 0, 'crawling': False, 'indexing': False, 'crosssite_crawl': False })) def _format_rank(self, rank): if rank == None: return None return "%.2f" % (math.log(rank + 1) * 100) def _send_to_all(self, message): if type(message) != str: message = json.dumps(message) for socket in self.sockets: socket.write_message(message) def _start_thread(self, target, count=1, args=(), kwargs={}): for x in range(count): thread = threading.Thread(target=target, args=args, kwargs=kwargs) thread.setDaemon(True) thread.start() def _index_size(self): self.index_size = sum(os.path.getsize('index/'+f) for f in os.listdir('index') if os.path.isfile('index/'+f)) print("Index Size: %d" % self.index_size) return self.index_size def add_socket(self, socket): self.sockets.append(socket) pages = self.pages.find({'crawled': True}, {'_id': False, 'page_id':True, 'url': True, 'title': True, 'indexed': True, 'rank': True}) pages = map(lambda x: {'page_id': x['page_id'], 'title': x['title'], 'url': x['url'], 'indexed': x['indexed'], 'rank': self._format_rank(x.get('rank'))}, pages) socket.write_message(json.dumps({ 'action': 'init', 'pages': pages, 'frontier_size': self.frontier.qsize(), 'crawl_size': self.crawlCount, 'index_size': self.index_size, 'crawling': self.crawling, 'indexing': self.indexing, 'crosssite_crawl': self.crosssite_crawl })) def remove_socket(self, socket): self.sockets.remove(socket) def start_crawl(self, url=''): if url == '': url = 'http://en.wikipedia.org/wiki/Information_retrieval' with self.crawl_lock: page_id = len(self.pageset) self.pages.insert({ 'page_id': page_id, 'url': url, 'crawled': False, 'indexed': False }) self.frontier.put(len(self.pageset)) self.pageset[url] = page_id self.toggle_crawl(state = True) def toggle_crawl(self, state=None): with self.crawl_cond: if state == None: self.crawling = not self.crawling else: self.crawling = state self.crawl_cond.notifyAll() self._send_to_all({ 'action': 'init', 'crawling': self.crawling }) def toggle_index(self, state=None): with self.index_cond: if state == None: self.indexing = not self.indexing else: self.indexing = state self.index_cond.notifyAll() self._send_to_all({ 'action': 'init', 'indexing': self.indexing }) def index_page(self, page): self.index_altq.put(page) with self.index_cond: self.index_alt_switchoff = not self.indexing self.indexing = True self.index_cond.notifyAll() def toggle_crosssite_crawl(self, state=None): if state == None: self.crosssite_crawl = not self.crosssite_crawl else: self.crosssite_crawl = state self._send_to_all({ 'action': 'init', 'crosssite_crawl': self.crosssite_crawl })
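# The crawler (_crawl) and indexer (_index) loops above pause and resume by
# waiting on threading.Condition objects (crawl_cond / index_cond): each worker
# sleeps in "while not self.crawling / self.indexing: cond.wait()", and
# toggle_crawl / toggle_index flip the flag and notifyAll() to wake it again.
# Below is a minimal, self-contained sketch of that pattern; the names
# PausableWorker and work_q are illustrative and not part of the class above.
import threading
import time
from Queue import Queue          # Python 2, matching the snippets above


class PausableWorker(object):
    def __init__(self):
        self.cond = threading.Condition()
        self.running = False
        self.work_q = Queue()
        thread = threading.Thread(target=self._loop)
        thread.setDaemon(True)
        thread.start()

    def _loop(self):
        while True:
            with self.cond:
                while not self.running:      # same shape as "while not self.crawling"
                    self.cond.wait()
            item = self.work_q.get(True)     # block until there is work
            print("processing %s" % item)

    def toggle(self, state=None):
        # Flip (or set) the flag under the condition's lock, then wake the loop.
        with self.cond:
            self.running = (not self.running) if state is None else state
            self.cond.notifyAll()


if __name__ == '__main__':
    worker = PausableWorker()
    for i in range(3):
        worker.work_q.put(i)
    worker.toggle(True)      # start draining the queue
    time.sleep(1)
    worker.toggle(False)     # pause; anything still queued stays queued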
class MacBuilder(): DEFAULT_RAMDISK_SIZE = 2 * 1024 * 500 def __init__(self): parser = optparse.OptionParser() parser.add_option("-v", "--version", action="store", dest="version", type="string", default="0", help="Set the STONIX build version number", metavar="version") parser.add_option("-g", "--gui", action="store_true", dest="compileGui", default=False, help="If set, the PyQt files will be recompiled") options, __ = parser.parse_args() ##### # If version was not included at command line, use hardcoded version number if options.version == "0": self.APPVERSION="0.8.13.10" else: self.APPVERSION=options.version ##### # REQUIRED when tarring up stuff on the Mac filesystem - # IF this is not done, tar will pick up resource forks from HFS+ # filesystems, and when un-archiving, create separate files # of the resource forks and make a MESS of the filesystem. os.environ["COPYFILE_DISABLE"] = "true" self.RSYNC="/usr/bin/rsync" self.HDIUTIL="/usr/bin/hdiutil" self.PYUIC = mbl.getpyuicpath() # Create directory queue to replace pushd/popd self.dirq = LifoQueue(0) self.dirq.put(os.getcwd()) os.chdir("..") print " " print " " print " ******************************************************************" print " ******************************************************************" print " ***** App Version: " + self.APPVERSION print " ******************************************************************" print " ******************************************************************" print " " print " " os.chdir(self.dirq.get()) self.STONIX="stonix" self.STONIXICON="stonix_icon" self.STONIXVERSION=self.APPVERSION self.STONIX4MAC="stonix4mac" self.STONIX4MACICON="stonix_icon" self.STONIX4MACVERSION=self.APPVERSION ############################################################################### ############################################################################### ############################################################################### ##### ##### Logical script start ##### ############################################################################### ############################################################################### ############################################################################### ##### # Check that user building stonix has uid 0 self.CURRENT_USER, self.RUNNING_ID = mbl.checkBuildUser() ##### # Create temp home directory for building with pyinstaller DIRECTORY = os.environ["HOME"] self.TMPHOME=mkdtemp(prefix=self.CURRENT_USER + ".") os.environ["HOME"] = self.TMPHOME os.chmod(self.TMPHOME, 0755) # Create a ramdisk and mount it to the ${self.TMPHOME} Not yet ready for prime time DEVICE=self.setupRamdisk(1300, self.TMPHOME) print "Device for tmp ramdisk is: " + DEVICE ##### # Copy src dir to /tmp/<username> so shutil doesn't freak about long filenames... # ONLY seems to be a problem on Mavericks.. self.dirq.put(os.getcwd()) os.chdir("../..") call([self.RSYNC, "-aqp", "--exclude=\".svn\"", "--exclude=\"*.tar.gz\"", "--exclude=\"*.dmg\"", \ "src", self.TMPHOME]) ##### # capture current directory, so we can copy back to it.. START_BUILD_DIR=os.getcwd() print START_BUILD_DIR ##### # Keep track of the directory we're starting from... 
self.dirq.put(os.getcwd()) os.chdir(self.TMPHOME + "/src/MacBuild") print os.getcwd() ##### # Compile .ui files to .py files if options.compileGui: self.compileStonix4MacAppUiFiles() # Change the versions in the program_arguments.py in both stonix and stonix4mac self.setProgramArgumentsVersion() # Copy stonix source to scratch build directory self.prepStonixBuild() ##### # Compile the two apps... self.compileApp(self.STONIX, self.STONIXVERSION, self.STONIXICON) self.compileApp(self.STONIX4MAC, self.STONIX4MACVERSION, self.STONIX4MACICON) ##### # Restore the HOME environment variable os.environ["HOME"] = DIRECTORY ##### # Copy and create all neccessary resources to app Resources dir self.buildStonix4MacAppResources(self.STONIX4MAC) ##### # tar up build & create dmg with luggage self.tarAndBuildStonix4MacAppPkg(self.STONIX4MAC, self.STONIX4MACVERSION) self.makeSelfUpdatePackage() os.chdir(self.TMPHOME) ##### # Copy back to pseudo-build directory call([self.RSYNC, "-aqp", self.TMPHOME + "/src", START_BUILD_DIR]) os.chdir(self.dirq.get()) mbl.chownR(self.CURRENT_USER, "src") ##### # chmod so it's readable by everyone, writable by the group mbl.chmodR(stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH|stat.S_IWGRP, "src", "append") ##### # Return to the start dir... os.chdir(self.dirq.get()) ##### # Eject the ramdisk.. Not yet ready for prime time #self.detachRamdisk(DEVICE) print " " print " " print " Done building stonix4mac.app..." print " " print " " def setupRamdisk(self, size=DEFAULT_RAMDISK_SIZE, mntpnt=""): # TODO: Add debug/verbose options message_level = "normal" ramdisk = RamDisk(str(size), mntpnt, message_level) if not ramdisk.success: raise Exception("Ramdisk setup failed...") return ramdisk.getDevice() def detachRamdisk(self, device): # TODO: Add debug/verbose options message_level = "normal" if detach(device, message_level): log_message(r"Successfully detached disk: " + str(device).strip(), "verbose", message_level) else: log_message(r"Couldn't detach disk: " + str(device).strip()) raise Exception(r"Cannot eject disk: " + str(device).strip()) def compileStonix4MacAppUiFiles(self): ############################################################################ ############################################################################ ##### ##### compile the .ui files to .py files for stonix4mac.app ##### ############################################################################ ############################################################################ self.dirq.put(os.getcwd()) os.chdir(self.STONIX4MAC) print "Starting compileStonix4MacAppUiFiles..." print os.getcwd() ################################################### # to compile the .ui files to .py files: print "Compiling Qt ui files to python files, for stonix4mac.app..." call([self.PYUIC, "admin_credentials.ui"], stdout=open("admin_credentials_ui.py", "w")) call([self.PYUIC, "stonix_wrapper.ui"], stdout=open("stonix_wrapper_ui.py", "w")) call([self.PYUIC, "general_warning.ui"], stdout=open("general_warning_ui.py", "w")) os.chdir(self.dirq.get()) print "compileStonix4MacAppUiFiles Finished..." def setProgramArgumentsVersion(self): print "Changing versions in localize.py..." mbl.regexReplace("../stonix_resources/localize.py", r"^STONIXVERSION =.*$", r"STONIXVERSION = '" + self.APPVERSION + "'", backupname="../stonix_resources/localize.py.bak") print "Finished changing versions in localize.py..." 
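# setProgramArgumentsVersion() above rewrites the STONIXVERSION line in
# localize.py through the project-specific helper mbl.regexReplace.  The real
# helper is not shown in this document; the function below is only an
# illustrative stand-in showing how a line-anchored, in-place regex
# replacement with a backup copy can be done with the standard library.
import re
import shutil


def regex_replace_in_file(path, pattern, replacement, backupname=None):
    """Replace every line matching `pattern` in the file at `path`."""
    if backupname:
        shutil.copy2(path, backupname)      # keep a backup before editing
    with open(path, "r") as f:
        text = f.read()
    text = re.sub(pattern, replacement, text, flags=re.MULTILINE)
    with open(path, "w") as f:
        f.write(text)

# Usage mirroring the call above (paths and version string are examples):
# regex_replace_in_file("../stonix_resources/localize.py",
#                       r"^STONIXVERSION =.*$",
#                       "STONIXVERSION = '0.8.13.10'",
#                       backupname="../stonix_resources/localize.py.bak")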
def prepStonixBuild(self): ############################################################################ ############################################################################ ##### ##### Copy stonix source to app build directory ##### ############################################################################ ############################################################################ print "Starting prepStonixBuild..." ##### # Make sure the "stonix" directory exists, so we can put # together and create the stonix.app if not os.path.isdir("stonix"): os.mkdir("stonix") elif os.path.islink("stonix"): os.unlink(stonix) else: ##### # Cannot use mkdtmp here because it will make the directory on the # root filesystem instead of the ramdisk, then it will try to link # across filesystems which won't work TMPFILE= "stonix" + str(self.timeStamp()) #TMPFILE=mkdtemp(prefix="stonix.") os.rename("stonix", TMPFILE) os.mkdir("stonix") copy2("../stonix.py", "stonix") call([self.RSYNC, "-ap", "--exclude=\".svn\"", "--exclude=\"*.tar.gz\"", \ "--exclude=\"*.dmg\"", "../stonix_resources", "./stonix"]) print "prepStonixBuild Finished..." def timeStamp(self): ############################################################################ ############################################################################ ##### ##### get a time stamp ##### ############################################################################ ############################################################################ ##### # Get time in seconds ts = time.time() return ts def compileApp(self, appName, appVersion, appIcon): ############################################################################ ############################################################################ ##### ##### Compiling stonix4mac.app ##### ############################################################################ ############################################################################ APPNAME=appName APPVERSION=appVersion APPICON=appIcon print "Started compileApp with " + APPNAME + ", " + APPVERSION + ", " + APPICON self.dirq.put(os.getcwd()) os.chdir(APPNAME) if os.path.isdir("build"): rmtree("build") if os.path.isdir("dist"): rmtree("dist") ################################################### # to compile a pyinstaller spec file for app creation: print "Creating a pyinstaller spec file for the project..." print mbl.pyinstMakespec([APPNAME + ".py"], True, True, False, "../" + APPICON + ".icns", \ pathex=["stonix_resources/rules:stonix_resources"], specpath=os.getcwd()) ################################################### #to build: print "Building the app..." mbl.pyinstBuild(APPNAME + ".spec", "private/tmp", os.getcwd() + "/dist", True, True) plist = "./dist/" + APPNAME + ".app/Contents/Info.plist" ##### # Change version string of the app print "Changing .app version string..." mbl.modplist(os.getcwd() + "/dist/" + APPNAME + ".app/Contents/Info.plist", \ "CFBundleShortVersionString", APPVERSION) ##### # Change icon name in the app print "Changing .app icon..." mbl.modplist(os.getcwd() + "/dist/" + APPNAME + ".app/Contents/Info.plist", \ "CFBundleIconFile", APPICON + ".icns") ##### # Copy icons to the resources directory copy2("../" + APPICON + ".icns", "./dist/" + APPNAME + ".app/Contents/Resources") ##### # Change mode of Info.plist to 0755 os.chmod(plist, 0755) os.chdir(self.dirq.get()) print "compileApp with " + APPNAME + ", " + APPVERSION + " Finished..." 
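# compileApp() above edits CFBundleShortVersionString and CFBundleIconFile in
# the built app's Info.plist via mbl.modplist.  As an assumption about what
# such a helper amounts to, here is a minimal sketch using the standard
# plistlib module (Python 2 API, XML plists only -- a binary plist would first
# need `plutil -convert xml1`).
import plistlib


def mod_plist_key(plist_path, key, value):
    """Set a single key/value pair in an XML Info.plist."""
    plist = plistlib.readPlist(plist_path)
    plist[key] = value
    plistlib.writePlist(plist, plist_path)

# e.g. bump the version string of a freshly built .app (path is an example):
# mod_plist_key("./dist/stonix4mac.app/Contents/Info.plist",
#               "CFBundleShortVersionString", "0.8.13.10")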
def buildStonix4MacAppResources(self, appName): ############################################################################ ############################################################################ ##### ##### Copy and/or create all necessary files to the Resources directory ##### of stonix4mac.app ##### ############################################################################ ############################################################################ APPNAME=appName mypwd=os.getcwd() print "Started buildStonix4MacAppResources with \"" + APPNAME + "\" in " + mypwd + "..." ################################################### # Copy source to app dir call([self.RSYNC, "-aqp", "--exclude=\".svn\"" , "--exclude=\"*.tar.gz\"", "--exclude=\"*.dmg\"",\ "../stonix_resources", "./stonix/dist/stonix.app/Contents/MacOS"]) mypwd=os.getcwd() print "pwd: " + mypwd ##### # Copy stonix.app to the stonix4mac Resources directory call([self.RSYNC, "-aqp", "--exclude=\".svn\"", "--exclude=\"*.tar.gz\"", "--exclude=\"*.dmg\"", \ "./stonix/dist/stonix.app", "./" + APPNAME + "/dist/" + APPNAME + ".app/Contents/Resources"]) # Create an empty stonix.conf file open("./" + APPNAME + "/dist/" + APPNAME + \ ".app/Contents/Resources/stonix.conf", "w") copy2("./stonix/dist/stonix.app/Contents/MacOS/stonix_resources/localize.py", \ "./" + APPNAME + "/dist/" + APPNAME + ".app/Contents/MacOS") mypwd=os.getcwd() print "pwd: " + mypwd print "buildStonix4MacAppResources Finished..." def tarAndBuildStonix4MacAppPkg(self, appName, appVersion): ################################################################################ ################################################################################ ##### ##### Archive, build installer package and wrap into a dmg: ##### stonix4mac.app ##### ################################################################################ ################################################################################ APPNAME=appName APPVERSION=appVersion print "Started tarAndBuildStonix4MacApp..." mypwd=os.getcwd() print "pwd: " + mypwd ##### # Make sure the "tarfiles" directory exists, so we can archive # tarfiles of the name $APPNAME-$APPVERSION.app.tar.gz there if not os.path.isdir("tarfiles"): os.mkdir("tarfiles") else: ##### # Cannot use mkdtmp here because it will make the directory on the # root filesystem instead of the ramdisk, then it will try to link # across filesystems which won't work TMPFILE= "tarfiles" + str(self.timeStamp()) #TMPFILE=mkdtemp(prefix="tariles.") os.rename("tarfiles", TMPFILE) os.mkdir("tarfiles") ##### # tar up the app and put it in the tarfiles directory print "Tarring up the app & putting the tarfile in the ../tarfiles directory" self.dirq.put(os.getcwd()) os.chdir("./" + APPNAME + "/dist") mypwd=os.getcwd() print "pwd: " + mypwd mbl.makeTarball(APPNAME + ".app", "../../tarfiles/" + APPNAME + "-" + APPVERSION + ".app.tar.gz") os.chdir(self.dirq.get()) mypwd=os.getcwd() print "pwd: " + mypwd ################################################### # to create the package self.dirq.put(os.getcwd()) os.chdir(APPNAME) print "Putting new version into Makefile..." 
mbl.regexReplace("Makefile", r"PACKAGE_VERSION=", "PACKAGE_VERSION=" + APPVERSION) ### # Currently Makefile does not actually have a LUGGAGE_TMP variable mbl.regexReplace("Makefile", r"LUGGAGE_TMP\S+", "LUGGAGE_TMP=" + self.TMPHOME) if not os.path.isdir("../dmgs"): os.mkdir("../dmgs") else: ##### # Cannot use mkdtmp here because it will make the directory on the # root filesystem instead of the ramdisk, then it will try to link # across filesystems which won't work TMPFILE= "dmgs" + str(self.timeStamp()) #TMPFILE=mkdtemp(prefix="dmgs.") os.rename("../dmgs", TMPFILE) os.mkdir("../dmgs") print "Creating a .dmg file with a .pkg file inside for installation purposes..." call(["make", "dmg", "PACKAGE_VERSION=" + APPVERSION,"USE_PKGBUILD=1"]) print "Moving the dmg to the dmgs directory." dmgname = APPNAME + "-" + APPVERSION + ".dmg" os.rename(dmgname, "../dmgs/" + dmgname) os.chdir(self.dirq.get()) print "tarAndBuildStonix4MacApp... Finished" def makeSelfUpdatePackage(self): self.dirq.put(os.getcwd()) os.chdir("dmgs") ##### # Mount the dmg call([self.HDIUTIL, "attach", self.STONIX4MAC + "-" + self.APPVERSION + ".dmg"]) ##### # Copy the pkg to the local directory for processing call(["cp", "-a", "/tmp/the_luggage/" + self.STONIX4MAC + "-" + self.APPVERSION + \ "/payload/" + self.STONIX4MAC + "-" + self.APPVERSION + ".pkg", self.STONIX4MAC + ".pkg"]) ##### # Eject the dmg call([self.HDIUTIL, "eject", "/Volumes/" + self.STONIX4MAC + "-" + self.APPVERSION]) ##### # Zip up the pkg - this will be what is served for self-update mbl.makeZip(self.STONIX4MAC + ".pkg", self.STONIX4MAC + ".zip") ##### # Create the MD5 file - used to ensure package downloads without problem # (NOT FOR SECURITY'S SAKE) md5 = hashlib.md5(open(self.STONIX4MAC + ".zip", "rb").read()) open(self.STONIX4MAC + ".md5.txt", "w").write(md5.hexdigest()) ##### # Create the version file to put up on the server open(self.STONIX4MAC + ".version.txt", "w").write(self.APPVERSION) os.chdir(self.dirq.get())
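# Throughout MacBuilder, self.dirq (a LifoQueue) replaces shell pushd/popd:
# the current directory is put() before every os.chdir() and get() to return.
# A standalone sketch of the same idea, wrapped in a context manager so the
# "pop" cannot be forgotten; the pushd name and the module-level queue are
# illustrative, not part of the builder above.
import os
from contextlib import contextmanager
from Queue import LifoQueue

_dirq = LifoQueue()


@contextmanager
def pushd(path):
    """Emulate pushd/popd with a LifoQueue, as MacBuilder's self.dirq does."""
    _dirq.put(os.getcwd())      # push the directory we are leaving
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(_dirq.get())   # pop back, even if the block raised

# with pushd("/tmp"):
#     print(os.getcwd())        # /tmp (or /private/tmp once resolved on a Mac)
# print(os.getcwd())            # back where we started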