def handleMessage(self, message):
    body = message.get_body()
    try:
        # Parse the request
        request = Request(body)
    except XMLError:
        # Just return if the request can't be parsed
        # This throws away the request
        return
    # Process the request
    # If an exception is thrown (other than our RequestErrors),
    # the message will remain in the queue to be processed again later.
    # Corrupt data is removed before the exception is raised, so the
    # request will (hopefully) succeed next time
    # TODO Add TTL to requests, process max 3 times, or something
    reply = request.process()
    # Write the reply to S3
    url = S3.uploadFile(request.id + '.xml', reply)
    # Put the url to the reply in the outqueue
    m = Message()
    m.set_body(url)
    outQueue.write(m)

def __init__(self, **kwargs):
    Request.__init__(self, **kwargs)
    self.headers = []
    self.headers_sent = False
    self.the_get_data = self.__read_get_data()
    self.the_post_data = self.__read_post_data()
    self.the_cookies = self.__read_cookies()

def fetchPage(self):
    login_url = 'http://lib.ecjtu.jx.cn/gdweb/ReaderTable.aspx'
    history_url = 'http://lib.ecjtu.jx.cn/gdweb/HisdoryList.aspx?PageNo=' + str(self.page)
    r = Request()
    self._html = r.setCookie(self.username, self.password).get(login_url)
    self._result = r.get(history_url)
    return self

def _makeRequestsForPieces(self, conn, neededRequests, pieces, endgame):
    assert neededRequests > 0, 'requesting 0 requests?!'
    for pieceIndex in pieces:
        # iterate over pieces until we have added enough requests
        if pieceIndex not in self.requestedPieces:
            # first request for this piece
            assert not endgame, 'Endgame but still pieces left?!'
            requestObj = Request(self.pieceStatus, pieceIndex,
                                 self.torrent.getLengthOfPiece(pieceIndex), 4096)
            self.requestedPieces[pieceIndex] = requestObj
            requests = requestObj.getRequests(neededRequests, conn)
            assert len(requests) > 0, str(pieceIndex) + ': new request but nothing requestable?!'
        else:
            # a request obj exists
            requestObj = self.requestedPieces[pieceIndex]
            excludeRequests = conn.getInRequestsOfPiece(pieceIndex)
            requests = requestObj.getRequests(neededRequests, conn, excludeRequests, endgame)
        if len(requests) > 0:
            # got some valid requests
            for request in requests:
                # add request
                self.log.info('Conn %i: Requesting piece %i, offset %i, length %i',
                              conn.fileno(), pieceIndex, request[0], request[1])
                conn.addInRequest(pieceIndex, request[0], request[1],
                                  failFunc=self.failedRequest,
                                  failFuncArgs=[conn, pieceIndex, request[0], request[1]])
                neededRequests -= 1
        if neededRequests == 0:
            # finished
            break
    return neededRequests

def fetchHtml(self):
    post_url = self.BASE_URL + '/gdweb/CombinationScarch.aspx'
    get_url = self.BASE_URL + '/gdweb/ScarchList.aspx?page=' + str(self.page)
    data = self.rule.make()
    r = Request()
    self._html = r.post(post_url, data)
    self._result = r.get(get_url)
    print self._result
    return self

def add_up(self, request, floor, user):
    if user == "passenger":
        new_request = Request(2, request, floor, user)
    elif user == "operator":
        new_request = Request(0, request, floor, user)
    elif user == "firefighter":
        new_request = Request(0, request, floor, user)
    else:
        # Guard against unknown user types so new_request is always bound
        raise ValueError("unknown user type: " + str(user))
    self.up_queue.put(new_request)

def __init__(self, request, **kwargs):
    from mod_python import apache, Cookie
    from mod_python.util import FieldStorage
    Request.__init__(self, **kwargs)
    self.cookie_mod = Cookie
    self.request = request
    self.env = apache.build_cgi_env(request)
    self.the_get_data = self.__read_get_data()
    self.the_post_data = self.__read_post_data()
    self.the_cookies = self.__read_cookies()

def __init__(self, bucket, request=None):
    # Copies the context
    context = request.getContext() if request else [{}]
    Request.__init__(self, *context)
    self._running = False
    self._name = None
    self._size = 0
    #: bucket used for rate limiting
    self.bucket = bucket

def shutdown(self):
    '''Tells the server to shut itself down.
    Use this to stop the server process.'''
    self._openConnection()
    request = Request()
    request.type = "shutdown"
    self._sendString(request.toString())
    response = self._readLine()
    if response != "OK":
        print(response)
    else:
        print("ReadDB server shutting down")
    self.closeConnection()

def get_start_requests(self):
    if len(self.metas) == len(self.start_urls):
        rets = [Request(start_url, meta=meta.copy())
                for start_url, meta in zip(self.start_urls, self.metas)]
    else:
        rets = [Request(start_url) for start_url in self.start_urls]
    if self.START_FROM_LIKE_URL:
        for ind, r in enumerate(rets):
            if self.START_FROM_LIKE_URL in r.url:
                return rets[ind:]
    return rets

def run(self):
    """
    This method is the one executed by the worker thread when it is started
    by the master thread. It checks whether it is possible to carry out the
    attack, then starts performing requests, carrying on according to the
    configuration file.
    """
    # the botnet waits until it can carry out the attack at least once (aggressive behaviour)
    while True:
        # check if the attack can be carried out
        greenlight = self.carryAttack()
        if greenlight:
            # debug purposes... TODO to be removed
            print("Worker", self.getName(), "greenlight: " + str(greenlight))
            # performing the attack
            for i in range(0, self._maxcount):
                # instantiate request class
                req = Request(self._request)
                # running request object
                try:
                    req.perform()
                except requests.exceptions.ConnectionError as ce:
                    # a connection error occurred while performing the request!
                    self._loglist.append((time(), "Connection Error occurred with value: " + str(ce)))
                    continue
                except requests.exceptions.Timeout as to:
                    # a timeout error occurred while performing the request!
                    self._loglist.append((time(), "Connection Timed out. Value: " + str(to)))
                    continue
                except requests.exceptions.URLRequired as ur:
                    self._loglist.append((time(), "Invalid URL provided: " + str(ur)))
                    break
                except requests.exceptions.RequestException as re:
                    # a non-specific error occurred!
                    self._loglist.append((time(), "Generic error occurred: " + str(re)))
                    continue
                # storing the value for the logging feature in a tuple (timestamp, data)
                attackData = self.getWorkerTarget()
                self._loglist.append((time(), attackData))
                # thread-safe sleeping for the specified interval
                if len(self._period) == 1:
                    threadSleep(self._period[0])
                else:
                    threadSleep(randint(min(self._period), max(self._period)))
            # exiting from the while loop once the attack has been carried out max_count times (min. 1)
            break

def add_request(self, request, floor, user):
    if user == "passenger":
        new_request = Request(2, request, floor, user)
    elif user == "operator":
        new_request = Request(0, request, floor, user)
    elif user == "firefighter":
        new_request = Request(0, request, floor, user)
    else:
        # Guard against unknown user types so new_request is always bound
        raise ValueError("unknown user type: " + str(user))
    if floor < self.get_floor():
        print("Floor " + str(floor) + " added to down queue")
        self.down_queue.put(new_request)
    else:
        print("Floor " + str(floor) + " added to up queue")
        self.up_queue.put(new_request)

def startHandler(self, method):
    server = self.server.server
    self.s = server
    request = Request(server, self)
    self.server_version = "PWA/0.1"
    try:
        request.path = self.path.split("?")[0]
        request.method = method
        request.client_adress = self.client_address
        request.http_version = self.protocol_version
        request.cookies = self.get_cookies()
        request.args, request.files = self.get_args(method)
        request.headers = self.headers
        handler = None
        if server._config.enable_statics and request.path[:8] == "/statics":
            path = os.path.join(server._config.statics_path, request.path[9:])

            class handler(FileHandler):
                def __init__(self, *a, **k):
                    FileHandler.__init__(self, path, *a, **k)
        else:
            handler = server._urlConfig.getHandler(server, request)
        if handler is None:
            server._log.warn(u"No Handler found for url '{}'".format(request.path))
            self.sendError(request, "404")
        else:
            handler(server, request, self).onHandle()
    except Exception as e:
        self.sendError(request, "500")
        server._log.exception(e, "Cannot handle requests")

def read(self):
    with open(BASE_PATH + "request.list.data.txt", 'r+') as f:
        l = json.load(f)
        for key in l.keys():
            for key2 in l[key].keys():
                item = l[key][key2]
                R = Request()
                u = usercollection.UserList()
                u.read()
                R.ufrom = u.findByString(item['ufrom'])
                R.uto = u.findByString(item['uto'])
                R.status = item['status']
                R.today = datetime.datetime.strptime(item['today'], "%m/%d/%Y")
                self.insert(R)

def process_keywords(message):
    chat_id = message.chat.id
    request = Bot.request_dict.get(chat_id)
    if request and request.change:
        request.keywords = message.text
        Bot.bot.send_message(chat_id=message.chat.id,
                             reply_markup=Utils.MARKUPS["Changes"],
                             text="What do you want to do?")
    else:
        request = Request()
        Bot.request_dict[chat_id] = request
        request.keywords = message.text
        Bot.bot.reply_to(message, reply_markup=Utils.MARKUPS['Sort'],
                         text="Please, choose the sort order")

def list(req):
    # Logic to get list of the data
    # Integrate the JSON here
    images = models.ImageInfo.objects.all()
    print(type(images))
    for image in images:
        # print(type(image.imageUri))
        image_string = base64.b64decode(image.imageUri[23:])
        # print(image_string)
        request = Request(image_string)
        image.logo = request.response['logo']
        image.label = request.response['label']
        image.text = request.response['text']
        image.color = request.response['color']
        image.save()
    # For the image format, refer to models.py in the myapp folder
    list = models.ImageInfo.objects.all()
    '''result = []
    for item in list:
        temp = {}
        temp["id"] = item.id
        temp["logo"] = item.logo
        temp["text"] = item.text
        temp["color"] = item.color
        result.append(temp)'''
    context = {'result': list, 'STATIC_URL': settings.STATIC_URL}
    return render(req, 'list.html', context)

def newArrival(arrivalTS, totalServiceTime, timeout):
    request = Request(arrivalTS, totalServiceTime)
    arrivalEvent = Arrival(arrivalTS, request)
    timeoutEvent = Timeout(arrivalTS + timeout, request)
    return (arrivalEvent, timeoutEvent)

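# Usage sketch for newArrival (illustrative only: the min-heap event queue
# below and the assumption that events expose a .ts timestamp are mine, not
# part of the original code).
import heapq
import itertools

events, counter = [], itertools.count()
arrivalEvent, timeoutEvent = newArrival(arrivalTS=0.0, totalServiceTime=2.5, timeout=10.0)
for event in (arrivalEvent, timeoutEvent):
    # the counter breaks timestamp ties so event objects are never compared directly
    heapq.heappush(events, (event.ts, next(counter), event))
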
def publish_module_standard(module_name,
                            stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr,
                            environ=os.environ, debug=0,
                            request=None, response=None):
    must_die = 0
    status = 200
    after_list = [None]
    try:
        try:
            if response is None:
                response = Response(stdout=stdout, stderr=stderr)
            else:
                stdout = response.stdout
            if request is None:
                request = Request(stdin, environ, response)
            # make sure that the request we hand over has the
            # default layer/skin set on it; subsequent code that
            # wants to look up views will likely depend on it
            setDefaultSkin(request)
            response = publish(request, module_name, after_list, debug=debug)
        except SystemExit, v:
            must_die = sys.exc_info()
            request.response.exception(must_die)
        except ImportError, v:
            if isinstance(v, tuple) and len(v) == 3:
                must_die = v
            elif hasattr(sys, 'exc_info'):
                must_die = sys.exc_info()
            else:
                must_die = SystemExit, v, sys.exc_info()[2]
            request.response.exception(1, v)

def retreive_key(self, *data):
    time.sleep(random.randint(0, 1000) / 1000)
    msg = data[0]  # avoid shadowing the builtin dict
    from_node = data[1]
    val = self.kv.get(msg.key, None)
    response = Request("ACK-GET", msg.key, val, msg.request)
    Messaging.send_message(self, from_node, response)

def handoff(self):
    for node in self.check_for_sync:
        keys = list(self.sync_kv[node].keys())
        values = list(self.sync_kv[node].values())
        synchronize = Request("SYNC", keys, values, generate_random_number())
        Messaging.send_message(self, node, synchronize)

def connectionAlive(self):
    '''Pings the ReadDB server; returns True if the server pongs.'''
    try:
        self._openConnection()
        request = Request()
        request.type = "ping"
        self._sendString(request.toString())
        response = self._readLine()
        self.closeConnection()
        return response == "pong"
    except:
        return False

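# Hedged usage sketch (the `ReadDBClient` constructor name and the retry count
# are illustrative assumptions, not part of the original code):
import time

client = ReadDBClient()  # hypothetical constructor for the class above
for _ in range(5):
    if client.connectionAlive():
        break
    time.sleep(1)  # give the ReadDB server a moment to come up
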
def getSize(self):
    reqP = Request(self.player.ia.overviewPage + "&cp=" + str(self.id), {})
    self.player.ia.execRequest(reqP)
    soup = BeautifulSoup(reqP.content, "html.parser")
    # parse the size
    self.sizeUsed = int(soup.find(attrs={"title": 'Cases occupées'}).text)
    self.sizeMax = int(soup.find(attrs={"title": 'Cases max. disponibles'}).text)

def fetch(self, data='', page=''):
    if not page:
        page = self._page
    post_url = self.BASE_URL + '/gdweb/CombinationScarch.aspx'
    get_url = self.BASE_URL + '/gdweb/ScarchList.aspx?page=' + str(page)
    if not data:
        data = self.rule.make()
    cache_key = 'search' + ':' + data + ':' + str(page)
    html = Cache.get(cache_key)
    if not html:
        r = Request()
        r.post(post_url, data)
        html = r.get(get_url)
        Cache.set(cache_key, html, 3 * 30 * 24 * 60 * 60)  # cache for roughly 3 months
    self._html = html
    return self

def rename(self, name):
    # Returns True if renaming worked
    reqP = Request(self.player.ia.renamingPage + name + "&cp=" + str(self.id), {})
    response = self.player.ia.execRequest(reqP)
    if reqP.response.status_code == 200:
        self.name = name
        return True
    return False

def read(self):
    """
    Split the log file into "blobs", which are defined as chunks of text
    separated by a blank line. If a blob contains output from the
    RequestProcessor, create a Request object. Append Requests only if
    they pass filters (if defined).
    """
    s = open(self.path, 'r').read()
    print "reading from %s" % self.path
    blobs = s.split("\n\n")
    print "processing %d blobs" % len(blobs)
    ## requestHeaderPattern = "DCS RequestProcessor: PROCESS"
    requestHeaderPattern = "org.apache.struts.action.RequestProcessor process"
    requests = []
    for blob in blobs:
        line1 = blob.split("\n")[0]
        if string.find(line1, requestHeaderPattern) != -1:
            try:
                request = Request(blob)
                requests.append(request)
            except:
                print "failed to construct Request:", sys.exc_type, sys.exc_value
                continue
        else:
            ## print "%s not found" % requestHeaderPattern
            ## print "\tfirst line: \n%s" % line1
            pass
    self.requests = requests

def simulateManyServer(num_secs, file_per_min, in_file, num_servers):
    request_list = [Server(file_per_min) for i in range(num_servers)]
    print_queue = Queue()
    waiting_times = []
    with open(in_file) as lines:
        for line in lines:
            data = line.split(',')
            request = Request(int(data[0].strip()), data[1], int(data[2].strip()))
            print_queue.enqueue(request)
    current_server = 0
    for current_second in range(num_secs):
        if (not request_list[current_server].busy()) and (not print_queue.is_empty()):
            next_task = print_queue.dequeue()
            waiting_times.append(next_task.wait_time())
            request_list[current_server].start_next(next_task)
        current_server = (current_server + 1) % len(request_list)
        for server in request_list:
            if server.busy():  # busy is a method and must be called
                server.tick()
    average_wait = sum(waiting_times) / len(waiting_times)
    print("Average Wait %6.2f secs %3d tasks remaining." % (average_wait, print_queue.size()))

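# Illustrative call (assumptions: each line of the input file reads
# "arrival_second,name,duration", and the file name here is hypothetical):
simulateManyServer(num_secs=3600, file_per_min=5, in_file='requests.csv', num_servers=2)
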
def upgrade(self, planetId):
    if self.id is not None:
        payload = {'cmd': 'insert', 'tech': self.id}
        reqB = Request(self.player.ia.researchPage + "&cp=" + str(planetId), payload)
        self.player.ia.execRequest(reqB)
        return reqB

def broadcast_put(from_node, node_list, msg):
    if len(node_list) == 0:
        if len(HISTORY.get(msg.request, set())) < from_node.W:
            from_node.socket.sendto(pickle.dumps("FAILURE"), ('localhost', msg.client))
        elif REQUESTS.get(msg.request, False) == True:
            from_node.socket.sendto(pickle.dumps("SUCCESS"), ('localhost', msg.client))
        return
    msg = Request("STORE", msg.key, msg.value, generate_random_number(), msg.client)
    nodes = []
    for node in node_list:
        nodes.append(node.id)
        #print("Preference list="+node.id+msg.action+" from "+from_node.id+" "+str(msg.key)+":"+str(msg.value[0]))
        Messaging.send_message(from_node, node.id, msg)
    cur_time = time.time()
    while int(time.time() - cur_time) < 3:
        if not REQUESTS.get(msg.request, False) and \
                len(HISTORY.get(msg.request, set())) >= from_node.W:
            # send client success message
            from_node.socket.sendto(pickle.dumps("SUCCESS"), ('localhost', msg.client))
            REQUESTS[msg.request] = True
    HISTORY.get(msg.request, set()).add(from_node.id)
    failed_nodes = set(nodes) - HISTORY[msg.request]
    from_node.failed_nodes = from_node.failed_nodes + list(failed_nodes)
    #print("FAILED NODES "+str(from_node.failed_nodes))
    Messaging.retry_put_request(from_node, failed_nodes, msg, HISTORY[msg.request])

def broadcast_get(from_node, node_list, msg):
    if len(node_list) == 0:
        if len(HISTORY.get(msg.request, set())) < from_node.W:
            from_node.socket.sendto(pickle.dumps("FAILURE"), ('localhost', msg.client))
        return
    msg = Request("FETCH", msg.key, msg.value, generate_random_number(), msg.client)
    nodes = []
    for node in node_list:
        nodes.append(node.id)
        #print("Preference list="+node.id+msg.action)
        Messaging.send_message(from_node, node.id, msg)
    cur_time = time.time()
    while int(time.time() - cur_time) < 3:
        if not REQUESTS.get(msg.request, False) and \
                len(HISTORY.get(msg.request, set())) >= from_node.R:
            result = list()
            for id, val in HISTORY[msg.request]:
                if val is not None:
                    result += val
            #print([(number, vector.clock) for number, vector in result])
            result = from_node.perform_syntactic_reconcilation(result)
            from_node.socket.sendto(pickle.dumps(result), ('localhost', msg.client))
            #for num, clocks in result:
            #    print(str(msg.key)+" "+str(num)+" "+str(clocks.clock))
            REQUESTS[msg.request] = True
    readers = set([id for id, val in HISTORY[msg.request]])
    failed_nodes = set(nodes) - readers
    from_node.failed_nodes += list(failed_nodes)
    Messaging.retry_get_request(from_node, failed_nodes, msg, readers)

def choose_path(self, req: Request) -> Path:
    k_paths = dict()  # maps <path vector> -> propagation delay
    for node in manager.nodes.values():
        node: DataCenter
        _, pre_half, pre_delay = Graph().get_shortest_path(req.src, node.id)
        pre_half = pre_half[::-1]
        _, second_half, second_delay = Graph().get_shortest_path(node.id, req.dst)
        second_half = second_half[::-1]
        path_vec = pre_half + second_half[1:]
        path_vec = tuple(path_vec)
        if self.has_circle(path_vec) or pre_delay + second_delay >= req.maxDelay:
            continue
        # print("src = " + str(req.src) + " dst = " + str(req.dst))
        k_paths[path_vec] = pre_delay + second_delay
    paths = list()
    for p in k_paths:
        path: Path = Path(p, k_paths[p])
        if self.check_constraints(req, path):
            paths.append(path)
    if len(paths) == 0:
        return None
    for p in paths:
        self.path_weight(req, p)
    target_path: Path = paths[0]
    for p in paths:
        p: Path
        if p.weight > target_path.weight:
            target_path = p
    req.path_vec = target_path.vec
    return target_path

def index():
    # pending_requests is both mutated and reassigned below, so it must be
    # declared global; otherwise the append raises UnboundLocalError
    global pending_requests
    info = json.loads(request.data)
    fulfilled_requests = []
    timesC = {
        "Monday": ["18"],
        "Tuesday": [],
        "Wednesday": [],
        "Thursday": [],
        "Friday": [],
        "Saturday": [],
        "Sunday": [],
    }
    kerb = info["kerb"]
    course = info["course"]
    pending_requests.append(Request(kerb, course, 5, timesC))
    pending_requests, fulfilled_requests = fulfill_request(
        pending_requests[0], pending_requests[1:], fulfilled_requests)
    req = fulfilled_requests[0][1]
    return_info = {
        "kerb": req.get_name(),
        "course": req.get_course(),
    }
    return json.dumps(return_info)

def application(environ, start_response):
    path = environ.get('PATH_INFO', '/')
    print(path)
    # site root directory
    rootPath = os.getcwd()
    environ['root_path'] = rootPath
    # build the request object
    req = Request(environ, start_response)
    # routing
    for pattern, func in patterns:
        # print(re.match(pattern, path), pattern, path)
        result = re.match(pattern, path)
        if result:
            if func.__code__.co_argcount == 1:
                return func(req)
            elif func.__code__.co_argcount == len(result.groups()) + 1:
                return func(req, *result.groups())
            else:
                start_response('200 ok', [('ContentType', 'text/html')])
                return ["Internal server error, please check the code".encode('utf8')]
    start_response('200 ok', [('ContentType', 'text/html')])
    # The response body is an iterable whose elements must be byte strings
    return ["<h1>404 Not found</h1>".encode('utf8')]

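# Illustrative route table for the dispatcher above (all names here are
# hypothetical, and `req.start_response` assumes the Request object keeps a
# reference to the start_response callable it was constructed with):
def home(req):
    req.start_response('200 ok', [('ContentType', 'text/html')])
    return ["home".encode('utf8')]

def user(req, user_id):
    # each regex group is passed through as an extra positional argument
    req.start_response('200 ok', [('ContentType', 'text/html')])
    return [("user " + user_id).encode('utf8')]

patterns = [
    (r'^/$', home),
    (r'^/user/(\d+)$', user),
]
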
def scanSystem(self, galaxy, system):
    # TODO add check for not enough deut
    payload = {}
    payload["galaxy"] = galaxy
    payload["system"] = system
    scanSystemRequest = Request(self.player.ia.galaxyPage + "&cp=" + str(self.id), payload)
    self.player.ia.execRequest(scanSystemRequest)
    soup = BeautifulSoup(scanSystemRequest.content, "html.parser")
    # parse all available locations
    divContent = soup.find("div", id="content")
    systemTable = divContent.find("table", recursive=False)
    locationList = systemTable.find_all("tr")[2:-5]  # the first 2 and last 5 are headers
    planets = []
    locationNumber = 0
    for location in locationList:
        locationNumber += 1
        tdList = location.find_all("td")
        if tdList[0].a is None:  # if there is a planet in this location
            nameWithActivity = tdList[2].text  # TODO remove activity from the name
            planet = Planet(None, nameWithActivity, [galaxy, system, locationNumber, 1], None)
            planets.append(planet)
            # TODO add moon
    for planet in self.player.planets:
        if planet.pos[0] == galaxy and planet.pos[1] == system:
            planet.lastKnownSystem = planets
    return planets

def produce_on_request_type(self, request_type):
    request_list = []
    # produce arrival time list
    arrival_time_list = []
    prev_sum = 0
    while prev_sum < self.total_time:
        arrival_slot = np.random.exponential(request_type.arrival_rate)
        prev_sum += arrival_slot
        arrival_time_list.append(prev_sum)
    # produce service time list
    service_time_list = [np.random.exponential(request_type.service_rate)
                         for _ in arrival_time_list]
    for arrival_time, service_time in zip(arrival_time_list, service_time_list):
        id = str(arrival_time) + '-' + str(request_type.id)
        request_list.append(Request(id,
                                    request_type.source, request_type.sink,
                                    arrival_time, request_type.bandwidth_list[0],
                                    service_time, request_type))
        if not request_type.isStatic:
            # produce scale request
            # initial bandwidth
            current_bandwidth = random.choice(request_type.bandwidth_list)
            prev_sum = 0
            while prev_sum < service_time:
                if current_bandwidth > request_type.bandwidth_list[0]:
                    scale_bw = current_bandwidth - request_type.bandwidth_list[0]
                    # service time for scale request
                    scale_service_time = np.random.exponential(request_type.switch_rate_list[1])
                    request_list.append(Request(id, request_type.source,
                                                request_type.sink,
                                                arrival_time + prev_sum, scale_bw,
                                                scale_service_time, request_type, True))
                    prev_sum += scale_service_time
                    current_bandwidth = request_type.bandwidth_list[0]
                else:
                    lower_bw_service_time = np.random.exponential(request_type.switch_rate_list[0])
                    prev_sum += lower_bw_service_time
                    current_bandwidth = request_type.bandwidth_list[1]
    return request_list

def choose_node(self, req: Request, path: Path) -> DataCenter:
    # First compute the profit of deploying on each node
    for node in path.nodes:
        node: DataCenter
        node.weight = req.unitBid - path.band_cost - node.unitCpuPrice * req.process_source
    target_node: DataCenter = path.nodes[0]
    for node in path.nodes:
        if node.weight > target_node.weight and node.leftCpu >= req.process_source:
            target_node = node
    if target_node.leftCpu < req.process_source:
        return None
    req.profit = req.bid - (path.band_cost + target_node.unitCpuPrice * req.process_source) \
        * (req.offtime - req.ontime)
    target_node.leftCpu -= req.process_source
    req.node_id = target_node.id
    return target_node

def _handle_socket(self, client_socket, client_addr):
    request = client_socket.recv(self.BUFFER_SIZE)
    print("Accept %s:%d" % client_addr)
    (type, verified, deadline) = self._verify_request(request)
    if verified:
        self._allow_client(Request(client_addr, deadline))
        client_socket.send(self.response[type])
    client_socket.close()

def upgrade(self):
    if self.id is not None:
        payload = {'cmd': 'insert', 'building': self.id}
        reqB = Request(self.planet.player.ia.buildingPage + "&cp=" + str(self.planet.id), payload)
        self.planet.player.ia.execRequest(reqB)
        return reqB

def upd_time(self, dt):
    self.time_to_finish -= dt
    if self.time_to_finish <= 1e-5:
        self.time_to_finish = self.work_time_distribution.generate()
        return Request()
    return None

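# A minimal driver sketch (assumptions: `machine` is an instance of the class
# defining upd_time, and the fixed 0.1 time step is a made-up choice):
completed = []
dt = 0.1
for _ in range(1000):
    req = machine.upd_time(dt)  # a new Request is returned whenever work finishes
    if req is not None:
        completed.append(req)
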
def generate_request(t, always_create=False):
    if np.random.rand() < a_r or always_create:
        request = Request(
            random_position(), t,
            # np.random.randint's upper bound is exclusive, so use
            # len(store_positions) to keep the last store selectable
            store_positions[np.random.randint(0, len(store_positions))])
        return request
    return None

def get(self):
    logging.info("Getting all open requests")
    self.response.status = 200
    self.response.headerlist = [("Content-type", "application/json")]
    requests = []
    for req in Request.getAllOpenRequests():
        requests.append(req.to_dict())
        # requests.append(encoder.default(req.to_dict()))
    self.response.write(json.dumps(requests))

def parse_funct(filename, config, db_handler):
    start_time = datetime.datetime.now()
    history = {}
    result = {"start_time": start_time, "data": history}
    r = Request(config=config, decode="utf-8-sig")
    # Signal handler (registration fails outside the main thread; ignore)
    try:
        signal.signal(signal.SIGINT, r.signal_handler)
    except:
        pass
    if config.depth >= 0:
        linktexts = []
        linktexts.append((config.target_url, config.target_name))
        history.update(r.navigate(linktexts=linktexts, history=history,
                                  config=config, decode="utf-8-sig"))
    Output.output_handler(result=result, config=config,
                          output_filename=filename, db_handler=db_handler)
    r.close()

def post(self):
    logging.info("Creating new request")
    # content_len = int(self.response.headers['content-length'])
    post_body = self.request.body
    print(post_body)
    # post_body is a dictionary with key/value pairs of json values
    json_dict = json.loads(post_body)
    r = Request(requestId=json_dict["requestId"],
                name=json_dict["name"],
                requestTime=str(datetime.datetime.now()),
                phoneNumber=json_dict["phoneNumber"],
                urgency=json_dict["urgency"],
                startLocation_lat=str(json_dict["startLocation_lat"]),
                startLocation_lon=str(json_dict["startLocation_lon"]),
                endLocation_lat=str(json_dict["endLocation_lat"]),
                endLocation_lon=str(json_dict["endLocation_lon"]),
                walkCompleted=False,
                requestAccepted=False)
    # Add request to datastore
    r.put()
    self.response.status = 200

def getRequestByUUID(self, id):
    match = Request.query(Request.requestId == id).fetch()
    logging.info("match %s" % str(match[0]))
    return match[0]

import sys
import os
import re
import json
from bson import ObjectId
from Filter import Filter
from Request import Request


class JSONEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, ObjectId):
            return str(o)
        return json.JSONEncoder.default(self, o)


# Create request object to handle user input.
q = Request()
months = {"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6,
          "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12}
date = q.date.split()
month = date[0]
day = date[1]
# connect to Mongo and search based on criteria
q.connect()
criteria = {"lang": "en", "created_at": {'$regex': q.date},
            "text": {"$not": re.compile("RT")}}
cursor = q.search(criteria).limit(1000)
# Open file I/O streams
directory = os.path.dirname(os.getcwd())

def __init__(self, dict=None):
    Request.__init__(self)
    self._stack = []
    if dict:
        # Dictionaries come in from web server adapters like the CGIAdapter
        assert dict['format'] == 'CGI'
        self._time = dict['time']
        self._environ = dict['environ']
        self._input = dict['input']
        self._requestID = dict['requestID']
        self._fields = FieldStorage.FieldStorage(
            self._input, environ=self._environ,
            keep_blank_values=True, strict_parsing=False)
        self._fields.parse_qs()
        self._cookies = Cookie()
        if self._environ.has_key('HTTP_COOKIE'):
            # Protect the loading of cookies with an exception handler,
            # because MSIE cookies sometimes can break the cookie module.
            try:
                self._cookies.load(self._environ['HTTP_COOKIE'])
            except Exception:
                traceback.print_exc(file=sys.stderr)
    else:
        # If there's no dictionary, we pretend we're a CGI script
        # and see what happens...
        self._time = time.time()
        self._environ = os.environ.copy()
        self._input = None
        self._fields = cgi.FieldStorage(keep_blank_values=True)
        self._cookies = Cookie()

    env = self._environ

    # Debugging
    if debug:
        f = open('env.text', 'a')
        save = sys.stdout
        sys.stdout = f
        print '>> env for request:'
        keys = env.keys()
        keys.sort()
        for key in keys:
            print '%s: %s' % (repr(key), repr(env[key]))
        print
        sys.stdout = save
        f.close()

    # Get adapter, servlet path and query string
    self._servletPath = env.get('SCRIPT_NAME', '')
    self._pathInfo = env.get('PATH_INFO', '')
    self._queryString = env.get('QUERY_STRING', '')
    if env.has_key('REQUEST_URI'):
        self._uri = env['REQUEST_URI']
        # correct servletPath if there was a redirection
        if not (self._uri + '/').startswith(self._servletPath + '/'):
            i = self._uri.find(self._pathInfo)
            self._servletPath = i >= 0 and self._uri[:i] or '/'
    else:
        # REQUEST_URI isn't actually part of the CGI standard and some
        # web servers like IIS don't set it (as of 8/22/2000).
        if env.has_key('SCRIPT_URL'):
            self._uri = self._environ['SCRIPT_URL']
            # correct servletPath if there was a redirection
            if not (self._uri + '/').startswith(self._servletPath + '/'):
                i = self._uri.find(self._pathInfo)
                self._servletPath = i >= 0 and self._uri[:i] or '/'
        else:
            self._uri = self._servletPath + self._pathInfo
        if self._queryString:
            self._uri += '?' + self._queryString
    self._absolutepath = env.has_key('WK_ABSOLUTE')  # set by adapter
    if self._absolutepath:
        self._fsPath = self.fsPath()

    # We use the cgi module to get the fields,
    # but then change them into an ordinary dictionary of values:
    try:
        keys = self._fields.keys()
    except TypeError:
        # This can happen if, for example, the request is an XML-RPC request,
        # not a regular POST from an HTML form. In that case we just create
        # an empty set of fields.
        keys = []
    dict = {}
    for key in keys:
        value = self._fields[key]
        if type(value) is not ListType:
            if value.filename:
                if debug:
                    print "Uploaded File Found"
            else:
                # i.e., if we don't have a list, we have one of those
                # cgi.MiniFieldStorage objects. Get its value:
                value = value.value
        else:
            # extract those values
            value = map(lambda miniFieldStorage: miniFieldStorage.value, value)
        dict[key] = value
    self._fieldStorage = self._fields
    self._fields = dict

    # We use Tim O'Malley's Cookie class to get the cookies,
    # but then change them into an ordinary dictionary of values
    dict = {}
    for key in self._cookies.keys():
        dict[key] = self._cookies[key].value
    self._cookies = dict

    self._contextName = None
    self._serverSidePath = self._serverSideContextPath = None
    self._serverRootPath = self._extraURLPath = ''
    self._sessionExpired = False

    self._pathInfo = self.pathInfo()

    if debug:
        print "Done setting up request, found keys %r" % self._fields.keys()

class Manager(Lister):
    """
    class Manager:
    Main class to create DARball.
    - processes initial request,
    - decides whether it should be an incremental distribution.
      For example, if the base distribution for the same release is found
      in the dar shared pool, and the user's top release directory is in
      a private development area.
      If yes:
      - creates incremental distribution, including a reference to the
        base release darball, and the private part of the application.
      If not:
      - creates a private base darball and incremental part, and
        generates a request for a public base darball.
    The creation of a base darball includes the following steps:
    - create RTE directory structure with simlinks according to request,
    - create metadata,
    - calculate criteria for initial request,
    - check if calculated criteria are satisfactory
    - run tests (optional)
    - adjust request and repeat the procedure until satisfied
      (optional in interactive, or semi-interactive mode)
    - create and save darball
    """
    # Defaults:
    def __init__(self, darInput, pltf=None, cnf=None):
        """
        __init__
        Initializes DAR Manager aspects common to both create and install mode.
        """
        Lister.__init__(self)
        # Set defaults:
        self.darballSuffix = '_dar.tar.gz'
        # and initialize variables:
        self.baseBomDict = {}  # needed for incremental darball
        self.darpool = None
        self.dar = None  # needed for creating darball
        self.config = cnf
        infoOut('Validating DAR configuration ...')
        if getVerbose():
            print "CONFIGURATION:", self.config
        # Check that dar shared pool is set up, exists and has right permissions:
        # The DAR shared pool is where base dar distributions and/or
        # installations are looked up.
        if self.config.has_key("dar_shared_pool"):
            self.sharedPool = self.config["dar_shared_pool"]
        else:
            sys.exit("ERROR: dar shared pool is not defined in the configuration")
        if self.config.has_key("dar_dist_pool"):
            self.distPool = self.config["dar_dist_pool"]
        else:
            sys.exit("ERROR: dar dist pool is not defined in the configuration")
        if self.config.has_key("dar_inst_pool"):
            self.instPool = self.config["dar_inst_pool"]
        else:
            sys.exit("ERROR: dar inst pool is not defined in the configuration")
        # Check that dar tmpdir is set, exists, and has right permissions:
        if self.config.has_key("dar_tmp_dir"):
            self.tmpdir = self.config["dar_tmp_dir"]
        else:
            sys.exit("ERROR: dar_tmp_dir is not defined in the configuration")
        if notWritable(self.tmpdir):
            sys.exit(notWritable(self.tmpdir))
        # Each time dar is called, it creates a new build directory
        # in the tmpdir pool, using a unique datestamp:
        self.blddir = self.tmpdir + '/' + str(time.time())
        # Start logger and pre-set log files:
        self.logdir = self.blddir + '/logfiles'  # one dir for all logfiles
        self.timingLog = Logger(self.logdir + '/session.timing')
        self.sessionStdoutLog = self.logdir + '/session.stdout'
        self.sessionHistoryLog = self.logdir + '/session.history'
        # Create necessary directories and files:
        os.makedirs(self.blddir)
        os.makedirs(self.logdir)
        for logfile in (self.sessionStdoutLog, self.sessionHistoryLog):
            infoOut('Creating ' + logfile + ' file')
            open(logfile, 'w').close()
        # Get platform info:
        self.platform = pltf
        self.timingLog('Completed configuration and setup ')
        # Create a request
        self.currentRequest = Request(darInput, self.timingLog)
        self.timingLog('Created request object')
        # Initialize dar metadata
        self.darMeta = Metadata()
        self.darMeta.setDarVersion(getDarVersion())
        # Get release/version metadata from request and
        # put them into Metadata container:
        self.darMeta.setDarInput(darInput)
        self.darMeta.setBaseReleaseName(self.currentRequest.getBaseReleaseName())
        self.darMeta.setProjectName(self.currentRequest.getProjectName())
        self.darMeta.setVersionTag(self.currentRequest.getVersionTag())
        # Architecture
        self.darMeta.setArchitecture(self.currentRequest.getArchitecture())

    def prepareDistribution(self, method='copy'):
        """
        Manager.prepareDistribution:
        Creates DARball structure according to current request,
        using copy, or link method.
        """
        # Check how much space is left in tmp dir,
        # where the darball will be built:
        spaceLeft(self.tmpdir)
        self.timingLog('Checked space left in ' + self.tmpdir)
        # If an incremental darball was requested, get its metadata,
        # and fill up the base bom dictionary
        baseDar = self.currentRequest.baseDar
        self.darMeta.setBaseDar(baseDar)
        if baseDar:
            if os.path.isdir(baseDar):
                # This should be an installation directory:
                bomFile = baseDar + '/' + getBOMFileName()
                if os.path.isfile(bomFile):
                    # Create base bom dictionary directly from file
                    for entry in open(bomFile, 'r').readlines():  # a list of lines
                        (md5, entryPath) = string.split(entry)
                        self.baseBomDict[entryPath] = md5
                else:
                    raise InputError(baseDar,
                                     'Could not find ' + getBOMFileName() + ' here.')
            else:
                if os.path.isfile(baseDar):
                    # This should be a DARball:
                    baseDarball = baseDar
                else:
                    # This should be a release tag of a base darball
                    # available from the dar pool.
                    # Lookup for base darball in the distribution pool:
                    if notReadable(self.distPool):
                        sys.exit(notReadable(self.distPool))
                    baseDarball = self.findBaseDarball(self.distPool, baseDar)
                    if not baseDarball:
                        sys.exit('Could not find base distribution for '
                                 + baseDar + ' in ' + self.sharedPool)
                # Create base bom dictionary on the fly from the archive:
                result = readFileFromArchive(baseDarball, getBOMFileName())
                for entry in string.split(result, '\n'):
                    md5, entryPath = string.split(entry)
                    self.baseBomDict[entryPath] = md5
        # Now create DAR directory structure:
        self.dar = Structure(self.blddir, self.currentRequest, method,
                             baseBom=self.baseBomDict)
        self.timingLog('Created DAR in ' + self.blddir + ' using '
                       + method + ' method')
        instDir = self.dar.getTopInstDir()
        if method == 'copy':
            self.timingLog('Counted install. size before cleanup in shared dir')
            # Make cleanup and create BOM only for the FSO image (shared
            # between the environment variables):
            cleanup(self.dar.getSharedTop(),
                    os.path.join(self.dar.getTopInstDir(), getBOMFileName()))
            self.timingLog('Removed duplicates and created BOM for '
                           + self.dar.getSharedTop())
            size = noteSize(instDir)
            self.timingLog('Counted size after cleanup in ' + self.dar.getSharedTop())
            self.darMeta.setInstallationSize(size)
        # fakeScram()
        # including scram runtime command, which may replace setup scripts.
        self.saveMetadata(os.path.join(self.dar.getTopInstDir(), getMetaDataFile()))
        # - saves into metadata file info about creation of a darball,
        # project configuration info. Adds a spec file (and darInput in
        # scram mode). DAR info and its source code go here.
        self.createReadmeFile()
        # DAR info, installation instructions, reference to documentation.
        # self.rememberSize()

    def installApplication(self, installDir, testmode):
        """
        Manager.installApplication
        Installs the application by performing the following steps:
        - checks to see if the installation directory is writable
        - loads metadata
        - checks if enough disk space is available
        - checks to see if this is an incremental installation
        - looks for the base distribution for this release
        - checks for previous installation of this package and checks
          the md5sum <not implemented>
        - unpacks the installation package
        - publishes package metadata
        - sets up the environment scripts and runs setup scripts
        - creates links and checks installation size
        """
        if notWritable(installDir):
            sys.exit(notWritable(installDir))
        # Absolutize path if needed:
        if not os.path.isabs(installDir):
            installDir = os.path.abspath(installDir)
        # Extract metadata from distribution:
        metadata = loadMetadata(getMetaDataFile(),
                                archive=self.currentRequest.getDarFile())
        infoOut('Loaded DAR metadata from ' + self.currentRequest.getDarFile())
        # If in test mode, print out users info and exit:
        if testmode:
            print metadata.userInfo()
            return
        # Check that there is enough space in the installation directory:
        available = spaceLeft(installDir)
        installSize = float(metadata.getInstallationSize())
        if available < installSize:
            sys.exit('Not enough space on the disk!\n Installation size: '
                     + str(installSize) + ' KB\n Available: '
                     + str(available) + ' KB')
        self.timingLog('Checked space left in ' + installDir)
        ##########################################
        # Handling incremental DARballs:
        ##########################################
        # Check if darball metadata contain a reference to a base dar:
        baseDar = metadata.getBaseDar()
        if baseDar:
            infoOut("This is incremental distribution based on " + baseDar)
            # This is an incremental darball, so
            # we need a matching base installation.
            baseInstallation = self.currentRequest.baseDar
            if not baseInstallation:
                usageError('Please specify a base .')
            # Lookup for base installation in the installation pool:
            # baseInstallation = self.findBaseInstallation(self.distPool,
            #                                              baseDar)
            # if not baseInstallation:
            #     sys.exit('Could not find base installation for ' +
            #              baseDar + ' in ' + self.sharedPool)
            infoOut("(todo)Verifying base installation " + baseInstallation)
        ##########################################
        # General actions for all DARballs:
        ##########################################
        # Check if the installation already exists:
        releaseInstTop = os.path.join(installDir, metadata.getVersionTag(),
                                      metadata.getArchitecture())
        if os.path.exists(releaseInstTop):
            # TODO: validate the installation using md5sum and
            # tell user the results
            sys.exit("ERROR: You already have installation here: \n "
                     + releaseInstTop + "\nExiting ....\n")
        # Unpack darball
        infoOut('Unpacking ' + self.currentRequest.getDarFile() + ' .... ')
        unpackCommand = 'tar -xz -C ' + installDir + ' -f ' + \
                        self.currentRequest.getDarFile()
        (status, out) = commands.getstatusoutput(unpackCommand)
        # Check that it unpacked into the install dir as expected:
        if status:  # failed
            if out:
                # save command output in the logfile:
                unpackLogfile = os.path.join(self.logdir, 'dar_unpack.log')
                tarlog = open(unpackLogfile, 'w')
                tarlog.write('Output from unpacking command:\n'
                             + unpackCommand + '\n' + out)
                tarlog.close()
                sys.exit('Unpacking failed with exit status ' + str(status)
                         + '\nOutput can be found in \n' + unpackLogfile)
        elif not os.path.isdir(releaseInstTop):
            sys.exit('Can not find ' + releaseInstTop)
        # Link to a base installation for incremental darballs
        if baseDar:
            infoOut("Create a link to base installation:\n ln -s "
                    + baseInstallation + '/shared ' + releaseInstTop + '/base')
            os.symlink(baseInstallation + '/shared', releaseInstTop + '/base')
        # Set up environment scripts:
        infoOut("Setting up the installation")
        templateStub = os.path.join(releaseInstTop, getSetupScriptBasename())
        newSetupScriptStub = os.path.join(releaseInstTop, 'envSetup')
        helpText = self.updateSetupScripts(templateStub,
                                           releaseInstTop,
                                           newSetupScriptStub)
        # For compatibility with the old Production tools:
        oldSetupScriptStub = os.path.join(releaseInstTop,
                                          metadata.getVersionTag() + '_env')
        self.updateSetupScripts(templateStub,
                                releaseInstTop,
                                oldSetupScriptStub)
        # Move script templates to the DAR admin directory.
        # infoOut('Removing setup scripts templates ...')
        cmd = 'mv ' + templateStub + '.*sh' + ' ' \
              + installDir + '/' + getDarDirName()
        (status, out) = commands.getstatusoutput(cmd)
        if status != 0:  # did not succeed
            DARInternalError("In installApplication: doing command" + cmd
                             + "\ncommand output :\n" + out)
        # infoOut("(todo) Do md5sum check of BOM in resulting installation")
        # infoOut("(todo) If successful, "
        #         "register installation in publishing service")
        # Publish installation metadata:
        self.publishMetadata(installDir + '/' + getDarDirName())
        # Print out runtime environment setup help (and exit):
        infoOut(helpText)
        infoOut("Installation completed.")

    def updateSetupScripts(self, template, installDir, newScriptStub):
        """
        Manager.updateSetupScript
        Copies the setup scripts for the different shell environments
        and prints instructions for using them
        """
        # Look into using shutils.copyfile(src, dest)
        # For bash shell:
        envScriptSh = newScriptStub + '.sh'
        fileRead = open(template + '.sh')
        contents = fileRead.readlines()
        contents.insert(0, 'export ' + getTopEnvName() + '=\"' + installDir + '\";\n')
        fileRead.close()
        fileWrite = open(envScriptSh, 'w')
        fileWrite.writelines(contents)
        fileWrite.close()
        # For tcsh/csh shell:
        envScriptCsh = newScriptStub + '.csh'
        fileRead = open(template + '.csh')
        contents = fileRead.readlines()
        contents.insert(0, 'setenv ' + getTopEnvName() + ' \"' + installDir + '\";\n')
        fileRead.close()
        fileWrite = open(envScriptCsh, 'w')
        fileWrite.writelines(contents)
        fileWrite.close()
        helpText = """
To set the runtime environment:
------------------------------
in csh or tcsh:   source """ + envScriptCsh + """
in sh, bash, zsh: . """ + envScriptSh + """
"""
        return helpText

    def publishMetadata(self, metadataDir):
        """
        Publishing step after successful install
        """
        # Currently it simply removes the .DAR directory;
        # in future it could update a simple database of dar installations
        cmd = 'rm -rf ' + metadataDir
        (status, out) = commands.getstatusoutput(cmd)
        if status != 0:  # did not succeed
            DARInternalError("In publishMetadata: doing command" + cmd
                             + "\ncommand output :\n" + out)

    def createReadmeFile(self):
        """
        Manager.createReadmeFile:
        Include DAR info, installation instructions, reference to
        documentation for the user to read after manually unpacking
        the darball. <not implemented>
        """
        print "Creating users README file in the darball (not implemented)"

    def rememberSize(self):
        """
        FSO only reflects the size of the shared space. Without the RTE
        structure, and in incremental installations, size above the base
        does not contribute to the total installation size
        """
        # Do we still need this? The size is an attribute of the globalFso,
        # and it can be saved in Metadata.
        # Yes, we do, because fso only reflects the size of the shared part,
        # without the rte structure, and because for incremental dar files
        # sizes above the base do not contribute to the total installation size.
        # infoOut("Counting and remembering the size of installation...")
        # size = noteSize(self.blddir)
        # if size:
        #     self.darMeta.setInstallationSize(int(string.split(output)[0]))
        #     infoOut("Installation size is " + size + " KB")
        # else:
        #     warning('Could not get size of ' + self.blddir)
        pass

    def saveAndQuit(self, darFile='useDefault'):
        """
        Creates the final DARball and returns. This is called when the user
        is satisfied with the result, or when the criteria are satisfied
        (if in non-interactive mode). If successful, it will return the name
        of the final darball that was created, otherwise it will return 'None'.
        """
        if darFile == 'useDefault':
            darFile = self.blddir + "/" + \
                      self.darMeta.getVersionTag() + \
                      "." + self.currentRequest.getArchitecture() + \
                      self.darballSuffix
        else:
            darFile = self.blddir + "/" + \
                      self.currentRequest.getDarName() + \
                      self.darballSuffix
        tarCmd = string.join(['tar -cz -f', darFile,
                              '-C', os.path.join(self.blddir, self.dar.getTopInstDir()),
                              getDarDirName(),
                              '-C', self.blddir,
                              self.darMeta.getVersionTag()])
        infoOut("Creating DAR distribution in " + self.blddir
                + " using tar command:")
        infoOut(tarCmd)
        (status, out) = commands.getstatusoutput(tarCmd)
        if status == 0:  # successful
            infoOut("Created " + darFile)
            self.timingLog('Created darball: ' + darFile)
            return darFile
        else:
            infoOut("Creation of dar file failed!\n " + out)
            return None

    def setDarPool(self, location):
        """
        Manager.setDarPool:
        Mutator method to set the DAR Pool location
        """
        self.darpool = location

    def findBaseDarball(self, darPool, releaseName):
        """
        Manager.findBaseDarball
        Finds a proper darball in the darpool,
        based on the project release name
        """
        for filename in os.listdir(darPool):
            if filename == releaseName + self.darballSuffix:
                return darPool + '/' + filename

    def changeRequest(self):
        """
        Manager.changeRequest <not implemented>
        """
        print "Changing request (not implemented)"

    def runTest(self):
        """
        Manager.runTest <not implemented>
        """
        print "Running test (not implemented)"

    def checkCriteria(self):
        """
        Manager.checkCriteria: <not implemented>
        """
        print "Checks if criteria are satisfied (not implemented)"

    def getCriteria(self):
        """
        Manager.getCriteria: <not implemented>
        """
        print "Calculating criteria (not implemented)"

    def findBest(self):
        """
        Manager.findBest: <not implemented>
        """
        print "Finding request with best criteria (not implemented)"

    def runDebug(self):
        """
        Manager.runDebug: <not implemented>
        """
        print "Running debug test (not implemented)"

    def saveMetadata(self, metadataFile):
        """
        Manager.saveMetadata
        Saves metadata information to a given file
        """
        print "saving DAR metadata"
        self.darMeta.saveMetadata(metadataFile)

from __future__ import print_function
import os
import re
import random
from time import time

from Filter import Filter
from Request import Request

# Create request object to handle user input.
q = Request()
months = {"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6,
          "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12}
date = q.date.split()
month = date[0]
day = date[1]
# connect to Mongo and search based on criteria
q.connect()
criteria = {"lang": "en", "created_at": {'$regex': q.date},
            "text": {"$not": re.compile("RT")}}
cursor = q.search(criteria, {"text": 1})
# load tweet with id
corpus = []
ids = []
tweet_filter = Filter(25)
for document in cursor:
    text = ' '.join(document["text"].encode("utf-8").split())
    corpus.append(text)
    ids.append(document["_id"])

import socket, errno

HOST = "127.0.0.1"  # The remote host
TEST_CMDID = 10000
PORT = 8889  # The same port as used by the server

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
# If you use Django or Tornado, you can keep this connection alive
# when the client-side server starts.
requestData = {"content": "Hello world!"}
request = Request(cmdid=TEST_CMDID, data=requestData)
# remember to catch IOError here in order to handle situations
# like server reboots or internal network breakdown.
try:
    s.send(request.serialization())
except IOError as e:
    if e.errno == errno.EPIPE:
        # you should try to reconnect your socket connection here
        pass
result = s.recv(1024)
print "result is", repr(result)
# You can choose to wait for the reply or not
# s.close()

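# A minimal reconnect sketch for the EPIPE case above (illustrative only;
# the retry count and helper name are made-up choices, not part of the
# original code):
def send_with_reconnect(sock, payload, addr, retries=3):
    for attempt in range(retries):
        try:
            sock.send(payload)
            return sock
        except IOError as e:
            if e.errno != errno.EPIPE:
                raise
            # broken pipe: open a fresh connection and try again
            sock.close()
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect(addr)
    raise IOError("could not send after %d attempts" % retries)
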
def publish_module(module_name,
                   stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr,
                   environ=os.environ, debug=0,
                   request=None, response=None, extra={}):
    must_die = 0
    status = 200
    after_list = [None]
    from Response import Response
    from Request import Request
    from Publish import publish
    from zope.publisher.interfaces import ISkinnable
    from zope.publisher.skinnable import setDefaultSkin
    try:
        try:
            if response is None:
                response = Response(stdout=stdout, stderr=stderr)
            else:
                stdout = response.stdout
            # debug is just used by tests (has nothing to do with debug_mode!)
            response.handle_errors = not debug
            if request is None:
                request = Request(stdin, environ, response)
            # make sure that the request we hand over has the
            # default layer/skin set on it; subsequent code that
            # wants to look up views will likely depend on it
            if ISkinnable.providedBy(request):
                setDefaultSkin(request)
            for k, v in extra.items():
                request[k] = v
            response = publish(request, module_name, after_list, debug=debug)
        except (SystemExit, ImportError):
            # XXX: Rendered ImportErrors were never caught here because they
            # were re-raised as string exceptions. Maybe we should handle
            # ImportErrors like all other exceptions. Currently they are not
            # re-raised at all, so they don't show up here.
            must_die = sys.exc_info()
            request.response.exception(1)
        except:
            # debug is just used by tests (has nothing to do with debug_mode!)
            if debug:
                raise
            request.response.exception()
        status = response.getStatus()
        if response:
            outputBody = getattr(response, 'outputBody', None)
            if outputBody is not None:
                outputBody()
            else:
                response = str(response)
                if response:
                    stdout.write(response)
        # The module defined a post-access function, call it
        if after_list[0] is not None:
            after_list[0]()
    finally:
        if request is not None:
            request.close()
        if must_die:
            # Try to turn exception value into an exit code.
            try:
                if hasattr(must_die[1], 'code'):
                    code = must_die[1].code
                else:
                    code = int(must_die[1])
            except:
                code = must_die[1] and 1 or 0
            if hasattr(request.response, '_requestShutdown'):
                request.response._requestShutdown(code)
            try:
                raise must_die[0], must_die[1], must_die[2]
            finally:
                must_die = None
    return status

def __init__(self, requestDict=None):
    Request.__init__(self)
    self._stack = []
    if requestDict:
        # Dictionaries come in from web server adapters like the CGIAdapter
        assert requestDict['format'] == 'CGI'
        self._time = requestDict['time']
        self._environ = requestDict['environ']
        self._input = requestDict['input']
        self._requestID = requestDict['requestID']
        self._fields = FieldStorage.FieldStorage(
            self._input, environ=self._environ,
            keep_blank_values=True, strict_parsing=False)
        self._cookies = Cookie()
        if 'HTTP_COOKIE' in self._environ:
            # Protect the loading of cookies with an exception handler,
            # because MSIE cookies sometimes can break the cookie module.
            try:
                self._cookies.load(self._environ['HTTP_COOKIE'])
            except Exception:
                traceback.print_exc(file=sys.stderr)
    else:
        # If there's no dictionary, we pretend we're a CGI script
        # and see what happens...
        self._time = time()
        self._environ = os.environ.copy()
        self._input = None
        self._fields = cgi.FieldStorage(keep_blank_values=True)
        self._cookies = Cookie()

    env = self._environ

    # Debugging
    if debug:
        f = open('env.text', 'a')
        save = sys.stdout
        sys.stdout = f
        print '>> env for request:'
        for key in sorted(env):
            print '%s: %s' % (repr(key), repr(env[key]))
        print
        sys.stdout = save
        f.close()

    # Get adapter, servlet path and query string
    self._absolutepath = 'WK_ABSOLUTE' in env  # set by adapter
    if self._absolutepath:
        # this is set when the servlet is a webserver file that shall
        # be handled without context (e.g. when the psp-handler is used)
        self._servletPath = ''  # make it look like the normal handler
        self._extraURLPath = env.get('PATH_INFO', '')
        self._pathInfo = env.get('SCRIPT_NAME', '') + self._extraURLPath
        self._fsPath = self.fsPath()
    else:
        self._servletPath = env.get('SCRIPT_NAME', '')
        self._pathInfo = env.get('PATH_INFO', '')
        self._extraURLPath = ''  # will be determined later
    self._queryString = env.get('QUERY_STRING', '')
    if 'REQUEST_URI' in env:
        self._uri = env['REQUEST_URI']
        # correct servletPath if there was a redirection
        if not (self._uri + '/').startswith(self._servletPath + '/'):
            i = self._uri.find(self._pathInfo)
            self._servletPath = i > 0 and self._uri[:i] or ''
    else:
        # REQUEST_URI isn't actually part of the CGI standard and some
        # web servers like IIS don't set it (as of 8/22/2000).
        if 'SCRIPT_URL' in env:
            self._uri = self._environ['SCRIPT_URL']
            # correct servletPath if there was a redirection
            if not (self._uri + '/').startswith(self._servletPath + '/'):
                i = self._uri.find(self._pathInfo)
                self._servletPath = i > 0 and self._uri[:i] or ''
        else:
            self._uri = self._servletPath + self._pathInfo
        if self._queryString:
            self._uri += '?' + self._queryString

    # We use the cgi module to get the fields,
    # but then change them into an ordinary dictionary of values:
    fieldStorage, fields = self._fields, {}
    try:
        # Avoid accessing fieldStorage as dict; that would be very slow
        # as it always iterates over all items to find a certain key.
        # Instead, iterate directly over the items of the internal list.
        fieldItems = fieldStorage.list
    except AttributeError:
        # This can happen if we do not have a regular POST
        # from an HTML form, but, for example, an XML-RPC request.
        fieldItems = None
        if debug:
            print "Cannot get fieldstorage list."
    if fieldItems:
        for item in fieldItems:
            if item.filename:
                if debug:
                    print "Uploaded file found:", item.filename
                fields.setdefault(item.name, []).append(item)
            else:
                fields.setdefault(item.name, []).append(item.value)
        for key, value in fields.iteritems():
            if len(value) == 1:
                fields[key] = value[0]
    self._fieldStorage, self._fields = fieldStorage, fields

    # We use Tim O'Malley's Cookie class to get the cookies,
    # but then change them into an ordinary dictionary of values
    self._cookies = dict(
        (key, self._cookies[key].value) for key in self._cookies)

    self._contextName = None
    self._serverSidePath = self._serverSideContextPath = None
    self._serverRootPath = ''
    self._sessionExpired = False

    self._pathInfo = self.pathInfo()

    if debug:
        print "Done setting up request, found keys %r" % fields.keys()

from Request import Request
import os

months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
          "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]

q = Request()
q.connect()
directory = os.path.dirname(os.getcwd())
f = open(directory + "/data/test.csv", "w+")
for i in range(10):
    start_date = q.date
    # criteria = [{"$group": {"_id": "$created_at",
    #                         "count": {"$sum": 1}}}]
    criteria = {"created_at": {'$regex': start_date}}
    count = q.count(criteria)
    print q.date + " " + str(count)
    f.write(start_date + ", " + str(count) + "\n")
    date = start_date.split()
    month = date[0]
    day = date[1]
    if day == "31":
        for index in range(len(months)):
            if month == months[index]:
                month = months[index + 1]
                day = "1"
                break
    else:
        day = str(int(day) + 1)

def input_loop(datatimes=None, spq=None):
    global request
    line = ''
    # loop over the lines in a spq, adding them with priority 1001,
    # which should put them at the top
    if spq is not None:
        request = spq
    else:
        request = Request()
    while True:
        line = raw_input(':::: ')
        if line in ['stop', 'write']:
            break
        if line == 'help':
            print_inst()
            continue
        # make an entry from the input
        try:
            line = line.split(',')
        except AttributeError:
            continue
        line = [v.strip() for v in line]
        # is the input a ShortCut?
        if len(line) == 2:
            try:
                sc = int(line[0])
            except ValueError:
                print("** invalid sc [1,2,3,4]**")
                continue
            if line[1].upper() == 'DATA_TIMES::':
                entry = Entry(sc, 'DATA_TIMES',
                              datetime.datetime.utcnow().replace(microsecond=0), 1, 1000)
                print('{0}: CREATED -- {1} to {2}'.format(
                    entry, entry.date.isoformat(), entry.endDate.isoformat()))
                request.addEntry(entry)
                continue
            elif line[1].upper().startswith('CONFIG'):
                tmp = line[1].split(':')
                try:
                    date = datetime.datetime.strptime(tmp[1], '%Y%m%d')
                except ValueError:
                    continue
                date2 = date
                while date2.day == date.day:
                    try:
                        entry = Entry(sc, 'CONFIG', date2,
                                      int(typeDict['CONFIG']['dataPerBlock']) * 60,
                                      900, datatimes=datatimes)  # hi pri
                    except FIREOffException:
                        date2 += datetime.timedelta(seconds=1)
                        continue
                    print('{0}: created -- {1} to {2}'.format(
                        entry, entry.date.isoformat(), entry.endDate.isoformat()))
                    request.addEntry(entry)
                    date2 += datetime.timedelta(seconds=entry.duration)
                continue
            elif line[1].upper().startswith('CONTEXT'):
                tmp = line[1].split(':')
                try:
                    date = datetime.datetime.strptime(tmp[1], '%Y%m%d')
                except ValueError:
                    continue
                date2 = date
                while date2.day == date.day:
                    try:
                        entry = Entry(sc, 'CONTEXT', date2,
                                      int(typeDict['CONTEXT']['dataPerBlock']) * 60,
                                      700, datatimes=datatimes)  # hi pri
                    except FIREOffException:
                        date2 += datetime.timedelta(seconds=1)
                        continue
                    print('{0}: created -- {1} to {2}'.format(
                        entry, entry.date.isoformat(), entry.endDate.isoformat()))
                    request.addEntry(entry)
                    date2 += datetime.timedelta(seconds=entry.duration)
                continue
            elif line[1].upper().startswith('MICRO_BURST'):
                tmp = line[1].split(':')
                try:
                    date = datetime.datetime.strptime(tmp[1], '%Y%m%dT%H')
                except ValueError:
                    continue
                date2 = date
                while date2.day == date.day:
                    try:
                        entry = Entry(sc, 'MICRO_BURST', date2,
                                      int(typeDict['MICRO_BURST']['dataPerBlock']) * 60,
                                      500, datatimes=datatimes)
                    except FIREOffException:
                        date2 += datetime.timedelta(seconds=1)
                        continue
                    print('{0}: created -- {1} to {2}'.format(
                        entry, entry.date.isoformat(), entry.endDate.isoformat()))
                    request.addEntry(entry)
                    date2 += datetime.timedelta(seconds=entry.duration)
                continue
        elif len(line) != 5:
            print('** input must be 5 entries **')
            continue
        else:
            try:
                sc = int(line[0])
            except ValueError:
                print("** invalid sc [1,2,3,4]**")
                continue
            typ = line[1].upper()
            if typ not in typeDict:
                print("** invalid type [{0}]**".format(' '.join(typeDict.keys())))
                continue
            try:
                date = datetime.datetime.strptime(line[2], '%Y%m%dT%H:%M:%S')
            except ValueError, e:
                try:
                    date = datetime.datetime.strptime(line[2], '%Y-%m-%dT%H:%M:%S')
                except ValueError, e:
                    print e
                    continue
            try:
                dur = int(line[3])
            except ValueError:
                print("** invalid duration**")
                continue
            if dur <= 0:
                print("** invalid duration**")
                continue
            try:
                pri = int(line[4])
            except ValueError:
                print("** invalid priority**")
                continue
            if pri <= 0:
                print("** invalid priority**")
                continue
            warnings.simplefilter('error')
            try:
                entry = Entry(sc, typ, date, dur, pri, datatimes=datatimes)
            except UserWarning, e:
                warnings.simplefilter('always')
                entry = Entry(sc, typ, date, dur, pri, datatimes=datatimes)
                print('** {0} **'.format(e))