def handler(self, request):
    action = 'api_' + request.get('action', '')
    if hasattr(self, action):
        try:
            return getattr(self, action)(**(request.get('params') or {}))
        except TypeError:
            return None
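# Usage sketch for the dispatch pattern above. The Demo class and api_echo
# are hypothetical, added only for illustration: handler() resolves
# {'action': 'echo'} to the api_echo method and unpacks 'params' as keyword
# arguments; a mismatched signature raises TypeError, which becomes None.
class Demo:
    handler = handler  # reuse the function defined above

    def api_echo(self, text=''):
        return text

assert Demo().handler({'action': 'echo', 'params': {'text': 'hi'}}) == 'hi'
assert Demo().handler({'action': 'echo', 'params': {'bogus': 1}}) is None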
def build_request(self):
    request = {}
    if self.sql_select.order_by:
        request['sort'] = sort_translator.translate_sort(self.sql_select)
    if self.sql_select.limit:
        request['size'] = self.sql_select.limit
    if self.sql_select.where:
        request['query'] = filter_translator.create_compound_filter(
            self.sql_select.where.tokens[1:])
    if self.sql_select.join_table:
        join_filters = join_translator.translate_join(self.sql_select)
        if len(join_filters) == 1:
            request['query'] = {
                'bool': {
                    'filter': [request.get('query', {}), join_filters[0]]
                }
            }
        else:
            request['query'] = {
                'bool': {
                    'filter': request.get('query', {}),
                    'should': join_filters
                }
            }
    return request
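# Illustrative only (not from the original source): the shape of the
# Elasticsearch body build_request() assembles for a SELECT with a WHERE
# filter, an ORDER BY and a LIMIT, assuming the translators emit standard
# Elasticsearch clauses. A JOIN would additionally wrap 'query' in a bool
# filter together with the translated join filters, as the branches above show.
example_es_request = {
    'query': {'term': {'status': 'active'}},  # filter_translator output
    'sort': [{'created_at': 'desc'}],         # sort_translator output
    'size': 10,                               # from LIMIT 10
}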
def recordSet_Sort(self, REQUEST=None):
    request = self.REQUEST
    metaObj = self.getMetaobj(self.meta_id)
    res = request['res']
    if 'sort_id' in [x['id'] for x in metaObj['attrs']]:
        l = [(x.get('sort_id', 1), x) for x in res]
        # Sort (FK).
        for metaObjAttr in metaObj['attrs'][1:]:
            if metaObjAttr.get('type', '') in self.getMetaobjIds():
                d = {}
                # FK-id for primary-sort.
                [self.operator_setitem(d, x.get(metaObjAttr['id']), x.get(metaObjAttr['id']))
                    for x in res]
                for fkContainer in self.getParentNode().getChildNodes(request, metaObjAttr['type']):
                    fkMetaObj = self.getMetaobj(fkContainer.meta_id)
                    fkMetaObjAttrIdRecordSet = fkMetaObj['attrs'][0]['id']
                    fkMetaObjRecordSet = fkContainer.attr(fkMetaObjAttrIdRecordSet)
                    fkMetaObjIdId = self.getMetaobjAttrIdentifierId(fkContainer.meta_id)
                    # FK-sort_id for primary-sort.
                    [self.operator_setitem(d, x.get(fkMetaObjIdId), x.get('sort_id'))
                        for x in fkMetaObjRecordSet]
                # Add primary-sort.
                l = [((d.get(x[1].get(metaObjAttr['id'])), x[0]), x[1]) for x in l]
                break
        l.sort()
        res = [x[1] for x in l]
    else:
        qorder = request.get('qorder', '')
        qorderdir = 'asc'
        if qorder == '':
            skiptypes = ['file', 'image'] + self.getMetaobjManager().valid_xtypes + self.getMetaobjIds()
            for attr in metaObj['attrs'][1:]:
                if attr.get('type', '') not in skiptypes and \
                   attr.get('name', '') != '' and \
                   attr.get('custom', '') != '':
                    qorder = attr['id']
                    if attr.get('type', '') in ['date', 'datetime', 'time']:
                        qorderdir = 'desc'
                    break
        if qorder:
            qorderdir = request.get('qorderdir', qorderdir)
            res = standard.sort_list(res, qorder, qorderdir)
            request.set('qorder', qorder)
            request.set('qorderdir', qorderdir)
    request.set('res', res)
    return res
def _update(self, node, d):
    zcm = self.getCatalogAdapter()
    # Prepare object.
    for attr_id in extra_column_ids:
        attr_name = 'zcat_column_%s' % attr_id
        value = d.get(attr_id)
        setattr(node, attr_name, value)
    for attr_id in zcm._getAttrIds():
        last_id = attr_id
        attr_name = 'zcat_index_%s' % attr_id
        value = umlaut_quote(self, d.get(attr_id))
        setattr(node, attr_name, value)
    # Reindex object.
    request = self.REQUEST
    lang = request.get('lang', self.getPrimaryLanguage())
    zcatalog = getZCatalog(self, lang)
    if zcatalog is not None:
        path = node.getPath()
        if zcatalog.getrid(path):
            zcatalog.uncatalog_object(path)
        zcatalog.catalog_object(node, path)
    # Unprepare object.
    for attr_id in extra_column_ids:
        attr_name = 'zcat_column_%s' % attr_id
        delattr(node, attr_name)
    for attr_id in zcm._getAttrIds():
        attr_name = 'zcat_index_%s' % attr_id
        delattr(node, attr_name)
    # Premature commit.
    req_key = 'ZMSZCatalogConnector._update.transaction_count'
    cfg_key = 'ZMSZCatalogConnector._update.transaction_size'
    if request.get(req_key, 0) >= int(self.getConfProperty(cfg_key, 999)):
        import transaction
        transaction.commit()
    request.set(req_key, request.get(req_key, 0) + 1)
def build_request(self, obj, download_size_limit=config.download_size_limit):
    env = obj['env']
    rule = obj['rule']
    request = self.render(obj['request'], env['variables'], env['session'])

    method = request['method']
    url = request['url']
    headers = dict((e['name'], e['value']) for e in request['headers'])
    cookies = dict((e['name'], e['value']) for e in request['cookies'])
    data = request.get('data')
    if method == 'GET':
        data = None
    elif method == 'POST':
        data = request.get('data', '')

    def set_size_limit_callback(curl):
        def size_limit(download_size, downloaded, upload_size, uploaded):
            if download_size and download_size > download_size_limit:
                return 1
            if downloaded > download_size_limit:
                return 1
            return 0
        curl.setopt(pycurl.NOPROGRESS, 0)
        curl.setopt(pycurl.PROGRESSFUNCTION, size_limit)
        return curl

    req = httpclient.HTTPRequest(
        url=url,
        method=method,
        headers=headers,
        body=data,
        follow_redirects=False,
        max_redirects=0,
        decompress_response=True,
        allow_nonstandard_methods=True,
        allow_ipv6=True,
        prepare_curl_callback=set_size_limit_callback,
    )

    session = cookie_utils.CookieSession()
    if req.headers.get('Cookie'):
        session.update(dict(x.strip().split('=', 1)
                            for x in req.headers['Cookie'].split(';')
                            if '=' in x))
    if isinstance(env['session'], cookie_utils.CookieSession):
        session.from_json(env['session'].to_json())
    else:
        session.from_json(env['session'])
    session.update(cookies)
    cookie_header = session.get_cookie_header(req)
    if cookie_header:
        req.headers['Cookie'] = cookie_header

    env['session'] = session

    return req, rule, env
def getInternalLinkUrl(self, url, ob):
    request = self.REQUEST
    if ob is None:
        index_html = './index_%s.html?error_type=NotFound&op=not_found&url=%s' % (
            request.get('lang', self.getPrimaryLanguage()), str(url))
    else:
        # Contextualized index_html.
        context = request.get('ZMS_THIS', self)
        index_html = ob.getHref2IndexHtmlInContext(context, REQUEST=request)
    return index_html
def callMethod(self, request):
    name = request.get('action', '')
    params = request.get('params', {})
    reply = None
    if name == 'addNote':
        return self.addNote(**params)
    if name == 'downloadAudio':
        return self.downloadAudio(**params)
    return reply
def handler(self, request):
    action = 'api_' + request.get('action', '')
    params = request.get('params') or {}
    if hasattr(self, action):
        try:
            return getattr(self, action)(**params)
        except TypeError as e:
            oops("TypeError (%s) running %s! params: %s" % (e, action, params))
            return None
    else:
        oops("action %s doesn't exist" % action, 1)
def notify(usernames, message):
    tokens = []
    if usernames == 'all':
        tokens = ExperimentCoordinator().alarm_config.values()
    else:
        usernames = usernames.split(",")
        for username in usernames:
            tokens.append(ExperimentCoordinator().alarm_config[username])
    for token in tokens:
        get("https://alarmerbot.ru/?key={0}&message={1}".format(token, quote(message)))
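# Hypothetical calls: notify('alice,bob', 'run 42 finished') looks up each
# user's token in alarm_config and fires one GET per token, while
# notify('all', ...) broadcasts to every configured token.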
def callback(request, db):
    try:
        urlResult, emailResult = get_callback(request, db)
        logger.debug('request : %s do callback.' % request.get('_id'))
        callback = request.get('callback')
        if not CALLBACK_CACHE.exists(prefix_callback_email_username + request.get('username')):
            if callback.get('email'):
                email = [{"username": request.get('username'),
                          "to_addrs": callback.get('email'),
                          "title": 'refresh callback',
                          "body": emailResult}]
                queue.put_json2('email', email)
                logger.debug('email :%s put email_queue!' % callback.get('email'))
        if callback.get('url'):
            status = doPost(callback.get('url'), urlResult)
            logger.debug('request : %s ,urlcallback status:%s.' % (request.get('_id'), status))
            if status != 200:
                for i in range(3):
                    status = doPost(callback.get('url'), urlResult)
                    logger.debug('request : %s ,urlcallback retry count:%s ,status:%s.' % (request.get('_id'), i, status))
                    if status == 200:
                        break
        if callback.get('ntease_itemid'):
            result = '<?xml version="1.0" encoding="utf-8" ?><fwif><item_id>%s</item_id><op_result>SUCCESS</op_result><detail>SUCCESS</detail></fwif>' % callback.get('ntease_itemid')
            # Build the Basic auth header; b64encode works on bytes in Python 3.
            auth = base64.b64encode((USERNAME_NTESE + ':' + PASSWORD_NTESE).encode()).decode()
            headers = {"Content-type": "application/x-www-form-urlencoded",
                       "Accept": "text/xml",
                       "Authorization": "Basic " + auth}
            hc = http.client.HTTPConnection(NTESE_DOMAIN, NTEST_PORT, timeout=4)
            params = urllib.parse.urlencode({'content': result.encode('utf-8')})
            hc.request('POST', '/cdnreport/', params, headers)
            hc.close()
    except Exception:
        logger.debug("r_ud:%s do callback error, callback body :%s." % (request.get("_id"), request.get("callback")))
def handler(self, request):
    if self.log is not None:
        self.log.write('[request]\n')
        json.dump(request, self.log, indent=4, sort_keys=True)
        self.log.write('\n\n')

    name = request.get('action', '')
    version = request.get('version', 4)
    params = request.get('params', {})
    reply = {'result': None, 'error': None}

    try:
        method = None
        for methodName, methodInst in inspect.getmembers(self, predicate=inspect.ismethod):
            apiVersionLast = 0
            apiNameLast = None
            if getattr(methodInst, 'api', False):
                for apiVersion, apiName in getattr(methodInst, 'versions', []):
                    if apiVersionLast < apiVersion <= version:
                        apiVersionLast = apiVersion
                        apiNameLast = apiName
                if apiNameLast is None and apiVersionLast == 0:
                    apiNameLast = methodName
                if apiNameLast is not None and apiNameLast == name:
                    method = methodInst
                    break
        if method is None:
            raise Exception('unsupported action')
        else:
            reply['result'] = method(**params)
    except Exception as e:
        reply['error'] = str(e)

    if version <= 4:
        reply = reply['result']

    if self.log is not None:
        self.log.write('[reply]\n')
        json.dump(reply, self.log, indent=4, sort_keys=True)
        self.log.write('\n\n')

    return reply
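# Hedged sketch (DemoApi and ping are illustrative, not from the original
# source) of how a method registers with the version-aware dispatcher above:
# the 'api' flag marks it as callable, and 'versions' lists
# (min_version, public_name) pairs; the dispatcher picks the highest entry
# whose version does not exceed the requested one.
class DemoApi:
    log = None
    handler = handler  # reuse the dispatcher defined above

    def ping(self):
        return 'pong'
    ping.api = True
    ping.versions = [(3, 'ping'), (5, 'ping_v5')]

# A version-4 request resolves the action name 'ping' via the (3, 'ping')
# entry; a version-5 request would have to use 'ping_v5' instead.
# DemoApi().handler({'action': 'ping', 'version': 4})  # -> 'pong'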
def getLinkUrl(self, url, REQUEST=None):
    request = self.REQUEST
    if isInternalLink(url):
        # Params.
        ref_params = {}
        if url.find(';') > 0:
            ref_params = dict(re.findall(r';(\w*)=(\w*)', url[url.find(';'):-1]))
            url = '{$%s}' % url[2:url.find(';')]
        # Anchor.
        ref_anchor = ''
        if url.find('#') > 0:
            ref_anchor = url[url.find('#'):-1]
        # Prepare request.
        bak_params = {}
        for key in ref_params:
            bak_params[key] = request.get(key, None)
            request.set(key, ref_params[key])
        # Get index_html.
        ref_obj = self.getLinkObj(url)
        index_html = getInternalLinkUrl(self, url, ref_obj)
        # Unprepare request.
        for key in bak_params:
            request.set(key, bak_params[key])
        # Return index_html.
        url = index_html + ref_anchor
    elif isMailLink(url):
        prefix = 'mailto:'
        url = 'javascript:window.location.href=\'' + prefix + '\'+atob(\'' + \
            base64.b64encode(url[len(prefix):].encode()).decode() + '\')'
    return url
import requests as request  # assumed alias; the snippet calls request.get()

def downloadJPG(imgUrl, fileName):
    # contextlib.closing makes sure the streamed response is closed automatically.
    from contextlib import closing
    with closing(request.get(imgUrl, stream=True)) as resp:
        with open(fileName, 'wb') as f:
            for chunk in resp.iter_content(128):
                f.write(chunk)
def drupal_get_version(target):
    # Looks for the version in CHANGELOG.txt, then in the
    # <meta name="generator"> tag, then in the X-Generator HTTP header.
    try:
        soup = str(core.get_page(target + "CHANGELOG.txt"))
        regex = re.findall(r'Drupal (.*?),', str(soup))
        if regex != []:
            return regex[0], "/CHANGELOG.txt"
        else:
            # CHANGELOG.txt not found; fall back to the META generator tag.
            soup = core.get_page(target)
            regex = re.findall(
                r'content="Drupal (.*?) \(http(s|):\/\/(www\.|)drupal.org\)"',
                str(soup))
            if regex != []:
                return regex[0][0], "META Generator Tag"
            else:
                r = request.get(target)
                # .get() avoids a KeyError when the header is missing.
                if r.status_code == 200 and r.headers.get("X-Generator"):
                    return r.headers["X-Generator"], "X-Generator HTTP Header"
        return 'X.X.X', ""
    except Exception:
        return "X.X.X", ""
def main(url, self):
    try:
        rid = str(url.split("/")[-1])
        self.progressBar.setValue(14)
        json_text = urllib.request.urlopen(
            "http://www.kuwo.cn/url?format=mp3&rid=%s&response=url&type=convert_url3"
            "&br=128kmp3&from=web&t=1611321054772&httpsStatus=1"
            "&reqId=4265a0b0-5cb3-11eb-8d35-9939327ef0bf" % rid).read().decode()
        self.progressBar.setValue(28)
        j = json.loads(json_text)
        self.progressBar.setValue(42)
        req = request.get(f"http://m.kuwo.cn/newh5/singles/songinfoandlrc?musicId={rid}&httpsStatus=1&reqId=4dfb9620-9aaf-11eb-988c-d7dfa60cda35")
        J = json.loads(req.text)
        try:
            # Name the file after the first lyric line if available.
            with open(str(J['data']['lrclist'][0]["lineLyric"] + ".mp3"), "wb") as f:
                self.progressBar.setValue(56)
                music_file_r = urllib.request.urlopen(j["url"])
                self.progressBar.setValue(70)
                music = music_file_r.read()
                self.progressBar.setValue(84)
                f.write(music)
        except Exception:
            # Fall back to the track id as the file name.
            with open(str(str(rid) + ".mp3"), "wb") as f:
                self.progressBar.setValue(56)
                music_file_r = urllib.request.urlopen(j["url"])
                self.progressBar.setValue(70)
                music = music_file_r.read()
                self.progressBar.setValue(84)
                f.write(music)
        self.progressBar.setValue(100)
        return True
    except Exception:
        e = traceback.format_exc()
        QMessageBox.critical(self, "Internal Error", e)
def getLinkUrl(self, url, REQUEST=None):
    self.startMeasurement('%s.getLinkUrl' % self.meta_id)
    request = self.REQUEST
    if isInternalLink(url):
        # Params.
        ref_params = {}
        if url.find(';') > 0:
            ref_params = dict(re.findall(r';(\w*)=(\w*)', url[url.find(';'):-1]))
            url = '{$%s}' % url[2:url.find(';')]
        # Anchor.
        ref_anchor = ''
        if url.find('#') > 0:
            ref_anchor = url[url.find('#'):-1]
        # Prepare request.
        bak_params = {}
        for key in ref_params:
            bak_params[key] = request.get(key, None)
            request.set(key, ref_params[key])
        # Get index_html.
        ref_obj = self.getLinkObj(url)
        index_html = getInternalLinkUrl(self, url, ref_obj)
        # Unprepare request.
        for key in bak_params:
            request.set(key, bak_params[key])
        # Return index_html.
        url = index_html + ref_anchor
    elif isMailLink(url):
        prefix = 'mailto:'
        url = prefix + standard.encrypt_ordtype(url[len(prefix):])
    self.stopMeasurement('%s.getLinkUrl' % self.meta_id)
    return url
def _request_until_succeed(self, url):
    self.response = None
    request_warns = 0
    while True:
        try:
            self.response = request.get(url)
            if self.response.status_code == 200:
                break
            else:
                logger.warning(self.response)
                request_warns += 1
                if request_warns >= 5:
                    break
        except Exception as e:
            # Other errors are possible, such as IOError.
            print("Error: " + str(e))
    data = force_text(self.response.text)
    return json.loads(data)
import requests as request  # assumed alias; the snippet calls request.get()

def trade_spider(max_pages):
    page = 1
    while page < max_pages:
        url = "http://php.net/manual/en/function.file.php" + str(page)
        source_code = request.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text)
        for link in soup.findAll('a', {"class": "item.name"}):
            href = "http://php.net" + link.get('href')
            title = link.string
            print(href)
            print(title)
        page += 1

trade_spider(1)

def get_single_item_data(item_url):
    source_code = request.get(item_url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text)
    for item_name in soup.findAll('div', {'class': 'i.name'}):
        print(item_name.string)
    for link in soup.findAll('a'):
        href = "http://php.net" + link.get('href')
        print(href)

trade_spider(3)
def redirect(url, code=None):
    if not code:
        code = 303 if request.get('SERVER_PROTOCOL') == 'HTTP/1.1' else 302
    response.status = code
    response.body = ''
    response.set_header('Location', urllib.parse.urljoin(request.url, url))
    return response
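# Hedged usage sketch: with Bottle-style thread-local request/response
# objects (which the function above assumes), a route handler can simply
# return redirect() to send the client elsewhere.
#
# @route('/old-path')
# def old_path():
#     return redirect('/new-path')  # 303 under HTTP/1.1, otherwise 302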
def normalize_ids_after_copy(node, id_prefix='e', ids=[]):
    request = node.REQUEST
    copy_of_prefix = 'copy_of_'
    for childNode in node.getChildNodes():
        # validate id
        id = childNode.getId()
        new_id = None
        if '*' in ids or id in ids or id.startswith(copy_of_prefix):
            # reset ref_by
            childNode.ref_by = []
            # init object-state
            if '*' not in ids:
                lang = request.get('lang')
                for langId in node.getLangIds():
                    request.set('lang', langId)
                    childNode.setObjStateNew(request, reset=0)
                    childNode.onChangeObj(request)
                request.set('lang', lang)
            # new id
            new_id = node.getNewId(id_prefix)
        else:
            # new id
            new_id = node.getNewId(standard.id_prefix(id))
        # reset id
        if new_id is not None and new_id != id:
            standard.writeBlock(
                node,
                '[CopySupport._normalize_ids_after_copy]: rename %s(%s) to %s'
                % (childNode.absolute_url(), childNode.meta_id, new_id))
            node.manage_renameObject(id=id, new_id=new_id)
        # traverse tree
        normalize_ids_after_copy(childNode, id_prefix, ids=['*'])
def normalize_ids_after_move(node, id_prefix='e', ids=[]):
    request = node.REQUEST
    copy_of_prefix = 'copy_of_'
    for childNode in node.getChildNodes():
        # validate id
        id = childNode.getId()
        new_id = None
        if '*' in ids or id in ids or id.startswith(copy_of_prefix):
            # init object-state
            if '*' not in ids:
                lang = request.get('lang')
                for langId in node.getLangIds():
                    request.set('lang', langId)
                    childNode.setObjStateModified(request)
                    childNode.onChangeObj(request)
                request.set('lang', lang)
            # new id: strip the copy-prefix, or regenerate for a foreign prefix
            if id.startswith(copy_of_prefix):
                new_id = id[len(copy_of_prefix):]
            elif standard.id_prefix(id) != id_prefix:
                new_id = node.getNewId(id_prefix)
        # reset id
        if new_id is not None and new_id != id:
            standard.writeBlock(
                node,
                '[CopySupport._normalize_ids_after_move]: rename %s(%s) to %s'
                % (childNode.absolute_url(), childNode.meta_id, new_id))
            node.manage_renameObject(id=id, new_id=new_id)
def search(self, q, fq='', order=None):
    rtn = []
    # ZCatalog.
    request = self.REQUEST
    lang = request.get('lang', self.getPrimaryLanguage())
    zcatalog = getZCatalog(self, lang)
    # Find search-results.
    items = []
    prototype = {}
    for fqs in fq.split(','):
        attr_id = fqs[:fqs.find(':')]
        if attr_id.endswith('_s'):
            attr_id = attr_id[:-2]
        fqk = 'zcat_index_%s' % attr_id
        if fqk in zcatalog.indexes():
            fqv = fqs[fqs.find(':') + 1:]
            fqv = umlaut_quote(self, fqv)
            prototype[fqk] = fqv
    for index in zcatalog.indexes():
        if index.find('zcat_index_') == 0:
            query = copy.deepcopy(prototype)
            query[index] = umlaut_quote(self, q)
            qr = zcatalog(query)
            standard.writeLog(self, "[search]: %s=%i" % (str(query), len(qr)))
            for item in qr:
                if item not in items:
                    items.append(item.aq_base)
    # Process search-results.
    results = []
    for item in items:
        data_record_id = item.data_record_id_
        path = zcatalog.getpath(data_record_id)
        # Append to valid results.
        if len([x for x in results if x[1]['path'] == path]) == 0:
            result = {}
            result['path'] = path
            result['score'] = intValue(item.data_record_score_)
            result['normscore'] = intValue(item.data_record_normalized_score_)
            for column in zcatalog.schema():
                k = column
                if k.find('zcat_index_') == 0:
                    k = k[len('zcat_index_'):]
                result[k] = getattr(item, column, None)
            results.append((item.data_record_score_, result))
    # Sort search-results.
    results.sort()
    results.reverse()
    # Append search-results.
    rtn.extend([x[1] for x in results])
    # Return list of search-results in correct sort-order.
    return rtn
def FromRequest(cls, request):
    """Create a Drive State instance from an HTTP request.

    Args:
        cls: Type this class method is called against.
        request: HTTP request.
    """
    return DriveState(request.get('state'))
def update(self):
    url = "https://samplefirebaseapp-3de75.firebaseio.com/export.json"
    response = request.get(url)
    data = response.json()  # parse the JSON body of the response
    # 'w' opens the file in write mode ('r' would be read mode).
    with open(os.getcwd() + '/questions/firbase.json', 'w') as outfile:
        json.dump(data, outfile)
    print(os.getcwd())
import requests as request  # assumed alias; the snippet calls request.get()

def getHTMLText(url):
    try:
        r = request.get(url)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except Exception:
        return ""
def download(self):
    '''A simple method which checks if the archive exists in the local
    sources directory, and if not, downloads it from the specified address.'''
    # Check that the file doesn't exist
    if not path.isfile(self.source + self.extension):
        try:
            # Download the file
            get(self.url, self.source + self.extension)
        except HTTPError:
            # This is thrown if something goes wrong with the download
            pass
    # And return if the file exists
    return path.isfile(self.source + self.extension)
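# Hypothetical usage (the Source class and its attributes are assumptions,
# not from the original): download() is effectively idempotent, returning
# True when the archive is already present or was fetched successfully.
#
# src = Source(url='https://example.org/pkg.tar.gz',
#              source='sources/pkg', extension='.tar.gz')
# if not src.download():
#     raise RuntimeError('could not fetch %s' % src.url)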
def do_POST(self):
    try:
        request = json.loads(self._read_request())
        if 'type' not in request:
            request['type'] = 'GET'
        if 'url' not in request or \
           (request['type'] == 'POST' and 'content' not in request):
            return self._return_error('invalid json')
    except Exception:
        return self._return_error('invalid json')
    new_request = urllib.request.Request(
        url=request['url'],
        data=bytes(request.get('content'), 'UTF-8') if 'content' in request else None,
        headers=request.get('headers', {}),
        method=request['type'])
    self.open_follow(new_request, timeout=request.get('timeout', 1))
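# Illustrative request body for the proxy handler above (field names come
# from the code; the values are made up): 'type' defaults to 'GET',
# 'content' is required only for POST, and 'timeout' falls back to 1 second.
example_proxy_body = json.dumps({
    'type': 'POST',
    'url': 'http://example.org/submit',
    'content': 'a=1&b=2',
    'headers': {'Content-Type': 'application/x-www-form-urlencoded'},
    'timeout': 5,
})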
def print_mail(url):
    now_content = requests.get(HOST + url).content
    now_soup = BeautifulSoup(now_content)
    email = now_soup.find('span', attrs={'class': 'email'}).a.get('href')
    email = email.split(':')
    if len(email) > 1:
        print(email[1])
        return email[1]
    return ""
def validateURL(url: str):
    try:
        rsp = requests.get(url)
        logger.info("%s - %s" % (rsp.status_code, url))
        if rsp.status_code == 200:
            return True
    except Exception:
        logger.info("%s does not exist on Internet" % url)
    return False
import requests as request  # assumed alias; the snippet calls request.get()

def get_single_item_data(item_url):
    source_code = request.get(item_url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text)
    for item_name in soup.findAll("div", {"class": "i-name"}):
        print(item_name.string)
    for link in soup.findAll("a"):
        href = "https://buckysroom.org" + link.get("href")
        print(href)
def dtable_external_link_plugin_asset_view(request, token, workspace_id, name, plugin_id, path):
    """ used in external page """
    dtable_external_link = DTableExternalLinks.objects.filter(token=token).first()
    if not dtable_external_link:
        raise Http404

    try:
        plugin_record = DTablePlugins.objects.get(pk=plugin_id)
    except DTablePlugins.DoesNotExist:
        error_msg = 'Plugin %s not found.' % plugin_id
        return render_error(request, error_msg)

    workspace = Workspaces.objects.get_workspace_by_id(workspace_id)
    if not workspace:
        error_msg = 'Workspace %s not found.' % workspace_id
        return render_error(request, error_msg)

    table_name = name
    dtable = DTables.objects.get_dtable(workspace, table_name)
    if not dtable:
        error_msg = 'DTable %s not found.' % table_name
        return render_error(request, error_msg)

    repo_id = workspace.repo_id
    repo = seafile_api.get_repo(repo_id)
    if not repo:
        error_msg = 'Library %s not found.' % repo_id
        return render_error(request, error_msg)

    plugin_file_path = os.path.join('/asset', str(dtable.uuid), 'plugins', plugin_record.name)
    asset_path = os.path.join(plugin_file_path, path)
    plugin_file_dir_id = seafile_api.get_file_id_by_path(repo_id, asset_path)
    if not plugin_file_dir_id:
        return render_error(request, 'Asset file does not exist.')

    token = seafile_api.get_fileserver_access_token(workspace.repo_id, plugin_file_dir_id,
                                                    'view', '', use_onetime=False)
    url = gen_file_get_url(token, asset_path)

    import requests
    r = requests.get(url)
    response = HttpResponse(r.content)
    content_type = mimetypes.guess_type(path)
    if content_type[0]:
        response['Content-Type'] = content_type[0]
    return response
import requests as request  # assumed alias; the snippet calls request.get()

def trade_spider(max_pages=1):
    # It's all about the HTML.
    page = 1
    while page < max_pages:
        url = "https://buckysroom.org/trade/search.php?page=" + str(page)
        source_code = request.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text)
        for link in soup.findAll("a", {"class": "item-name"}):
            href = "https://buckysroom.org" + link.get("href")
            title = link.string
            # print(href)
            # print(title)
            get_single_item_data(href)
        page += 1
def get_user_info():
    """Retrieves the user info to cache it locally"""
    credentials = read_api_key()
    if not credentials:
        return []
    url = settings.SITE_URL + "/api/users/me"
    request = http.Request(url, headers={"Authorization": "Token " + credentials["token"]})
    response = request.get()
    account_info = response.json
    if not account_info:
        logger.warning("Unable to fetch user info for %s", credentials["username"])
    with open(USER_INFO_FILE_PATH, "w") as token_file:
        json.dump(account_info, token_file, indent=2)
    if account_info.get("avatar_url"):
        resources.download_media(account_info["avatar_url"], USER_ICON_FILE_PATH)
def get_library():
    """Return the remote library as a list of dicts."""
    credentials = read_api_key()
    if not credentials:
        return []
    url = settings.SITE_URL + "/api/games/library/%s" % credentials["username"]
    request = http.Request(url, headers={"Authorization": "Token " + credentials["token"]})
    try:
        response = request.get()
    except http.HTTPError as ex:
        logger.error("Unable to load library: %s", ex)
        return []
    response_data = response.json
    if response_data:
        return response_data["games"]
    return []
def get_library():
    """Return the remote library as a list of dicts."""
    logger.debug("Fetching game library")
    credentials = read_api_key()
    if not credentials:
        return []
    username = credentials["username"]
    token = credentials["token"]
    url = settings.SITE_URL + "/api/games/library/%s" % username
    headers = {'Authorization': 'Token ' + token}
    request = http.Request(url, headers=headers)
    response = request.get()
    response_data = response.json
    if response_data:
        return response_data['games']
    return []
def post(self):
    try:
        request = json.loads(self.request.body.decode())
        code = request['code']
        scope = request.get('scope', 'snsapi_base')
        web_access_code = yield wxutil.get_oauth2_access_code(code)
        if scope == 'snsapi_userinfo':
            user_info = yield wxutil.pull_user_info(web_access_code['openid'],
                                                    web_access_code['access_token'])
        elif scope == 'snsapi_base':
            user_info = yield wxutil.pull_user_info(web_access_code['openid'])
        else:
            raise AttributeError
        self.write(Response(status=1, msg='ok', result=user_info).json())
    except Exception as e:
        self.write(Response(msg='sorry, failed to fetch user info').json())
        logging.exception('GetUserInfoHandler error: {0}'.format(str(e)))
def from_request(cls, request):
    """Create new TransientShardState from webapp request."""
    mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec"))
    mapper_spec = mapreduce_spec.mapper
    input_reader_spec_dict = simplejson.loads(request.get("input_reader_state"),
                                              cls=json_util.JsonDecoder)
    input_reader = mapper_spec.input_reader_class().from_json(input_reader_spec_dict)
    initial_input_reader_spec_dict = simplejson.loads(
        request.get("initial_input_reader_state"), cls=json_util.JsonDecoder)
    initial_input_reader = mapper_spec.input_reader_class().from_json(
        initial_input_reader_spec_dict)
    output_writer = None
    if mapper_spec.output_writer_class():
        output_writer = mapper_spec.output_writer_class().from_json(
            simplejson.loads(request.get("output_writer_state", "{}"),
                             cls=json_util.JsonDecoder))
        assert isinstance(output_writer, mapper_spec.output_writer_class()), (
            "%s.from_json returned an instance of wrong class: %s" % (
                mapper_spec.output_writer_class(), output_writer.__class__))
    handler = util.try_deserialize_handler(request.get("serialized_handler"))
    if not handler:
        handler = mapreduce_spec.mapper.handler
    return cls(mapreduce_spec.params["base_path"],
               mapreduce_spec,
               str(request.get("shard_id")),
               int(request.get("slice_id")),
               input_reader,
               initial_input_reader,
               output_writer=output_writer,
               retries=int(request.get("retries")),
               handler=handler)
# Sample Collection+JSON test
# https://pypi.python.org/pypi/collection-json/0.1.0
import requests  # provides the .get()/.text API used below
from collection_json import Collection

data = requests.get('http://www.youtypeitwepostit.com/api/').text
collection = Collection.from_json(data)
print(collection)
print(data)
# Module-level scraping loop; origin_url, origin_page and hearderData are
# defined elsewhere in the script, and `request` is the requests library.
count = 0
fileNameHead = r'D:\jandan\jandan\jandan'
session = request.Session()
response = session.get(origin_url + str(origin_page), headers=hearderData)
while response.status_code == 200:
    try:
        print('\n--------------------\n' + str(origin_page) + '\n-------------------\n')
        response = session.get(origin_url + str(origin_page), headers=hearderData)
        response.encoding = 'utf-8'
        html = response.text
        soup = BeautifulSoup(html, 'lxml')
        imgBox = soup.find_all('img')
        for img in imgBox:
            matchObj = re.match(r'(.*?\.jpg)', img['src'])
            if matchObj:
                r = request.get(img['src'], timeout=5)
                print(r.status_code)
                if r.status_code == 200:
                    fileName = fileNameHead + str(count) + '.jpg'
                    with open(fileName, "wb") as imgFile:
                        print(img['src'] + ' is downloading!')
                        imgFile.write(r.content)
                count += 1
        origin_page += 1
    except request.exceptions.RequestException:
        print('RequestException')
        count += 1
        origin_page += 1
    finally:
        print('Exception')