def blog_logout(request):
    """Log the current user out and redirect back to the referring page.

    Falls back to the site root when the browser sent no Referer header
    (direct navigation, privacy settings); the old code raised KeyError
    in that case.
    """
    try:
        logout(request)
    except Exception as e:
        # Best effort: a failed logout should not break the redirect.
        logger.error(e)
    # Return to the address the request was submitted from.
    return redirect(request.META.get('HTTP_REFERER', '/'))
def prepare_files(self, finder, **kwargs):
    """Prepare all requirements, then swap each built sdist for its wheel.

    After the stock pip preparation step, every buildable requirement is
    wheeled into the shared wheelhouse cache, and its unpacked source
    directory is replaced with the unpacked wheel contents so the install
    proceeds from the bdist.
    """
    wheelhouse = CacheOpts().wheelhouse
    self.wheel_download_dir = wheelhouse
    super(FasterRequirementSet, self).prepare_files(finder, **kwargs)

    # build wheels before install.
    builder = FasterWheelBuilder(self, finder, wheel_dir=wheelhouse)

    # TODO-TEST: we only incur the build cost once on uncached install
    for requirement in builder.build():
        wheel_link = optimistic_wheel_search(requirement, finder.find_links)
        if wheel_link is None:
            logger.error(
                "SLOW!! no wheel found after building (couldn't be wheeled?): %s",
                requirement,
            )
            continue

        # replace the setup.py "sdist" with the wheel "bdist"
        from pip.util import rmtree, unzip_file
        rmtree(requirement.source_dir)
        unzip_file(wheel_link.path, requirement.source_dir, flatten=False)
        requirement.url = wheel_link.url
def comment_reply_child(request):
    """Persist a reply to an existing comment reply and redirect back.

    Reads the reply form fields from POST, copies the threading data
    (comment id, addressee nickname/user id) from the parent CommentReply
    row, and stores the new reply. Anonymous visitors submit
    ``UserId == "0"``, which is stored as NULL.
    """
    try:
        if request.method == "POST":
            nickname = request.POST['NickName']
            useremail = request.POST['BEmail']
            rpycontent = request.POST['CmtText']
            userid = request.POST['UserId']
            cmtartid = request.POST['CmtArtId']
            # The parent reply identifies which child comment is being answered.
            parent = CommentReply.objects.get(pk=request.POST['ParentComment'])
            # .create() already saves the row; the old extra .save() call and
            # the leftover debug print were removed.
            CommentReply.objects.create(
                nickname=nickname,
                email=useremail,
                content=rpycontent,
                comment_id=parent.comment_id,
                user_id=userid if userid != "0" else None,
                article_id=cmtartid,
                reply_to_nickname=parent.nickname,
                reply_to_user_id=parent.user_id,
            )
    except Exception as e:
        logger.error(e)
    # Redirect back to the submitting page; fall back to the site root when
    # no Referer header was sent (previously a KeyError).
    return redirect(request.META.get('HTTP_REFERER', '/'))
def after_request(response):
    """Attach CORS and cache headers to every response, then log the request line."""
    for header, value in (('Access-Control-Allow-Origin', '*'),
                          ('Cache-Control', 'no-cache')):
        response.headers.add(header, value)

    stamp = strftime('[%Y-%b-%d %H:%M]')
    # NOTE(review): logging routine requests at ERROR level looks deliberate
    # here (always emitted regardless of log level) — confirm with the owner.
    logger.error('%s %s %s %s %s %s',
                 stamp, request.remote_addr, request.method,
                 request.scheme, request.full_path, response.status)
    return response
def _get_default_config_path(): for name in ("prod.py", "dev.py"): path = "config/{}".format(name) if os.path.exists(path): return path else: logger.error("Couldn't locate the files in config/") exit(1)
def getOnePage(pageObj):
    """Fetch one Xiami search-result page for a song and parse it.

    pageObj carries {"obj": [song_name, singer, song_id], "p": page_number}.
    Sleeps a random interval before the request (rate limiting), retries the
    GET once on failure, then delegates parsing to getJsonData. Returns its
    result, or None on failure / missing page. Python 2 code (print statement,
    urllib.urlencode).
    """
    try:
        # Random delay between requests to avoid being throttled.
        sleepTime = random.randint(2, 6)
        time.sleep(sleepTime)
        _url = "http://www.xiami.com/search/song/page/"
        songName = pageObj["obj"][0]  # song title
        singer = pageObj["obj"][1]  # artist
        id = pageObj["obj"][2]  # song ID (shadows the builtin `id`)
        curentPage = pageObj["p"]
        parms = {"category": "-1", "key": songName}
        data = urllib.urlencode(parms)
        url = _url + str(curentPage) + "?" + data
        j = None  # NOTE(review): assigned but never used
        try:
            # The random sleepTime is also spliced into the UA string,
            # presumably to vary the fingerprint slightly.
            page = requests.get(
                url,
                headers={
                    "User-Agent":
                    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.3{0} (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.3{0}"
                    .format(sleepTime)
                })
            bsObj = BeautifulSoup(page.text, "html.parser")
        except:
            traceback.print_exc()
            print "*** URL GET出错 歌曲:{0},歌手:{1}".format(songName, singer).decode(
                "utf8", "ignore")
            # Back off longer, then retry the request once.
            sleepTime = random.randint(5, 10)
            time.sleep(sleepTime)
            try:
                page = requests.get(
                    url,
                    headers={
                        "User-Agent":
                        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.3{0} (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.3{0}"
                        .format(sleepTime)
                    })
            except:
                # Second failure: log and skip this song entirely.
                traceback.print_exc()
                logger.error(
                    "** URL GET出错再次失败,跳过歌曲,歌曲:{0},歌手:{1},id:{2}".format(
                        songName, singer, id))
                return None
            bsObj = BeautifulSoup(page.text, "html.parser")
        if bsObj is None:
            return None
        else:
            # Parsing of the fetched page starts here.
            return getJsonData(pageObj, bsObj)
    except:
        # NOTE(review): if the failure happened before songName/singer were
        # assigned, this print itself raises NameError.
        traceback.print_exc()
        print "*** 出错 歌曲:{0},歌手:{1}".format(songName, singer).decode("utf8",
                                                                       "ignore")
        return None
def category_articles(request):
    """Render the article list for one category (``?category=<id>``)."""
    try:
        # Category primary key, passed as a GET parameter.
        category = request.GET.get('category', None)
        # Article.category is a foreign key to Category.id.
        category_articles = Article.objects.filter(category=category)
        category_name = Category.objects.get(pk=category)
    except Exception as e:
        logger.error(e)
    # locals() hands every local name (category, category_articles,
    # category_name) to the template; on an exception above, some of
    # them may be missing from the context.
    return render(request, 'category_articles.html', locals())
def archive(request):
    """Render the monthly article archive (``?year=YYYY&month=M``)."""
    try:
        year = request.GET.get('year', None)  # None when the parameter is absent
        month = request.GET.get('month', None)
        # Fuzzy date match, e.g. "2017-5" finds everything published May 2017.
        # NOTE(review): a missing year/month makes the string concatenation
        # raise TypeError, silently swallowed by the handler below.
        archive_list = Article.objects.filter(date_publish__icontains=year + '-' + month)
        archive_list = page_pagionation(request, archive_list)
    except Exception as e:
        logger.error(e)
    # locals() passes archive_list (when it was set) to the template.
    return render(request, 'archive.html', locals())
def init(self):
    """Print the current external IP, then set up a headless display.

    Python 2 code (print statement, urllib2, ``except Exception, e``).
    """
    # Scrape the external IP from myip.com.ua; the regex keys off the
    # Ukrainian-flag <img> tag that precedes the IP on that page.
    regex = '<img src="/flags/png/ua.png" title="(.+?)" />'
    html = urllib2.urlopen("http://myip.com.ua/").read()
    result_1 = re.findall(regex, html)
    print "my OLD ip is", result_1
    try:
        # Virtual framebuffer (pyvirtualdisplay) for running a browser headless.
        self.display = Display(visible=0, size=(1024, 768))
        #self.display.start()  # uncomment for server
    except Exception, e:
        logger.error(e, exc_info=True, extra={'Message': 'Cant start display.'})
def trace_requirements(requirements):
    """given an iterable of pip InstallRequirements,
    return the set of required packages, given their transitive requirements.

    Walks the dependency graph breadth-first over the current working set,
    collecting one dist per requirement. Version conflicts and unmet
    dependencies are logged; if any occurred, the process exits with 1.
    """
    # Function-local imports keep pip internals off the module import path.
    from collections import deque
    from pip import logger
    from pip.req import InstallRequirement
    from pip._vendor import pkg_resources

    working_set = fresh_working_set()

    # breadth-first traversal:
    errors = False
    queue = deque(requirements)
    result = []
    seen_warnings = set()  # requirement names already reported, to avoid duplicates
    while queue:
        req = queue.popleft()
        if req.req is None:
            # a file:/// requirement
            continue

        try:
            dist = working_set.find(req.req)
        except pkg_resources.VersionConflict as conflict:
            # The conflicting installed dist is the first exception argument.
            dist = conflict.args[0]
            if req.name not in seen_warnings:
                # TODO-TEST: conflict with an egg in a directory install via -e ...
                if dist.location:
                    location = ' (%s)' % timid_relpath(dist.location)
                else:
                    location = ''
                logger.error('Error: version conflict: %s%s <-> %s' % (dist, location, req))
                errors = True
                seen_warnings.add(req.name)

        if dist is None:
            logger.error('Error: unmet dependency: %s' % req)
            errors = True
            continue

        result.append(dist_to_req(dist))

        # Sort for deterministic traversal order before enqueueing children.
        for dist_req in sorted(dist.requires(), key=lambda req: req.key):
            # there really shouldn't be any circular dependencies...
            queue.append(InstallRequirement(dist_req, str(req)))

    if errors:
        exit(1)

    return result
def trace_requirements(requirements):
    """given an iterable of pip InstallRequirements,
    return the set of required packages, given their transitive requirements.
    """
    # Keep pip internals as function-local imports.
    from collections import deque
    from pip import logger
    from pip.req import InstallRequirement
    from pip._vendor import pkg_resources

    working_set = fresh_working_set()

    # breadth-first traversal:
    errors = False
    queue = deque(requirements)
    result = []
    seen_warnings = set()  # names already reported once

    while queue:
        current = queue.popleft()
        if current.req is None:
            # a file:/// requirement
            continue

        try:
            dist = working_set.find(current.req)
        except pkg_resources.VersionConflict as conflict:
            dist = conflict.args[0]
            if current.name not in seen_warnings:
                logger.error("Error: version conflict: %s <-> %s" % (dist, current))
                errors = True
                seen_warnings.add(current.name)

        if dist is None:
            logger.error('Error: unmet dependency: %s' % current)
            errors = True
            continue

        result.append(dist_to_req(dist))

        # Enqueue transitive requirements in a deterministic order.
        for child in sorted(dist.requires(), key=lambda r: r.key):
            # there really shouldn't be any circular dependencies...
            queue.append(InstallRequirement(child, str(current)))

    if errors:
        exit(1)

    return result
def trace_requirements(requirements):
    """given an iterable of pip InstallRequirements,
    return the set of required packages, given their transitive requirements.

    Version conflicts are reported as warnings (for now); an unmet
    dependency aborts the process.
    """
    from collections import deque
    from pip import logger
    from pip.req import InstallRequirement
    from pip._vendor import pkg_resources

    working_set = fresh_working_set()

    # breadth-first traversal:
    queue = deque(requirements)
    result = []
    seen_warnings = set()  # requirement names already warned about
    while queue:
        req = queue.popleft()
        # FIX: was `if req is None` — the queue never holds None; a file:///
        # requirement is one whose .req attribute is None (matching the check
        # in the sibling trace_requirements implementations). The old test let
        # such requirements fall through to working_set.find(None).
        if req.req is None:
            # a file:/// requirement
            continue

        try:
            dist = working_set.find(req.req)
        except pkg_resources.VersionConflict as conflict:
            # TODO: This should really be an error, but throw a warning for now, while we integrate.
            # TODO: test case, eg: install pylint, install old astroid, update
            # astroid should still be installed after
            dist = conflict.args[0]
            if req.name not in seen_warnings:
                logger.warn("Warning: version conflict: %s <-> %s" % (dist, req))
                seen_warnings.add(req.name)

        if dist is None:
            # TODO: test case, eg: install pylint, uninstall astroid, update
            # -> Unmet dependency: astroid>=1.3.2 (from pylint (from -r faster.txt (line 4)))
            logger.error('Unmet dependency: %s' % req)
            exit(1)

        result.append(dist_to_req(dist))

        for dist_req in dist.requires():  # should we support extras?
            # there really shouldn't be any circular dependencies...
            queue.append(InstallRequirement(dist_req, str(req)))

    return result
def article(request):
    """Render a single article (``?id=<pk>``) with its paginated comments."""
    try:
        # NOTE(review): `id` shadows the builtin; kept as-is because
        # locals() feeds the template context.
        id = request.GET.get('id', None)
        # Guard against an empty or missing id, which would break int() below.
        if (id == "") or id == None:
            id = 0
        pre = int(id) - 1   # previous article pk, for prev/next navigation
        next = int(id) + 1  # next article pk (shadows the builtin `next`)
        try:
            # Exact pk lookup; the string form of the id is accepted.
            article = Article.objects.get(pk=id)
            print(settings.STATIC_ROOT)
            # get() returns a single row; filter() returns a queryset.
            comments = Comment.objects.filter(article_id=article.id).order_by('date_publish')
            comment_list = comments_pagionation(request, comments)
        except Article.DoesNotExist:
            return render(request, 'failure.html', {'reason': '文章不存在'})
    except Exception as e:
        logger.error(e)
    # locals() exposes article, comments, comment_list, pre, next to the template.
    return render(request, 'article.html', locals())
def comment_post(request):
    """Store a top-level comment on an article and redirect back.

    Reads the comment form fields from POST. Anonymous visitors submit
    ``UserId == "0"``, which is stored as NULL.
    """
    try:
        if request.method == "POST":
            nickname = request.POST['NickName']
            useremail = request.POST['BEmail']
            cmtcontent = request.POST['CmtText']
            articleid = request.POST['CmtArtId']
            # Anonymous submission case: user id "0" means no account.
            userid = request.POST['UserId']
            # .create() persists the row immediately; the extra .save() the
            # old code issued was a redundant second write.
            Comment.objects.create(
                nickname=nickname,
                email=useremail,
                content=cmtcontent,
                article_id=articleid,
                user_id=userid if userid != "0" else None,
            )
    except Exception as e:
        logger.error(e)
    # Redirect to the submitting page; default to the site root when the
    # Referer header is absent (previously a KeyError).
    return redirect(request.META.get('HTTP_REFERER', '/'))
def verify_user_email(email) -> bool:
    """
    Checks email address for existence

    :param email: Email for existence verification in "Email Hunter" service
    :return: Boolean
    """
    exists = False
    try:
        # Check if a given email address is deliverable and has been found on the internet.
        # I've chosen this method because there was no explicit requirements about verification in
        # task description.
        exists = _user_email_deliverable(email)
        # Checks for existence in Email Hunter database.
        # Don't think this is what we need.
        # exists = _user_email_exists(email)
    except Exception as e:
        # FIX: log the exception itself rather than e.args[-1] — indexing
        # args raised IndexError inside this handler for exceptions
        # constructed without arguments, masking the original failure.
        logger.error(
            f"Email verification with Email Hunter service failed. {e}"
        )
    return exists
def prepare_files(self, finder, **kwargs):
    """Prepare requirements, then convert each freshly-built sdist to its wheel.

    Runs the stock pip preparation via the parent class, builds wheels into
    the shared wheelhouse cache, and swaps each requirement's unpacked
    source directory for the unpacked wheel so installation uses the bdist.
    """
    wheel_dir = CacheOpts().wheelhouse
    self.wheel_download_dir = wheel_dir

    super(FasterRequirementSet, self).prepare_files(finder, **kwargs)

    # build wheels before install.
    wb = FasterWheelBuilder(
        self,
        finder,
        wheel_dir=wheel_dir,
    )
    # TODO-TEST: we only incur the build cost once on uncached install
    for req in wb.build():
        # Look for the wheel we just built in the wheelhouse.
        link = optimistic_wheel_search(req, finder.find_links)
        if link is None:
            logger.error('SLOW!! no wheel found after building (couldn\'t be wheeled?): %s', req)
            continue

        # replace the setup.py "sdist" with the wheel "bdist"
        from pip.util import rmtree, unzip_file
        rmtree(req.source_dir)
        unzip_file(link.path, req.source_dir, flatten=False)
        req.url = link.url
async def uptime_coro(future):
    """Subscribe to TOPIC over MQTT and push every payload into redis.

    Runs until deliver_message raises a ClientException, then resolves
    *future*. Relies on module globals: TOPIC, count, conn (redis client),
    logger.
    """
    C = MQTTClient()
    await C.connect('mqtt://vpn.alvin.tw/', )
    await C.subscribe(TOPIC)
    try:
        i = 1  # NOTE(review): incremented below but never read
        while 1:
            message = await C.deliver_message()
            i += 1
            packet = message.publish_packet
            global count
            count += 1  # module-level received-message counter
            msg = packet.payload.data.decode('utf8')
            # Topic shape assumed to be .../<stock_id>: the last path
            # segment is used as the redis list key.
            stock_id = packet.variable_header.topic_name.split('/')[-1]
            content = msg
            #print(stock_id)
            conn.rpush(stock_id, content)
        # NOTE(review): unreachable — `while 1` has no break, so the only
        # exit is the exception below and this cleanup never runs.
        await C.unsubscribe([TOPIC])
        await C.disconnect()
    except ClientException as ce:
        logger.error("Client exception: %s" % ce)
    future.set_result('Finish')
def getJsonData(pageObj, bsObj):
    """Extract song rows for the target artist from a parsed Xiami search page.

    pageObj carries {"obj": [song_name, singer_name, song_id], ...}; bsObj is
    the BeautifulSoup document. Builds one dict per matching table row and
    appends it to returnValue; also stores the total page count into
    pageObj["totalNum"] when present. Python 2 code (has_key, `except E, e`).

    NOTE(review): no `return returnValue` is visible after the try/except —
    the success path appears to fall off the end and return None; the source
    may be truncated here. Confirm against the original file.
    """
    returnValue = []
    try:
        songName = pageObj["obj"][0]  # song title
        singerName = pageObj["obj"][1]  # artist
        id = pageObj["obj"][2]  # song id (shadows the builtin `id`)
        # NOTE(review): {"class", "track_list"} is a SET literal, not a dict —
        # likely intended as attrs={"class": "track_list"}; verify this find()
        # actually matches anything.
        nodeTable = bsObj.find(name="table", attrs={"class", "track_list"})
        if nodeTable is None:
            # The search returned no results.
            return returnValue
        nodeTbody = nodeTable.find(name="tbody")
        if nodeTbody is None:
            return returnValue
        nodeTrs = nodeTbody.findAll(name="tr")
        if nodeTrs is None or len(nodeTrs) == 0:
            return returnValue
        for tr in nodeTrs:
            obj = {
                "song_Id": id,
                "app_Id": app_Id,
                "load_Date": time.strftime('%Y%m%d', time.localtime(time.time()))
            }
            # Artist cell.
            td_Singer = tr.find(name="td", attrs={"class": "song_artist"})
            if td_Singer is None:
                continue
            _singer = td_Singer.get_text().strip()
            # Skip rows whose artist does not match the requested artist
            # (case-insensitive substring match in either direction).
            if singerName.lower() not in _singer.lower() and _singer.lower(
            ) not in singerName.lower():
                continue
            obj["singer_Name"] = _singer
            # Action cell: extract the track key from the onclick handler,
            # e.g. play('<key>', ...) -> first argument.
            td_Singer = tr.find(name="td", attrs={"class": "song_act"})
            if td_Singer is None:
                continue
            nodeAs = td_Singer.findAll(name="a")
            nodeA = None
            for a in nodeAs:
                if a.attrs.has_key("onclick"):
                    nodeA = a
                    break
            if nodeA is not None:
                strClick = nodeA.attrs["onclick"]
                arr = strClick.split(",")
                if len(arr) > 0:
                    arr = arr[0].split("(")
                    if len(arr) > 1:
                        _id = arr[1].replace("'", "")
                        obj["keys"] = _id.strip()
            # Title cell.
            td_songName = tr.find(name="td", attrs={"class": "song_name"})
            if td_songName is None:
                continue
            nodeA = td_songName.find(name="a")
            # NOTE(review): condition looks inverted — when nodeA IS None the
            # .get_text() call raises AttributeError; likely meant `is not None`.
            if nodeA is None:
                _songName = nodeA.get_text().strip()
            else:
                _songName = nodeA.attrs["title"].strip()
            obj["song_Name"] = _songName
            # Album cell.
            td_album = tr.find(name="td", attrs={"class": "song_album"})
            if td_album is None:
                continue
            nodeA = td_album.find(name="a")
            # NOTE(review): same inverted-looking None check as above.
            if nodeA is None:
                _album = nodeA.get_text().strip()
            else:
                _album = nodeA.attrs["title"].strip()
            obj["des"] = _album
            returnValue.append(obj)
        # Pager summary ("seek_counts"): the <b> element holds the total
        # page count for this search.
        nodeP = bsObj.find(name="p", attrs={"class": "seek_counts"})
        if nodeP is not None:
            nodeB = nodeP.find(name="b")
            if nodeB is not None:
                try:
                    # total page count
                    num = int(nodeB.get_text())
                except:
                    pass
                else:
                    pageObj["totalNum"] = num
    except Exception, e:
        logger.error(e)