def delete(dataId):
    # Perform a logical (soft) delete by setting the delete flag
    book = session.query(Books).filter(Books.id_ == dataId).first()
    book.delFlg = 1
    session.commit()
    session.close()
    redirect('/list')
async def send(self):
    guild_count = len(self.guilds)
    member_count = len(list(self.get_all_members()))
    if not self.dbl_token:
        return
    dump = json.dumps({'server_count': guild_count})
    head = {
        'authorization': self.dbl_token,
        'content-type': 'application/json'
    }
    url = 'https://discordbots.org/api/bots/stats'
    # Use async context managers: aiohttp.ClientSession.close() is a
    # coroutine, so a bare session.close() would never actually run.
    async with aiohttp.ClientSession() as session:
        async with session.post(url, data=dump, headers=head) as resp:
            print('returned {0.status} for {1}'.format(resp, dump))
    async with aiohttp.ClientSession() as session:
        async with session.post(
                'https://api.fusiondiscordbots.com/{}/'.format(self.user.id),
                data={
                    'token': 'WxxjHtXWk0-JphXi',
                    'guilds': guild_count,
                    'members': member_count
                }) as resp:
            print('returned {0.status} from api.fusiondiscordbots.com'
                  .format(resp))
from contextlib import contextmanager

@contextmanager
def session_context():
    # Yield the shared session and guarantee it is closed afterwards,
    # whether or not the enclosed block raises.
    try:
        yield session
    finally:
        session.close()
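# Usage sketch (hypothetical caller, not from the original code): the `with`
# block borrows the shared session, and close() runs even if the query raises.
# `Users` is the mapped class used elsewhere in this collection; the username
# value is made up.
with session_context() as s:
    demo_user = s.query(Users).filter_by(username='alice').first()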
def save(self, username, password, phone, email):
    # Store the password encrypted, never in plain text
    pwd = encrypt(password)
    user = Users(username=username, password=pwd, phone=phone, email=email)
    session.add(user)
    session.commit()
    session.close()
    return True
async def subscribe(request):
    schema = SubscribeSchema()
    data, error = schema.load(request.form)
    # TODO: `required` validation does not reject empty strings
    if error:
        for key, value in error.items():
            error[key] = key + ':' + ','.join(value)
        return resp_error(';'.join(error.values()))
    subscriber = Subscriber()
    subscriber.nick_name = data['nick_name']
    subscriber.email = data['email']
    subscriber.resources = data['resources']
    session.add(subscriber)
    try:
        session.commit()
        session.close()
    except Exception as e:
        session.rollback()
        session.close()
        return resp_error(e.args)
    email_ = await init_email(data['nick_name'], data['email'],
                              data['resources'])
    if not email_:
        return resp_ok('You have successfully subscribed')
    else:
        return resp_error(email_)
def regist():
    # Decode the form once instead of re-decoding for every field
    forms = request.forms.decode()
    name = forms.get('name')
    volume = forms.get('volume')
    author = forms.get('author')
    publisher = forms.get('publisher')
    memo = forms.get('memo')
    registId = forms.get('id')
    if request.forms.get('next') == 'back':
        response.status = 307
        response.set_header("Location", '/add')
        return response
    else:
        if registId is not None:
            # An id was posted, so update the existing book
            books = session.query(Books).filter(Books.id_ == registId).first()
            books.name = name
            books.volume = volume
            books.author = author
            books.publisher = publisher
            books.memo = memo
            session.commit()
            session.close()
        else:
            logging.info('Adding a new book')
            books = Books(name=name, volume=volume, author=author,
                          publisher=publisher, memo=memo)
            session.add(books)
            session.commit()
            session.close()
    redirect('/list')
async def _retry_error_pan(self):
    # Re-try netdisk records whose earlier insert failed; items that fail
    # again are pushed back onto the queue for the next pass.
    while self.fail_pan_info:
        pan_info = self.fail_pan_info.pop()
        location = Location(episode=pan_info[0], url=pan_info[1],
                            resource=pan_info[2])
        session.add(location)
        try:
            session.commit()
        except Exception:
            session.rollback()
            self.fail_pan_info.append(pan_info)
        else:
            session.close()
def identify_user(next):
    user = None
    cookie = ctx.request.cookies.get(_COOKIE_NAME)
    if cookie:
        user = parse_cookie(cookie)
    ctx.request.user = user
    if user:
        ctx.request.manager = True if user.admin else None
    try:
        return next()
    finally:
        if session.is_active:
            print('close session')
            session.close()
async def _retry_error_page(self):
    # Re-try resource records whose earlier insert failed; items that fail
    # again are pushed back onto the queue for the next pass.
    while self.fail_page_info:
        page_info = self.fail_page_info.pop()
        resource = Resources(name=page_info[0], owner='电波字幕组',
                             stype='tvshow', original=page_info[1])
        session.add(resource)
        try:
            session.commit()
        except Exception:
            session.rollback()
            self.fail_page_info.append(page_info)
        else:
            session.close()
def storage_in_mysql(self, length, name_list, content_list, logo_url_list,
                     img_url_list):
    for v in range(length):
        print("v ", v)
        print("length", length)
        print("name list length: ", len(name_list))
        name = name_list[v]
        content = content_list[v]
        # Keep only the file name from each URL and prefix the local path
        logo_url = logo_url_list[v]
        logo_str = str(logo_url).rsplit('/', 1)[1]
        logo = self.logoPath + logo_str
        img_url = img_url_list[v]
        img_str = str(img_url).rsplit('/', 1)[1]
        img = self.imgPath + img_str
        data = Qshi(name=name, logo=logo, content=content, img=img)
        session.add(data)
        session.commit()
        print('Record %d stored successfully!' % v)
    session.close()
async def process_page_info(self, html):
    """Return the list of all entries found on a single page."""
    soup2 = self.soup(html).find_all('a', attrs={'rel': 'bookmark'})
    info_list = []
    for s in soup2:
        s = self.soup(str(s))
        info_list.append((s.span.string, s.a['href']))
        # TODO: put these into Redis first and read them back from there
        resource = Resources(name=s.span.string, owner='电波字幕组',
                             stype='tvshow', original=s.a['href'])
        session.add(resource)
    try:
        session.commit()
        # Committed entries do not need to be returned for re-processing
        info_list = []
    except Exception as e:
        session.rollback()
        logging.error('Insert into database failed -- {}'.format(str(e)))
    finally:
        session.close()
    return info_list
def leaderboard():
    from urllib.parse import unquote
    import operator
    import re

    players = (session.query(MxitUser)
               .order_by(MxitUser.points.desc())
               .limit(10))
    session.close()
    listy = {}
    expr = r'#([a-fA-F0-9]{6}|[a-fA-F0-9]{3})'

    def convert(matchobj):
        # Turn an inline #rrggbb / #rgb colour code into a styled <span>
        return '</span><span style="color: ' + matchobj.group() + ';">'

    for instance in players:
        span_in = unquote(instance.mxit_nick)  # Get name, and unquote
        name = re.sub(expr, convert, span_in)  # Replace and add spans
        listy[name] = instance.points

    sorted_listy = sorted(listy.items(), key=operator.itemgetter(1),
                          reverse=True)
    return render_template('leaderboard.html', listy=sorted_listy)
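# Illustration of the colour-code substitution above (the sample nick is made
# up). Each #rrggbb token becomes a closing/opening span pair, so the template
# is expected to supply the outermost <span> itself:
#
#   re.sub(expr, convert, '#ff0000Red#00ff00Green')
#   -> '</span><span style="color: #ff0000;">Red</span><span style="color: #00ff00;">Green'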
async def process_pan_info(self, html, resource, update=False):
    """Return the Baidu netdisk addresses for each season of the resource."""
    soup2 = self.soup(html).find_all('p')
    pan_list = []
    for s in soup2:
        try:
            if s.a.string == '城通网盘':
                url = s.a.next_sibling.next_sibling['href']
            elif s.a.string in ['百度网盘', '百度云盘']:
                url = s.a['href']
            else:
                continue
            str_list = [item for item in s.strings]
            if not re.search(r'[A-Za-z]+', str_list[-1]):
                # '无密码' marks links that ship without a password
                str_list.append('无密码')
            pan_list.append((str_list[0], url, resource, str_list[-1]))
            # TODO: put these into Redis first and read them back from there
        except AttributeError:
            continue
    if update:
        pan_list = [pan_list[-1]]
    for item in pan_list:
        location = Location(episode=item[0], url=item[1], resource=item[2],
                            password=item[3])
        session.add(location)
    try:
        session.commit()
        # Committed entries do not need to be returned for re-processing
        pan_list = []
    except Exception as e:
        session.rollback()
        logging.error('Insert into database failed -- {}'.format(str(e)))
    finally:
        session.close()
    return pan_list
def index():
    if "X-Mxit-USERID-R" not in request.headers:
        # Fallback values for local testing outside the Mxit gateway
        mxit_id = 1
        nick = 'brad'
        ua = 'MANdroid'
    else:
        mxit_id = request.headers['X-Mxit-USERID-R']
        nick = request.headers['X-Mxit-Nick']
        ua = request.headers['X-Device-User-Agent']
    user = session.query(MxitUser).filter_by(mxit_id=mxit_id).first()
    if not user:
        user = MxitUser(mxit_id, nick)
        session.add(user)
        session.commit()
    session.close()
    return render_template('index.html', nick=nick, ua=ua, user=user)
def tearDown(self):
    """Test teardown"""
    session.close()
    # Remove the tables and their data from the database
    Base.metadata.drop_all(engine)
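# Hypothetical counterpart for context: a setUp that creates the schema the
# teardown above drops, assuming Base and engine come from the same module.
def setUp(self):
    """Test setup: create all tables before each test"""
    Base.metadata.create_all(engine)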
def run():
    # Localize to UTC; the original discarded the result of utc.localize()
    now = utc.localize(datetime.utcnow())
    five_minutes_ago = now - timedelta(minutes=5)
    ten_minutes_ago = now - timedelta(minutes=10)
    twenty_four_hours_ago = now - timedelta(days=1)
    try:
        five_minute_moving_average, = (
            session.query(func.avg(Messages.price))
            .filter(Messages.type == "match")
            .filter(Messages.time.op('AT TIME ZONE')('UTC')
                    .between(five_minutes_ago, now))
            .group_by(Messages.type).one())
    except NoResultFound:
        logger.error('Twitter feed log: no results found for 5 minute moving average.')
        threading.Timer(60 * minutes, run).start()
        return
    except DatabaseError:
        # TODO add text log for database errors
        # logger.error('Twitter feed log: database error.')
        threading.Timer(60 * minutes, run).start()
        return
    try:
        last_five_minute_moving_average, = (
            session.query(func.avg(Messages.price))
            .filter(Messages.type == "match")
            .filter(Messages.time.op('AT TIME ZONE')('UTC')
                    .between(ten_minutes_ago, five_minutes_ago))
            .group_by(Messages.type).one())
    except NoResultFound:
        logger.error('Twitter feed log: no results found for last 5 minute moving average.')
        threading.Timer(60 * minutes, run).start()
        return
    except DatabaseError:
        # logger.error('Twitter feed log: database error.')
        threading.Timer(60 * minutes, run).start()
        return
    try:
        twenty_four_hour_moving_average, = (
            session.query(func.avg(Messages.price))
            .filter(Messages.type == "match")
            .filter(Messages.time.op('AT TIME ZONE')('UTC')
                    .between(twenty_four_hours_ago, now))
            .group_by(Messages.type).one())
    except NoResultFound:
        logger.error('Twitter feed log: no results found for 24 hour moving average.')
        threading.Timer(60 * minutes, run).start()
        return
    except DatabaseError:
        # logger.error('Twitter feed log: database error.')
        threading.Timer(60 * minutes, run).start()
        return
    this_change = ((five_minute_moving_average - twenty_four_hour_moving_average)
                   / twenty_four_hour_moving_average)
    last_change = ((last_five_minute_moving_average - twenty_four_hour_moving_average)
                   / twenty_four_hour_moving_average)
    tweet_text = ("24 hour moving average: {1:.2f}\n"
                  "5 min moving average: {0:.2f}\n"
                  "Percent change: {2:.2%}\n"
                  "#Bitcoin".format(five_minute_moving_average,
                                    twenty_four_hour_moving_average,
                                    this_change))
    logger.info(tweet_text)
    # Tweet only when the 5 minute average first crosses +/-5% of the 24 hour
    # average, i.e. the previous 5 minute window had not crossed it yet.
    if (this_change > 0.05 or this_change < -0.05) and \
            not (last_change > 0.05 or last_change < -0.05):
        try:
            price_series = (session.query(Messages.time, Messages.price, Messages.size)
                            .filter(Messages.type == "match")
                            .filter(Messages.time.op('AT TIME ZONE')('UTC')
                                    .between(twenty_four_hours_ago, now))
                            .order_by(Messages.time)
                            .all())
        except DatabaseError:
            threading.Timer(60 * minutes, run).start()
            return
        fig, ax = pyplot.subplots()
        time, price, volume = zip(*price_series)
        ax.plot(time, price)
        ax.xaxis.set_major_locator(dates.HourLocator())
        ax.xaxis.set_major_formatter(dates.DateFormatter('%H'))
        ax.xaxis.set_minor_locator(dates.MinuteLocator())
        pyplot.gcf().autofmt_xdate()
        ax.format_xdata = dates.DateFormatter('%H')
        pyplot.subplots_adjust(left=0.1, right=0.9, bottom=0.3, top=0.9)
        pyplot.savefig('foo.png')
        pyplot.close()
        # Close the file handle after uploading instead of leaking it
        with open('foo.png', 'rb') as photo:
            response = twitter.upload_media(media=photo)
        twitter.update_status(status=tweet_text, media_ids=[response['media_id']])
    else:
        threading.Timer(60 * minutes, run).start()
        return
    session.close()
    threading.Timer(60 * minutes, run).start()
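# run() arms a threading.Timer(60 * minutes, run) before returning, so a
# single initial call keeps it repeating. `minutes` is assumed to be a
# module-level interval constant; this driver is a hypothetical sketch, not
# part of the original snippet.
minutes = 5

if __name__ == '__main__':
    run()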
    e1.gender.label("dm_gender"), e1.hire_date.label("dm_hire_date"),
    DeptManager.from_date.label("dm_from_date"),
    DeptManager.to_date.label("dm_to_date"),
    t2.title, t2.from_date.label("t_from_date"),
    t2.to_date.label("t_to_date"),
    e2.birth_date, e2.first_name, e2.last_name, e2.gender, e2.hire_date,
    DeptEmp.from_date, DeptEmp.to_date).\
    join(DeptEmp, DeptEmp.emp_no == e2.emp_no).\
    join(Department, Department.dept_no == DeptEmp.dept_no).\
    join(DeptManager, DeptManager.dept_no == Department.dept_no).\
    join(e1, e1.emp_no == DeptManager.emp_no).\
    join(t1, and_(t1.emp_no == e1.emp_no,
                  t1.from_date == DeptManager.from_date,
                  t1.to_date == DeptManager.to_date)).\
    join(t2, t2.emp_no == e2.emp_no).\
    filter(func.year('1999-01-01').between(func.year(DeptEmp.from_date),
                                           func.year(DeptEmp.to_date)),
           func.year('1999-01-01').between(func.year(DeptManager.from_date),
                                           func.year(DeptManager.to_date)),
           func.year('1999-01-01').between(func.year(t2.from_date),
                                           func.year(t2.to_date))).\
    group_by(e2.emp_no, Department.dept_no, Department.dept_name,
             e1.emp_no, t1.title, e1.birth_date, e1.first_name,
             e1.last_name, e1.gender, e1.hire_date,
             DeptManager.from_date, DeptManager.to_date,
             t2.title, t2.from_date, t2.to_date,
             e2.birth_date, e2.first_name, e2.last_name, e2.gender,
             e2.hire_date, DeptEmp.from_date, DeptEmp.to_date).limit(10).all()

'''Compare the two results; this should print True'''
for d in zip(sql_data, alchemy_data):
    print(d)
print('Result of the second example: {}'.format(operator.eq(sql_data, alchemy_data)))

'''Takeaway from the second example: when using sqlalchemy, mind the order of
the fields passed to query() -- the order of the tables those fields belong to
must match the order of the tables after FROM in the SQL statement being
reproduced'''
'''-------------------------------------------------------------------------------------------------'''
session.commit()
session.close()