def _gen_next_update(date_string=""):
    if date_string:
        dt = parse_date(date_string)
        now = datetime.now()
        if dt.weekday() == now.weekday():
            now = now + rdelta(weeks=1)
        else:
            now = now + rdelta(weekday=dt.weekday())
        return datetime.combine(now.date(), dt.time())

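A minimal illustration of the relativedelta behaviour the branch above relies on (added here, not part of the original code): rdelta(weekday=n) rolls a date forward to the next weekday n but leaves a date that already falls on that weekday untouched, which is why the same-weekday case adds a full week instead.

from datetime import datetime
from dateutil.relativedelta import relativedelta as rdelta

# Hypothetical dates chosen for illustration only.
wed = datetime(2024, 1, 3)                               # a Wednesday, weekday() == 2
assert wed + rdelta(weekday=2) == wed                    # already Wednesday: unchanged
assert wed + rdelta(weeks=1) == datetime(2024, 1, 10)    # what the same-weekday branch does
assert wed + rdelta(weekday=4) == datetime(2024, 1, 5)   # otherwise: roll to next Friday
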
def calculate_momentum(self, recalc: bool = False):
    for ticker in set(self.tickers_for_mom):
        df = tl.load_csv(ticker)
        for mom in self.momentums.keys():
            if recalc or 'Momentum_' + str(mom) not in df.keys():
                print(f"Calculate momentum for {ticker} with {mom} month-period")
                mom_list = []
                for day in range(len(df.Date)):
                    if day != len(df) - 1 and df.Date[day].month != df.Date[day + 1].month:
                        find_month = df.Date[day] - rdelta(months=mom)
                        month_cls = df[(df.Date.dt.year == find_month.year) &
                                       (df.Date.dt.month == find_month.month)].Close
                        if len(month_cls) != 0:
                            mom_list.append(df.Close[day] / month_cls.iloc[-1])
                        else:
                            mom_list.append(None)
                    elif day != 0 and df.Date[day].month != df.Date[day - 1].month:
                        find_month = df.Date[day] - rdelta(months=mom)
                        month_opn = df[(df.Date.dt.year == find_month.year) &
                                       (df.Date.dt.month == find_month.month)].Open
                        if len(month_opn) != 0:
                            mom_list.append(df.Open[day] / month_opn.iloc[0])
                        else:
                            mom_list.append(None)
                    else:
                        mom_list.append(None)
            else:
                continue
            df['Momentum_' + str(mom)] = mom_list
        tl.save_csv(self.FOLDER_WITH_DATA, ticker, df)

def calculate_momentum(self, asset: str, recalculate: bool = False):
    if asset == 'stocks':
        ticker, mom = self.signal_stocks, self.momentum_stocks
    elif asset == 'bonds':
        ticker, mom = self.signal_bonds, self.momentum_bonds
    else:
        raise ValueError("Incorrect value for 'asset' in 'calculate_momentum' func")

    df = tl.load_csv(ticker)
    if recalculate or 'Momentum_' + str(mom) not in df.keys():
        print(f"Calculate momentum for {ticker} with {mom} month-period")
        mom_list = []
        for day in range(len(df.Date)):
            if day != len(df) - 1 and df.Date[day].month != df.Date[day + 1].month:
                find_month = df.Date[day] - rdelta(months=mom)
                month_cls = df[(df.Date.dt.year == find_month.year) &
                               (df.Date.dt.month == find_month.month)].Close
                if len(month_cls) != 0:
                    mom_list.append(df.Close[day] / month_cls.iloc[-1])
                else:
                    mom_list.append(None)
            elif day != 0 and df.Date[day].month != df.Date[day - 1].month:
                find_month = df.Date[day] - rdelta(months=mom)
                month_opn = df[(df.Date.dt.year == find_month.year) &
                               (df.Date.dt.month == find_month.month)].Open
                if len(month_opn) != 0:
                    mom_list.append(df.Open[day] / month_opn.iloc[0])
                else:
                    mom_list.append(None)
            else:
                mom_list.append(None)
    else:
        return

    df['Momentum_' + str(mom)] = mom_list
    tl.save_csv(self.FOLDER_WITH_DATA, ticker, df)

def data_sufficiency_check(self, start_index: int):
    for index in range(start_index, len(self.trading_days)):
        if self.vol_calc_period == 'month' and \
                (self.trading_days[index] - rdelta(months=self.vol_calc_range)
                 >= self.trading_days[0]):
            break
        elif self.vol_calc_period == 'day' and \
                len(self.trading_days[:index + 1]) >= self.vol_calc_range:
            break
    return index

def purge_old_records(sess, months):
    # We're doing bulk deletes in unsynchronized mode, so first flush all
    # pending changes and expire all in-memory ORM instances, but don't
    # commit yet. Expiring prevents inconsistencies because the session
    # doesn't know which records are deleted. It doesn't actually matter in
    # this program because the changes are to a different table than the
    # deletions, but it's a good principle for safety.
    sess.flush()
    sess.expire_all()
    cutoff = datetime.date.today() - rdelta(months=months, day=1)
    sess.query(Access).filter(Access.ts < cutoff).delete(False)
    sess.query(Referer).filter(Referer.ts < cutoff).delete(False)

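A small sketch (added for illustration, not part of the original module) of what the cutoff arithmetic above evaluates to: the relative months offset and the absolute day=1 combine to give the first day of the month N months back, so whole months are purged at a time.

import datetime
from dateutil.relativedelta import relativedelta as rdelta

today = datetime.date(2023, 8, 17)   # hypothetical "today"
cutoff = today - rdelta(months=3, day=1)
assert cutoff == datetime.date(2023, 5, 1)
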
def calculate_current_vol(self, day_number: int):
    if self.rebalance_at == 'open':
        day_number -= 1
    data = self.all_tickers_data
    strata = self.strategy_data

    # Find the index for start_date
    if self.vol_calc_period == 'month':
        start_date = self.trading_days[day_number] - rdelta(months=self.vol_calc_range)
        print(start_date)
        start_date = self.trading_days[
            (self.trading_days.dt.year == start_date.year) &
            (self.trading_days.dt.month == start_date.month)].iloc[-1]
        start_index = list(self.trading_days).index(start_date)
    else:
        start_index = day_number - self.vol_calc_range

    # Build a virtual portfolio if capital is not placed yet or we use the
    # standard volatility type
    if self.capital_not_placed or self.vola_type == 'standard':
        virtual_port = np.zeros(len(self.trading_days[start_index:day_number + 1]))
        for ticker in self.portfolios['risk_on'].keys():
            ticker_data = data[ticker]
            prices = ticker_data[start_index:day_number + 1][self.price]
            virtual_port += np.array(prices) * self.portfolios['risk_on'][ticker]

    # Otherwise use realized capital (modified volatility type)
    else:
        total_cap = 0
        for ticker in self.portfolios['risk_on'].keys():
            total_cap += strata['On_Shares_' + ticker][day_number - 1] * \
                strata['On_Price_' + ticker][day_number]
        for ticker in self.portfolios['risk_off'].keys():
            total_cap += strata['Off_Shares_' + ticker][day_number - 1] * \
                strata['Off_Price_' + ticker][day_number]
        strata['Capital'][day_number] = total_cap + strata['Cash'][day_number - 1]
        capitals = strata['Capital'][start_index:day_number + 1]
        virtual_port = np.array(capitals)

    virtual_port_cng = np.diff(virtual_port) / virtual_port[:-1] * 100
    vol_coff = self.vol_target / (np.std(virtual_port_cng, ddof=1) * np.sqrt(252))
    self.cur_leverage = min(vol_coff, 1) if self.use_margin is False else vol_coff

def get_news(self):
    print('ニュース取得開始')  # "Starting news fetch"
    now = dt.now()
    table = self.base.classes.gis_utils_app_newsadmin
    news_admins = self.session.query(table)
    for news_admin in news_admins:
        print(news_admin.serch_keyword)
        news = self.newsapi.get_everything(
            q=news_admin.serch_keyword.replace(' ', ' AND '),
            from_param=(now - rdelta(months=1)).strftime('%Y-%m-%d'),
            to=now.strftime('%Y-%m-%d'))
        print(news)
        if news['totalResults'] == 0:
            print('取得したニュースはありません。')  # "No news retrieved."
            continue
        self.limit = int(news['totalResults']) if self.limit > int(
            news['totalResults']) else self.limit
        models = []
        for i in range(self.limit):
            print('count : %d' % i)
            article = news['articles'][i]
            table = self.base.classes.gis_utils_app_news
            model = table(
                source_id=article['source']['id'] or "",
                source_name=article['source']['name'] or "",
                author=article['author'] or "",
                title=article['title'] or "",
                # Watch out for the Google Translate usage limit
                title_jp=self.translator.translate(article['title'],
                                                   dest='ja').text or "",
                # title_jp = 'google 翻訳機能停止中',
                description=article['description'] or "",
                # Watch out for the Google Translate usage limit
                description_jp=self.translator.translate(article['description'],
                                                         dest='ja').text or "",
                # description_jp = 'google 翻訳機能停止中',
                url=article['url'] or "",
                url_to_image=article['urlToImage'] or "",
                published_at=dt.fromisoformat(
                    article['publishedAt'].replace('Z', '+00:00'))
                if article['publishedAt'] is not None else article['publishedAt'],
                content=article['content'] or "",
                topic_id=news_admin.topic_id)
            models.append(model)
            self.count = self.count + 1
        self.session.add_all(models)
    print('ニュース取得終了')  # "News fetch finished"

def mk_periods(self):
    """Generate list of periods the Project contains.

    Each element of the list is a list of five elements:
    [start, end, dta, q, status]

    start:  datetime() of start of period
    end:    datetime() of end of period
    dta:    duration of period, in days (yes, redundant)
    q:      quota, in GB
    status: either 'ok', 'ko' or 'buffer'
    """
    periods = []
    old_end, old_q = None, None
    for p in Period.objects.filter(proj=self.id).order_by("start"):
        start_date = p.start
        if old_end and start_date.date() == old_end.date():
            start_date += rdelta(days=1)

        # Check for a possible expired period in between the end of the
        # previous period and the start of the current one:
        if old_end and (p.start - old_end).days > 0:
            dta = (p.start - old_end).total_seconds() / 86400.
            q = old_q
            periods.append([old_end, p.start, dta, q, 'ko'])

        dta = (p.end - p.start).total_seconds() / 86400.
        status2str = {
            "expired": "ko",
            "frozen": "buffer",
            "active": "ok",
        }
        periods.append([start_date, p.end, dta, p.quota, status2str[p.status]])
        old_end = p.end
        old_q = p.quota

    # If the project is finished, that's it. Else:
    if not self.finished:
        now = timezone.now()
        if old_end < now:
            dta = (now - old_end).total_seconds() / 86400.
            if self.in_buffer:
                periods.append([old_end, now, dta, old_q, 'buffer'])
            else:
                periods.append([old_end, now, dta, old_q, 'ko'])

    return periods

def create(self, vals):
    # Get 2-digit Org code
    formatCode = ""
    Employees = self.env["hr.employee"]
    Orgs = self.env['res.org']
    Emp = Employees.search([['id', '=', vals.get('admin_id', False)]], limit=1)
    Org = Orgs.search([['id', '=', Emp.org_id.id]], limit=1)
    if not Org:
        msg = _('You cannot create PO contract without Org')
        raise osv.except_osv(_(u'Error!'), msg)
    three_mon_rel = rdelta(months=3)
    if vals.get('action_date', False):
        ActionDate = datetime.datetime.strptime(vals.get('action_date'),
                                                '%Y-%m-%d') + three_mon_rel
    RevNo = vals.get('poc_rev', 0)
    # New contract (CENTRAL-2016-322-R1)
    if RevNo == 0:
        self.env.cr.execute("SELECT Count(id) AS c"
                            " FROM purchase_contract"
                            " WHERE poc_org = %s"
                            " AND year = %s"
                            " AND poc_rev = 0",
                            (str(Org.code), str(ActionDate.year)))
        datas = self.env.cr.fetchone()
        CountPO = datas and datas[0] or 0
        running = str(CountPO + 1)
        formatCode = str(Org.code)
        formatCode += '-' + str(ActionDate.year)
        formatCode += '-' + str(running)
    # Revision (CO-51-2016-322-R1)
    else:
        running = vals.get('running', 0)
        formatCode = str(Org.code)
        formatCode += '-' + str(ActionDate.year)
        formatCode += '-' + str(running)
    vals.update({'poc_rev': RevNo})
    vals.update({'poc_org': str(Org.code)})
    vals.update({'poc_code': formatCode})
    vals.update({'year': str(ActionDate.year)})
    vals.update({'running': running})
    vals.update({'state': GENERATE})
    po_contract = super(PurchaseContract, self).create(vals)
    return po_contract

def _correlation_gear(fav_prods: List[str], plant_id: str, order_date: str):
    """
    Find goods that are usually taken with fav_prods.
    It is supposed that fav_prods have already gone through the similarity_gear.
    """
    order_date = datetime.strptime(order_date, '%Y-%m-%d')
    bottom_date = order_date - rdelta(months=1)
    prev_month_transaction_df = transaction_df[
        (transaction_df.chq_date_e >= bottom_date) &
        (transaction_df.chq_date_e <= order_date)]
    cl_mat_counts_df = pd.pivot_table(prev_month_transaction_df,
                                      index='client_id',
                                      columns='material',
                                      values='chq_date_e',
                                      aggfunc='count')
    # HACK: We can work only with goods that are represented well
    # (more than 30 clients bought them)
    cl_mat_counts_df = cl_mat_counts_df.loc[:, (cl_mat_counts_df.count(axis=0) >= 30)]

    # HACK: Avoid difference in transaction_df and materials.
    # Better to make them consistent and use random.
    # fav_prods_sample = np.random.choice(fav_prods, size=2, replace=False)
    fav_prods_sample = fav_prods[:2]

    add_prods = []
    for p in fav_prods_sample:
        prod_to_complement = cl_mat_counts_df[p]
        similar_prods = cl_mat_counts_df.corrwith(prod_to_complement)
        similar_prods = similar_prods.dropna()
        similar_prods = pd.DataFrame(similar_prods)
        similar_prods = similar_prods.sort_values(0, ascending=False)
        similar_prods = similar_prods[~similar_prods.index.isin(fav_prods_sample)]
        for sim_p, corr in similar_prods.iterrows():
            if all(_check_availability([sim_p], plant_id, order_date)):
                add_prods.append(sim_p)
                break
    return fav_prods + add_prods

def _compute_duration_start2end(self):
    for rec in self:
        start2end = '0 Day'
        if rec.start_date and rec.end_date:
            if rec.start_date <= rec.end_date:
                start_date = datetime.strptime(rec.start_date, '%Y-%m-%d')
                end_date = datetime.strptime(rec.end_date, '%Y-%m-%d')
                start = start_date.date()
                end = end_date.date()
                if end < start:
                    rec.duration_start2end = start2end
                    return
                enddate = end + timedelta(days=1)
                rd = rdelta(enddate, start)
                start2end = _('%s Year %s Month %s Day') % \
                    (str(rd.years), str(rd.months), str(rd.days))
            else:
                start2end = _("** Wrong Date **")
        rec.duration_start2end = start2end

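A worked example (added for illustration, not from the source) of the duration formula above: the one-day pad makes the range inclusive of the end date, and rdelta(date1, date2) decomposes the gap into calendar years, months and days.

from datetime import date, timedelta
from dateutil.relativedelta import relativedelta as rdelta

start = date(2021, 1, 1)
end = date(2021, 1, 31)              # hypothetical record dates
rd = rdelta(end + timedelta(days=1), start)
assert (rd.years, rd.months, rd.days) == (0, 1, 0)   # rendered as "0 Year 1 Month 0 Day"
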
def update_month(site, date, sess, feed_callback):
    log.debug("Updating site '{}' year {} month {}".format(site, date.year, date.month))
    adding = False

    # Make a site & month filter for Access queries. (``smf``)
    start = date.replace(day=1)
    end = date + rdelta(months=1, day=1)
    smf = sa.and_(Access.site == site, Access.ts >= start, Access.ts < end)

    # Fetch the Monthly record. (``mth``)
    q_mth = sess.query(Monthly)
    q_mth = q_mth.filter_by(site=site, year=date.year, month=date.month)
    mth = q_mth.first()

    # Create the Monthly record if it doesn't exist.
    if not mth:
        mth = Monthly(site=site, year=date.year, month=date.month)
        mth.feeds = 0
        mth.feedlinks = 0
        adding = True
        # Don't add it to the session yet because not all fields are
        # initialized.

    # Set page view count.
    static_prefixes = [u"/{}".format(x[:6]) for x in const.STATIC_SECTIONS]
    non_static_filter = ~sa.func.substr(Access.url, 1, 7).in_(static_prefixes)
    q = sess.query(sa.func.count()).select_from(Access).filter(smf)
    q = q.filter(non_static_filter)
    q = q.filter(~Access.url.in_(INEWS_FEED_URLS))
    mth.page_views = q.scalar()

    # Set unique remote IP count.
    q = sess.query(sa.func.count(Access.remote_addr.distinct())).filter(smf)
    mth.ips = q.scalar()

    # Set unique session ID count.
    q = sess.query(sa.func.count(Access.session.distinct())).filter(smf)
    mth.sessions = q.scalar()

    # Set feed count and feed link count.
    if feed_callback:
        feeds, feedlinks = feed_callback(site, smf, sess)
        mth.feeds = feeds
        mth.feedlinks = feedlinks

    mth.updated = sa.func.current_timestamp()
    if adding:
        sess.add(mth)

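A quick sketch (added for illustration, not from the original module) of the month bracketing used for the Access.ts filter above: replace(day=1) gives the first day of the report month and rdelta(months=1, day=1) the first day of the next month, forming a half-open [start, end) range.

import datetime
from dateutil.relativedelta import relativedelta as rdelta

report_date = datetime.date(2023, 2, 14)   # hypothetical report date
start = report_date.replace(day=1)
end = report_date + rdelta(months=1, day=1)
assert (start, end) == (datetime.date(2023, 2, 1), datetime.date(2023, 3, 1))
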
def retirement_main(g: argparse.Namespace) -> tuple:
    """
    hiredate -- used for calculating years of service.
    birthday -- to determine age
    min_age -- gotta be old enough
    req_years -- gotta work here a while
    magic_number -- there always is one.

    returns -- a tuple of possibly relevant info, or None if there is no
        ability to retire.
    """
    now = datetime.date.today()

    # Only the most cursory checking.
    if not g.birthday < g.hiredate < now:
        print("\n".join([
            "you must have been born before you were hired,",
            "and you must have been hired before today."
        ]))
        sys.exit(os.EX_DATAERR)

    current_age = now - g.birthday
    current_service = now - g.hiredate

    old_enough_on = g.birthday + rdelta(years=g.min_age)
    old_enough_now = old_enough_on < now
    long_enough_on = g.hiredate + rdelta(years=g.req_years)
    long_enough_now = long_enough_on < now

    if long_enough_now and old_enough_now:
        return (now, 0, old_enough_on, long_enough_on,
                current_age.days, current_service.days)

    no_earlier_than = max(long_enough_on, old_enough_on)
    # current_age and current_service are timedeltas, so convert them to
    # day-based relativedeltas before the arithmetic; halving the remaining
    # gap works because age and service each grow by one day per calendar day.
    magic_date = now + (rdelta(years=g.magic_number)
                        - rdelta(days=current_age.days)
                        - rdelta(days=current_service.days)) / 2
    quit_on = max(magic_date, no_earlier_than)

    if old_enough_on < g.hiredate:
        old_enough_on = 'when you started.'
    elif old_enough_on < now:
        old_enough_on = 'now'
    if long_enough_on < now:
        long_enough_on = 'now'

    return (quit_on, (quit_on - now).days, old_enough_on, long_enough_on,
            current_age.days, current_service.days)

def main():
    parser = get_parser()
    args = parser.parse_args()
    init_logging(args.debug, args.sql)
    engine = sa.create_engine(args.dburl)
    conn = engine.connect()
    sess = orm.Session(bind=conn)
    this_month = datetime.date.today().replace(day=1)
    last_month = this_month - rdelta(months=1)
    update_month(u"cameo", this_month, sess, None)
    update_month(u"cameo", last_month, sess, None)
    update_month(u"inews", this_month, sess, get_inews_feeds)
    update_month(u"inews", last_month, sess, get_inews_feeds)
    update_month(u"rlink", this_month, sess, None)
    update_month(u"rlink", last_month, sess, None)
    update_month(u"goods", this_month, sess, None)
    update_month(u"goods", last_month, sess, None)
    update_month(u"goods-erd", this_month, sess, None)
    update_month(u"goods-erd", last_month, sess, None)
    if args.delete_months_ago:
        purge_old_records(sess, args.delete_months_ago)
    sess.commit()

def date_interval_endpoints(starttime, endtime, day_of_new_interval):
    """
    Return a list of half-month endpoints.

    Keyword arguments:
    - starttime: datetime or date
    - endtime: datetime or date
    - day_of_new_interval: int

    Returns:
    - dates: list(datetime)
    """
    from datetime import datetime
    from dateutil.relativedelta import relativedelta as rdelta
    from dateutil.rrule import rrule, MONTHLY
    from pandas import to_datetime

    starttime = datetime(*starttime.timetuple()[:3], 0, 0)
    endtime = datetime(*endtime.timetuple()[:3], 0, 0)
    d = day_of_new_interval
    dates = list(rrule(MONTHLY, dtstart=starttime, until=endtime,
                       bymonthday=[1, d - 1, d, -1]))

    # Add starttime/endtime if not included in dates
    if not dates[0].day == 1 and not dates[0].day == d:
        dates = [starttime] + dates
    if (not dates[-1].day == to_datetime(dates[-1]).daysinmonth
            and not dates[-1].day == d - 1):
        dates = dates + [endtime]

    # Set time of right endpoints to 23:59:59
    for i in range(1, len(dates), 2):
        dates[i] = dates[i] + rdelta(hour=23, minute=59, second=59)

    return dates

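A usage sketch for the function above (illustrative only, assuming the function is in scope as written): splitting a single January with day_of_new_interval=16 yields two half-month intervals whose right endpoints are pushed to 23:59:59 by the rdelta(hour=..., minute=..., second=...) replacement.

from datetime import date, datetime

endpoints = date_interval_endpoints(date(2021, 1, 1), date(2021, 1, 31), 16)
assert endpoints == [
    datetime(2021, 1, 1, 0, 0, 0),
    datetime(2021, 1, 15, 23, 59, 59),
    datetime(2021, 1, 16, 0, 0, 0),
    datetime(2021, 1, 31, 23, 59, 59),
]
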
def _compute_duration_start2end(self):
    start2end = '0 Day'
    if self.start_date and self.end_date:
        if self.start_date <= self.end_date:
            start_date = datetime.datetime.strptime(self.start_date, "%Y-%m-%d")
            end_date = datetime.datetime.strptime(self.end_date, "%Y-%m-%d")
            start = start_date.date()
            end = end_date.date()
            if end < start:
                self.duration_start2end = start2end
                return
            enddate = end + datetime.timedelta(days=1)
            rd = rdelta(enddate, start)
            start2end = str(rd.years)
            start2end += _(' Year ')
            start2end += str(rd.months)
            start2end += _(' Month ')
            start2end += str(rd.days)
            start2end += _(' Day ')
        else:
            start2end = _("** Wrong Date **")
    self.duration_start2end = start2end

async def on_message(self, m):
    if m.channel.id in []:  # Reserved for federation with external global-chat services
        return
    if m.content.startswith("s-"):
        return
    if m.content.startswith("//"):
        return
    if m.author.id == self.bot.user.id:
        return
    if m.is_system():
        return
    if "cu:on_msg" in self.bot.features.get(m.author.id, []):
        return
    if isinstance(m.channel, discord.DMChannel):
        return
    if m.webhook_id:
        return

    self.bot.cursor.execute("select * from users where id=?", (m.author.id, ))
    upf = self.bot.cursor.fetchone()
    gchat_cinfo = self.bot.cursor.execute(
        "select * from gchat_cinfo where id == ?", (m.channel.id, )).fetchone()
    if gchat_cinfo:
        if upf["gban"] == 1:
            if not gchat_cinfo["connected_to"] in self.without_react:
                dc = await ut.opendm(m.author)
                await dc.send(self.bot._(m.author, "global-banned", m.author.mention))
                await self.repomsg(m, "思惟奈ちゃんグローバルチャットの使用禁止")
                await m.add_reaction("❌")
                await asyncio.sleep(5)
                await m.remove_reaction("❌", self.bot.user)
        if (datetime.datetime.now() - rdelta(hours=9) - rdelta(days=7)
                >= m.author.created_at) or upf["gmod"] or upf["gstar"]:
            try:
                content_checker(self.bot, m)
            except MaliciousInput as err:
                await self.repomsg(m, err.reason, err.should_ban)
                return
            try:
                if not gchat_cinfo["connected_to"] in self.without_react:
                    await m.add_reaction(self.bot.get_emoji(653161518346534912))
            except:
                pass
            self.bot.cursor.execute("select * from guilds where id=?", (m.guild.id, ))
            gpf = self.bot.cursor.fetchone()
            status_embed = discord.Embed(title="", description="", color=upf["gcolor"])
            status_embed.set_author(name=f"{ut.ondevicon(m.author)},({str(m.author.id)})")
            if gpf["verified"]:
                status_embed.set_footer(
                    text=f"✅:{m.guild.name}(id:{m.guild.id})",
                    icon_url=m.guild.icon_url_as(static_format="png"))
            else:
                status_embed.set_footer(
                    text=f"{m.guild.name}(id:{m.guild.id})",
                    icon_url=m.guild.icon_url_as(static_format="png"))
            if m.type == discord.MessageType.default and m.reference:
                ref = m.reference
                if ref.cached_message:
                    msg = ref.cached_message
                else:
                    try:
                        msg = await self.bot.get_channel(
                            ref.channel_id).fetch_message(ref.message_id)
                    except:
                        msg = None
                if msg:
                    status_embed.add_field(
                        name=f"{msg.author.display_name}のメッセージへの返信",
                        value=f"{msg.clean_content}")
                else:
                    status_embed.add_field(
                        name="メッセージへの返信",
                        value="(このメッセージは削除されている等で取得できません。)")
            if gchat_cinfo["connected_to"] in self.without_react:
                embeds = []
            else:
                embeds = [status_embed]
            if m.stickers:
                sticker = m.stickers[0]
                sembed = discord.Embed(title=f"スタンプ:{sticker.name}")
                if sticker.format == discord.StickerType.png:
                    sembed.set_image(url=sticker.image_url)
                elif sticker.format == discord.StickerType.apng:
                    sembed.set_image(
                        url=f"https://dsticker.herokuapp.com/convert.gif?url={sticker.image_url}")
                elif sticker.format == discord.StickerType.lottie:
                    # Note: https://cdn.discordapp.com/stickers/{id}/{hash}.json?size=1024
                    sembed.description = "画像取得非対応のスタンプです。"
                embeds.append(sembed)
            embeds = embeds + m.embeds[0:10 - len(embeds)]
            attachments = m.attachments
            spicon = ""
            if m.author.id in self.bot.team_sina:
                # Member of チーム☆思惟奈ちゃん (the bot's team)
                spicon = spicon + "🌠"
            if m.author.bot:
                spicon = spicon + "⚙"
            if upf["sinapartner"]:
                spicon = spicon + "💠"  # Verified account
            if m.author.id in config.partner_ids:
                spicon = spicon + "🔗"
            if upf["gmod"]:
                spicon = spicon + "🔧"
            if upf["gstar"]:
                spicon = spicon + "🌟"
            if spicon == "":
                spicon = "👤"
            name = f"[{spicon}]{upf['gnick']}"
            sendto = self.bot.cursor.execute(
                "select * from gchat_cinfo where connected_to = ?",
                (gchat_cinfo["connected_to"], )).fetchall()
            rtn = await self.gchat_send(
                sendto, m.channel, m.clean_content, name,
                m.author.avatar_url_as(static_format="png"), embeds, attachments)
            self.bot.cursor.execute(
                "INSERT INTO gchat_pinfo(id,content,allids,author_id,guild_id,timestamp) VALUES(?,?,?,?,?,?)",
                (m.id, [m.clean_content], rtn, m.author.id, m.guild.id, [
                    str(m.created_at.strftime(
                        '%Y{0}%m{1}%d{2} %H{3}%M{4}%S{5}').format(*'年月日時分秒'))
                ]))
            try:
                if not gchat_cinfo["connected_to"] in self.without_react:
                    await m.remove_reaction(
                        self.bot.get_emoji(653161518346534912), self.bot.user)
                    await m.add_reaction(self.bot.get_emoji(653161518195539975))
                    await asyncio.sleep(5)
                    await m.remove_reaction(
                        self.bot.get_emoji(653161518195539975), self.bot.user)
            except:
                pass
        else:
            await self.repomsg(m, "作成後7日に満たないアカウント")

async def msg_info(self, ctx, target: Union[commands.MessageConverter, None]):
    if target:
        fetch_from = "引数"
        msg = target
    else:
        if ctx.message.reference and ctx.message.type == discord.MessageType.default:
            if ctx.message.reference.cached_message:
                fetch_from = "返信"
                msg = ctx.message.reference.cached_message
            else:
                try:
                    fetch_from = "返信"
                    msg = await self.bot.get_channel(
                        ctx.message.reference.channel_id
                    ).fetch_message(ctx.message.reference.message_id)
                except:
                    fetch_from = "コマンド実行メッセージ"
                    msg = ctx.message
        else:
            fetch_from = "コマンド実行メッセージ"
            msg = ctx.message

    # Build and send a detail-info embed from the message stored in msg.
    e = discord.Embed(title=f"メッセージ情報({fetch_from}より取得)",
                      description=msg.system_content, color=self.bot.ec)
    e.set_author(
        name=f"{msg.author.display_name}({msg.author.id}){'[bot]' if msg.author.bot else ''}のメッセージ",
        icon_url=msg.author.avatar_url_as(static_format="png"))
    post_time = (msg.created_at + rdelta(hours=9)).strftime(
        "%Y{0}%m{1}%d{2} %H{3}%M{4}%S{5}").format(*"年月日時分秒")
    if msg.edited_at:
        edit_time = (msg.edited_at + rdelta(hours=9)).strftime(
            "%Y{0}%m{1}%d{2} %H{3}%M{4}%S{5}").format(*"年月日時分秒")
    else:
        edit_time = "なし"
    e.set_footer(text=f"メッセージ送信時間:{post_time}/最終編集時間:{edit_time}")
    e.add_field(name="含まれる埋め込みの数", value=f"{len(msg.embeds)}個")
    e.add_field(name="含まれる添付ファイルの数", value=f"{len(msg.attachments)}個")
    e.add_field(name="システムメッセージかどうか", value=msg.is_system())
    if msg.guild.rules_channel and msg.channel.id == msg.guild.rules_channel.id:
        chtype = f"{msg.channel.name}({msg.channel.id}):ルールチャンネル"
    elif msg.channel.is_news():
        chtype = f"{msg.channel.name}({msg.channel.id}):アナウンスチャンネル"
    else:
        chtype = f"{msg.channel.name}({msg.channel.id}):テキストチャンネル"
    e.add_field(name="メッセージの送信先チャンネル", value=chtype)
    if msg.reference:
        e.add_field(name="あるメッセージへの返信等",
                    value=f"返信元確認用:`{msg.reference.channel_id}-{msg.reference.message_id}`")
    e.add_field(name="メンションの内訳",
                value=f"全員あてメンション:{msg.mention_everyone}\nユーザーメンション:{len(msg.mentions)}個\n"
                      f"役職メンション:{len(msg.role_mentions)}個\nチャンネルメンション:{len(msg.channel_mentions)}個")
    e.add_field(name="メッセージID", value=str(msg.id))
    if msg.webhook_id:
        e.add_field(name="Webhook投稿", value=f"ID:{msg.webhook_id}")
    e.add_field(name="ピン留めされているかどうか", value=str(msg.pinned))
    if len(msg.reactions) != 0:
        e.add_field(name="リアクション",
                    value=",".join([f"{r.emoji}:{r.count}" for r in msg.reactions]))
    e.add_field(name="メッセージのフラグ", value=[i[0] for i in iter(msg.flags) if i[1]])
    e.add_field(name="このメッセージに飛ぶ", value=msg.jump_url)
    try:
        await ctx.reply(embed=e, mention_author=False)
    except:
        await ctx.send(embed=e)

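An illustrative note (added here, not from the bot's code) on the rdelta(hours=9) pattern used above: the message timestamps appear to be naive UTC datetimes (as in discord.py 1.x), so adding nine hours shifts them to JST before formatting.

import datetime
from dateutil.relativedelta import relativedelta as rdelta

created_at = datetime.datetime(2021, 5, 1, 15, 30, 0)    # hypothetical UTC timestamp
assert created_at + rdelta(hours=9) == datetime.datetime(2021, 5, 2, 0, 30, 0)
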
def deletion_date(self):
    """Return date in which project will be deleted from IHBuffer."""
    return self.get_end() + rdelta(months=+18)

def ihbuffer_date(self):
    """Date in which project will go to IHBuffer."""
    return self.get_end() + rdelta(months=+6)

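A small sketch (added for illustration, not part of the original model) of why rdelta suits the 6- and 18-month offsets above: month arithmetic clamps to the last valid day of the target month, which plain timedelta arithmetic cannot do.

from datetime import date
from dateutil.relativedelta import relativedelta as rdelta

end = date(2022, 8, 31)                               # hypothetical project end date
assert end + rdelta(months=+6) == date(2023, 2, 28)   # clamped to end of February
assert end + rdelta(months=+18) == date(2024, 2, 29)  # 2024 is a leap year
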
def backtest_basket(
    series: list,
    weights: list,
    costs: list = None,
    rebal_freq: RebalFreq = RebalFreq.DAILY,
):
    num_assets = len(series)
    costs = costs or [0] * num_assets

    if not all(isinstance(x, pd.Series) for x in series):
        raise MqTypeError("expected a list of series")

    if len(weights) != num_assets or len(weights) != len(costs):
        raise MqValueError("series, weights, and cost lists must have the same length")

    # For all inputs which are Pandas series, get the intersection of their calendars
    cal = pd.DatetimeIndex(
        reduce(
            np.intersect1d,
            (curve.index for curve in series + weights + costs
             if isinstance(curve, pd.Series)),
        ))

    # Reindex inputs and convert to pandas dataframes
    series = pd.concat([curve.reindex(cal) for curve in series], axis=1)
    weights = pd.concat([pd.Series(w, index=cal) for w in weights], axis=1)
    costs = pd.concat([pd.Series(c, index=cal) for c in costs], axis=1)

    if rebal_freq == RebalFreq.DAILY:
        rebal_dates = cal
    else:
        # Get hypothetical monthly rebalances
        num_rebals = (cal[-1].year - cal[0].year) * 12 + cal[-1].month - cal[0].month
        rebal_dates = [cal[0] + i * rdelta(months=1) for i in range(num_rebals + 1)]
        # Convert these to actual calendar days
        rebal_dates = [d for d in rebal_dates if d < max(cal)]
        rebal_dates = [min(cal[cal >= date]) for date in rebal_dates]

    # Create Units dataframe
    units = pd.DataFrame(index=cal, columns=series.columns)
    actual_weights = pd.DataFrame(index=cal, columns=series.columns)
    output = pd.Series(dtype='float64', index=cal)

    # Initialize backtest
    output.values[0] = 100
    units.values[0, ] = (output.values[0] * weights.values[0, ] / series.values[0, ])
    actual_weights.values[0, ] = weights.values[0, ]

    # Run backtest
    prev_rebal = 0
    for i, date in enumerate(cal[1:], 1):
        # Update performance
        output.values[i] = output.values[i - 1] + np.dot(
            units.values[i - 1, ], series.values[i, ] - series.values[i - 1, ])
        actual_weights.values[i, ] = (
            weights.values[prev_rebal, ] *
            (series.values[i, ] / series.values[prev_rebal, ]) *
            (output.values[prev_rebal] / output.values[i]))

        # Rebalance on rebal_dates
        if date in rebal_dates:
            # Compute costs
            output.values[i] -= (np.dot(
                costs.values[i, ],
                np.abs(weights.values[i, ] - actual_weights.values[i, ])) *
                output.values[i])

            # Rebalance
            units.values[i, ] = (output.values[i] * weights.values[i, ] /
                                 series.values[i, ])
            prev_rebal = i
            actual_weights.values[i, ] = weights.values[i, ]
        else:
            units.values[i, ] = units.values[i - 1, ]

    return output, actual_weights

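An illustrative trace (added here, not part of the functions above or below) of the monthly-rebalance branch: hypothetical month-step dates are generated with i * rdelta(months=1) and then snapped forward to the first entry of the trading calendar on or after each one.

import pandas as pd
from dateutil.relativedelta import relativedelta as rdelta

cal = pd.bdate_range("2023-01-04", "2023-03-31")     # hypothetical trading calendar
num_rebals = (cal[-1].year - cal[0].year) * 12 + cal[-1].month - cal[0].month
rebal_dates = [cal[0] + i * rdelta(months=1) for i in range(num_rebals + 1)]
rebal_dates = [d for d in rebal_dates if d < max(cal)]
rebal_dates = [min(cal[cal >= d]) for d in rebal_dates]
# 2023-02-04 and 2023-03-04 fall on Saturdays, so they snap to the following Mondays.
assert rebal_dates == [pd.Timestamp("2023-01-04"),
                       pd.Timestamp("2023-02-06"),
                       pd.Timestamp("2023-03-06")]
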
def basket_series(
    series: list,
    weights: list,
    costs: list = None,
    rebal_freq: RebalFreq = RebalFreq.DAILY,
    return_type: ReturnType = ReturnType.EXCESS_RETURN,
) -> pd.Series:
    """
    Calculates a basket return series.

    :param series: list of time series of instrument prices
    :param weights: list of weights
    :param costs: list of execution costs in decimal; defaults to costs of 0
    :param rebal_freq: rebalancing frequency - Daily or Monthly
    :param return_type: return type of underlying instruments - only excess return is supported
    :return: time series of the resulting basket

    **Usage**

    Calculates a basket return series.

    **Examples**

    Generate price series and combine them in a basket (weights 70%/30%) which
    rebalances monthly and assumes execution costs 5bps and 10bps each time the
    constituents are traded.

    >>> prices1 = generate_series(100)
    >>> prices2 = generate_series(100)
    >>> mybasket = basket_series([prices1, prices2], [0.7, 0.3], [0.0005, 0.001], monthly)

    **See also**

    :func:`prices`
    """
    num_assets = len(series)
    costs = costs or [0] * num_assets

    if not all(isinstance(x, pd.Series) for x in series):
        raise MqTypeError("expected a list of series")

    if len(weights) != num_assets or len(weights) != len(costs):
        raise MqValueError("series, weights, and cost lists must have the same length")

    # For all inputs which are Pandas series, get the intersection of their calendars
    cal = pd.DatetimeIndex(
        reduce(
            np.intersect1d,
            (curve.index for curve in series + weights + costs
             if isinstance(curve, pd.Series)),
        ))

    # Reindex inputs and convert to pandas dataframes
    series = pd.concat([curve.reindex(cal) for curve in series], axis=1)
    weights = pd.concat([pd.Series(w, index=cal) for w in weights], axis=1)
    costs = pd.concat([pd.Series(c, index=cal) for c in costs], axis=1)

    if rebal_freq == RebalFreq.DAILY:
        rebal_dates = cal
    else:
        # Get hypothetical monthly rebalances
        num_rebals = (cal[-1].year - cal[0].year) * 12 + cal[-1].month - cal[0].month
        rebal_dates = [cal[0] + i * rdelta(months=1) for i in range(num_rebals + 1)]
        # Convert these to actual calendar days
        rebal_dates = [d for d in rebal_dates if d < max(cal)]
        rebal_dates = [min(cal[cal >= date]) for date in rebal_dates]

    # Create Units dataframe
    units = pd.DataFrame(index=cal, columns=series.columns)
    output = pd.Series(index=cal)

    # Initialize backtest
    output.values[0] = 100
    units.values[0, ] = (output.values[0] * weights.values[0, ] / series.values[0, ])

    # Run backtest
    prev_rebal = 0
    for i, date in enumerate(cal[1:], 1):
        # Update performance
        output.values[i] = output.values[i - 1] + np.dot(
            units.values[i - 1, ], series.values[i, ] - series.values[i - 1, ])

        # Rebalance on rebal_dates
        if date in rebal_dates:
            # Compute costs
            actual_weights = (
                weights.values[prev_rebal, ] *
                (series.values[i, ] / series.values[prev_rebal, ]) *
                (output.values[prev_rebal] / output.values[i]))
            output.values[i] -= (np.dot(
                costs.values[i, ],
                np.abs(weights.values[i, ] - actual_weights)) * output.values[i])

            # Rebalance
            units.values[i, ] = (output.values[i] * weights.values[i, ] /
                                 series.values[i, ])
            prev_rebal = i
        else:
            units.values[i, ] = units.values[i - 1, ]

    return output