def load_tick(code, start_date, end_date, with_filter=True, strict=False, time=None):
    start = utils.to_datetime(start_date)
    end = utils.to_datetime(end_date)
    current = start
    data = None
    while current <= end:
        month = current.strftime("%Y%m")
        current = utils.to_datetime(
            utils.to_format(current + relativedelta(months=1), output_format="%Y-%m-01"))
        try:
            data_ = pandas.read_csv(Loader.tick_dir + "/" + month + "/" + str(code) + ".csv", header=None)
        except Exception:  # skip months with no tick file
            continue
        data = data_ if (data is None) else pandas.concat([data, data_])
    if data is None:
        return None
    data = Loader.tick_format(data)
    if with_filter:
        end_time = "23:59:59" if time is None else utils.format(
            "%s %s" % (end_date, time), output_format="%H:%M:%S")
        filtered = Loader.filter(data, "%s 00:00:00" % start_date, "%s %s" % (end_date, end_time), strict)
        if len(filtered) == 0:
            return None
        return filtered
    return data
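# A standalone sketch of the month-directory enumeration used above, with
# dateutil substituted for the utils.to_format/to_datetime round trip
# (a hypothetical simplification; sample dates are illustrative only).
from datetime import datetime
from dateutil.relativedelta import relativedelta

current, end = datetime(2020, 11, 15), datetime(2021, 2, 1)
months = []
while current <= end:
    months.append(current.strftime("%Y%m"))  # directory name, e.g. "202011"
    current = (current + relativedelta(months=1)).replace(day=1)
print(months)  # ['202011', '202012', '202101', '202102']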
def __init__(self, bug, hash):
    """
    Initialize attachments

    :arg hash: Dictionary of attachment details
    :arg bug: Instance of :class:`~bz_xmlrpc.classes.Bug` object
    :return: Instance of :class:`Attachment`

    .. note:: No need to use this directly. Use
        :meth:`~bz_xmlrpc.classes.Bug.get_attachments()`
    """
    self._hash = hash
    self.id = extract(hash, "id", "attach_id")
    self.content_type = extract(hash, "content_type", "mimetype")
    self.creation_time = to_datetime(extract(hash, "creation_time", "creation_ts"))
    self.attacher = extract(hash, "attacher", "submitter_id")
    self.description = extract(hash, "description")
    self.file_name = extract(hash, "file_name", "filename")
    self.bug = bug
    self.is_private = bool(extract(hash, "is_private", "isprivate"))
    self.is_obsolete = bool(extract(hash, "is_obsolete", "isobsolete"))
    self.is_patch = bool(extract(hash, "is_patch", "ispatch"))
    self.is_url = bool(extract(hash, "is_url", "isurl"))
    self.last_change_time = to_datetime(extract(hash, "last_change_time", "modification_time"))
    if self.id and self.bug:
        self.fetch_url = bug.bz.url.replace("xmlrpc.cgi", "attachment.cgi?id=%s" % self.id)
def add_to_time_buckets(self, parameter):
    clf = parameter['clf']
    request = parameter['request']
    if self.last_event is None:
        # Just store the first event
        self.time_buckets[self.current_index].append(parameter)
    else:
        current_date = utils.to_datetime(clf.date)
        last_date = utils.to_datetime(self.last_event['clf'].date)
        diff = math.floor((current_date - last_date).total_seconds())
        for i in range(1, diff + 1):  # ignored if diff == 0
            new_index = (self.current_index + i) % self.WINDOW
            # Fill the intermediate indexes with no log events
            self.time_buckets[new_index] = []
        # Max len of the circular buffer is WINDOW
        self.current_index = (self.current_index + diff) % self.WINDOW
        # Given the previous loop this should already be an array
        self.time_buckets[self.current_index].append(parameter)
    self.last_event = parameter
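# A minimal trace of the circular-bucket advance above: a gap of `diff`
# seconds empties the intermediate slots before the new event is appended
# (WINDOW and the event payloads here are toy values, not the real ones).
WINDOW = 5
time_buckets = [["e0"], [], [], [], []]
current_index, diff = 0, 3          # next event arrives 3 seconds later
for i in range(1, diff + 1):
    time_buckets[(current_index + i) % WINDOW] = []
current_index = (current_index + diff) % WINDOW
time_buckets[current_index].append("e1")
print(current_index, time_buckets)  # 3 [['e0'], [], [], ['e1'], []]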
def apply_manda_date(self, date):
    if utils.to_datetime(date) < utils.to_datetime("2019-08-01"):
        # Before 2019-08-01, the price data starts changing 3 days prior
        return self.select_weekday(date, 3)
    else:
        # From 2019-08-01 onward, the price data starts changing 2 days prior
        return self.select_weekday(date, 2)
def load_bitcoin_ohlc(code, start_date, end_date, with_filter=True, strict=False, time=None):
    start = utils.to_datetime(start_date)
    end = utils.to_datetime(end_date)
    current = start
    data = None
    while current <= end:
        current = utils.to_datetime(utils.to_format(current + relativedelta(days=1), output_format="%Y-%m-%d"))
        try:
            data_ = pandas.read_csv(Loader.bitcoin_dir + "/" + str(code) + "/" + utils.to_format(current) + ".csv")
        except Exception:  # skip days with no data file
            continue
        data = data_ if (data is None) else pandas.concat([data, data_])
    if data is None:
        return None
    data = Loader.format(data, float, date_format="%Y-%m-%d %H:%M:%S")
    data["low"] = data[["low", "open", "close"]].apply(min, axis=1)
    data["high"] = data[["high", "open", "close"]].apply(max, axis=1)
    if with_filter:
        end_time = "23:59:59" if time is None else utils.format("%s %s" % (end_date, time), output_format="%H:%M:%S")
        filtered = Loader.filter(data, "%s 00:00:00" % start_date, "%s %s" % (end_date, end_time), strict)
        if len(filtered) == 0:
            return None
        return filtered
    return data
def _populate(self, hash):
    """
    Accepts a bug hash populates bug attributes
    """
    # Hack for searched bugs
    if 'internals' in hash:
        hash = hash['internals']
        hash['status'] = hash['status']['value']
    self._hash = hash
    self.id = self._be('id', 'bug_id') or self.id
    self.summary = self._be('summary', 'short_desc') or self.summary
    self.description = self._be('description') or self.description
    self.assigned_to = self._be('assigned_to') or self.assigned_to
    self.qa_contact = self._be('qa_contact') or self.qa_contact
    self.reporter = self._be('reporter', 'creator') or self.reporter
    self.product = self._be('product') or self.product
    self.component = self._be('component') or self.component
    self.creation_time = (to_datetime(self._be('creation_time'))
                          or self.creation_time)
    self.last_change_time = (to_datetime(self._be('last_change_time'))
                             or self.last_change_time)
    self.dupe_of = self._be('dupe_of', 'dupe_id') or self.dupe_of
    self.priority = self._be('priority') or self.priority
    self.severity = self._be('severity', 'bug_severity') or self.severity
    self.partner = self._be('cf_partner') or self.partner
    self.target_milestone = (self._be('target_milestone')
                             or self.target_milestone)
    self.status = self._be('status', 'bug_status') or self.status
    self.whiteboard = (self._be('status_whiteboard', 'whiteboard')
                       or self.whiteboard)
    self.resolution = self._be('resolution') or self.resolution
    self.version = self._be('version') or self.version
    self.platform = self._be('platform', 'rep_platform') or self.platform
    self.keywords = self._be('keywords') or self.keywords
    if isinstance(self.keywords, str):
        self.keywords = self.keywords.split(', ')
    self.fixed_in = self._be('fixed_in') or self.fixed_in
    self._comments = self._be('longdescs', 'comments')
    if self._comments:
        self._hash2comments(self._comments)
    self._attachments = self._be('attachments')
    if self._attachments:
        self._hash2attachments(self._attachments)
    self._groups = self._be('groups')
    if self._groups:
        self.groups = [_Group(group) for group in self._groups]
    self._flags = self._be('flags')
    if self._flags:
        self.flags = [Flag(flag) for flag in self._flags]
    if self.id and self.bz:
        self.url = show_bug_url(self.bz.url) + str(self.id)
def subject(self, date):
    length = 10 if self.setting.portfolio_size is None else self.setting.portfolio_size
    data = self.load_portfolio(utils.to_datetime(date), length=length)
    market = high_update.load_portfolio(utils.to_datetime(date), self.setting.assets, 1000)
    if data is None:  # or market is None or len(market) < 15 or len(market) > 550:
        codes = []
    else:
        codes = data["code"].values.tolist()
    return codes
def _filter_by_date_interval(self, params):
    start_date = params.get('start', None)
    end_date = params.get('end', None)  # str(utils.today())
    if start_date:
        d1 = utils.to_datetime(start_date)
        d2 = utils.to_datetime(end_date)
        if d1 and d2:
            return {'range': {
                'created_at': {
                    'gte': str(d1).replace(' ', 'T'),
                    'lte': str(d2).replace(' ', 'T')
                }
            }}
    return None
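# What _filter_by_date_interval produces, sketched standalone: an
# Elasticsearch range query with ISO-8601 timestamps ("T" separator).
# The sample dates are illustrative, and str(utils.to_datetime(...)) is
# assumed to yield "%Y-%m-%d %H:%M:%S" strings.
start, end = '2021-01-01 00:00:00', '2021-01-31 23:59:59'
query = {'range': {'created_at': {
    'gte': start.replace(' ', 'T'),  # '2021-01-01T00:00:00'
    'lte': end.replace(' ', 'T'),    # '2021-01-31T23:59:59'
}}}
print(query)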
def __init__(self, info):
    self.worker_id = info['worker_id']
    self.study_code = info['study_code']
    self.admin_experiment_group = info.get('admin_experiment_group')
    self.admin_fb_max_mins = info.get('admin_fb_max_mins')
    self.admin_fb_max_opens = info.get('admin_fb_max_opens')
    self.admin_treatment_start = to_datetime(
        info.get('admin_treatment_start'), "%Y-%m-%d")
    self.admin_followup_start = to_datetime(
        info.get('admin_followup_start'), "%Y-%m-%d")
    self.admin_logging_stop = to_datetime(info.get('admin_logging_stop'), "%Y-%m-%d")
def subject(self, date):
    # Adjust depending on the state of the previous month's portfolio
    before = self.load_portfolio(utils.to_datetime(date) - utils.relativeterm(1))
    length = 10 if self.setting.portfolio_size is None else self.setting.portfolio_size
    length = int(length / 2) if before is None else length
    data = self.load_portfolio(utils.to_datetime(date), length=length)
    if data is None:
        codes = []
    else:
        codes = data["code"].values.tolist()
    return codes
def subject(self, date):
    data = self.load_portfolio(utils.to_datetime(date))
    if data is None:
        codes = []
    else:
        codes = data["code"].values.tolist()
    return codes
def before_ranking(date, ranking_type, before=1):
    d = utils.to_datetime(date) - utils.relativeterm(before, with_time=True)
    while not utils.is_weekday(d):
        d = d - utils.relativeterm(1, with_time=True)
    d = utils.to_format(d)
    stocks = Loader.ranking(d, ranking_type)
    return stocks
def lambda_handler(event, lambda_context):
    api = utils.get_twitter_api()
    today = datetime.today()
    time_max = datetime(today.year, today.month, today.day, 23, 59, 59) + relativedelta(days=6)
    time_min = datetime(today.year, today.month, today.day, 0, 0, 0)
    payload = {
        'maxResults': 50,
        'order_by': 'startTime',
        'timeMax': time_max.strftime('%Y-%m-%dT23:59:59Z'),
        'timeMin': time_min.strftime('%Y-%m-%dT00:00:00Z'),
        'singleEvents': True,
        'key': utils.GOOGLE_APIKEY,
    }
    logger.info('Start to fetch events at %s.', today.strftime('%Y-%m-%d'))
    r = requests.get(utils.URL, params=payload)
    res = r.json()
    try:
        events = res['items']
        logger.info('Fetching events succeeded.')
    except KeyError:
        logger.error('Fetching events failed.')
        raise
    target_events = []
    for event in events:
        date = utils.to_datetime(event['start']['date'])
        summary = event['summary']
        link = event['htmlLink']
        if date < time_max and (utils.is_live(summary) or utils.is_event_schedule(summary)):
            target_events.append({
                'summary': summary,
                'link': link,
                'date': event['start']['date'].replace('-', '/'),
            })
    logger.info('%s events within %s to %s',
                len(target_events),
                time_min.strftime('%Y-%m-%d'),
                time_max.strftime('%Y-%m-%d'))
    logger.info('Start to tweet events info')
    if target_events:
        for event in sorted(target_events, key=lambda x: x['date']):
            summary = event['summary']
            link = event['link']
            date = event['date']
            msg = f'【今週の #オサカナ 】{date} {summary} ({link})'
            api.update_status(status=msg)
            logger.info('[%s] was tweeted', summary)
            time.sleep(3)
        logger.info('Tweet succeeded')
def get_performance_score(performances):
    gain = 0
    gains = []
    for d in sorted(performances.items(), key=lambda x: utils.to_datetime(x[0])):
        gain = gain + d[1]["gain"]
        gains = gains + [gain]
    sum_gain = sum(list(map(lambda x: x["gain"], performances.values())))  # total gain
    sum_trade = sum(list(map(lambda x: x["trade"], performances.values())))  # total number of trades
    ave_trade = numpy.average(
        list(map(lambda x: x["trade"], performances.values())))  # average number of trades
    if sum_gain == 0 or sum_trade == 0:
        return 0
    min_gain = min(gains)
    gain_per_trade = (
        sum_gain - (min_gain if min_gain < 0 else -min_gain)) / sum_trade  # gain per trade
    diff = []
    for i, gain in enumerate(gains):
        average = (i + 1) * ave_trade * gain_per_trade + min_gain
        diff = diff + [abs(abs(gain) - abs(average))]
    score = 0 if sum(diff) == 0 else sum_gain / sum(diff)
    return score
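# A hand-traced toy run of get_performance_score with made-up numbers.
# Note that sum_gain - (min_gain if min_gain < 0 else -min_gain) equals
# sum_gain + abs(min_gain) in both branches, which is used below.
gains = [100, 50, 200]                  # cumulative equity curve, date order
sum_gain, sum_trade, days = 200, 5, 3
ave_trade = sum_trade / days            # ~1.67 trades per day
min_gain = min(gains)                   # 50
gain_per_trade = (sum_gain + abs(min_gain)) / sum_trade  # 50.0
ideal = [(i + 1) * ave_trade * gain_per_trade + min_gain for i in range(days)]
diff = [abs(abs(g) - abs(a)) for g, a in zip(gains, ideal)]
print(sum_gain / sum(diff))             # ~0.67; larger means the curve hugs the ideal line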
def _parse_org2_docx_events(doc):
    def clean_text(pg):
        return ' '.join(pg.text.split(':')[1:]).strip()

    events = []
    for i, p in enumerate(doc.paragraphs):
        if re.match(r'\d+\)\s+(n|N)ombre', p.text) is not None:
            parsed = {'actor': {}, 'location': {}, 'prison': {}, 'source': {}}
            parsed['actor']['name'] = clean_text(doc.paragraphs[i])
            parsed['actor']['organisation'] = clean_text(doc.paragraphs[i + 1])
            parsed['location']['name'] = clean_text(doc.paragraphs[i + 2])
            parsed['consequences'] = clean_text(doc.paragraphs[i + 3])
            parsed['charges'] = clean_text(doc.paragraphs[i + 4])
            parsed['prison']['name'] = clean_text(doc.paragraphs[i + 5])
            parsed['actor']['telephone'] = clean_text(doc.paragraphs[i + 6])
            parsed['description'] = clean_text(doc.paragraphs[i + 7])
            parsed['source']['name'] = clean_text(doc.paragraphs[i + 8])
            parsed['report_date'] = clean_text(doc.paragraphs[i + 9])
            # If a report date was supplied, parse it out and convert it to a
            # datetime object.
            if len(parsed['report_date']) > 0:
                parsed['report_date'] = parsed['report_date'].replace('.', '')
                try:
                    parsed['report_date'] = time.strptime(parsed['report_date'], '%d-%m-%Y')
                except ValueError:  # fall back to a two-digit year
                    parsed['report_date'] = time.strptime(parsed['report_date'], '%d-%m-%y')
                parsed['report_date'] = utils.to_datetime(parsed['report_date'])
            else:
                del parsed['report_date']
            events.append(parsed)
    return events
def create_performance(args, simulator_setting, performances):
    if len(performances) == 0:
        return {}
    # Brief report
    for date, performance in sorted(performances.items(), key=lambda x: utils.to_datetime(x[0])):
        summary = ["gain", "min_unrealized_gain", "crash", "max_drawdown"]
        if not (args.soft_limit is None and args.hard_limit is None):
            summary = summary + ["auto_stop_loss"]  # include auto_stop_loss when it is enabled
        if args.apply_compound_interest:
            summary = summary + ["init_assets"]
        stats = list(map(lambda x: "%s: %.02f" % (x, performance[x]), summary))
        print(date, ",\t".join(stats))
    gain = sum(list(map(lambda x: x["gain"], performances.values())))
    average_trade_size = numpy.average(
        list(map(lambda x: len(x["codes"]), performances.values())))
    result = {
        "gain": gain,
        "return": round(gain / simulator_setting.assets, 3),
        "max_drawdown": max(list(map(lambda x: x["max_drawdown"], performances.values()))),
        "max_position_term": max(list(map(lambda x: x["max_position_term"], performances.values()))),
        "max_position_size": max(list(map(lambda x: x["max_position_size"], performances.values()))),
        "average_trade_size": round(average_trade_size),
        "max_unavailable_assets": max(list(map(lambda x: x["max_unavailable_assets"], performances.values()))),
        "sum_contract_price": sum(list(map(lambda x: x["sum_contract_price"], performances.values()))),
        "commission": sum(list(map(lambda x: x["commission"], performances.values()))),
        "oneday_commission": sum(list(map(lambda x: x["oneday_commission"], performances.values()))),
        "interest": sum(list(map(lambda x: x["interest"], performances.values()))),
        "auto_stop_loss": sum(list(map(lambda x: x["auto_stop_loss"], performances.values()))),
        "trade": sum(list(map(lambda x: x["trade"], performances.values()))),
        "win_trade": sum(list(map(lambda x: x["win_trade"], performances.values()))),
    }
    print(json.dumps(utils.to_jsonizable(result)))
    return result
def get_targets(self, args, targets, date, use_cache=False):
    if args.code is None:
        if args.instant:
            date = utils.to_format(utils.select_weekday(utils.to_datetime(date), to_before=False))
        targets = list(self.strategy_creator(args, use_cache=use_cache).subject(date))
    else:
        targets = [args.code]
    return targets
def subject(self, date):
    length = 10 if self.setting.portfolio_size is None else self.setting.portfolio_size
    data = self.load_portfolio(utils.to_datetime(date), length=length)
    if data is None:
        codes = []
    else:
        codes = data["code"].values.tolist()
    return codes
def validate_expiry(expiry):
    from utils import from_datetime, to_datetime
    if isinstance(expiry, datetime):
        expiry = from_datetime(expiry)
    try:
        return to_datetime(expiry)
    except Exception:
        raise ValidationError(
            {'expiry': 'Expiry needs to be in format %Y-%m-%d %H:%M:%S.'})
def load_portfolio(date, price, length=10):
    d = utils.to_datetime(date)
    year = d.year
    month = (int(d.month / 3) + 1) * 3
    # if d.month in [3, 6, 9, 12]:
    #     month = month + 3
    if 12 < month:
        year = d.year + 1
        month = 3
    code = "nikkei225mini_%s%02d_daytime" % (year, month)
    return pandas.DataFrame([code], columns=["code"])
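# The contract-month arithmetic above, traced standalone: each date maps
# to a quarterly expiry month (3/6/9/12), and December rolls over into
# March of the following year (sample dates are illustrative).
from datetime import date
for d in [date(2021, 1, 15), date(2021, 3, 1), date(2021, 12, 30)]:
    year, month = d.year, (int(d.month / 3) + 1) * 3
    if 12 < month:
        year, month = d.year + 1, 3
    print("nikkei225mini_%s%02d_daytime" % (year, month))
# nikkei225mini_202103_daytime
# nikkei225mini_202106_daytime
# nikkei225mini_202203_daytime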
def load_stock(self, start_date, end_date):
    combination_setting = strategy.CombinationSetting()
    strategy_creator = CombinationStrategy(combination_setting)
    codes = []
    for date in utils.daterange(utils.to_datetime(start_date), utils.to_datetime(end_date)):
        codes = list(set(codes + strategy_creator.subject(utils.to_format(date))))
    data = None
    while data is None or len(data) <= self.state_size:
        self.code = numpy.random.choice(codes)
        data = Loader.load(
            self.code,
            utils.to_format(utils.to_datetime(start_date) - relativedelta(months=12)),
            end_date)
        data = utils.add_stats(data)
        data = Loader.filter(data, start_date, end_date)
    print("code: [%s]" % self.code)
    return data
def _to_logs(self, hits):
    logs = []
    for hit in hits:
        log = Log()
        log.id = hit.meta.id
        log.filename = hit.filename
        log.level = hit.level
        log.message = hit.message
        log.created_at = utils.to_datetime(hit.created_at, '%Y-%m-%d')
        logs.append(log)
    return logs
def get_start_dates_daily(self):
    end_date = self.end_date if self.end_date \
        else datetime.date(datetime.date.today().year + 1, 1, 1)
    if self.repeat_on == self.DAILY:
        return rrule(DAILY, dtstart=self.start_date, interval=self.repeat_every, until=end_date)
    return (to_datetime(self.start_date), )
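# dateutil's rrule as used above: one occurrence every `interval` days
# from dtstart through `until`, inclusive (toy dates for illustration).
from datetime import date
from dateutil.rrule import rrule, DAILY

dates = list(rrule(DAILY, dtstart=date(2024, 1, 1), interval=3, until=date(2024, 1, 10)))
print([d.date().isoformat() for d in dates])
# ['2024-01-01', '2024-01-04', '2024-01-07', '2024-01-10']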
def simulate_params(stocks, terms, strategy_simulator, ignore_manda=True):
    params = []
    strategy_simulator.simulator_setting.strategy = None
    strategy_creator = strategy_simulator.strategy_creator(stocks["args"])
    for term in terms:
        start = utils.to_format(term["start_date"])
        end = utils.to_format(term["end_date"])
        codes = select_codes(stocks["args"], start, end, strategy_simulator)
        select = select_data(codes, stocks, start, end, strategy_creator)
        params.append((select, utils.to_format(utils.to_datetime(start)), end, ignore_manda))
        # print("simulate params:", start, end, utils.timestamp())
    return params
def simulate_dates(self, codes, stocks, start_date, end_date):
    dates = []
    dates_dict = {}
    for code in codes:
        if code not in stocks.keys():
            continue
        dates_dict[code] = stocks[code].dates(start_date, end_date)
        dates = list(set(dates + dates_dict[code]))
    self.log("dates: %s" % dates)
    # Simulate day by day
    dates = sorted(dates, key=lambda x: utils.to_datetime(x))
    return dates
def transform(data):
    covid_df = None
    try:
        logger.info('Starting transformation')
        nyc_times_df = data['nyc_times']
        johns_hopkins_df = data['johns_hopkins']
        nyc_times_df['date'] = to_datetime('date', nyc_times_df)
        johns_hopkins_df['Date'] = to_datetime('Date', johns_hopkins_df)
        johns_hopkins_df = johns_hopkins_df[johns_hopkins_df['Country/Region'] == 'US']
        johns_hopkins_df = johns_hopkins_df[['Date', 'Recovered']]
        johns_hopkins_df.columns = [column.lower() for column in johns_hopkins_df.columns]
        covid_df = pd.merge(nyc_times_df, johns_hopkins_df, on='date')
    except Exception as err:
        n.notify('Error in the transformation')
        logger.error('Error in the transformation: %s', err)
    logger.info('Transformation completed')
    return covid_df
def select_data(codes, stocks, start, end, strategy_creator):
    select = {"data": {}, "index": stocks["index"], "args": stocks["args"]}
    args = select["args"]
    for code in codes:
        if code not in stocks["data"].keys():
            continue
        start_date = utils.to_format(utils.to_datetime(start) - utils.relativeterm(3))
        select["data"][code] = stocks["data"][code].split(start_date, end)
        select["data"][code] = strategy_creator.add_data(stocks["data"][code], stocks["index"])
    return select
def refresh(self):
    me = frozenset([
        '*****@*****.**',
        '*****@*****.**',
        '*****@*****.**',
        '*****@*****.**',
    ])
    incoming = self.data['isIncoming']
    # indicator
    self._w_indicator_faw.set_text('★' if '#favorite' in self.data['labels'] else '☆')
    self._w_indicator_inc.set_text('»»»»' if incoming else '««««')
    s = 'incoming' if self.data['isIncoming'] else 'outgoing'
    self._w_indicator_inc._original_map[0] = s
    # timestamp
    val = to_datetime(self.data['timestamp'])
    val = val.strftime(
        f'%A, %d %B {"%Y " if val.year != datetime_today().year else ""}%H:%M:%S')
    self._w_timestamp.set_text(val)
    # members
    if incoming:
        self._w_label_membersMain.set_text('From: ')
        val = self.data['from']
        self._w_membersMain.set_text(val)
        val = set()
        for k in ['to', 'cc', 'bcc']:
            if self.data[k]:
                val.update(self.data[k])
        self._w_membersMore.set_text(', '.join(val))
        self._w_label_membersMore.set_text('And To: ' if val else '')
    else:
        self._w_label_membersMain.set_text('To: ')
        val = self.data['to']
        self._w_membersMain.set_text(', '.join(sorted(val)))
        val = set()
        for k in ['cc', 'bcc']:
            if self.data[k]:
                val.update(self.data[k])
        self._w_membersMore.set_text(', '.join(sorted(val)))
        self._w_label_membersMore.set_text('And To: ' if val else '')
    # subject
    re_clearReply = re.compile(r'^((?:(?:re)|(?:Re)|(?:RE)):\s*)+')
    val = re_clearReply.sub('', self.data['subject'])
    self._w_subject.set_text(val)
    # last message
    val = self.data['bodyPlain'] or self.data['bodyHtml']
    val = '\n'.join(s for s in val.split('\n') if not s.startswith('>'))
    val = val.replace('\r', '').replace('\t', ' ')
    self._w_msg.set_text(val)
def steady_trend_stocks(start_date, end_date, filename="rising_stocks.csv"):
    current = start_date
    data = {"all": []}
    output_format = "%Y%m"
    while int(utils.format(current, output_format=output_format)) <= int(utils.format(end_date, output_format=output_format)):
        try:
            months = utils.format(current, output_format=output_format)
            d = pandas.read_csv("%s/steady_trend_stocks/%s/%s" % (Loader.settings_dir, months, filename), header=None)
            d.columns = ['code']
            data[months] = d["code"].values.tolist()
            data["all"] = list(set(data["all"] + data[months]))
        except Exception:  # skip months with no file
            continue
        finally:
            current = utils.to_format(utils.to_datetime(current) + utils.relativeterm(1))
    return data
def create_terms(args):
    optimize_terms = []
    validate_terms = []
    valid_end_date = utils.to_datetime(args.date)
    for c in range(args.count):
        if args.instant:
            end_date = valid_end_date - utils.relativeterm(args.validate_term, with_time=True)
            start_date = end_date - utils.relativeterm(args.validate_term * args.optimize_count, with_time=True)
        else:
            end_date = valid_end_date - utils.relativeterm(args.validate_term)
            start_date = end_date - utils.relativeterm(args.validate_term * args.optimize_count)
        term = {
            "start_date": start_date,
            "end_date": end_date - utils.relativeterm(1, with_time=True)
        }
        validate_term = {
            "start_date": end_date,
            "end_date": valid_end_date - utils.relativeterm(1, with_time=True)
        }
        if args.optimize_count > 0:
            optimize_terms.append(term)
        validate_terms.append(validate_term)
        valid_end_date = start_date
    print(list(map(lambda x: "%s - %s" % (str(x["start_date"]), str(x["end_date"])), optimize_terms)))
    print(list(map(lambda x: "%s - %s" % (str(x["start_date"]), str(x["end_date"])), validate_terms)))
    optimize_terms = sorted(optimize_terms, key=lambda x: x["start_date"])
    validate_terms = sorted(validate_terms, key=lambda x: x["start_date"])
    return optimize_terms, validate_terms
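# A simplified trace of the walk-back windowing in create_terms, assuming
# utils.relativeterm(n) behaves like a month offset (hypothetical stand-in).
# Each pass carves a validation window off the end, an optimization window
# before it, then continues backwards from the optimization start.
from datetime import datetime
from dateutil.relativedelta import relativedelta

valid_end = datetime(2021, 6, 1)
validate_term, optimize_count = 1, 2
for _ in range(2):  # args.count
    end = valid_end - relativedelta(months=validate_term)
    start = end - relativedelta(months=validate_term * optimize_count)
    print("optimize %s -> %s | validate %s -> %s" %
          (start.date(), end.date(), end.date(), valid_end.date()))
    valid_end = start
# optimize 2021-03-01 -> 2021-05-01 | validate 2021-05-01 -> 2021-06-01
# optimize 2020-12-01 -> 2021-02-01 | validate 2021-02-01 -> 2021-03-01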
def __init__(self, bug, hash):
    """
    Initialize comments

    :arg hash: Dictionary of comment details
    :arg bug: Instance of :class:`~bz_xmlrpc.classes.Bug` object
    :return: Instance of :class:`Comment`

    .. note:: No need to use this directly. Use
        :meth:`~bz_xmlrpc.classes.Bug.get_comments()`
    """
    self._hash = hash
    self.id = extract(hash, "id", "comment_id")
    self.author = extract(hash, "email", "author")
    self.bug = bug
    self.is_private = bool(extract(hash, "is_private", "isprivate"))
    self.text = extract(hash, "text", "body")
    self.time = to_datetime(extract(hash, "time", "bug_when"))
def __init__(self, source_path, destination, use_in_feeds, translations,
             default_lang, blog_url, compile_html):
    """Initialize post.

    The base path is the .txt post file. From it we calculate the
    meta file, as well as any translations available, and
    the .html fragment file path.

    `compile_html` is a function that knows how to compile this Post to
    html.
    """
    self.prev_post = None
    self.next_post = None
    self.use_in_feeds = use_in_feeds
    self.blog_url = blog_url
    self.source_path = source_path  # posts/blah.txt
    self.post_name = os.path.splitext(source_path)[0]  # posts/blah
    self.base_path = os.path.join("cache", self.post_name + ".html")  # cache/posts/blah.html
    self.metadata_path = self.post_name + ".meta"  # posts/blah.meta
    self.folder = destination
    self.translations = translations
    self.default_lang = default_lang
    if os.path.isfile(self.metadata_path):
        with codecs.open(self.metadata_path, "r", "utf8") as meta_file:
            meta_data = meta_file.readlines()
        while len(meta_data) < 5:
            meta_data.append("")
        default_title, default_pagename, self.date, self.tags, self.link = \
            [x.strip() for x in meta_data][:5]
    else:
        default_title, default_pagename, self.date, self.tags, self.link = \
            utils.get_meta(self.source_path)
    if not default_title or not default_pagename or not self.date:
        raise OSError("You must set a title and slug and date!")
    self.date = utils.to_datetime(self.date)
    self.tags = [x.strip() for x in self.tags.split(",")]
    self.tags = list(filter(None, self.tags))
    self.compile_html = compile_html
    self.pagenames = {}
    self.titles = {}
    # Load internationalized titles
    for lang in translations:
        if lang == default_lang:
            self.titles[lang] = default_title
            self.pagenames[lang] = default_pagename
        else:
            metadata_path = self.metadata_path + "." + lang
            source_path = self.source_path + "." + lang
            try:
                if os.path.isfile(metadata_path):
                    with codecs.open(metadata_path, "r", "utf8") as meta_file:
                        meta_data = [x.strip() for x in meta_file.readlines()]
                    while len(meta_data) < 2:
                        meta_data.append("")
                    self.titles[lang] = meta_data[0] or default_title
                    self.pagenames[lang] = meta_data[1] or default_pagename
                else:
                    ttitle, ppagename, tmp1, tmp2, tmp3 = utils.get_meta(source_path)
                    self.titles[lang] = ttitle or default_title
                    self.pagenames[lang] = ppagename or default_pagename
            except Exception:
                self.titles[lang] = default_title
                self.pagenames[lang] = default_pagename
def __init__(self, source_path, cache_folder, destination, use_in_feeds,
             translations, default_lang, blog_url, messages):
    """Initialize post.

    The base path is the .txt post file. From it we calculate the
    meta file, as well as any translations available, and
    the .html fragment file path.
    """
    self.prev_post = None
    self.next_post = None
    self.blog_url = blog_url
    self.is_draft = False
    self.source_path = source_path  # posts/blah.txt
    self.post_name = os.path.splitext(source_path)[0]  # posts/blah
    # cache/posts/blah.html
    self.base_path = os.path.join(cache_folder, self.post_name + ".html")
    self.metadata_path = self.post_name + ".meta"  # posts/blah.meta
    self.folder = destination
    self.translations = translations
    self.default_lang = default_lang
    self.messages = messages
    if os.path.isfile(self.metadata_path):
        with codecs.open(self.metadata_path, "r", "utf8") as meta_file:
            meta_data = meta_file.readlines()
        while len(meta_data) < 6:
            meta_data.append("")
        (default_title, default_pagename, self.date, self.tags,
         self.link, default_description) = [x.strip() for x in meta_data][:6]
    else:
        (default_title, default_pagename, self.date, self.tags,
         self.link, default_description) = utils.get_meta(self.source_path)
    if not default_title or not default_pagename or not self.date:
        raise OSError("You must set a title and slug and date!")
    self.date = utils.to_datetime(self.date)
    self.tags = [x.strip() for x in self.tags.split(',')]
    self.tags = list(filter(None, self.tags))
    # While draft comes from the tags, it's not really a tag
    self.use_in_feeds = use_in_feeds and "draft" not in self.tags
    self.is_draft = 'draft' in self.tags
    self.tags = [t for t in self.tags if t != 'draft']
    self.pagenames = {}
    self.titles = {}
    self.descriptions = {}
    # Load internationalized titles
    # TODO: this has gotten much too complicated. Rethink.
    for lang in translations:
        if lang == default_lang:
            self.titles[lang] = default_title
            self.pagenames[lang] = default_pagename
            self.descriptions[lang] = default_description
        else:
            metadata_path = self.metadata_path + "." + lang
            source_path = self.source_path + "." + lang
            try:
                if os.path.isfile(metadata_path):
                    with codecs.open(metadata_path, "r", "utf8") as meta_file:
                        meta_data = [x.strip() for x in meta_file.readlines()]
                    while len(meta_data) < 6:
                        meta_data.append("")
                    self.titles[lang] = meta_data[0] or default_title
                    self.pagenames[lang] = meta_data[1] or default_pagename
                    self.descriptions[lang] = meta_data[5] or default_description
                else:
                    ttitle, ppagename, tmp1, tmp2, tmp3, ddescription = \
                        utils.get_meta(source_path)
                    self.titles[lang] = ttitle or default_title
                    self.pagenames[lang] = ppagename or default_pagename
                    self.descriptions[lang] = ddescription or default_description
            except Exception:
                self.titles[lang] = default_title
                self.pagenames[lang] = default_pagename
                self.descriptions[lang] = default_description