def test_03(self):
    """End-to-end test: a Korean nanban command with an explicit clock time
    should yield a response whose 'next nanban time' line equals the parsed
    target time plus the nanban period.
    """
    # BUGFIX: logger was created for self.test_02 (copy-paste error),
    # which tags all log records with the wrong function name.
    logger = HenriqueLogger.func_level2logger(self.test_03, logging.DEBUG)

    Chatroom.chatrooms2upsert([ChatroomKakaotalk.chatroom()])

    sender_name = "iris"
    channel_user_codename = ChannelUserKakaotalk.sender_name2codename(
        sender_name)
    ChannelUser.channel_users2upsert(
        [ChannelUserKakaotalk.sender_name2channel_user(sender_name)])

    # Timezone-aware "now" in the Korean locale's tz; target is 3 minutes ago.
    now_seoul = datetime.now(
        tz=pytz.timezone(HenriqueLocale.lang2tzdb("ko")))
    dt_target = now_seoul - timedelta(seconds=3 * 60)

    # e.g. "?남만 3:58 PM" — lstrip("0") drops %I's leading zero.
    text = "?남만 {}".format(dt_target.strftime("%I:%M %p").lstrip("0"))
    logger.debug({
        "text": text,
        "now_seoul": now_seoul,
    })

    packet = {
        KhalaPacket.Field.TEXT: text,
        KhalaPacket.Field.CHATROOM: KakaotalkUWOChatroom.codename(),
        KhalaPacket.Field.CHANNEL_USER: channel_user_codename,
        KhalaPacket.Field.SENDER_NAME: sender_name,
    }

    response = NanbanSkill.packet2response(packet)
    # pprint(text)
    # pprint(response)

    response_lines = response.splitlines()
    # Slice the clock-time substring out of the 3rd response line, e.g.
    # "다음 남만 시각: 3:58:00 PM (KST) ..." -> "3:58:00 PM (KST)"
    span = (
        len("다음 남만 시각: "),
        len("다음 남만 시각: 3:58:00 PM (KST) "),
    )
    hyp = SpanTool.list_span2sublist(response_lines[2], span).strip()

    dt_nanban = dt_target + NanbanTimedelta.period()
    ref = dt_nanban.strftime("%I:%M:00 %p (KST)").lstrip("0")

    self.assertEqual(
        hyp,
        ref,
    )
def date_list2chunks_yearly_fullweeks(cls, date_list):
    """Yield full-week index spans over ``date_list``, chunked at year boundaries.

    Each time the ``.year`` of consecutive dates changes, the segment since the
    previous boundary is snapped to full weeks (weeks starting on SUNDAY) via
    ``date_list_span_weekday2span_fullweek`` and then clamped to valid indices.

    NOTE(review): the trailing segment (from the last year change to the end of
    ``date_list``) is never yielded — confirm this is intentional.
    """
    n = len(date_list)

    def is_year_changed(date_list, i):
        # The first element can never be a year boundary.
        if i == 0:
            return False
        return date_list[i - 1].year != date_list[i].year

    i_start = 0
    for i, d in enumerate(date_list):
        if not is_year_changed(date_list, i):
            continue
        # Segment [i_start, i) ends just before the year change at index i.
        span_fullweek_raw = cls.date_list_span_weekday2span_fullweek(
            date_list, (i_start, i), DayOfWeek.SUNDAY)
        i_start = i  # update to next
        # Clamp the snapped span so it stays within the list bounds.
        span_fullweek = SpanTool.span_size2valid(span_fullweek_raw, n)
        yield span_fullweek
def str_spans_func2processed(cls, str_in, span_list, func,):
    """Apply ``func`` to each span of ``str_in`` and re-join the pieces."""
    from foxylib.tools.span.span_tool import SpanTool

    processed = SpanTool.list_spans_func2processed(
        str_in, span_list, func, f_list2chain="".join)
    return processed
def str_span2substr(cls, str_in, span):
    """Return the substring of ``str_in`` covered by ``span``."""
    substr = SpanTool.list_span2sublist(str_in, span)
    return substr
def match2len(cls, m):
    """Return the length of the span associated with match ``m``."""
    span = cls.match2span(m)
    return SpanTool.span2len(span)
def entity_list2entities_list_grouped(cls, text, entity_list):
    """Split ``entity_list`` into sublists, one per group span derived from ``text``."""
    grouped = []
    for group_span in cls.entity_list2group_spans(text, entity_list):
        grouped.append(SpanTool.list_span2sublist(entity_list, group_span))
    return grouped
def cospan2inc(cls, cospan):
    """Build the ref/hyp increment mapping for ``cospan``."""
    span_ref = cls.cospan2span_ref(cospan)
    span_hyp = cls.cospan2span_hyp(cospan)
    return {
        cls.Field.SPAN_REF: SpanTool.span2inc(span_ref),
        cls.Field.SPAN_HYP: SpanTool.span2inc(span_hyp),
    }
def fulltext2text(_entity):
    """Slice the entity's own span out of its fulltext."""
    return SpanTool.list_span2sublist(
        cls.entity2fulltext(_entity), cls.entity2span(_entity))
def colspan2span(cls, colspan):
    """Shift ``colspan`` left by the column-header count."""
    offset = -cls.COUNT_COLHEAD
    return SpanTool.add_each(colspan, offset)
def span2colspan(cls, span):
    """Shift ``span`` right by the column-header count."""
    offset = cls.COUNT_COLHEAD
    return SpanTool.add_each(span, offset)
def table_colspan2rowindex_list_starting(cls, table, colspan):
    """Row indices whose first column index falls inside ``colspan``."""
    row_count = len(table)

    def starts_in_colspan(rowindex):
        colindex_first = cls.table_i2colindex_first(table, rowindex)
        return SpanTool.covers_index(colspan, colindex_first)

    return lfilter(starts_in_colspan, range(row_count))
def match2has_equal_token_span_len(cls, _match):
    """Return True iff the ref and hyp token spans of ``_match`` have equal length."""
    # BUGFIX: the variable names were swapped (span_hyp held the ref span and
    # vice versa). Harmless for the symmetric == comparison, but misleading;
    # assign each span from its matching accessor.
    span_ref = cls.match2tokenspan_ref(_match)
    span_hyp = cls.match2tokenspan_hyp(_match)
    return SpanTool.span2len(span_ref) == SpanTool.span2len(span_hyp)
def gap2valid(span):
    """A gap span is valid iff its substring fully matches the colon pattern."""
    gap_str = SpanTool.list_span2sublist(text_in, span)
    return RegexTool.pattern_str2match_full(cls.pattern_colon(), gap_str)
def data2match_list_digit_2(cls, data):
    """Among the digit matches in ``data``, keep only those spanning exactly 2 chars."""
    def has_len_2(match):
        return SpanTool.span2len(match.span()) == 2

    return lfilter(has_len_2, cls.data2match_list_digit(data))
def entity_1day2is_not_covered(entity_1day):
    """True iff no multiday span covers this single-day entity's span."""
    span_1day = FoxylibEntity.entity2span(entity_1day)
    return not any(
        SpanTool.covers(span_multiday, span_1day)
        for span_multiday in span_list_multiday)