def test_split_job(self, mocker, api, edge_class, next_edge_class, id_field):
    """Splitting a job yields one child job per insight row, with the same interval."""
    period = pendulum.Period(pendulum.Date(2010, 1, 1), pendulum.Date(2010, 1, 10))
    job = InsightAsyncJob(
        api=api,
        edge_object=edge_class(1),
        interval=period,
        params={"time_increment": 1, "breakdowns": []},
    )
    insights = [{id_field: record_id} for record_id in (1, 2, 3)]
    mocker.patch.object(edge_class, "get_insights", return_value=insights)
    child_jobs = job.split_job()
    edge_class.get_insights.assert_called_once()
    assert len(child_jobs) == 3
    assert all(child.interval == job.interval for child in child_jobs)
    # Children are expected to target the next-finer edge class, ids 1..3 in order.
    for record_id, child in enumerate(child_jobs, start=1):
        expected = f"InsightAsyncJob(id=<None>, {next_edge_class(record_id)}, time_range={job.interval}, breakdowns={[]})"
        assert str(child) == expected
def test_less_than_or_equal_false():
    """<= must be False when the left operand is the later date."""
    later = pendulum.Date(2000, 1, 2)
    earlier_pendulum = pendulum.Date(2000, 1, 1)
    earlier_stdlib = date(2000, 1, 1)
    assert not later <= earlier_pendulum
    assert not later <= earlier_stdlib
def test_not_equal_to_true():
    """!= is True for differing dates, against both pendulum and stdlib types."""
    base = pendulum.Date(2000, 1, 1)
    other_pendulum = pendulum.Date(2000, 1, 2)
    other_stdlib = date(2000, 1, 2)
    assert base != other_pendulum
    assert base != other_stdlib
def test_greater_than_true():
    """> is True when the left-hand date is later."""
    later = pendulum.Date(2000, 1, 1)
    earlier_pendulum = pendulum.Date(1999, 12, 31)
    earlier_stdlib = date(1999, 12, 31)
    assert later > earlier_pendulum
    assert later > earlier_stdlib
def test_greater_than_or_equal_true_equal():
    """>= is True for equal dates, against both pendulum and stdlib types."""
    base = pendulum.Date(2000, 1, 1)
    same_pendulum = pendulum.Date(2000, 1, 1)
    same_stdlib = date(2000, 1, 1)
    assert base >= same_pendulum
    assert base >= same_stdlib
def test_equal_to_true():
    """== compares by value against both pendulum.Date and datetime.date."""
    reference = pendulum.Date(2000, 1, 1)
    same_pendulum = pendulum.Date(2000, 1, 1)
    same_stdlib = date(2000, 1, 1)
    assert same_pendulum == reference
    assert same_stdlib == reference
def test_greater_than_or_equal_false():
    """>= must be False when the left operand is the earlier date."""
    earlier = pendulum.Date(2000, 1, 1)
    later_pendulum = pendulum.Date(2000, 1, 2)
    later_stdlib = date(2000, 1, 2)
    assert not earlier >= later_pendulum
    assert not earlier >= later_stdlib
def test_less_than_true():
    """< is True when the left-hand date is earlier."""
    earlier = pendulum.Date(2000, 1, 1)
    later_pendulum = pendulum.Date(2000, 1, 2)
    later_stdlib = date(2000, 1, 2)
    assert earlier < later_pendulum
    assert earlier < later_stdlib
def test_less_than_or_equal_true_equal():
    """<= is True for equal dates, against both pendulum and stdlib types."""
    base = pendulum.Date(2000, 1, 1)
    same_pendulum = pendulum.Date(2000, 1, 1)
    same_stdlib = date(2000, 1, 1)
    assert base <= same_pendulum
    assert base <= same_stdlib
def test_hash():
    """Equal dates hash equal; a different date hashes differently."""
    first = pendulum.Date(2016, 8, 27)
    duplicate = pendulum.Date(2016, 8, 27)
    next_day = pendulum.Date(2016, 8, 28)
    assert hash(duplicate) == hash(first)
    assert hash(first) != hash(next_day)
def test_farthest():
    """farthest() returns whichever candidate lies further from the instance,
    regardless of argument order."""
    pivot = pendulum.Date(2015, 5, 28)
    near = pendulum.Date(2015, 5, 27)
    far = pendulum.Date(2015, 5, 30)
    assert pivot.farthest(near, far) == far
    assert pivot.farthest(far, near) == far
def test_week_of_month():
    """week_of_month is 1-based and advances at each week boundary of the month."""
    cases = [
        ((2012, 9, 30), 5),
        ((2012, 9, 28), 5),
        ((2012, 9, 20), 4),
        ((2012, 9, 8), 2),
        ((2012, 9, 1), 1),
        ((2020, 1, 1), 1),
        ((2020, 1, 7), 2),
        ((2020, 1, 14), 3),
    ]
    for ymd, expected in cases:
        assert pendulum.date(*ymd).week_of_month == expected
def test_is_same_day():
    """is_same_day() compares calendar dates across pendulum and stdlib types."""
    pivot = pendulum.Date(2015, 5, 28)
    next_day = pendulum.Date(2015, 5, 29)
    same_day = pendulum.Date(2015, 5, 28)
    same_day_plain = date(2015, 5, 28)
    next_day_plain = date(2015, 5, 29)
    assert not pivot.is_same_day(next_day)
    assert pivot.is_same_day(same_day)
    assert pivot.is_same_day(same_day_plain)
    assert not pivot.is_same_day(next_day_plain)
def test_str(self, api, account):
    """__str__ shows id, edge object, time range and breakdowns."""
    period = pendulum.Period(pendulum.Date(2010, 1, 1), pendulum.Date(2011, 1, 1))
    job = InsightAsyncJob(
        edge_object=account,
        api=api,
        params={"breakdowns": [10, 20]},
        interval=period,
    )
    expected = f"InsightAsyncJob(id=<None>, {account}, time_range=<Period [2010-01-01 -> 2011-01-01]>, breakdowns=[10, 20])"
    assert str(job) == expected
def test_split_job_smallest(self, mocker, api):
    """Splitting an Ad-level job must raise: Ad is already the finest edge."""
    period = pendulum.Period(pendulum.Date(2010, 1, 1), pendulum.Date(2010, 1, 10))
    job = InsightAsyncJob(
        api=api,
        edge_object=Ad(1),
        interval=period,
        params={"time_increment": 1, "breakdowns": []},
    )
    # The match string mirrors the library's message verbatim (including "splitted").
    with pytest.raises(ValueError, match="The job is already splitted to the smallest size."):
        job.split_job()
def test_is_birthday_backward_compatibility():
    """Regression test for the legacy ``is_birthday`` anniversary semantics.

    Renamed from ``test_is_birthday``: an identically named test defined later
    in this module shadowed this definition, so pytest never collected or ran
    it. The body is unchanged.
    """
    today = pendulum.Date.today()
    an_anniversary = today.subtract(years=1)
    assert an_anniversary.is_birthday()
    not_an_anniversary = today.subtract(days=1)
    assert not not_an_anniversary.is_birthday()
    also_not_an_anniversary = today.add(days=2)
    assert not also_not_an_anniversary.is_birthday()
    # With an explicit reference date, only matching month/day counts.
    d1 = pendulum.Date(1987, 4, 23)
    d2 = pendulum.Date(2014, 9, 26)
    d3 = pendulum.Date(2014, 4, 23)
    assert not d2.is_birthday(d1)
    assert d3.is_birthday(d1)
def test_is_birthday():
    """is_birthday() matches month and day while ignoring the year."""
    today = pendulum.Date.today()
    one_year_ago = today.subtract(years=1)
    assert one_year_ago.is_birthday()
    yesterday = today.subtract(days=1)
    assert not yesterday.is_birthday()
    in_two_days = today.add(days=2)
    assert not in_two_days.is_birthday()
    # Explicit reference date: only a matching month/day is a birthday.
    reference = pendulum.Date(1987, 4, 23)
    other_month = pendulum.Date(2014, 9, 26)
    same_month_day = pendulum.Date(2014, 4, 23)
    assert not other_month.is_birthday(reference)
    assert same_month_day.is_birthday(reference)
def test_datetime_primitives():
    """Exercise the UTC tzinfo shim and the date/time conversion helpers."""
    now = datetime.now()
    tz = UTC()
    assert tz.utcoffset(now) == ZERO
    assert tz.utcoffset(None) == ZERO
    assert tz.tzname(now) == "UTC"
    assert tz.dst(now) == ZERO
    assert tz.dst(None) == ZERO
    wrapped = pendulum.instance(now)
    stripped = naive(wrapped)
    assert stripped == now
    assert type(stripped) == type(wrapped)  # use pendulum naive type
    utc_now = utcnow()
    assert utc_now.tz == utc_now.in_timezone("UTC").tz
    local_now = localnow()
    plain = vanilla(local_now)
    assert pendulum.instance(plain) == local_now
    tod = parse_time_of_day("2015-01-01 12:34:56")
    assert str(tod) == "12:34:56"
    combined = combine_date_and_time(pendulum.Date(2017, 1, 1), tod)
    assert str(combined) == "2017-01-01T12:34:56+00:00"
    delta = sqlbagrelativedelta(days=5, weeks=6, months=7)
    assert str(delta) == "7 months 47 days"
def _parse_header(self, string):
    """Parse a transaction header line.

    Returns a dict with primary_date, status, code, description, comment and
    tags, or ``None`` when *string* does not match PAT_TRANSACTION_DATA.
    """
    match = PAT_TRANSACTION_DATA.match(string)
    if match is None:
        return None
    primary_date = pendulum.Date(
        int(match.group('year')), int(match.group('month')), int(match.group('day')))
    status = match.group('state') or None
    code = match.group('code') or None
    description = self._remove_comment(match.group('payee')) or None
    tags = {}
    # An inline comment may trail the header; tags live inside that comment.
    comment = self._parse_comments(match.string)
    if comment is not None:
        tags = self._parse_tags(comment)
        # Strip each recognised 'tag:value' pair out of the comment text...
        for tag, value in tags.items():
            comment = comment.replace(f'{tag}:{value}', '')
        # ...and drop the separator artefacts that stripping leaves behind.
        comment = comment.replace(' , ', '')
    return dict(primary_date=primary_date, status=status, code=code,
                description=description, comment=comment, tags=tags)
def test_all_at_date(conn: connection):
    """all_at_date() reports every achievement earned on the journey's start day."""
    query_date = pendulum.Date(2000, 1, 1)
    journey_id = test_journey.insert_journey_data(conn)
    journey.queries.start_journey(conn, journey_id=journey_id, date=query_date)
    steps_data = [
        {"gargling_id": gargling_id, "amount": amount}
        for gargling_id, amount in ((6, 1778), (2, 1152), (3, 638), (5, 11))
    ]
    journey.store_steps(conn, steps_data, journey_id, query_date)
    achv = achievements.all_at_date(conn=conn, date=query_date)
    expected = (
        "Flest skritt gått av en gargling på én dag: 1778 skritt - name6 :first_place_medal: (1.1.2000)\n"
        "Nest flest skritt gått av en gargling på én dag: 1152 skritt - name2 :second_place_medal: (1.1.2000)\n"
        "Tredje flest skritt gått av en gargling på én dag: 638 skritt - name3 :third_place_medal: (1.1.2000)\n"
        "Flest skritt gått av hele gargen på én dag: 3579 skritt :trophy: - 1.1.2000\n"
        "Størst andel av dagens skritt: 50 % - name6 :sports_medal: (1.1.2000)\n"
        "Lengste streak med førsteplasser: 1 dager - name6 :sports_medal: (1.1.2000)"
    )
    assert achv == expected
def test_most_steps_one_day_individual_some_data(conn: connection):
    """extract() yields None for this query even though step data exists."""
    query_date = pendulum.Date(2000, 1, 1)
    journey_id = test_journey.insert_journey_data(conn)
    steps_data = [
        {"gargling_id": gargling_id, "amount": amount}
        for gargling_id, amount in ((6, 1778), (2, 1152), (3, 638), (5, 11))
    ]
    journey.store_steps(conn, steps_data, journey_id, query_date)
    achv = achievements.extract(
        query=queries.most_steps_one_day_individual,
        conn=conn,
        journey_id=journey_id,
        date=query_date,
        less_than=None,
    )
    assert achv is None
def get_month_info(self, year, month):
    '''
    Compute, per device, which maintenance-plan step is due in the given month.

    :param year: target year
    :param month: target month
    :return: dict mapping device id -> {"offset_num", "year", "month"}
    '''
    config_datas = self.env["metro_park_maintenance.plan_config_data"]\
        .search([], order="index asc")
    # Devices that already carry repair-cycle information
    records = self.search([])
    rst = {}
    target = pendulum.date(year, month, 1)
    for record in records:
        offset_month = pendulum.date(record.year, record.month, 1)
        delta = target - offset_month
        # NOTE(review): Period.months is only the month *component* of the
        # delta; for spans longer than a year this undercounts — presumably
        # delta.in_months() was intended. TODO confirm before changing.
        delta_month = delta.months
        # Advance the stored offset by the elapsed months, wrapping around
        # the configured plan sequence.
        offset_num = (record.offset_num + delta_month) % len(config_datas)
        rst[record.dev.id] = {
            "offset_num": offset_num,
            "year": record.year,
            "month": record.month
        }
    # Devices with no stored cycle default to the first plan step.
    dev_type_electric_train = self.env.ref(
        'metro_park_base.dev_type_electric_train')
    devs = self.env["metro_park_maintenance.train_dev"]\
        .search([('dev_type', '=', dev_type_electric_train.id)])
    for dev in devs:
        if dev.id not in rst:
            # Bug fix: this entry previously used the key "offset",
            # inconsistent with "offset_num" written above, so consumers
            # reading "offset_num" never saw the default.
            rst[dev.id] = {"offset_num": 0, "year": year, "month": month}
    return rst
def test_start_two_journeys_fails(conn):
    """Starting a second journey on the same date violates the uniqueness constraint."""
    first_id = insert_journey_data(conn)
    second_id = insert_journey_data(conn)
    start_date = pendulum.Date(2013, 3, 31)
    journey.queries.start_journey(conn, journey_id=first_id, date=start_date)
    with pytest.raises(psycopg2.errors.UniqueViolation):
        journey.queries.start_journey(conn, journey_id=second_id, date=start_date)
def test_store_steps_twice_fails(conn: connection):
    """Storing the same steps for the same date twice violates uniqueness."""
    journey_id = insert_journey_data(conn)
    steps_date = pendulum.Date(2013, 3, 31)
    steps_data, _ = example_activity_data()
    journey.store_steps(conn, steps_data, journey_id, steps_date)
    with pytest.raises(psycopg2.errors.UniqueViolation):
        journey.store_steps(conn, steps_data, journey_id, steps_date)
def job_fixture(api, account):
    """Build an InsightAsyncJob over a single-day period for use in tests."""
    request_params = {
        "level": "ad",
        "action_breakdowns": [],
        "breakdowns": [],
        "fields": ["field1", "field2"],
        "time_increment": 1,
        "action_attribution_windows": [],
    }
    single_day = pendulum.Period(pendulum.Date(2019, 1, 1), pendulum.Date(2019, 1, 1))
    return InsightAsyncJob(edge_object=account, api=api, interval=single_day, params=request_params)
def test_fitbit_steps(conn: connection):
    """steps() extracts the expected per-user count from a stubbed API payload."""
    user_a, user_b = fitbit_users(conn)
    payload_a = {
        "activities-steps": [
            {"dateTime": "2020-01-01", "value": "13475"},
            {"dateTime": "2019-12-29", "value": "1"},
        ]
    }
    payload_b = {
        "activities-steps": [
            {"dateTime": "2020-01-01", "value": "13474"},
            {"dateTime": "2020-01-02", "value": "86"},
        ]
    }
    # Stub out the network calls; each user returns a fixed payload.
    user_a._steps_api_call = lambda date: payload_a  # type: ignore
    user_b._steps_api_call = lambda date: payload_b  # type: ignore
    query_date = pendulum.Date(2020, 1, 2)
    result = [user.steps(query_date) for user in (user_a, user_b)]
    assert result == [13475, 13474]
def test_farthest_with_date():
    """farthest() accepts plain datetime.date args and returns pendulum.Date."""
    pivot = pendulum.Date(2015, 5, 28)
    near = date(2015, 5, 27)
    far = date(2015, 5, 30)
    result = pivot.farthest(near, far)
    assert isinstance(result, pendulum.Date)
    assert_date(result, 2015, 5, 30)
def test_fitbit_body_no_data(conn: connection):
    """body() maps empty weight/fat API series to all-None entries."""
    user_a, user_b = fitbit_users(conn)
    # Each user gets its own payload objects, mirroring the real API stubs.
    weight_a: dict[str, list] = {"weight": []}
    fat_a: dict[str, list] = {"fat": []}
    user_a._weight_api_call = lambda date: weight_a  # type: ignore
    user_a._bodyfat_api_call = lambda date: fat_a  # type: ignore
    weight_b: dict[str, list] = {"weight": []}
    fat_b: dict[str, list] = {"fat": []}
    user_b._weight_api_call = lambda date: weight_b  # type: ignore
    user_b._bodyfat_api_call = lambda date: fat_b  # type: ignore
    empty_entry = {"elapsed": None, "fat": None, "weight": None}
    query_date = pendulum.Date(2020, 1, 2)
    data = [user.body(query_date) for user in (user_a, user_b)]
    assert data == [empty_entry, empty_entry]
def test_activity(conn):
    """Smoke test: health.activity runs with one steps-enabled and one disabled user."""
    enabled_user = conftest.users[0]
    test_fitbit.register_user(enabled_user, conn, enable_steps=True)
    disabled_user = conftest.users[1]
    test_fitbit.register_user(disabled_user, conn, enable_steps=False)
    health.activity(conn, pendulum.Date(2020, 1, 2))
def get_plan_data(self, year_plan_id):
    '''
    Fetch plan data for a year plan: the whole year's date range plus, per
    device, the scheduled repair entries (start/end/content).

    :param year_plan_id: id of the year-plan record to read
    :return: dict with 'year', 'start_date', 'end_date' (YYYY-MM-DD strings)
             and 'groups' mapping device id -> list of entry dicts
    '''
    record = self.browse(year_plan_id)
    year = record.year
    # Compute the year's start and end dates as formatted strings.
    start_date = pendulum.date(year, 1, 1).format('YYYY-MM-DD')
    end_date = pendulum.Date(year, 12, 31).format('YYYY-MM-DD')
    # Fetch all plan-data records for this year.
    model = self.env['metro_park_maintenance.plan_data']
    records = model.search([('year', '=', year)])
    # Cache the regular repair rules by id to avoid repeated lookups.
    rule_model = self.env['metro_park_maintenance.repair_rule']
    rules = rule_model.search([])
    rule_cache = {rule.id: rule for rule in rules}
    # Cache the temporary (ad-hoc) repair rules by id as well.
    tmp_rule_model = self.env['metro_park_maintenance.repair_tmp_rule']
    tmp_rules = tmp_rule_model.search([])
    tmp_rule_cache = {rule.id: rule for rule in tmp_rules}
    rst = {
        'year': year,
        'start_date': start_date,
        'end_date': end_date,
    }
    # Assemble per-device groups of schedule entries.
    groups = {}
    for record in records:
        if len(record.rule_infos) > 0:
            start_time = pendulum.parse(str(record.date))
            rule_infos = record.rule_infos
            for info in rule_infos:
                if info["rule_type"] == "normal":
                    # Regular rule: the end date is start + the rule's
                    # configured repair duration in days.
                    rule = rule_cache[info["rule_id"]]
                    repair_days = rule.repair_days
                    end_time = start_time.add(days=repair_days)
                    groups.setdefault(record.dev.id, []).append({
                        'rule_type': info["rule_type"],
                        'start': start_time.format('YYYY-MM-DD'),
                        'end': end_time.format('YYYY-MM-DD'),
                        'content': rule.content
                    })
                else:
                    # Temporary rule: it carries its own explicit date range.
                    rule = tmp_rule_cache[info["rule_id"]]
                    groups.setdefault(record.dev.id, []).append({
                        'rule_type': info["rule_type"],
                        'start': rule.start_date.format('YYYY-MM-DD'),
                        'end': rule.end_date.format('YYYY-MM-DD'),
                        'content': rule.content
                    })
    rst['groups'] = groups
    return rst