def test_add_timedelta():
    """Adding 65 minutes shifts the epoch by 65*60 seconds and keeps the tz."""
    start = MyTime(
        year=2018, month=4, day=5, hour=8, minute=0,
        tzinfo=timezone("US/Pacific"),
    )
    shifted = start.add_timedelta(minutes=65)
    assert shifted.epoch == start.epoch + 60 * 65
    assert str(shifted.tzinfo) == "US/Pacific"
def test_invalid_initializtion():
    """MyTime must reject ambiguous mixes of components, dt, and epoch."""
    pacific = timezone('US/Pacific')
    # Components + epoch is ambiguous.
    with pytest.raises(ValueError):
        MyTime(year=2018, month=4, day=5, hour=8, minute=0,
               tzinfo=pacific, epoch=12345)
    # Components + dt is ambiguous.
    with pytest.raises(ValueError):
        MyTime(year=2018, month=4, day=5, hour=8, minute=0,
               tzinfo=pacific, dt=datetime.now())
    # Components + dt + epoch is ambiguous.
    with pytest.raises(ValueError):
        MyTime(year=2018, month=4, day=5, hour=8, minute=0,
               tzinfo=pacific, dt=datetime.now(), epoch=1234)
    # dt + epoch alone is also ambiguous.
    with pytest.raises(ValueError):
        MyTime(dt=datetime.now(), epoch=1234)
def test_pst_to_utc_conversion():
    """08:00 UTC converted to PST matches midnight PST of the same day."""
    m = MyTime(year=2018, month=1, day=1, hour=8)
    m = m.to_pst()
    m_in_pst = MyTime(
        year=2018, month=1, day=1, hour=0, tzinfo=timezone('US/Pacific'))
    print(f'm: {m}, pst: {m_in_pst}')
    assert m.epoch == m_in_pst.epoch
def test_alb_producer():
    """alb.main should parse ELB access logs into the expected DDB items.

    Bug fix: the final comparison used to be `return`ed — pytest ignores a
    test's return value, so the expected-items check never actually ran.
    It is now an assert.
    """
    parsed_data_bucket = 'sr-oss-dummy-data'
    apps = ['myELB']
    mytime = MyTime(
        year=2018, month=6, day=21, hour=14, minute=7, second=0,
        microsecond=0, tzinfo='US/Pacific')
    mytime = mytime.to_utc()
    access_logs_bucket = 'sr-oss-dummy-data/AWSLogs/123456789/elasticloadbalancing/us-east-1/'
    testing = {'put_file': False}
    ddb_items, identifier = alb.main(
        mytime=mytime,
        bucket_w_logs=access_logs_bucket,
        apps=apps,
        parsed_data_bucket=parsed_data_bucket,
        testing=testing)
    print('--ddb-items--')
    assert len(ddb_items) > 0
    # NOTE(review): the s3_key date (2018/05/06) does not match mytime
    # (2018-06-21) — confirm the expected fixture is still current.
    assert ddb_items == [{
        'app': 'myELB',
        'num_uris': 5,
        's3_key': 'myELB/2018/05/06/06/1525589940',
        'timestamp': 1525589940
    }]
def test_obj_creation_and_comparison_from_epoch():
    """MyTime objects built from epochs compare chronologically."""
    earlier_epoch = 1517314953
    later_epoch = 1517320293
    earlier = MyTime(epoch=earlier_epoch)
    later = MyTime(epoch=later_epoch)
    print(earlier, later)
    assert earlier < later
def test_set_seconds_to_zero():
    """set_seconds_to_zero keeps tz and epoch when seconds are already zero."""
    original = MyTime(
        year=2018, month=1, day=2, hour=0, tzinfo=timezone('US/Pacific'))
    zeroed = original.set_seconds_to_zero()
    assert zeroed.second == 0
    assert str(zeroed.tzinfo) == 'US/Pacific'
    assert original.epoch == zeroed.epoch
def test_init_from_replay_start_time():
    """A 'Y-m-d-H-M' replay start time parses to the expected epoch."""
    # epoch time for 2018-2-5, 6:15PM PST is 1517883300
    mytime = MyTime.set_to_replay_start_time_env_var(
        "2018-2-5-18-15", timezone('US/Pacific'))
    print(mytime)
    print(mytime.epoch)
    assert mytime.epoch == 1517883300
def test_orchestrator_lambda_handler(monkeypatch):
    """orchestrator_past.lambda_handler should honor test_params defaults.

    Cleanup: the old `if monkeypatch:` guard was dead code — pytest's
    monkeypatch fixture object is always truthy — so the setattr now runs
    unconditionally.
    """
    defaults = {
        "apps_to_test": ["test-app1"],
        "test_params": {
            "rate": 23,
            "loop_duration": 19,
            "replay_start_time": "2018-6-18-17-06",
            "base_url": "http://shadowreader.example.com",
            "identifier": "qa",
        },
        "overrides": [{
            "app": "test-app1",
            "rate": 50,
            "loop_duration": 19,
            "replay_start_time": "2018-6-18-17-06",
            "base_url": "http://shadowreader.example.com",
            "identifier": "qa",
        }],
        "timezone": "US/Pacific",
    }
    # Disable plugin lookups so the handler runs without external config.
    monkeypatch.setattr("utils.conf.sr_plugins.exists", lambda x: False)
    cur_params, consumer_event = orchestrator_past.lambda_handler(defaults, {})
    timestamp = consumer_event["cur_timestamp"]
    mytime = MyTime.set_to_replay_start_time_env_var(
        defaults["test_params"]["replay_start_time"], timezone("US/Pacific"))
    rate = cur_params["test_params"]["rate"]
    assert rate == 23 and timestamp >= mytime.epoch
    assert consumer_event["app"] == defaults["apps_to_test"][0]
def test_strip_timezone_from_isoformat():
    """ISO 8601 times with a UTC-offset suffix have the offset removed.

    Bug fix: the list literal was missing a comma, so Python's implicit
    string-literal concatenation merged the two timestamps into a single
    malformed string — only one bogus case was ever exercised. Each
    timestamp is now its own element.
    """
    times = [
        "2018-08-03T08:30:00.00011+10:00",
        "2018-08-03T13:52:19.235608-07:00",
    ]
    for time in times:
        time_stripped = MyTime._strip_timezone_from_isoformat(time)
        # The "+HH:MM"/"-HH:MM" offset suffix is exactly 6 characters.
        assert time[:-6] == time_stripped
def _generate_s3_key(mytime: MyTime, elb_logs_path: str) -> str:
    """Append a UTC 'YYYY/MM/DD' date prefix to the ELB logs path."""
    prefix = _generate_key_prefix_base_on_time(
        mytime=mytime.to_utc(),
        time_format="%Y/%m/%d",
    )
    return f"{elb_logs_path}/{prefix}"
def test_init_apps_from_test_params():
    """Three apps are built from shared test_params when overrides is empty."""
    defaults = {
        'apps_to_test': ['test-app1', 'test-app2', 'test-app3'],
        'test_params': {
            "rate": 100,
            "loop_duration": 60,
            "replay_start_time": "2018-3-20-16-00",
            "base_url": "http://shadowreader.example.com",
            "identifier": "qa",
        },
        "overrides": [],
        'timezone': 'US/Pacific'
    }
    apps, test_params = orchestrator.init_apps_from_test_params(defaults)
    expected = App(
        name='test-app1',
        replay_start_time=MyTime().from_epoch(epoch=1521586800, tzinfo='US/Pacific'),
        rate=100,
        base_url='http://shadowreader.example.com',
        identifier='qa',
        loop_duration=60,
        baseline=0)
    assert apps[0] == expected and len(apps) == 3
def test_init_apps_from_test_params_w_replay_end_time():
    """loop_duration is derived from replay_end_time when not given."""
    defaults = {
        "apps_to_test": ["test-app1", "test-app2", "test-app3"],
        "test_params": {
            "rate": 100,
            "replay_start_time": "2018-08-29T10:30",
            "replay_end_time": "2018-08-30T12:31",
            "base_url": "http://shadowreader.example.com",
            "identifier": "qa",
        },
        "overrides": [],
        "timezone": "Japan",
    }
    apps, test_params = orchestrator.init_apps_from_test_params(defaults)
    expected = App(
        name="test-app1",
        replay_start_time=MyTime(
            year=2018, month=8, day=29, hour=10, minute=30, tzinfo="Japan"),
        rate=100,
        base_url="http://shadowreader.example.com",
        identifier="qa",
        loop_duration=1561,  # minutes between 10:30 and next-day 12:31
        baseline=0,
    )
    assert apps[0] == expected
    assert len(apps) == 3
    assert expected.replay_start_time == apps[0].replay_start_time
def test_init_apps_from_test_params_w_isoformat():
    """ISO 8601 replay_start_time values are accepted by the orchestrator."""
    defaults = {
        "apps_to_test": ["test-app1", "test-app2", "test-app3"],
        "test_params": {
            "rate": 100,
            "loop_duration": 60,
            "replay_start_time": "2018-08-02T00:30",
            "base_url": "http://shadowreader.example.com",
            "identifier": "qa",
        },
        "overrides": [],
        "timezone": "US/Pacific",
    }
    apps, test_params = orchestrator.init_apps_from_test_params(defaults)
    expected = App(
        name="test-app1",
        replay_start_time=MyTime().from_epoch(epoch=1533195000, tzinfo="US/Pacific"),
        rate=100,
        base_url="http://shadowreader.example.com",
        identifier="qa",
        loop_duration=60,
        baseline=0,
    )
    assert apps[0] == expected and len(apps) == 3
    assert expected.replay_start_time == apps[0].replay_start_time
def _batch_lines_by_timestamp(lines: list, payload: dict, app: str) -> dict:
    """Group log lines under payload[app], keyed by minute-truncated epoch.

    Robustness fix: the original appended to payload[app][epoch] without
    ever creating the list, which raises KeyError when payload[app] is a
    plain dict (the sibling copy of this function guards against exactly
    that). setdefault covers both plain dicts and defaultdicts.
    """
    for line in lines:
        epoch = line["timestamp"].timestamp()
        mytime = MyTime.from_epoch(epoch=epoch, tzinfo="UTC").set_seconds_to_zero()
        payload[app].setdefault(mytime.epoch, []).append(line)
    return payload
def main(**kwargs):
    """Build the S3 key for an app's parsed data at the given timestamp."""
    epoch = kwargs.get("app_cur_timestamp", None)
    app = kwargs.get("app_name", "")
    mytime = MyTime.from_epoch(epoch=epoch, tzinfo="UTC")
    return s3._generate_s3_key(mytime=mytime, app_name=app)
def determine_replay_time_window(params: dict, tzinfo: str) -> Tuple[int, MyTime]:
    """Return (replay duration in minutes, replay start time).

    Duration comes from either an explicit loop_duration or the span
    between replay_start_time and replay_end_time; raises
    InvalidTestParametersError when neither is supplied.
    """
    start = MyTime.set_to_replay_start_time_env_var(
        params["replay_start_time"], tzinfo)
    if "loop_duration" in params:
        duration = params["loop_duration"]
    elif "replay_end_time" in params:
        end = MyTime.set_to_replay_start_time_env_var(
            params["replay_end_time"], tzinfo)
        # Whole minutes between start and end.
        duration = int((end.dt - start.dt).total_seconds() // 60)
    else:
        raise InvalidTestParametersError(
            "Must set either loop_duration or replay_end_time in test_params")
    return duration, start
def main(**kwargs):
    """Build the S3 key for an app's parsed data at the given timestamp."""
    epoch = kwargs.get('app_cur_timestamp', None)
    app = kwargs.get('app_name', '')
    mytime = MyTime.from_epoch(epoch=epoch, tzinfo='UTC')
    return s3._generate_s3_key(mytime=mytime, app_name=app)
def test_validate_base_url():
    """A bare host with a trailing slash is normalized to an https URL."""
    # NOTE(review): a sibling copy of this test expects "http://" instead of
    # "https://" — confirm which scheme App is meant to default to.
    app = App(
        name='test',
        replay_start_time=MyTime(epoch=1522091259),
        loop_duration=60,
        base_url='shadowreader.example.com/',
        rate=100,
        identifier='qa',
        baseline=100)
    assert app.base_url == 'https://shadowreader.example.com'
def fetch_file_names_on_s3(bucket: str, key_prefix: str, mytime: MyTime,
                           time_offset: int):
    """List files under key_prefix, keeping those in [mytime-offset, mytime]."""
    all_files = _list_folder_contents(bucket, key_prefix)
    print('# files pre filter:', len(all_files))
    window_start = mytime.add_timedelta(minutes=-time_offset)
    window_end = mytime
    filtered = _filter_files_on_time(all_files, window_start, window_end)
    return filtered, window_start
def _batch_lines_by_timestamp(lines: list, payload: dict, app: str) -> dict:
    """Group log lines under payload[app], keyed by minute-truncated epoch.

    Idiom fix: replaced the non-idiomatic `if not key in d` check-then-init
    with dict.setdefault, which does the same thing in one step.
    """
    for line in lines:
        epoch = line['timestamp'].timestamp()
        mytime = MyTime.from_epoch(
            epoch=epoch, tzinfo='UTC').set_seconds_to_zero()
        payload[app].setdefault(mytime.epoch, []).append(line)
    return payload
def test_app_str():
    """str(App) renders every field in the documented single-line format."""
    app = App(
        name='test',
        replay_start_time=MyTime(epoch=1522091259),
        loop_duration=60,
        base_url='http://shadowreader.example.com',
        rate=100,
        identifier='qa',
        baseline=100)
    expected = 'App(name="test", replay_start_time=2018-03-26 19:07:39 UTC, loop_duration=60, base_url="http://shadowreader.example.com", identifier="qa", rate=100, cur_timestamp=1522091259, baseline=100)'
    assert str(app) == expected
def test_put_metric():
    """cw._put_metric should return an HTTP 200 response."""
    now = MyTime()
    resp = cw._put_metric(
        namespace="sr",
        metric_name="pytest",
        dimensions=[],
        timestamp=now.epoch,
        val=1,
        storage_resolution=60,
    )
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
def test_app_init():
    """App stores its fields; identifier falls back to base_url when omitted."""
    mytime = MyTime()
    base_url = 'http://shadowreader.example.com'
    app = App(
        name='test',
        replay_start_time=mytime,
        loop_duration=60,
        base_url=base_url,
        rate=100,
        baseline=0)
    assert app.name == 'test' and app.loop_duration == 60 and app.base_url == base_url
    assert app.cur_timestamp == mytime.epoch and app.rate == 100 and app.identifier == base_url
    assert app.baseline == 0
def put_payload_on_s3(*, payload: dict, bucket: str, mytime: MyTime):
    """Write each app's payload to S3 and return matching DynamoDB items."""
    # Logs are attributed to the previous minute.
    write_time = mytime.add_timedelta(minutes=-1)
    ddb_items = []
    for app, data in payload.items():
        s3_key = put_on_s3(data, write_time, app, bucket)
        ddb_items.append({
            'timestamp': write_time.epoch,
            's3_key': s3_key,
            'app': app,
            'num_uris': len(data)
        })
    return ddb_items
def put_payload_on_s3(*, payload: dict, bucket: str, mytime: MyTime):
    """Write each app's payload to S3 and return matching DynamoDB items."""
    # Logs are attributed to the previous minute.
    write_time = mytime.add_timedelta(minutes=-1)
    ddb_items = []
    for app, data in payload.items():
        s3_key = put_on_s3(data, write_time, app, bucket)
        ddb_items.append({
            "timestamp": write_time.epoch,
            "s3_key": s3_key,
            "app": app,
            "num_uris": len(data),
        })
    return ddb_items
def test_put_lambda_metric_w_app_and_env_to_test():
    """The lambda metric helper should return an HTTP 200 response."""
    resp = cw.put_lambda_metric_w_app_and_env_to_test(
        "pytest",
        sr_stage="local",
        lambda_name="pytest",
        app="pytest",
        identifier="local",
        mytime=MyTime(),
        val=1,
    )
    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
def test_validate_base_url():
    """A bare host with a trailing slash is normalized to an http URL."""
    # NOTE(review): a sibling copy of this test expects "https://" instead of
    # "http://" — confirm which scheme App is meant to default to.
    app = App(
        name="test",
        replay_start_time=MyTime(epoch=1522091259),
        loop_duration=60,
        base_url="shadowreader.example.com/",
        rate=100,
        identifier="qa",
        baseline=100,
    )
    assert app.base_url == "http://shadowreader.example.com"
def test_init_from_replay_start_time_w_isoformat():
    """Every supported ISO 8601 variant parses to the same epoch.

    Target instant: Thursday, August 2, 2018 12:30:00 AM GMT-07:00 DST.
    """
    replay_start_times = [
        "2018-08-02T00:30",
        "2018-08-02T00:30:40",
        "2018-08-02T00:30:40.873460",
        "2018-08-02T00:30:40.873460-07:00",
        "2018-08-02T00:30:40.873Z",
        "2018-08-02T00:30:40.873+10:00",
    ]
    for start in replay_start_times:
        parsed = MyTime.set_to_replay_start_time_env_var(
            start, timezone("US/Pacific"))
        assert parsed.epoch == 1533195000
def test_main():
    """replay_live.main turns a Zurich-local start time into a UTC S3 key."""
    lambda_start_time = MyTime(
        year=2018,
        month=10,
        day=1,
        hour=10,
        minute=31,
        second=31,
        tzinfo="Europe/Zurich",
    )
    key = replay_live.main(
        lambda_start_time=lambda_start_time, app_name="myELB")
    assert key == "myELB/2018/10/01/08/1538382180"
def test_main():
    """replay_past.main turns a Zurich-local epoch into a UTC S3 key."""
    app_cur_timestamp = MyTime(
        year=2018,
        month=10,
        day=1,
        hour=10,
        minute=31,
        second=31,
        tzinfo="Europe/Zurich",
    ).epoch
    key = replay_past.main(
        app_cur_timestamp=app_cur_timestamp, app_name="myELB")
    assert key == "myELB/2018/10/01/08/1538382691"