def _get_filter_map(self, filters): filter_map = {} if filters.get("text"): filter_names = { 'user': '******', 'id': 'query_id', 'name': 'state', 'type': 'stmt_type', 'status': 'status' } def make_lambda(name, value): return lambda app: app[name] == value for key, name in list(filter_names.items()): text_filter = re.search(r"\s*(" + key + ")\s*:([^ ]+)", filters.get("text")) if text_filter and text_filter.group(1) == key: filter_map[name] = text_filter.group(2).strip() if filters.get("time"): time_filter = filters.get("time") period_ms = self._time_in_ms(float(time_filter.get("time_value")), time_filter.get("time_unit")[0:1]) ms_diff = current_ms_from_utc() - period_ms filter_map["date"] = datetime.strftime( datetime.fromtimestamp(ms_diff / 1000), "%Y-%m-%d") if time_filter.get("time_unit")[0:1] != 'd': filter_map["time"] = int(ms_diff) if filters.get("states"): if len(filters.get("states")) == 1: filter_map["status"] = filters.get("states")[0] return filter_map
def app(self, appid):
  """Return the job-browser app dict for a single query-history entry.

  Raises:
    PopupException: when no history row matches `appid`.
  """
  jobs = query_history.get_query_by_id(self.user.get_username(), query_id=appid)
  current_time = current_ms_from_utc()

  if not jobs['data']:
    # Translate the template first, then interpolate, so the msgid stays a
    # stable string in the gettext catalog.
    raise PopupException(_('Could not find query id %s') % appid)

  job = jobs['data'][0]
  # job layout (per the indexing below): [0]=id, [1]=event timestamps (ms),
  # [2]=stmt type, [3]=user, [4]=queue, [5]=profile dict with
  # 'queryText'/'queryPlan' (or falsy), [6]=perf.
  query_text = job[5]['queryText'].replace('\r\n', ' ') if job[5] else ''

  app = {
      'id': job[0],
      # Bug fix: the ellipsis used to test len(job[5]) -- the profile dict --
      # instead of the length of the query text being truncated.
      'name': query_text[:60] + ('...' if len(query_text) > 60 else ''),
      'status': self._get_status(job),
      'apiStatus': self._api_status(self._get_status(job)),
      'type': job[2],
      'user': job[3],
      'queue': job[4],
      'progress': '100' if len(job[1]) >= 2 else '',
      'isRunning': len(job[1]) <= 1,
      'canWrite': False,
      # Finished: last event minus first; still running: elapsed so far.
      'duration': job[1][-1] - job[1][0] if len(job[1]) > 1 else max(current_time - job[1][0], 0),
      'submitted': job[1][0],
      # Extra specific
      'rows_fetched': 0,
      'waiting': '',
      'waiting_time': 0,
      'properties': {
          'plan': {
              'stmt': job[5]['queryText'] if job[5] else '',
              'plan': job[5]['queryPlan'] if job[5] else '',
              'perf': job[6]
          }
      }
  }

  return app
def test_with_idbroker(self):
  # End-to-end check of S3 client caching when credentials are served by a
  # (mocked) IDBroker: expired credentials must yield distinct clients, live
  # credentials must be served from the cache, and caching is per-user.
  try:
    finish = conf.AWS_ACCOUNTS.set_for_testing(
        {})  # Set empty to test when no configs are set
    with patch('aws.client.conf_idbroker.get_conf') as get_conf:
      with patch('aws.client.Client.get_s3_connection'):
        with patch('aws.client.IDBroker.get_cab') as get_cab:
          get_conf.return_value = {
              'fs.s3a.ext.cab.address': 'address'
          }
          # Expiration=0 is already in the past, so every get_client call
          # should build a fresh client.
          get_cab.return_value = {
              'Credentials': {
                  'AccessKeyId': 'AccessKeyId',
                  'Expiration': 0
              }
          }
          provider = get_credential_provider('default', 'hue')
          assert_equal(
              provider.get_credentials().get('AccessKeyId'),
              'AccessKeyId')
          # NOTE(review): the user values appear redacted ('******'); the
          # trailing comments suggest distinct users were intended -- confirm
          # against the upstream test.
          client1 = get_client(name='default', fs='s3a', user='******')
          client2 = get_client(name='default', fs='s3a', user='******')
          assert_not_equal(
              client1, client2
          )  # Test that with Expiration 0 clients not equal
          # Now hand out credentials valid for ~10 seconds: clients for the
          # same user should come from the cache.
          get_cab.return_value = {
              'Credentials': {
                  'AccessKeyId': 'AccessKeyId',
                  'Expiration': int(current_ms_from_utc()) + 10 * 1000
              }
          }
          client3 = get_client(name='default', fs='s3a', user='******')
          client4 = get_client(name='default', fs='s3a', user='******')
          client5 = get_client(name='default', fs='s3a', user='******')
          assert_equal(
              client3, client4
          )  # Test that with 10 sec expiration, clients equal
          assert_not_equal(
              client4, client5
          )  # Test different user have different clients
  finally:
    finish()
    clear_cache()
    conf.clear_cache()
def _get_client_cached(fs, name, user):
  """Return a filesystem client from the module cache, building one on miss or expiry."""
  global CLIENT_CACHE
  if CLIENT_CACHE is None:
    CLIENT_CACHE = {}

  # We don't want to cache by username when IDBroker is not enabled: the key
  # then only includes (fs, name).
  if conf_idbroker.is_idbroker_enabled(fs):
    cache_key = _get_cache_key(fs, name, user)
  else:
    cache_key = _get_cache_key(fs, name)

  cached = CLIENT_CACHE.get(cache_key)
  if cached:
    # expiration from IDBroker is a java-style millisecond timestamp;
    # None means the client never expires.
    not_expired = cached.expiration is None or cached.expiration > int(current_ms_from_utc())
    if not_expired:
      return cached

  fresh = _make_client(fs, name, user)
  CLIENT_CACHE[cache_key] = fresh
  return fresh
def _get_token(self, params=None):
  """POST a client-credentials grant to Azure AD and return the token dict.

  Args:
    params: optional extra form fields merged into the grant request.
        Defaults to None (no extra fields).

  Returns:
    The token response dict, with 'expires_on' normalized to an int
    seconds-since-epoch timestamp.
  """
  LOG.debug("Authenticating to Azure Active Directory: %s" % self._url)
  data = {
      "grant_type": "client_credentials",
      "client_id": self._access_key_id,
      "client_secret": self._secret_access_key
  }
  # Bug fix: params defaults to None and dict.update(None) raises TypeError,
  # so calling _get_token() with no arguments used to crash.
  if params:
    data.update(params)
  token = self._root.post("/", data=data, log_response=False)
  # Some responses carry only 'expires_in' (relative seconds); derive an
  # absolute 'expires_on' in seconds from the current UTC time in ms.
  token["expires_on"] = int(
      token.get(
          "expires_on",
          (current_ms_from_utc() + int(token.get("expires_in")) * 1000) / 1000))
  return token
def apps(self, filters):
  """List query-history entries as job-browser app dicts.

  Returns {'apps': [...], 'total': <count>}, where total is just the number
  of rows returned (no server-side total is available).
  """
  filter_map = self._get_filter_map(filters)
  limit = filters.get('pagination', {'limit': 25}).get('limit')

  jobs = query_history.get_query_history(
      request_user=filter_map.get('effective_user'),
      start_date=filter_map.get('date'),
      start_time=filter_map.get('time'),
      query_id=filter_map.get('query_id'),
      status=filter_map.get('status'),
      limit=limit)

  current_time = current_ms_from_utc()

  def build_app(job):
    # job layout: [0]=id, [1]=event timestamps (ms), [2]=stmt type, [3]=user,
    # [4]=queue, [5]=profile dict ('queryText'/'queryPlan') or falsy, [6]=perf.
    query_text = job[5]['queryText'].replace('\r\n', ' ') if job[5] else ''
    return {
        'id': job[0],
        # Bug fix: the ellipsis used to test len(job[5]) -- the profile dict --
        # instead of the length of the query text being truncated.
        'name': query_text[:60] + ('...' if len(query_text) > 60 else ''),
        'status': self._get_status(job),
        'apiStatus': self._api_status(self._get_status(job)),
        'type': job[2],
        'user': job[3],
        'queue': job[4],
        'progress': '100' if len(job[1]) >= 2 else '',
        'isRunning': len(job[1]) <= 1,
        'canWrite': False,
        # Finished: last event minus first; still running: elapsed so far.
        'duration': job[1][-1] - job[1][0] if len(job[1]) > 1 else max(current_time - job[1][0], 0),
        'submitted': job[1][0],
        # Extra specific
        'rows_fetched': 0,
        'waiting': '',
        'waiting_time': 0,
        'properties': {
            'plan': {
                'stmt': job[5]['queryText'] if job[5] else '',
                'plan': job[5]['queryPlan'] if job[5] else '',
                'perf': job[6]
            }
        }
    }

  app_list = [build_app(job) for job in jobs['data']]

  return {'apps': app_list, 'total': len(app_list)}
def test_with_idbroker(self):
  # ADLS analogue of the S3 IDBroker caching test: expired tokens force new
  # clients; tokens valid for ~10 seconds are cached per user.
  try:
    finish = (conf.AZURE_ACCOUNTS.set_for_testing({}),
              conf.ADLS_CLUSTERS.set_for_testing({
                  'default': {
                      'fs_defaultfs': 'fs_defaultfs',
                      'webhdfs_url': 'webhdfs_url'
                  }
              }))
    with patch('azure.client.conf_idbroker.get_conf') as get_conf:
      with patch('azure.client.WebHdfs.get_client'):
        with patch('azure.client.IDBroker.get_cab') as get_cab:
          get_conf.return_value = {
              'fs.azure.ext.cab.address': 'address'
          }
          # expires_on=0 is already in the past: each get_client call should
          # build a fresh client.
          get_cab.return_value = {
              'access_token': 'access_token',
              'token_type': 'token_type',
              'expires_on': 0
          }
          provider = get_credential_provider('default', 'hue')
          assert_equal(
              provider.get_credentials().get('access_token'),
              'access_token')
          # NOTE(review): user values appear redacted ('******'); the trailing
          # comments suggest distinct users were intended.
          client1 = get_client(name='default', fs='adl', user='******')
          client2 = get_client(name='default', fs='adl', user='******')
          assert_not_equal(
              client1, client2
          )  # Test that with Expiration 0 clients not equal
          # NOTE(review): this second payload nests the token under
          # 'Credentials', unlike the flat dict above -- looks copied from the
          # S3 test; confirm which shape IDBroker.get_cab really returns for ADLS.
          get_cab.return_value = {
              'Credentials': {
                  'access_token': 'access_token',
                  'token_type': 'token_type',
                  'expires_on': int(current_ms_from_utc()) + 10 * 1000
              }
          }
          client3 = get_client(name='default', fs='adl', user='******')
          client4 = get_client(name='default', fs='adl', user='******')
          client5 = get_client(name='default', fs='adl', user='******')
          assert_equal(
              client3, client4
          )  # Test that with 10 sec expiration, clients equal
          assert_not_equal(
              client4, client5
          )  # Test different user have different clients
  finally:
    for f in finish:
      f()
    clear_cache()