def setUp(self):
    """Install YARN API mocks and two logged-in test clients.

    Beware: monkey patching — the real API factories are saved on their
    modules so a matching tearDown can restore them.
    """
    # Save the real factories exactly once.
    # BUG FIX: the original guards tested hasattr() against a different
    # attribute name ('old_get_resource_manager_api') / a different module
    # (resource_manager_api for the mapreduce save) than the one being set,
    # so the guard was always true and a second setUp would overwrite the
    # saved original with the already-installed mock. Guard on the exact
    # attribute that is assigned.
    if not hasattr(resource_manager_api, 'old_get_resource_manager'):
        resource_manager_api.old_get_resource_manager = resource_manager_api.get_resource_manager
    if not hasattr(mapreduce_api, 'old_get_mapreduce_api'):
        mapreduce_api.old_get_mapreduce_api = mapreduce_api.get_mapreduce_api
    if not hasattr(history_server_api, 'old_get_history_server_api'):
        history_server_api.old_get_history_server_api = history_server_api.get_history_server_api

    # Primary non-superuser test client ('test' is make_logged_in_client's
    # default username, matching the grant_access() call below).
    self.c = make_logged_in_client(is_superuser=False)
    grant_access("test", "test", "jobbrowser")
    self.user = User.objects.get(username='test')

    # Second user, used to exercise job-sharing/permission checks.
    self.c2 = make_logged_in_client(is_superuser=False, username="test2")
    grant_access("test2", "test2", "jobbrowser")
    self.user2 = User.objects.get(username='test2')

    # Replace the real YARN APIs with in-process mocks.
    resource_manager_api.get_resource_manager = lambda user: MockResourceManagerApi(user)
    mapreduce_api.get_mapreduce_api = lambda: MockMapreduceApi()
    history_server_api.get_history_server_api = lambda: HistoryServerApi()

    # Config overrides to undo in tearDown.
    self.finish = [
        YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True),
        SHARE_JOBS.set_for_testing(False)
    ]

    assert_true(cluster.is_yarn())
def teardown_class(cls):
    """Drop the databases/tables this test class created and reset state.

    Only touches the warehouse when running against a live cluster; always
    clears the module-level _INITIALIZED flag so the next run re-creates
    its fixtures.
    """
    if is_live_cluster():
        # Delete test DB and tables
        query_server = get_query_server_config()
        # FIX: the username literal was masked out ('******') —
        # make_logged_in_client() defaults to the 'test' user, so look
        # that user up explicitly.
        client = make_logged_in_client()
        user = User.objects.get(username='test')
        db = dbms.get(user, query_server)

        # Kill Spark context if running
        if is_hive_on_spark() and cluster.is_yarn():
            # TODO: We should clean up the running Hive on Spark job here
            pass

        for db_name in [cls.db_name, '%s_other' % cls.db_name]:
            databases = db.get_databases()
            if db_name in databases:
                # Drop every table first, then the view and the database.
                tables = db.get_tables(database=db_name)
                for table in tables:
                    make_query(client, 'DROP TABLE IF EXISTS `%(db)s`.`%(table)s`' % {'db': db_name, 'table': table}, wait=True)
                make_query(client, 'DROP VIEW IF EXISTS `%(db)s`.`myview`' % {'db': db_name}, wait=True)
                make_query(client, 'DROP DATABASE IF EXISTS %(db)s' % {'db': db_name}, wait=True)

                # Check the cleanup actually removed the database.
                databases = db.get_databases()
                assert_false(db_name in databases)

    global _INITIALIZED
    _INITIALIZED = False
def setUp(self):
    """Monkey-patch the YARN APIs with mocks and build one test client."""
    # Save the real factories exactly once.
    # BUG FIX: the original hasattr() guards checked a different attribute
    # name / module than the one being assigned, so they were always true
    # and a repeated setUp could save an already-mocked factory as the
    # "original". Guard on the exact attribute that is set.
    if not hasattr(resource_manager_api, 'old_get_resource_manager'):
        resource_manager_api.old_get_resource_manager = resource_manager_api.get_resource_manager
    if not hasattr(mapreduce_api, 'old_get_mapreduce_api'):
        mapreduce_api.old_get_mapreduce_api = mapreduce_api.get_mapreduce_api
    if not hasattr(history_server_api, 'old_get_history_server_api'):
        history_server_api.old_get_history_server_api = history_server_api.get_history_server_api

    # Replace the real YARN APIs with in-process mocks.
    resource_manager_api.get_resource_manager = lambda: MockResourceManagerApi()
    mapreduce_api.get_mapreduce_api = lambda: MockMapreduceApi()
    history_server_api.get_history_server_api = lambda: HistoryServerApi()

    self.c = make_logged_in_client(is_superuser=False)
    grant_access("test", "test", "jobbrowser")

    # Config override to undo in tearDown.
    self.finish = YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True)

    assert_true(cluster.is_yarn())
def jobs(request):
    """Job-browser listing view.

    Returns a JSON payload of massaged jobs when ?format=json is passed;
    otherwise renders the jobs.mako listing page with the current filters.
    """
    params = request.GET
    user = params.get('user', request.user.username)
    state = params.get('state')
    text = params.get('text')
    retired = params.get('retired')

    if params.get('format') == 'json':
        api = get_api(request.user, request.jt)
        matching = api.get_jobs(user=request.user, username=user, state=state, text=text, retired=retired)
        json_jobs = [massage_job_for_json(job, request) for job in matching]
        return HttpResponse(encode_json_for_js(json_jobs), mimetype="application/json")

    context = {
        'request': request,
        'state_filter': state,
        'user_filter': user,
        'text_filter': text,
        'retired': retired,
        'filtered': not (state == 'all' and user == '' and text == ''),
        'is_yarn': cluster.is_yarn(),
    }
    return render('jobs.mako', request, context)
def get_api(user, jt):
    """Return the job-tracker API wrapper: YARN when configured, MR1 otherwise."""
    return YarnApi(user) if cluster.is_yarn() else JtApi(jt)
raise PopupException(_('Job Tracker cannot be contacted or might be down.')) else: raise ex json_jobs = { 'jobs': [massage_job_for_json(job, request) for job in jobs], } return JsonResponse(json_jobs, encoder=JSONEncoderForHTML) return render('jobs.mako', request, { 'request': request, 'state_filter': state, 'user_filter': user, 'text_filter': text, 'retired': retired, 'filtered': not (state == 'all' and user == '' and text == ''), 'is_yarn': cluster.is_yarn() }) def massage_job_for_json(job, request): job = { 'id': job.jobId, 'shortId': job.jobId_short, 'name': hasattr(job, 'jobName') and job.jobName or '', 'status': job.status, 'url': job.jobId and reverse('jobbrowser.views.single_job', kwargs={'job': job.jobId}) or '', 'logs': job.jobId and reverse('jobbrowser.views.job_single_logs', kwargs={'job': job.jobId}) or '', 'queueName': hasattr(job, 'queueName') and job.queueName or _('N/A'), 'priority': hasattr(job, 'priority') and job.priority.lower() or _('N/A'), 'user': job.user, 'isRetired': job.is_retired, 'isMR2': job.is_mr2,
return render( 'jobs.mako', request, { 'request': request, 'state_filter': state, 'user_filter': user, 'text_filter': text, 'retired': retired, 'filtered': not (state == 'all' and user == '' and text == ''), 'is_yarn': cluster.is_yarn(), 'hiveserver2_impersonation_enabled': hiveserver2_impersonation_enabled() }) def massage_job_for_json(job, request=None, user=None): job = { 'id': job.jobId, 'shortId': job.jobId_short, 'name': hasattr(job, 'jobName') and job.jobName or '', 'status': job.status,
else: raise ex json_jobs = { 'jobs': [massage_job_for_json(job, request) for job in jobs], } return JsonResponse(json_jobs, encoder=JSONEncoderForHTML) return render( 'jobs.mako', request, { 'request': request, 'state_filter': state, 'user_filter': user, 'text_filter': text, 'retired': retired, 'filtered': not (state == 'all' and user == '' and text == ''), 'is_yarn': cluster.is_yarn() }) def massage_job_for_json(job, request): job = { 'id': job.jobId, 'shortId': job.jobId_short, 'name': hasattr(job, 'jobName') and job.jobName or '', 'status': job.status, 'url': job.jobId
else: raise ex json_jobs = {"jobs": [massage_job_for_json(job, request) for job in jobs]} return JsonResponse(json_jobs, encoder=JSONEncoderForHTML) return render( "jobs.mako", request, { "request": request, "state_filter": state, "user_filter": user, "text_filter": text, "retired": retired, "filtered": not (state == "all" and user == "" and text == ""), "is_yarn": cluster.is_yarn(), }, ) def massage_job_for_json(job, request): job = { "id": job.jobId, "shortId": job.jobId_short, "name": hasattr(job, "jobName") and job.jobName or "", "status": job.status, "url": job.jobId and reverse("jobbrowser.views.single_job", kwargs={"job": job.jobId}) or "", "logs": job.jobId and reverse("jobbrowser.views.job_single_logs", kwargs={"job": job.jobId}) or "", "queueName": hasattr(job, "queueName") and job.queueName or _("N/A"), "priority": hasattr(job, "priority") and job.priority.lower() or _("N/A"), "user": job.user,