def post(self, request):
    """Register an elasticsearch master (and optional standby) for the current cluster.

    Reads ip/web_port/rpyc_port (and optionally standby_ip) from POST, creates the
    Elastic_search rows if they do not already exist for this cluster, links the
    'elasticsearch' service to the cluster, and redirects to the index on success.
    """
    ip = request.POST['ip']
    web_port = request.POST['web_port']
    rpyc_port = request.POST['rpyc_port']
    context = context_processors.base_variables_all(request)
    # Session maps the user key to the currently selected cluster id.
    cluster_id = request.session[str(request.session['user'])]
    service_id = Services.objects.get(name="elasticsearch").id
    if request.POST['standby_ip']:
        # NOTE(review): the existence check uses the master `ip`, but the row is
        # created with `standby_ip` — confirm this is intentional.
        if not Elastic_search.objects.filter(ip=ip, cluster=request.session[str(request.session['user'])]).exists():
            Elastic_search.objects.create(ip=request.POST['standby_ip'], cluster_id=cluster_id, type=1, state=0, web_port=web_port, rpyc_port=rpyc_port)
        context['standby_ip'] = request.POST['standby_ip']
    try:
        # state=1 marks the active master; type=1 marks a master node.
        if not Elastic_search.objects.filter(ip=ip, cluster=request.session[str(request.session['user'])]).exists():
            Elastic_search.objects.create(ip=ip, type=1, cluster_id=cluster_id, web_port=web_port, state=1, rpyc_port=rpyc_port)
    except IntegrityError as e:
        # Surface the DB error to the template instead of failing the request.
        context['message'] = e
    # Echo the submitted values back for re-rendering the form.
    context['ip'] = ip
    # NOTE(review): `type` here is the Python builtin, not a POSTed value —
    # probably meant request.POST['type']; confirm against the template.
    context['type'] = type
    context['web_port'] = web_port
    context['rpyc_port'] = rpyc_port
    if not Service_cluster_reference.objects.filter(service_id=service_id, cluster_id=cluster_id).exists():
        Service_cluster_reference.objects.create(service_id=service_id, cluster_id=Clusters.objects.get(id=cluster_id))
        messages.success(request, 'elasticsearch successfully added')
        return redirect('index')
    else:
        # Service already registered for this cluster => a master is already set.
        messages.error(request, "<b>%s</b> as Masternode for this cluster is already set" % ip)
    return render(request, 'elasticsearch/add.html', context)
def post(self, request):
    """Create a new group (Role) from the submitted form.

    Validates the group name (characters, length, uniqueness) and either
    creates the Role and redirects, or re-renders the form with an error.
    """
    context = context_processors.base_variables_all(request)
    group_name = request.POST["groupname"]
    # Run the validators in order; the first failure wins.
    if not self.validate_group_name_character(group_name):
        context["error_msg"] = "group name must be all alphabet character"
    elif not self.validate_group_name_length(group_name):
        context["error_msg"] = " %s group name is < 5" % group_name
    elif not self.validate_group_name_exists(group_name):
        context["error_msg"] = " %s group name already exists" % group_name
    else:
        try:
            Role.objects.create(name=group_name)
            group_id = Role.objects.get(name=group_name).id
            messages.success(
                request, "%s successfully added to the group" % group_name)
            # The submit button pressed decides where to go next.
            if "addanother" in request.POST:
                return redirect('add_group')
            elif "continue" in request.POST:
                return redirect('change_group', id=group_id)
        except Exception:
            messages.error(request, "%s adding to group failed" % group_name)
            return redirect('add_group')
    context["group_name"] = group_name
    return render(request, 'account/add_group.html', context)
def get(self, request, id):
    """Return live system statistics for node *id*.

    For AJAX requests the statistics are returned as JSON; otherwise the
    realtime-graphs template is rendered with the data in the context.

    Fixes:
    - ``JsonResponse(e)`` passed the exception object itself, which raises
      ``TypeError`` (JsonResponse only accepts dicts unless ``safe=False``);
      errors are now returned as a JSON payload.
    - ``JsonResponse(djson)`` gets ``safe=False`` since ``ast.literal_eval``
      may yield a non-dict payload.
    """
    nodes = Nodes.objects.filter(id=id).values("ip", "hostname", "fqdn", "id", "port").first()
    context = context_processors.base_variables_all(request)
    context["node"] = nodes
    context["ip"] = nodes["ip"]
    if request.is_ajax():
        try:
            url = 'http://%s:%s/system/statistics' % (nodes["ip"], nodes["port"])
            response = CreateConnection.connect(request, url)
            djson = response.json()
        except ValueError:
            # Agent sometimes returns a Python-literal body instead of JSON.
            djson = ast.literal_eval(response.content.decode())
        except Exception as e:
            # Report the failure as JSON instead of crashing on JsonResponse(e).
            return JsonResponse({"success": 0, "msg": [str(e)]}, safe=False)
        return JsonResponse(djson, safe=False)
    else:
        try:
            url = 'http://%s:%s/system/statistics' % (nodes["ip"], nodes["port"])
            data = CreateConnection.connect(request, url)
            djson = data.json()
        except ValueError:
            djson = ast.literal_eval(data.content.decode())
        except Exception:
            # No stats available — render the page without data.
            return render(request, 'nodes/realtime_graphs.html', context)
        context["data"] = djson
        context["cpu_num"] = len(djson["cpu_usage"])
        return render(request, 'nodes/realtime_graphs.html', context)
def post(self, request): ip = request.POST['ip'] # type = request.POST['type'] web_port = request.POST['web_port'] rpyc_port = request.POST['rpyc_port'] service_id = Services.objects.get(name="spark").id cluster_id = request.session[str(request.session['user'])] context = context_processors.base_variables_all(request) try: if not Spark.objects.filter(ip=ip, cluster=request.session[str(request.session['user'])]).exists(): Spark.objects.create(ip=ip, type=1, web_port=web_port,cluster_id=cluster_id, rpyc_port=rpyc_port, state=1) except IntegrityError as e: context['message'] = e context['type'] = type context['ip'] = ip context['web_port'] = web_port context['rpyc_port'] = rpyc_port if not Service_cluster_reference.objects.filter(service_id=service_id, cluster_id=cluster_id).exists(): Service_cluster_reference.objects.create(service_id=service_id, cluster_id=Clusters.objects.get(id=cluster_id)) messages.success(request, 'spark successfully added') return redirect('index') else: messages.error(request, "<b>%s</b> as Master for this cluster is already set" % ip) return render(request, 'spark/add.html', context)
def browse_hdfs_ajax(request):
    """AJAX endpoint: list an HDFS directory via WebHDFS LISTSTATUS.

    Returns the ``FileStatuses`` payload for ``directory_name`` (defaulting to
    "/" when empty). When no active master is found, returns an empty payload.

    Fix: the no-master branch called ``JsonResponse('')`` which raises
    ``TypeError`` (non-dict payloads need ``safe=False``).
    """
    directory = request.GET['directory_name']
    client = True
    context = context_processors.base_variables_all(request)
    if directory == "":
        directory = "/"
    obj = helper.helper(request, Hdfs)
    master = obj.get_active_master()
    if master:
        master_ip = master["ip"]
        master_port = master["web_port"]
        url = "https://%s:%s/webhdfs/v1%s?op=LISTSTATUS" % (
            master_ip, master_port, directory)
        # NOTE: certificate verification is disabled for the in-cluster webhdfs call.
        r = requests.get(url, verify=False)
        response_json = r.json()
        data = response_json['FileStatuses']
        return JsonResponse(data)
    else:
        data = ''
        messages.error(request, "client is not installed on the master node ")
        client = False
        context["client"] = client
        # safe=False is required because the payload is not a dict.
        return JsonResponse(data, safe=False)
def get(self, request):
    """Render the settings page with unapproved-node count and approved nodes."""
    if 'user' not in request.session:
        return redirect('login')
    context = context_processors.base_variables_all(request)
    # Count of pending nodes plus the queryset of already-approved ones.
    context["unapproved"] = Nodes.objects.filter(approved=0).count()
    context["approved"] = Nodes.objects.filter(approved=1)
    return render(request, "settings/setting.html", context)
def get(self, request):
    """Show the stored API key on the security page, if one exists."""
    if 'user' not in request.session:
        return redirect('login')
    context = base_variables_all(request)
    stored_keys = self.get_api_key()
    if stored_keys:
        # Only the first key is displayed.
        context["keys"] = stored_keys[0]
    return render(request, 'security/api_key.html', context)
def get(self, request):
    """List nodes that are still waiting for approval."""
    context = context_processors.base_variables_all(request)
    pending_nodes = Nodes.objects.filter(approved=0).all()
    if pending_nodes:
        context['nodes'] = pending_nodes
    else:
        messages.info(request, "No nodes to approve right now")
    return render(request, 'nodes/unapproved_index.html', context)
def get(self, request, id):
    """Render the edit form for node *id*, alongside the unapproved-node list."""
    context = context_processors.base_variables_all(request)
    context['nodes'] = Nodes.objects.filter(approved=0).all()
    # The session stores the currently selected cluster id under the user key.
    current_cluster = str(request.session[str(request.session['user'])])
    context["edit_node"] = Nodes.objects.get(id=id)
    context["edit_cluster"] = Clusters.objects.get(id=current_cluster)
    return render(request, 'nodes/edit_nodes.html', context)
def browse_hdfs(request):
    """Render the HDFS root-directory browser.

    Verifies that a client is installed and an active namenode is reachable,
    then lists "/" via WebHDFS LISTSTATUS and renders the browse template.
    Renders an error page/message for each failure mode.
    """
    if 'user' not in request.session:
        return redirect('login')
    obj = helper.helper(request, Hdfs)
    master_ip = ""
    client = True
    master = ""
    context = context_processors.base_variables_all(request)
    if obj.atleast_one_client_is_installed():
        if obj.clientIsInstalledOnMaster():
            master = obj.get_active_master()
            if master:
                master_ip = master["ip"]
                master_port = master["web_port"]
                context["client"] = client
            else:
                # Master record exists but no active namenode was found.
                messages.error(
                    request,
                    "Sorry there is some problem in your configuration file <h3>namenode is down</h3> "
                )
                context["error_in_conf_file"] = True
                return render(request, 'browse/browse_hdfs.html', context)
        else:
            messages.error(request, "Client is not installed on master node")
            context["client"] = False
            return render(request, 'browse/browse_hdfs.html', context)
    else:
        messages.error(request, "Seems like no client is installed")
        return render(request, 'browse/browse_hdfs.html', context)
    cluster_id = str(request.session[str(request.session['user'])])
    nodes_hdfs = Nodes.objects.all()
    try:
        # List the HDFS root via WebHDFS (TLS verification disabled for in-cluster call).
        url = "https://%s:%s/webhdfs/v1/?op=LISTSTATUS" % (master_ip, master_port)
        r = requests.get(url, verify=False)
        response_json = r.json()
        rows = response_json['FileStatuses']['FileStatus']
    except ConnectionError as e:
        messages.error(request, "There is some problem with the connection.Please make sure your internet connection" \
                                " is working and client is up and running on " + master_ip)
        return render(request, "error/500.html", context)
    context["rows"] = rows
    context["nodes_hdfs"] = nodes_hdfs
    context["master_ip"] = master_ip
    return render(request, 'browse/browse_hdfs.html', context)
def get(self, request):
    """Paginated listing of all users (2 per page)."""
    context = context_processors.base_variables_all(request)
    paginator = Paginator(User.objects.all(), 2)
    requested_page = request.GET.get('page', 1)
    try:
        page_obj = paginator.page(requested_page)
    except PageNotAnInteger:
        # Non-numeric page parameter: show the first page.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Out-of-range page parameter: show the last page.
        page_obj = paginator.page(paginator.num_pages)
    context["users_data"] = page_obj
    return render(request, 'account/users.html', context)
def show_backup_configure_service(request, node, service):
    """Show backed-up configuration entries for one service on one node."""
    context = context_processors.base_variables_all(request)
    target_service_id = Services.objects.get(name=service).id
    target_ip = Nodes.objects.get(id=node).ip
    # Backup rows store values keyed by node ip inside the value blob.
    context["key_configurations"] = Backup_configuration.objects.filter(
        service_id=target_service_id, value__contains=target_ip)
    context["node_ip"] = target_ip
    context["service_name"] = service
    return render(request, 'configuraion/backup_configuration.html', context)
def get(request, id):
    """Render the change-group form for group *id* with all permissions."""
    context = context_processors.base_variables_all(request)
    context["permissions"] = Permission.objects.all()
    try:
        context["group"] = Role.objects.get(id=id)
    except Exception as e:
        # Missing/invalid group id is only logged; the page still renders.
        print(e)
    assigned = Role_permission.objects.filter(role_id=id)
    if assigned:
        # Flat list of permission ids currently granted to this group.
        context["group_permissions"] = list(
            assigned.values_list("permission_id", flat=True))
    return render(request, 'account/change_group.html', context)
def edit_configure_service(request, service):
    """Render the edit form for a service's user-preferred configuration."""
    context = context_processors.base_variables_all(request)
    svc = Services.objects.get(name=service)
    context["key_configurations"] = User_preferred_configuration.objects.filter(
        service_id=svc.id)
    context["id"] = svc.id
    context["service_name"] = service
    context["nodes_configuration"] = helper(request).get_all_nodes()
    return render(request, 'configuraion/edit_configuration.html', context)
def add_configure_service(request, service):
    """Render the add-configuration form for a service.

    Offers only the default keys the user has not already configured.
    """
    svc = Services.objects.get(name=service)
    already_configured = User_preferred_configuration.objects.filter(
        service_id=svc.id)
    context = context_processors.base_variables_all(request)
    # Defaults minus keys the user has already set for this service.
    context["key_configurations"] = Default_configuration.objects.exclude(
        name__in=[cfg.key_name for cfg in already_configured]).filter(
        service_id=svc.id)
    context["id"] = svc.id
    context["service_name"] = service
    context["nodes_configuration"] = helper(request).get_all_nodes()
    return render(request, 'configuraion/configure_service.html', context)
def post(self, request):
    """Create a new user account from the submitted form.

    Runs the username/password validators in sequence; on success the user is
    created with a salted hash and the view redirects per the pressed button.
    On any validation failure the form is re-rendered with an error message.
    """
    context = context_processors.base_variables_all(request)
    username = request.POST["username"]
    password1 = request.POST["password1"]
    password2 = request.POST["password2"]
    # NOTE(review): each validator appears to return True when the check
    # passes (including username_exists == "name is free") — confirm against
    # their definitions.
    if self.username_exists(username):
        if self.username_validate_pattern(username):
            if self.password_validate_length(password1):
                if self.password_validate_numeric(password1):
                    if self.confirm_password(password1, password2):
                        # Store a salted hash, never the raw password.
                        salt = get_salt()
                        hashed_password = hash_string(salt, password1)
                        User.objects.create(
                            user_name=username,
                            salt=salt,
                            hashed_password=hashed_password)
                        user_id = User.objects.get(user_name=username).id
                        context["success_msg"] = "successfully added user"
                        messages.success(
                            request, '%s successfully added' % username)
                        # The submit button pressed decides where to go next;
                        # if neither is present, control falls through to the
                        # final render below.
                        if "addanother" in request.POST:
                            return redirect('add_user')
                        elif "continue" in request.POST:
                            return redirect('change_user', id=user_id)
                    else:
                        context["error_msg"] = "password mismatched"
                else:
                    context["error_msg"] = "password is all numeric"
            else:
                context["error_msg"] = "password length is less than 8"
        else:
            context["error_msg"] = "username pattern is invalid"
    else:
        context["error_msg"] = "username already exists"
    # Echo the submitted values back so the form can be re-filled.
    context["username"] = username
    context["password1"] = password1
    context["password2"] = password2
    return render(request, 'account/add_user.html', context)
def browse_nodes(request, id):
    """Browse a directory on node *id* via the node agent's /command/ls/ API.

    The path comes from the ``browse`` GET parameter (default "user").
    Renders the browse template with the listing, or an error page when the
    agent is unreachable.
    """
    if 'user' not in request.session:
        return redirect('login')
    # NOTE(review): module-level global mutated as a side effect — presumably
    # other views read cluster_id; confirm before refactoring.
    global cluster_id
    obj = helper.helper(request, Hdfs)
    context = context_processors.base_variables_all(request)
    cluster_id = str(request.session[str(request.session['user'])])
    node = Nodes.objects.get(id=id)
    url = "http://%s:%s/command/ls/" % (node.ip, node.port)
    data = {}
    if "browse" in request.GET:
        data["path"] = request.GET["browse"]
    else:
        data["path"] = "user"
    # The agent authenticates requests via this API key header.
    headers = {"API-KEY": obj.get_api_key()}
    try:
        response = requests.post(url, data=json.dumps(data), headers=headers)
        response_jsons = response.json()
        context["path"] = response_jsons["path"]
        print(response_jsons["path"])
        # Parent directory used by the template's "up one level" link.
        back_dir = back_browse(response_jsons["path"]) + '/'
    except ConnectionError as e:
        messages.error(request, "There is some problem with the connection.Please make sure your internet connection" \
                                " is working and client is up and running on " + node.ip)
        return render(request, "error/500.html", context)
    rows = directory_information(response_jsons)
    context["rows"] = rows
    context["node_ip"] = node.ip
    context["node_name"] = node.name
    context["id"] = id
    context["back_path"] = back_dir
    return render(request, 'browse/browse.html', context)
def show_configure_service(request):
    """Show current and backed-up configuration values for a node/service pair.

    Both configuration tables store a per-node mapping in ``value``; the entry
    for the requested node ip is extracted for display.
    """
    context = context_processors.base_variables_all(request)
    node = request.GET["node"]
    service = request.GET["service_id"]

    def _rows(configs):
        # Flatten config rows into template-friendly dicts, picking this
        # node's entry out of the serialized per-node value mapping.
        rows = []
        for cfg in configs:
            per_node_values = ast.literal_eval(cfg.value)
            rows.append({
                "key": cfg.key_name,
                "type": cfg.key_type,
                "value": per_node_values[str(node)],
            })
        return rows

    current = User_preferred_configuration.objects.filter(
        service_id=service, value__contains=node)
    if current:
        context["key_configurations"] = _rows(current)
    backups = Backup_configuration.objects.filter(
        service_id=service, value__contains=node)
    if backups:
        context["backup_key_configurations"] = _rows(backups)
    context["node_ip"] = node
    context["service_name"] = service
    return render(request, 'configuraion/show_configuration.html', context)
def back_with_hdfs_ajax(request):
    """AJAX endpoint: list the parent of ``directory_name`` via WebHDFS.

    Strips the last path component and lists the resulting directory; the
    response also carries ``back_directory`` so the client can keep walking up.

    Fix: the no-master branch called ``JsonResponse('')`` which raises
    ``TypeError`` (non-dict payloads need ``safe=False``).
    """
    directory = request.GET['directory_name']
    context = context_processors.base_variables_all(request)
    # Drop the last path component to get the parent directory.
    directory_list = directory.split('/')
    directory_list.pop(-1)
    directory_str = "/".join(directory_list)
    obj = helper.helper(request, Hdfs)
    master = obj.get_active_master()
    if master:
        master_ip = master["ip"]
        master_port = master["web_port"]
        # Root needs an explicit trailing slash in the WebHDFS path.
        if directory_str == "":
            url = "https://%s:%s/webhdfs/v1/%s?op=LISTSTATUS" % (
                master_ip, master_port, directory_str)
        else:
            url = "https://%s:%s/webhdfs/v1%s?op=LISTSTATUS" % (
                master_ip, master_port, directory_str)
        r = requests.get(url, verify=False)
        response_json = r.json()
        data = response_json['FileStatuses']
        data['back_directory'] = directory_str
        return JsonResponse(data)
    else:
        data = ''
        messages.error(request, "client is not installed on the master node ")
        client = False
        context["client"] = client
        # safe=False is required because the payload is not a dict.
        return JsonResponse(data, safe=False)
def get(request, id):
    """Render the change-user form for user *id* with paginated groups (1/page)."""
    context = context_processors.base_variables_all(request)
    user_qs = User.objects.filter(id=id)
    paginator = Paginator(Role.objects.all(), 1)
    page_no = request.GET.get('page', 1)
    try:
        page_groups = paginator.page(page_no)
    except PageNotAnInteger:
        page_groups = paginator.page(1)
    except EmptyPage:
        page_groups = paginator.page(paginator.num_pages)
    if not user_qs:
        # Unknown user id: bounce back to the user listing.
        messages.error(request, "user doesnot exist")
        return render(request, 'account/users.html', context)
    context["user_data"] = user_qs.values()[0]
    context["groups"] = page_groups
    memberships = User_role.objects.filter(user_id=id)
    if memberships:
        # Flat list of role ids this user belongs to.
        context["user_groups"] = list(
            memberships.values_list("role_id", flat=True))
    return render(request, 'account/change_user.html', context)
def create_dir_hdfs_ajax(request):
    """AJAX endpoint: create ``folder_name`` under ``root_folder`` via WebHDFS MKDIRS."""
    root_folder = request.GET['root_folder']
    folder_name = request.GET['folder_name']
    context = context_processors.base_variables_all(request)
    cluster_id = str(request.session[str(request.session['user'])])
    master = helper.helper(request, Hdfs).get_active_master()
    if not master:
        messages.error(request, "client is not installed on the master node ")
        context["client"] = False
        return JsonResponse({'success': False})
    # New directories are created with permission 711.
    mkdir_url = "https://%s:%s/webhdfs/v1%s/%s?op=MKDIRS&permission=711" % (
        master["ip"], master["web_port"], root_folder, folder_name)
    requests.put(mkdir_url, verify=False)
    return JsonResponse({'success': True})
def get(self, request, id):
    """Render the detail page for node *id*."""
    context = context_processors.base_variables_all(request)
    context["id"] = id
    node_info = Nodes.objects.filter(id=id).values("name", "ip").first()
    context["metrics_nodes"] = node_info
    return render(request, 'nodes/node_details.html', context)
def get(self, request):
    """Render the YARN add form."""
    base_context = context_processors.base_variables_all(request)
    return render(request, 'yarn/add.html', base_context)
def index(request):
    """Spark service dashboard.

    Checks client installation and the active master, then queries the
    spark/node/metrics tables with raw SQL to split workers into alive/dead
    lists and renders the spark overview template.
    """
    obj = helper(request, Spark)
    master_ip = ""
    client = True
    master = ""
    context = context_processors.base_variables_all(request)
    if obj.atleast_one_client_is_installed():
        if obj.clientIsInstalledOnMaster():
            master = obj.get_active_master()
            if master:
                master_ip = master["ip"]
                context["client"] = client
            else:
                # Master configured but not responding — show troubleshooting hints.
                messages.error(
                    request,
                    "Sorry !! due to some problem we are unable to fetch the information from server."
                    " You can perform following steps to find the problem and then restart the services."
                    "<ul>"
                    "<li> Reload after 10 seconds</li>"
                    "<li> Restart again</li>"
                    "<li> Check the log of spark master and spark worker</li>"
                    "<li> make there is no problem in configuration file </li> "
                )
                context["error_in_conf_file"] = True
                s_master = obj.get_service_master()
                if s_master:
                    context["master_ip"] = s_master["ip"]
                context["client"] = client
                return render(request, 'spark/spark.html', context)
        else:
            messages.error(
                request,
                "We have encountered some problems."
                "Please make sure following conditions are met"
                "<ul>"
                "<li> Client is installed on master node</li>"
                "<li> Environment variables for all services are set properly</li>"
                "<li> Restart agent on master node [url here]</li>")
            context["client"] = False
            return render(request, 'spark/spark.html', context)
    else:
        messages.error(request, "Seems like no client is installed")
        context["client"] = False
        return render(request, 'spark/spark.html', context)
    all_nodes = obj.get_all_nodes()
    cursor = connection.cursor()
    # Raw SQL over the spark/node/metrics tables; only the latest metrics
    # snapshot (max updated_at) is considered for workers.
    node_with_client = "select s.ip from spark_spark as s join administer_nodes " \
                       "as n on s.ip=n.ip"
    masters_sql = "select s.*,n.hostname,n.fqdn,n.name from spark_spark as s join administer_nodes " \
                  "as n on s.ip=n.ip where s.type=1"
    slave_with_client = "select s.*,sm.*,n.hostname,n.fqdn,n.name from spark_spark as s join administer_nodes as n on " \
                        "s.ip=n.ip join spark_metrics as sm on s.id=sm.node_id " \
                        " where s.type=0 and sm.updated_at in (select max(updated_at) " \
                        "from spark_metrics limit 1)"
    slave_without_client = "select s.*,sm.* from spark_spark as s join spark_metrics as sm on s.id=sm.node_id " \
                           "where s.type=0 and s.ip not in (" + node_with_client + ") and sm.updated_at in" \
                           " (select max(updated_at) from spark_metrics limit 1)"
    alive_workers_list = []
    dead_workers_list = []
    cursor.execute(masters_sql)
    masters = cursor.fetchall()
    colnames = [desc[0] for desc in cursor.description]
    for node in masters:
        c = dict(zip(colnames, node))
        client_installed = True
        if c["ip"] not in all_nodes:
            client_installed = False
        # state==1: active master; state==0: standby master.
        if c["type"] == 1 and c["state"] == 1:
            master = c
        # NOTE(review): `== 1 == 1` is a chained comparison equivalent to == 1.
        if c["type"] == 1 == 1 and c["state"] == 0:
            c["client_installed"] = client_installed
            # NOTE(review): standby_data is assigned but not used below.
            standby_data = c
    cursor.execute(slave_with_client)
    nodes_with_client = cursor.fetchall()
    colnames = [desc[0] for desc in cursor.description]
    for node in nodes_with_client:
        c = dict(zip(colnames, node))
        client_installed = True
        if c["ip"] not in all_nodes:
            client_installed = False
        c["client_installed"] = client_installed
        if c["status"] == "RUNNING":
            alive_workers_list.append(c)
        else:
            dead_workers_list.append(c)
    cursor.execute(slave_without_client)
    nodes_without_client = cursor.fetchall()
    # colnames intentionally reused from the previous query's description.
    for node in nodes_without_client:
        c = dict(zip(colnames, node))
        client_installed = True
        if c["ip"] not in all_nodes:
            client_installed = False
        c["client_installed"] = client_installed
        if c["status"] == "RUNNING":
            alive_workers_list.append(c)
        else:
            dead_workers_list.append(c)
    service_object = Services.objects.get(name='spark')
    # Pending-restart flag after a configuration change, if any.
    restart_status_checks = Restart_after_configuration.objects.filter(
        service_id=service_object.id).exists()
    if restart_status_checks:
        restart_status_check = Restart_after_configuration.objects.get(
            service_id=service_object.id)
        restart_status = restart_status_check.status
    else:
        restart_status = 0
    context["alive_spark_workers"] = alive_workers_list
    context["dead_spark_workers"] = dead_workers_list
    context["restart_status"] = restart_status
    context["spark_master"] = master
    context["master_ip"] = master_ip
    context["service_id"] = service_object.id
    return render(request, 'spark/spark.html', context)
def index(request):
    """HBase service dashboard.

    Checks client installation and the active HMaster, then queries the
    hbase/node/metrics tables with raw SQL to split regionservers into
    live/dead lists and renders the hbase overview template.
    """
    obj = helper(request, Hbase)
    master_ip = ""
    client = True
    context = context_processors.base_variables_all(request)
    if obj.atleast_one_client_is_installed():
        if obj.clientIsInstalledOnMaster():
            master = obj.get_active_master()
            if master:
                master_ip = master["ip"]
                context["master_ip"] = master_ip
                context["master_id"] = master["id"]
                context["client"] = client
            else:
                # Master configured but not responding — show troubleshooting hints.
                messages.error(request,
                               "Sorry !! due to some problem we are unable to fetch the information from server."
                               " You can perform following steps to find the problem and then restart the services."
                               "<ul>"
                               "<li> Reload after 10 seconds</li>"
                               "<li> Restart again</li>"
                               "<li> Check the log of Hmaster and RegionServers</li>"
                               "<li> Check the log of Namenode and Datanode</li>"
                               "<li> make there is no problem in configuration file </li> "
                               "</ul>")
                service_master = obj.get_service_master()
                if service_master:
                    context["error_in_conf_file"] = True
                    context["master_ip"] = service_master["ip"]
                    context["master_id"] = service_master["id"]
                context["client"] = client
                return render(request, 'hbase/hbase.html', context)
        else:
            messages.error(request,
                           "We have encountered some problems."
                           "Please make sure following conditions are met"
                           "<ul>"
                           "<li> Client is installed on master node</li>"
                           "<li> Environment variables for all services are set properly</li>"
                           "<li> Restart agent on master node [url here]</li>")
            context["client"] = False
            return render(request, 'hbase/hbase.html', context)
    else:
        messages.error(request, "Seems like no client is installed")
        context["client"] = False
        return render(request, 'hbase/hbase.html', context)
    all_nodes = obj.get_all_nodes()
    cursor = connection.cursor()
    # Raw SQL over the hbase/node/metrics tables; only the latest metrics
    # snapshot (max updated_at) is considered for regionservers.
    node_with_client = "select hb.ip from hbase_hbase as hb join administer_nodes " \
                       "as n on hb.ip=n.ip"
    masters_sql = "select hb.*,n.hostname,n.fqdn,n.name from hbase_hbase as hb join administer_nodes " \
                  "as n on hb.ip=n.ip where hb.type=1"
    slave_with_client = "select h.*,hbm.*,n.hostname,n.fqdn,n.name from hbase_hbase as h join administer_nodes as n on " \
                        "h.ip=n.ip join hbase_metrics as hbm on h.id=hbm.node_id " \
                        "where h.type=0 and hbm.updated_at in (select max(updated_at) " \
                        "from hbase_metrics limit 1)"
    slave_without_client = "select h.*,hbm.* from hbase_hbase as h join hbase_metrics as hbm on h.id=hbm.node_id " \
                           "where h.type=0 and h.ip not in (" + node_with_client + ") and hbm.updated_at in (select max(updated_at) from hbase_metrics limit 1)"
    live_regionservers_list = []
    dead_regionservers_list = []
    active_data = ""
    backup_data = ""
    cursor.execute(masters_sql)
    masters = cursor.fetchall()
    colnames = [desc[0] for desc in cursor.description]
    for node in masters:
        print(node)
        c = dict(zip(colnames, node))
        client_installed = True
        if c["ip"] not in all_nodes:
            client_installed = False
        # state==1: active HMaster; state==0: backup HMaster.
        if c["type"] == 1 and c["state"] == 1:
            active_data = c
        if c["type"] == 1 and c["state"] == 0:
            c["client_installed"] = client_installed
            backup_data = c
    cursor.execute(slave_with_client)
    nodes_with_client = cursor.fetchall()
    colnames = [desc[0] for desc in cursor.description]
    for node in nodes_with_client:
        c = dict(zip(colnames, node))
        client_installed = True
        if c["ip"] not in all_nodes:
            client_installed = False
        c["client_installed"] = client_installed
        if c["status"] == "RUNNING":
            live_regionservers_list.append(c)
        else:
            dead_regionservers_list.append(c)
    cursor.execute(slave_without_client)
    nodes_without_client = cursor.fetchall()
    # colnames intentionally reused from the previous query's description.
    for node in nodes_without_client:
        c = dict(zip(colnames, node))
        client_installed = True
        if c["ip"] not in all_nodes:
            client_installed = False
        c["client_installed"] = client_installed
        if c["status"] == "RUNNING":
            live_regionservers_list.append(c)
        else:
            dead_regionservers_list.append(c)
    # NOTE(review): tst is leftover test data and is never used below.
    tst = {"k1": "v1", "k2": "v2", "k3": "v3"}
    # tst = 'bibek'
    service_object = Services.objects.get(name='hbase')
    # Pending-restart flag after a configuration change, if any.
    restart_status_checks = Restart_after_configuration.objects.filter(service_id=service_object.id).exists()
    if restart_status_checks:
        restart_status_check = Restart_after_configuration.objects.get(service_id=service_object.id)
        restart_status = restart_status_check.status
    else:
        restart_status = 0
    context["live_regionservers"] = live_regionservers_list
    context["dead_regionservers"] = dead_regionservers_list
    context["active_master"] = active_data
    context["backup_master"] = backup_data
    context["restart_status"] = restart_status
    context["service_id"] = service_object.id
    return render(request, 'hbase/hbase.html', context)
def get(self, request):
    """List approved nodes with total memory/disk fetched from each node's agent.

    Each approved node's agent is queried for total space; reachable nodes get
    memory/disk fields, unreachable ones are flagged ``offline``.

    Fixes:
    - ``data`` was initialized once before the loop, so a node whose response
      failed to decode silently reused the previous node's statistics; it is
      now reset per node.
    - Debug ``print`` statements removed.
    - The duplicated offline-entry dict literal is built by one helper.
    """
    if 'user' not in request.session:
        return redirect('login')
    context = context_processors.base_variables_all(request)
    all_nodes = Nodes.objects.filter(approved=1).all()
    if not all_nodes:
        context["not_a_single_client"] = True
        messages.error(
            request,
            "no client detected.Did you forget to approve them ??? Go to settings->Unapproved "
            "nodes to approve them")
        return render(request, 'nodes/approved_index.html', context)

    def _base_entry(node):
        # Fields common to online and offline node entries.
        return {
            "name": node.name,
            "approved": node.approved,
            "ip": node.ip,
            "hostname": node.hostname,
            "fqdn": node.fqdn,
            "id": node.id
        }

    nodes = []
    for node in all_nodes:
        url = 'http://%s:%s/system/total/space/' % (node.ip, node.port)
        response = CreateConnection.connect(request, url)
        data = {}  # reset per node so a failed decode can't reuse stale stats
        if response:
            try:
                data = response.json()
            except ValueError:
                # Agent sometimes returns a Python-literal body instead of JSON.
                data = ast.literal_eval(response.content.decode())
            except Exception as e:
                messages.error(request, e)
        entry = _base_entry(node)
        if response and data and "success" not in data:
            entry["memory"] = data["total_memory"]
            entry["disk"] = data["total_disk"]
        else:
            # No response, empty payload, or an error payload => offline.
            entry["offline"] = True
        nodes.append(entry)
    context["nodes"] = nodes
    return render(request, 'nodes/approved_index.html', context)
def get(self, request):
    """Render the add-user form."""
    base_context = context_processors.base_variables_all(request)
    return render(request, 'account/add_user.html', base_context)
def index_edit(request):
    """Render the configuration page in 'edit' mode."""
    context = context_processors.base_variables_all(request)
    # The template switches its behavior on this action flag.
    context["action"] = "edit"
    return render(request, 'configuraion/configuration.html', context)
def index_show_backup(request, id):
    """Render the configuration page in 'backup' mode with all nodes."""
    context = context_processors.base_variables_all(request)
    context["node"] = Nodes.objects.all()
    # The template switches its behavior on this action flag.
    context["action"] = "backup"
    return render(request, 'configuraion/configuration.html', context)
def post(self, request):
    """Build historical graph data for one node as a JSON payload.

    Fetches the node agent's statistics history for the requested window
    (days or hours), buckets CPU / memory / swap / disk-io / disk-usage /
    network series for charting, then appends per-service metric series read
    from the service metric tables, and returns everything as one JSON object.
    """
    cluster_id = str(request.session[str(request.session['user'])])
    nodes = Nodes.objects.filter(id=request.POST["node_id"]).values(
        "ip", "hostname", "fqdn", "id", "port").first()
    ip = nodes["ip"]
    context = context_processors.base_variables_all(request)
    context["node"] = nodes
    context["ip"] = ip
    url = 'http://%s:%s/system/statistics/history/' % (ip, nodes["port"])
    current_time = datetime.now().replace(microsecond=0)
    # current_time = datetime.today().replace(microsecond=0)
    # NOTE(review): `format` shadows the builtin; it is the x-axis label format.
    format = "%b-%d %H:%M"
    if request.POST['data_type'] == 'day':
        date = request.POST['data_value']
        start_date = current_time - timedelta(days=int(date))
        payload = {'days': date}
    else:
        time = request.POST['data_value']
        start_date = current_time - timedelta(hours=int(time))
        payload = {'time': request.POST['data_value']}
        # Hour-granularity windows use a time-only label.
        format = "%H:%M %p"
    data = CreateConnection.connect(request, url, payload, True)
    try:
        # Agent may answer with a Python-literal body; fall back to JSON.
        djson = data.content.decode()
        djson = ast.literal_eval(djson)
    except ValueError as e:
        djson = data.json()
    except Exception as e:
        return JsonResponse(
            {
                "success": 0,
                "msg": [
                    "We encountered some problem connecting to client on this node"
                ]
            },
            safe=False)
    # Agent signalled an application-level failure — pass it straight through.
    if "success" in djson and djson["success"] == 0:
        return JsonResponse(djson, safe=False)
    # Accumulators for each chart series.
    e, d, h, sm, vm, nbw, diol, dul, p, dt = [], [], [], [], [], [], [], [], [], []
    vm_shared, vm_used, vm_free, vm_available, vm_total, vm_buffer, vm_cache = [], [], [], [], [], [], []
    sm_total, sm_used, sm_free = [], [], []
    dio_write_count, dio_read_count, dio_read_time, dio_write_time = [], [], [], []
    du_disk_free, du_disk_percentage, du_disk_used, du_disk_total = [], [], [], []
    n_bytes_sent, n_packet_sent, n_packet_received, n_bytes_received, n_d_list = [], [], [], [], []
    # Samples are processed oldest-first.
    data = sorted(djson["final_data"], key=itemgetter('time'))
    for a in data:
        abcd = datetime.utcfromtimestamp(a["time"])
        category = abcd.strftime(format)
        # Skip samples older than the requested window.
        if abcd >= start_date:
            p = a["data"]["processes"]
            dt.append(category)
            d = a["data"]["cpu_usage"]
            r = a["data"]["memory_usage"]["swap_memory_usage"]
            t = a["data"]["memory_usage"]["virtual_memory_usage"]
            n_d_list.append(a["data"]["network_bandwidth"])
            dio_d = a["data"]["disk_io"]
            du_d = a["data"]["disk_usage"]
            vm_shared.append(t["shared"])
            vm_used.append(t["used"])
            vm_free.append(t["free"])
            vm_available.append(t["available"])
            vm_total.append(t["total"])
            vm_buffer.append(t["buffer"])
            vm_cache.append(t["cached"])
            sm_total.append(r["total"])
            sm_used.append(r["used"])
            sm_free.append(r["free"])
            # Counts scaled to millions; times scaled for display.
            dio_write_count.append(dio_d["write_count"] / 1000000)
            dio_read_count.append(dio_d["read_count"] / 1000000)
            dio_read_time.append(dio_d["read_time"] / 600000)
            dio_write_time.append(dio_d["write_time"] / 600000)
            du_disk_free.append(du_d["disk_free"])
            du_disk_percentage.append(du_d["disk_percentage"])
            du_disk_used.append(du_d["disk_used"])
            du_disk_total.append(du_d["disk_total"])
            e.append(d)
    # code for network starts here
    r = []
    l1 = n_d_list
    if l1:
        len_l1 = len(l1)
        len_inf = len(l1[0])
        # One [sent, recv] sample-list pair per network interface.
        x = [[[] * len_l1, [] * len_l1] for i in range(len_inf)]
        q = []
        i = 0
        for a in range(0, len(l1)):
            for b in range(0, len(l1[a])):
                for k, v in l1[a][b].items():
                    # Interface
                    if k not in q:
                        q.append(k)
                    x[i][0].append(v["bytes_sent"])
                    x[i][1].append(v["bytes_recv"])
                    i = i + 1
            i = 0
        n_bytes_sent_diff, n_bytes_received_diff = [], []
        dividend = 1000
        for i in range(0, len(x)):
            # Chart consecutive-sample deltas, scaled by magnitude.
            for j in range(1, len(x[i][0])):
                if j < len(x[i][0]):
                    dtl = abs((x[i][0][j] - x[i][0][j - 1]))
                    if dtl >= 1000 and dtl <= 1000000:
                        dividend = 1000
                    elif dtl >= 3000000:
                        dividend = 3000000
                    n_bytes_sent_diff.append(dtl / dividend)
            for j in range(1, len(x[i][1])):
                if j < len(x[i][1]):
                    dtl = abs((x[i][1][j] - x[i][1][j - 1]))
                    if dtl >= 1000 and dtl <= 1000000:
                        dividend = 1000
                    elif dtl >= 3000000:
                        dividend = 3000000
                    n_bytes_received_diff.append(dtl / dividend)
            # Random line colors per dataset.
            g = [{
                "label": "bytes_sent",
                "data": n_bytes_sent_diff,
                "pointRadius": 0,
                "borderColor": "rgba(" + str(randint(0, 255)) + "," +
                str(randint(0, 255)) + "," + str(randint(0, 255)) + ",1)"
            }, {
                "label": "bytes_recv",
                "data": n_bytes_received_diff,
                "pointRadius": 0,
                "borderColor": "rgba(" + str(randint(0, 255)) + "," +
                str(randint(0, 255)) + "," + str(randint(0, 255)) + ",1)"
            }]
            r.append({q[i]: g})
            n_bytes_sent_diff, n_bytes_received_diff = [], []
    # code for network ends here
    # One dataset per CPU core; only the first is visible initially.
    z = []
    hidden = False
    for a in range(len(d)):
        for c in range(len(e)):
            z.append(e[c][a])
        g = {
            "label": "CPU " + str(a),
            "data": z,
            "hidden": hidden,
            "pointRadius": 0,
            "borderColor": "rgba(" + str(randint(0, 255)) + "," +
            str(randint(0, 255)) + "," + str(randint(0, 255)) + ",1)"
        }
        hidden = True
        z = []
        h.append(g)
    cursor = connection.cursor()
    import time
    # Window bounds as epoch-second strings for the metrics tables.
    start_date = str(time.mktime(start_date.timetuple())).split(".")[0]
    current_time = str(time.mktime(current_time.timetuple())).split(".")[0]
    # NOTE(review): SQL is built by string concatenation; cluster_id comes from
    # the session and ip from the DB, but parameterized queries would be safer.
    sql = "select st.table_name,st.metrics_table from administer_services as s join service_table_ref as st " \
          "on s.id=st.service_id join administer_service_cluster_reference as ads " \
          "on s.id=ads.service_id where ads.cluster_id_id='" + cluster_id + "'"
    cursor.execute(sql)
    tables = cursor.fetchall()
    service_data = []
    for table in tables:
        # Per-service metric rows for this node inside the requested window.
        sql = "select sm.* from " + table[0] + " as s join " + table[1] + " as sm on s.id=sm.node_id " \
              + " and s.ip='" + ip + "'" \
              " and sm.updated_at >='" + str(start_date) + \
              "' and sm.updated_at <= '" + str(current_time) + "'"
        cursor.execute(sql)
        colnames = [desc[0] for desc in cursor.description]
        colnames.remove("id")
        colnames.remove("node_id")
        a = cursor.fetchall()
        z = []
        w = []
        date_l = []
        date = []
        if a:
            # hidden = False
            for i in range(len(colnames)):
                if colnames[i] == "updated_at":
                    # Timestamp column becomes the x-axis labels.
                    for j in range(len(a)):
                        pop_column = list(a[j])
                        pop_column.pop(0)
                        category = datetime.utcfromtimestamp(
                            pop_column[i]).strftime(format)
                        date.append(category)
                    date_l.append(date)
                    date = []
                else:
                    # Every other column becomes one chart dataset.
                    for j in range(len(a)):
                        pop_column = list(a[j])
                        pop_column.pop(0)
                        z.append(pop_column[i])
                    g = {
                        "label": colnames[i],
                        "data": z,
                        # "hidden": hidden,
                        "pointRadius": 0,
                        "borderColor": "rgba(" + str(randint(0, 255)) + "," +
                        str(randint(0, 255)) + "," + str(randint(0, 255)) + ",1)"
                    }
                    z = []
                    w.append(g)
            # hidden = True
            service_data.append({table[0]: [date_l[0], w]})
    return JsonResponse(
        {
            "date": dt,
            "cpu": h,
            "vm_shared": vm_shared,
            "vm_used": vm_used,
            "vm_free": vm_free,
            "vm_available": vm_available,
            "vm_total": vm_total,
            "vm_buffer": vm_buffer,
            "vm_cache": vm_cache,
            "sm_total": sm_total,
            "sm_used": sm_used,
            "sm_free": sm_free,
            "dio_write_count": dio_write_count,
            "dio_read_count": dio_read_count,
            "dio_read_time": dio_read_time,
            "dio_write_time": dio_write_time,
            "du_disk_free": du_disk_free,
            "du_disk_percentage": du_disk_percentage,
            "du_disk_used": du_disk_used,
            "du_disk_total": du_disk_total,
            "network": r,
            "process": p,
            "service_data": service_data
        },
        safe=False)