Example #1
 def get(self, driver, mac_or_name):
     try:
         driver = to_driver(driver)
         flag = int(request.args.get('flag', 0))
         return output_json(driver.inspect_host_interface(mac_or_name, flag), 200)
     except ValueError:
         abort(400, message="bad parameter")
Example #2
 def post(self, driver, uuid_or_name):
     try:
         driver = to_driver(driver)
         flag = int(request.args.get('flag', 0))
         return output_json(driver.restart_node(uuid_or_name, flag), 204)
     except ValueError:
         abort(400, message="bad parameter")
Example #3
def output_json_exception(data, code, *args, **kwargs):
    """Render exceptions as JSON documents with the 
        exception's message."""
    if isinstance(data, Exception):
        data = {'status': code, 'message': str(data)}

    return output_json(data, code, *args, **kwargs)
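A minimal sketch (assumed wiring, not shown in the examples) of how a renderer like output_json_exception is typically hooked into flask_restful, so that handlers returning an Exception instance are serialized through it:

from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)

@api.representation('application/json')
def render_json(data, code, headers=None):
    # delegate to the exception-aware renderer from Example #3
    return output_json_exception(data, code, headers)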
Example #4
 def post(self, cluster_name):
     try:
         json_data = request.get_json(force=True)
         json_str = json.dumps(json_data)
         dict_data = JSONDecoder().decode(json_str)
         project_name = request.args.get('project', "admin")
         masterIP = shell.call(
             get_sahara_cluster_s_masterIP_cmd(project_name,
                                               cluster_name)).strip()
         sshKeyPath = dict_data.get('sshKeyPath')
         jar_dir = dict_data.get('jarDir')
         jar_name = dict_data.get('jarName')
         jar_class = dict_data.get('jarClass')
         jar_params = dict_data.get('jarParams')
         if not masterIP or not sshKeyPath or not jar_dir or not jar_name or not jar_params:
             abort(400, message="bad parameter in request body")
         # derive the basename only after jar_dir has been validated, so a
         # missing jarDir aborts with 400 instead of raising TypeError
         base_jar_dir = os.path.basename(jar_dir)
         shell.call("scp -i %s -r %s centos@%s:/home/centos" %
                    (sshKeyPath, jar_dir, masterIP))
         if base_jar_dir == 'jars':
             shell.call("ssh -i %s centos@%s \"sudo su - -c \'cp -R /home/centos/%s /home/hadoop && chown -R hadoop:hadoop /home/hadoop/jars\'\"" \
                    % (sshKeyPath, masterIP, base_jar_dir))
         else:
             shell.call("ssh -i %s centos@%s \"sudo su - -c \'cp -R /home/centos/%s /home/hadoop && mv /home/hadoop/%s /home/hadoop/jars && chown -R hadoop:hadoop /home/hadoop/jars\'\"" \
                        % (sshKeyPath, masterIP, base_jar_dir, base_jar_dir))
         output = shell.call("ssh -i %s centos@%s \"sudo su - -c \'%s\' hadoop\"" \
                                 %(sshKeyPath, masterIP, submit_job_on_hadoop_cmd(jar_name, jar_class, jar_params))).strip()
         return output_json(output, 200)
     except Exception:
         log.exception(traceback.format_exc())
         abort(400, message="Request failed")
Example #5
File: api.py Project: strinh418/ok
def envelope_api(data, code, headers=None):
    """ API response envelope (for metadata/pagination).
    Wraps JSON response in envelope to match v1 API output format.
    This is for successful requests only. Exceptions are handled elsewhere.

        data is the object returned by the API.
        code is the HTTP status code as an int
        message will always be success since the request did not fail.
    """
    if request.args.get('envelope') == 'false':
        return output_json(data, code, headers)
    message = 'success'
    if 'message' in data:
        message = data['message']
        del data['message']
    data = {'data': data, 'code': code, 'message': message}
    return output_json(data, code, headers)
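As a standalone illustration of the wrapping above (hypothetical helper name; the envelope=false opt-out is omitted because it needs a request context):

def wrap_envelope(data, code):
    # pull an optional 'message' out of the payload (defaulting to 'success')
    # and nest the remaining payload under 'data' next to the status code
    message = data.pop('message', 'success')
    return {'data': data, 'code': code, 'message': message}

# wrap_envelope({'id': 42, 'message': 'created'}, 201)
# -> {'data': {'id': 42}, 'code': 201, 'message': 'created'}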
Example #6
 def get(self, driver):
     try:
         driver = to_driver(driver) 
         list_all = to_bool(request.args.get('all', False))
         flag = int(request.args.get('flag', 0))
         return output_json(driver.list_nodes(flag, list_all), 200)
     except ValueError:
         abort(400, message="bad parameter")
Example #7
 def post(self, driver, uuid_or_name):
     try:
         driver = to_driver(driver)
         json_data = request.get_json(force=True)
         flag = int(request.args.get('flag', 0))
         return output_json(driver.node_update_device(uuid_or_name, json_data, flag), 201)
     except ValueError:
         abort(400, message="bad parameter") 
Example #8
 def post(self, driver, uuid_or_name):
     try:
         driver = to_driver(driver)
         vcpu = int(request.args.get('size', 0))
         flag = int(request.args.get('flag', 0))
         if not vcpu:
             abort(400, message="bad parameter")
         return output_json(driver.node_resize_cpu(uuid_or_name, vcpu, flag), 204)
     except ValueError:
         abort(400, message="bad parameter") 
Example #9
def envelope_api(data, code, headers=None):
    """ API response envelope (for metadata/pagination).
    Optionally wraps JSON response in envelope.
    This is for successful requests only.

        data is the object returned by the API.
        code is the HTTP status code as an int
    """
    if not request.args.get('envelope'):
        return output_json(data, code, headers)
    message = 'success'
    if 'message' in data:
        message = data['message']
        del data['message']
    data = {
        'data': data,
        'code': code,
        'message': message
    }
    return output_json(data, code, headers)
Example #10
 def post(self, driver, uuid_or_name):
     try:
         driver = to_driver(driver)
         memory = int(request.args.get('size', 0))
         memory_type = request.args.get('type', None)
         flag = int(request.args.get('flag', 0))
         if not memory or not memory_type:
             abort(400, message="bad parameter")
         return output_json(driver.node_resize_memory(uuid_or_name, memory, memory_type, flag), 204)
     except ValueError:
         abort(400, message="bad parameter") 
Example #11
 def post(self):
     try:
         json_data = request.get_json(force=True)
         json_str = json.dumps(json_data)
         dict_data = JSONDecoder().decode(json_str)
         project_name = request.args.get('project', "admin")
         master_node_template_name = dict_data.get('masterNodeTemplateName')
         master_node_template_plugin = dict_data.get(
             'masterNodeTemplatePlugin')
         master_node_template_plugin_version = dict_data.get(
             'masterNodeTemplatePluginVersion')
         master_node_template_processes = dict_data.get(
             'masterNodeTemplateProcesses')
         master_node_template_flavor = dict_data.get(
             'masterNodeTemplateFlavor')
         master_node_template_floating_ip_pool = dict_data.get(
             'masterNodeTemplateFloatingIpPool')
         worker_node_template_name = dict_data.get('workerNodeTemplateName')
         worker_node_template_plugin = dict_data.get(
             'workerNodeTemplatePlugin')
         worker_node_template_plugin_version = dict_data.get(
             'workerNodeTemplatePluginVersion')
         worker_node_template_processes = dict_data.get(
             'workerNodeTemplateProcesses')
         worker_node_template_flavor = dict_data.get(
             'workerNodeTemplateFlavor')
         worker_node_template_floating_ip_pool = dict_data.get(
             'workerNodeTemplateFloatingIpPool')
         cluster_template_name = dict_data.get('clusterTemplateName')
         cluster_worker_count = dict_data.get('clusterWorkerCount')
         required = (
             master_node_template_name, master_node_template_plugin,
             master_node_template_plugin_version, master_node_template_processes,
             master_node_template_flavor, master_node_template_floating_ip_pool,
             worker_node_template_name, worker_node_template_plugin,
             worker_node_template_plugin_version, worker_node_template_processes,
             worker_node_template_flavor, worker_node_template_floating_ip_pool,
             cluster_template_name, cluster_worker_count)
         if not all(required):
             abort(400, message="bad parameter in request body")
         shell.call(create_sahara_node_group_template_cmd(project_name, master_node_template_name, master_node_template_plugin, \
                              master_node_template_plugin_version, master_node_template_processes, \
                              master_node_template_flavor, master_node_template_floating_ip_pool))
         shell.call(create_sahara_node_group_template_cmd(project_name, worker_node_template_name, worker_node_template_plugin, \
                              worker_node_template_plugin_version, worker_node_template_processes, \
                              worker_node_template_flavor, worker_node_template_floating_ip_pool))
         output = shell.call(create_sahara_cluster_template_cmd(project_name, cluster_template_name, master_node_template_name, \
                                              worker_node_template_name, cluster_worker_count)).strip()
         return output_json(output, 200)
     except Exception:
         log.exception(traceback.format_exc())
         abort(400, message="Request failed")
Example #12
 def post(self, cluster_name):
     try:
         project_name = request.args.get('project', "admin")
         json_data = request.get_json(force=True)
         json_str = json.dumps(json_data)
         dict_data = JSONDecoder().decode(json_str)
         masterIP = shell.call(
             get_sahara_cluster_s_masterIP_cmd(project_name,
                                               cluster_name)).strip()
         sshKeyPath = dict_data.get('sshKeyPath')
         restartDataNode = dict_data.get('restartDataNode', False)
         restartNameNode = dict_data.get('restartNameNode', False)
         restartHistoryServer = dict_data.get('restartHistoryServer', False)
         restartNodeManager = dict_data.get('restartNodeManager', False)
         vcpuNum = dict_data.get('vcpuNum')
         memMB = dict_data.get('memMB')
         vcpuNumOfContainer = dict_data.get('vcpuNumOfContainer')
         memMBOfContainer = dict_data.get('memMBOfContainer')
         if not masterIP or not sshKeyPath or not vcpuNum or not memMB or not vcpuNumOfContainer or not memMBOfContainer:
             abort(400, message="bad parameter in request body")
         else:
             slaveIPArray = shell.call(
                 get_sahara_cluster_s_slavesIP_cmd(project_name,
                                                   cluster_name)).strip()
             slaveIPArrayStr = str(slaveIPArray).replace("\n", " ")
             restartServicesArrayStr = "resourcemanager%s%s%s%s" \
             % (" nodemanager" if to_bool(restartNodeManager) else "", \
                " namenode" if to_bool(restartNameNode) else "", \
                " datanode" if to_bool(restartDataNode) else "", \
                " historyserver" if to_bool(restartHistoryServer) else "")
             script_setting_str = "master_ip=%s\nslave_ip_array=(%s)\ncluster_name=%s\nrestart_services_array=(%s)\nuser=centos\nssh_key=\\\"%s\\\"\nyarn_vcpu_new_value=\\\"<value>%s<\/value>\\\"\nyarn_mem_new_value=\\\"<value>%s<\/value>\\\"\nyarn_container_vcpu_new_value=\\\"<value>%s<\/value>\\\"\nyarn_container_mem_new_value=\\\"<value>%s<\/value>\\\"" \
                                 % (masterIP, slaveIPArrayStr, cluster_name, restartServicesArrayStr, sshKeyPath, str(vcpuNum), str(memMB), str(vcpuNumOfContainer), str(memMBOfContainer))
             work_path = "/home/optimizer/%s" % cluster_name
             if not os.path.exists(work_path):
                 os.makedirs(work_path)
             shell.call("echo \"%s\" > %s/cluster_yarn.conf" %
                        (script_setting_str, work_path))
             shell.call(
                 "/usr/bin/cp -f /home/optimizer/scripts/cluster_yarn.sh %s"
                 % (work_path))
             output = shell.call(
                 "/usr/bin/bash cluster_yarn.sh reconfigure",
                 workdir=work_path)
         return output_json(output, 200)
     except Exception:
         log.exception(traceback.format_exc())
         abort(400, message="Request failed")
Example #13
 def post(self, cluster_name):
     try:
         project_name = request.args.get('project', "admin")
         json_data = request.get_json(force=True)
         json_str = json.dumps(json_data)
         dict_data = JSONDecoder().decode(json_str)
         size = dict_data.get('size')
         worker_template_name = dict_data.get('workerTemplateName')
         if not size:
             abort(400, message="bad parameter in request body")
         output = shell.call(
             scale_sahara_cluster_cmd(project_name, cluster_name,
                                      worker_template_name, size)).strip()
         return output_json(output, 200)
     except Exception:
         log.exception(traceback.format_exc())
         abort(400, message="Request failed")
Example #14
 def get(self, cluster_name):
     try:
         retv = {}
         work_path = "/home/optimizer/%s" % cluster_name
         if not os.path.exists(work_path):
             os.makedirs(work_path)
         project_name = request.args.get('project', "admin")
         output = shell.call(
             get_sahara_cluster_s_masterIP_cmd(project_name,
                                               cluster_name)).strip()
         retv['masterIP'] = output
         shell.call(
             write_sahara_cluster_s_slavesIP_to_file_cmd(
                 project_name, cluster_name, work_path)).strip()
         return output_json("%s" % retv, 200)
     except Exception:
         log.exception(traceback.format_exc())
         abort(400, message="Request failed")
Example #15
File: api.py Project: gratimax/ok
def envelope_api(data, code, headers=None):
    """ API response envelope (for metadata/pagination).
    Wraps JSON response in envelope to match v1 API output format.
    This is for successful requests only. Exceptions are handled elsewhere.

        data is the object returned by the API.
        code is the HTTP status code as an int
        message will always be success since the request did not fail.
    """
    message = 'success'
    if 'message' in data:
        message = data['message']
        del data['message']
    data = {
        'data': data,
        'code': code,
        'message': message
    }
    return output_json(data, code, headers)
Example #16
 def post(self):
     try:
         json_data = request.get_json(force=True)
         json_str = json.dumps(json_data)
         dict_data = JSONDecoder().decode(json_str)
         project_name = request.args.get('project', "admin")
         cluster_name = dict_data.get('clusterName')
         template_name = dict_data.get('template')
         key_pair = dict_data.get('keyPair')
         neutron_private_network = dict_data.get('privateNetwork')
         image = dict_data.get('image')
         if not project_name or not cluster_name or not template_name or not key_pair or not neutron_private_network or not image:
             abort(400, message="bad parameter in request body")
         output = shell.call(create_sahara_cluster_from_template_cmd(project_name, cluster_name, template_name, \
                                                                     key_pair, neutron_private_network, image)).strip()
         return output_json(output, 200)
     except Exception:
         log.exception(traceback.format_exc())
         abort(400, message="Request failed")
Example #17
 def post(self, cluster_name):
     try:
         json_data = request.get_json(force=True)
         json_str = json.dumps(json_data)
         dict_data = JSONDecoder().decode(json_str)
         workDir = dict_data.get('workDir')
         scriptName = dict_data.get('scriptName', self.scriptName)
         sshKeyPath = dict_data.get('sshKeyPath')
         project_name = request.args.get('project', "admin")
         masterIP = shell.call(
             get_sahara_cluster_s_masterIP_cmd(project_name,
                                               cluster_name)).strip()
         if not workDir or not sshKeyPath or not masterIP:
             abort(400, message="bad parameter in request body")
         output = shell.call("ssh -i %s centos@%s \"cd %s; /usr/bin/bash %s slave\"" \
                             %(sshKeyPath, masterIP, workDir, scriptName))
         return output_json(output, 200)
     except Exception:
         log.exception(traceback.format_exc())
         abort(400, message="Request failed")
Example #18
 def post(self, cluster_name):
     try:
         ip_file = "/home/optimizer/%s/slave" % cluster_name
         project_name = request.args.get('project', "admin")
         json_data = request.get_json(force=True)
         json_str = json.dumps(json_data)
         dict_data = JSONDecoder().decode(json_str)
         sourceDir = dict_data.get('sourceDir', self.sourceDir)
         scriptName = dict_data.get('scriptName', self.scriptName)
         sshKeyPath = dict_data.get('sshKeyPath')
         masterIP = shell.call(
             get_sahara_cluster_s_masterIP_cmd(project_name,
                                               cluster_name)).strip()
         if not sshKeyPath or not masterIP:
             abort(400, message="bad parameter in request body")
         output = shell.call("/usr/bin/bash %s/%s %s %s %s &" \
                              %(sourceDir, scriptName, ip_file, sshKeyPath, masterIP), \
                             workdir=sourceDir)
         return output_json(output, 200)
     except Exception:
         log.exception(traceback.format_exc())
         abort(400, message="Request failed")
Example #19
 def post(self, cluster_name):
     try:
         project_name = request.args.get('project', "admin")
         json_data = request.get_json(force=True)
         json_str = json.dumps(json_data)
         dict_data = JSONDecoder().decode(json_str)
         masterIP = shell.call(
             get_sahara_cluster_s_masterIP_cmd(project_name,
                                               cluster_name)).strip()
         sshKeyPath = dict_data.get('sshKeyPath')
         rpms_dir = dict_data.get('rpmsDir',
                                  '/home/optimizer/rpms/ganglia-rpms')
         if not masterIP or not sshKeyPath:
             abort(400, message="bad parameter in request body")
         else:
             slaveIPArray = shell.call(
                 get_sahara_cluster_s_slavesIP_cmd(project_name,
                                                   cluster_name)).strip()
             slaveIPArrayStr = str(slaveIPArray).replace("\n", " ")
             script_setting_str = "master_ip=%s\nslave_ip_array=(%s)\ncluster_name=%s\nuser=centos\nssh_key=\\\"%s\\\"\nganglia_rpms=\\\"%s\\\"\n" \
                                 % (masterIP, slaveIPArrayStr, cluster_name, sshKeyPath, rpms_dir)
             work_path = "/home/optimizer/%s" % cluster_name
             if not os.path.exists(work_path):
                 os.makedirs(work_path)
             shell.call("echo \"%s\" > %s/cluster_yarn.conf" %
                        (script_setting_str, work_path))
             shell.call(
                 "/usr/bin/cp -f /home/optimizer/scripts/cluster_yarn.sh %s"
                 % (work_path))
             output = shell.call(
                 "/usr/bin/bash cluster_yarn.sh restart-ganglia",
                 workdir=work_path)
         return output_json(output, 200)
     except Exception:
         log.exception(traceback.format_exc())
         abort(400, message="Request failed")
Example #20
File: api.py Project: Xzya/navitia
def output_jsonp(data, code, headers=None):
    resp = json.output_json(data, code, headers)
    callback = request.args.get('callback', False)
    if callback:
        resp.data = six.text_type(callback) + '(' + resp.data + ')'
    return resp
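For reference, with a hypothetical ?callback=handleResult query string, the wrapping above turns the JSON body into a script-friendly call:

body = '{"status": "ok"}'            # resp.data rendered by json.output_json
callback = 'handleResult'            # request.args.get('callback')
print(callback + '(' + body + ')')   # handleResult({"status": "ok"})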
Example #21
 def post(self, driver, uuid_or_name):
     try:
         driver = to_driver(driver)
         return output_json(driver.resume_node(uuid_or_name), 204)
     except ValueError:
         abort(400, message="bad parameter")   
Example #22
 def get(self, driver):
     try:
         driver = to_driver(driver)
         return output_json(driver.inspect_host_system(), 200)
     except ValueError:
         abort(400, message="bad parameter")
Example #23
 def post(self, driver):
     driver = to_driver(driver)
     json_data = request.get_json(force=True)
     vm_uuid = driver.create_node(json_data)
     return output_json({"Uuid": vm_uuid, "Warnings": []}, 201)
Example #24
 def post(self, cluster_name):
     try:
         work_path = "/home/optimizer/%s" % cluster_name
         if not os.path.exists(work_path):
             os.makedirs(work_path)
         project_name = request.args.get('project', "admin")
         json_data = request.get_json(force=True)
         json_str = json.dumps(json_data)
         dict_data = JSONDecoder().decode(json_str)
         jobID = dict_data.get('jobID')
         workDir = dict_data.get('workDir')
         compute_node_max_cpu_core = dict_data.get('computeNodeMaxCpuCore')
         compute_node_max_memory_gb = dict_data.get(
             'computeNodeMaxMemoryGb')
         compute_node_num = dict_data.get('computeNodeNum')
         scriptName = dict_data.get('scriptName', self.scriptName)
         sshKeyPath = dict_data.get('sshKeyPath')
         masterIP = shell.call(
             get_sahara_cluster_s_masterIP_cmd(project_name,
                                               cluster_name)).strip()
         if not workDir or not sshKeyPath or not masterIP or not jobID or not compute_node_max_cpu_core or not compute_node_max_memory_gb or not compute_node_num:
             abort(400, message="bad parameter in request body")
         jhist_json_path = "%s/%s-trace.json" % (work_path, jobID)
         topology_json_path = "%s/%s-topology.json" % (work_path, jobID)
         jhist_path = shell.call("ssh -i %s centos@%s \"sudo su - -c \'%s\' hadoop\"" \
                                 %(sshKeyPath, masterIP, find_jhist_file_in_hdfs_cmd(jobID))).strip()
         shell.call("ssh -i %s centos@%s \"sudo su - -c \'%s\' hadoop\"" \
                     %(sshKeyPath, masterIP, analysis_job_with_hadoop_rumen_cmd(cluster_name, jobID, jhist_path)))
         shell.call("ssh -i %s centos@%s \"sudo su - -c \'cat /home/hadoop/%s/%s-trace.json\' hadoop\" > %s" \
                    % (sshKeyPath, masterIP, cluster_name, jobID, jhist_json_path))
         shell.call("ssh -i %s centos@%s \"sudo su - -c \'cat /home/hadoop/%s/%s-topology.json\' hadoop\" > %s" \
                    % (sshKeyPath, masterIP, cluster_name, jobID, topology_json_path))
         get_yarn_max_cpu = shell.call(
             "ssh -i %s centos@%s \"sudo grep -A 1 \'yarn.scheduler.maximum-allocation-vcores\' /opt/hadoop/etc/hadoop/yarn-site.xml | tail -1 \""
             % (sshKeyPath, masterIP)).strip()
         get_yarn_max_memory_mb = shell.call(
             "ssh -i %s centos@%s \"sudo grep -A 1 \'yarn.scheduler.maximum-allocation-mb\' /opt/hadoop/etc/hadoop/yarn-site.xml | tail -1 \""
             % (sshKeyPath, masterIP)).strip()
         get_yarn_container_cpu = shell.call(
             "ssh -i %s centos@%s \"sudo grep -A 1 \'yarn.scheduler.minimum-allocation-vcores\' /opt/hadoop/etc/hadoop/yarn-site.xml | tail -1 \""
             % (sshKeyPath, masterIP)).strip()
         get_yarn_container_memory_mb = shell.call(
             "ssh -i %s centos@%s \"sudo grep -A 1 \'yarn.scheduler.minimum-allocation-mb\' /opt/hadoop/etc/hadoop/yarn-site.xml | tail -1 \""
             % (sshKeyPath, masterIP)).strip()
         yarn_max_cpu = int(re.sub(
             r'\D', '', get_yarn_max_cpu)) if get_yarn_max_cpu else 8
         yarn_max_memory_mb = int(re.sub(
             r'\D', '',
             get_yarn_max_memory_mb)) if get_yarn_max_memory_mb else 8192
         yarn_container_cpu = int(re.sub(
             r'\D', '',
             get_yarn_container_cpu)) if get_yarn_container_cpu else 1
         yarn_container_memory_mb = int(
             re.sub(r'\D', '', get_yarn_container_memory_mb)
         ) if get_yarn_container_memory_mb else 1024
         yarn_cluster_workers_number = self._get_workers_number_from_topology_json(
             topology_json_path)
         actual_workers = int(
             shell.call(
                 "ssh -i %s centos@%s \"grep %s /etc/hosts | wc -l\"" %
                 (sshKeyPath, masterIP, cluster_name)).strip()) - 1
         output = self._analysis_jhist_json(
             jhist_json_path, yarn_cluster_workers_number, actual_workers,
             yarn_max_memory_mb, yarn_max_cpu, yarn_container_memory_mb,
             yarn_container_cpu, compute_node_max_memory_gb,
             compute_node_max_cpu_core, compute_node_num)
         return output_json(output, 200)
     except Exception:
         log.exception(traceback.format_exc())
         abort(400, message="Request failed")