Example #1
def analyze_service():
    """
    Returns results from avg heuristic
    """
    LOG.info("Analyzing service instances with url : %s", request.url)
    recipe = request.get_json()
    service_type = 'stack'
    if recipe.get('exec_type') == 'docker':
        service_type = 'service'
    LOG.info(recipe)
    LOG.info(str(recipe['name']))
    workload = Workload(str(recipe['name']))
    # storing initial recipe
    # TODO: validate recipe format
    recipe_bean = Recipe()
    recipe_bean.from_json(recipe)
    workload.add_recipe(
        int("{}{}".format(int(round(time.time())), '000000000')), recipe_bean)
    pipe_exec = AnalyseServiceHistPipe()
    workload = pipe_exec.run(workload, service_type)
    analysis_description = {
        "name": recipe['name'],
        "id": recipe['id'],
        "analysis_id": workload.get_latest_recipe_time()
    }
    return Response(json.dumps(analysis_description), mimetype=MIME)
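
The recipe id passed to workload.add_recipe() above is built by appending nine
zeros to the current epoch seconds, which is the same as multiplying by 10**9
to get a nanosecond-resolution id. A minimal sketch of that idiom (the helper
name recipe_id_ns is ours, not part of the original code):

import time

def recipe_id_ns():
    # appending nine zeros to the epoch seconds == int(round(time.time())) * 10**9
    return int("{}{}".format(int(round(time.time())), '000000000'))
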
Example #2
def get_optimal_vms():
    """
    Returns results from avg heuristic
    """
    LOG.info("Retrieving Optimal_VMs with url : %s", request.url)
    recipe = request.get_json()
    LOG.info(recipe)
    LOG.info(str(recipe['name']))
    current_time = int(time.time())
    workload = Workload(str(recipe['name']),
                        ts_from=(current_time - 10),
                        ts_to=current_time)
    # storing initial recipe
    # TODO: validate recipe format
    recipe_bean = Recipe()
    recipe_bean.from_json(recipe)
    workload.add_recipe(
        int("{}{}".format(int(round(time.time())), '000000000')), recipe_bean)
    pipe_exec = OptimalPipe()
    node_type = 'vm'
    workload = pipe_exec.run(workload, node_type)
    results = workload.get_metadata(OptimalFilter.__filter_name__)
    json_results = json.dumps(results.to_dict('results'))
    return Response(json_results, mimetype=MIME)
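
If results here is a pandas DataFrame (an assumption; its type is not shown in
this snippet), the orient string 'results' only works because older pandas
versions matched the orient by prefix, so anything starting with 'r' fell
through to 'records'; recent pandas rejects unknown orients. The explicit
spelling is the safer equivalent, sketched on a toy stand-in:

import json
import pandas as pd

# toy stand-in for the OptimalFilter metadata; columns are illustrative only
results = pd.DataFrame([{"node": "vm-1", "score": 0.9}, {"node": "vm-2", "score": 0.4}])
json_results = json.dumps(results.to_dict('records'))  # explicit orient; one dict per row
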
Example #3
    def show(self, params):
        """
        Retrieves wl data from influx. Rebuilds the workload
        based on service_id and analysis_id.
        Currently it queries for a single workload retrieval.
        Can be extended to retrieve all history data relative to a workload.
        :param params: (service_id, analysis_id)
        :return:
        """
        service_id = params[0]
        analysis_id = params[1]
        query = ""

        if service_id and analysis_id:
            query = 'SELECT * FROM "workloads" WHERE service_id = \'{}\' ' \
                    'AND analysis_id = \'{}\';'.format(service_id, analysis_id)

        elif service_id:
            query = 'SELECT * FROM "workloads" WHERE service_id = \'{}\' ' \
                    'ORDER BY time DESC limit 1;'.format(service_id)
        else:
            LOG.error('A service_id must be specified.')
            return None
        LOG.info("QUERY: {}".format(query))
        results = self.client.query(query)
        # TODO: losing part of the history
        workload = None
        for item in results.items():  # just 1 item
            data_points = item[1]
            data = next(data_points)
            workload_name = str(data["service_id"])
            ts_from = int(data["ts_from"])
            ts_to = int(data["ts_to"])
            workload_config = data["deployment conf"]  # the recipe
            workload_config_type = data["configuration type"]

            workload = Workload(workload_name=workload_name,
                                ts_from=ts_from,
                                ts_to=ts_to,
                                workload_config=workload_config,
                                workload_config_type=workload_config_type)
            subgraph = data["subgraph"]
            if subgraph != "{}":
                nx_subgraph = json_graph.node_link_graph(json.loads(subgraph))
                workload.save_results(
                    'subgraph_filter',
                    infograph.get_info_graph(
                        landscape=nx_subgraph))  # JSON to infograph conversion needed
            recipe_time = int(data["recipe_time"])
            if recipe_time:
                recipe_json = data["recipe"]
                recipe = Recipe()
                recipe.from_json(recipe_json)
                workload.add_recipe(recipe_time, recipe)
            break
        return workload
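
Assuming the class above is the InfluxSink used by analyse() further down
(which calls show((id, None))), usage looks like this; both ids are
placeholders:

sink = InfluxSink()
# latest stored workload for a service (second branch: ORDER BY time DESC limit 1)
latest = sink.show(('my-service-id', None))
# one specific analysis (first branch: filter on service_id AND analysis_id)
specific = sink.show(('my-service-id', '1514764800000000000'))
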
Example #4
def get_optimal():
    """
    Returns results from avg heuristic
    """
    LOG.info("Retrieving Optimal with url : %s", request.url)
    recipe = request.get_json()
    LOG.info(recipe)
    LOG.info(str(recipe['name']))
    current_time = int(time.time())
    workload_name = 'optimal_' + str(current_time)
    # TODO: honour an explicit ts_from/ts_to window from the recipe, either by
    # creating the Workload with ts_from/ts_to or via Engine().run('optimal', ...)
    config = {}
    if recipe.get('device_id'):
        # normalise the device id: trim whitespace, lowercase, hyphens to underscores
        config['device_id'] = recipe['device_id'].strip().lower().replace(
            '-', '_')
    if recipe.get('project'):
        config['project'] = recipe['project']
    if recipe.get('sort_order'):
        config['sort_order'] = recipe['sort_order']
    if recipe.get('telemetry_filter', None) is not None:
        config['telemetry_filter'] = recipe['telemetry_filter']
    else:
        config['telemetry_filter'] = False
    workload = Workload(workload_name, workload_config=config)
    # storing initial recipe
    # TODO: validate recipe format
    recipe_bean = Recipe()
    recipe_bean.from_json(recipe)
    workload.add_recipe(
        int("{}{}".format(int(round(time.time())), '000000000')), recipe_bean)
    pipe_exec = OptimalPipe()
    node_type = 'machine'
    try:
        workload = pipe_exec.run(workload, node_type)
    except KeyError:
        return Response(
            'Service not ready yet, please wait or restart landscape',
            status=202)
    if workload.get_latest_graph() is None and config.get(
            'device_id') is not None:
        return Response('Device not found', status=404)
    if workload.get_latest_graph() is None:
        return Response('Landscape not ready yet?', status=202)
    results = workload.get_metadata(OptimalFilter.__filter_name__)
    json_results = json.dumps(results.to_dict('results'))
    return Response(json_results, mimetype=MIME)
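
For reference, a request body sketched from the fields get_optimal() actually
reads; the keys come from the code above, every value is an illustrative
placeholder:

recipe = {
    "name": "my-app",                # only logged by this handler
    "device_id": "Edge-Device-01",   # normalised to 'edge_device_01'
    "project": "demo-project",       # copied into the workload config
    "sort_order": "asc",             # copied into the workload config (format assumed)
    "telemetry_filter": True,        # defaults to False when omitted
}
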
Example #5
def analyse():
    """
    Triggers an analysis for the service identified by 'id' (or 'name') in the
    posted JSON, reusing a stored analysis when one already covers the
    requested ts_from/ts_to window.
    """
    params = request.get_json()
    id = params.get('id')
    name = params.get('name')
    if id:
        LOG.info(
            "Triggering analysis based on input service id: {}".format(id))
    elif name:
        LOG.info(
            "Triggering analysis based on input service name: {}".format(name))
    id = str(id) if id else str(name)
    influx_sink = InfluxSink()
    workload = influx_sink.show((id, None))
    recipes = {}
    if workload:
        recipes = workload.get_recipes()
    reuse = False
    for recipe_id in recipes:
        recipe = recipes[recipe_id]
        if recipe._ts_from == int(
                params.get('ts_from')) and recipe._ts_to == int(
                    params.get('ts_to')):
            reuse = True
            break
    if not workload or not reuse:
        # TODO: recipe should come from CIMI
        recipe = request.get_json()
        recipe_bean = Recipe()
        recipe_bean.from_json(recipe)
        workload = Workload(id, int(params['ts_from']), int(params['ts_to']))
        workload.add_recipe(
            int("{}{}".format(int(round(time.time())), '000000000')),
            recipe_bean)
    else:
        LOG.info("Reusing existing analysis")
    pipe_exec = AnalysePipe()
    workload = pipe_exec.run(workload)
    recipe = workload.get_latest_recipe()
    analysis_description = {
        "id": recipe.get_service_id(),
        "name": workload.get_service_name(),
        "analysis_id": workload.get_latest_recipe_time()
    }
    return Response(json.dumps(analysis_description), mimetype=MIME)
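
The fields analyse() reads, again with placeholder values. When a stored
recipe for the same service already matches ts_from and ts_to, the existing
analysis is reused instead of creating a new workload:

params = {
    "id": "my-service-id",   # takes precedence over "name" when both are set
    "name": "my-app",
    "ts_from": 1514764800,   # assumed epoch seconds; compared against recipe._ts_from
    "ts_to": 1514764860,     # assumed epoch seconds; compared against recipe._ts_to
}
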
Example #6
def get_subgraph_telemetry():
    """
    Returns all services currently active
    """
    LOG.info("Retrieving Node subgraph Telemetry with url : %s", request.url)
    recipe = request.get_json()
    LOG.info(recipe)
    LOG.info(str(recipe['name']))
    workload = Workload(str(recipe['name']))
    # storing initial recipe
    # TODO: validate recipe format
    recipe_bean = Recipe()
    recipe_bean.from_json(recipe)
    workload.add_recipe(
        int("{}{}".format(int(round(time.time())), '000000000')), recipe_bean)
    pipe_exec = NodeSubgraphTelemetryPipe()
    workload = pipe_exec.run(workload)
    analysis_description = {
        "node_id": recipe['name'],
        "analysis_id": workload.get_latest_recipe_time()
    }
    return Response(json.dumps(analysis_description), mimetype=MIME)