Example 1
def analyze_service():
    """
    Returns results from avg heuristic
    """
    LOG.info("Analyzing service instances with url : %s", request.url)
    recipe = request.get_json()
    # docker recipes are analysed as 'service', everything else as 'stack'
    service_type = 'stack'
    if recipe.get('exec_type') == 'docker':
        service_type = 'service'
    LOG.info(recipe)
    LOG.info(str(recipe['name']))
    workload = Workload(str(recipe['name']))
    # storing initial recipe
    # TODO: validate recipe format
    recipe_bean = Recipe()
    recipe_bean.from_json(recipe)
    # recipe key: epoch seconds padded to nanosecond resolution
    workload.add_recipe(
        int("{}{}".format(int(round(time.time())), '000000000')), recipe_bean)
    pipe_exec = AnalyseServiceHistPipe()
    workload = pipe_exec.run(workload, service_type)
    analysis_description = {
        "name": recipe['name'],
        "id": recipe['id'],
        "analysis_id": workload.get_latest_recipe_time()
    }
    return Response(json.dumps(analysis_description), mimetype=MIME)
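
A minimal client-side sketch of how this handler might be invoked. The route and host are assumptions, since the route decorators are stripped from this listing; the payload keys are the ones read by the code above:

import requests

# Hypothetical route and host; 'name', 'id' and 'exec_type' are the
# fields read by analyze_service above.
recipe = {
    "name": "my-service",
    "id": "service-0001",
    "exec_type": "docker",  # selects service_type 'service' instead of 'stack'
}
resp = requests.post("http://localhost:5000/analyse_service", json=recipe)
print(resp.json())  # {"name": ..., "id": ..., "analysis_id": ...}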
Example 2
def get_optimal_vms():
    """
    Returns results from avg heuristic
    """
    LOG.info("Retrieving Optimal_VMs with url : %s", request.url)
    recipe = request.get_json()
    LOG.info(recipe)
    LOG.info(str(recipe['name']))
    current_time = int(time.time())
    # analyse only the last 10 seconds of telemetry
    workload = Workload(str(recipe['name']),
                        ts_from=(current_time - 10),
                        ts_to=current_time)
    # storing initial recipe
    # TODO: validate recipe format
    recipe_bean = Recipe()
    recipe_bean.from_json(recipe)
    workload.add_recipe(
        int("{}{}".format(int(round(time.time())), '000000000')), recipe_bean)
    pipe_exec = OptimalPipe()
    node_type = 'vm'
    workload = pipe_exec.run(workload, node_type)
    results = workload.get_metadata(OptimalFilter.__filter_name__)
    json_results = json.dumps(results.to_dict('results'))
    return Response(json_results, mimetype=MIME)
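
Throughout these examples the recipe key is built by appending nine zeros to the epoch seconds, i.e. a nanosecond-resolution timestamp (InfluxDB's default precision). A sketch of the arithmetic equivalent:

import time

now = int(round(time.time()))
key_via_strings = int("{}{}".format(now, '000000000'))
key_via_math = now * 10**9  # same key, no string round-trip
assert key_via_strings == key_via_math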
Example 3
def get_active_services():
    """
    Returns all services currently active
    """
    LOG.info("Retrieving Active Services with url : %s", request.url)
    pipe_exec = ActiveServicePipe()
    workload = Workload("ActiveServiceList")
    workload = pipe_exec.run(workload)
    res = workload.get_latest_graph()
    return Response(json.dumps(json_graph.node_link_data(res)), mimetype=MIME)
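
The route decorators are not shown in this listing. A minimal sketch of how handlers like these are typically registered, with hypothetical paths:

from flask import Flask

app = Flask(__name__)

# Hypothetical paths; the actual routes are stripped from this listing.
app.add_url_rule('/analyse_service', view_func=analyze_service,
                 methods=['POST'])
app.add_url_rule('/active_services', view_func=get_active_services,
                 methods=['GET'])

if __name__ == '__main__':
    app.run(port=5000)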
Example 4
def __init__(self, pipe, workload_name, ts_from, ts_to, config,
             config_type, analysis_id):
    # required initialization
    # Init Common
    self._setup_framework_base_directory()
    self.pipe = pipe
    if pipe == 'optimal':
        # the optimal pipe ignores the time window and generates
        # its own workload name
        ts_from = 0
        ts_to = 0
        workload_name = pipe + "_" + str(int(time.time()))
    self.workload = Workload(workload_name, ts_from, ts_to, config,
                             config_type)
    self.analysis_id = analysis_id
Example 5
def get_optimal():
    """
    Returns results from avg heuristic
    """
    LOG.info("Retrieving Optimal with url : %s", request.url)
    recipe = request.get_json()
    LOG.info(recipe)
    LOG.info(str(recipe['name']))
    current_time = int(time.time())
    workload_name = 'optimal_' + str(current_time)
    config = {}
    if recipe.get('device_id'):
        # normalise the device id: lower-case, dashes to underscores
        config['device_id'] = recipe['device_id'].strip().lower().replace(
            '-', '_')
    if recipe.get('project'):
        config['project'] = recipe['project']
    if recipe.get('sort_order'):
        config['sort_order'] = recipe['sort_order']
    if recipe.get('telemetry_filter', None) is not None:
        config['telemetry_filter'] = recipe['telemetry_filter']
    else:
        config['telemetry_filter'] = False
    workload = Workload(workload_name, workload_config=config)
    # storing initial recipe
    # TODO: validate recipe format
    recipe_bean = Recipe()
    recipe_bean.from_json(recipe)
    workload.add_recipe(
        int("{}{}".format(int(round(time.time())), '000000000')), recipe_bean)
    pipe_exec = OptimalPipe()
    node_type = 'machine'
    try:
        workload = pipe_exec.run(workload, node_type)
    except KeyError:
        return Response(
            'Service not ready yet, please wait or restart landscape',
            status=202)
    if workload.get_latest_graph() is None and config.get(
            'device_id') is not None:
        return Response('Device not found', status=404)
    if workload.get_latest_graph() is None:
        return Response('Landscape not ready yet?', status=202)
    results = workload.get_metadata(OptimalFilter.__filter_name__)
    json_results = json.dumps(results.to_dict('results'))
    return Response(json_results, mimetype=MIME)
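
A sketch of the JSON payload this handler accepts. Only the keys read by the code above are shown; all of them except 'name' are optional, and the route and the value formats are assumptions:

import requests

payload = {
    "name": "my-service",
    "device_id": "Node-A1",    # normalised to 'node_a1' by the handler
    "project": "demo",         # hypothetical value
    "sort_order": "asc",       # hypothetical value; format not shown
    "telemetry_filter": True,  # defaults to False when omitted
}
resp = requests.post("http://localhost:5000/optimal", json=payload)
if resp.status_code in (202, 404):
    print(resp.text)    # landscape not ready, or device not found
else:
    print(resp.json())  # results of the optimal heuristic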
Example 6
def get_subgraph_telemetry():
    """
    Returns all services currently active
    """
    LOG.info("Retrieving Node subgraph Telemetry with url : %s", request.url)
    recipe = request.get_json()
    LOG.info(recipe)
    LOG.info(str(recipe['name']))
    workload = Workload(str(recipe['name']))
    # storing initial recipe
    # TODO: validate recipe format
    recipe_bean = Recipe()
    recipe_bean.from_json(recipe)
    workload.add_recipe(
        int("{}{}".format(int(round(time.time())), '000000000')), recipe_bean)
    pipe_exec = NodeSubgraphTelemetryPipe()
    workload = pipe_exec.run(workload)
    analysis_description = {
        "node_id": recipe['name'],
        "analysis_id": workload.get_latest_recipe_time()
    }
    return Response(json.dumps(analysis_description), mimetype=MIME)
Example 7
    def show(self, params):
        """
        Retrieves wl data from influx. Rebuilds the workload
        based on service_id and analysis_id.
        Currently it queries for a single workload retrieval.
        Can be extended to retrieve all history data relative to a workload.
        :param params: (service_id, analysis_id)
        :return:
        """
        service_id = params[0]
        analysis_id = params[1]
        query = ""

        if service_id and analysis_id:
            query = 'SELECT * FROM "workloads" WHERE service_id = \'{}\' ' \
                    'AND analysis_id = \'{}\';'.format(service_id, analysis_id)

        elif service_id:
            query = 'SELECT * FROM "workloads" WHERE service_id = \'{}\' ' \
                    'ORDER BY time DESC limit 1;'.format(service_id)
        else:
            LOG.error('A service_id (optionally with an analysis_id) '
                      'must be specified.')
            return None
        LOG.info("QUERY: {}".format(query))
        results = self.client.query(query)
        # TODO: losing part of the history
        workload = None
        for item in results.items():  # just 1 item
            data_points = item[1]
            data = next(data_points)
            workload_name = str(data["service_id"])
            ts_from = int(data["ts_from"])
            ts_to = int(data["ts_to"])
            workload_config = data["deployment conf"]  # the recipe
            workload_config_type = data["configuration type"]

            workload = Workload(workload_name=workload_name,
                                ts_from=ts_from,
                                ts_to=ts_to,
                                workload_config=workload_config,
                                workload_config_type=workload_config_type)
            subgraph = data["subgraph"]
            if subgraph != "{}":
                nx_subgraph = json_graph.node_link_graph(json.loads(subgraph))
                workload.save_results(
                    'subgraph_filter',
                    infograph.get_info_graph(
                        landscape=nx_subgraph))  # convert JSON graph to infograph
            recipe_time = int(data["recipe_time"])
            if recipe_time:
                recipe_json = data["recipe"]
                recipe = Recipe()
                recipe.from_json(recipe_json)
                workload.add_recipe(recipe_time, recipe)
            break
        return workload
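
The queries above interpolate service_id directly into InfluxQL, which breaks if the id ever contains a quote. A sketch of the same lookup with bound parameters instead, assuming the influxdb-python client (>= 5.2), which supports bind_params for InfluxQL; the connection details are hypothetical:

from influxdb import InfluxDBClient

client = InfluxDBClient(host='localhost', port=8086, database='analytics')
results = client.query(
    'SELECT * FROM "workloads" WHERE service_id = $sid '
    'ORDER BY time DESC LIMIT 1;',
    bind_params={'sid': 'my-service'})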
Example 8
def analyse():
    """
    Triggers the analysis of a service, identified by id or name,
    reusing a previous analysis when one covers the same time window.
    """
    params = request.get_json()
    service_id = params.get('id')
    name = params.get('name')
    if service_id:
        LOG.info("Triggering analysis based on input service id: {}".format(
            service_id))
    elif name:
        LOG.info(
            "Triggering analysis based on input service name: {}".format(name))
    service_id = str(service_id) if service_id else str(name)
    influx_sink = InfluxSink()
    workload = influx_sink.show((service_id, None))
    recipes = {}
    if workload:
        recipes = workload.get_recipes()
    reuse = False
    # reuse a stored analysis if one covers the requested time window
    for recipe_id in recipes:
        recipe = recipes[recipe_id]
        if recipe._ts_from == int(
                params.get('ts_from')) and recipe._ts_to == int(
                    params.get('ts_to')):
            reuse = True
            break
    if not workload or not reuse:
        # TODO: recipe should come from CIMI
        recipe = request.get_json()
        recipe_bean = Recipe()
        recipe_bean.from_json(recipe)
        workload = Workload(service_id, int(params['ts_from']),
                            int(params['ts_to']))
        workload.add_recipe(
            int("{}{}".format(int(round(time.time())), '000000000')),
            recipe_bean)
    else:
        LOG.info("Reusing existing analysis")
    pipe_exec = AnalysePipe()
    workload = pipe_exec.run(workload)
    recipe = workload.get_latest_recipe()
    analysis_description = {
        "id": recipe.get_service_id(),
        "name": workload.get_service_name(),
        "analysis_id": workload.get_latest_recipe_time()
    }
    return Response(json.dumps(analysis_description), mimetype=MIME)
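
A sketch of the payload this endpoint expects. ts_from and ts_to are required by the reuse check and the Workload constructor; the route is an assumption:

import requests

payload = {
    "id": "service-0001",   # or use "name" instead
    "ts_from": 1546300800,  # epoch seconds
    "ts_to": 1546300860,
}
resp = requests.post("http://localhost:5000/analyse", json=payload)
print(resp.json())  # {"id": ..., "name": ..., "analysis_id": ...}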
Example 9
def refine_recipe():
    """
    Returns a refined recipe.
    Accept a json with service id and analyse_id
    """
    LOG.info("Retrieving Refined Recipe with url : %s", request.url)
    params = request.get_json()
    LOG.info(params)
    LOG.info(str(params['name']))
    workload = Workload(str(params['id']), None, None)
    pipe_exec = RefineRecipePipe()
    analysis_id = params.get('analysis_id')
    if analysis_id:
        pipe_exec.set_analysis_id(str(analysis_id))

    recipe = pipe_exec.run(workload)
    if recipe:
        return Response(json.dumps(recipe.to_json()), mimetype=MIME)
    return Response(json.dumps({}), mimetype=MIME)
Example 10
class Engine:
    def __init__(self, pipe, workload_name, ts_from, ts_to, config,
                 config_type, analysis_id):
        # required initialization
        # Init Common
        self._setup_framework_base_directory()
        self.pipe = pipe
        if pipe == 'optimal':
            # the optimal pipe ignores the time window and generates
            # its own workload name
            ts_from = 0
            ts_to = 0
            workload_name = pipe + "_" + str(int(time.time()))
        self.workload = Workload(workload_name, ts_from, ts_to, config,
                                 config_type)
        self.analysis_id = analysis_id

    def run(self):
        pipe_exec = None
        res = None
        if self.pipe == 'avg_analysis':
            pipe_exec = AvgHeuristicPipe()
        elif self.pipe == 'telemetry':
            pipe_exec = AnnotatedTelemetryPipe()
        elif self.pipe == 'optimal':
            pipe_exec = OptimalPipe()
        elif self.pipe == 'refine_recipe':
            pipe_exec = RefineRecipePipe()
            pipe_exec.set_analysis_id(self.analysis_id)

        elif self.pipe == 'rest':
            LOG.info('running in online mode')
            RestiAPI().run()

        else:
            LOG.error('Please specify at least one task to be performed. '
                      'Run "{} --help" to see the available options.'.format(
                          common.SERVICE_NAME))
            exit()
        if self.pipe != 'rest':
            LOG.info('Analyzing Workload: {}'.format(
                self.workload.get_workload_name()))
            res = pipe_exec.run(self.workload)
        return res

    def _setup_framework_base_directory(self):
        from analytics_engine.utilities import misc as utils
        common.BASE_DIR = utils.Validation.directory_exist_and_format(
            common.BASE_DIR)

    def _setup_framework_configuration_file(self):
        """
        Validate configuration file and setup internal parameters accordingly
        """

        # Check configuration file and sections
        sections = [CFS_GENERAL, CFS_DYNAMIC_PARAMS]
        if common.CONF_FILE_LOCATION:
            Validation.file_exist(common.CONF_FILE_LOCATION)
            common.CONF_FILE = \
                utils.ConfigurationFile(sections, common.CONF_FILE_LOCATION)

        # Load dynamic parameters within common
        for common_attrib in common.CONF_FILE.get_variable_list(
                CFS_DYNAMIC_PARAMS):
            setattr(
                common, common_attrib.upper(),
                common.CONF_FILE.get_variable(CFS_DYNAMIC_PARAMS,
                                              common_attrib))

    @staticmethod
    def parse_args():
        parser = argparse.ArgumentParser(description='Analytics Engine')
        parser.add_argument('--run',
                            type=str,
                            required=True,
                            default='none',
                            help='name of the task to be performed')
        parser.add_argument('--workload_name',
                            type=str,
                            required=False,
                            default='none',
                            help='name of workload to export')
        parser.add_argument('--ts_from',
                            type=int,
                            required=False,
                            default=0,
                            help='start timestamp')
        parser.add_argument('--ts_to',
                            type=int,
                            required=False,
                            default=0,
                            help='end timestamp')
        parser.add_argument('--config',
                            type=str,
                            required=False,
                            default='none',
                            help='path to local config file')
        parser.add_argument('--config_type',
                            type=str,
                            required=False,
                            default=None,
                            help='type of the config file')
        parser.add_argument('--analysis_id',
                            type=str,
                            required=False,
                            default=None,
                            help='id of the analysis to be refined')

        return parser.parse_args()
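
A sketch of how Engine might be driven from the command line, based only on the constructor and parse_args above; the module's actual entry point is not shown in this listing:

if __name__ == '__main__':
    args = Engine.parse_args()
    engine = Engine(args.run, args.workload_name, args.ts_from, args.ts_to,
                    args.config, args.config_type, args.analysis_id)
    engine.run()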