Example #1
import os
import tempfile
from datetime import timedelta

from django.utils.timezone import now

from awx.main.analytics import collectors
from awx.main.models import InventorySource, ProjectUpdate


def test_copy_tables_unified_job_query(
    sqlite_copy_expert, project, inventory, job_template
):
    """
    Ensure that various unified job types are in the output of the query.
    """

    time_start = now() - timedelta(hours=9)
    inv_src = InventorySource.objects.create(
        name="inventory_update1", inventory=inventory, source="gce"
    )

    project_update_name = ProjectUpdate.objects.create(
        project=project, name="project_update1"
    ).name
    inventory_update_name = inv_src.create_unified_job().name
    job_name = job_template.create_unified_job().name

    with tempfile.TemporaryDirectory() as tmpdir:
        collectors.copy_tables(time_start, tmpdir, subset="unified_jobs")
        with open(os.path.join(tmpdir, "unified_jobs_table.csv")) as f:
            lines = "".join([line for line in f])

            assert project_update_name in lines
            assert inventory_update_name in lines
            assert job_name in lines
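# A note on the assertions above: they substring-match the raw CSV text. A
# stricter variant could parse the header row and check a specific column;
# the 'name' column below is an assumption about the table layout, not
# something this snippet confirms.
import csv
import io

sample = "id,name\n1,project_update1\n2,inventory_update1\n"
names = [row["name"] for row in csv.DictReader(io.StringIO(sample))]
assert "project_update1" in names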
Example #2
import csv
import os
import tempfile
from datetime import timedelta

from django.utils.timezone import now

from awx.main.analytics import collectors


def test_copy_tables_workflow_job_node_query(sqlite_copy_expert, workflow_job):
    time_start = now() - timedelta(hours=9)

    with tempfile.TemporaryDirectory() as tmpdir:
        collectors.copy_tables(time_start, tmpdir, subset="workflow_job_node_query")
        with open(os.path.join(tmpdir, "workflow_job_node_table.csv")) as f:
            reader = csv.reader(f)
            # Pop the headers
            next(reader)
            lines = list(reader)

            ids = [int(line[0]) for line in lines]

            assert ids == list(
                workflow_job.workflow_nodes.all().values_list("id", flat=True)
            )

            for index, relationship in zip(
                [7, 8, 9], ["success_nodes", "failure_nodes", "always_nodes"]
            ):
                for i, row in enumerate(lines):
                    related_nodes = (
                        [int(e) for e in row[index].split(",")] if row[index] else []
                    )
                    assert related_nodes == list(
                        getattr(workflow_job.workflow_nodes.all()[i], relationship)
                        .all()
                        .values_list("id", flat=True)
                    ), f"(right side) workflow_nodes.all()[{i}].{relationship}.all()"
Example #3
import inspect
import json
import logging
import os
import shutil
import tempfile
from datetime import timedelta

from django.conf import settings
from django.utils.timezone import now

from awx.main.analytics import collectors
from awx.main.models import TowerAnalyticsState

logger = logging.getLogger('awx.main.analytics')

# NOTE: _valid_license() (used below) is a private helper defined alongside
# gather() in the analytics module; it is not shown in this example.


def gather(dest=None, module=None):
    """
    Gather all defined metrics and write them as JSON files in a .tgz

    :param dest:    the (optional) absolute path to write a compressed tarball
    :param module:  the module to search for registered analytic collector
                    functions; defaults to awx.main.analytics.collectors
    """

    run_now = now()
    state = TowerAnalyticsState.get_solo()
    last_run = state.last_run
    logger.debug("Last analytics run was: {}".format(last_run))
    
    max_interval = now() - timedelta(days=7)
    if not last_run or last_run < max_interval:
        last_run = max_interval

    if _valid_license() is False:
        logger.error("Invalid license provided, or no license provided")
        return "Error: Invalid license provided, or no license provided"
    
    if not settings.INSIGHTS_TRACKING_STATE:
        logger.error("Insights analytics not enabled")
        return

    if module is None:
        module = collectors

    dest = dest or tempfile.mkdtemp(prefix='awx_analytics')
    for name, func in inspect.getmembers(module):
        if inspect.isfunction(func) and hasattr(func, '__awx_analytics_key__'):
            key = func.__awx_analytics_key__
            path = '{}.json'.format(os.path.join(dest, key))
            with open(path, 'w', encoding='utf-8') as f:
                try:
                    json.dump(func(last_run), f)
                except Exception:
                    logger.exception("Could not generate metric {}.json".format(key))
                    f.close()
                    os.remove(f.name)
    try:
        collectors.copy_tables(since=last_run, full_path=dest)
    except Exception:
        logger.exception("Could not copy tables")
        
    # can't use isoformat() since it has colons, which GNU tar doesn't like
    tarname = '_'.join([
        settings.SYSTEM_UUID,
        run_now.strftime('%Y-%m-%d-%H%M%S%z')
    ])
    tgz = shutil.make_archive(
        os.path.join(os.path.dirname(dest), tarname),
        'gztar',
        dest
    )
    shutil.rmtree(dest)
    return tgz
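# gather() discovers collector functions by looking for an
# __awx_analytics_key__ attribute (see the inspect.getmembers loop above).
# A minimal sketch of how a collector could be tagged so gather() picks it
# up; this register decorator is illustrative, not necessarily awx's actual
# registration API:
def register(key):
    def decorate(func):
        func.__awx_analytics_key__ = key
        return func
    return decorate

@register('example_counts')
def example_counts(since):
    # Must return JSON-serializable data; gather() writes it to <key>.json.
    return {'since': str(since)}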
Example #4
# A variant of the test in Example #1; it relies on the same imports.
def test_copy_tables_unified_job_query(sqlite_copy_expert, project, inventory,
                                       job_template):
    '''
    Ensure that various unified job types are in the output of the query.
    '''

    time_start = now()
    inv_src = InventorySource.objects.create(name="inventory_update1",
                                             inventory=inventory,
                                             source='gce')

    project_update_name = ProjectUpdate.objects.create(
        project=project, name="project_update1").name
    inventory_update_name = inv_src.create_unified_job().name
    job_name = job_template.create_unified_job().name

    with tempfile.TemporaryDirectory() as tmpdir:
        collectors.copy_tables(time_start, tmpdir)
        with open(os.path.join(tmpdir, 'unified_jobs_table.csv')) as f:
            lines = f.read()

            assert project_update_name in lines
            assert inventory_update_name in lines
            assert job_name in lines