def process_new_build(config, last_build, last_build_status, jenkins_url,
                      master_build_url):
    """Process new detected build."""
    if last_build_status is not None:
        if last_build_status:
            log.info("And the last build is OK")
        else:
            log.error("And the last build failed!")
        build_cause = read_build_cause(master_build_url, last_build)
        if build_cause is not None:
            cause, changes = read_changes(jenkins_url,
                                          build_cause["upstreamUrl"],
                                          build_cause["upstreamBuild"])
            message = construct_message(jenkins_url, master_build_url,
                                        last_build, last_build_status,
                                        build_cause, cause, changes)
            log.info("Sending message:")
            log.warning(message)
            login_and_send_message(config.get_mm_url(),
                                   config.get_mm_user_login(),
                                   config.get_mm_user_password(),
                                   config.get_mm_team(),
                                   config.get_mm_channel(),
                                   message)
    else:
        log.info("Still building")
def read_job_statuses(ci_jobs, ci_jobs_table_enabled, liveness_table_enabled):
    """Read job statuses from the CI, but only if it's necessary."""
    log.info("Read job statuses")
    if ci_jobs_table_enabled or liveness_table_enabled:
        log.success("Done")
        return read_ci_jobs_statuses(JENKINS_URL)
    else:
        log.warning("Disabled")
        return None
def missing_api_token_warning(env_var_name):
    """Print warning or basic info about missing API token."""
    if os.environ.get(env_var_name):
        log.success("OK: the {name} environment variable is set and will be "
                    "used as an authorization token".format(name=env_var_name))
    else:
        log.warning("Warning: the {name} environment variable is not set.\n"
                    "Most tests that require authorization will probably "
                    "fail".format(name=env_var_name))
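# A possible invocation of the function above; "RECOMMENDER_API_TOKEN" is a
# hypothetical environment variable name used purely for illustration, not a
# name taken from the original source:
missing_api_token_warning("RECOMMENDER_API_TOKEN")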
def check_number_of_results(queue_size, component_analysis_count,
                            stack_analysis_count):
    """Check if we really got the same number of results as expected.

    When the server responds with any HTTP error code (4xx, 5xx), the results
    are NOT stored in the queue. This means that the number of results stored
    in the queue might be less than the number of threads set up by the user
    via CLI parameters in certain situations. This function checks for this
    situation.
    """
    log.info("queue size: {size}".format(size=queue_size))
    expected = component_analysis_count + 2 * stack_analysis_count
    if queue_size != expected:
        log.warning("Warning: {expected} results expected, but only {got} "
                    "are present".format(expected=expected, got=queue_size))
        log.warning("This means that {n} analyses ended with an error or "
                    "exception".format(n=expected - queue_size))
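# A worked example of the expected-count formula above. The counts are made-up
# CLI values; the "2 *" multiplier follows the formula in the function (each
# stack analysis is assumed to contribute two results to the queue):
component_analysis_count = 10
stack_analysis_count = 5

expected = component_analysis_count + 2 * stack_analysis_count
assert expected == 10 + 2 * 5 == 20
# With queue_size == 18, check_number_of_results() would warn that
# two analyses ended with an error or exception.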
def read_unit_test_coverage(ci_jobs, repository):
    """Read and process unit test coverage."""
    log.info("Reading unit test coverage")
    url = ci_jobs.get_console_output_url(repository)
    report_type = None
    if url is not None:
        response = requests.get(url)
        if response.status_code == 200:
            content = response.text.split("\n")
            unit_test_output = []
            for line in content:
                line = line.strip()
                # check where the test coverage begins
                if line_with_unit_test_header(line):
                    log_report_type("pycov")
                    report_type = "pycov"
                    unit_test_output.append(line)
                elif line_with_jacoco_test_header(line):
                    log_report_type("jacoco")
                    report_type = "jacoco"
                    # not needed to write the header
                    # unit_test_output.append(line)
                # check where the pycov test coverage ends
                elif line_with_unit_test_summary(line, report_type):
                    unit_test_output.append(line)
                    write_unit_test_coverage(unit_test_output, repository)
                    return parse_unit_test_statistic(line)
                # check where the jacoco test coverage ends
                elif line_with_jacoco_test_footer(line, report_type):
                    # not needed to write the footer
                    # unit_test_output.append(line)
                    write_unit_test_coverage_as_csv(unit_test_output,
                                                    repository)
                    p = ProjectCoverageReport(repository + ".coverage.csv")
                    p.convert_code_coverage_report(repository +
                                                   ".coverage.txt")
                    return compute_jacoco_test_statistic(p)
                # now we know we have something to report
                elif report_type:
                    unit_test_output.append(line)
    log.warning("No coverage report found")
    return None
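# The predicates line_with_unit_test_header() etc. are defined elsewhere in
# the source. A minimal sketch of what the pycov header check might look like,
# assuming coverage.py's usual "Name   Stmts   Miss  Cover" column header; the
# real implementation may differ:
import re


def line_with_unit_test_header(line):
    """Check if the line looks like the header of a pycov coverage table."""
    return re.match(r"^Name\s+Stmts\s+Miss\s+Cover", line) is not None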
def analyze_repo(repo_info, repo_id, algorithm=OXYGEN):
    """Analyze the repo using the specified algorithm. Store results in db."""
    log.info(f"Analyzing repository: {repo_info}")
    # guard: pg_conn() may fail before conn is assigned
    conn = None
    try:
        conn = pg_conn(db_url)
        if repo_info.clone_or_pull():
            log.success(f"Repository has been successfully cloned: "
                        f"{repo_info}")
        else:
            log.warning(f"Unable to clone repository: {repo_info}")
            conn.run(
                """UPDATE repos
                   SET status = (SELECT id FROM states
                                 WHERE name = 'err_clone')
                   WHERE id = %s;""", repo_id)
            return

        modules = get_modules_from_dir(repo_info.dir)
        if not modules:
            log.warning("Repository contains no Python module")
            return

        result = run_single_repo(modules, algorithm)

        # Insert repository analysis into database all at once
        with conn.transaction():
            commit_id = conn.one(
                """INSERT INTO commits (repo_id, hash)
                   VALUES (%s, %s) RETURNING id;""",
                repo_id, repo_info.hash)

            for c in result.clones:
                cluster_id = conn.one(
                    """INSERT INTO clusters (commit_id, "value", weight)
                       VALUES (%s, %s, %s) RETURNING id;""",
                    commit_id, c.value, c.match_weight)

                for o, s in c.origins.items():
                    conn.run(
                        """INSERT INTO origins
                           (cluster_id, file, line, col_offset, similarity)
                           VALUES (%s, %s, %s, %s, %s);""",
                        cluster_id, o.file, o.line, o.col_offset, s)

            log.success(f"Repository has been successfully analyzed: "
                        f"{repo_info}")
            conn.run(
                """UPDATE repos
                   SET status = (SELECT id FROM states WHERE name = 'done')
                   WHERE id = %s;""", repo_id)

        # Once done with the regular analysis, run pattern extraction
        with conn.transaction():
            _extract_patterns(conn, commit_id, modules)
        log.success(f"Pattern extraction was successful: {repo_info}")
    except PG_Error as ex:
        handle_pg_error(ex, conn, repo_id)
    finally:
        if conn is not None:
            conn.close()
from fastlog import log

log.setLevel(log.DEBUG)

log.info("log.info")
log.success("log.success")
log.failure("log.failure")

with log.indent():
    log.debug("log.debug")
    log.warning("log.warning")

log.separator()

log.hexdump(list(map(chr, range(256))))
from fastlog import log
import traceback

log.info("Familiar log functions:")
log.info("log.info")
log.success("log.success")
log.failure("log.failure")

log.debug("You don't see me! The default level is log.INFO")
log.setLevel(log.DEBUG)
log.debug("log.debug")
log.warning("log.warning")

try:
    log.exception("log.exception")
except Exception:
    traceback.print_exc()

log.separator()

log.info("Indent logs using Python's 'with' statement:")
log.warning("First level block")
with log.indent():
    log.warning("Second level block")
    with log.indent():
        log.warning("Third level block")
    log.warning("Back to second")
log.warning("Back to first")

log.separator()