Code example #1
0
 def __init__(
     self,
     database: DB,
     use_lock: bool = False,
     primary_key_generator: Optional[PrimaryKeyGenerator] = None,
 ):
     """Bind the target database and set up key generation and bulk saving.

     Args:
         database: the DB handle whose ``dbname`` is also cached locally.
         use_lock: whether saves should be performed under a lock.
         primary_key_generator: optional generator; a default one is
             created when none (or a falsy one) is supplied.
     """
     self.use_lock = use_lock
     self.database = database
     self.dbname = database.dbname
     # Fall back to a fresh generator when the caller passed nothing.
     if not primary_key_generator:
         primary_key_generator = PrimaryKeyGenerator()
     self.primary_key_generator = primary_key_generator
     self.bulk_saver = BulkSaver(self.primary_key_generator)
Code example #2
0
def analyze(
    ctx: Context,
    run_kind,
    branch,
    commit_hash,
    job_id,
    differential_id,
    previous_issue_handles,
    previous_input,
    linemap,
    store_unused_models,
    input_file,
):
    """Run the analysis pipeline over *input_file* and save results to the DB.

    Builds a metadata blob from the supplied options, resolves a job id,
    optionally loads previous results for differential analysis, then
    executes Parser -> ModelGenerator -> TrimTraceGraph -> DatabaseSaver.
    """
    # Metadata consumed by downstream pipeline steps.
    summary_blob = {
        "run_kind": run_kind,
        "compress": lambda x: x,  # identity — no compression applied
        "repository": ctx.repository,
        "branch": branch,
        "commit_hash": commit_hash,
        "old_linemap_file": linemap,
        "store_unused_models": store_unused_models,
    }

    # Synthesize a job id from the differential when none was given.
    if job_id is None and differential_id is not None:
        job_id = "user_input_" + str(differential_id)
    summary_blob["job_id"] = job_id

    # Previous issue handles take precedence over a full previous output;
    # only one of the two is consulted.
    if previous_issue_handles:
        summary_blob["previous_issue_handles"] = AnalysisOutput.from_file(
            previous_issue_handles
        )
    elif previous_input:
        previous_input = AnalysisOutput.from_file(previous_input)

    # Assemble and execute the pipeline over (current, previous) outputs.
    input_files = (AnalysisOutput.from_file(input_file), previous_input)
    steps = [
        Parser(),
        ModelGenerator(),
        TrimTraceGraph(),
        DatabaseSaver(
            DB(ctx.database_engine, ctx.database_name, assertions=True),
            PrimaryKeyGenerator(),
        ),
    ]
    Pipeline(steps).run(input_files, summary_blob)
Code example #3
0
 def __init__(self, primary_key_generator: Optional[PrimaryKeyGenerator] = None):
     """Prepare empty per-class pending lists for bulk saving.

     Args:
         primary_key_generator: optional generator; a default one is
             created when none (or a falsy one) is supplied.
     """
     self.primary_key_generator = primary_key_generator or PrimaryKeyGenerator()
     # One pending-objects list per class, keyed by class name, in the
     # order defined by SAVING_CLASSES_ORDER.
     self.saving: Dict[str, Any] = {
         cls.__name__: [] for cls in self.SAVING_CLASSES_ORDER
     }