def do_compare(parser, compare_parser, argv, db_path_list):
    """
    Handle the ``compare`` subcommand: compare exactly two value databases.

    Both databases must have been produced with the same adaptor, since the
    adaptor implements the comparison logic.
    """
    assert len(db_path_list) == 2

    databases = []
    for path in db_path_list:
        databases.append(engine.ValueDB.from_path(path))

    cls_set = {db.adaptor_cls for db in databases}
    if len(cls_set) != 1:
        raise ValueError(
            'Cannot compare DBs that were built using a different adaptor: {}'.format(cls_set))
    cls = utils.take_first(cls_set)

    # Register the adaptor's comparison-specific CLI options, then reparse the
    # command line so those options are taken into account.
    group = utils.create_adaptor_parser_group(compare_parser, cls)
    cls.register_compare_param(group)
    parsed_args = parser.parse_args(argv)

    # Instantiate the adaptor with the parsed options so it can drive the
    # comparison.
    return cls(parsed_args).compare_db_list(databases)
def do_show(parser, show_parser, argv, db_path):
    """
    Handle the ``show`` subcommand: display the content of one value database.

    The adaptor recorded in the database provides the display logic.
    """
    database = engine.ValueDB.from_path(db_path)
    cls = database.adaptor_cls

    # Register the adaptor's show-specific CLI options, then reparse the
    # command line so those options are taken into account.
    group = utils.create_adaptor_parser_group(show_parser, cls)
    cls.register_show_param(group)
    parsed_args = parser.parse_args(argv)

    # Instantiate the adaptor with the parsed options so it can render the DB.
    return cls(parsed_args).show_db(database)
def do_run(args, parser, run_parser, argv):
    """
    Handle the ``run`` subcommand: import the user's modules, build the set of
    expressions from the discovered callables, and execute them.

    :param args: Initially parsed command-line arguments. ``argv`` is reparsed
        once the adaptor has registered its own options, and the result
        supersedes this.
    :param parser: Top-level argument parser, used to reparse ``argv``.
    :param run_parser: ``run`` subcommand parser, used to register adaptor
        options and to report usage errors.
    :param argv: Raw command-line arguments to reparse.
    :returns: ``1`` when there is nothing to do, ``0`` when only listing was
        requested, otherwise the return code of ``exec_expr_list``.
    """
    # Import all modules, before selecting the adaptor
    module_set = set()
    for path in args.python_files:
        # This might fail, since some adaptor options may introduce "fake"
        # positional arguments, since these options are not registered yet.
        with contextlib.suppress(ValueError, ImportError):
            module_set.update(utils.import_modules([path], best_effort=True))

    # Look for a customization submodule in one of the parent packages of the
    # modules we specified on the command line.
    utils.find_customization_module_set(module_set)

    adaptor_name = args.adaptor
    adaptor_cls = AdaptorBase.get_adaptor_cls(adaptor_name)
    if not adaptor_cls:
        if adaptor_name:
            raise RuntimeError(
                'Adaptor "{}" cannot be found'.format(adaptor_name))
        else:
            raise RuntimeError('No adaptor was found')

    # Add all the CLI arguments of the adaptor before reparsing the
    # command line.
    adaptor_group = utils.create_adaptor_parser_group(run_parser, adaptor_cls)
    adaptor_cls.register_run_param(adaptor_group)

    # Reparse the command line after the adaptor had a chance to add its own
    # arguments.
    args = parser.parse_args(argv)

    # Re-import now that we are sure to have the correct list of sources
    module_set = utils.import_modules(args.python_files)

    # Make sure the module in which adaptor_cls is defined is used
    module_set.add(inspect.getmodule(adaptor_cls))

    verbose = args.verbose
    save_db = args.save_value_db
    iteration_nr = args.n
    shared_pattern_set = set(args.share)
    random_order = args.random_order

    adaptor = adaptor_cls(args)

    only_list = args.list
    only_template_scripts = args.template_scripts

    # RST listing implies list-only mode: nothing is executed.
    rst_expr_list = args.rst_list
    if rst_expr_list:
        only_list = True

    type_goal_pattern_set = set(args.goal)
    callable_goal_pattern_set = set(args.callable_goal)

    # Fall back on the adaptor's default goal patterns when the user gave none.
    if not (type_goal_pattern_set or callable_goal_pattern_set):
        type_goal_pattern_set = set(
            adaptor_cls.get_default_type_goal_pattern_set())

    load_db_path_list = args.load_db
    load_db_pattern_list = args.load_type
    load_db_uuid_list = args.load_uuid
    load_db_replay_uuid = args.replay
    load_db_uuid_args = load_db_replay_uuid or args.load_uuid_args

    user_filter_set = set(args.select)
    user_filter_set.update(args.select_multiple)

    if load_db_replay_uuid and user_filter_set:
        run_parser.error(
            '--replay and --select cannot be used at the same time')

    if load_db_replay_uuid and not load_db_path_list:
        run_parser.error('--load-db must be specified to use --replay')

    restricted_pattern_set = set(args.restrict)
    forbidden_pattern_set = set(args.forbid)
    allowed_pattern_set = set(args.allow)
    # Restricted and goal callables are implicitly allowed as well.
    allowed_pattern_set.update(restricted_pattern_set)
    allowed_pattern_set.update(callable_goal_pattern_set)

    artifact_dir_link = args.symlink_artifact_dir_to

    # Setup the artifact_dir so we can create a verbose log in there
    date = datetime.datetime.now().strftime('%Y%m%d_%H:%M:%S')
    testsession_uuid = utils.create_uuid()
    if only_template_scripts:
        artifact_dir = pathlib.Path(only_template_scripts)
    elif args.artifact_dir:
        artifact_dir = pathlib.Path(args.artifact_dir)
    # If we are not given a specific folder, we create one under the root we
    # were given
    else:
        artifact_dir = pathlib.Path(args.artifact_root,
                                    date + '_' + testsession_uuid)

    if only_list:
        # List-only mode: no artifact dir is created, so no log files either.
        debug_log = None
        info_log = None
    else:
        artifact_dir.mkdir(parents=True)
        if artifact_dir_link:
            # Refuse to clobber a real file/dir; only replace an existing
            # symlink.
            if artifact_dir_link.exists() and not artifact_dir_link.is_symlink():
                raise ValueError(
                    'This is not a symlink and will not be overwritten: {}'.
                    format(artifact_dir_link))
            with contextlib.suppress(FileNotFoundError):
                artifact_dir_link.unlink()
            artifact_dir_link.symlink_to(artifact_dir,
                                         target_is_directory=True)
        artifact_dir = artifact_dir.resolve()
        # Update the CLI arguments so the customization module has access to the
        # correct value
        args.artifact_dir = artifact_dir
        debug_log = artifact_dir / 'DEBUG.log'
        info_log = artifact_dir / 'INFO.log'
    utils.setup_logging(args.log_level, debug_log, info_log, verbose=verbose)

    # Get the set of all callables in the given set of modules
    callable_pool = utils.get_callable_set(module_set, verbose=verbose)

    # Build the pool of operators from the callables
    non_reusable_type_set = set(
        utils.flatten_seq(
            utils.get_subclasses(cls)
            for cls in adaptor.get_non_reusable_type_set()))
    op_set = build_op_set(
        callable_pool,
        non_reusable_type_set,
        allowed_pattern_set,
        adaptor,
    )

    # Load objects from an existing database
    if load_db_path_list:
        db_list = []
        for db_path in load_db_path_list:
            db = engine.ValueDB.from_path(db_path)
            op_set.update(
                load_from_db(db, adaptor, non_reusable_type_set,
                             load_db_pattern_list, load_db_uuid_list,
                             load_db_uuid_args))
            db_list.append(db)
    # Get the prebuilt operators from the adaptor
    else:
        db_list = []
        op_set.update(adaptor.get_prebuilt_op_set())

    # Force some parameter values to be provided with a specific callable
    patch_map = build_patch_map(args.sweep, args.param, op_set)
    op_set.update(apply_patch_map(patch_map, adaptor))

    # Some operators are hidden in IDs since they don't add useful information
    # (internal classes)
    hidden_callable_set = {
        op.callable_
        for op in adaptor.get_hidden_op_set(op_set)
    }

    # These get_id() options are used for all user-exposed listing that is
    # supposed to be filterable with user_filter_set (like only_list)
    filterable_id_kwargs = dict(full_qual=False,
                                qual=False,
                                with_tags=False,
                                hidden_callable_set=hidden_callable_set)

    # Restrict the Expressions that will be executed to just the one we
    # care about
    if db_list and load_db_replay_uuid:
        # The replayed expression is selected by the ID of the DB entry with
        # the given UUID; hidden callables must stay visible for that lookup.
        id_kwargs = copy.copy(filterable_id_kwargs)
        del id_kwargs['hidden_callable_set']
        # Let the merge logic handle duplicated UUIDs
        db = engine.ValueDB.merge(db_list)
        user_filter_set = {
            db.get_by_uuid(load_db_replay_uuid).get_id(**id_kwargs)
        }

    # Only print once per parameters' tuple
    if verbose:
        @utils.once
        def handle_non_produced(cls_name, consumer_name, param_name,
                                callable_path):
            info(
                'Nothing can produce instances of {cls} needed for {consumer} (parameter "{param}", along path {path})'
                .format(cls=cls_name,
                        consumer=consumer_name,
                        param=param_name,
                        path=' -> '.join(
                            utils.get_name(callable_)
                            for callable_ in callable_path)))

        @utils.once
        def handle_cycle(path):
            error('Cyclic dependency detected: {path}'.format(path=' -> '.join(
                utils.get_name(callable_) for callable_ in path)))
    else:
        handle_non_produced = 'ignore'
        handle_cycle = 'ignore'

    # Get the callable goals, either by the callable name or the value type
    root_op_set = set(op for op in op_set if (
        utils.match_name(op.get_name(
            full_qual=True), callable_goal_pattern_set) or
        # All producers of the goal types can be a root operator in the
        # expressions we are going to build, i.e. the outermost function call
        utils.match_base_cls(op.value_type, type_goal_pattern_set)
        # Only keep the Expression where the outermost (root) operator is
        # defined in one of the files that were explicitely specified on the
        # command line.
    ) and inspect.getmodule(op.callable_) in module_set)

    # Build the class context from the set of Operator's that we collected
    class_ctx = engine.ClassContext.from_op_set(
        op_set=op_set,
        forbidden_pattern_set=forbidden_pattern_set,
        restricted_pattern_set=restricted_pattern_set)

    # Build the list of Expression that can be constructed from the set of
    # callables
    expr_list = class_ctx.build_expr_list(
        root_op_set,
        non_produced_handler=handle_non_produced,
        cycle_handler=handle_cycle,
    )

    # First, sort with the fully qualified ID so we have the strongest stability
    # possible from one run to another
    expr_list.sort(
        key=lambda expr: expr.get_id(full_qual=True, with_tags=True))
    # Then sort again according to what will be displayed. Since it is a stable
    # sort, it will keep a stable order for IDs that look the same but actually
    # differ in their hidden part
    expr_list.sort(key=lambda expr: expr.get_id(qual=False, with_tags=True))

    if random_order:
        random.shuffle(expr_list)

    if user_filter_set:
        expr_list = [
            expr for expr in expr_list
            if utils.match_name(expr.get_id(
                **filterable_id_kwargs), user_filter_set)
        ]

    if not expr_list:
        info(
            'Nothing to do, check --help while passing some python sources to get the full help.'
        )
        return 1

    # Display IDs fully qualified only in verbose mode.
    id_kwargs = {
        **filterable_id_kwargs,
        'full_qual': bool(verbose),
    }
    if rst_expr_list:
        id_kwargs['style'] = 'rst'
        for expr in expr_list:
            out('* {}'.format(expr.get_id(**id_kwargs)))
    else:
        out('The following expressions will be executed:\n')
        for expr in expr_list:
            out(expr.get_id(**id_kwargs))
            if verbose >= 2:
                out(expr.format_structure() + '\n')

    formatted_out = adaptor.format_expr_list(expr_list, verbose=verbose)
    if formatted_out:
        out('\n' + formatted_out + '\n')

    if only_list:
        return 0

    # Get a list of ComputableExpression in order to execute them
    expr_list = engine.ComputableExpression.from_expr_list(expr_list)

    if iteration_nr > 1:
        shared_op_set = {
            # We don't allow matching on root operators, since that would be
            # pointless. Sharing root operators basically means doing the work
            # once, and then reusing everything at every iteration.
            op
            for op in (op_set - root_op_set)
            if utils.match_base_cls(op.value_type, shared_pattern_set)
        }
        predicate = lambda expr: expr.op not in shared_op_set
        iteration_expr_list = [
            # Apply CSE within each iteration
            engine.ComputableExpression.cse(
                expr.clone_by_predicate(predicate) for expr in expr_list)
            for i in range(iteration_nr)
        ]
    else:
        iteration_expr_list = [expr_list]

    # Make sure all references to Consumer are cloned appropriately
    for expr in utils.flatten_seq(iteration_expr_list):
        expr.prepare_execute()

    exec_ret_code = exec_expr_list(
        iteration_expr_list=iteration_expr_list,
        adaptor=adaptor,
        artifact_dir=artifact_dir,
        testsession_uuid=testsession_uuid,
        hidden_callable_set=hidden_callable_set,
        only_template_scripts=only_template_scripts,
        adaptor_cls=adaptor_cls,
        verbose=verbose,
        save_db=save_db,
    )

    # If we reloaded a DB, merge it with the current DB so the outcome is a
    # self-contained artifact dir
    if load_db_path_list and save_db:
        orig_list = [
            path if path.is_dir() else path.parent
            for path in map(pathlib.Path, load_db_path_list)
        ]
        do_merge(orig_list, artifact_dir, output_exist=True)

    return exec_ret_code