def test_af_verification():
    """Check the verifier's conflict-freeness, admissibility, completeness
    and stability predicates against solver-computed extensions."""
    for af_pkl_path in afs_dir.glob("*.pkl"):
        af = ArgumentationFramework.from_pkl(af_pkl_path)
        for semantics in [GRD, STB, PRF, COM]:

            # any subset of an extension must be conflict-free
            for extension in af.extensions[semantics]:
                if not extension:
                    continue
                k = random.randint(1, len(extension))
                # random.sample requires a sequence since Python 3.11,
                # so materialize the frozenset first
                extension_subset = frozenset(random.sample(list(extension), k))
                assert verifier.is_conflict_free(af, extension_subset)

            # an argument in no admissible set can be neither credulously
            # nor sceptically accepted (sceptical acceptance is vacuously
            # true when there are no extensions at all)
            argument = random.choice(list(af.arguments))
            if not verifier.is_in_admissible(af, argument):
                assert argument not in af.cred_accepted_args(semantics)
                assert argument not in af.scept_accepted_args(semantics) or (
                    len(af.extensions[semantics]) == 0)

        # every computed complete extension must verify as complete
        for complete_extension in af.extensions[COM]:
            assert verifier.is_complete(af, complete_extension)

        # every computed stable extension must verify as stable
        for stable_extension in af.extensions[STB]:
            assert verifier.is_stable(af, stable_extension)
    def sample_set_of_arguments(self, af: "ArgumentationFramework") -> frozenset:
        """
        Generate a datapoint for the enumeration tree search with either an
        empty S, legal S or illegal S.

        A "legal" set is a strict subset of some extension under the
        configured semantics; an "illegal" set additionally contains one
        argument that is not credulously accepted given that subset, so no
        extension is a superset of it.
        """
        arg_set = frozenset()

        # no extension under this semantics: only the empty set makes sense
        if len(af.extensions[self.semantics]) == 0:
            return arg_set

        random_extension = random.choice(list(af.extensions[self.semantics]))

        if len(random_extension) == 0:
            return arg_set

        # strict subset of the randomly chosen extension
        # (random.sample requires a sequence since Python 3.11)
        arg_set = frozenset(
            random.sample(list(random_extension),
                          random.randrange(0, len(random_extension))))

        sample_type = random.choice(["legal", "illegal"])
        if sample_type == "legal":
            return arg_set

        # an illegal set has none of the extensions as its superset:
        # extend with an argument that is not credulously acceptable
        # relative to the sampled subset
        possible_illegal_additions = af.arguments - (
            arg_set
            | af.cred_accepted_args(self.semantics, filter_args=arg_set))
        if len(possible_illegal_additions) != 0:
            arg_set = arg_set | frozenset(
                random.sample(list(possible_illegal_additions), 1))

        return arg_set
def main(args: argparse.Namespace):
    """
    Iterate the directory with apx files and save as argumentation frameworks.

    For each APX graph the extensions under ``args.semantics`` are computed;
    the enumeration problem and the AF itself are pickled alongside.
    Instances whose pickles already exist and load cleanly are skipped, so
    the script can be re-run / parallelized safely.
    """
    # create required directories
    graphs_dir = config.dataset_dir / args.name / "graphs"
    afs_dir = config.dataset_dir / args.name / "AFs"
    enumeration_dir = config.dataset_dir / args.name / "problems" / "enumeration"

    afs_dir.mkdir(parents=True, exist_ok=True)
    enumeration_dir.mkdir(parents=True, exist_ok=True)

    # process APX files and skip if already exists
    # files are iterated randomly to allow parallelization
    apx_paths = list(graphs_dir.glob("*.apx"))
    random.shuffle(apx_paths)
    for apx_path in tqdm(apx_paths, desc="Solve"):
        graph_id = apx_path.stem
        af_pkl_path = afs_dir / f"{graph_id}.pkl"
        enum_pkl_path = enumeration_dir / f"{graph_id}.pkl"

        # only continue to the next instance if we can actually
        # load the argumentation framework from the pickle;
        # a corrupt pickle deliberately falls through and is rebuilt below
        if af_pkl_path.exists() and enum_pkl_path.exists():
            try:
                ArgumentationFramework.from_pkl(af_pkl_path)
                EnumerationProblem.from_pkl(enum_pkl_path)
                continue
            except Exception:
                pass

        # otherwise build AF from graph file and compute extensions
        with open(apx_path, "r", encoding="utf-8") as file:
            af = ArgumentationFramework.from_apx(file, graph_id=graph_id)

        enumeration_solver = EnumerationSolver()
        problem = EnumerationProblem.from_af(af)
        for semantics in args.semantics:
            problem.solve(enumeration_solver, semantics)
            assert af == problem.af

        with open(enum_pkl_path, "wb+") as file:
            pickle.dump(problem.state_dict, file)

        with open(af_pkl_path, "wb+") as file:
            pickle.dump(af.state_dict, file)
    def verify_status(self, af: ArgumentationFramework) -> bool:
        """
        Returns whether status enforcement has been achieved for either
        CRED or SCEPT.
        """
        # bail out early if any required argument can be rejected cheaply
        for arg in self.problem.positive:
            if self.quick_reject_status(af, arg):
                return False

        # (re)compute and cache the extensions for the configured semantics
        af.extensions[self.semantics] = self.enumeration_solver.solve(
            af, self.semantics)

        if self.task == CRED:
            accepted_args = af.cred_accepted_args(self.semantics)
        else:
            accepted_args = af.scept_accepted_args(self.semantics)

        # enforced iff every positive argument is accepted and none of the
        # negative arguments is
        positives_ok = self.problem.positive.issubset(accepted_args)
        negatives_ok = self.problem.negative.isdisjoint(accepted_args)
        return positives_ok and negatives_ok
Exemple #5
0
 def is_stable(af: ArgumentationFramework, arg_set: frozenset) -> bool:
     """
     Check whether ``arg_set`` is a stable extension.

     Following Besnard & Doutre (2004), S is stable exactly when S equals
     the set of arguments that S does not attack.
     """
     # arguments that survive the attacks launched from arg_set
     survivors = set(af.arguments)
     survivors -= af.attacked_by(arg_set)
     return frozenset(survivors) == arg_set
Exemple #6
0
 def __getitem__(self, idx: int) -> EnforcementProblem:
     """ Returns the ith enforcement problem from the dataset.
     When fixed goals is set, the problem is loaded from a pickle with a preset enforcement goal
     Otherwise the AF is loaded and a new enforcement goal is randomly created"""
     if self.task in [STRICT, NONSTRICT]:
         problem_class = ExtensionEnforcementProblem
     else:
         problem_class = StatusEnforcementProblem

     path = self.items[idx]
     if self.fixed_goals:
         # goals were generated offline and pickled together with the AF
         return problem_class.from_pkl(path)

     # otherwise sample a fresh goal for the stored AF
     framework = ArgumentationFramework.from_pkl(path)
     return problem_class.from_af(framework, self.task)
    def from_af(cls, af: "ArgumentationFramework", task, max_enforce_fraction=1):
        """
        Build a status enforcement problem for ``af`` with a random,
        non-trivial goal.

        The goal arguments are split into a ``positive`` (to be accepted)
        and ``negative`` (to be rejected) set; sampling repeats until no
        semantics already satisfies the goal, so solving is never trivial.

        :param af: argumentation framework to build the problem for
        :param task: acceptance task (CRED or sceptical)
        :param max_enforce_fraction: upper bound on the fraction of the AF's
            arguments used in the goal (clamped to at least 1 and at most
            ``|A| - 1`` arguments)
        """
        # get arg fraction with min 1 and max |A|-1
        max_args = min(
            max(math.floor(af.num_arguments * max_enforce_fraction), 1),
            af.num_arguments - 1,
        )

        positive: Optional[frozenset] = None
        negative: Optional[frozenset] = None
        accepted_args_per_semantics = [
            af.cred_accepted_args(semantics)
            if task == CRED else af.scept_accepted_args(semantics)
            for semantics in af.extensions.keys()
        ]
        # resample until the goal is not already satisfied under any
        # semantics (random.sample requires a sequence since Python 3.11)
        while positive is None or any(
                positive.issubset(accepted_args)
                and negative.isdisjoint(accepted_args)
                for accepted_args in accepted_args_per_semantics):
            k = random.randint(1, max_args)
            enforce_arguments = frozenset(random.sample(list(af.arguments), k))
            p = random.randint(0, len(enforce_arguments))
            positive = frozenset(random.sample(list(enforce_arguments), p))
            negative = enforce_arguments - positive

        return cls(
            af=af,
            task=task,
            solutions=None,
            solve_times=None,
            negative=negative,
            positive=positive,
        )
def main(args: argparse.Namespace):
    """
    Generate and solve an enforcement problem for each AF in the dataset dir
    """
    dataset_dir = config.data_dir / "dataset" / args.name
    afs_dir = dataset_dir / "AFs"
    enforcement_dir = dataset_dir / "problems" / "enforcement"
    enforcement_dir.mkdir(parents=True, exist_ok=True)

    solver = EnforcementSolver()
    for task in args.tasks:
        task_dir = enforcement_dir / task
        task_dir.mkdir(parents=True, exist_ok=True)

        # iterate AFs in random order so multiple workers can process the
        # same directory in parallel without colliding on the same instances
        paths = list(afs_dir.glob("*.pkl"))
        random.shuffle(paths)
        for af_path in tqdm(paths):
            problem_path = task_dir / f"{af_path.stem}.pkl"
            if problem_path.exists():
                try:
                    problem = EnforcementProblem.from_pkl(problem_path)
                except Exception as exception:
                    # unreadable pickle: report it and skip this instance
                    print(exception)
                    continue
            else:
                af = ArgumentationFramework.from_pkl(af_path)
                problem = EnforcementProblem.from_af(af, task)

            for semantics in args.semantics:

                # skip when a solution for this semantics already exists;
                # if a previous attempt found no optimum, only retry when
                # the current time limit is higher than the recorded solve
                # time of that earlier try
                if (solver.name in problem.solutions
                        and semantics in problem.solutions[solver.name] and
                    (problem.solutions[solver.name][semantics] is not None
                     or problem.solve_times[solver.name][semantics] >=
                     args.time_limit)):
                    continue

                problem.solve(solver, semantics, time_limit=args.time_limit)

                # for credulous acceptance the preferred-semantics result
                # is reused as the complete-semantics result
                if task == CRED and semantics == PRF:
                    problem.solutions[solver.name][COM] = problem.solutions[
                        solver.name][PRF]
                    problem.solve_times[solver.name][
                        COM] = problem.solve_times[solver.name][PRF]
                    problem.edge_changes[solver.name][
                        COM] = problem.edge_changes[solver.name][PRF]

            problem.to_pkl(problem_path)
def test_extensions():
    """Sanity-check the lattice relations between the computed GRD, COM,
    PRF and STB extensions of every stored AF."""
    for af_pkl_path in afs_dir.glob("*.pkl"):
        af = ArgumentationFramework.from_pkl(af_pkl_path)

        # grounded is unique; preferred and complete are never empty
        assert (len(af.extensions[PRF]) >= 1 and len(af.extensions[GRD]) == 1
                and len(af.extensions[COM]) >= 1)

        grounded = next(iter(af.extensions[GRD]))
        # the grounded extension is itself complete
        # (hoisted: this check is independent of any STB/PRF loop variable)
        assert grounded in af.extensions[COM]

        # every preferred extension is complete and contains the grounded one
        for prf in af.extensions[PRF]:
            assert grounded.issubset(prf)
            assert prf in af.extensions[COM]

        # every stable extension is preferred and verifies as stable
        for stb in af.extensions[STB]:
            assert stb in af.extensions[PRF]
            assert verifier.is_stable(af, stb)

        # every complete extension verifies as complete
        for com in af.extensions[COM]:
            assert verifier.is_complete(af, com)
Exemple #10
0
    def is_complete(af: ArgumentationFramework, arg_set: frozenset) -> bool:
        """
        Check whether ``arg_set`` is a complete extension.

        Besnard & Doutre (2004): S is complete iff S equals the intersection
        of (i) the arguments defended by S and (ii) the arguments that S does
        not attack.
        """
        attacked = af.attacked_by(arg_set)

        # an argument is defended when every one of its attackers is
        # counter-attacked by arg_set
        defended = {
            arg
            for arg in af.arguments
            if set(af.graph.predecessors(arg)).issubset(attacked)
        }

        unattacked = af.arguments - attacked
        return arg_set == frozenset(defended & unattacked)
Exemple #11
0
    def solve(self,
              af: ArgumentationFramework,
              semantics: str,
              return_times=False
              ) -> Union[Set[frozenset], Tuple[Set[frozenset], float]]:
        """
        Compute extensions with mu-toksia,
        write to a tempfile and parse
        """
        # single-extension task for unique-status semantics, enumerate otherwise
        task = "SE" if semantics in [GRD, IDE] else "EE"

        apx = af.to_apx()
        # hand the APX input to the solver through a RAM-backed temp file
        with tempfile.NamedTemporaryFile(mode="w+") as tmp_input:
            tmp_input.write(apx)
            tmp_input.seek(0)

            cmd = [
                str(self.mu_toksia),
                "-fo",
                "apx",
                "-p",
                f"{task}-{self.semantics_conversion[semantics]}",
                "-f",
                tmp_input.name,
            ]

            # capture solver stdout in a second temp file and time the run
            with tempfile.TemporaryFile(mode="w+") as tmp_output:
                start = timer()
                subprocess.run(cmd,
                               stdout=tmp_output,
                               stderr=subprocess.PIPE,
                               check=True)
                solve_time = timer() - start

                tmp_output.seek(0)
                extensions = self.parse(tmp_output, semantics)

        if return_times:
            return extensions, solve_time
        return extensions
 def get_af(self, idx: int) -> ArgumentationFramework:
     """ Load af from pickle on disk based on idx in dataset"""
     # self.afs holds the pickle paths, indexed like the dataset
     pkl_path = self.afs[idx]
     return ArgumentationFramework.from_pkl(pkl_path)
Exemple #13
0
 def from_state_dict(cls, state_dict: dict):
     """ Construct object from state dict """
     # rebuild the nested AF object before delegating to the constructor
     af_kwargs = state_dict["af"]
     state_dict["af"] = ArgumentationFramework(**af_kwargs)
     return cls(**state_dict)
Exemple #14
0
    def solve(
        self,
        problem: Union[ExtensionEnforcementProblem, StatusEnforcementProblem],
        semantics,
        time_limit=None,
    ):
        """
        Solve an enforcement problem with an external solver (maadoita for
        grounded semantics, pakota otherwise) and return
        ``(num_changes, solve_time, edge_changes)``.

        Returns ``(None, 0, None)`` for unsupported task/semantics
        combinations and on timeout; in the timeout case the problem's
        solution bookkeeping is updated in place first.
        """
        # combinations the external solvers cannot handle
        if (isinstance(problem, StatusEnforcementProblem)
                and semantics == GRD) or (problem.task == SCEPT
                                          and semantics != STB):
            return None, 0, None

        solver = self.maadoita if semantics == GRD else self.pakota

        with tempfile.NamedTemporaryFile(mode="w+") as tmp_input:
            tmp_input.write(problem.to_apx())
            tmp_input.seek(0)

            cmd = [
                str(solver), tmp_input.name,
                self.task_conversion[solver][problem.task]
            ]
            # maadoita implies grounded semantics; only pakota takes the flag
            if semantics != GRD:
                cmd.append(self.semantics_conversion[semantics])

            # write output file to RAM and solve
            with tempfile.TemporaryFile(mode="w+") as tmp_output:
                try:
                    # measure the child process's CPU time via rusage deltas
                    usage_start = resource.getrusage(resource.RUSAGE_CHILDREN)
                    process = subprocess.run(
                        cmd,
                        stdout=tmp_output,
                        stderr=subprocess.PIPE,
                        timeout=time_limit,
                        check=False,
                    )
                    usage_end = resource.getrusage(resource.RUSAGE_CHILDREN)
                    solve_time = usage_end.ru_utime - usage_start.ru_utime
                except subprocess.TimeoutExpired:
                    # record the failed attempt so callers can decide whether
                    # to retry later with a larger time limit
                    problem.solutions.setdefault(self.name,
                                                 {}).update({semantics: None})
                    problem.solve_times.setdefault(self.name, {}).update(
                        {semantics: time_limit})
                    problem.edge_changes.setdefault(self.name, {}).update(
                        {semantics: None})

                    return None, 0, None

                if process.stderr:
                    raise Exception(process.stderr)
                tmp_output.seek(0)
                output = tmp_output.read()
                tmp_output.seek(0)
                lines = tmp_output.readlines()

        # locate the line that reports the number of changes; the modified
        # AF in APX format follows on the remaining lines
        if solver == self.maadoita:
            changes_readout_line = 3
            apx_readout_lines = slice(changes_readout_line + 1, None)
            assert "Number of changes:" in lines[changes_readout_line]
        else:
            # pakota may print an extra "Number of iterations" line first
            changes_readout_line = 2 if "Number of iterations" in lines[
                0] else 1
            apx_readout_lines = slice(changes_readout_line + 1, None)
            assert ("o " == lines[changes_readout_line][0:2]
                    ), "readout problem for non GR"

        # first integer on the readout line is the number of changes
        num_changes = int(
            re.search(r"\d+", lines[changes_readout_line]).group())

        apx_lines = lines[apx_readout_lines]
        modified_af = ArgumentationFramework.from_apx(apx_lines)
        # attacks added/removed relative to the original framework
        edge_changes = modified_af.edge_difference(problem.af)

        return num_changes, solve_time, edge_changes