Code Example #1
File: analysis.py  Project: fsismondi/ttproto
def dissect_pcap_to_list(filename, protocol_selection=None):
    """
    :param filename: filename of the pcap file to be dissected
    :param protocol_selection:  protocol class for filtering purposes (inheriting from packet Value)
    :return: List of frames (frames as Ordered Dicts)
    """

    if protocol_selection:
        assert issubclass(protocol_selection, PacketValue)

    frame_list = []

    # for speeding up the process
    with Data.disable_name_resolution():

        frames = Frame.create_list(PcapReader(filename))

        for f in frames:

            # if we are filtering and the frame doesn't contain the protocol
            # then skip frame
            # TODO make this generic for any type of protocol
            if protocol_selection and not f.coap:
                pass
            else:
                frame = OrderedDict()
                frame['_type'] = 'frame'
                frame['id'] = f.id
                frame['timestamp'] = f.ts
                frame['error'] = f.exc
                frame['protocol_stack'] = []
                value_to_list(frame['protocol_stack'], f.msg.get_value())
                frame_list.append(frame)
    return frame_list
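
A minimal usage sketch for the helper above; the CoAP import path and the 'sample.pcap' file name are assumptions, not taken from this excerpt:

# hedged sketch: dissect a capture, keeping only frames that contain CoAP
from ttproto.core.lib.all import CoAP  # assumed import path for the CoAP protocol class

frames = dissect_pcap_to_list('sample.pcap', protocol_selection=CoAP)
for frame in frames:
    # each frame is an OrderedDict with '_type', 'id', 'timestamp', 'error', 'protocol_stack'
    print(frame['id'], frame['timestamp'])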
Code Example #2
File: dissector.py  Project: fsismondi/ttproto
    def get_dissection(
        self,
        protocol: optional(is_protocol) = None,
    ) -> list_of(OrderedDict):
        """
        Get the dissection of a capture as a list of frames, each represented as an OrderedDict

        :param protocol: Protocol class for filtering purposes
        :type protocol: type
        :raises TypeError: If protocol is not a protocol class
        :raises ReaderError: If the reader couldn't process the file

        :return: A list of frames represented in the API's dict form
        :rtype: [OrderedDict]

        """
        # log.debug('Starting dissection.')
        # Check that the given protocol is a valid protocol class
        if all((protocol, not is_protocol(protocol))):
            raise TypeError(protocol.__name__ + ' is not a protocol class')

        fs = self.frames

        # For speeding up the process
        with Data.disable_name_resolution():

            # Filter the frames for the selected protocol
            if protocol:
                fs, _ = Frame.filter_frames(fs, protocol)

        if fs is None:
            raise Error('Empty capture cannot be dissected')

        # Then return the list of dictionary frame representation
        return [frame.dict() for frame in fs]
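
A hedged usage sketch for get_dissection; constructing the enclosing Dissector object from a pcap path is an assumption not shown in this excerpt:

# hedged sketch: assumes a Dissector(<pcap path>) constructor exists in dissector.py
from ttproto.core.lib.all import CoAP  # assumed import path

dissector = Dissector('sample.pcap')            # hypothetical construction
for frame_dict in dissector.get_dissection(protocol=CoAP):
    print(frame_dict['id'])                     # each entry is an OrderedDict frame representation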
Code Example #3
File: analysis.py  Project: fsismondi/ttproto
def basic_dissect_pcap_to_list(filename, protocol_selection=None):
    """
    :param filename: filename of the pcap file to be dissected
    :param protocol_selection:  protocol class for filtering purposes (inheriting from packet Value)
    :return: list of tuples with basic info about frames:
    [
        (13, '[127.0.0.1                        -> 127.0.0.1                       ] CoAP [CON 38515] GET /test'),
        (14, '[127.0.0.1                        -> 127.0.0.1                       ] CoAP [ACK 38515] 2.05 Content '),
        (21, '[127.0.0.1                        -> 127.0.0.1                       ] CoAP [CON 38516] PUT /test'),
        (22, '[127.0.0.1                        -> 127.0.0.1                       ] CoAP [ACK 38516] 2.04 Changed ')
    ]

    """
    # read the frames
    # TODO: filter uninteresting frames ? (to decrease the load)
    with Data.disable_name_resolution():
        frames = Frame.create_list(PcapReader(filename))
        response = []

        # content of the response, TODO make this generic for any protocol
        if protocol_selection and protocol_selection is CoAP:
            selected_frames = [f for f in frames if f.coap]
        else:
            selected_frames = frames

        for f in selected_frames:
            response.append((f.id, f.msg.summary()))
        # malformed frames
        # malformed = list (filter ((lambda f: f.exc), frames))
    return response
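
A short sketch of how the summary helper above might be called; the file name and the CoAP import path are placeholders/assumptions:

# hedged sketch: print a one-line summary per CoAP frame
from ttproto.core.lib.all import CoAP  # assumed import path

for frame_id, summary in basic_dissect_pcap_to_list('sample.pcap', CoAP):
    print("%5d %s" % (frame_id, summary))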
Code Example #4
File: dissector.py  Project: fsismondi/ttproto
    def dissect(
            self,
            protocol: optional(is_protocol) = None) -> list_of(OrderedDict):
        """
        Dissect the pcap file into a list of frames

        :param protocol: Protocol class for filtering purposes
        :type protocol: type
        :raises TypeError: If protocol is not a protocol class
        :raises ReaderError: If the reader couldn't process the file

        :return: A list of frames represented in the API's dict form
        :rtype: [OrderedDict]
        """
        # log.debug('Starting dissection.')
        # Check that the given protocol is a valid protocol class
        if all((protocol, not is_protocol(protocol))):
            raise TypeError(protocol.__name__ + ' is not a protocol class')

        # For speeding up the process
        with Data.disable_name_resolution():

            # Get the list of frames
            frames = self.__capture.frames

            # Filter the frames for the selected protocol
            if protocol is not None:
                frames, _ = Frame.filter_frames(frames, protocol)

        # Then return the list of dictionary frame representation
        return [frame.dict() for frame in frames]
Code Example #5
File: analysis.py  Project: fsismondi/ttproto
def analyse_file(filename):
    testcases, _ = import_testcases()
    # read the frames
    # TODO: filter uninteresting frames ? (to decrease the load)
    with Data.disable_name_resolution():
        frames = Frame.create_list(PcapReader(filename))

        for f in frames:
            print("%5d %s" % (f.id, f.msg.summary()))

        # malformed frames
        malformed = list(filter((lambda f: f.exc), frames))

        print("\n%d malformed frames" % len(malformed))
        for f in malformed:
            print("%5d %s" % (f.id, f.exc))

        tracker = Tracker(frames)
        conversations = tracker.conversations
        ignored = tracker.ignored_frames
        #   sys.exit(1)
        #   conversations, ignored = extract_coap_conversations (frames)

        print("\n%d ignored frames" % len(ignored))
        for f in ignored:
            print("%5d %s" % (f.id, f.msg.summary()))

        print("\n%d CoAP conversations" % len(conversations))
        for t in conversations:
            print("    ---- Conversation %d    %s ----" % (t.id, t.tag))
            for f in t:
                print("    %5d %s" % (f.id, f.msg.summary()))

        conversations_by_pair = proto_specific.group_conversations_by_pair(
            conversations)

        print("\nTestcase results")
        results_by_pair = {}
        for pair, conversations in conversations_by_pair.items():
            pair_results = []
            #       print (pair, conversations)
            print("---- Pair  %s -> %s ----" % pair)
            for tc_type in testcases:
                print(" --- Testcase %s ---" % tc_type.__name__)
                tc_results = []
                for tr in conversations:
                    tc = tc_type(tr)
                    if tc.verdict:
                        print("    -- Conversation %d -> %s --" %
                              (tc.conversation.id, tc.verdict))
                        for line in tc.text.split("\n"):
                            print("\t" + line)
                        tc_results.append(tc)
                pair_results.append(tc_results)
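
Since analyse_file prints its report directly to stdout, a usage sketch (with a placeholder file name) is a single call:

# hedged sketch: print frames, malformed frames, conversations and test case results
analyse_file('sample.pcap')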
Code Example #6
File: dissector.py  Project: fsismondi/ttproto
    def summary(self,
                protocol: optional(is_protocol) = None) -> list_of((int, str)):
        """
        Get a one-line summary of each frame

        :param protocol: Protocol class for filtering purposes
        :type protocol: type

        :Example:

        from ttproto.core.lib.all import Ieee802154

        for s in dissector.summary(protocol = Ieee802154):

            print(s)

        :raises TypeError: If protocol is not a protocol class
        :raises ReaderError: If the reader couldn't process the file

        :return: Basic information about the frames, as in the example below
        :rtype: [(int, str)]

        :Example:

            [
                (13, '[127.0.0.1 -> 127.0.0.1] CoAP [CON 38515] GET /test'),

                (14, '[127.0.0.1 -> 127.0.0.1] CoAP [ACK 38515] 2.05 Content'),

                (21, '[127.0.0.1 -> 127.0.0.1] CoAP [CON 38516] PUT /test'),

                (22, '[127.0.0.1 -> 127.0.0.1] CoAP [ACK 38516] 2.04 Changed')
            ]

        .. note:: With the protocol option we can filter the response
        """

        if all((protocol, not is_protocol(protocol))):
            raise TypeError(protocol.__name__ + ' is not a protocol class')

        fs = self.frames

        # For speeding up the process
        with Data.disable_name_resolution():

            # Filter the frames for the selected protocol
            if protocol:
                fs, _ = Frame.filter_frames(fs, protocol)

        if fs is None:
            raise Error('Empty capture cannot be dissected')

        # Return the list of frame summaries
        return [frame.summary() for frame in fs]
Code Example #7
File: dissector.py  Project: fsismondi/ttproto
    def summary(self,
                protocol: optional(is_protocol) = None) -> list_of((int, str)):
        """
        Get a one-line summary of each frame

        :param protocol: Protocol class for filtering purposes
        :type protocol: type

        :Example:

        from ttproto.core.lib.all import Ieee802154

        for s in dissector.summary(protocol = Ieee802154):

            print(s)

        :raises TypeError: If protocol is not a protocol class
        :raises ReaderError: If the reader couldn't process the file

        :return: Basic information about the frames, as in the example below
        :rtype: [(int, str)]

        :Example:

            [
                (13, '[127.0.0.1 -> 127.0.0.1] CoAP [CON 38515] GET /test'),

                (14, '[127.0.0.1 -> 127.0.0.1] CoAP [ACK 38515] 2.05 Content'),

                (21, '[127.0.0.1 -> 127.0.0.1] CoAP [CON 38516] PUT /test'),

                (22, '[127.0.0.1 -> 127.0.0.1] CoAP [ACK 38516] 2.04 Changed')
            ]

        .. todo:: Filter uninteresting frames ? (to decrease the load)
        .. note:: With the protocol option we can filter the response
        """

        # Check the protocol
        if all((protocol, not is_protocol(protocol))):
            raise TypeError(protocol.__name__ + ' is not a protocol class')

        # Disable the name resolution in order to improve performances
        with Data.disable_name_resolution():

            # Get the frames from the capture
            frames = self.__capture.frames

            # Filter the frames for the selected protocol
            if protocol is not None:
                frames, _ = Frame.filter_frames(frames, protocol)

        # Then return the summary of every frame
        return [frame.summary() for frame in frames]
Code Example #8
File: dissector.py  Project: fsismondi/ttproto
    def get_dissection_simple_format(
        self,
        protocol: optional(is_protocol) = None,
    ) -> list_of(str):
        """
        Function to get dissection of a capture as a list of frames represented as strings

        :param protocol: Protocol class for filtering purposes
        :type protocol: type
        :raises TypeError: If protocol is not a protocol class
        :raises ReaderError: If the reader couldn't process the file

        :return: A list of Frame represented as plain non-structured text
        :rtype: [str]

        """
        # log.debug('Starting dissection.')
        # Check that the given protocol is a valid protocol class
        if all((protocol, not is_protocol(protocol))):
            raise TypeError(protocol.__name__ + ' is not a protocol class')

        fs = self.frames

        # For speeding up the process
        with Data.disable_name_resolution():

            # Filter the frames for the selected protocol
            if protocol:
                fs, _ = Frame.filter_frames(fs, protocol)

        if fs is None:
            raise Error('Empty capture cannot be dissected')

        # fixme modify Message class from ttproto.data structure so I can get text display without this patch
        class WritableObj(object):
            def __init__(self, text=''):
                self.val = text

            def __str__(self):
                return self.val

            def write(self, text):
                self.val += text

        frame_dissection_list = []

        for f in fs:
            text_output = WritableObj()
            f.message.display(output=text_output)
            frame_dissection_list.append(str(text_output))

        # Then return the list of frames, each as a simple text dissection
        return frame_dissection_list
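
A hedged sketch for the plain-text variant; as above, the Dissector construction and the file name are assumptions:

# hedged sketch: dump every frame of the capture as unstructured text
dissector = Dissector('sample.pcap')            # hypothetical construction
for text in dissector.get_dissection_simple_format():
    print(text)
    print('-' * 40)                             # visual separator between frames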
Code Example #9
File: analyzer.py  Project: fsismondi/ttproto
    def analyse(
            self,
            filename: str,
            tc_id: str
    ) -> (str, str, list_of(int), str, list_of((str, str)), list_of((type, Exception, is_traceback))):
        """
        Analyse a dump file associated to a test case

        :param filename: The name of the file to analyse
        :param tc_id: The unique id of the test case to run against the given file
        :type filename: str
        :type tc_id: str

        :return: A tuple with the information about the analysis results:
                 - The id of the test case
                 - The verdict as a string
                 - The list of frames relevant to the result
                 - A string with logs
                 - A list of all the partial verdicts
                 - A list of tuples representing the exceptions that occurred
        :rtype: (str, str, [int], str,[(str, str)], [(type, Exception, traceback)])

        :raises FileNotFoundError: If the test env of the tc is not found
        :raises ReaderError: If the reader couldn't read and decode the capture
        :raises ObsoleteTestCase: If the test case is obsolete

        .. example::
            ('TD_COAP_CORE_03', 'fail', [21, 22], [('fail', "some comment"),('fail', "some other comment")] , 'verdict description', '')

        .. note::
            Allows multiple executions (occurrences) of the testcase and returns as verdict:
                - fail: if at least one of the occurrences failed
                - inconclusive: if all occurrences returned an inconclusive verdict
                - pass: if all occurrences are inconclusive, or at least one is pass and
                        the rest are inconclusive
        """

        # Get the test case class
        test_case_class = self.import_test_cases([tc_id])
        assert len(test_case_class) == 1
        test_case_class = test_case_class[0]

        # Disable name resolution for performance improvements
        with Data.disable_name_resolution():
            # Get the capture from the file
            capture = Capture(filename)
            # Initialize the TC with the list of conversations
            test_case = test_case_class(capture)
            verdict, rev_frames, log, partial_verdicts, exceps = test_case.run_test_case()

            # print('##### capture')
            # print(capture)
            # print('#####')
            #
            # # Here we execute the test case and return the result
            #
            # print('##### Verdict given')
            # print(verdict)
            # print('#####')
            # print('##### Review frames')
            # print(rev_frames)
            # print('#####')
            # print('##### Text')
            # print(log, partial_verdicts)
            # print('#####')
            # print('##### Exceptions')
            # print(exceptions)
            # print('#####')

            return tc_id, verdict, rev_frames, log, partial_verdicts, exceps
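
A hedged sketch for the analyse method; the Analyzer construction and the test environment name are assumptions, while the test case id reuses TD_COAP_CORE_03 from the docstrings on this page:

# hedged sketch: run one test case against a capture and unpack the result tuple
analyzer = Analyzer('tat_coap')                 # hypothetical test environment name
tc_id, verdict, frames, log, partials, errors = analyzer.analyse('sample.pcap', 'TD_COAP_CORE_03')
print(tc_id, verdict, frames)                   # e.g. ('TD_COAP_CORE_03', 'fail', [21, 22])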
Code Example #10
File: analysis.py  Project: fsismondi/ttproto
def analyse_file_html(filename,
                      orig_name,
                      urifilter=False,
                      exceptions=None,
                      regex=None,
                      profile="client"):
    """
    TODO
    Args:
        filename:
        orig_name:
        urifilter:
        exceptions:
        regex:
        profile:

    Returns:

    """
    testcases, _ = import_testcases()

    logger = HTMLLogger()

    with XHTML10Generator() as g:

        def log_text(text):
            if not isinstance(text, str):
                text = str(text)
            for line in text.split("\n"):
                mo = reg_frame.match(line)
                if mo:
                    g.a(href="#frame" + mo.group(1))(line + "\n")
                    continue
                mo = reg_verdict.match(line)
                if mo:
                    g.span(**{"class": mo.group(1)})(line)
                    g("\n")
                elif line.endswith("Mismatch"):
                    g.span(**{"class": "mismatch"})(line)
                    g("\n")
                elif line.startswith("Chaining to conversation "):
                    g("\n")
                    g.span(**{"class": "chaining"})(line)
                    g("\n")
                else:
                    g(line)
                    g("\n")

        with g.head:
            g.title("CoAP interoperability test results")
            g.meta(
                **{
                    "http-equiv": "Content-Type",
                    "content": "text/html; charset=utf-8"
                })

            g.style("""
a {color: inherit; text-decoration: inherit}
.pass {color: green;}
.inconclusive {color: #e87500;}
.fail {color: red;}
.error {color: red;}
.mismatch {color: #803000;}
.chaining {color: #808080; font-style: italic;}
.bgpass {background-color: #B0FFB0;}
.bginconclusive {background-color: #FFB080;}
.bgfail {background-color: #FF9090;}
.bgerror {background-color: #FF9090;}
.bgnone {background-color: #FFFFB0;}
td {padding-left:15px; padding-right:15px; padding-top:3px; padding-bottom:3px;}
th {padding-left:15px; padding-right:15px; padding-top:10px; padding-bottom:10px;}
table {border: 1px solid; border-spacing: 0px; }
""",
                    type="text/css")

        g.h1("CoAP interoperability test results")

        g.pre("""Tool version:   %s
File:           %s
Date:           %s
URI filter:     %s
Regex:      %r
""" % (TOOL_VERSION, (orig_name if orig_name else "(unknown)"),
        time.strftime("%a, %d %b %Y %T %z"),
        ("enabled" if urifilter else "disabled"), regex),
              style="line-height: 150%;")

        my_testcases = [
            t for t in testcases
            if t.reverse_proxy == (profile == "reverse-proxy")
        ]

        if regex is not None:
            try:
                re_regex = re.compile(regex, re.I)
            except Exception as e:
                g.b("Error: ")
                g("regular expression %r is invalid (%s)" % (regex, e))
                return

            my_testcases = list(
                filter((lambda t: re_regex.search(t.__name__)), my_testcases))

            if not my_testcases:
                g.b("Warning: ")
                g("regular expression %r did not yield any testcase" % regex)

        force = len(my_testcases) == 1

        g.h2("Summary")
        with g.table(id="summary", border=1):  # FIXME: bug xmlgen
            pass

        with Data.disable_name_resolution():
            frames = Frame.create_list(PcapReader(filename))

            g.h2("File content (%d frames)" % len(frames))
            with g.pre:
                for f in frames:
                    log_text(f)

            # malformed frames
            malformed = list(filter((lambda f: f.exc), frames))

            g.h2("Malformed frames (%d)" % len(malformed))
            with g.pre:
                for f in malformed:
                    log_text(f)
                    g(" %s\n" % f.exc)

            tracker = Tracker(frames)
            conversations = tracker.conversations
            ignored = tracker.ignored_frames
            #   sys.exit(1)
            #   conversations, ignored = extract_coap_conversations (frames)

            g.h2("Ignored frames (%d)" % len(ignored))
            with g.pre:
                for f in ignored:
                    log_text(f)

            g.h2("CoAP conversations (%d)" % len(conversations))
            for t in conversations:
                g.h3("Conversation %d %s" % (t.id, t.tag))
                with g.pre:
                    for f in t:
                        log_text(f)

            conversations_by_pair = proto_specific.group_conversations_by_pair(
                conversations)

            g.h2("Testcase results")

            with g.script(type="text/javascript"):
                g('''
var t=document.getElementById("summary");
var r;
var c;
var a;
''')
            results_by_pair = {}
            for pair, conversations in conversations_by_pair.items():
                pair_results = []
                pair_txt = "%s vs %s" % tuple(map(Resolver.format, pair))
                g.h3(pair_txt)
                for tc_type in my_testcases:
                    tc_results = []
                    g.a(name="%x" % id(tc_results))
                    g.h4("Testcase %s  -  %s" %
                         (tc_type.__name__, tc_type.get_objective()))
                    for tr in conversations:
                        tc = tc_type(tr, urifilter, force)
                        if tc.verdict:
                            with g.h5:
                                g("Conversation %d -> " % tc.conversation.id)
                                g.span(tc.verdict, **{"class": tc.verdict})
                            with g.pre:
                                log_text(tc.text)

                            tc_results.append(tc)

                            # remember the exception
                            if hasattr(tc,
                                       "exception") and exceptions is not None:
                                exceptions.append(tc)

                    pair_results.append(tc_results)

                with g.script(type="text/javascript"):

                    g('''
r=t.insertRow(-1);
c=document.createElement ("th");
c.innerHTML=%s;
c.colSpan=4;
r.appendChild(c);
''' % repr(pair_txt))
                    verdicts = None, "inconclusive", "pass", "fail", "error"
                    for title, func in (
                        ("ETSI interoperability test scenarios",
                         (lambda x: "IRISA" not in x[0].__name__)),
                        ("IRISA interoperability test scenarios",
                         (lambda x: "IRISA" in x[0].__name__))):
                        g('''
r=t.insertRow(-1);
c=document.createElement ("th");
c.innerHTML=%s;
c.colSpan=4;
r.appendChild(c);
''' % repr(title))
                        for tc_type, tc_results in filter(
                                func, zip(my_testcases, pair_results)):
                            v = 0
                            for tc in tc_results:
                                new_v = verdicts.index(tc.verdict)
                                if new_v > v:
                                    v = new_v
                            v_txt = verdicts[v]
                            if v_txt is None:
                                v_txt = "none"


#TODO: factorise that w/ a function
                            g('''
r=t.insertRow(-1);

a=document.createElement ("a")
a.href=("#%x")
a.innerHTML=%s
r.insertCell(-1).appendChild(a);

a=document.createElement ("a")
a.href=("#%x")
a.innerHTML=%s
r.insertCell(-1).appendChild(a);

a=document.createElement ("a")
a.href=("#%x")
a.innerHTML=%s
r.insertCell(-1).appendChild(a);

a=document.createElement ("a")
a.href=("#%x")
a.innerHTML=%s
c=r.insertCell(-1)
c.appendChild(a);
c.className=%s;
''' % (
                                id(tc_results),
                                repr(tc_type.__name__),
                                id(tc_results),
                                repr(tc_type.get_objective()),
                                id(tc_results),
                                repr("%d occurence(s)" % len(tc_results)),
                                id(tc_results),
                                repr(v_txt),
                                repr("bg" + v_txt),
                            ))
                    """
                    pair_results = []
                    g.h3("%s vs %s" % pair)
                    for tc_type in my_testcases:
                        g.h4 ("Testcase %s" % tc_type.__name__)
                        tc_results = []
                        for tr in conversations:
                            tc = tc_type (tr)
                            if tc.verdict:
                                with g.h5:
                                    g ("Conversation %d -> "% tc.conversation.id)
                                    g.span (tc.verdict, **{"class": tc.verdict})
                                with g.pre:
                                    log_text (tc.text)

                                tc_results.append (tc)

                        pair_results.append (tc_results)
                """

            g.h2("Frames details")

            for f in frames:
                with g.pre:
                    g.a(name="frame%d" % f.id)
                    g.b()("\n%s\n\n" % f)

                    b = f.msg.get_binary()
                    for offset in range(0, len(b), 16):
                        values = ["%02x" % v for v in b[offset:offset + 16]]
                        if len(values) > 8:
                            values.insert(8, " ")

                        g("         %04x     %s\n" %
                          (offset, " ".join(values)))
                logger.display_value(g, f.msg.get_value())
                g(" ")
                g.br()
                g.hr()
Code Example #11
File: analysis.py  Project: fsismondi/ttproto
def analyse_file_rest_api(filename,
                          urifilter=False,
                          exceptions=None,
                          regex=None,
                          profile="client",
                          verbose=False):
    """
    :param filename:
    :param urifilter:
    :param exceptions:
    :param regex:
    :param profile:
    :param verbose: boolean, if true method returns verdict description (which may be very verbose)
    :return: tuple

    example:
    [('TD_COAP_CORE_03', 'fail', [21, 22]), 'verdict description']

    NOTES:
     - allows multiple ocurrences of the testcase, returns as verdict:
            - fail: if at least one on the occurrences failed
            - inconclusive : if all ocurrences returned a inconv verdict
            - pass: all occurrences are inconclusive or at least one is PASS and the rest is inconclusive
    """
    testcases, _ = import_testcases(regex)
    my_testcases = [
        t for t in testcases if t.reverse_proxy == (profile == "reverse-proxy")
    ]

    if regex is not None:
        try:
            re_regex = re.compile(regex, re.I)
        except Exception as e:
            return "Error: regular expression %r is invalid (%s)" % (regex, e)

        # only filter the test cases by name when a regex was given,
        # otherwise re_regex would be undefined here
        my_testcases = list(
            filter((lambda t: re_regex.search(t.__name__)), my_testcases))

        if not my_testcases:
            return "regular expression %r did not yield any testcase" % regex

    force = len(my_testcases) == 1

    with Data.disable_name_resolution():
        frames = Frame.create_list(PcapReader(filename))

        # malformed frames
        malformed = list(filter((lambda f: f.exc), frames))
        tracker = Tracker(frames)
        conversations = tracker.conversations
        ignored = tracker.ignored_frames

        results = []

        conversations_by_pair = proto_specific.group_conversations_by_pair(
            conversations)
        results_by_pair = {}
        for pair, conversations in conversations_by_pair.items():
            pair_results = []
            for tc_type in my_testcases:
                tc_results = []

                # we run the testcase for each conversation, meaning that one type of TC can have more than one result!
                for tr in conversations:
                    tc = tc_type(tr, urifilter, force)
                    if tc.verdict:

                        tc_results.append(tc)

                        # remember the exception
                        if hasattr(tc, "exception") and exceptions is not None:
                            exceptions.append(tc)

                pair_results.append(tc_results)

        verdicts = None, "inconclusive", "pass", "fail", "error"
        for tc_type, tc_results in filter(lambda x: regex in x[0].__name__,
                                          zip(my_testcases, pair_results)):
            review_frames = []
            v = 0
            for tc in tc_results:
                # collect all the failed frames for a TC, even if they come from different conversations
                review_frames.extend(tc.failed_frames)

                new_v = verdicts.index(tc.verdict)
                if new_v > v:
                    v = new_v

            v_txt = verdicts[v]
            if v_txt is None:
                v_txt = "none"

            if verbose:
                results.append(
                    (type(tc).__name__, v_txt, list(review_frames), tc.text))
            else:
                results.append(
                    (type(tc).__name__, v_txt, list(review_frames), ''))
            # TODO clean list(review_frames)  tc.review_frames_log , tc.review_frames_log in module proto_specific

        return results
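
A hedged usage sketch for the REST-oriented entry point above; the file name is a placeholder and the regex reuses a test case id shown in the docstring:

# hedged sketch: run the analysis for one test case and inspect the verdicts
results = analyse_file_rest_api('sample.pcap', regex='TD_COAP_CORE_03', verbose=True)
for tc_name, verdict, review_frames, description in results:
    print(tc_name, verdict, review_frames)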