Example #1
0
    def parse(self, string):
        """Build Questions from tokens paired as (stem, option group).

        Tokens are consumed two at a time: the even-indexed token is
        treated as the stem, the following token as its option block.
        Returns ``self`` so calls can be chained.
        """
        # Option label: optional asterisks/space, then "A." / "(A)" / "A)".
        index_pat = r'\**\s*(?:[A-Ea-e]\.|\(?[A-Ea-e]\))'
        body_pat = r'.+'
        option_pat = r'({index}\s+{body})'.format(index=index_pat, body=body_pat)
        self._tokenize(string)

        # Walk the token list in stem/option-group pairs.
        for stem_idx in range(0, len(self._tokens), 2):
            group_idx = stem_idx + 1
            if group_idx >= len(self._tokens):
                continue

            question = Question()

            # Prefer the "<number>." / "<number> "-prefixed tail of the stem
            # token; fall back to the raw token when no number is found.
            stem_match = re.search(r'[0-9]+(?:\.|\s).+$',
                                   self._tokens[stem_idx], re.DOTALL)
            if stem_match:
                question.stem = stem_match.group().strip()
            else:
                question.stem = self._tokens[stem_idx]

            # Splitting on the capturing pattern keeps the matched option
            # text; discard empty/whitespace-only fragments.
            for piece in re.split(option_pat, self._tokens[group_idx]):
                cleaned = piece.strip()
                if cleaned:
                    question.options.append(cleaned)

            self._questions.append(question)

        return self
Example #2
0
    def parse(self, string):
        """Parse tokens shaped "<n>. <stem ending in ?/:/.>\\n\\n<options>".

        Delegates to the superclass first, then extracts one Question per
        matching token.  Returns ``self`` for chaining.
        """
        super(StemsParser, self).parse(string)
        stem_index = r'[0-9]+\.\s+'          # "<number>." plus whitespace
        stem_body = r'.+?[?:.]\n\n'          # shortest stem ending at a blank line
        option_block = r'.+'                 # everything after the blank line
        regex = r"({si}{sb})({o})".format(
            si=stem_index,
            sb=stem_body,
            o=option_block,
        )

        self._tokenize(string)
        for token in self._tokens:
            question = Question()
            match = re.search(regex, token, re.DOTALL)
            if not match:
                continue
            question.stem = match.group(1).strip()
            # Options are one per line; the first empty line ends the list.
            for line in match.group(2).split('\n'):
                if not line:
                    break
                question.options.append(line.strip())
            self._questions.append(question)

        return self
Example #3
0
    def parse(self, string):
        """Accumulate stem lines until option lines appear; when a
        non-option line follows an option run, flush the finished
        question and start a new one.

        Fix: the original guarded every access with
        ``assert question is not None`` inside ``try/except
        AssertionError: pass``.  ``question`` is assigned a ``Question``
        before the loop and re-assigned within it, so it is never
        ``None`` — the guards were dead code, and ``assert`` would be
        stripped entirely under ``python -O`` anyway.  Behavior is
        otherwise unchanged.  Returns ``self`` for chaining.
        """
        question = Question()
        in_options = False

        self._tokenize(string)
        for token in self._tokens:
            # A token starting like "a. " / "B) " is an option line.
            if re.match(r"^\s*[a-zA-Z][.)] ", token):
                in_options = True
                question.options.append(token)
                continue

            # First non-option token after an option run begins a new question.
            if in_options:
                self._questions.append(question)
                question = Question()
                question.stem = ''
                in_options = False

            question.stem += token + '\n'

        # Flush the trailing question only if it gathered any options.
        if question and len(question.options) > 0:
            self._questions.append(question)

        return self
Example #4
0
    def parse(self, string):
        """Pair tokens as (stem, option group) and build one Question per pair.

        Assumes ``self._tokenize`` leaves stems at even indices and their
        option groups at the following odd indices — TODO confirm against
        the tokenizer.  Returns ``self`` so calls can be chained.
        """
        # Option label: optional asterisks/whitespace, then "A." / "(A)" / "A)".
        re_index = r'\**\s*(?:[A-Ea-e]\.|\(?[A-Ea-e]\))'
        re_body = r'.+'
        re_option = r'({index}\s+{body})'.format(index=re_index, body=re_body)
        self._tokenize(string)

        # spin thru the input chunks two at a time, the first being the
        # stem, presumably, and the second being the option group
        for st_index in range(0, len(self._tokens), 2):
            op_index = st_index + 1
            if op_index < len(self._tokens):
                question = Question()

                # Keep the "<number>." (or "<number> ")-prefixed tail of the
                # stem token; fall back to the whole token when absent.
                stem = re.search(r'[0-9]+(?:\.|\s).+$', self._tokens[st_index],
                                 re.DOTALL)
                #~ stem = re.search(r"\n*((?:[0-9]*\.*\s*).+)$", self._tokens[st_index], re.DOTALL)
                #~ stem = re.search(r"(?:[0-9]+\s+(?:.|\n)+$)+?|(?:\n*.+$)", self._tokens[st_index])
                question.stem = stem.group().strip(
                ) if stem else self._tokens[st_index]

                # Splitting on the capturing option pattern keeps the option
                # text in the result; drop empty/whitespace-only fragments.
                options = [
                    o.strip()
                    for o in re.split(re_option, self._tokens[op_index])
                    if o.strip()
                ]
                for option in options:
                    question.options.append(option)

                self._questions.append(question)

        return self
Example #5
0
    def parse(self, string):
        """Collect stem lines until option lines start; when a non-option
        line follows an option run, flush the completed question and
        begin another.

        Fix: the original wrapped each access in ``assert question is not
        None`` / ``except AssertionError: pass``.  ``question`` is created
        before the loop and re-created inside it, so it can never be
        ``None`` — the try/except blocks were unreachable, and ``assert``
        is removed under ``python -O`` in any case.  Behavior is otherwise
        identical.  Returns ``self`` for chaining.
        """
        question = Question()
        collecting_options = False

        self._tokenize(string)
        for token in self._tokens:
            # Option lines look like "a. " or "B) " at the start of the token.
            if re.match(r"^\s*[a-zA-Z][.)] ", token):
                collecting_options = True
                question.options.append(token)
                continue

            # A stem line arriving after options closes the current question.
            if collecting_options:
                self._questions.append(question)
                question = Question()
                question.stem = ''
                collecting_options = False

            question.stem += token + '\n'

        # Emit the final question if it collected at least one option.
        if question and len(question.options) > 0:
            self._questions.append(question)

        return self
Example #6
0
    def parse(self, string):
        """Fallback parser: first token becomes the stem, every remaining
        token an option.  Emits at most one Question; returns ``self``.
        """
        self._tokenize(string)
        if not self._tokens:
            return self

        question = Question()
        question.stem = self._tokens[0]
        # Slicing past the end yields [], which covers the single-token case.
        question.options = self._tokens[1:]

        self._questions.append(question)

        return self
Example #7
0
    def parse(self, string):
        """Fallback parser: the first token is the stem, the rest are options.

        Emits at most one Question; returns ``self`` for chaining.
        """
        self._tokenize(string)
        if not self._tokens: return self

        question = Question()
        question.stem = self._tokens[0]
        # tokens[1:] is already [] for a single token, so the guard is
        # redundant but harmless.
        question.options = self._tokens[1:] if len(self._tokens) > 1 else []

        self._questions.append(question)

        return self
Example #8
0
    def parse(self, string):
        """Match a whole question per token: stem plus options A-C, with D
        and E optional.  ``self._format`` fills the {i}/{a}..{e}/{w}/{body}
        placeholders — presumably class-level sub-patterns; verify there.
        Returns ``self`` for chaining.
        """
        super(QuestParser, self).parse(string)
        regex = self._format(r"({i}{w}{body})({a}{w}{body})({b}{w}{body})({c}{w}{body})({d}{w}{body})?({e}{w}{ebody})?{lb}")

        self._tokenize(string)
        for token in [t.strip() for t in self._tokens if t]:
            question = Question()
            match = re.search(regex, token, re.DOTALL | re.IGNORECASE)
            if not match:
                continue
            question.stem = match.group(1).strip()
            # Groups 2-4 are mandatory options; 5 and 6 may be absent.
            for required in (2, 3, 4):
                question.options.append(match.group(required).strip())
            for optional in (5, 6):
                text = match.group(optional)
                if text:
                    question.options.append(text.strip())
            self._questions.append(question)

        return self
Example #9
0
    def parse(self, string):
        """Line-oriented scan: a "<n>. " token starts a new question and a
        labelled token ("a) ", "[B] ", "c: ") becomes an option of the
        current one.  Questions without options are dropped.  Returns
        ``self`` for chaining.
        """
        question = None
        self._tokenize(string)

        for token in self._tokens:
            # Stem line: leading number, a dot, then whitespace.
            if re.match(r"^\s*\d+\.\s", token):
                # Flush the previous question only if it collected options.
                if question and len(question.options) > 0:
                    self._questions.append(question)
                question = Question()
                question.stem = token
            elif question is not None and re.match(r"^\s*[\[\(]?[a-zA-Z][.):\]]\s", token):
                # Option line: optional bracket, a letter, then . ) : or ].
                question.options.append(token)

        # Flush the trailing question.
        if question and len(question.options) > 0:
            self._questions.append(question)

        return self
Example #10
0
    def parse(self, string):
        super(QuestParser, self).parse(string)
        regex = self._format(
            r"({i}{w}{body})({a}{w}{body})({b}{w}{body})({c}{w}{body})({d}{w}{body})?({e}{w}{ebody})?{lb}"
        )

        self._tokenize(string)
        for token in [t.strip() for t in self._tokens if t]:
            question = Question()
            match = re.search(regex, token, re.DOTALL | re.IGNORECASE)
            if match:
                question.stem = match.group(1).strip()
                question.options.append(match.group(2).strip())
                question.options.append(match.group(3).strip())
                question.options.append(match.group(4).strip())
                if match.group(5):
                    question.options.append(match.group(5).strip())
                if match.group(6):
                    question.options.append(match.group(6).strip())
                self._questions.append(question)

        return self
Example #11
0
    def parse(self, string):
        """Line-oriented scan: "<n>. " tokens start a question; labelled
        tokens ("a) ", "[B] ", "c: ") become its options.

        Questions that collected no options are dropped.  Returns ``self``
        for chaining.
        """
        question = None
        self._tokenize(string)

        for token in self._tokens:
            # Stem line: leading number followed by a dot and whitespace.
            s = re.match(r"^\s*\d+\.\s", token)
            if s and s.group():
                # Flush the previous question only if it collected options.
                if question and len(question.options) > 0:
                    self._questions.append(question)
                question = Question()
                question.stem = token
                continue

            if question is not None:
                # Option line: optional bracket, a letter, then . ) : or ].
                o = re.match(r"^\s*[\[\(]?[a-zA-Z][.):\]]\s", token)
                if o and o.group():
                    question.options.append(token)

        if question and len(question.options) > 0:
            self._questions.append(question)

        return self
Example #12
0
    def parse(self, string):
        """Parse tokens shaped "<n>. <stem ending in ?/:/.>\\n\\n<options>".

        Delegates to the superclass first, then emits one Question per
        matching token.  Returns ``self`` for chaining.
        """
        super(StemsParser, self).parse(string)
        # stem index: "<number>." plus trailing whitespace
        si = r'[0-9]+\.\s+'
        # stem body: shortest run ending in ?/:/. followed by a blank line
        sb = r'.+?[?:.]\n\n'
        # option block: everything after the blank line
        o  = r'.+'
        regex = r"({si}{sb})({o})".format(
            si=si, sb=sb, o=o,
            )

        self._tokenize(string)
        for token in self._tokens:
            question = Question()
            match = re.search(regex, token, re.DOTALL)
            if match:
                question.stem = match.group(1).strip()
                # Options are one per line; the first empty line ends the list.
                for option in match.group(2).split('\n'):
                    if option:
                        question.options.append(option.strip())
                    else:
                        break
                self._questions.append(question)

        return self