Code example #1
File: HTML2Objects.py Project: javierj/LaPISK
    def create(self, soupfragment):
        result = dict()
        field = self._getfield_info(soupfragment)
        title = ""
        result["link"] = ""
        result["answers"] = ""
        result["views"] = ""
        result["location"] = ""
        if self.urlobject is not None:
            result["location"] = self.urlobject.description()

        #result['location'] = self.webclient.get_url_desc()
        if field is not None:
            title = UnicodeDammit(field.a.contents[0]).unicode_markup
            result["link"] = field.a['href']
            fragment = self._get_answer_and_viewa_fragment(soupfragment)
            if fragment is not None:
                result["answers"] = self._get_number_from(fragment.contents[0].strip())
                result["views"] = self._get_number_from(fragment.contents[2].strip())
            else:
                print "No answer and view bloq identified in thread: ", result["link"]
                result["answers"] = -1
                result["views"] = -1

        result["title"] = title.strip()

        #result['next_url'] = _nextUrl(soupfragment)
        return result
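For reference, UnicodeDammit takes raw bytes (optionally with a list of candidate encodings, as in the later examples) and exposes the decoded text as unicode_markup. A minimal self-contained sketch; the byte string is a hypothetical example:

from bs4 import UnicodeDammit

raw = b"caf\xe9 title"  # hypothetical windows-1252 bytes; invalid as UTF-8
dammit = UnicodeDammit(raw, ["utf-8", "windows-1252"])
print(dammit.unicode_markup)     # 'café title'
print(dammit.original_encoding)  # the encoding it settled on: 'windows-1252'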
Code example #2
File: readers.py Project: hefeix/skll
    def _sub_read(self, f):
        example_num = 0
        curr_id = 'EXAMPLE_0'
        for line in f:
            # Process encoding
            if not isinstance(line, text_type):
                line = UnicodeDammit(line, ['utf-8',
                                            'windows-1252']).unicode_markup
            line = line.strip()
            # Handle instance lines
            if line.startswith('#'):
                curr_id = line[1:].strip()
            elif line and line not in ['TRAIN', 'TEST', 'DEV']:
                split_line = line.split()
                num_cols = len(split_line)
                del line
                # Line is just a class label
                if num_cols == 1:
                    class_name = safe_float(split_line[0],
                                            replace_dict=self.class_map)
                    field_pairs = []
                # Line has a class label and feature-value pairs
                elif num_cols % 2 == 1:
                    class_name = safe_float(split_line[0],
                                            replace_dict=self.class_map)
                    field_pairs = split_line[1:]
                # Line just has feature-value pairs
                elif num_cols % 2 == 0:
                    class_name = None
                    field_pairs = split_line

                curr_info_dict = {}
                if len(field_pairs) > 0:
                    # Get the current instance's feature-value pairs
                    field_names = islice(field_pairs, 0, None, 2)
                    # Convert values to floats, because otherwise
                    # features will be treated as categorical
                    field_values = (safe_float(val) for val in
                                    islice(field_pairs, 1, None, 2))

                    # Add the feature-value pairs to dictionary
                    curr_info_dict.update(zip(field_names, field_values))

                    if len(curr_info_dict) != len(field_pairs) / 2:
                        raise ValueError(('There are duplicate feature ' +
                                          'names in {} for example ' +
                                          '{}.').format(self.path_or_list,
                                                        curr_id))

                yield curr_id, class_name, curr_info_dict

                # Set default example ID for next instance, in case we see a
                # line without an ID.
                example_num += 1
                curr_id = 'EXAMPLE_{}'.format(example_num)
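For orientation, the reader above consumes MegaM-style input: a "# <id>" comment line names the next example, bare TRAIN/TEST/DEV markers are skipped, and each data line holds an optional class label followed by alternating feature names and values. A hypothetical fragment:

# A hypothetical .megam-style fragment the reader above would accept:
sample = """\
# EXAMPLE_0
1 height 68.0 weight 160.0
# EXAMPLE_1
0 height 62.0 weight 110.0
"""
# Each data line yields a (curr_id, class_name, feature_dict) triple,
# roughly ('EXAMPLE_0', 1, {'height': 68.0, 'weight': 160.0}) for the first.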
Code example #3
File: class_lm_cluster.py Project: shen1993/NER
    def corpus_generator(self):
        with open(self.corpus_path, 'rb') as f:
            i = 0
            for line in f:
                line = UnicodeDammit(line.strip()).unicode_markup
                if line:
                    if self.lower:
                        line = line.lower()
                    i += 1
                    if i % 100000 == 0:
                        logging.info('Read {} nonblank lines'.format(i))
                    for tok in re.split(r'\s+', line):
                        yield tok
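The method only reads corpus_path and lower from its instance, so a hypothetical holder class is enough to run it; 'corpus.txt' below is a stand-in path for a one-document-per-line text file.

from bs4 import UnicodeDammit
import logging
import re

class Corpus(object):
    """Hypothetical holder for the two attributes corpus_generator reads."""

    def __init__(self, corpus_path, lower=False):
        self.corpus_path = corpus_path
        self.lower = lower

    def corpus_generator(self):
        with open(self.corpus_path, 'rb') as f:
            i = 0
            for line in f:
                line = UnicodeDammit(line.strip()).unicode_markup
                if line:
                    if self.lower:
                        line = line.lower()
                    i += 1
                    if i % 100000 == 0:
                        logging.info('Read {} nonblank lines'.format(i))
                    for tok in re.split(r'\s+', line):
                        yield tok

for tok in Corpus('corpus.txt', lower=True).corpus_generator():  # stand-in path
    print(tok)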
Code example #4
File: readers.py Project: hefeix/skll
    def _sub_read(self, f):
        field_names = []
        # Process ARFF header
        for line in f:
            # Process encoding
            if not isinstance(line, text_type):
                decoded_line = UnicodeDammit(line,
                                             ['utf-8',
                                              'windows-1252']).unicode_markup
            else:
                decoded_line = line
            line = decoded_line.strip()
            # Skip empty lines
            if line:
                # Split the line using CSV reader because it can handle
                # quoted delimiters.
                split_header = self.split_with_quotes(line)
                row_type = split_header[0].lower()
                if row_type == '@attribute':
                    # Add field name to list
                    field_name = split_header[1]
                    field_names.append(field_name)
                    # Check if we're doing regression
                    if field_name == self.label_col:
                        self.regression = (len(split_header) > 2 and
                                           split_header[2] == 'numeric')
                # Save relation if specified
                elif row_type == '@relation':
                    self.relation = split_header[1]
                # Stop at data
                elif row_type == '@data':
                    break
                # Skip any other row types

        # Create header for CSV
        if PY2:
            io_type = BytesIO
        else:
            io_type = StringIO
        with io_type() as field_buffer:
            csv.writer(field_buffer, dialect='arff').writerow(field_names)
            field_str = field_buffer.getvalue()

        # Set label_col to be the name of the last field, since that's standard
        # for ARFF files
        if self.label_col != field_names[-1]:
            self.label_col = None

        # Process data as CSV file
        return super(ARFFReader, self)._sub_read(chain([field_str], f))
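For orientation, a minimal hypothetical ARFF input: each @attribute line lands in field_names, @relation is saved, and the loop hands the remaining rows to the CSV-based superclass at @data.

# A minimal hypothetical ARFF file for the reader above:
sample_arff = """\
@relation movie_reviews
@attribute word_count numeric
@attribute sentiment numeric
@data
120,1.0
87,0.0
"""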
Code example #5
File: readers.py Project: hefeix/skll
    def _sub_read(self, f):
        for example_num, line in enumerate(f):
            curr_id = ''
            label_map = None
            feat_map = None
            # Decode line if it's not already str
            if isinstance(line, bytes):
                line = UnicodeDammit(line, ['utf-8',
                                            'windows-1252']).unicode_markup
            match = self.line_regex.search(line.strip())
            if not match:
                raise ValueError('Line does not look like valid libsvm format'
                                 '\n{}'.format(line))
            # Metadata is stored in comments if this was produced by SKLL
            if match.group('comments') is not None:
                # Store mapping from feature numbers to names
                if match.group('feat_map'):
                    feat_map = {}
                    for pair in match.group('feat_map').split():
                        number, name = pair.split('=')
                        for orig, replacement in \
                                LibSVMReader.LIBSVM_REPLACE_DICT.items():
                            name = name.replace(orig, replacement)
                        feat_map[number] = name
                else:
                    feat_map = None
                # Store mapping from label/class numbers to names
                if match.group('label_map'):
                    label_map = dict(pair.split('=') for pair in
                                     match.group('label_map').strip().split())
                else:
                    label_map = None
                curr_id = match.group('example_id').strip()

            if not curr_id:
                curr_id = 'EXAMPLE_{}'.format(example_num)

            class_num = match.group('label_num')
            # If we have a mapping from class numbers to labels, get label
            if label_map:
                class_name = label_map[class_num]
            else:
                class_name = class_num
            class_name = safe_float(class_name,
                                    replace_dict=self.class_map)

            curr_info_dict = dict(self._pair_to_tuple(pair, feat_map) for pair
                                  in match.group('features').strip().split())

            yield curr_id, class_name, curr_info_dict
Code example #6
File: pmi_cluster.py Project: nickmarton/NLP
def document_generator(path, lower=False):
    '''
    Default document reader.  Takes a path to a file with one document per line,
    with tokens separated by whitespace, and yields lists of tokens per document.
    This could be replaced by any function that yields lists of tokens.
    See main() for how it is called.

    Note: this uses BeautifulSoup's UnicodeDammit to convert to unicode.
    '''
    with open(path, 'rb') as f:
        i = 0
        for line in f:
            line = UnicodeDammit(line.strip()).unicode_markup
            if line:
                if lower:
                    line = line.lower()
                i += 1
                if i % 100000 == 0:
                    logging.info('Read {} nonblank lines'.format(i))
                yield re.split(r'\s+', line)
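A hypothetical driver for the function above ('corpus.txt' is a stand-in path):

import logging
logging.basicConfig(level=logging.INFO)  # make the progress lines visible

for tokens in document_generator('corpus.txt', lower=True):
    print(len(tokens), tokens[:5])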
Code example #7
File: extractor.py Project: yinonbaron/aminer-spider
    def clean_google_title(self, title):
        has_dot = False

        titleCleaned = UnicodeDammit(title).unicode_markup
        # clean step 1
        # BUGFIX: don't remove [xxx], e.g. "OQL[C++]: Ext..."
        titleCleaned = re.sub("(<(.*?)>)", "", titleCleaned)
        re_hasdot = re.compile(r"(\.\.\.|&hellip;)", re.I)
        match = re_hasdot.search(title)
        if match is not None:
            has_dot = True
            # clean step 2, here title is readable
        titleCleaned = re.sub("(&nbsp;|&#x25ba;|&hellip;)", "", titleCleaned)
        titleCleaned = re.sub("(&#.+?;|&.+?;)", "", titleCleaned)
        titleCleaned = titleCleaned.strip()
        readableTitle = titleCleaned
        # Shrink, only letters left
        titleCleaned = re.sub("\W", "", titleCleaned)
        titleCleaned = titleCleaned.lower()
        return (readableTitle, titleCleaned, has_dot)
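The method never touches self, so its behavior is easy to check in isolation with a hypothetical title (passing None for self works when calling the function unbound):

readable, shrunk, has_dot = clean_google_title(None, "OQL[C++]: Ext&hellip;")
print(readable)  # 'OQL[C++]: Ext'
print(shrunk)    # 'oqlcext'
print(has_dot)   # True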
Code example #8
File: parse_script.py Project: dvp-tran/LSTM
def white_space_analysis(script_text, soup):
    spaces_regex = re.compile(r"^(\s*).*")
    space_vector = []
    character_presence = []

    for block in script_text.descendants:
        # If block is an instance of bs4.Tag, it is wrapped in HTML tags
        # The next block will contain the same text without the tags
        # So we skip this block without parsing it
        if (isinstance(block, Tag)):
            continue

        # UnicodeDammit converts any string to Unicode
        # (does not always work well)
        block = UnicodeDammit(block, soup.original_encoding).unicode_markup
        # remove leading and ending end of lines
        block = block.strip('\n').strip('\r\n')

        # if the block doesn't have any text, skip it
        if re.search(r'\w', block) is None:
            continue

        for line in block.split('\n'):
            stripped_line = line.strip(' \n\t\r')
            if re.search(r'\w', line) is None:
                continue
            # Count the number of spaces at the beginning of the line
            spmatch = spaces_regex.search(line)
            space_vector.append(len(spmatch.group(1)))
            if stripped_line.isupper() and len(stripped_line.split(' ')) <= 3:
                character_presence.append(len(spmatch.group(1)))
            else:
                character_presence.append(None)

    return space_vector, character_presence  #,speech_presence
Code example #9
File: parse_script.py Project: dvp-tran/LSTM
def parse(url, path, name):
    # init variables
    spaces_regex = re.compile(r"^(\s*).*")
    location_regex = re.compile(r"^\s*(INT\.|EXT\.)")

    BLOCK_TYPES = [
        'character', 'speech', 'stage direction', 'location', 'unknown'
    ]
    CHARACTER = 0
    SPEECH = 1
    DIRECTIONS = 2
    LOCATION = 3

    time_start = time.time()

    if url.endswith('.pdf'):
        print('The file @ %s is a PDF' % (url))
        return

    script_text, soup = get_script(url)
    #write raw file:
    if not os.path.exists(path + 'raw/'):
        os.makedirs(path + 'raw/')
    with open(path + 'raw/' + "%s.txt" % name, "w") as text_file:
        text_file.write(str(script_text))
    #####

    space_vector, character_presence = white_space_analysis(script_text, soup)
    usual_spaces, flag = identify_usual_spaces(space_vector,
                                               character_presence)

    # Here we define the variables we will fill with text
    is_intro = True
    movie_script = []
    intro = []
    last_line_type = -1
    last_character = 'unknown'
    text = []
    characters = []

    for block in script_text.descendants:
        # If block is an instance of bs4.Tag, it is wrapped in HTML tags
        # The next block will contain the same text without the tags
        # So we skip this block without parsing it
        if (isinstance(block, Tag)):
            continue

        # UnicodeDammit converts any string to Unicode
        # (does not always work well)
        block = UnicodeDammit(block, soup.original_encoding).unicode_markup
        # remove leading and ending end of lines
        block = block.strip('\n').strip('\n\r')

        # if the block doesn't have any text, skip it
        if re.search(r'\w', block) is None:
            continue

        for line in block.split('\n'):
            stripped_line = line.strip(' \n\t\r')
            if re.search(r'\w', line) is None:
                continue
            # Counting the number of spaces at the beginning of the line
            spmatch = spaces_regex.search(line)
            space_vector.append(len(spmatch.group(1)))
            #print(block)
            #print(line)
            #print(len(spmatch.group(1)))
            line_type = get_line_type(line, stripped_line, usual_spaces)
            #print(line_type)
            #print(line)

            if (last_line_type == -1  # -1 = not initialized
                    or last_line_type == line_type):
                text.append(stripped_line)
            else:
                if last_line_type == CHARACTER:
                    # regex to suppress (parentheses) & replicate speaker
                    last_character = '\n'.join(text)
                    if last_character not in characters:
                        characters.append(last_character)
                elif (last_line_type == SPEECH):
                    movie_script.append({
                        'type': BLOCK_TYPES[last_line_type],
                        BLOCK_TYPES[CHARACTER]: last_character,
                        'text': '\n'.join(text)
                    })
                    #print('We just parsed this JSON block:')
                    #print(movie_script[-1])
                else:
                    movie_script.append({
                        'type': BLOCK_TYPES[last_line_type],
                        'text': '\n'.join(text)
                    })
                    #print('We just parsed this JSON block:')
                    #print(movie_script[-1])
                text = [stripped_line]

            last_line_type = line_type
            #print('----------------')

    result = json_normalize(movie_script)
    if not flag:
        path = path + 'doubtful/'
    write_csv(result, name, path)
    print('      Done parsing script at %s in %s' %
          (url, time.time() - time_start))
    print('-----------------')
    return result
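A hypothetical invocation (get_script, identify_usual_spaces, get_line_type, write_csv and json_normalize come from elsewhere in the project; the URL and paths are stand-ins):

result = parse('http://example.com/SomeScript.html', './scripts/', 'SomeScript')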
Code example #10
print()
print()
print("Start by telling me when the introduction will end.")

for block in script_text.descendants:
    # If block is an instance of bs4.Tag, it is wrapped in HTML tags
    # The next block will contain the same text without the tags
    # So we skip this block without parsing it
    if isinstance(block, Tag):
        continue

    # UnicodeDammit converts any string to Unicode
    # (does not always work well)
    block = UnicodeDammit(block, soup.original_encoding).unicode_markup
    # remove leading and ending end of lines
    block = block.strip('\n')

    # if the block doesn't have any text, skip it
    if re.search(r'\w', block) is None:
        continue

    # bs4 does not always split the blocks cleanly
    # Better to re-split by paragraph and process them one at a time
    for line in block.split('\n'):
        stripped_line = line.strip(' \n\t\r')
        if re.search(r'\w', line) is None:
            continue

        print('------------------------------ Begin line ------------------------------')
        print(line)
        print('                        ------- End line -------')
Code example #11
intro = []
last_line_type = -1
last_character = ''
text = []
characters = []


for block in script_text.descendants:
    if isinstance(block, Tag):
        continue

    # UnicodeDammit converts any string to Unicode
    # (does not always work well)
    block = UnicodeDammit(block, soup.original_encoding).unicode_markup
    # remove leading and ending end of lines
    block = block.strip('\n')

    # if the block doesn't have any text, skip it
    if re.search(r'\w', block) is None:
        continue

    # bs4 does not always split the blocks cleanly
    # Better to re-split by paragraph and process them one at a time
    for line in block.split('\n'):
        stripped_line = line.strip(' \n\t\r')
        if re.search(r'\w', line) is None:
            continue

        line_type = get_line_type(line, stripped_line, usual_spaces)

        if (last_line_type == -1  # -1 = not initialized
                or last_line_type == line_type):
            text.append(stripped_line)
Code example #12
def analyze_content(script_text, encoding):
    print("\n\nStarting script parsing!\n\n")
    print("Start by telling me when the introduction will end.")

    is_intro = True
    movie_script = []
    intro = []
    last_line_type = -1
    last_character = ''
    line_type = None
    text = []
    characters = []
    usual_spaces = [[] for _ in range(len(BLOCK_TYPES))]

    for block in script_text.descendants:
        if isinstance(block, Tag):
            continue

        # UnicodeDammit converts any string to Unicode
        # (does not always work well)
        block = UnicodeDammit(block, encoding).unicode_markup

        # remove leading and ending end of lines
        block = block.strip('\n')

        # if the block doesn't have any text, skip it
        if re.search(r'\w', block) is None:
            continue

        for line in block.split('\n'):
            stripped_line = line.strip(' \n\t\r')
            if re.search(r'\w', line) is None:
                continue

            print(
                '------------------------------ Begin line ------------------------------'
            )
            print(line)
            print(
                '------------------------------- End line -------------------------------'
            )

            if is_intro:
                print()
                answer = input("Is that still part of the intro? (Y/n) ")

                if answer == 'n' or answer == 'N':
                    is_intro = False
                    movie_script.append({
                        'type': 'introduction',
                        'text': '\n'.join(intro)
                    })

                    print(movie_script[-1])
                else:
                    print("OK")
                    print()
                    intro.append(stripped_line)
                    continue

            line_type = get_line_type(line, stripped_line, usual_spaces,
                                      characters)
            print("The last line was interpreted as '{}'".format(
                BLOCK_TYPES[line_type]))
            print()

            if last_line_type == -1 or last_line_type == line_type:  # -1 = not initialized
                text.append(stripped_line)
            else:
                if last_line_type == CHARACTER:
                    last_character = '\n'.join(text)
                    if last_character not in characters:
                        characters.append(last_character)
                elif last_line_type == SPEECH:
                    movie_script.append({
                        'type': BLOCK_TYPES[last_line_type],
                        BLOCK_TYPES[CHARACTER]: last_character,
                        'text': '\n'.join(text)
                    })
                    print('We just parsed this JSON block:')
                    print(movie_script[-1])
                else:
                    movie_script.append({
                        'type': BLOCK_TYPES[last_line_type],
                        'text': '\n'.join(text)
                    })
                    print('We just parsed this JSON block:')
                    print(movie_script[-1])
                text = [stripped_line]

            last_line_type = line_type
            print()

        print()
        print()

    movie_script.append({
        'type': BLOCK_TYPES[line_type],
        'text': '\n'.join(text)
    })

    print('We just parsed this JSON block:')
    print(movie_script[-1])
    print()
    print()

    return movie_script