Example #1
        'do_not_import': 0,
    }
    if is_story:
        final_story['coauthor_id'] = story['coauthor_id']
    return final_story


def dummy_chapters(stories):
    return [_dummy_chapter(story) for story in stories]


def _dummy_chapter(story):
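    # Lower-case the column names so the lookups below work regardless of source casing.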
    chapter = {k.lower(): v for k, v in story.iteritems()}
    final_chapter = {
        'id': chapter['id'],
        'position': chapter.get('position', 1),
        'title': chapter['title'],
        'author_id': chapter['author_id'],
        'text': chapter.get('text', ''),
        'date': chapter['date'],
        'story_id': chapter['id'],
        'notes': chapter['notes'],
        'url': chapter['url']
    }
    return final_chapter


if __name__ == "__main__":
    args = Args().process_args()
    data = _clean_file(args.filepath)
Example #2
from eFiction import efiction
from shared_python import Args
from shared_python.Chapters import Chapters  # import path assumed, mirroring the other shared_python modules
from shared_python.FinalTables import FinalTables
from shared_python.Sql import Sql
from shared_python.Tags import Tags


def _clean_email(author):
    email = author['email']
    if email is None or email == '':
        email = u'{0}{1}[email protected]'.format(author['name'], args.archive_name) \
            .replace(' ', '').replace("'", "")
    elif email.startswith('mailto:'):
        email = email.replace('mailto:', '')
    return email


if __name__ == "__main__":
    args = Args.args_for_05()
    sql = Sql(args)
    tags = Tags(args, sql.db)
    final = FinalTables(args, sql.db)
    chaps = Chapters(args, sql.db)

    filter = ''
    coauthors = {}

    print "Creating destination tables in {0}".format(args.output_database)

    if args.archive_type == 'EF':
        table_names = efiction.table_names()
        has_coauthor_table = raw_input(
            "\nDoes this archive have a coauthors table? Y/N\n")
        has_coauthors = has_coauthor_table.lower() == 'y'
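
A minimal usage sketch of _clean_email covering the three branches above; the FakeArgs stand-in for the script's module-level args (and its archive_name value) is hypothetical:

class FakeArgs(object):
    archive_name = 'SomeArchive'

args = FakeArgs()  # _clean_email reads `args` from module scope

print(_clean_email({'name': 'Jo Smith', 'email': None}))
# -> placeholder address built from the author name and the archive name
print(_clean_email({'name': 'Jo', 'email': 'mailto:jo@example.com'}))
# -> 'jo@example.com'
print(_clean_email({'name': 'Jo', 'email': 'jo@example.com'}))
# -> 'jo@example.com' (returned unchanged)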
Example #3
# encoding: utf-8
import csv

import sys

from shared_python import Args
from shared_python.Sql import Sql
from shared_python.Tags import Tags

if __name__ == "__main__":
    args = Args.args_for_04()
    sql = Sql(args)
    tags = Tags(args, sql.db)

    # Input CSV from TW spreadsheet
    # Rename tags in `tags` table, populate ao3_tag_table column
    # eg: python 04-Rename-Tags.py -dh localhost -du root -dt dsa -dd temp_python -a EF -i path/to/tw-spreadsheet.csv

    with open(args.tag_input_file, 'r') as csvfile:
        tw_tags = list(csv.DictReader(csvfile))
        tag_headers = tags.tag_export_map
        total = len(tw_tags)

        for cur, row in enumerate(tw_tags):
            sys.stdout.write('\r{0}/{1} tags to map'.format(cur + 1, total))
            sys.stdout.flush()

            prefix = 'fanfiction' if args.archive_type == 'EF' else ''
            tags.update_tag_row(row, prefix)
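
The spreadsheet read above appears to be the file written by the tag export in Example #5, so its header row should match the values of tags.tag_export_map plus the extra "TW Notes" column appended at export time. A hedged sanity-check sketch, reusing the csv import above; the helper name is hypothetical:

def headers_match_export_map(csv_path, tag_export_map):
    # Compare the spreadsheet's header row against the export map values.
    with open(csv_path, 'r') as csvfile:
        header = next(csv.reader(csvfile))
    return set(header) <= (set(tag_export_map.values()) | {'TW Notes'})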
Example #4
import re

from shared_python import Args
from shared_python.Sql import Sql
from shared_python.Tags import Tags

if __name__ == "__main__":
    args = Args.args_for_02()
    sql = Sql(args)
    tags = Tags(args, sql.db)
    print('--- Processing tags from stories table in {0}'.format(
        args.db_database))
    tags.create_tags_table()

    # eg: python 01-Load-into-Mysql.py -dh localhost -du root -dt dsa -dd temp_python -a AA -f /Users/emma/OneDrive/DSA/ARCHIVE_DB.pl -o .
    tag_col_list = {}
    stories_id_name = ""
    stories_table_name = ""

    # AUTOMATED ARCHIVE
    if args.archive_type == 'AA':

        table_name = raw_input(
            'Story table name (default: "{0}_stories"): '.format(
                args.db_table_prefix))
        if table_name is None or table_name == '':
            table_name = '{0}_stories'.format(args.db_table_prefix)
        tag_columns = raw_input(
            'Column names containing tags \n   (delimited by commas - default: "tags, warnings, characters, fandoms, relationships"): '
        )
        if tag_columns is None or tag_columns == '':
            tag_columns = 'tags, warnings, characters, fandoms, relationships'
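
Both prompts above follow the same raw_input-with-fallback pattern; a small helper could express it once. A sketch only (the helper name prompt_with_default is hypothetical, not part of the script):

def prompt_with_default(prompt, default):
    # Ask the user, falling back to the default on an empty answer.
    answer = raw_input('{0} (default: "{1}"): '.format(prompt, default))
    return answer if answer.strip() != '' else default

# e.g. table_name = prompt_with_default('Story table name',
#                                       '{0}_stories'.format(args.db_table_prefix))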
Example #5
import csv

from HTMLParser import HTMLParser

from shared_python import Args
from shared_python.Sql import Sql
from shared_python.Tags import Tags


def write_csv(filename, columns):
    # NOTE: relies on the module-level `results` populated in __main__ below.
    html_parser = HTMLParser()
    with open(filename, 'w') as fp:
        writer = csv.writer(fp)
        writer.writerow(columns)
        for row in results:
            r = []
            for s in row:
                r.append('' if s is None else
                         html_parser.unescape(unicode(s)).encode('utf-8'))
            writer.writerow(r)


if __name__ == "__main__":
    args = Args.args_for_03()
    sql = Sql(args)
    tags = Tags(args, sql.db)
    print('--- Exporting tags from {0}'.format(args.db_database))
    cols = tags.tag_export_map
    results = tags.distinct_tags()
    write_csv('{0} - tags.csv'.format(args.db_database), [
        cols['original_tagid'], cols['original_tag'], cols['original_table'],
        cols['original_parent'], cols['ao3_tag_fandom'], cols['ao3_tag'],
        cols['ao3_tag_type'], cols['ao3_tag_category'],
        cols['original_description'], "TW Notes"
    ])

    print('--- Exporting authors with stories from {0}'.format(
        args.db_database))
    if args.archive_type == 'AA':
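
As written, write_csv reads `results` from module scope, so it can only export whatever query ran last. A more self-contained variant would take the rows as a parameter; a sketch only (reusing the imports above), not the script's actual interface:

def write_csv_rows(filename, columns, rows):
    # Same unescaping/encoding as write_csv above, but with the rows passed in.
    html_parser = HTMLParser()
    with open(filename, 'w') as fp:
        writer = csv.writer(fp)
        writer.writerow(columns)
        for row in rows:
            writer.writerow([
                '' if s is None else html_parser.unescape(unicode(s)).encode('utf-8')
                for s in row
            ])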
Example #6
from shared_python import Args
from shared_python.Sql import Sql
from automated_archive import aa

if __name__ == "__main__":
    args = Args.args_for_01()
    sql = Sql(args)

    # eg: python 01-Load-into-Mysql.py -dh localhost -du root -dt dsa -dd temp_python -a AA -f /path/to/ARCHIVE_DB.pl -o .
    if args.archive_type == 'AA':
        print('--- Loading Automated Archive file "{0}" into database "{1}"'.
              format(args.db_input_file, args.db_database))
        aa.clean_and_load_data(args)

    # eg: python 01-Load-into-Mysql.py -dh localhost -du root -dt sd -dd temp_python -a EF -f /path/to/backup-from-efiction.sql -o .
    elif args.archive_type == 'EF':
        print('Loading eFiction file "{0}" into database "{1}"'.format(
            args.db_input_file, args.db_database))
        sql.run_script_from_file(args.db_input_file,
                                 database=args.db_database,
                                 prefix=args.db_table_prefix)
Example #7
from eFiction import efiction
from shared_python import Args
from shared_python.FinalTables import FinalTables
from shared_python.Sql import Sql
from shared_python.Tags import Tags


def valid_tags(key, tag_type_list):
    return [
        d[key].strip() for d in tag_type_list
        if key in d and d[key] is not None and d[key] != ''
    ]


if __name__ == "__main__":
    args = Args.args_for_06()
    sql = Sql(args)
    tags = Tags(args, sql.db)
    final = FinalTables(args, sql.db)

    if args.archive_type == 'EF':
        table_names = efiction.table_names()
    else:
        table_names = {
            'authors': 'authors',
            'stories': 'stories',
            'chapters': 'chapters'
        }

    print "Getting all tags per story..."
    tags_by_story_id = tags.tags_by_story_id()
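
A small usage sketch of valid_tags; the tag rows below are hypothetical and only assume dictionaries keyed by 'ao3_tag', roughly in the shape tags_by_story_id() returns:

story_tags = [
    {'ao3_tag': '  Angst ', 'ao3_tag_type': 'freeform'},
    {'ao3_tag': '', 'ao3_tag_type': 'freeform'},   # empty values are skipped
    {'ao3_tag_type': 'character'},                 # missing key is skipped
]
print(valid_tags('ao3_tag', story_tags))  # ['Angst']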