"""Create a data map draft from a World Bank indicator CSV export.

Reads the CSV named on the command line (World Bank layout: 4 metadata
header rows, one column per year), keeps the ISO3 country code plus the
year columns actually present, and computes global min/max for use in JS.
NOTE(review): this fragment is truncated by the source dump — the code
writing the content/CSV output files is not visible here.
"""
import argparse  # was missing: argparse is used below but never imported

import pandas as pd

from datetime import datetime

from logya.core import Logya
from logya.writer import write, encode_content

parser = argparse.ArgumentParser(
    description='Create data map draft based on input CSV and arguments.')
parser.add_argument('input_file', help='Name of the input file.')
parser.add_argument('output_name',
                    help='Name part of the generated content and CSV files.')
args = parser.parse_args()

L = Logya()
L.init_env()

now = datetime.now()

# World Bank specific data loading and processing: skip the 4 metadata
# header rows, drop columns with fewer than 10 non-NA values (this also
# removes all-NA columns, so the former `how='all'` argument was redundant
# — and modern pandas raises if both `how` and `thresh` are given), and
# normalize the country-code column name.
df = pd.read_csv(args.input_file, skiprows=4).dropna(
    axis='columns', thresh=10).rename(columns={'Country Code': 'iso3'})

# Keep only year columns that survived the NA filter above.
years = [str(year) for year in range(1960, now.year + 1) if str(year) in df]
columns = ['iso3'] + years
df_final = df[columns]

# Determine absolute min and max values for use in JS
abs_min = df.min(numeric_only=True).min()
#!/usr/bin/env python
# coding: utf-8
import os
import re
import requests

from datetime import datetime
from bs4 import BeautifulSoup
from logya.core import Logya
from logya.path import slugify, target_file
from logya.writer import encode_content, write


# Scrape the Wikipedia "Bodyweight exercise" article, presumably to turn
# the exercises listed there into Logya content documents.
# NOTE(review): this fragment is cut off mid-dict by the source dump; the
# closing brace and the rest of the script are not visible here.
logya = Logya()
logya.init_env()

# Fetch the article and parse it with the lxml parser.
url = 'https://en.wikipedia.org/wiki/Bodyweight_exercise'
html = requests.get(url).text
soup = BeautifulSoup(html, 'lxml')

# Plural -> singular normalization map for words appearing in exercise
# names (used to de-duplicate scraped terms — TODO confirm in full script).
replacements = {
    'bams': 'bam',
    'bodybuilders': 'bodybuilder',
    'boots': 'boot',
    'chairs': 'chair',
    'climbers': 'climber',
    'crosses': 'cross',
    'curls': 'curl',
    'darlings': 'darling',
    'dips': 'dip',
    'dogs': 'dog',
# Exemple #3 (score: 0)
#!/usr/bin/env python
# coding: utf-8
import os
import re
import requests

from datetime import datetime
from bs4 import BeautifulSoup
from logya.core import Logya
from logya.path import slugify, target_file
from logya.writer import encode_content, write

# Scrape the Wikipedia "Bodyweight exercise" article, presumably to turn
# the exercises listed there into Logya content documents.
# NOTE(review): this fragment is cut off mid-dict by the source dump; the
# closing brace and the rest of the script are not visible here.
logya = Logya()
logya.init_env()

# Fetch the article and parse it with the lxml parser.
url = 'https://en.wikipedia.org/wiki/Bodyweight_exercise'
html = requests.get(url).text
soup = BeautifulSoup(html, 'lxml')

# Plural -> singular normalization map for words appearing in exercise
# names (used to de-duplicate scraped terms — TODO confirm in full script).
replacements = {
    'bams': 'bam',
    'bodybuilders': 'bodybuilder',
    'boots': 'boot',
    'chairs': 'chair',
    'climbers': 'climber',
    'crosses': 'cross',
    'curls': 'curl',
    'darlings': 'darling',
    'dips': 'dip',
    'dogs': 'dog',
    'extensions': 'extension',
# Exemple #4 (score: 0)
# -*- coding: utf-8 -*-
# Run this script from your Logya site directory, e. g.:
# python scripts/site_index.py
"""Build a JSON index of all site documents (metadata only, bodies removed)
and write it to static/site_index.json."""
import io
import os
import json

from logya.core import Logya
from logya.encoder import JSONEncoder

logya = Logya()
logya.init_env()
logya.build_index()

# Map each document URL to its attributes. Drop the (potentially large)
# 'body' so the static index stays small; pop() instead of `del` so a
# document without a 'body' key cannot abort the whole index build.
site_index = {}
for url, doc in logya.docs.items():
    doc.pop('body', None)
    site_index[url] = doc

# Serialize with the project's JSONEncoder (handles Logya's custom types).
index_file = os.path.join(logya.dir_static, 'site_index.json')
with io.open(index_file, 'w', encoding='utf-8') as f:
    json.dump(site_index, f, cls=JSONEncoder)
# Exemple #5 (score: 0)
# -*- coding: utf-8 -*-
# Run this script from your Logya site directory, e. g.:
# python scripts/site_index.py
"""Build a JSON index of all site documents (metadata only, bodies removed)
and write it to static/site_index.json."""
import io
import os
import json

from logya.core import Logya
from logya.encoder import JSONEncoder


logya = Logya()
logya.init_env()
logya.build_index()

# Map each document URL to its attributes. Drop the (potentially large)
# 'body' so the static index stays small; pop() instead of `del` so a
# document without a 'body' key cannot abort the whole index build.
site_index = {}
for url, doc in logya.docs.items():
    doc.pop('body', None)
    site_index[url] = doc

# Serialize with the project's JSONEncoder (handles Logya's custom types).
index_file = os.path.join(logya.dir_static, 'site_index.json')
with io.open(index_file, 'w', encoding='utf-8') as f:
    json.dump(site_index, f, cls=JSONEncoder)
# Exemple #6 (score: 0)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program flattens the content hierarchy of documents generated using the write_content function.
# It creates markdown versions from files named index.html in the directory content_new. New files get
# the name of their parent directory.
import os

from html2text import html2text

from logya.core import Logya
from logya.writer import encode_content, write

L = Logya()
L.init_env()
L.build_index()

for url, doc in L.docs.items():
    content_file = os.path.join(L.dir_content, url.strip('/'), 'index.html')
    if os.path.exists(content_file):
        # Convert the rendered HTML to markdown and remove 'body' from the
        # attributes in one step — it is re-encoded into the new file below.
        body = html2text(doc.pop('body'))

        # 'tags_links' is derived data; pop() with a default avoids a
        # KeyError for documents that never had it.
        doc.pop('tags_links', None)

        content = encode_content(doc, body)
        # Name the markdown file after the parent directory of index.html,
        # e.g. .../foo/index.html -> .../foo.md
        target_file = os.path.dirname(content_file) + '.md'

        write(target_file.replace('/content/', '/content_new/'), content)