Code example #1
def get_labs(data_format):
    """Gets data from all labs from hackerspaces.org."""

    labs = []

    # Get the first page of data
    wiki = MediaWiki(hackerspaces_org_api_url)
    wiki_response = wiki.call({
        'action': 'query',
        'list': 'categorymembers',
        'cmtitle': 'Category:Hackerspace',
        'cmlimit': '500'
    })
    nextpage = None
    if "query-continue" in wiki_response:
        nextpage = wiki_response["query-continue"]["categorymembers"][
            "cmcontinue"]

    urls = []
    for i in wiki_response["query"]["categorymembers"]:
        urls.append(i["title"].replace(" ", "_"))

    # Load all the Labs in the first page
    for i in urls:
        current_lab = get_single_lab(i, data_format)
        labs.append(current_lab)

    # Load all the Labs from the other pages
    while "query-continue" in wiki_response:
        wiki = MediaWiki(hackerspaces_org_api_url)
        wiki_response = wiki.call({
            'action': 'query',
            'list': 'categorymembers',
            'cmtitle': 'Category:Hackerspace',
            'cmlimit': '500',
            "cmcontinue": nextpage
        })

        urls = []
        for i in wiki_response["query"]["categorymembers"]:
            urls.append(i["title"].replace(" ", "_"))

        # Load all the Labs
        for i in urls:
            current_lab = get_single_lab(i, data_format)
            labs.append(current_lab)

        if "query-continue" in wiki_response:
            nextpage = wiki_response["query-continue"]["categorymembers"][
                "cmcontinue"]
        else:
            break

    # Transform the list into a dictionary
    labs_dict = {}
    for j, k in enumerate(labs):
        labs_dict[j] = k

    return labs_dict
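
The first-page request and the continuation loop above duplicate the same query and URL-collection logic. A minimal consolidation sketch, assuming the same hackerspaces_org_api_url and the pre-1.26 "query-continue" response format used throughout these examples:

def iter_category_members(api_url, category):
    """Yield page titles from a category, following cmcontinue (sketch)."""
    wiki = MediaWiki(api_url)
    params = {'action': 'query', 'list': 'categorymembers',
              'cmtitle': category, 'cmlimit': '500'}
    while True:
        response = wiki.call(params)
        for member in response["query"]["categorymembers"]:
            yield member["title"].replace(" ", "_")
        if "query-continue" not in response:
            break
        params["cmcontinue"] = \
            response["query-continue"]["categorymembers"]["cmcontinue"]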
Code example #2
def open_connection(bot_name, env_name, api_url):
    """Open a connection to MediaWiki for a bot."""

    LOGGER.info("Opening MediaWiki connection for %s at %s", bot_name, api_url)
    apiary_wiki = MediaWiki(api_url)
    edit_token = None

    try:
        # Passwords may be defined in the environment or in the config file
        # We prefer the environment variable if it is present
        password = os.environ.get(env_name, None)
        if password is None:
            try:
                password = config.get('Passwords', bot_name)
            except Exception:
                LOGGER.warning('No configuration file detected.')

        if password is not None:
            LOGGER.info("Logging in as %s using %s", bot_name, password)
            apiary_wiki.login(bot_name, password)

            LOGGER.info("Getting edit token for %s", bot_name)
            wiki_return = apiary_wiki.call({
                'action': 'tokens',
                'type': 'edit'
            })
            edit_token = wiki_return['tokens']['edittoken']
            LOGGER.info("%s has been given edit token %s", bot_name, edit_token)
        else:
            LOGGER.warn("No password was provided for %s. Queries allowed but editing will not work.", bot_name)

    except Exception as e:
        raise Exception("Unable to login as %s got '%s'", bot_name, e)

    return (apiary_wiki, edit_token)
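
A hypothetical call site; the bot name, environment variable, and API URL below are illustrative assumptions, not from the source:

# Illustrative usage; names and URL are assumptions for the sketch.
wiki, edit_token = open_connection(
    bot_name='Bumble Bee',
    env_name='BUMBLE_BEE_PASSWORD',
    api_url='https://wikiapiary.com/w/api.php')
if edit_token is None:
    LOGGER.warning("Read-only session: editing calls will fail.")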
Code example #3
def get_lab_text(lab_slug, language):
    """Gets text description in English or Italian from a single lab from makeinitaly.foundation."""
    if language == "English" or language == "english" or language == "EN" or language == "En":
        language = "en"
    elif language == "Italian" or language == "italian" or language == "IT" or language == "It" or language == "it":
        language = "it"
    else:
        language = "en"
    wiki = MediaWiki(makeinitaly__foundation_api_url)
    wiki_response = wiki.call(
        {'action': 'query',
         'titles': lab_slug + "/" + language,
         'prop': 'revisions',
         'rvprop': 'content'})

    # We don't know the pageid, so iterate over the returned pages
    content = ""
    for i in wiki_response["query"]["pages"]:
        if "revisions" in wiki_response["query"]["pages"][i]:
            content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]

    # Clean the resulting string/list
    newstr01 = content.replace("}}", "")
    newstr02 = newstr01.replace("{{", "")
    result = newstr02.rstrip("\n|").split("\n|")

    return result[0]
Code example #4
def parseWiki(con):
    useragent = build_user_agent('l2wiki', 0.1,
                                 'https://github.com/tm-calculate/l2wiki')
    wiki = MediaWiki('http://l2central.info/c/api.php', user_agent=useragent)
    cmcontinue = parseWikiPart(con, wiki)
    while cmcontinue:
        cmcontinue = parseWikiPart(con, wiki, cmcontinue)
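
parseWikiPart is not shown in this excerpt. Judging from the cmcontinue handling in examples #1 and #21, it presumably issues one categorymembers request and returns the continuation marker; a sketch under that assumption (the category name and the use of `con` are guesses):

def parseWikiPart(con, wiki, cmcontinue=None):
    """Fetch one page of category members, return the next marker (sketch)."""
    params = {'action': 'query', 'list': 'categorymembers',
              'cmtitle': 'Category:Items',  # hypothetical category
              'cmlimit': '500'}
    if cmcontinue:
        params['cmcontinue'] = cmcontinue
    response = wiki.call(params)
    # ... presumably store the members into the database via `con` here ...
    if 'query-continue' in response:
        return response['query-continue']['categorymembers']['cmcontinue']
    return None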
Code example #5
 def connectwiki(self, bot_name):
     self.apiary_wiki = MediaWiki(self.config.get('WikiApiary', 'API'))
     c = self.apiary_wiki.login(self.config.get(bot_name, 'Username'),
                                self.config.get(bot_name, 'Password'))
     if self.args.verbose >= 1:
         print("Username: %s Password: %s" % (self.config.get(
             bot_name, 'Username'), self.config.get(bot_name, 'Password')))
         print(c)
Code example #6
 def fetch(cls, url, use_cache=True):
     m = re.match(r'^http://([a-z]{2})\.wikipedia\.org', url)
     page_lang = m.group(1).encode('utf8')
     page_title = extract_page_title(url, page_lang)
     wp = MediaWiki('http://%s.wikipedia.org/w/api.php' % page_lang)
     return cls(
         page_title,
         get_page_content(wp, page_title, page_lang, use_cache) or '',
         page_lang)
Code example #7
    def __init__(self):
        config = ConfigParser.ConfigParser()
        config.read('../apiary.cfg')

        # Connect to SMW Community Wiki
        self.smwreferata = MediaWiki('http://smw.referata.com/w/api.php')

        # Connect to WikiApiary
        self.wikiapiary = MediaWiki(config.get('WikiApiary', 'api'))
        self.wikiapiary.login(config.get('wikkiibot', 'Username'),
                              config.get('wikkiibot', 'Password'))

        # We need an edit token
        c = self.wikiapiary.call({
            'action': 'query',
            'titles': 'Foo',
            'prop': 'info',
            'intoken': 'edit'
        })
        self.my_token = c['query']['pages']['-1']['edittoken']
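
The intoken=edit request against a dummy title (with the token read from the '-1' missing-page entry) is the pre-MediaWiki-1.24 way to obtain an edit token. On newer wikis the same token comes from meta=tokens, as example #22 below does; a minimal sketch of that flow for this class:

        # Sketch: CSRF (edit) token via the MediaWiki 1.24+ token API.
        c = self.wikiapiary.call({'action': 'query', 'meta': 'tokens'})
        self.my_token = c['query']['tokens']['csrftoken']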
Code example #8
    def __init__(self, url, username, password):
        self.wiki = MediaWiki(url)
        self.username = username
        self.password = password

        self.login = self._make_wiki_login_call({'action': 'login'})
        self.token = self._make_wiki_login_call({
            'action': 'login',
            'lgtoken': self.login['login']['token']
        })
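
_make_wiki_login_call is not shown in the excerpt. Judging from the module-level make_wiki_login_call in example #18, it presumably merges the stored credentials into the packet before calling the API; a sketch under that assumption:

    def _make_wiki_login_call(self, packet):
        # Assumed helper, modeled on example #18: add credentials and call.
        packet.update({'lgname': self.username,
                       'lgpassword': self.password})
        return self.wiki.call(packet)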
Code example #9
    def __init__(self):
        config = ConfigParser.ConfigParser()
        config.read('../apiary.cfg')

        self.wikiapiary = MediaWiki(config.get('WikiApiary', 'api'))
        self.wikiapiary.login(config.get('TropicalBot', 'Username'),
                              config.get('TropicalBot', 'Password'))

        # We need an edit token on wiki2
        c = self.wikiapiary.call({
            'action': 'query',
            'titles': 'Foo',
            'prop': 'info',
            'intoken': 'edit'
        })
        self.my_token = c['query']['pages']['-1']['edittoken']
Code example #10
 def fetch(cls, url, use_cache=True):
     m = re.match(r'^https?://([a-z\-]+)\.wikipedia\.org/wiki/(.*)$', url)
     page_lang = m.group(1).encode('utf8')
     page_title = urllib.unquote(m.group(2).encode('utf8')).decode('utf8')
     wp = MediaWiki('https://%s.wikipedia.org/w/api.php' % page_lang)
     resp = wp.call({
         'action': 'query',
         'prop': 'pageprops|revisions',
         'titles': page_title.encode('utf8'),
         'rvprop': 'content'
     })
     page = resp['query']['pages'].values()[0]
     content = (page['revisions'][0].values()[0]
                if 'revisions' in page else None)
     if 'pageprops' in page and 'wikibase_item' in page['pageprops']:
         wikidata_id = page['pageprops']['wikibase_item']
     else:
         wikidata_id = None
     return cls(page_title, content or '', page_lang, wikidata_id)
Code example #11
def handle(text, mic, profile):
    baseurl = "http://www.wikihow.com/"
    wiki = MediaWiki('http://www.wikihow.com/api.php')
    #wiki.login("*****@*****.**", "david1234")
    params = {
        'action': 'query',
        'list': 'search',
        'srsearch': text,
        'srprop': 'redirecttitle',
        'srlimit': '1',
        'format': 'json'
    }

    response = wiki.call(params)
    #r = json.dumps(response, sort_keys=True, indent=4, separators=(',', ': '))

    flag = 0
    flag_title = "none"
    pos = response['query']['search']
    query = getRequest(text)
    wiki.logout()
    #Getting the article with the best score
    for key in pos:
        val = fuzz.ratio(key['title'], query)
        print(str(val) + "% " + key['title'])
        if val > flag:
            flag = val
            flag_title = key['title']
    if flag != 0:
        answer = flag_title
        mic.say(answer)

        #rWH = renderWH.renderWikihow()
        #url = baseurl + answer
        #print url
        #url_ = rWH.getContent(str(url))
        #rWH.renderContent(url_)
        webbrowser.open(baseurl + flag_title)
    else:
        mic.say("I could not find anything bro!")
Code example #12
import re
import sqlalchemy
import solr
from simplemediawiki import MediaWiki
from editing import MusicBrainzClient
import pprint
import urllib
import time
from utils import mangle_name, join_names, contains_text_in_script, quote_page_title
import config as cfg

engine = sqlalchemy.create_engine(cfg.MB_DB)
db = engine.connect()
db.execute("SET search_path TO musicbrainz")

wp = MediaWiki('http://ko.wikipedia.org/w/api.php')
wps = solr.SolrConnection('http://localhost:8983/solr/wikipedia_ko')

mb = MusicBrainzClient(cfg.MB_USERNAME, cfg.MB_PASSWORD, cfg.MB_SITE)

"""
CREATE TABLE bot_wp_artist_ko (
    gid uuid NOT NULL,
    processed timestamp with time zone DEFAULT now()
);

ALTER TABLE ONLY bot_wp_artist_ko
    ADD CONSTRAINT bot_wp_artist_kokey PRIMARY KEY (gid);
"""

query = """
Code example #13
def get_single_lab(lab_slug, open_cage_api_key):
    """Gets data from a single lab from hackerspaces.org."""
    wiki = MediaWiki(hackerspaces_org_api_url)
    wiki_response = wiki.call({
        'action': 'query',
        'titles': lab_slug,
        'prop': 'revisions',
        'rvprop': 'content'
    })

    # If we don't know the pageid...
    for i in wiki_response["query"]["pages"]:
        content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]

    # Transform the data into a Lab object
    current_lab = Hackerspace()

    equipment_list = []

    # Template parameters that map directly onto Hackerspace attributes
    simple_fields = ("logo", "founding", "membercount", "fee", "size",
                     "status", "site", "wiki", "irc", "jabber", "phone",
                     "youtube", "eventbrite", "facebook", "ustream",
                     "flickr", "twitter", "googleplus", "email", "maillist",
                     "ical", "forum")

    # Parse the MediaWiki code
    wikicode = mwparserfromhell.parse(content)
    for k in wikicode.filter_templates():
        element_name = unicode(k.name)
        if "Hackerspace" in element_name:
            current_lab.name = lab_slug
            for j in k.params:
                j_value = unicode(j.value)
                j_name = unicode(j.name)
                # Strip leading/trailing newlines from the value
                if j_value[-1:] == "\n" or j_value[:1] == "\n":
                    j_value = j_value.replace('\n', '')
                if j_name == "coordinate":
                    # Clean the coordinates: drop quotes, cardinal
                    # letters, degree signs and spaces
                    for char in ('"', 'N', 'S', 'W', 'E', u'°', ' '):
                        j_value = j_value.replace(char, '')
                    # Reverse-geocode the full address from the coordinates
                    address = get_location(query=j_value,
                                           format="reverse",
                                           api_key=open_cage_api_key)
                    for field in ("city", "county", "state", "postal_code",
                                  "address_1", "country", "country_code",
                                  "continent", "latitude", "longitude"):
                        setattr(current_lab, field, address[field])
                elif j_name in simple_fields:
                    setattr(current_lab, j_name, j_value)
        elif "Equipment" in element_name:
            for j in k.params:
                equipment_list.append(j.replace("equipment=", ""))

            current_lab.equipment = equipment_list

    # Load the free text
    freetext = ""
    for k in wikicode.nodes:
        try:
            test_value = k.name
        except AttributeError:
            freetext += unicode(k)
    current_lab.text = freetext

    return current_lab
Code example #14
 def __init__(self, *args, **kwargs):
     super(Wiki, self).__init__(*args, **kwargs)
     self.wiki = MediaWiki(self.url + 'api.php')
Code example #15
import sys
import sqlalchemy
import solr
from simplemediawiki import MediaWiki
from editing import MusicBrainzClient
import pprint
import urllib
import time
from mbbot.wp.wikipage import WikiPage
from mbbot.wp.analysis import determine_country
from utils import mangle_name, join_names, out, colored_out, bcolors, escape_query, quote_page_title, wp_is_canonical_page
import config as cfg

engine = sqlalchemy.create_engine(cfg.MB_DB)
db = engine.connect()
db.execute("SET search_path TO musicbrainz, %s" % cfg.BOT_SCHEMA_DB)

wp_lang = sys.argv[1] if len(sys.argv) > 1 else 'en'

wp = MediaWiki('http://%s.wikipedia.org/w/api.php' % wp_lang)

suffix = '_' + wp_lang if wp_lang != 'en' else ''
wps = solr.SolrConnection('http://localhost:8983/solr/wikipedia' + suffix)

mb = MusicBrainzClient(cfg.MB_USERNAME, cfg.MB_PASSWORD, cfg.MB_SITE)
"""
CREATE TABLE bot_wp_artist_link (
    gid uuid NOT NULL,
    lang character varying(2),
    processed timestamp with time zone DEFAULT now(),
    CONSTRAINT bot_wp_artist_link_pkey PRIMARY KEY (gid, lang)
);

CREATE TABLE bot_wp_artist_link_ignore (
    gid uuid NOT NULL,
Code example #16
File: import-urls.py Project: WikiApiary/wikibees
|Active=No
|Demote=No
|Defunct=No
}}
[[Category:WikiTeam Import]]"""

logo_page_text = """This image was automatically uploaded by [[User:Audit Bee|Audit Bee]] while importing.
[[Category:Import logos]] """

# timeout in seconds
timeout = 10
socket.setdefaulttimeout(timeout)

wiki = MediaWiki(
    'https://wikiapiary.com/w/api.php',
    cookie_file='cookie-jar',
    user_agent='python-simplemediawiki/1.1.1 (WikiApiary; Bumble Bee; '
               '+http://wikiapiary.com/wiki/User:Bumble_Bee)')
wiki.login('Audit Bee', 'frYqj2AmPTqZDjn4TANE')

# We need an edit token
c = wiki.call({
    'action': 'query',
    'titles': 'Foo',
    'prop': 'info',
    'intoken': 'edit'
})
my_token = c['query']['pages']['-1']['edittoken']

i = 0
success = 0
Code example #17
def get_single_lab(lab_slug, data_format):
    """Gets data from a single lab from makeinitaly.foundation."""
    wiki = MediaWiki(makeinitaly__foundation_api_url)
    wiki_response = wiki.call(
        {'action': 'query',
         'titles': lab_slug,
         'prop': 'revisions',
         'rvprop': 'content'})

    # If we don't know the pageid...
    for i in wiki_response["query"]["pages"]:
        content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]

    # Clean the resulting string/list
    newstr01 = content.replace("}}", "")
    newstr02 = newstr01.replace("{{", "")
    result = newstr02.rstrip("\n|").split("\n|")
    # result.remove(u'FabLab')

    # Transform the data into a Lab object
    current_lab = Lab()

    # Add existing data
    for i in result:
        if "coordinates=" in i:
            value = i.replace("coordinates=", "")
            current_lab.coordinates = value
            latlong = []
            if ", " in value:
                latlong = value.rstrip(", ").split(", ")
            elif " , " in value:
                latlong = value.rstrip(" , ").split(" , ")
            else:
                latlong = ["", ""]
            current_lab.lat = latlong[0]
            current_lab.long = latlong[1]
        elif "province=" in i:
            value = i.replace("province=", "")
            current_lab.province = value.upper()
        elif "region=" in i:
            value = i.replace("region=", "")
            current_lab.region = value
        elif "address=" in i:
            value = i.replace("address=", "")
            current_lab.address = value
        elif "city=" in i:
            value = i.replace("city=", "")
            current_lab.city = value
        elif "fablabsio=" in i:
            value = i.replace("fablabsio=", "")
            current_lab.fablabsio = value
        elif "website=" in i:
            value = i.replace("website=", "")
            current_lab.website = value
        elif "facebook=" in i:
            value = i.replace("facebook=", "")
            current_lab.facebook = value
        elif "twitter=" in i:
            value = i.replace("twitter=", "")
            current_lab.twitter = value
        elif "email=" in i:
            value = i.replace("email=", "")
            current_lab.email = value
        elif "manager=" in i:
            value = i.replace("manager=", "")
            current_lab.manager = value
        elif "birthyear=" in i:
            value = i.replace("birthyear=", "")
            current_lab.birthyear = value

    current_lab.text_en = get_lab_text(lab_slug=lab_slug, language="en")
    current_lab.text_it = get_lab_text(lab_slug=lab_slug, language="it")

    if data_format == "dict":
        return current_lab.__dict__
    elif data_format == "object":
        return current_lab
Code example #18
import datetime
import glob
import json
import os
import re
import sys
import textwrap

from simplemediawiki import MediaWiki


with open(os.path.expanduser('~/.mediawiki'), 'r') as f:
    conf = json.loads(f.read())['ircbot']

wiki = MediaWiki(conf['url'])

day_re = re.compile('^--- Day changed (.*) (.*) ([0-9]+) (20[0-9]+)$')
human_re = re.compile('.*<([^>]+)>.*')

days = {}
days_order = []

months = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
          'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}


def make_wiki_login_call(packet):
    packet.update({'lgname': conf['username'],
                   'lgpassword': conf['password']})
    return wiki.call(packet)
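
A hypothetical two-step login using this helper, mirroring the lgtoken dance shown in example #8:

# Sketch: the classic two-step MediaWiki bot login via the helper above.
first = make_wiki_login_call({'action': 'login'})
make_wiki_login_call({'action': 'login',
                      'lgtoken': first['login']['token']})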
Code example #19
import re
import sqlalchemy
import solr
from simplemediawiki import MediaWiki
from editing import MusicBrainzClient
import pprint
import urllib
import time
from utils import mangle_name, join_names, quote_page_title
import config as cfg

engine = sqlalchemy.create_engine(cfg.MB_DB)
db = engine.connect()
db.execute("SET search_path TO musicbrainz, %s" % cfg.BOT_SCHEMA_DB)

wp = MediaWiki('https://en.wikipedia.org/w/api.php')
wps = solr.SolrConnection('http://localhost:8983/solr/wikipedia')

mb = MusicBrainzClient(cfg.MB_USERNAME, cfg.MB_PASSWORD, cfg.MB_SITE)
"""
CREATE TABLE bot_wp_label (
    gid uuid NOT NULL,
    processed timestamp with time zone DEFAULT now()
);

ALTER TABLE ONLY bot_wp_label
    ADD CONSTRAINT bot_wp_label_pkey PRIMARY KEY (gid);

"""

query = """
Code example #20
parser.add_argument('--filename',
                    '-f',
                    dest='filename',
                    help='Output filename',
                    default='list_titles.txt')

args = parser.parse_args()

lang_val = args.lang
filename = args.filename

wiki_url = "https://"
wiki_url = wiki_url + lang_val
wiki_url = wiki_url + ".wikipedia.org/w/api.php"

wiki = MediaWiki(wiki_url)

output_file = open(filename, "w")

continue_param = ''
request_obj = {
    'action': 'query',
    'list': 'allpages',
    'aplimit': 'max',
    'apnamespace': '0'
}

page_list = wiki.call(request_obj)
pages_in_query = page_list['query']['allpages']

for each_page in pages_in_query:
    page_ID = each_page['pageid']
Code example #21
def get_labs(format):
    """Gets data from all labs from makeinitaly.foundation."""

    labs = []

    # Get the first page of data
    wiki = MediaWiki(makeinitaly__foundation_api_url)
    wiki_response = wiki.call({
        'action': 'query',
        'list': 'categorymembers',
        'cmtitle': 'Category:Italian_FabLabs',
        'cmlimit': '500'
    })
    if "query-continue" in wiki_response:
        nextpage = wiki_response["query-continue"]["categorymembers"][
            "cmcontinue"]

    urls = []
    for i in wiki_response["query"]["categorymembers"]:
        urls.append(i["title"].replace(" ", "_"))

    # Load all the Labs in the first page
    for i in urls:
        # Request objects: the code below relies on .__dict__
        current_lab = get_single_lab(i, "object")
        labs.append(current_lab)

    # Load all the Labs from the other pages
    while "query-continue" in wiki_response:
        wiki = MediaWiki(makeinitaly__foundation_api_url)
        wiki_response = wiki.call({
            'action': 'query',
            'list': 'categorymembers',
            'cmtitle': 'Category:Italian_FabLabs',
            'cmlimit': '500',
            "cmcontinue": nextpage
        })

        urls = []
        for i in wiki_response["query"]["categorymembers"]:
            urls.append(i["title"].replace(" ", "_"))

        # Load all the Labs
        for i in urls:
            current_lab = get_single_lab(i, "object")
            labs.append(current_lab)

        if "query-continue" in wiki_response:
            nextpage = wiki_response["query-continue"]["categorymembers"][
                "cmcontinue"]
        else:
            break

    # Transform the list into a dictionary
    labs_dict = {}
    for j, k in enumerate(labs):
        labs_dict[j] = k.__dict__

    # Return a dictionary / json
    if format.lower() == "dict" or format.lower() == "json":
        output = labs_dict
    # Return a geojson
    elif format.lower() == "geojson" or format.lower() == "geo":
        labs_list = []
        for l in labs_dict:
            # labs_dict values are already plain dicts
            single = labs_dict[l]
            # GeoJSON coordinate order is (longitude, latitude)
            single_lab = Feature(type="Feature",
                                 geometry=Point((single["longitude"],
                                                 single["latitude"])),
                                 properties=single)
            labs_list.append(single_lab)
        output = dumps(FeatureCollection(labs_list))
    # Return a Pandas DataFrame
    elif format.lower() == "pandas" or format.lower() == "dataframe":
        output = {}
        for j in labs_dict:
            output[j] = labs_dict[j]
        # Transform the dict into a Pandas DataFrame
        output = pd.DataFrame.from_dict(output)
        output = output.transpose()
    # Return an object
    elif format.lower() == "object" or format.lower() == "obj":
        output = labs
    # Default: return an object
    else:
        output = labs
    # Return a proper json
    if format.lower() == "json":
        output = json.dumps(labs_dict)
    return output
Code example #22
File: wiki.py Project: markododa/dezhurni
#!/usr/bin/python
from simplemediawiki import MediaWiki
from tabela import tabela
from people import people
import sys
text = '==== Листа на дежурства ====\n\nОва е автоматски генерерирана листа на дежурни со две ротации, доколку не сте во можност да бидете дежурни некоја недела или ден запишете во забелешка и пишете на мејлинг листа. Доколку сте дежурен во вашиот google calendar е вметнат нов календар насловен „Хаклаб: Дежурства“ со настан за деновите кога сте дежурни. Поставете ги известувањата за да бидете навреме известени.\n\n'
text += tabela(people)
wiki = MediaWiki('https://wiki.spodeli.org/api.php')
user, password = open('credentials', 'r').read().split()
wiki.login(user,password)
token = wiki.call({'action': 'query',
                   'meta': 'tokens'})['query']['tokens']['csrftoken']
wiki.call({'action': 'edit', 'title': 'Хаклаб/Дежурства', 'section': '5',
           'text': text, 'token': token})
Code example #23
def get_single_lab(lab_slug, data_format):
    """Gets data from a single lab from hackerspaces.org."""
    wiki = MediaWiki(hackerspaces_org_api_url)
    wiki_response = wiki.call({
        'action': 'query',
        'titles': lab_slug,
        'prop': 'revisions',
        'rvprop': 'content'
    })

    # If we don't know the pageid...
    for i in wiki_response["query"]["pages"]:
        content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]

    # Transform the data into a Lab object
    current_lab = Lab()

    equipment_list = []

    # Template parameters that map directly onto Lab attributes
    # (hyphens in parameter names become underscores)
    simple_fields = ("logo", "country", "state", "city", "founding",
                     "membercount", "fee", "size", "status", "site", "wiki",
                     "irc", "jabber", "phone", "youtube", "eventbrite",
                     "facebook", "ustream", "flickr", "twitter",
                     "googleplus", "email", "maillist", "ical", "forum",
                     "street-address", "postalcode", "region",
                     "post-office-box")

    # Parse the MediaWiki code
    wikicode = mwparserfromhell.parse(content)
    for k in wikicode.filter_templates():
        element_name = unicode(k.name)
        if "Hackerspace" in element_name:
            for j in k.params:
                j_name = unicode(j.name)
                j_value = unicode(j.value)
                if j_name == "coordinate":
                    current_lab.coordinates = j_value
                    if ", " in j_value:
                        latlong = j_value.rstrip(", ").split(", ")
                    elif " , " in j_value:
                        latlong = j_value.rstrip(" , ").split(" , ")
                    else:
                        latlong = ["", ""]
                    current_lab.lat = latlong[0]
                    current_lab.long = latlong[1]
                elif j_name in simple_fields:
                    # Note: the original assigned the "founding" parameter
                    # to current_lab.city, almost certainly a copy-paste
                    # slip; here it lands on current_lab.founding.
                    setattr(current_lab, j_name.replace("-", "_"), j_value)
        elif "Equipment" in element_name:
            for j in k.params:
                equipment_list.append(j.replace("equipment=", ""))

    current_lab.equipment = equipment_list

    # Load the free text
    freetext = ""
    for k in wikicode.nodes:
        try:
            test_value = k.name
        except AttributeError:
            freetext += unicode(k)
    current_lab.text = freetext

    if data_format == "dict":
        return current_lab.__dict__
    elif data_format == "object":
        return current_lab