Example #1
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update
from telegram.ext import CallbackContext

import Parser  # local module providing Parser.books and Parser.Parser


def button(update: Update, context: CallbackContext) -> None:
    query = update.callback_query
    query.answer()
    data = query.data

    # "Показать прошедшие" = "Show past (assignments)"
    past_reply_markup = InlineKeyboardMarkup(
        [[InlineKeyboardButton("Показать прошедшие", callback_data='htmore')]])

    if data.startswith('b'):
        query.edit_message_text(text=Parser.books(int(data.split('_')[1])),
                                parse_mode='HTML')
    elif data == 'htmore':
        parser = Parser.Parser(int(query.message.text[:2]))
        # "Идет загрузка прошедших заданий... Пожалуйста, подождите." =
        # "Loading past assignments... Please wait."
        query.edit_message_text(
            text=f'{query.message.text_html}\n\n<b>Идет загрузка прошедших заданий...\nПожалуйста, подождите.</b>',
            parse_mode='HTML')
        # "Прошедшие задания" = "Past assignments"
        parser.parse_prev_ht(query.message.text.count('Прошедшие задания'))
        query.edit_message_text(
            text=f'{query.message.text_html}\n\n<b>Прошедшие задания</b>\n{parser.format_page()}',
            parse_mode='HTML',
            reply_markup=past_reply_markup)
    elif data.startswith('ht'):
        # "Идет загрузка... Пожалуйста, подождите." = "Loading... Please wait."
        query.edit_message_text(
            text='\n<b>Идет загрузка...\nПожалуйста, подождите.</b>',
            parse_mode='HTML')
        parser = Parser.Parser(int(data.split('_')[1]))
        parser.parse_current_ht()
        query.edit_message_text(text=parser.format_page(),
                                reply_markup=past_reply_markup,
                                parse_mode='HTML')
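For context, a minimal sketch of the send side that would produce callback_data values such as 'ht_12' for this handler (hypothetical handler and class number, python-telegram-bot v13-style API):

def start(update: Update, context: CallbackContext) -> None:
    # The leading two digits of the message text are what button() later
    # reads back via query.message.text[:2].
    keyboard = InlineKeyboardMarkup(
        [[InlineKeyboardButton("Show homework", callback_data='ht_12')]])
    update.message.reply_text("12 class homework", reply_markup=keyboard)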
Example #2
def init_federated():

    # clients list
    clients = []

    # load configs
    parser = Parser()
    parser.parse()
    config = parser.config

    # generate clients
    for i in range(config.num_of_clients):
        clients.append(Client(id=i, config=config))

    # generate server
    server = Server(id=0, config=config)

    # initialize model weights
    server.generator_A2B.apply(weights_init_normal)
    server.generator_B2A.apply(weights_init_normal)
    server.discriminator_A.apply(weights_init_normal)
    server.discriminator_B.apply(weights_init_normal)

    model_files = (server.generator_name_A2B, server.generator_name_B2A,
                   server.discriminator_name_A, server.discriminator_name_B)
    if all(os.path.exists(server.model_dir + name) for name in model_files):
        server.load_model()
        print("Global model saved on the server has been restored!")
    else:
        print("Global model has been created!")

    # load datasets
    # This method is detached from the init part
    # load_datasets(clients=clients, config=config)

    # load models
    for client in clients:
        client.load_model(generator_A2B=server.generator_A2B,
                          generator_B2A=server.generator_B2A,
                          discriminator_A=server.discriminator_A,
                          discriminator_B=server.discriminator_B)
        print("Client {}'s model has been updated from the server".format(
            client.id))

    return clients, server, config
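The weights_init_normal callback applied above is not shown in this example; a typical CycleGAN-style definition (a sketch, assuming PyTorch) would be:

import torch.nn as nn

def weights_init_normal(m):
    # Applied recursively via module.apply(); initializes conv weights
    # from N(0, 0.02) and batch-norm weights from N(1, 0.02) in place.
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0.0)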
Example #3
def populate_db_words(sentences):
    parser = Parser(sentences)
    words = {}
    for tokens in parser.get_tokenised_parts():
        for (word, pos) in tokens:
            words[word] = words.get(word, 0) + 1

    for word, freq in words.items():
        dbEntry = Words(word=word, freq=freq)
        dbEntry.save()
Example #4
def __init__(self, **kwargs):
    self.decode = kwargs.get('decode', 'utf-8')
    self.start_url = kwargs['start_url']
    self.page_f = kwargs.get('page_f')
    self.last_page_xp = kwargs.get('last_page_xp')
    self.xp_page = kwargs.get('xp_page', True)
    self.url_xp = kwargs['url_xp']
    self.url_prefix = kwargs.get('url_prefix', '')
    self.total_page = kwargs.get('total_page')
    self.data = kwargs.get('data')
    self.page_no_key = kwargs.get('page_no_key')
    self.post_url = kwargs.get('post_url')
    self.divide_by = kwargs.get('divide_by')
    self.total_count_key = kwargs.get('total_count_key')
    self.parser = Parser(decode=self.decode)
Example #5
async def translate_inline_0(query: InlineQuery):
    text = query.query[:-2].strip().lower()
    user = db.get_user(query.from_user.id)
    result_id: str = md5((text + str(query.id)).encode()).hexdigest()
    if user.translate_free < len(text) or len(text) > 1900:
        result = content = exceptions.limit
    else:
        args = Parser.get_translation_args(text)
        translator = db.get_translator()
        translator.reduce(len(text))
        user.reduce('translate', len(text))
        if len(args) == 1:
            result = await translator.translate(*args,
                                                lang_to=user.lang_translate.ui)
        else:
            result = await translator.translate(*args)
        if result:
            lang_from = db.get_language(result["lang_from"].lower())
            lang_to = db.get_language(result["lang_to"].lower())
            content = texts.translate_0.format(lang_from.value, lang_to.value,
                                               args[0], result["text"])
        else:
            content = exceptions.translator
        result = result["text"] if result else exceptions.translator
    item = InlineQueryResultArticle(
        id=result_id,
        title=result,
        input_message_content=InputTextMessageContent(content))
    await query.answer([item])
Example #6
def test_mtcnn(base_dir, thresholds):
    # initialize data
    results = dict(zip(thresholds, [[] for _ in thresholds]))
    filenames = os.listdir(base_dir)
    logger.debug(f"Going to process {len(filenames)} images")

    # calculate individual recalls
    for i, filename in enumerate(filenames):
        try:
            parser = Parser(base_dir, filename)
        except FileNotFoundError:
            continue

        for th in thresholds:
            recall = test_single_image(parser, th, save_random=False)
            if recall is None:
                break
            results[th].append(recall)

        # log every n images
        if (i + 1) % 10 == 0:
            logger.info(f"Processed {i + 1} images")

    # Report results
    for th, recall in results.items():
        logger.info(f"Mean recall (@IoU={th:0.0%}) = {np.mean(recall):0.3f}")

    return results
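A usage sketch for the function above (paths and thresholds are hypothetical; Parser, test_single_image, logger and np come from the surrounding module):

if __name__ == '__main__':
    # Mean recall is reported at each IoU threshold.
    test_mtcnn(base_dir='data/val_images', thresholds=[0.3, 0.5, 0.7])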
Example #7
def main():
    # Parsing user-given inputs
    parser = Parser.Parser(sys.argv[1:]).args

    cbmcmc(parser.data, parser.it, parser.basis, parser.treeprior, parser.r,
           parser.p, parser.cob_freq, parser.outfile, parser.seed)

    return 0
Example #8
def populate_db_bigrams(sentences):
    parser = Parser(sentences)
    bigrams = {}
    for tokens in parser.get_tokenised_parts():
        for i in range(len(tokens) - 1):
            (word1, pos1) = tokens[i]
            (word2, pos2) = tokens[i + 1]
            bigram = (word1, word2)
            bigrams[bigram] = bigrams.get(bigram, 0) + 1

    for bigram, freq in bigrams.items():
        (w1, w2) = bigram
        dbEntry = Bigrams(word1=w1, word2=w2, freq=freq)
        dbEntry.save()
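For reference, the manual frequency counting in Examples #3 and #8 can also be written with the standard library's collections.Counter; a sketch over the same (word, pos) token stream:

from collections import Counter

def count_bigrams(parser):
    counts = Counter()
    for tokens in parser.get_tokenised_parts():
        words = [word for (word, pos) in tokens]
        # zip(words, words[1:]) yields each adjacent word pair once.
        counts.update(zip(words, words[1:]))
    return counts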
Example #9
def _get_parser(cls):
    parser = Parser(prog='get')
    parser.add_argument('-t',
                        '--target',
                        nargs='*',
                        help='File to download')
    parser.add_argument('-d',
                        '--destination',
                        nargs='*',
                        help='Directory to download file to')
    return parser
Example #10
def html2md(url, md_file, with_title=False):
    response = requests.get(url)
    soup = BeautifulSoup(response.content,
                         'html.parser',
                         from_encoding="utf-8")
    html = ""
    for child in soup.find_all('svg'):
        child.extract()
    if with_title:
        for c in soup.find_all('div', {'class': 'article-title-box'}):
            html += str(c)
    for c in soup.find_all('div', {'id': 'content_views'}):
        html += str(c)

    parser = Parser(html)
    with open(md_file, 'w') as f:
        f.write('{}\n'.format(''.join(parser.outputs)))
Example #11
def test_parser():
    """Test that the parser returns the correct types."""
    sys_args = ["--data",  "observations.csv", "--outfile", "results/samples.dat", "--it", "7500",
                "--basis", "cycle", "--treeprior", "all", "-r", "6",
                "-p", ".75", "--cob-freq", "100", "--seed", "123"]

    parser = Parser.Parser(sys_args)
    assert isinstance(parser.args.data, str)
    assert isinstance(parser.args.outfile, str)
    assert isinstance(parser.args.it, int)
    assert isinstance(parser.args.basis, str)
    assert isinstance(parser.args.treeprior, str)
    assert isinstance(parser.args.r, int)
    assert isinstance(parser.args.p, float)
    assert isinstance(parser.args.cob_freq, int)
    if parser.args.seed is not None:
        assert isinstance(parser.args.seed, int)
Example #12
def _find_parser(cls):
    parser = Parser(prog='find')
    parser.add_argument(
        '-e', '--exact',
        action='store_true',
        default=False,
        help='Do an exact string match'
    )
    parser.add_argument(
        '-r', '--relative',
        action='store_true',
        default=False,
        help='Search relative to current path'
    )
    parser.add_argument(
        'target',
        nargs='*',
        help='Target to search for'
    )
    return parser
Example #13
def main():
    # Create initialiser instance, and pass it command line argument
    initialiser = Initialiser(sys.argv[1])

    # Create code_writer instance and write bootstrap code to file
    code_writer = CodeWriter(initialiser.asm_filename)
    code_writer.write_init()

    # Create list of parsers, one for each vm_file
    parsers = [Parser(x) for x in initialiser.vm_files]

    for parser in parsers:
        # Set filename of parser that is currently being translated
        code_writer.set_filename(parser.filename)

        # Parse the VM file
        while parser.has_more_commands():
            initialiser.translate_file(parser, code_writer)

    code_writer.close()
Example #14
class Bid:
    """招标类。传入一个网站的配置字典, 使用get_info获取其所有信息"""
    def __init__(self, **kwargs):
        self.decode = kwargs.get('decode', 'utf-8')
        self.start_url = kwargs['start_url']
        self.page_f = kwargs.get('page_f')
        self.last_page_xp = kwargs.get('last_page_xp')
        self.xp_page = kwargs.get('xp_page', True)
        self.url_xp = kwargs['url_xp']
        self.url_prefix = kwargs.get('url_prefix', '')
        self.total_page = kwargs.get('total_page')
        self.data = kwargs.get('data')
        self.page_no_key = kwargs.get('page_no_key')
        self.post_url = kwargs.get('post_url')
        self.divide_by = kwargs.get('divide_by')
        self.total_count_key = kwargs.get('total_count_key')
        self.parser = Parser(decode=self.decode)

    def get_info(self):
        """获取该网站所有信息并存入数据库"""
        if self.total_page is None:
            self.total_page = self.parser.get_total_page(
                self.start_url, self.last_page_xp, self.xp_page)
        if self.data is None:
            self.get_urls()
        else:
            self.post_data()

    def get_urls(self):
        """get方法获取每一页信息"""
        pages = [self.page_f.format(i) for i in range(1, self.total_page)]
        for page in pages:
            print(page)
            urls = self.parser.get_bid_urls(page, self.url_xp, self.url_prefix)
            self.parser.save_text(urls)

    def post_data(self):
        """post方法获取每一页信息"""
        for i in range(1, self.total_page + 1):
            print(self.post_url, i)
            self.data.update({self.page_no_key: i})
            urls = self.parser.get_bid_urls(self.post_url, self.url_xp,
                                            self.url_prefix, True, self.data)
            self.parser.save_text(urls)
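A minimal usage sketch, assuming a site config dict shaped like the keys Bid.__init__ reads ('start_url' and 'url_xp' are the only required keys; all values below are invented for illustration):

site_config = {
    'start_url': 'http://example.com/bids/list_1.html',
    'page_f': 'http://example.com/bids/list_{}.html',
    'url_xp': '//ul[@class="news-list"]/li/a/@href',
    'url_prefix': 'http://example.com',
}
bid = Bid(**site_config)
bid.get_info()  # 'data' is unset, so pages are fetched via GET (get_urls)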
Example #15
#import matplotlib
#import matplotlib.pyplot as plt
import argparse
import os
import time

import SimpleITK as sitk
from skimage.transform import resize
from skimage.morphology import closing, binary_closing, binary_opening

import losses
from losses import Precision_img, Recall_img, F1_score_img
from utils import Parser  # local helper (import assumed; cf. Example #30)

parser = argparse.ArgumentParser()
parser.add_argument('-cfg', '--cfg', default='cell', type=str)

path = os.path.dirname(__file__)

# parse arguments
args = parser.parse_args()
args = Parser(args.cfg, args)
ckpts = args.getdir()


def saveimage(image, filename):
    data = sitk.GetImageFromArray(image)
    sitk.WriteImage(data, filename)


def main():
    start_time = time.time()
    # setup environments and seeds
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # setup networks
    #Network = getattr(models, args.net)
Example #16
                mnotes.append(legend.get(i, legend.get(i[2:], i)))

            # Try to add the meal
            try:
                canteen.addMeal(mdate, category, mname,
                                mnotes, prices, roles)
            except ValueError as e:
                print('could not add meal {}/{} "{}" due to "{}"'.format(mdate, category, mname, e), file=sys.stderr)
                # empty meal ...
                pass

    # return xml data
    return canteen.toXMLFeed()


parser = Parser('niederbayern_oberpfalz', handler=parse_url,
                shared_prefix='http://www.stwno.de/infomax/daten-extern/csv/')
parser.define('th-deggendorf', suffix='HS-DEG')
parser.define('hs-landshut', suffix='HS-LA')
parser.define('wz-straubing', suffix='HS-SR')
parser.define('uni-passau', suffix='UNI-P')
parser.define('unip-cafeteria-nikolakloster', suffix='Cafeteria-Nikolakloster')
parser.define('oth-regensburg', suffix='HS-R-tag')
parser.define('oth-regensburg-abends', suffix='HS-R-abend')
parser.define('othr-cafeteria-pruefening', suffix='Cafeteria-Pruefening')
parser.define('uni-regensburg', suffix='UNI-R')
parser.define('unir-cafeteria-pt', suffix='Cafeteria-PT')
parser.define('unir-cafeteria-chemie', suffix='Cafeteria-Chemie')
parser.define('unir-cafeteria-milchbar', suffix='Cafeteria-Milchbar')
parser.define('unir-cafeteria-sammelgebaeude', suffix='Cafeteria-Sammelgebaeude')
parser.define('unir-cafeteria-sport', suffix='Cafeteria-Sport')
Example #17
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
import shlex

import sarge
from debot.plugin_utils import admin_required, notify
from flask import g
from gevent import spawn
from ratelimit import rate_limited

from utils import Parser

parser = Parser()
parser.add_argument('env', help='environment', choices=('live', 'stage', 'shared'))
DEPLOYMENT_PATH = '/home/ubuntu/deployment'
COMMAND = 'bin/activate {env} && ansible-playbook -i inventory/ec2.py --vault-password-file .vault_pass reboot.yml'

HALF_HOUR = 1800


@rate_limited(1, HALF_HOUR)
def reboot(env):
    spawn(_reboot, env, g.channel)
    return True


def _reboot(env, channel):
    old_cwd = os.getcwd()
    os.chdir(DEPLOYMENT_PATH)
Example #18
                # multiple prices for a meal - keep all of them literally
                name = mealCellText
                price = None

            try:
                date = dates[dateIdx]
                canteen.addMeal(date, category=subCanteen, name=name, prices=price)
            except ValueError as e:
                print('Error adding meal {} on {}: {}'.format(name, date, e))


def parse_url(url, today):
    canteen = LazyBuilder()
    canteen.setAdditionalCharges('student', {})
    if today:
        parse_week(url, canteen)  # base url only contains current day
    else:
        parse_week(url + 'week', canteen)
        parse_week(url + 'nextweek', canteen)

    return canteen.toXMLFeed()


parser = Parser('darmstadt', handler=parse_url,
                shared_prefix='https://www.stwda.de/components/com_spk/')
parser.define('stadtmitte', suffix='spk_Stadtmitte_print.php?ansicht=')
parser.define('lichtwiese', suffix='spk_Lichtwiese_print.php?ansicht=')
parser.define('schoefferstrasse', suffix='spk_Schoefferstrasse_print.php?ansicht=')
parser.define('dieburg', suffix='spk_Dieburg_print.php?ansicht=')
parser.define('haardtring', suffix='spk_Haardtring_print.php?ansicht=')
Example #19
                    prices = []

                canteen.addMeal(date, category, name, notes,
                                prices, roles)


def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url + '.html?view=list', canteen)
    if not today:
        parse_week(url + '-w1.html?view=list', canteen)
        parse_week(url + '-w2.html?view=list', canteen)
    return canteen.toXMLFeed()


parser = Parser('dresden', handler=parse_url,
                shared_prefix='https://www.studentenwerk-dresden.de/mensen/speiseplan/')
parser.define('reichenbachstrasse', suffix='mensa-reichenbachstrasse')
parser.define('zeltschloesschen', suffix='zeltschloesschen')
parser.define('alte-mensa', suffix='alte-mensa')
parser.define('mensologie', suffix='mensologie')
parser.define('siedepunkt', suffix='mensa-siedepunkt')
parser.define('johannstadt', suffix='mensa-johannstadt')
parser.define('wueins', suffix='mensa-wueins')
parser.define('bruehl', suffix='mensa-bruehl')
parser.define('u-boot', suffix='u-boot')
parser.define('tellerrandt', suffix='mensa-tellerrandt')
parser.define('zittau', suffix='mensa-zittau')
parser.define('stimm-gabel', suffix='mensa-stimm-gabel')
parser.define('palucca-schule', suffix='mensa-palucca-schule')
parser.define('goerlitz', suffix='mensa-goerlitz')
parser.define('sport', suffix='mensa-sport')
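These Parser/define pairs follow the same convention as the other feed parsers in this list; mirroring the debugging block at the end of Example #22, a single canteen's feed could be produced by hand like this (the exact parse signature is assumed from that example):

if __name__ == '__main__':
    print(parser.parse('dresden', 'alte-mensa', None))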
Example #20
    day = datetime.date.today()
    emptyCount = 0
    totalCount = 0
    while emptyCount < 7 and totalCount < 32:
        if not parse_day(canteen, '{}&day={}&month={}&year={}&limit=25'
                         .format(url, day.day, day.month, day.year),
                         day.strftime('%Y-%m-%d')):
            emptyCount += 1
        else:
            emptyCount = 0
        if today:
            break
        totalCount += 1
        day += datetime.date.resolution
    return canteen.toXMLFeed()


parser = Parser('leipzig', handler=parse_url,
                shared_prefix='http://www.studentenwerk-leipzig.de/mensen-und-cafeterien/speiseplan/m/meals.php?canteen=')
parser.define('dittrichring', suffix='153')
parser.define('koburger-strasse', suffix='121')
parser.define('philipp-rosenthal-strasse', suffix='127')
parser.define('waechterstrasse', suffix='129')
parser.define('academica', suffix='118')
parser.define('am-park', suffix='106')
parser.define('am-elsterbecken', suffix='115')
parser.define('liebigstrasse', suffix='162')
parser.define('peterssteinweg', suffix='111')
parser.define('schoenauer-strasse', suffix='140')
parser.define('tierklinik', suffix='170')
Example #21
from bs4 import BeautifulSoup
from pyopenmensa.feed import LazyBuilder
from urllib.request import urlopen
from utils import Parser

def parse_week(url, canteen):
    soup = BeautifulSoup(urlopen(url).read(), 'lxml')
    plan_table = soup.find("table", "tabmensaplan")
    for day_span in plan_table.find_all("span", "tabDate"):
        meal_date = day_span.text + "2015"
        for index, meal_td in enumerate(day_span.parent.parent.find_all("td")):
            if 0 < index < 5:
                meal_text = meal_td.text
                meal_type = soup.find_all("span", "mvmensa")[index-1].text
                canteen.addMeal(meal_date, meal_type, meal_text)

def parse_url(url, today):
    canteen = LazyBuilder()
    if not today:
        parse_week(url, canteen)
    return canteen.toXMLFeed()

parser = Parser('siegen', handler=parse_url,
    shared_prefix='http://studentenwerk.uni-siegen.de/index.php?uid=650&uid2=0')
parser.define('ar', suffix='&cat_show=1')
parser.define('enc', suffix='&cat_show=2')
parser.define('ars-mundi', suffix='&cat_show=3')
parser.define('cafeterien', suffix='&cat_show=4')
Example #22
            price_tag = meal_item.find("span", {"class": "fmc-item-price"})

            try:
                canteen.addMeal(date, category=location_tag.string, name=title_tag.string, prices=price_tag.string)
            except ValueError as e:
                print('Error adding meal "{}": {}'.format(meal_item, e))


def parse_url(url, today=False):
    canteen = LazyBuilder()
    canteen.setAdditionalCharges("student", {})
    parse_week(url, canteen)
    return canteen.toXMLFeed()


parser = Parser(
    "darmstadt",
    handler=parse_url,
    shared_prefix="http://studierendenwerkdarmstadt.de/hochschulgastronomie/speisekarten/",
)
parser.define("stadtmitte", suffix="stadtmitte/")
parser.define("lichtwiese", suffix="lichtwiese/")
parser.define("schoefferstrasse", suffix="schoefferstrasse/")
parser.define("dieburg", suffix="dieburg/")
parser.define("haardtring", suffix="haardtring/")


# for debugging / testing
if __name__ == "__main__":
    print(parser.parse("darmstadt", "stadtmitte", None))
Example #23
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shlex

import sarge
from debot.plugin_utils import admin_required, notify
from flask import g
from gevent import spawn
from utils import Parser, e2e_ui_test

parser = Parser()
parser.add_argument('env', help='environment', choices=('live', 'stage'))
parser.add_argument('app', help='application', choices=('tac', 'middle'))
parser.add_argument('version', help='version', default='master')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-e', '--environment',
                    help='additional env vars to ansible')
base_cmd = ('bin/az k8s_cluster.yml',)
DEPLOYMENT_PATH = '/home/ubuntu/deployment'


def run_cmd(cmd, args, channel):
    old_cwd = os.getcwd()
    os.chdir(DEPLOYMENT_PATH)
    cmd = "su ubuntu -c '%s'" % cmd
    resp = []
    try:
        p = sarge.capture_both(cmd)
        if p.returncode == 0:
            resp.append('Successfully deployed %s to %s on Azure' % (
Example #24
                canteen.addMeal(weekDays[i], category, name,
                                list(set(notes)),
                                prices, ('student', 'employee', 'other')
                                )
            i += 1


def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url, date.today(), canteen)
    if not today:
        parse_week(url, date.today() + date.resolution * 7, canteen)
    return canteen.toXMLFeed()


parser = Parser('hamburg', handler=parse_url,
                shared_prefix='http://speiseplan.studierendenwerk-hamburg.de/de/')
parser.define('armgartstrasse', suffix='590')
parser.define('bergedorf', suffix='520')
parser.define('berliner-tor', suffix='530')
parser.define('botanischer-garten', suffix='560')
parser.define('bucerius-law-school', suffix='410')
parser.define('cafe-mittelweg', suffix='690')
parser.define('cafe-cfel', suffix='680')
parser.define('cafe-jungiusstrasse', suffix='610')
parser.define('cafe-alexanderstrasse', suffix='660')
parser.define('campus', suffix='340')
parser.define('finkenau', suffix='420')
parser.define('geomatikum', suffix='540')
parser.define('harburg', suffix='570')
parser.define('hcu', suffix='430')
parser.define('philosophenturm', suffix='350')
Example #25
            price_div = meal_article.find('div', 'price')
            if price_div is None:
                canteen.addMeal(date, category, name, notes)
                continue
            prices = {}
            for v, r in (('default', 'student'), ('bed', 'employee'), ('guest', 'other')):
                price = price_regex.search(price_div['data-' + v])
                if price:
                    prices[r] = price.group('price')
                elif v == 'default':
                    prices = {}
                    break
            canteen.addMeal(date, category, name, notes, prices)
        if closed_candidate and not canteen.hasMealsFor(date):
            canteen.setDayClosed(date)
    return canteen.toXMLFeed()


parser = Parser('wuerzburg', handler=parse_url,
                shared_prefix='http://www.studentenwerk-wuerzburg.de/essen-trinken/speiseplaene/plan/show/')
parser.define('austrasse', suffix='austrasse-bamberg.html')
parser.define('burse', suffix='burse-wuerzburg.html')
parser.define('feldkirchenstrasse', suffix='feldkirchenstrasse-bamberg.html')
parser.define('frankenstube', suffix='frankenstube-wuerzburg.html')
parser.define('hubland', suffix='mensa-am-hubland-wuerzburg.html')
parser.define('studentenhaus', suffix='mensa-am-studentenhaus.html')
parser.define('aschaffenburg', suffix='mensa-aschaffenburg')
parser.define('augenklinik', suffix='mensa-augenklinik-wuerzburg.html')
parser.define('josef-schneider', suffix='mensa-josef-schneider-strasse-wuerzburg.html')
parser.define('schweinfurt', suffix='mensa-schweinfurt.html')
Example #26
def parse_url(url, data_canteen, today=False):
    canteen = LazyBuilder()

    data = urlopen(url).read().decode('utf-8')
    document = parse(data, 'lxml')

    dish = document.find(class_='neo-menu-single-dishes')
    if dish is not None:
        dishes = dish.find_all(name='tr', attrs={"data-canteen": data_canteen})
    else:
        dishes = []

    side = document.find(class_='neo-menu-single-modals')
    if side is not None:
        dishes = dishes + side.find_all(name='tr', attrs={"data-canteen": data_canteen})

    for dish in dishes:
        parse_dish(dish, canteen)

    return canteen.toXMLFeed()


parser = Parser('marburg', handler=parse_url,
                shared_args=['https://studentenwerk-marburg.de/essen-trinken/speisekarte/'])
parser.define('bistro', args=[460])
parser.define('mos-diner', args=[420])
parser.define('erlenring', args=[330])
parser.define('lahnberge', args=[340])
parser.define('cafeteria-lahnberge', args=[490])
Example #27
            if notematch not in legends:
                print('unknown legend: {}'.format(notematch))
                continue
            notes.append(legends[notematch])
        canteen.addMeal(date, category, name, notes,
                        price_regex.findall(line), roles)


def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url + '&wann=2', canteen)
    if not today:
        parse_week(url + '&wann=3', canteen)
    return canteen.toXMLFeed()


parser = Parser('hannover', handler=parse_url,
                shared_prefix='http://www.stwh-portal.de/mensa/index.php?format=txt&wo=')
parser.define('hauptmensa', suffix='2')
parser.define('hauptmensa-marktstand', suffix='9')
parser.define('restaurant-ct', suffix='10')
parser.define('contine', suffix='3')
parser.define('pzh', suffix='13')
parser.define('caballus', suffix='1')
parser.define('tiho-tower', suffix='0')
parser.define('hmtmh', suffix='8')
parser.define('ricklinger-stadtweg', suffix='6')
parser.define('kurt-schwitters-forum', suffix='7')
parser.define('blumhardtstrasse', suffix='14')
parser.define('herrenhausen', suffix='12')
Example #28
        parse_week(url + '-kommende-woche',
                   canteen,
                   canteentype,
                   allergene=allergene,
                   zusatzstoffe=zusatzstoffe)
    if not today and type(next_week) is str:
        parse_week(url + next_week,
                   canteen,
                   canteentype,
                   allergene=allergene,
                   zusatzstoffe=zusatzstoffe)
    return canteen.toXMLFeed()


parser = Parser('ostniedersachsen',
                handler=parse_url,
                shared_prefix='http://www.stw-on.de')

sub = parser.sub('braunschweig', shared_prefix='/braunschweig/essen/menus/')
sub.define('mensa1-mittag',
           suffix='mensa-1',
           extra_args={'canteentype': 'Mittagsmensa'})
sub.define('mensa1-abend',
           suffix='mensa-1',
           extra_args={'canteentype': 'Abendmensa'})
sub.define('mensa360',
           suffix='360',
           extra_args={
               'canteentype': 'Pizza',
               'this_week': '-2',
               'next_week': '-nachste-woche'
Example #29
            notes = []
            for img in meal_tr.contents[1].find_all("img"):
                notes.append(img["title"])
            canteen.addMeal(date, category, name, notes, price_regex.findall(meal_tr.contents[2].text), roles)


def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url + ".html", canteen)
    if not today:
        parse_week(url + "-w1.html", canteen)
        parse_week(url + "-w2.html", canteen)
    return canteen.toXMLFeed()


parser = Parser("dresden", handler=parse_url, shared_prefix="http://www.studentenwerk-dresden.de/mensen/speiseplan/")
parser.define("reichenbachstrasse", suffix="mensa-reichenbachstrasse")
parser.define("zeltschloesschen", suffix="zeltschloesschen")
parser.define("alte-mensa", suffix="alte-mensa")
parser.define("mensologie", suffix="mensologie")
parser.define("siedepunkt", suffix="mensa-siedepunkt")
parser.define("johannstadt", suffix="mensa-johannstadt")
parser.define("wueins", suffix="mensa-wueins")
parser.define("bruehl", suffix="mensa-bruehl")
parser.define("u-boot", suffix="u-boot")
parser.define("tellerrandt", suffix="mensa-tellerrandt")
parser.define("zittau", suffix="mensa-zittau")
parser.define("stimm-gabel", suffix="mensa-stimm-gabel")
parser.define("palucca-schule", suffix="mensa-palucca-schule")
parser.define("goerlitz", suffix="mensa-goerlitz")
parser.define("sport", suffix="mensa-sport")
Example #30
import os
from sklearn.model_selection import StratifiedKFold
import numpy as np

from utils import Parser
args = Parser('settings')
root = args.data_dir


def write(data, fname, root=root):
    fname = os.path.join(root, fname)
    with open(fname, 'w') as f:
        f.write('\n'.join(data))


hgg = os.listdir(os.path.join(root, 'HGG'))
hgg = [os.path.join('HGG', f) for f in hgg]

lgg = os.listdir(os.path.join(root, 'LGG'))
lgg = [os.path.join('LGG', f) for f in lgg]

X = hgg + lgg
Y = [1] * len(hgg) + [0] * len(lgg)

write(X, 'all.txt')

X, Y = np.array(X), np.array(Y)

skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=2018)

for k, (train_index, valid_index) in enumerate(skf.split(Y, Y)):
Example #31
    roles = {'Studenten': 'student',
             'Studierende': 'student',
             'Bedienstete': 'employee',
             'Gäste': 'other'}
    for item in items:
        raw_role, price = item.text.split(':')
        if raw_role in roles:
            prices[roles[raw_role]] = price
    return prices


# name of canteens is suffix at the same time
canteens = ['mensa-universitaetsstrasse-duesseldorf',
            'mensa-kamp-lintfort',
            'mensa-campus-derendorf',
            'mensa-georg-glock-strasse-duesseldorf',
            'mensa-obergath-krefeld',
            'mensa-frankenring-krefeld',
            'mensa-sommerdeich-kleve',
            'mensa-rheydter-strasse-moenchengladbach',
            'restaurant-bar-campus-vita-duesseldorf',
            'essenausgabe-sued-duesseldorf',
            'kunstakademie-duesseldorf',
            'musikhochschule-duesseldorf']

parser = Parser('duesseldorf', handler=parse_url,
                shared_prefix='http://www.stw-d.de/gastronomie/speiseplaene/')

for canteen in canteens:
    parser.define(canteen, suffix=canteen)
Example #32
                continue
            if len(tr) != 2:
                continue  # no meal
            strings = list(tr.contents[0].strings)
            name = strings[0]
            # prices:
            prices = strings[-1].split('|')
            if '-' in map(lambda v: v.strip(), prices):
                prices = {}
            # notes:
            notes = []
            for img in tr.contents[1].find_all('img'):
                notes.append(img['alt'].replace('Symbol', '').strip())
            for extra in list(set(map(lambda v: int(v), extra_regex.findall(tr.text)))):
                if extra in extraLegend:
                    notes.append(extraLegend[extra])
            canteen.addMeal(date, 'Hauptgerichte', name, notes, prices, roles if prices else None)
    return canteen.toXMLFeed()


parser = Parser('magdeburg',
                handler=parse_url,
                shared_prefix='http://www.studentenwerk-magdeburg.de/')
parser.define('ovgu-unten', suffix='mensen-cafeterien/mensa-unicampus/speiseplan-unten/')
parser.define('ovgu-oben', suffix='mensen-cafeterien/mensa-unicampus/speiseplan-oben/')
parser.define('herrenkrug', suffix='mensen-cafeterien/mensa-herrenkrug/speiseplan/')
parser.define('kellercafe', suffix='mensen-cafeterien/mensa-kellercafe/speiseplan/')
parser.define('stendal', suffix='mensen-cafeterien/mensa-stendal/speiseplan/')
parser.define('halberstadt', suffix='mensen-cafeterien/mensa-halberstadt/speiseplan/')
parser.define('wernigerode', suffix='mensen-cafeterien/mensa-wernigerode/speiseplan/')
Example #33
        return self.feed.toXMLFeed()

    @Source.feed(name='thisWeek', priority=1, hour='8', retry='2 60')
    def thisWeek(self, request):
        day = datetime.datetime.now().isocalendar()
        self.parse_data(week=day[1], year=day[0])
        return self.feed.toXMLFeed()

    @Source.feed(name='nextWeek', priority=2, hour='9')
    def nextWeek(self, request):
        day = (datetime.datetime.now() + 7 * datetime.date.resolution).isocalendar()
        self.parse_data(week=day[1], year=day[0])
        return self.feed.toXMLFeed()


parser = Parser(name='halle', version=1.0)
Canteen('harzmensa', parser, location=3, needed_title='Harzmensa')
Canteen('weinbergmensa', parser, location=5, needed_title='Weinbergmensa')
#Canteen('cafebar-weinberg', parser, location=, needed_title='')
Canteen('tulpe', parser, location=10, needed_title='Mensa Tulpe')
Canteen('heidemensa', parser, location=17, needed_title='Heidemensa')
Canteen('burg', parser, location=12, needed_title='Mensa Burg')
Canteen('neuwerk', parser, location=9, needed_title='Neuwerk')
Canteen('franckesche-stiftungen', parser, location=14, needed_title='Franckesche Stiftungen')

#merseburg = parser.sub('merseburg')
Canteen('merseburg', parser, location=16, needed_title='Mensa Merseburg', not_halle=True)
#Canteen('cafebar-merseburg', merseburg, location=, needed_title=)

#dessau = parser.sub('dessau')
Canteen('dessau', parser, location=13, needed_title='Mensa Dessau', not_halle=True)
Example #34
            title = title.text
        text = tds[1].text.replace("enthält", "").strip()
        if title.isdigit():
            zusatzstoffe[title] = text
        else:
            allergene[title] = text
    parse_week(url + this_week, canteen, canteentype, allergene=allergene, zusatzstoffe=zusatzstoffe)
    if not today and next_week is True:
        parse_week(url + "-kommende-woche", canteen, canteentype, allergene=allergene, zusatzstoffe=zusatzstoffe)
    if not today and type(next_week) is str:
        parse_week(url + next_week, canteen, canteentype, allergene=allergene, zusatzstoffe=zusatzstoffe)
    print(canteen.toXMLFeed())
    return canteen.toXMLFeed()


parser = Parser("ostniedersachsen", handler=parse_url, shared_prefix="http://www.stw-on.de")

sub = parser.sub("braunschweig", shared_prefix="/braunschweig/essen/menus/")
sub.define("mensa1-mittag", suffix="mensa-1", extra_args={"canteentype": "Mittagsmensa"})
sub.define("mensa1-abend", suffix="mensa-1", extra_args={"canteentype": "Abendmensa"})
sub.define(
    "mensa360", suffix="360", extra_args={"canteentype": "Pizza", "this_week": "-2", "next_week": "-nachste-woche"}
)
sub.define("mensa2", suffix="mensa-2")
sub.define("hbk", suffix="mensa-hbk")

parser.define("clausthal", suffix="/clausthal/essen/menus/mensa-clausthal", extra_args={"next_week": "-kommend-woche"})

sub = parser.sub("hildesheim", shared_prefix="/hildesheim/essen/menus/")
sub.define("uni", suffix="mensa-uni")
sub.define("hohnsen", suffix="mensa-hohnsen")
Example #35
    # map roles
    roles = {'Studenten': 'student',
             'Bedienstete': 'employee',
             'Gäste': 'other'}
    for item in items:
        raw_role, price = item.text.split(':')
        if raw_role in roles:
            prices[roles[raw_role]] = price
    return prices


# name of canteens is suffix at the same time
canteens = ['mensa-universitaetsstrasse-duesseldorf',
            'mensa-kamp-lintfort',
            'mensa-campus-derendorf',
            'mensa-georg-glock-strasse-duesseldorf',
            'mensa-obergath-krefeld',
            'mensa-frankenring-krefeld',
            'mensa-sommerdeich-kleve',
            'mensa-rheydter-strasse-moenchengladbach',
            'restaurant-bar-campus-vita-duesseldorf',
            'essenausgabe-sued-duesseldorf',
            'kunstakademie-duesseldorf',
            'musikhochschule-duesseldorf']

parser = Parser('duesseldorf', handler=parse_url,
                shared_prefix='http://www.stw-d.de/gastronomie/speiseplaene/')

for canteen in canteens:
    parser.define(canteen, suffix=canteen)
Example #36
                .replace('Biogericht', 'Bio-/Aktionsgericht') \
                .strip()

            canteen.addMeal(date, category, name,
                [legend.get(n, n) for n in notes],
                prices.get(price_category, {})
            )

        date += datetime.timedelta(days=1)
        if today:
            break

    return canteen.toXMLFeed()


parser = Parser('muenchen', handler=parse_url, shared_prefix=base + '/speiseplan/')
parser.define('leopoldstrasse', suffix='speiseplan_{}_411_-de.html')
parser.define('martinsried', suffix='speiseplan_{}_412_-de.html')
parser.define('grosshadern', suffix='speiseplan_{}_414_-de.html')
parser.define('schellingstrasse', suffix='speiseplan_{}_416_-de.html')
parser.define('archisstrasse', suffix='speiseplan_{}_421_-de.html')
parser.define('garching', suffix='speiseplan_{}_422_-de.html')
parser.define('weihenstephan', suffix='speiseplan_{}_423_-de.html')
parser.define('lothstrasse', suffix='speiseplan_{}_431_-de.html')
parser.define('pasing', suffix='speiseplan_{}_432_-de.html')
parser.define('rosenheim', suffix='speiseplan_{}_441_-de.html')
parser.define('adalbertstrasse', suffix='speiseplan_{}_512_-de.html')
parser.define('cafeteria-garching', suffix='speiseplan_{}_524_-de.html')
parser.define('wst', suffix='speiseplan_{}_525_-de.html')
parser.define('akademie', suffix='speiseplan_{}_526_-de.html')
parser.define('boltzmannstrasse', suffix='speiseplan_{}_527_-de.html')
Example #37
                    staff_price = getAndFormatPrice(price)
                elif 'guest' in item['class']:
                    guest_price = getAndFormatPrice(price)
        canteen.addMeal(wdate,
                        category,
                        description,
                        notes=supplies,
                        prices={
                            'student': student_price,
                            'employee': staff_price,
                            'other': guest_price
                        })


parser = Parser('dortmund',
                handler=parse_url,
                shared_prefix='https://www.stwdo.de/mensa-co/')

parser.define('tu-hauptmensa', suffix='tu-dortmund/hauptmensa/')
parser.define('tu-mensa-sued', suffix='tu-dortmund/mensa-sued/')
parser.define('tu-vital', suffix='tu-dortmund/vital/')
parser.define('tu-archeteria', suffix='tu-dortmund/archeteria/')
parser.define('tu-calla', suffix='tu-dortmund/restaurant-calla/')
parser.define('tu-food-fakultaet', suffix='tu-dortmund/food-fakultaet/')
parser.define('fh-mensa-max-ophuels-platz',
              suffix='fh-dortmund/max-ophuels-platz/')
parser.define('fh-mensa-sonnenstrasse', suffix='fh-dortmund/sonnenstrasse/')
parser.define('fh-kostbar', suffix='fh-dortmund/mensa-kostbar/')
parser.define('ism-mensa', suffix='ism/mensa-der-ism/')
parser.define('fernuni-mensa', suffix='hagen')
parser.define('fsw-snackit', suffix='fh-suedwestfalen/hagen/')
Example #38
                mtype = row[4]
                if mtype != '':
                    for i in mtype.split(','):
                        notes.append('ZT' + i)

                prices = [row[6], row[7], row[8]]

                mnotes = []
                for i in notes:
                    mnotes.append(legend.get(i, legend.get(i[2:], i)))

                try:
                    canteen.addMeal(mdate, category, mname,
                                    mnotes, prices, roles)
                except ValueError as e:
                    print('could not add meal {}/{} "{}" due to "{}"'.format(mdate, category, mname, e), file=sys.stderr)
                    # empty meal ...
                    pass

    return canteen.toXMLFeed()


parser = Parser('niederbayern_oberpfalz', handler=parse_url,
                shared_prefix='http://www.stwno.de/infomax/daten-extern/csv/')
parser.define('th-deggendorf', suffix='HS-DEG')
parser.define('hs-landshut', suffix='HS-LA')
parser.define('uni-passau', suffix='UNI-P')
parser.define('oth-regensburg', suffix='HS-R-tag')
parser.define('uni-regensburg', suffix='UNI-R')
Example #39
            if price_div:
                for k, v in price_map.items():
                    price = price_div['data-' + k]
                    if price:
                        prices[v] = price
            canteen.addMeal(date, category, name, notes, prices)

        if closed_candidate and not canteen.hasMealsFor(date):
            canteen.setDayClosed(date)

    return canteen.toXMLFeed()


parser = Parser(
    'wuerzburg',
    handler=parse_url,
    shared_prefix=
    'https://www.studentenwerk-wuerzburg.de/essen-trinken/speiseplaene/plan/')
parser.define('austrasse', suffix='austrasse-bamberg.html')
parser.define('burse', suffix='burse-am-studentenhaus-wuerzburg.html')
parser.define('feldkirchenstrasse', suffix='feldkirchenstrasse-bamberg.html')
#parser.define('frankenstube', suffix='frankenstube-wuerzburg.html')
#parser.define('hubland', suffix='mensa-am-hubland-wuerzburg.html')
parser.define('studentenhaus', suffix='mensa-am-studentenhaus.html')
parser.define('aschaffenburg', suffix='mensa-aschaffenburg.html')
parser.define('augenklinik', suffix='mensa-roentgenring-wuerzburg.html')
parser.define('josef-schneider',
              suffix='mensa-josef-schneider-strasse-wuerzburg.html')
parser.define('schweinfurt', suffix='mensa-schweinfurt.html')
parser.define('mensateria',
              suffix='mensateria-campus-hubland-nord-wuerzburg.html')
Example #40
        if title.isdigit():
            zusatzstoffe[title] = text
        else:
            allergene[title] = text
    parse_week(url + this_week, canteen, canteentype,
               allergene=allergene, zusatzstoffe=zusatzstoffe)
    if not today and next_week is True:
        parse_week(url + '-kommende-woche', canteen, canteentype,
                   allergene=allergene, zusatzstoffe=zusatzstoffe)
    if not today and type(next_week) is str:
        parse_week(url + next_week, canteen, canteentype,
                   allergene=allergene, zusatzstoffe=zusatzstoffe)
    return canteen.toXMLFeed()


parser = Parser('ostniedersachsen', handler=parse_url,
                shared_prefix='http://www.stw-on.de')

sub = parser.sub('braunschweig',
                 shared_prefix='/braunschweig/essen/menus/')
sub.define('mensa1-mittag', suffix='mensa-1', extra_args={'canteentype': 'Mittagsmensa'})
sub.define('mensa1-abend', suffix='mensa-1', extra_args={'canteentype': 'Abendmensa'})
sub.define('mensa360', suffix='360', extra_args={'canteentype': 'Pizza', 'this_week': '-2', 'next_week': '-nachste-woche'})
sub.define('mensa2', suffix='mensa-2')
sub.define('hbk', suffix='mensa-hbk')

parser.define('clausthal', suffix='/clausthal/essen/menus/mensa-clausthal',
              extra_args={'next_week': '-kommend-woche'})

sub = parser.sub('hildesheim', shared_prefix='/hildesheim/essen/menus/')
sub.define('uni', suffix='mensa-uni')
sub.define('hohnsen', suffix='mensa-hohnsen')
Example #41
                name = meal_tr.contents[1].text
                # notes, to do
                canteen.addMeal(date, category, name, [],
                                price_regex.findall(meal_tr.contents[2].text), roles)


def parse_url(url, place_class=None, today=False):
    canteen = OpenMensaCanteen()
    parse_week(canteen, url, place_class)
    day = datetime.date.today()
    old = -1
    day += datetime.date.resolution * 7
    if not today:
        parse_week(canteen, '{}?kw={}'.format(url, day.isocalendar()[1]), place_class)
    day += datetime.date.resolution * 7
    while not today and old != canteen.dayCount():
        old = canteen.dayCount()
        parse_week(canteen, '{}?kw={}'.format(url, day.isocalendar()[1]), place_class)
        day += datetime.date.resolution * 7
    return canteen.toXMLFeed()


parser = Parser('karlsruhe', handler=parse_url,
                shared_args=['http://www.studentenwerk-karlsruhe.de/de/essen/'])
parser.define('adenauerring', args=['canteen_place_1'])
parser.define('moltke', args=['canteen_place_2'])
parser.define('erzbergerstrasse', args=['canteen_place_3'])
parser.define('schloss-gottesaue', args=['canteen_place_4'])
parser.define('tiefenbronner-strasse', args=['canteen_place_5'])
parser.define('holzgartenstrasse', args=['canteen_place_6'])
Example #42
from __future__ import division
import __init__
import os
import os.path as osp
import torch
import argparse
import csv
from datasets.hico_api import Hico
from utils import Parser
from networks import models
from torch.autograd import Variable

import numpy as np
""" Parsing options """
args = argparse.ArgumentParser()
parser = Parser(args)
opt = parser.make_options()
""" Load dataset """
data_path = '{}/{}'.format(opt.data_path, 'hico')
image_path = '{}/{}/{}'.format(opt.data_path, 'hico', 'images')
cand_dir = '{}/{}/{}'.format(opt.data_path, 'hico', 'detections')

dset = Hico(data_path,
            image_path,
            opt.test_split,
            cand_dir=cand_dir,
            thresh_file=opt.thresh_file,
            add_gt=False,
            train_mode=False,
            jittering=False,
            nms_thresh=opt.nms_thresh)
Example #43
        return self.feed.toXMLFeed()

    @Source.feed(name="thisWeek", priority=1, hour="8", retry="2 60")
    def thisWeek(self, request):
        day = datetime.datetime.now().isocalendar()
        self.parse_data(week=day[1], year=day[0])
        return self.feed.toXMLFeed()

    @Source.feed(name="nextWeek", priority=2, hour="9")
    def nextWeek(self, request):
        day = (datetime.datetime.now() + 7 * datetime.date.resolution).isocalendar()
        self.parse_data(week=day[1], year=day[0])
        return self.feed.toXMLFeed()


parser = Parser(name="halle", version=1.0)
Canteen("harzmensa", parser, location=3, needed_title="Harzmensa")
Canteen("weinbergmensa", parser, location=5, needed_title="Weinbergmensa")
# Canteen('cafebar-weinberg', parser, location=, needed_title='')
Canteen("tulpe", parser, location=10, needed_title="Mensa Tulpe")
Canteen("heidemensa", parser, location=17, needed_title="Heidemensa")
Canteen("burg", parser, location=12, needed_title="Mensa Burg")
Canteen("neuwerk", parser, location=9, needed_title="Neuwerk")
Canteen("franckesche-stiftungen", parser, location=14, needed_title="Franckesche Stiftungen")

# merseburg = parser.sub('merseburg')
Canteen("merseburg", parser, location=16, needed_title="Mensa Merseburg", not_halle=True)
# Canteen('cafebar-merseburg', merseburg, location=, needed_title=)

# dessau = parser.sub('dessau')
Canteen("dessau", parser, location=13, needed_title="Mensa Dessau", not_halle=True)
Example #44
parser.add_argument('-batch_size',
                    '--batch_size',
                    default=1,
                    type=int,
                    help='Batch size')
parser.add_argument('-restore',
                    '--restore',
                    default='model_last.pth',
                    type=str)  # model_last.pth
parser.add_argument('-output_path', '--output_path', default='ckpts', type=str)
parser.add_argument('-prefix_path', '--prefix_path', default='', type=str)

path = os.path.dirname(__file__)

args = parser.parse_args()
args = Parser(args.cfg, log='train').add_args(args)

ckpts = args.makedir()
args.resume = os.path.join(ckpts, args.restore)  # specify the epoch


def main():
    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    assert torch.cuda.is_available(), "Currently, we only support CUDA version"

    torch.manual_seed(args.seed)
    # torch.cuda.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
Example #45
    days = (
        "montag",
        "dienstag",
        "mittwoch",
        "donnerstag",
        "freitag",
        "montagNaechste",
        "dienstagNaechste",
        "mittwochNaechste",
        "donnerstagNaechste",
        "freitagNaechste",
    )
    for day in days:
        data = document.find("div", id=day)
        headline = document.find("a", attrs={"data-anchor": "#" + day})
        parse_day(canteen, headline.text, data)
    return canteen.toXMLFeed()


parser = Parser("aachen", handler=parse_url, shared_prefix="http://www.studentenwerk-aachen.de/speiseplaene/")
parser.define("academica", suffix="academica-w.html")
parser.define("ahorn", suffix="ahornstrasse-w.html")
parser.define("templergraben", suffix="templergraben-w.html")
parser.define("bayernallee", suffix="bayernallee-w.html")
parser.define("eups", suffix="eupenerstrasse-w.html")
parser.define("goethe", suffix="goethestrasse-w.html")
parser.define("vita", suffix="vita-w.html")
parser.define("zeltmensa", suffix="forum-w.html")
parser.define("juelich", suffix="juelich-w.html")
Example #46
    while type(mensa_data) != Tag or mensa_data.name != 'div'\
            or 'tx-cagcafeteria-pi1' not in mensa_data.get('class', []):
        mensa_data = mensa_data.next_sibling
    weekDays = extractWeekDates(mensa_data.find('h2').text)
    for day_headline in mensa_data.find_all('h3'):
        date = weekDays[day_headline.text]
        day_table = day_headline.next_sibling.next_sibling
        for tr_menu in day_table.tbody.find_all('tr'):
            category = tr_menu.find_all('td')[0].text.strip()
            name = tr_menu.find_all('td')[1].text.replace('\r\n', ' ').strip()
            canteen.addMeal(date, category, name, [], tr_menu.find_all('td')[2].text)


def parse_url(url, mensa, *weeks, today):
    canteen = LazyBuilder()
    for week in weeks:
        parse_week(url + week, canteen, mensa)
        if today:
            break
    return canteen.toXMLFeed()


parser = Parser('marburg', handler=parse_url,
                shared_args=['http://www.studentenwerk-marburg.de/essen-trinken/speiseplan/'])
parser.define('bistro', args=['Speiseplan.*Bistro', 'diese-woche-bistro.html', 'naechste-woche-bistro.html'])
parser.define('mos-diner', args=['Speiseplan.*Diner', 'diese-woche-mos-diner.html'])
parser.define('erlenring', args=['Mensa Erlenring', 'diese-woche-mensa-erlenring-und-lahnberge.html',
              'naechste-woche-mensa-erlenring-und-lahnberge.html'])
parser.define('lahnberge', args=['Mensa Lahnberge', 'diese-woche-mensa-erlenring-und-lahnberge.html',
              'naechste-woche-mensa-erlenring-und-lahnberge.html'])
Example #47
                        food_type = parse_foot_type(tds[2])
                        food_description = get_foot_description(tds[3])
                        notes_string = build_notes_string(tds[3])
                        if notes_string != "":
                            notes.append(notes_string)
                        prices = get_pricing(tds, 4, 7)
                        if food_type is not None:
                            canteen.addMeal(date, food_type, food_description, notes, prices, roles if prices else None)
            except Exception as e:
                traceback.print_exception(*sys.exc_info())

    return canteen.toXMLFeed()



parser = Parser('erlangen_nuernberg',
                handler=parse_url,
                shared_prefix='http://www.studentenwerk.uni-erlangen.de/verpflegung/de/')
parser.define('er-langemarck', suffix='sp-er-langemarck.shtml')
parser.define('er-sued', suffix='sp-er-sued.shtml')
parser.define('n-schuett', suffix='sp-n-schuett.shtml')
parser.define('n-regens', suffix='sp-n-regens.shtml')
parser.define('n-stpaul', suffix='sp-n-stpaul.shtml')
parser.define('n-mensateria', suffix='sp-n-mensateria.shtml')
parser.define('n-hohfederstr', suffix='sp-n-hohfederstr.shtml')
parser.define('n-baerenschanzstr', suffix='sp-n-baerenschanzstr.shtml')
parser.define('eichstaett', suffix='sp-eichstaett.shtml')
parser.define('ingolstadt', suffix='sp-ingolstadt.shtml')
parser.define('ansbach', suffix='sp-ansbach.shtml')
parser.define('triesdorf', suffix='sp-triesdorf.shtml')
Example #48
                .replace('Biogericht', 'Bio-/Aktionsgericht') \
                .strip()

            canteen.addMeal(date, category, name,
                            [legend.get(n, n) for n in notes],
                            prices.get(price_category, {}))

        date += datetime.timedelta(days=1)
        if today:
            break

    return canteen.toXMLFeed()


parser = Parser('muenchen',
                handler=parse_url,
                shared_prefix=base + '/speiseplan/')
parser.define('leopoldstrasse', suffix='speiseplan_{}_411_-de.html')
parser.define('martinsried', suffix='speiseplan_{}_412_-de.html')
parser.define('grosshadern', suffix='speiseplan_{}_414_-de.html')
parser.define('schellingstrasse', suffix='speiseplan_{}_416_-de.html')
parser.define('archisstrasse', suffix='speiseplan_{}_421_-de.html')
parser.define('garching', suffix='speiseplan_{}_422_-de.html')
parser.define('weihenstephan', suffix='speiseplan_{}_423_-de.html')
parser.define('lothstrasse', suffix='speiseplan_{}_431_-de.html')
parser.define('pasing', suffix='speiseplan_{}_432_-de.html')
parser.define('rosenheim', suffix='speiseplan_{}_441_-de.html')
parser.define('adalbertstrasse', suffix='speiseplan_{}_512_-de.html')
parser.define('cafeteria-garching', suffix='speiseplan_{}_524_-de.html')
parser.define('wst', suffix='speiseplan_{}_525_-de.html')
parser.define('akademie', suffix='speiseplan_{}_526_-de.html')
Example #49
                            elif group == 'Bed.:':
                                prices['employee'] = price
                            elif group == 'Gast:':
                                prices['other'] = price

                    canteen.addMeal(date, category, menuName, notes, prices)
        else:
            canteen.setDayClosed(date)

        # check for further pages
        nextPageLink = dom.find(id='next_day_link')
        if nextPageLink is None or today:
            url = None
        else:
            url = 'https://www.studentenwerk-rostock.de/' + nextPageLink['href']
    return canteen.toXMLFeed()

def parse_url(url, today=False):
    splitted = url.split('#')
    return parsePlan(splitted[0], splitted[1], today)


parser = Parser('rostock', handler=parse_url, shared_prefix='https://www.studentenwerk-rostock.de/de/mensen/speiseplaene.html')
parser.define('mensa-sued', suffix='#mensa_id_1')
parser.define('campus-cafeteria-einstein', suffix='#mensa_id_13')
parser.define('mensa-st-georg-straße', suffix='#mensa_id_2')
parser.define('mensa-multiple-choice', suffix='#mensa_id_14')
parser.define('mensa-kleine-ulme', suffix='#mensa_id_3')
parser.define('mensa-ulme-69', suffix='#mensa_id_8')
parser.define('campus-mensa-wismar', suffix='#mensa_id_5')
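Note that the define() suffixes above are URL fragments: parse_url splits the combined URL on '#' so every canteen shares one plan page while the fragment carries the mensa id through to parsePlan.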
Example #50
import resource
import time

from utils import ResultWriter, Parser, get_args, Algorithm
# (Item, ItemWithSlots and the *Algorithm classes come from elsewhere in
# the original project; those imports are not shown in this snippet.)

if __name__ == "__main__":
    alg, input_file, output_file = get_args()

    item_class = Item
    if alg == Algorithm.GREEDY:
        algorithm = GreedyAlgorithm
    elif alg == Algorithm.DYNAMIC:
        algorithm = DynamicProgramingAlgorithm
    elif alg == Algorithm.DYNAMIC_OPTIMAL:
        algorithm = DynamicProgramingOptimizedAlgorithm
        item_class = ItemWithSlots
    else:
        raise ValueError("Algorithm can be one of: {}, {}, {}".format(
            Algorithm.GREEDY, Algorithm.DYNAMIC, Algorithm.DYNAMIC_OPTIMAL))

    investor, items = Parser.parse(input_file, item_class)

    start_time = time.time()

    algorithm(investor, items).calculate()

    duration = time.time() - start_time

    print('Duration is {:.2f} ms'.format(duration * 1000))
    print('Memory usage: {} KB'.format(
        resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))

    ResultWriter.write(investor, output_file)
Example #51
        notes = []
        for notematch in note_regex.findall(line):
            if notematch not in legends:
                print("unknown legend: {}".format(notematch))
                continue
            notes.append(legends[notematch])
        canteen.addMeal(date, category, name, notes, price_regex.findall(line), roles)


def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url + "&wann=2", canteen)
    if not today:
        parse_week(url + "&wann=3", canteen)
    return canteen.toXMLFeed()


parser = Parser("hannover", handler=parse_url, shared_prefix="http://www.stwh-portal.de/mensa/index.php?format=txt&wo=")
parser.define("hauptmensa", suffix="2")
parser.define("hauptmensa-marktstand", suffix="9")
parser.define("restaurant-ct", suffix="10")
parser.define("contine", suffix="3")
parser.define("pzh", suffix="13")
parser.define("caballus", suffix="1")
parser.define("tiho-tower", suffix="0")
parser.define("hmtmh", suffix="8")
parser.define("ricklinger-stadtweg", suffix="6")
parser.define("kurt-schwitters-forum", suffix="7")
parser.define("blumhardtstrasse", suffix="14")
parser.define("herrenhausen", suffix="12")
Example #52
0
                continue
            notes.append(legends[notematch])
        canteen.addMeal(date, category, name, notes, price_regex.findall(line),
                        roles)


def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url + '&wann=2', canteen)
    if not today:
        parse_week(url + '&wann=3', canteen)
    return canteen.toXMLFeed()


parser = Parser(
    'hannover',
    handler=parse_url,
    shared_prefix='http://www.stwh-portal.de/mensa/index.php?format=txt&wo=')
parser.define('hauptmensa', suffix='2')
parser.define('hauptmensa-marktstand', suffix='9')
parser.define('restaurant-ct', suffix='10')
parser.define('contine', suffix='3')
parser.define('pzh', suffix='13')
parser.define('caballus', suffix='1')
parser.define('tiho-tower', suffix='0')
parser.define('hmtmh', suffix='8')
parser.define('ricklinger-stadtweg', suffix='6')
parser.define('kurt-schwitters-forum', suffix='7')
parser.define('blumhardtstrasse', suffix='14')
parser.define('herrenhausen', suffix='12')
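
Both hannover variants above build two week views from one base URL: '&wann=2' selects the current week and '&wann=3' the next, the latter skipped when only today's data is wanted. Illustratively, for the 'contine' canteen (suffix '3'):

base = 'http://www.stwh-portal.de/mensa/index.php?format=txt&wo=' + '3'
print(base + '&wann=2')  # current week
print(base + '&wann=3')  # next week; skipped when today=True
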
Example #53
0
    # todo only for: Tellergericht, vegetarisch, Klassiker, Empfehlung des Tages:
    canteen.setAdditionalCharges('student', {'other': 1.5})

    document = parse(urlopen(url).read())

    global legend
    regex = r'(?P<name>(\d|[A-Z])+)\)\s*' + \
            r'(?P<value>\w+((\s+\w+)*[^0-9)]))'
    legend = buildLegend(legend, document.find(id='additives').text, regex=regex)

    days = ('montag', 'dienstag', 'mittwoch', 'donnerstag', 'freitag',
            'montagNaechste', 'dienstagNaechste', 'mittwochNaechste', 'donnerstagNaechste', 'freitagNaechste')
    for day in days:
        data = document.find('div', id=day)
        headline = document.find('a', attrs={'data-anchor': '#' + day})
        parse_day(canteen, headline.text, data)
    return canteen.toXMLFeed()


parser = Parser('aachen', handler=parse_url,
                shared_prefix='http://www.studentenwerk-aachen.de/speiseplaene/')
parser.define('academica', suffix='academica-w.html')
parser.define('ahorn', suffix='ahornstrasse-w.html')
parser.define('templergraben', suffix='templergraben-w.html')
parser.define('bayernallee', suffix='bayernallee-w.html')
parser.define('eups', suffix='eupenerstrasse-w.html')
parser.define('goethe', suffix='goethestrasse-w.html')
parser.define('vita', suffix='vita-w.html')
parser.define('zeltmensa', suffix='forum-w.html')
parser.define('juelich', suffix='juelich-w.html')
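
For reference, the legend regex used above pairs a short key ('1)', 'A)') with its description. A standalone check against made-up sample text (matched values can keep a trailing space, since the final character class also accepts whitespace, hence the strip()):

import re

regex = r'(?P<name>(\d|[A-Z])+)\)\s*' + \
        r'(?P<value>\w+((\s+\w+)*[^0-9)]))'
sample = '1) Farbstoff 2) Konservierungsstoff A) Gluten'
for match in re.finditer(regex, sample):
    print(match.group('name'), '->', match.group('value').strip())
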
Example #54
0
import pickle
import os
import numpy as np
import nibabel as nib
from utils import Parser
import time

args = Parser()

patch_shapes = [(22, 22, 22), (25, 25, 25), (28, 28, 28)]

modalities = ('flair', 't1ce', 't1', 't2')


def nib_load(file_name):
    # A missing modality file yields a 1-element dummy array instead of raising.
    if not os.path.exists(file_name):
        return np.array([1])

    proxy = nib.load(file_name)
    data = proxy.get_data()  # deprecated in newer nibabel; get_fdata() replaces it
    proxy.uncache()
    return data


def get_dist2center(patch_shape):
    # Distance from the patch center to each boundary, per dimension.
    ndim = len(patch_shape)
    dist2center = np.zeros((ndim, 2), dtype='int32')  # from patch boundaries
    for dim, shape in enumerate(patch_shape):
        dist2center[dim] = [shape/2 - 1, shape/2] if shape % 2 == 0 \
                else [shape//2, shape//2]
    return dist2center
Example #55
0
    global legend
    canteen = LazyBuilder()
    canteen.setLegendData(legend)
    day = datetime.date.today()
    emptyCount = 0
    totalCount = 0
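    # Walk forward one day at a time; stop after 7 consecutive empty days
    # or 32 days in total (presumably so short holiday gaps do not end the
    # scan early).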
    while emptyCount < 7 and totalCount < 32:
        if not parse_day(canteen, '{}&tag={}&monat={}&jahr={}'
                         .format(url, day.day, day.month, day.year),
                         day.strftime('%Y-%m-%d')):
            emptyCount += 1
        else:
            emptyCount = 0
        if today:
            break
        totalCount += 1
        day += datetime.date.resolution
    return canteen.toXMLFeed()


parser = Parser('chemnitz_zwickau', handler=parse_url,
                shared_prefix='http://www.swcz.de/bilderspeiseplan/xml.php?plan=')
parser.define('mensa-reichenhainer-strasse', suffix='1479835489')
parser.define('mensa-strasse-der-nationen', suffix='773823070')
parser.define('mensa-ring', suffix='4')
parser.define('mensa-scheffelberg', suffix='3')
parser.define('cafeteria-reichenhainer-strasse', suffix='7')
parser.define('cafeteria-strasse-der-nationen', suffix='6')
parser.define('cafeteria-ring', suffix='5')
parser.define('cafeteria-scheffelberg', suffix='8')
Example #56
0
            title = item.find('title').text
            description = get_description(title)
            notes = build_notes_string(title)
            plist = [
                item.find('preis1').text,
                item.find('preis2').text,
                item.find('preis3').text
            ]
            food_type = get_food_types(item.find('piktogramme').text)
            canteen.addMeal(date, food_type, description, notes, plist, roles)
    return canteen.toXMLFeed()


parser = Parser(
    'erlangen_nuernberg',
    handler=parse_url,
    shared_prefix=
    'https://www.max-manager.de/daten-extern/sw-erlangen-nuernberg/xml/')
parser.define('er-langemarck', suffix='mensa-lmp.xml')
parser.define('er-sued', suffix='mensa-sued.xml')
parser.define('n-schuett', suffix='mensa-inselschuett.xml')
parser.define('n-regens', suffix='mensa-regensburgerstr.xml')
parser.define('n-stpaul', suffix='mensateria-st-paul.xml')
parser.define('n-mensateria', suffix='mensateria-ohm.xml')
parser.define('n-hohfederstr', suffix='cafeteria-come-in.xml')
parser.define('n-baerenschanzstr', suffix='cafeteria-baerenschanzstr.xml')
parser.define('eichstaett', suffix='mensa-eichstaett.xml')
parser.define('ingolstadt', suffix='mensa-ingolstadt.xml')
parser.define('ansbach', suffix='mensa-ansbach.xml')
parser.define('triesdorf', suffix='mensateria-triesdorf.xml')
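
The three preisN fields are passed positionally and line up with a roles tuple defined elsewhere in the original module; student/employee/guest is the usual OpenMensa ordering, assumed here:

roles = ('student', 'employee', 'other')  # assumption; defined above the excerpt
plist = ['2,60', '3,90', '5,20']          # preis1..preis3, sample values
print(dict(zip(roles, plist)))
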
Example #57
0
                supplies = []
                for supply in item.find_all('img'):
                    if supply['title']:
                        supplies.append(supply['title'])
            elif 'price' in item['class']:
                price = item.text
                if 'student' in item['class']:
                    student_price = getAndFormatPrice(price)
                elif 'staff' in item['class']:
                    staff_price = getAndFormatPrice(price)
                elif 'guest' in item['class']:
                    guest_price = getAndFormatPrice(price)
        if description != "":
            canteen.addMeal(wdate, category, description, notes=supplies,
                            prices={'student': student_price,
                                    'employee': staff_price,
                                    'other': guest_price})

parser = Parser('dortmund', handler=parse_url, shared_prefix='https://www.stwdo.de/mensa-co/')

parser.define('tu-hauptmensa', suffix='tu-dortmund/hauptmensa/')
parser.define('tu-mensa-sued', suffix='tu-dortmund/mensa-sued/')
parser.define('tu-vital', suffix='tu-dortmund/vital/')
parser.define('tu-archeteria', suffix='tu-dortmund/archeteria/')
parser.define('tu-calla', suffix='tu-dortmund/restaurant-calla/')
parser.define('tu-food-fakultaet', suffix='tu-dortmund/food-fakultaet/')
parser.define('fh-mensa-max-ophuels-platz', suffix='fh-dortmund/max-ophuels-platz/')
parser.define('fh-mensa-sonnenstrasse', suffix='fh-dortmund/sonnenstrasse/')
parser.define('fh-kostbar', suffix='fh-dortmund/mensa-kostbar/')
parser.define('ism-mensa', suffix='ism/mensa-der-ism/')
parser.define('fernuni-mensa', suffix='hagen')
parser.define('fsw-snackit', suffix='fh-suedwestfalen/hagen/')
parser.define('fsw-canape', suffix='fh-suedwestfalen/iserlohn/')
parser.define('fsw-davinci', suffix='fh-suedwestfalen/meschede/')
Example #58
0
        self.avg = self.sum / self.count


if __name__ == '__main__':
    global args

    parser = argparse.ArgumentParser()
    parser.add_argument('-cfg', '--cfg', default='deepmedic_ce_all', type=str)
    parser.add_argument('-gpu', '--gpu', default='3', type=str)
    args = parser.parse_args()
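    # Note: the hard-coded assignments below override whatever was passed on
    # the command line.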

    args.cfg = 'deepmedic_nr'
    args.gpu = str(args.gpu)
    args.gpu = '3'

    args = Parser(args.cfg, log='test').add_args(args)

    args.valid_list = 'train_0.txt'

    args.data_dir = '/home/thuyen/Data/brats17/Brats17ValidationData'
    args.valid_list = 'test.txt'

    args.ckpt = 'model_last.tar'
    #args.ckpt = 'model_iter_227.tar'

    folder = os.path.splitext(args.valid_list)[0]
    out_dir = os.path.join('output', args.name, folder)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    args.out_dir = out_dir
Example #59
0
        return canteen.toXMLFeed()
    root = ET.fromstring(xml_data)
    for day in root:
        date = time.strftime('%d.%m.%Y', time.localtime(int(day.get('timestamp'))))
        for item in day:
            title = item.find('title').text
            description = get_description(title)
            notes = build_notes_string(title)
            plist = [item.find('preis1').text, item.find('preis2').text, item.find('preis3').text]
            food_type = get_food_types(item.find('piktogramme').text)
            canteen.addMeal(date, food_type, description, notes, plist, roles)
    return canteen.toXMLFeed()


parser = Parser('erlangen_nuernberg',
                handler=parse_url,
                shared_prefix='https://www.max-manager.de/daten-extern/sw-erlangen-nuernberg/xml/')
parser.define('er-langemarck', suffix='mensa-lmp.xml')
parser.define('er-sued', suffix='mensa-sued.xml')
parser.define('n-schuett', suffix='mensa-inselschuett.xml')
parser.define('n-regens', suffix='mensa-regensburgerstr.xml')
parser.define('n-stpaul', suffix='mensateria-st-paul.xml')
parser.define('n-mensateria', suffix='mensateria-ohm.xml')
parser.define('n-hohfederstr', suffix='cafeteria-come-in.xml')
parser.define('n-baerenschanzstr', suffix='cafeteria-baerenschanzstr.xml')
parser.define('eichstaett', suffix='mensa-eichstaett.xml')
parser.define('ingolstadt', suffix='mensa-ingolstadt.xml')
parser.define('ansbach', suffix='mensa-ansbach.xml')
parser.define('triesdorf', suffix='mensateria-triesdorf.xml')
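
Since parse_url() only needs a complete XML URL, a quick smoke test can bypass the registry and call the handler directly (requires network access; the exact output depends on the live feed):

feed = parse_url('https://www.max-manager.de/daten-extern/'
                 'sw-erlangen-nuernberg/xml/mensa-sued.xml')
print(feed[:200])
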