import sys  # 'Parser' and 'cbmcmc' are project-local modules (not shown in this excerpt)


def main():
    # Parse the user-given command-line arguments
    parser = Parser.Parser(sys.argv[1:]).args

    cbmcmc(parser.data, parser.it, parser.basis, parser.treeprior, parser.r,
           parser.p, parser.cob_freq, parser.outfile, parser.seed)

    return 0
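A conventional entry-point guard for this script, shown as a sketch (the original excerpt may define one elsewhere):

if __name__ == '__main__':
    sys.exit(main())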
Example #2
def init_federated():

    # clients list
    clients = []

    # load configs
    parser = Parser()
    parser.parse()
    config = parser.config

    # generate clients
    for i in range(config.num_of_clients):
        clients.append(Client(id=i, config=config))

    # generate server
    server = Server(id=0, config=config)

    # initialise generator and discriminator weights from a normal distribution
    server.generator_A2B.apply(weights_init_normal)
    server.generator_B2A.apply(weights_init_normal)
    server.discriminator_A.apply(weights_init_normal)
    server.discriminator_B.apply(weights_init_normal)

    # restore the global model if all four checkpoint files exist; the original
    # elif repeated the negated condition, leaving the raise-branch unreachable
    model_files = [server.generator_name_A2B, server.generator_name_B2A,
                   server.discriminator_name_A, server.discriminator_name_B]
    if all(os.path.exists(server.model_dir + name) for name in model_files):
        server.load_model()
        print("Global model saved on the server has been restored!")
    else:
        print("Global model has been created!")

    # load datasets
    # This method is detached from the init part
    # load_datasets(clients=clients, config=config)

    # load models
    for client in clients:
        client.load_model(generator_A2B=server.generator_A2B,
                          generator_B2A=server.generator_B2A,
                          discriminator_A=server.discriminator_A,
                          discriminator_B=server.discriminator_B)
        print("Client {}'s model has been updated from the server".format(
            client.id))

    return clients, server, config
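A minimal consumer of this initialiser, purely illustrative (the federated training loop itself is project-specific and not shown here):

clients, server, config = init_federated()
print('initialised {} clients'.format(len(clients)))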
Example #3
def _get_parser(cls):
    # extracted from a class; the @classmethod decorator is not shown in this excerpt
    parser = Parser(prog='get')
    parser.add_argument('-t',
                        '--target',
                        nargs='*',
                        help='File to download')
    parser.add_argument('-d',
                        '--destination',
                        nargs='*',
                        help='Directory to download file to')
    return parser
Example #4
def populate_db_words(sentences):
    parser = Parser(sentences)
    words = {}
    for tokens in parser.get_tokenised_parts():
        for (word, pos) in tokens:
            words[word] = words.get(word, 0) + 1

    for word, freq in words.items():
        dbEntry = Words(word=word, freq=freq)
        dbEntry.save()
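The same counting can be expressed with collections.Counter; a minimal sketch, assuming the same project-local Parser and Words model as above:

from collections import Counter

def populate_db_words_counter(sentences):
    # count every token across all tokenised parts
    words = Counter(word for tokens in Parser(sentences).get_tokenised_parts()
                    for (word, pos) in tokens)
    for word, freq in words.items():
        Words(word=word, freq=freq).save()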
Example #5
def __init__(self, **kwargs):
    # class initialiser excerpt: stores scraper configuration passed as keyword arguments
    self.decode = kwargs.get('decode', 'utf-8')
    self.start_url = kwargs['start_url']
    self.page_f = kwargs.get('page_f')
    self.last_page_xp = kwargs.get('last_page_xp')
    self.xp_page = kwargs.get('xp_page', True)
    self.url_xp = kwargs['url_xp']
    self.url_prefix = kwargs.get('url_prefix', '')
    self.total_page = kwargs.get('total_page')
    self.data = kwargs.get('data')
    self.page_no_key = kwargs.get('page_no_key')
    self.post_url = kwargs.get('post_url')
    self.divide_by = kwargs.get('divide_by')
    self.total_count_key = kwargs.get('total_count_key')
    self.parser = Parser(decode=self.decode)
Example #6
import requests
from bs4 import BeautifulSoup  # 'Parser' here is apparently the project's HTML-to-Markdown converter


def html2md(url, md_file, with_title=False):
    response = requests.get(url)
    soup = BeautifulSoup(response.content,
                         'html.parser',
                         from_encoding="utf-8")
    html = ""
    for child in soup.find_all('svg'):
        child.extract()
    if with_title:
        for c in soup.find_all('div', {'class': 'article-title-box'}):
            html += str(c)
    for c in soup.find_all('div', {'id': 'content_views'}):
        html += str(c)

    parser = Parser(html)
    with open(md_file, 'w') as f:
        f.write('{}\n'.format(''.join(parser.outputs)))
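An illustrative call (the URL and output filename are placeholders):

html2md('https://example.com/article', 'article.md', with_title=True)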
Example #7
def populate_db_bigrams(sentences):
    parser = Parser(sentences)
    bigrams = {}
    for tokens in parser.get_tokenised_parts():
        for i in range(0, len(tokens) - 1):
            (word1, pos1) = tokens[i]
            (word2, pos2) = tokens[i + 1]
            bigram = (word1, word2)
            bigrams[bigram] = bigrams.get(bigram, 0) + 1

    for bigram, freq in bigrams.items():
        (w1, w2) = bigram
        dbEntry = Bigrams(word1=w1, word2=w2, freq=freq)
        dbEntry.save()
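Adjacent-pair iteration can also be written with zip, which pairs each token with its successor; a sketch under the same assumptions (project-local Parser and Bigrams model):

from collections import Counter

def populate_db_bigrams_zip(sentences):
    bigrams = Counter()
    for tokens in Parser(sentences).get_tokenised_parts():
        # zip(tokens, tokens[1:]) yields each consecutive (word, pos) pair
        bigrams.update((w1, w2)
                       for (w1, p1), (w2, p2) in zip(tokens, tokens[1:]))
    for (w1, w2), freq in bigrams.items():
        Bigrams(word1=w1, word2=w2, freq=freq).save()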
Example #8
def test_parser():
    """ test that parser returns the correct types
    """
    sys_args = ["--data",  "observations.csv", "--outfile", "results/samples.dat", "--it", "7500",
                "--basis", "cycle", "--treeprior", "all", "-r", "6",
                "-p", ".75", "--cob-freq", "100", "--seed", "123"]

    parser = Parser.Parser(sys_args)
    assert isinstance(parser.args.data, str)
    assert isinstance(parser.args.outfile, str)
    assert isinstance(parser.args.it, int)
    assert isinstance(parser.args.basis, str)
    assert isinstance(parser.args.treeprior, str)
    assert isinstance(parser.args.r, int)
    assert isinstance(parser.args.p, float)
    assert isinstance(parser.args.cob_freq, int)
    if parser.args.seed is not None:
        assert isinstance(parser.args.seed, int)
Example #9
def _find_parser(cls):
    # extracted from a class; the @classmethod decorator is not shown in this excerpt
    parser = Parser(prog='find')
    parser.add_argument(
        '-e', '--exact',
        action='store_true',
        default=False,
        help='Do an exact string match'
    )
    parser.add_argument(
        '-r', '--relative',
        action='store_true',
        default=False,
        help='Search relative to current path'
    )
    parser.add_argument(
        'target',
        nargs='*',
        help='Target to search for'
    )
    return parser
Example #10
import sys  # 'Initialiser', 'CodeWriter' and 'Parser' are project-local modules


def main():
    # Create initialiser instance and pass it the command-line argument
    initialiser = Initialiser(sys.argv[1])

    # Create code_writer instance and write bootstrap code to file
    code_writer = CodeWriter(initialiser.asm_filename)
    code_writer.write_init()

    # Create list of parsers, one for each vm_file
    parsers = [Parser(x) for x in initialiser.vm_files]

    for parser in parsers:
        # Set filename of parser that is currently being translated
        code_writer.set_filename(parser.filename)

        # Parse the VM file
        while parser.has_more_commands():
            initialiser.translate_file(parser, code_writer)

    code_writer.close()
Example #11
# reconstructed preamble: this excerpt begins mid-script, so the imports and
# ArgumentParser creation below are assumed from context; additional arguments
# such as -cfg are omitted in the original excerpt
import argparse
import os
import random

import numpy as np
import torch

from utils import Parser  # project-local config helper (assumed from sibling examples)

parser = argparse.ArgumentParser()
parser.add_argument('-batch_size',
                    '--batch_size',
                    default=1,
                    type=int,
                    help='Batch size')
parser.add_argument('-restore',
                    '--restore',
                    default='model_last.pth',
                    type=str)  # model_last.pth
parser.add_argument('-output_path', '--output_path', default='ckpts', type=str)
parser.add_argument('-prefix_path', '--prefix_path', default='', type=str)

path = os.path.dirname(__file__)

args = parser.parse_args()
args = Parser(args.cfg, log='train').add_args(args)

ckpts = args.makedir()
args.resume = os.path.join(ckpts, args.restore)  # specify the epoch


def main():
    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    assert torch.cuda.is_available(), "Currently, only the CUDA version is supported"

    torch.manual_seed(args.seed)
    # torch.cuda.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
Example #12
#import matplotlib
#import matplotlib.pyplot as plt
import argparse
import os
import time

import SimpleITK as sitk  # used by saveimage() below
from skimage.transform import resize
from skimage.morphology import closing, binary_closing, binary_opening

import losses
from losses import Precision_img, Recall_img, F1_score_img
from utils import Parser  # project-local config helper (assumed from sibling examples)

parser = argparse.ArgumentParser()
parser.add_argument('-cfg', '--cfg', default='cell', type=str)

path = os.path.dirname(__file__)

# parse arguments
args = parser.parse_args()
args = Parser(args.cfg, args)
ckpts = args.getdir()


def saveimage(image, filename):
    data = sitk.GetImageFromArray(image)
    sitk.WriteImage(data, filename)


def main():
    start_time = time.time()
    # setup environments and seeds
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # setup networks
    #Network = getattr(models, args.net)
Example #13
            title = item.find('title').text
            description = get_description(title)
            notes = build_notes_string(title)
            plist = [
                item.find('preis1').text,
                item.find('preis2').text,
                item.find('preis3').text
            ]
            food_type = get_food_types(item.find('piktogramme').text)
            canteen.addMeal(date, food_type, description, notes, plist, roles)
    return canteen.toXMLFeed()


parser = Parser(
    'erlangen_nuernberg',
    handler=parse_url,
    shared_prefix=
    'https://www.max-manager.de/daten-extern/sw-erlangen-nuernberg/xml/')
parser.define('er-langemarck', suffix='mensa-lmp.xml')
parser.define('er-sued', suffix='mensa-sued.xml')
parser.define('n-schuett', suffix='mensa-inselschuett.xml')
parser.define('n-regens', suffix='mensa-regensburgerstr.xml')
parser.define('n-stpaul', suffix='mensateria-st-paul.xml')
parser.define('n-mensateria', suffix='mensateria-ohm.xml')
parser.define('n-hohfederstr', suffix='cafeteria-come-in.xml')
parser.define('n-baerenschanzstr', suffix='cafeteria-baerenschanzstr.xml')
parser.define('eichstaett', suffix='mensa-eichstaett.xml')
parser.define('ingolstadt', suffix='mensa-ingolstadt.xml')
parser.define('ansbach', suffix='mensa-ansbach.xml')
parser.define('triesdorf', suffix='mensateria-triesdorf.xml')
Example #14
import pickle
import os
import numpy as np
import nibabel as nib
from utils import Parser
import time

args = Parser()

patch_shapes = [(22, 22, 22), (25, 25, 25), (28, 28, 28)]

modalities = ('flair', 't1ce', 't1', 't2')


def nib_load(file_name):

    if not os.path.exists(file_name):
        # placeholder value when the modality file is absent
        return np.array([1])

    proxy = nib.load(file_name)
    data = proxy.get_data()  # deprecated in newer nibabel; get_fdata() is the modern equivalent
    proxy.uncache()
    return data
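An illustrative call, assuming BraTS-style file naming (path and case name are placeholders):

flair = nib_load('data/HGG/case_001/case_001_flair.nii.gz')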


def get_dist2center(patch_shape):
    ndim = len(patch_shape)
    dist2center = np.zeros((ndim, 2), dtype='int32')  # from patch boundaries
    for dim, shape in enumerate(patch_shape):
        dist2center[dim] = [shape/2 - 1, shape/2] if shape % 2 == 0 \
                else [shape//2, shape//2]
Example #15
                continue
            notes.append(legends[notematch])
        canteen.addMeal(date, category, name, notes, price_regex.findall(line),
                        roles)


def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url + '&wann=2', canteen)
    if not today:
        parse_week(url + '&wann=3', canteen)
    return canteen.toXMLFeed()


parser = Parser(
    'hannover',
    handler=parse_url,
    shared_prefix='http://www.stwh-portal.de/mensa/index.php?format=txt&wo=')
parser.define('hauptmensa', suffix='2')
parser.define('hauptmensa-marktstand', suffix='9')
parser.define('restaurant-ct', suffix='10')
parser.define('contine', suffix='3')
parser.define('pzh', suffix='13')
parser.define('caballus', suffix='1')
parser.define('tiho-tower', suffix='0')
parser.define('hmtmh', suffix='8')
parser.define('ricklinger-stadtweg', suffix='6')
parser.define('kurt-schwitters-forum', suffix='7')
parser.define('blumhardtstrasse', suffix='14')
parser.define('herrenhausen', suffix='12')
Example #16
        return self.feed.toXMLFeed()

    @Source.feed(name='thisWeek', priority=1, hour='8', retry='2 60')
    def thisWeek(self, request):
        day = datetime.datetime.now().isocalendar()
        self.parse_data(week=day[1], year=day[0])
        return self.feed.toXMLFeed()

    @Source.feed(name='nextWeek', priority=2, hour='9')
    def nextWeek(self, request):
        day = (datetime.datetime.now() + 7 * datetime.date.resolution).isocalendar()
        self.parse_data(week=day[1], year=day[0])
        return self.feed.toXMLFeed()


parser = Parser(name='halle', version=1.0)
Canteen('harzmensa', parser, location=3, needed_title='Harzmensa')
Canteen('weinbergmensa', parser, location=5, needed_title='Weinbergmensa')
#Canteen('cafebar-weinberg', parser, location=, needed_title='')
Canteen('tulpe', parser, location=10, needed_title='Mensa Tulpe')
Canteen('heidemensa', parser, location=17, needed_title='Heidemensa')
Canteen('burg', parser, location=12, needed_title='Mensa Burg')
Canteen('neuwerk', parser, location=9, needed_title='Neuwerk')
Canteen('franckesche-stiftungen', parser, location=14, needed_title='Franckesche Stiftungen')

#merseburg = parser.sub('merseburg')
Canteen('merseburg', parser, location=16, needed_title='Mensa Merseburg', not_halle=True)
#Canteen('cafebar-merseburg', merseburg, location=, needed_title=)

#dessau = parser.sub('dessau')
Canteen('dessau', parser, location=13, needed_title='Mensa Dessau', not_halle=True)
Example #17
import os
from sklearn.model_selection import StratifiedKFold
import numpy as np

from utils import Parser
args = Parser('settings')
root = args.data_dir


def write(data, fname, root=root):
    fname = os.path.join(root, fname)
    with open(fname, 'w') as f:
        f.write('\n'.join(data))


hgg = os.listdir(os.path.join(root, 'HGG'))
hgg = [os.path.join('HGG', f) for f in hgg]

lgg = os.listdir(os.path.join(root, 'LGG'))
lgg = [os.path.join('LGG', f) for f in lgg]

X = hgg + lgg
Y = [1] * len(hgg) + [0] * len(lgg)

write(X, 'all.txt')

X, Y = np.array(X), np.array(Y)

skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=2018)

for k, (train_index, valid_index) in enumerate(skf.split(Y, Y)):
Example #18
        parse_week(url + '-kommende-woche',
                   canteen,
                   canteentype,
                   allergene=allergene,
                   zusatzstoffe=zusatzstoffe)
    if not today and type(next_week) is str:
        parse_week(url + next_week,
                   canteen,
                   canteentype,
                   allergene=allergene,
                   zusatzstoffe=zusatzstoffe)
    return canteen.toXMLFeed()


parser = Parser('ostniedersachsen',
                handler=parse_url,
                shared_prefix='http://www.stw-on.de')

sub = parser.sub('braunschweig', shared_prefix='/braunschweig/essen/menus/')
sub.define('mensa1-mittag',
           suffix='mensa-1',
           extra_args={'canteentype': 'Mittagsmensa'})
sub.define('mensa1-abend',
           suffix='mensa-1',
           extra_args={'canteentype': 'Abendmensa'})
sub.define('mensa360',
           suffix='360',
           extra_args={
               'canteentype': 'Pizza',
               'this_week': '-2',
               'next_week': '-nachste-woche'
Example #19
    while type(mensa_data) != Tag or mensa_data.name != 'div'\
            or 'tx-cagcafeteria-pi1' not in mensa_data.get('class', []):
        mensa_data = mensa_data.next_sibling
    weekDays = extractWeekDates(mensa_data.find('h2').text)
    for day_headline in mensa_data.find_all('h3'):
        date = weekDays[day_headline.text]
        day_table = day_headline.next_sibling.next_sibling
        for tr_menu in day_table.tbody.find_all('tr'):
            category = tr_menu.find_all('td')[0].text.strip()
            name = tr_menu.find_all('td')[1].text.replace('\r\n', ' ').strip()
            canteen.addMeal(date, category, name, [], tr_menu.find_all('td')[2].text)


def parse_url(url, mensa, *weeks, today):
    canteen = LazyBuilder()
    for week in weeks:
        parse_week(url + week, canteen, mensa)
        if today:
            break
    return canteen.toXMLFeed()


parser = Parser('marburg', handler=parse_url,
                shared_args=['http://www.studentenwerk-marburg.de/essen-trinken/speiseplan/'])
parser.define('bistro', args=['Speiseplan.*Bistro', 'diese-woche-bistro.html', 'naechste-woche-bistro.html'])
parser.define('mos-diner', args=['Speiseplan.*Diner', 'diese-woche-mos-diner.html'])
parser.define('erlenring', args=['Mensa Erlenring', 'diese-woche-mensa-erlenring-und-lahnberge.html',
              'naechste-woche-mensa-erlenring-und-lahnberge.html'])
parser.define('lahnberge', args=['Mensa Lahnberge', 'diese-woche-mensa-erlenring-und-lahnberge.html',
              'naechste-woche-mensa-erlenring-und-lahnberge.html'])
Example #20
                address.group("postcode"),
                address.group("city").strip(),
            )
            canteen.city = address.group("city").strip().capitalize()
        pass

    def load_data(self):
        # Cache the data for 15 min to not stress the API too much
        now = datetime.datetime.now()
        if self._data is None or now - self._data[1] > datetime.timedelta(minutes=15):
            content = urlopen(self.parser.shared_prefix).read()
            data = BeautifulSoup(content.decode('utf-8'), 'xml')
            self._data = (data, now)
        return self._data[0]

parser = Parser('ostniedersachsen', version="1.0", shared_prefix='http://api.stw-on.de/xml/mensa.xml')

braunschweig = parser.sub('braunschweig')
Canteen('mensa1-mittag', braunschweig, id="101", open_id="2")
Canteen('mensa1-abend', braunschweig, id="101", open_id="3")
Canteen('mensa360', braunschweig, id="111")
Canteen('mensa2', braunschweig, id="105")
Canteen('mensa2-cafeteria', braunschweig, id="106")
Canteen('hbk', braunschweig, id="120")
Canteen('bistro-nff', braunschweig, id="109")

Canteen('clausthal', parser, id="171")

hildesheim = parser.sub('hildesheim')
Canteen('uni', hildesheim, id="150")
Canteen('hohnsen', hildesheim, id="160")
Example #21
                .replace('Biogericht', 'Bio-/Aktionsgericht') \
                .strip()

            canteen.addMeal(date, category, name,
                            [legend.get(n, n) for n in notes],
                            prices.get(price_category, {}))

        date += datetime.timedelta(days=1)
        if today:
            break

    return canteen.toXMLFeed()


parser = Parser('muenchen',
                handler=parse_url,
                shared_prefix=base + '/speiseplan/')
parser.define('leopoldstrasse', suffix='speiseplan_{}_411_-de.html')
parser.define('martinsried', suffix='speiseplan_{}_412_-de.html')
parser.define('grosshadern', suffix='speiseplan_{}_414_-de.html')
parser.define('schellingstrasse', suffix='speiseplan_{}_416_-de.html')
parser.define('archisstrasse', suffix='speiseplan_{}_421_-de.html')
parser.define('garching', suffix='speiseplan_{}_422_-de.html')
parser.define('weihenstephan', suffix='speiseplan_{}_423_-de.html')
parser.define('lothstrasse', suffix='speiseplan_{}_431_-de.html')
parser.define('pasing', suffix='speiseplan_{}_432_-de.html')
parser.define('rosenheim', suffix='speiseplan_{}_441_-de.html')
parser.define('adalbertstrasse', suffix='speiseplan_{}_512_-de.html')
parser.define('cafeteria-garching', suffix='speiseplan_{}_524_-de.html')
parser.define('wst', suffix='speiseplan_{}_525_-de.html')
parser.define('akademie', suffix='speiseplan_{}_526_-de.html')
Example #22
        data = json.loads(response.read().decode())

    base_data = {}
    base_data['roles'] = {}

    base_data['notices'] = data['notices']
    for loc in data['locations']:
        if loc not in LOCATIONS:
            # Found an unknown location
            # Please consider updating the parser!
            sys.stderr.write('Unknown location: %s (displayName: %s)\n' %
                             (loc, data['locations'][loc]['displayName']))
    for role in data['priceTiers']:
        if role not in ROLES:
            # Found an unknown price tier
            # Please consider updating the parser!
            sys.stderr.write('Unknown price tier: %s (displayName: %s)\n' %
                             (role, data['priceTiers'][role]))
        else:
            base_data['roles'][role] = ROLES[role]

    return base_data


parser = Parser('saarland',
                handler=parse_url,
                shared_prefix=URL_BASE + URL_MENU)

for loc in LOCATIONS:
    parser.define(loc, suffix=loc)
Example #23
    # map roles
    roles = {'Studenten': 'student',
             'Bedienstete': 'employee',
             'Gäste': 'other'}
    for item in items:
        raw_role, price = item.text.split(':')
        if raw_role in roles:
            prices[roles[raw_role]] = price
    return prices


# name of canteens is suffix at the same time
canteens = ['mensa-universitaetsstrasse-duesseldorf',
            'mensa-kamp-lintfort',
            'mensa-campus-derendorf',
            'mensa-georg-glock-strasse-duesseldorf',
            'mensa-obergath-krefeld',
            'mensa-frankenring-krefeld',
            'mensa-sommerdeich-kleve',
            'mensa-rheydter-strasse-moenchengladbach',
            'restaurant-bar-campus-vita-duesseldorf',
            'essenausgabe-sued-duesseldorf',
            'kunstakademie-duesseldorf',
            'musikhochschule-duesseldorf']

parser = Parser('duesseldorf', handler=parse_url,
                shared_prefix='http://www.stw-d.de/gastronomie/speiseplaene/')

for canteen in canteens:
    parser.define(canteen, suffix=canteen)
Example #24
                                 r'\1, \3 \2', address)
        canteen.city = city.capitalize()
        canteen.location(*location.split(','))

    @Source.today_feed
    def today(self, request):
        self.parse_url(self.BASE_URL + self.suffix, True)
        return self.feed.toXMLFeed()

    @Source.full_feed
    def full(self, request):
        self.parse_url(self.BASE_URL + self.suffix, False)
        return self.feed.toXMLFeed()


parser = Parser('thueringen', version='1.0')
Canteen('ei-wartenberg', parser, suffix='eisenach/mensa-am-wartenberg-2.html')
Canteen('ef-nordhaeuser',
        parser,
        suffix='erfurt/mensa-nordhaeuser-strasse.html')
Canteen('ef-altonaer', parser, suffix='erfurt/mensa-altonaer-strasse.html')
Canteen('ef-schlueterstr',
        parser,
        suffix='erfurt/cafeteria-schlueterstrasse.html')
Canteen('ef-leipzigerstr',
        parser,
        suffix='erfurt/cafeteria-leipziger-strasse.html')
Canteen('ge-freundschaft',
        parser,
        suffix='gera/mensa-weg-der-freundschaft.html')
Canteen('il-ehrenberg', parser, suffix='ilmenau/mensa-ehrenberg.html')
Example #25
                    staff_price = getAndFormatPrice(price)
                elif 'guest' in item['class']:
                    guest_price = getAndFormatPrice(price)
        canteen.addMeal(wdate,
                        category,
                        description,
                        notes=supplies,
                        prices={
                            'student': student_price,
                            'employee': staff_price,
                            'other': guest_price
                        })


parser = Parser('dortmund',
                handler=parse_url,
                shared_prefix='https://www.stwdo.de/mensa-co/')

parser.define('tu-hauptmensa', suffix='tu-dortmund/hauptmensa/')
parser.define('tu-mensa-sued', suffix='tu-dortmund/mensa-sued/')
parser.define('tu-vital', suffix='tu-dortmund/vital/')
parser.define('tu-archeteria', suffix='tu-dortmund/archeteria/')
parser.define('tu-calla', suffix='tu-dortmund/restaurant-calla/')
parser.define('tu-food-fakultaet', suffix='tu-dortmund/food-fakultaet/')
parser.define('fh-mensa-max-ophuels-platz',
              suffix='fh-dortmund/max-ophuels-platz/')
parser.define('fh-mensa-sonnenstrasse', suffix='fh-dortmund/sonnenstrasse/')
parser.define('fh-kostbar', suffix='fh-dortmund/mensa-kostbar/')
parser.define('ism-mensa', suffix='ism/mensa-der-ism/')
parser.define('fernuni-mensa', suffix='hagen')
parser.define('fsw-snackit', suffix='fh-suedwestfalen/hagen/')
Example #26
                    food_type = parse_foot_type(tds[2])
                    food_description = get_foot_description(tds[3])
                    notes_string = build_notes_string(tds[3])
                    if (notes_string != ""):
                        notes.append(notes_string)
                    prices = get_pricing(tds, 4, 7)
                    canteen.addMeal(date, food_type, food_description, notes,
                                    prices, roles if prices else None)
            except Exception as e:
                traceback.print_exception(*sys.exc_info())

    return canteen.toXMLFeed()


parser = Parser(
    'erlangen_nuernberg',
    handler=parse_url,
    shared_prefix='http://www.studentenwerk.uni-erlangen.de/verpflegung/de/')
parser.define('er-langemarck', suffix='sp-er-langemarck.shtml')
parser.define('er-sued', suffix='sp-er-sued.shtml')
parser.define('n-schuett', suffix='sp-n-schuett.shtml')
parser.define('n-regens', suffix='sp-n-regens.shtml')
parser.define('n-stpaul', suffix='sp-n-stpaul.shtml')
parser.define('n-mensateria', suffix='sp-n-mensateria.shtml')
parser.define('n-hohfederstr', suffix='sp-n-hohfederstr.shtml')
parser.define('n-baerenschanzstr', suffix='sp-n-baerenschanzstr.shtml')
parser.define('eichstaett', suffix='sp-eichstaett.shtml')
parser.define('ingolstadt', suffix='sp-ingolstadt.shtml')
parser.define('ansbach', suffix='sp-ansbach.shtml')
parser.define('triesdorf', suffix='sp-triesdorf.shtml')
Example #27
            if price_div:
                for k, v in price_map.items():
                    price = price_div['data-' + k]
                    if price:
                        prices[v] = price
            canteen.addMeal(date, category, name, notes, prices)

        if closed_candidate and not canteen.hasMealsFor(date):
            canteen.setDayClosed(date)

    return canteen.toXMLFeed()


parser = Parser(
    'wuerzburg',
    handler=parse_url,
    shared_prefix=
    'https://www.studentenwerk-wuerzburg.de/essen-trinken/speiseplaene/plan/')
parser.define('austrasse', suffix='austrasse-bamberg.html')
parser.define('burse', suffix='burse-am-studentenhaus-wuerzburg.html')
parser.define('feldkirchenstrasse', suffix='feldkirchenstrasse-bamberg.html')
#parser.define('frankenstube', suffix='frankenstube-wuerzburg.html')
#parser.define('hubland', suffix='mensa-am-hubland-wuerzburg.html')
parser.define('studentenhaus', suffix='mensa-am-studentenhaus.html')
parser.define('aschaffenburg', suffix='mensa-aschaffenburg.html')
parser.define('augenklinik', suffix='mensa-roentgenring-wuerzburg.html')
parser.define('josef-schneider',
              suffix='mensa-josef-schneider-strasse-wuerzburg.html')
parser.define('schweinfurt', suffix='mensa-schweinfurt.html')
parser.define('mensateria',
              suffix='mensateria-campus-hubland-nord-wuerzburg.html')
Example #28
                    meal_data.find('span', 'price').text)
                canteen.addMeal(weekDays[i], category, name, list(set(notes)),
                                prices, ('student', 'employee', 'other'))
            i += 1


def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url, date.today(), canteen)
    if not today:
        parse_week(url, date.today() + date.resolution * 7, canteen)
    return canteen.toXMLFeed()


parser = Parser(
    'hamburg',
    handler=parse_url,
    shared_prefix='http://speiseplan.studierendenwerk-hamburg.de/de/')
parser.define('armgartstrasse', suffix='590')
parser.define('bergedorf', suffix='520')
parser.define('berliner-tor', suffix='530')
parser.define('botanischer-garten', suffix='560')
parser.define('bucerius-law-school', suffix='410')
parser.define('cafe-mittelweg', suffix='690')
parser.define('cafe-cfel', suffix='680')
parser.define('cafe-jungiusstrasse', suffix='610')
parser.define('cafe-alexanderstrasse', suffix='660')
parser.define('campus', suffix='340')
parser.define('finkenau', suffix='420')
parser.define('geomatikum', suffix='540')
parser.define('harburg', suffix='570')
parser.define('hcu', suffix='430')
Example #29
        self.avg = self.sum / self.count


if __name__ == '__main__':
    global args

    parser = argparse.ArgumentParser()
    parser.add_argument('-cfg', '--cfg', default='deepmedic_ce_all', type=str)
    parser.add_argument('-gpu', '--gpu', default='3', type=str)
    args = parser.parse_args()

    # hard-coded overrides of the CLI values
    args.cfg = 'deepmedic_nr'
    args.gpu = '3'

    args = Parser(args.cfg, log='test').add_args(args)

    # args.valid_list = 'train_0.txt'  # superseded by the assignment below

    args.data_dir = '/home/thuyen/Data/brats17/Brats17ValidationData'
    args.valid_list = 'test.txt'

    args.ckpt = 'model_last.tar'
    #args.ckpt = 'model_iter_227.tar'

    folder = os.path.splitext(args.valid_list)[0]
    out_dir = os.path.join('output', args.name, folder)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    args.out_dir = out_dir
Example #30
from __future__ import division
import __init__
import os
import os.path as osp
import torch
import argparse
import csv
from datasets.hico_api import Hico
from utils import Parser
from networks import models
from torch.autograd import Variable

import numpy as np
""" Parsing options """
args = argparse.ArgumentParser()
parser = Parser(args)
opt = parser.make_options()
""" Load dataset """
data_path = '{}/{}'.format(opt.data_path, 'hico')
image_path = '{}/{}/{}'.format(opt.data_path, 'hico', 'images')
cand_dir = '{}/{}/{}'.format(opt.data_path, 'hico', 'detections')

dset = Hico(data_path,
            image_path,
            opt.test_split,
            cand_dir=cand_dir,
            thresh_file=opt.thresh_file,
            add_gt=False,
            train_mode=False,
            jittering=False,
            nms_thresh=opt.nms_thresh)