Example #1
0
def main():
    """Run the demo: price each ingredient by sending the Surcharge visitor through it."""
    visitor = Surcharge()
    ingredients = [Sauce(2.00), Cheese(3.00), Dough(4.00)]

    # Same three prints, same order as before: sauce, cheese, dough.
    for ingredient in ingredients:
        print(ingredient.accept(visitor))
Example #2
0
 def categorize(item):
     """Map an inventory item's name to its category object (ItemCategory by default)."""
     special_categories = {
         "Aged Brie": Cheese,
         "Sulfuras, Hand of Ragnaros": Legendary,
         "Backstage passes to a TAFKAL80ETC concert": BackstagePass,
     }
     return special_categories.get(item.name, ItemCategory)()
def parseUSDA(library):
    """Parse the USDA SR22 flat files and add every cheese food to *library*.

    Scans FOOD_DES.txt for foods whose short description starts with
    'cheese', builds a Cheese named after the remainder of the long
    description, and stops early as soon as library.add() reports failure.
    """
    source = 'USDA'
    # The SR22 data set is expected in an 'sr22' folder next to this module.
    root = os.path.dirname(os.path.realpath(__file__))
    sr22 = os.path.join(root, 'sr22')

    food_descriptions = parseFdaFileUniqueDict(os.path.join(sr22, 'FOOD_DES.txt'))
    food_groups = parseFdaFileUniqueDict(os.path.join(sr22, 'FD_GROUP.txt'))  # NOTE(review): currently unused below
    nutritional_data = parseFdaFileArray(os.path.join(sr22, 'NUT_DATA.txt'))
    nutritional_definitions = parseFdaFileUniqueDict(os.path.join(sr22, 'NUTR_DEF.txt'))

    # Column orders within each parsed record (looked up via .index(name) below).
    food_descriptions_headers = ['FdGrp_Cd', 'Long_Desc', 'Shrt_Desc', 'ComName']
    nutritional_data_headers = ['Nutr_No', 'Nutr_Val', 'Num_Data_Pts', 'ComName']
    nutritional_definition_headers = ['Units', 'Tagname', 'NutrDesc']

    # .items() instead of Python-2-only .iteritems() so this also runs on Python 3.
    for ndb_no, food in food_descriptions.items():
        if ndb_no not in nutritional_data:
            continue
        nutritions = nutritional_data[ndb_no]
        short_food_names = food[food_descriptions_headers.index('Shrt_Desc')].split(',')
        long_food_names = food[food_descriptions_headers.index('Long_Desc')].split(',')

        # Only foods whose short description leads with "cheese" are imported.
        if short_food_names[0].lower() != 'cheese':
            continue

        # NOTE(review): value/units/name are overwritten each pass and never
        # attached to the Cheese — the nutrition import looks unfinished.
        # Kept so any KeyError on malformed data surfaces exactly as before.
        for nutrition in nutritions:
            nutritional_definition_index = nutrition[nutritional_data_headers.index('Nutr_No')]
            nutritional_definition = nutritional_definitions[nutritional_definition_index]
            value = nutrition[nutritional_data_headers.index('Nutr_Val')]
            units = nutritional_definition[nutritional_definition_headers.index('Units')]
            name = nutritional_definition[nutritional_definition_headers.index('NutrDesc')]

        cheese = Cheese()
        # Drop the leading "Cheese" token; the rest of the long description is the name.
        cheese.name = ' '.join(long_food_names[1:]).strip()

        if not library.add(cheese, source):
            break  # library rejected the entry: stop parsing

    return
Example #4
0
 def snakesNom():
     """Award points and respawn a cheese whenever a snake's head touches one.

     NOTE(review): mutates GameServer.__cheeses (del + append) while the inner
     loop is still indexing it; the list length stays constant so the indices
     remain valid, but the freshly spawned cheese may be collision-checked in
     the same pass — confirm that is intended. The __ names are mangled to the
     enclosing class, presumably GameServer itself — TODO confirm.
     """
     # Check every snake against every cheese on the playfield.
     for sidx in range(len(GameServer.__snakes)):
         for cidx in range(len(GameServer.__cheeses)):
             # position()[0] is presumably the head segment's (x, y) — TODO confirm.
             if (GameServer.__cheeses[cidx].collide_with(
                     GameServer.__snakes[sidx].position()[0][0],
                     GameServer.__snakes[sidx].position()[0][1])):
                 GameServer.__snakes[sidx].add(10)  # 10 points per cheese eaten
                 # Replace the eaten cheese with a new one on the playfield.
                 del GameServer.__cheeses[cidx]
                 GameServer.__cheeses.append(
                     Cheese(GameServer.__playfield[1],
                            GameServer.__playfield[2]))
                 logging.info("NOM!")
 def setRandomItems(self, number):
     """Add random items (coins, cheeses, books) and maybe a key to this container.

     NOTE(review): the loop runs number - 1 times, so one fewer item than
     *number* is added — confirm the off-by-one is intended.
     """
     for x in range(number - 1):
         item = None
         itemType = random.randint(1, 4)
         if itemType == 1:
             item = Coin()
         elif itemType == 2:
             item = Cheese()
         elif itemType == 3:
             item = Book()
         # itemType == 4 leaves item as None, so addItem(None) is called —
         # presumably an intentional "empty slot"; TODO confirm addItem handles None.
         self.addItem(item)
     if random.randint(0, 5) == 5:  # NOTE(review): 1/6 chance (0..5 inclusive), not the 1/5 the old comment claimed
         self.items.append(Key())
def parseCheese(library):
    """Scrape cheese.com and add each cheese found to *library*.

    Three phases, the first two disabled by default (their output is assumed
    to be cached on disk from earlier runs):
      1. download the paginated alphabetical index pages per letter,
      2. download one detail page per cheese type linked from the index,
      3. parse every cached detail page into a Cheese object.
    Stops early as soon as library.add() reports failure.
    """
    source = 'http://www.cheese.com'
    url = 'http://www.cheese.com/alphabetical/?page=%d&per_page=20&i=%s#top'
    out_folder = os.path.join(get_output_folder(), "cheese")
    letters = map(chr, range(97, 123))  # 'a'..'z'

    # Phase 1: fetch index pages until one repeats (== walked past the last page).
    findCheeseTypes = False
    if findCheeseTypes:
        for letter in letters:
            lastPage = False
            lastPageData = ""
            currentPage = 0
            while not lastPage:
                currentUrl = url % (currentPage, letter)
                outputFile = os.path.join(out_folder, "cheese_%s_page%d.html" % (letter, currentPage))
                if os.path.exists(outputFile):
                    # Already cached: remember its contents for duplicate detection.
                    with codecs.open(outputFile, encoding='utf-8') as cached:
                        lastPageData = cached.read()
                    currentPage += 1
                else:
                    print('Parsing %s' % currentUrl)
                    html = urlopen(currentUrl)
                    soup = BeautifulSoup(html, 'html.parser')
                    data = soup.prettify()
                    if data == lastPageData:
                        # Identical to the previous page: past the end of the listing.
                        lastPage = True
                    else:
                        with open(outputFile, "w") as out:
                            out.write(data.encode('utf8'))
                        currentPage += 1
                        lastPageData = data

                    # Be polite to the server between requests.
                    time.sleep(randint(0, 5))

    # Phase 2: fetch the per-cheese detail pages linked from the cached index.
    parseCheeseFiles = False
    if parseCheeseFiles:
        # os.path.join instead of a hard-coded "\\" separator (was Windows-only).
        cheesesFiles = glob.glob(os.path.join(out_folder, "cheese*.html"))
        for cheeseFile in cheesesFiles:
            with open(cheeseFile, 'r') as index_file:
                data = index_file.read()
            soup = BeautifulSoup(data, 'html.parser')
            for div in soup.findAll("div", class_='unit'):
                cheese_href = div.a["href"]  # renamed from `type` (shadowed builtin)
                currentUrl = 'http://www.cheese.com' + cheese_href
                outputFile = os.path.join(out_folder, "type_%s.html" % cheese_href[1:-1])
                if os.path.exists(outputFile):
                    print('Skipping %s' % outputFile)
                else:
                    succeeded = False
                    while not succeeded:
                        try:
                            print('Parsing %s' % currentUrl)
                            html = urlopen(currentUrl)
                            soup = BeautifulSoup(html, 'html.parser')
                            data = soup.prettify()
                            with open(outputFile, "w") as out:
                                out.write(data.encode('utf8'))
                            succeeded = True
                        except Exception:  # narrowed from bare except; still best-effort retry
                            print("Failed to parse URL. Trying again...")
                        sleepTime = randint(0, 5)
                        time.sleep(sleepTime)

    # Phase 3: parse every cached detail page into a Cheese object.
    cheeseTypesFiles = glob.glob(os.path.join(out_folder, "type_*.html"))

    for cheeseTypeFile in cheeseTypesFiles:
        with codecs.open(cheeseTypeFile) as detail_file:
            data = detail_file.read()
        soup = BeautifulSoup(data, 'html.parser')
        unit = soup.find("div", class_='unit')

        cheese = Cheese()
        tmb = unit.find("div", class_='tmb')
        if tmb is not None:
            cheese.image = tmb.a.img["src"]
            credits_div = tmb.find("div")
            if credits_div is not None:
                cheese.image_credits = credits_div.getText().strip()
        cheese.name = unit.h3.getText().strip()
        cheese.summary = unit.find("div", class_='summary').p.text.strip()
        # Each <li> is a "Label: value" attribute line; dispatch on the label.
        for detail in unit.find("ul").findAll("li"):
            body = detail.text.strip()
            if "Made from" in body:
                cheese.made_from = strip_whitespace(body.replace('Made from', ''))
            elif "Country of" in body:
                cheese.origin = strip_whitespace(body.replace('Country of origin:', ''))
            elif "Type:" in body:
                cheese.type = [s.strip() for s in strip_whitespace(body.replace('Type:', '')).split(',')]
            elif "Fat content" in body:
                cheese.fat = strip_whitespace(body.replace('Fat content:', ''))
            elif "Texture:" in body:
                cheese.texture = strip_whitespace(body.replace('Texture:', ''))
            elif "Rind:" in body:
                cheese.rind = strip_whitespace(body.replace('Rind:', ''))
            elif "Colour:" in body:
                cheese.color = strip_whitespace(body.replace('Colour:', ''))
            elif "Flavour" in body:
                cheese.flavor = [s.strip() for s in strip_whitespace(body.replace('Flavour:', '')).split(',')]
            elif "Aroma" in body:
                cheese.aroma = [s.strip() for s in strip_whitespace(body.replace('Aroma:', '')).split(',')]
            elif "Vegetarian" in body:
                cheese.vegetarian = strip_whitespace(body.replace('Vegetarian:', ''))
            elif "Producers" in body:
                cheese.producers = strip_whitespace(body.replace('Producers:', ''))
            elif "Synonyms" in body:
                cheese.synonyms = [s.strip() for s in strip_whitespace(body.replace('Synonyms:', '')).split(',')]

        # TODO: Download the JPG for the images

        if not library.add(cheese, source):
            break  # library rejected the entry: stop parsing

    # We are done and have parsed all the cheeses!
    return
def parseCheeseLibrary(library):
    """Scrape cheeselibrary.com and add each cheese found to *library*.

    Python 2 only (uses the `unicode` builtin). Walks the site's index
    table, collects one detail page per cheese, then parses each page's
    loosely structured "Label: value" text into a Cheese object. Stops
    early as soon as library.add() reports failure.
    """
    source = 'http://www.cheeselibrary.com/'
    url = source + 'library_of_cheese.html'

    # Collect the index pages linked from the main library table.
    pages = []
    soup = get_cached_page(url, getOutputFolder())
    table = soup.find("table", class_='table_d2e201 usertable main_table')
    for link in table.findAll("a"):
        page = link["href"]
        pages.append(source + page)

    # Collect one detail-page URL per cheese from each index page.
    cheese_pages = []
    for page in pages:
        soup = get_cached_page(page, getOutputFolder())
        table = soup.find("table", class_='usertable')
        if table != None:
            links = table.findAll("a")
            for link in links:
                page = link["href"]
                cheese_pages.append(source + page)
        else:
            print("Failed to find cheese information for " + page)
    
    for cheese_page in cheese_pages:
        soup = get_cached_page(cheese_page, getOutputFolder())
        cheese = Cheese()
        # The page layout is positional: third top-level row of the body table
        # holds the cheese data; its first nested row is the name.
        master_table = soup.find("body").table.findAll("tr", recursive=False)[2]
        master_data_table = master_table.find("table").findAll("tr", recursive=False)
        cheese.name = strip_whitespace(master_data_table[0].text)

        cheese_data_and_summary = master_data_table[1].findAll("td")
        cheese_data_divs = cheese_data_and_summary[0].findAll("div")
        # NOTE(review): if no div contains "Country:", cheese_data is never
        # bound and the .next_element access below raises NameError — TODO guard.
        for div in cheese_data_divs:
            if "Country:" in div.text:
                cheese_data = div
                break
        # Walk the document in reading order, accumulating text into
        # chunk_data; when a new "Label:" run starts, flush the previous
        # chunk as one "label: value" pair.
        chunk_data = ""
        root = cheese_data.next_element
        while root != None:
            root_text = ''
            if isinstance(root, bs4.Tag):
                root_text = root.text
            elif isinstance(root, bs4.NavigableString):
                root_text = unicode(root);
            if ':' in root_text and \
                ':' in chunk_data and \
                not root_text in chunk_data:
                data = strip_whitespace(chunk_data)
                data_split = data.split(':')
                type = strip_whitespace(data_split[0]).lower()
                value = strip_whitespace(data_split[1])
                if type == 'country':
                    cheese.origin = value
                elif type == 'region':
                    cheese.region = value
                elif type == 'texture':
                    cheese.texture = value
                elif type == 'type of':
                    cheese.made_from = value
                elif type == 'aging':
                    cheese.age = value
                elif type == 'pasteurized':
                    cheese.texture = value  # NOTE(review): overwrites texture; probably meant cheese.pasteurized — confirm
                chunk_data = ''
            # Skip text already seen (nested tags repeat their children's text)
            # and the combined "Aging Time: ... Texture: ..." container blob.
            if (not root_text in chunk_data) and not ("Aging Time:" in root_text and "Texture:" in root_text):
                chunk_data += root_text 
            # The cheese image marks the end of the data region.
            if isinstance(root, bs4.Tag) and root.name == 'img':
                break
            root = root.next_element

        cheese.description = strip_whitespace(cheese_data_and_summary[1].text)

        if not library.add(cheese, source):
            break  # library rejected the entry: stop parsing

    return
    
import weakref
from cheese import Cheese

# Demo: map cheese kind -> Cheese object without keeping the cheeses alive.
# WeakValueDictionary entries disappear automatically once the last strong
# reference to the value is gone.
stock = weakref.WeakValueDictionary()
catalog = [
    Cheese('Red Leicester'),
    Cheese('Tilsit'),
    Cheese('Brie'),
    Cheese('Parmesan')
]

for cheese in catalog:
    stock[cheese.kind] = cheese

# All four kinds are listed here: catalog still holds strong references.
sorted(stock.keys())

del catalog
# On CPython (immediate refcount collection) only one kind survives: the
# for-loop variable `cheese` still references the last catalog entry.
sorted(stock.keys())

del cheese
# Now no strong references remain, so the dictionary is empty.
sorted(stock.keys())
 def _fire_cheese_(self):
     """Spawn one more cheese sprite if the configured limit allows it."""
     at_capacity = len(self.cheeses) >= self.settings.cheese_allowed
     if not at_capacity:
         self.cheeses.add(Cheese(self))
Example #10
0
 def __init__(self):
     """Seed the shared cheese list with a single cheese on first construction."""
     if not GameServer.__cheeses:
         GameServer.__cheeses.append(
             Cheese(GameServer.__playfield[1], GameServer.__playfield[2]))