def __init__(self):
    self.packages = {}

    self.logger = ourlogging.otherLogger("Package Manager")

    # Load every entry in the packages directory and keep only real package
    # directories: names that begin with a single underscore (entries starting
    # with a double underscore, or with no underscore, are skipped).
    self.allPackNames = [name for name in os.listdir('.\\packages\\')
                         if name.startswith('_') and not name.startswith('__')]
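
# A small hypothetical illustration of the underscore filter above (the sample
# names are made up; the class also assumes os and ourlogging are imported at
# module level):
#
#     >>> names = ['_chrome', '__init__', 'README.txt']
#     >>> [n for n in names if n.startswith('_') and not n.startswith('__')]
#     ['_chrome']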
import urllib2
import re
import calendar
import ourlogging
from BeautifulSoup import BeautifulSoup

logger = ourlogging.otherLogger("packages.util")

def getPage(url):
    """Returns the contents of a url as a string.

    This currently doesn't do anything to handle exceptions.

    @param url The url to grab a page from.
    @return A string containing the page contents of url.
    """
    try:
        f = urllib2.urlopen(url)
        page = f.read()
        f.close()
    except urllib2.URLError:
        logger.warning("Couldn't connect to and read from %s" % url)
    except:
        logger.warning('Unknown error running getPage(%s)' % url)
        raise
    else:
        return page
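
# Hypothetical usage of getPage (the URL below is only a placeholder): on a
# urllib2.URLError the function logs a warning and returns None, so callers
# should check for None before handing the page to BeautifulSoup.
if __name__ == '__main__':
    page = getPage('http://example.com/')
    if page is not None:
        soup = BeautifulSoup(page)
        print soup.title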
    
def scrapePage(reg, url):
    """Scrapes the page at url for matches of the regular expression reg."""