Example #1
def ssid_test(ssid, session_mode):

    # Silence output from Twill commands
    f = open(os.devnull, "w")
    twill.set_output(f)

    # Generate File names for diff
    file_name = ssid + '.html'
    generated_html_path = 'output/ssid/' + file_name
    expected_html_path = 'expected/ssidForm/' + file_name

    # Start with a fresh page every time
    go(url)

    print '\n**Testing SSID of ' + ssid + '**'

    code(200)
    # Fill the HTML forms with test values and submit
    fv("1", "ssid", ssid)
    fv("1", "session_mode", session_mode)

    submit('0')
    save_html(generated_html_path)

    # Diff with HTML page we know should 'come back'
    command = 'diff {0} {1}'.format(generated_html_path, expected_html_path)
    result = subprocess.call(command.split(), shell=False)

    if result != 0:
        print 'Test failed'
    else:
        print 'Test Passed'
Example #2
def _grab_remote_html(url):

  global base_url, data_output, screen_output

  twill.commands.clear_cookies()
  twill.commands.agent('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.65 Safari/537.36')
  twill.commands.go(base_url + url)

  # Ensure we get a 200 http status code back
  try:
    code_response = twill.commands.code(200)
  except:
    code_response = ""

  # Step into the page and capture the HTML
  if code_response is None:

    # Reset buffer
    data_output.seek(0)
    data_output.truncate(0)

    # Change sys output to capture output in a variable
    sys.stdout = data_output
    twill.set_output(data_output)

    # Grab the HTML data, which will be stored in data_output
    twill.commands.show()

    # Change the sys output back to the screen, now we have captured the data
    sys.stdout = screen_output
    twill.set_output(screen_output)

  return data_output
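This function redirects both sys.stdout and twill's own output into a shared StringIO buffer before calling show(). Several of the later examples (e.g. #7 and #39) get by with set_output() alone; here is a minimal sketch of that lighter capture step, with a placeholder URL, assuming (as Example #7 does) that twill routes show()'s output through the stream given to set_output:

import StringIO

import twill
import twill.commands as tc

buf = StringIO.StringIO()
twill.set_output(buf)           # twill command output now goes into buf
tc.go('http://example.com/')    # placeholder URL
tc.show()                       # writes the current page's HTML into buf
html = buf.getvalue()
twill.set_output(None)          # restore output to the screen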
Example #3
    def __init__(self):
        Web2UnitTest.__init__(self)
        self.b = get_browser()
        self.b_data = StringIO()
        set_output(self.b_data)

        # list of links that return a http_code other than 200
        # with the key being the URL and the value the http code
        self.brokenLinks = dict()
        # List of links visited (key) with the depth
        self.urlList = dict()
        # List of urls for each model
        self.model_url = dict()
        # This string must exist in the URL for it to be followed
        # Useful to avoid going to linked sites
        self.homeURL = self.url
        # Tuple of strings that if in the URL will be ignored
        # Useful to avoid dynamic URLs that trigger the same functionality
        self.include_ignore = (
            "_language=",
            "/admin/default/",
        )
        # tuple of strings that should be removed from the URL before storing
        # Typically this will be some variables passed in via the URL
        self.strip_url = ("?_next=", )
        self.maxDepth = 2  # sanity check
Example #4
    def setAgent(self, agentAcronym):
        # Decide on the agent that will be used to power the smoke test
        if agentAcronym == "g":
            self.agent = "Ghost"
            try:
                from ghost import Ghost
                self.ghost = Ghost(wait_timeout = 360)
            except ImportError:
                raise NameError("Ghost not installed")

            from using_ghost import login, visit

        else:
            self.agent = "Twill"
            try:
                from twill import get_browser
                from twill import set_output
            except ImportError:
                raise NameError("Twill not installed")

            try:
                import mechanize
            except ImportError:
                raise NameError("Mechanize not installed")

            self.b = get_browser()
            self.b_data = StringIO()
            set_output(self.b_data)

            from using_twill import login, visit

        self.visit = MethodType(visit, self)
        self.login = MethodType(login, self)
Example #5
    def __init__(self):
        self.twill_browser = twill.get_browser()

        if not settings.get('verbose', True):
            twill.set_output(open(os.devnull, 'w'))
            #twill.browser.OUT = open(os.devnull, 'w')

        # Handle HTTP authentication
        if settings.get('http_auth_username', None) and settings.get('http_auth_password', None):
            base64string = base64.encodestring('%s:%s' % (settings['http_auth_username'], settings['http_auth_password'])).replace('\n', '')
            twill.commands.add_auth("wiki", settings['mediawiki_url'], settings['http_auth_username'], settings['http_auth_password'])
            #self.twill_browser._session.headers.update([("Authorization", "Basic %s" % base64string)])
            twill.commands.add_extra_header("Authorization", "Basic %s" % base64string)

        # Handle Mediawiki authentication
        if settings.get('mediawiki_username', None) and settings.get('mediawiki_password', None):
            login_url = urlparse.urljoin(settings['mediawiki_url'], '/index.php?title=Special:UserLogin')
            self.openurl(login_url)

            self._set_form_value('userlogin', 'wpName', settings.get('mediawiki_username'))
            self._set_form_value('userlogin', 'wpPassword', settings.get('mediawiki_password'))

            self.twill_browser.submit()

        self.openurl(settings['mediawiki_url'])
Example #6
 def __init__(self):
     Web2UnitTest.__init__(self)
     self.b = get_browser()
     self.b_data = StringIO()
     set_output(self.b_data)
     self.clearRecord()
     # This string must exist in the URL for it to be followed
     # Useful to avoid going to linked sites
     self.homeURL = self.url
     # Link used to identify a URL to a ticket
     self.url_ticket = "/admin/default/ticket/"
     # Tuple of strings that if in the URL will be ignored
     # Useful to avoid dynamic URLs that trigger the same functionality
     self.include_ignore = ("_language=",
                            "logout",
                            "appadmin",
                            "admin",
                            "delete",
                           )
     # tuple of strings that should be removed from the URL before storing
     # Typically this will be some variables passed in via the URL
     self.strip_url = ("?_next=",
                       )
     self.reportOnly = False
     self.maxDepth = 16 # sanity check
     self.setThreshold(10)
     self.setUser("[email protected]/eden")
     self.total_visited = 0
     self.broken_links_count = 0
Example #7
 def add_class(self, unique_number):
     class_url = self.url + '/' + unique_number
     tc.go(class_url)
     html = StringIO.StringIO()
     twill.set_output(html)
     tc.show()
     soup = BeautifulSoup(html.getvalue())
     table = soup.find('table')
     for row in table.findAll('tr')[1:]:
         columns = row.findAll('td')
         unique = columns[0].string
         days = [d.text for d in columns[1].findAll('span')]
         hour = [d.text for d in columns[2].findAll('span')]
         room = [d.text for d in columns[3].findAll('span')]
         instructor = columns[4].span.text
         new_course = Course(unique, days, hour, room, instructor)
         if self._check_planner_to_add(new_course):
             self.course_set.add(new_course)
             days_to_add = new_course.parse_days()
             hours_to_add = new_course.parse_hours()
             for d in range(len(days_to_add)):
                 for h in range(hours_to_add[d][0], hours_to_add[d][1]):
                     for day in days_to_add[d]:
                         self.grid[h][day] = new_course
             print("Course successfully added.")
Example #8
    def __init__(self):
        Web2UnitTest.__init__(self)
        self.b = get_browser()
        self.b_data = StringIO()
        set_output(self.b_data)

        # list of links that return a http_code other than 200
        # with the key being the URL and the value the http code
        self.brokenLinks = dict()
        # List of links visited (key) with the depth
        self.urlList = dict()
        # List of urls for each model
        self.model_url = dict()
        # This string must exist in the URL for it to be followed
        # Useful to avoid going to linked sites
        self.homeURL = self.url
        # Tuple of strings that if in the URL will be ignored
        # Useful to avoid dynamic URLs that trigger the same functionality 
        self.include_ignore = ("_language=",
                               "/admin/default/",
                              )
        # tuple of strings that should be removed from the URL before storing
        # Typically this will be some variables passed in via the URL 
        self.strip_url = ("?_next=",
                          )
        self.maxDepth = 2 # sanity check
Example #9
    def wiki_lookup(self):
        wiki = twill.commands
        out = StringIO()
        twill.set_output(out)
        res = wiki.go('http://en.wikipedia.org/wiki/Main_Page')
        res = wiki.formvalue(1, "searchInput", self.name)
        res = wiki.tidy_ok()
        res = wiki.submit(0)
        data = (wiki.get_browser().get_html())

        out = open('tmp', 'a')
        i = 0
        go = False
        for line in data.split('\n'):
            soep = BeautifulSoup(line)
            for line in soep.popTag():
                try:
                    for div in line.findAll('div').pop():
                        if (div.encode().strip().startswith('may refer to:')):
                            desc = str(line.findAll('div')[0]).split(
                                '>')[13].split('<')[0].strip()
                            link = str(line.findAll('a')[0]).replace(
                                '/wiki/', 'http://en.wikipedia.org/wiki/')
                            return (desc, link)
                except:
                    pass
        return (False)
Example #10
 def __init__(self):
     Web2UnitTest.__init__(self)
     self.b = get_browser()
     self.b_data = StringIO()
     set_output(self.b_data)
     self.clearRecord()
     # This string must exist in the URL for it to be followed
     # Useful to avoid going to linked sites
     self.homeURL = self.url
     # Link used to identify a URL to a ticket
     self.url_ticket = "/admin/default/ticket/"
     # Tuple of strings that if in the URL will be ignored
     # Useful to avoid dynamic URLs that trigger the same functionality
     self.include_ignore = (
         "_language=",
         "logout",
         "appadmin",
         "admin",
         "delete",
     )
     # tuple of strings that should be removed from the URL before storing
     # Typically this will be some variables passed in via the URL
     self.strip_url = ("?_next=", )
     self.reportOnly = False
     self.maxDepth = 16  # sanity check
     self.setThreshold(10)
     self.setUser("[email protected]/eden")
Example #11
def timeout_test(timeout, time_unit):

    # Silence output from Twill commands
    f = open(os.devnull, "w")
    twill.set_output(f)

    # Generate File names for diff
    # e.g. output/10seconds.html, expected/sessionsForm/10seconds.html
    file_name = `timeout` + time_unit + '.html'
    generated_html_path = 'output/timeout/' + file_name
    expected_html_path = 'expected/sessionsForm/' + file_name

    # Start with a fresh page every time
    go(url)

    print '\n**Testing timeout of ' + `timeout` + ' ' + time_unit + '**'

    code(200)
    # Fill the HTML forms with test values and submit
    fv("1", "timeout", ` timeout `)
    fv("1", "timeUnit", time_unit)
    submit('0')
    save_html(generated_html_path)

    # Diff with HTML page we know should 'come back'
    command = 'diff {0} {1}'.format(generated_html_path, expected_html_path)
    result = subprocess.call(command.split(), shell=False)

    if result != 0:
        print 'Test failed'
    else:
        print 'Test Passed'
Example #12
 def tearDown(self):
     import twill
     import twill.commands
     twill.commands.reset_browser()
     twill.remove_wsgi_intercept('localhost', 6543)
     twill.set_output(None)
     testing.tearDown()
Example #13
    def setUp(self):
        '''Create the app'''
        test_path = os.path.abspath(os.path.dirname(__file__))
        testpath_command = "setglobal test_path " + test_path
        twill.execute_string(testpath_command)

        fixtures = os.path.join(test_path, 'fixtures')
        for to_delete in [fname for fname in os.listdir(fixtures)
                          if fname.startswith('Data.fs') or fname in ['blobs']]:
            _rm(os.path.join(fixtures, to_delete))
        os.mkdir(os.path.join(fixtures, 'blobs'))
        wsgi_app = get_app(os.path.join(test_path, 'fixtures', 'karl.ini'),
                           'main')

        def build_app():
            return wsgi_app

        twill.add_wsgi_intercept('localhost', 6543, build_app)
        # XXX How do we suppress the annoying "AT LINE: " output?
        twill.set_output(open('/dev/null', 'wb'))
        twill.execute_string("extend_with karl.twillcommands")

        # mostly the same as karl3.conf without extending with flunc
        # and few other adjustments.

        twill.execute_string("runfile '" +
                             os.path.abspath(os.path.dirname(__file__)) +
                             "/test_twill_wsgi_karl3.conf'")
Example #14
def ssid_test(ssid, session_mode):

    # Silence output from Twill commands
    f = open(os.devnull,"w")
    twill.set_output(f)

    # Generate File names for diff
    file_name = ssid + '.html'
    generated_html_path = 'output/ssid/' + file_name 
    expected_html_path = 'expected/ssidForm/' + file_name

    # Start with a fresh page every time
    go(url)

    print '\n**Testing SSID of ' + ssid + '**'

    code(200)
    # Fill the HTML forms with test values and submit
    fv("1","ssid",ssid)
    fv("1","session_mode",session_mode)

    submit('0')
    save_html(generated_html_path)

    # Diff with HTML page we know should 'come back'
    command = 'diff {0} {1}'.format(generated_html_path, expected_html_path)
    result = subprocess.call(command.split(), shell=False)

    if result != 0:
        print 'Test failed'
    else:
        print 'Test Passed'
Example #15
def timeout_test(timeout, time_unit):

    # Silence output from Twill commands
    f = open(os.devnull,"w")
    twill.set_output(f)

    # Generate File names for diff
    # e.g. output/10seconds.html, expected/sessionsForm/10seconds.html
    file_name = `timeout` + time_unit + '.html'
    generated_html_path = 'output/timeout/' + file_name 
    expected_html_path = 'expected/sessionsForm/' + file_name

    # Start with a fresh page every time
    go(url)

    print '\n**Testing timeout of ' + `timeout` + ' ' + time_unit + '**'

    code(200)
    # Fill the HTML forms with test values and submit
    fv("1","timeout",`timeout`)
    fv("1","timeUnit",time_unit)
    submit('0')
    save_html(generated_html_path)

    # Diff with HTML page we know should 'come back'
    command = 'diff {0} {1}'.format(generated_html_path, expected_html_path)
    result = subprocess.call(command.split(), shell=False)

    if result != 0:
        print 'Test failed'
    else:
        print 'Test Passed'
Example #16
 def __enter__(self):
     twill.set_output(StringIO.StringIO())
     twill.commands.clear_cookies()
     twill.add_wsgi_intercept(self.host, 
                              self.port, 
                              lambda: self.app)
 
     return self
Example #17
    def _pre_setup(self):
        super(TwillTestCase, self)._pre_setup()
        twill.set_output(StringIO.StringIO())
        twill.commands.clear_cookies()
        twill.add_wsgi_intercept(self.twill_host, self.twill_port,
                                 lambda: self.app)

        self.browser = twill.get_browser()
Example #18
def reset_output():
    """
    >> reset_output

    Reset twill output to go to the screen.
    """
    import twill
    twill.set_output(None)
Example #19
def reset_output():
    """
    >> reset_output

    Reset twill output to go to the screen.
    """
    import twill
    twill.set_output(None)
Example #20
    def __enter__(self):
        twill.set_output(StringIO.StringIO())
        twill.commands.clear_cookies()
        twill.add_wsgi_intercept(self.host,
                                 self.port,
                                 lambda: self.app)

        return self
Example #21
def redirect_output(filename):
    """
    >> redirect_output <filename>

    Append all twill output to the given file.
    """
    import twill
    fp = open(filename, 'a')
    twill.set_output(fp)
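This redirect_output command and the reset_output command shown in Examples #18 and #19 are two halves of the same switch. A minimal sketch of how a script might pair them, with a placeholder log file name and URL:

import twill
from twill.commands import go, code

fp = open('twill.log', 'a')     # placeholder log file
twill.set_output(fp)            # append all twill output to the file
go('http://example.com/')       # placeholder URL
code(200)                       # assert the page returned HTTP 200
twill.set_output(None)          # reset output back to the screen
fp.close()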
Example #22
 def _pre_setup(self):
     super(TwillTestCase, self)._pre_setup()
     twill.set_output(StringIO.StringIO())
     twill.commands.clear_cookies()
     twill.add_wsgi_intercept(self.twill_host, 
                              self.twill_port, 
                              lambda: self.app)
 
     self.browser = twill.get_browser()
Example #23
def redirect_output(filename):
    """
    >> redirect_output <filename>

    Append all twill output to the given file.
    """
    import twill
    fp = open(filename, 'a')
    twill.set_output(fp)
Example #24
    def __init__(self):
        self.username = USERNAME
        self.password = PASSWORD
        self.resourses = defaultdict(int)
        self.fields = defaultdict(list)
        self.farms = []

        # suppress twill output
        f = open(os.devnull, "w")
        set_output(f)
Example #25
    def __init__(self):
        self.username = USERNAME
        self.password = PASSWORD
        self.resourses = defaultdict(int)
        self.fields = defaultdict(list)
        self.farms = []

        # suppress twill output
        f = open(os.devnull, "w")
        set_output(f)
Example #26
 def execute_script(self):
     """ Executes twill script. Returns a tuple status, output """
     out = StringIO()
     # execute the twill, catching any exceptions
     try:
         twill.set_errout(out)
         twill.set_output(out)
         twill.parse._execute_script(self.watch.script.split("\n"))
         status = STATUS_OK
     except Exception, e:
         status = STATUS_FAILED
Example #27
    def tearDown(self):
        # remove intercept
        twill.remove_wsgi_intercept('localhost', 6543)
        twill.set_output(None)

        test_path = os.path.abspath(os.path.dirname(__file__))
        fixtures = os.path.join(test_path, 'fixtures')
        for to_delete in [fname for fname in os.listdir(fixtures)
                          if fname.startswith('Data.fs') or fname in
                          ['blobs', 'mail_queue']]:
            _rm(os.path.join(fixtures, to_delete))
Example #28
def before_all(context):
    context.baseurl = 'http://127.0.0.1:8000'
    twill.set_output(StringIO.StringIO())
    twill.commands.clear_cookies()
    context.app = beer_app.create_app()
    context.app.config['TESTING'] = True
    # fn=(lambda: context.app) ensures the WSGI app object is created only once,
    # because the function passed into wsgi_intercept is called
    # once for each intercepted connection.
    # More here: http://ivory.idyll.org/articles/twill-and-wsgi_intercept.html
    twill.add_wsgi_intercept('127.0.0.1', 8000, lambda : context.app)
    context.browser = twill.get_browser()
Example #29
def fetch_disease_model(id):
    from twill import set_output
    set_output(open('/dev/null', 'w'))

    dismod_server_login()

    twc.go(DISMOD_DOWNLOAD_URL % id)
    result_json = twc.show()
    twc.get_browser()._browser._response.close()  # end the connection, so that apache doesn't get upset

    dm = DiseaseJson(result_json)
    return dm
Example #30
def scrape(url):

  global start_count, end_count, data_output, screen_output, query_url, page_links, sleep_time

  # Start configuring twill
  twill.commands.clear_cookies()
  twill.commands.agent('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.65 Safari/537.36')
  twill.commands.go(url)

  # Ensure we get a 200 http status code back
  try:
    code_response = twill.commands.code(200)
  except:
    code_response = ""

  # Step into the html and extract the links
  if code_response is None:

    # Change sys output to capture output in a variable
    sys.stdout = data_output
    twill.set_output(data_output)

    # Grab the page links, which will be stored in data_output
    twill.commands.showlinks()

    # Change the sys output back to the screen, now we have captured the data
    sys.stdout = screen_output
    twill.set_output(screen_output)

    # Split data up using new line char
    page_links_raw = data_output.getvalue().split("\n")

    # Loop through each row and look for a hyperlink
    for item in page_links_raw:

      # Find http in string
      httpString = item.find(query_url)

      # Add url to the array if not already
      if httpString != -1:
        page_links.append(item[httpString:])

    # Goto the next page url
    start_count = start_count + 10

    if start_count <= end_count:

      # Wait "sleep_time" seconds before visiting the next page
      sleep(sleep_time)

      # Recursive call, visit the next page
      scrape(base_url + str(start_count))
Example #31
 def setUp(self):
     super(AdminSurveyTwillTest, self).setUp()
     self.old_propagate = settings.DEBUG_PROPAGATE_EXCEPTIONS
     settings.DEBUG_PROPAGATE_EXCEPTIONS = True
     signals.request_finished.disconnect(close_connection)
     twill.set_output(StringIO())
     twill.add_wsgi_intercept(TWILL_TEST_HOST, 80, WSGIHandler)
     self.browser = twill.get_browser()
     self.browser.go(reverse_for_twill('admin:index'))
     twill.commands.formvalue(1, 'username', self.username)
     twill.commands.formvalue(1, 'password', self.pw)
     self.browser.submit()
     twill.commands.find('Welcome')
Example #32
def fetch_disease_model(id):
    from twill import set_output

    set_output(open("/dev/null", "w"))

    dismod_server_login()

    twc.go(DISMOD_DOWNLOAD_URL % id)
    result_json = twc.show()
    twc.get_browser()._browser._response.close()  # end the connection, so that apache doesn't get upset

    dm = DiseaseJson(result_json)
    return dm
Example #33
 def setUp(self):
     import sys
     import twill
     from pyramid.configuration import Configurator
     config = Configurator(root_factory=self.root_factory)
     config.load_zcml(self.config)
     twill.add_wsgi_intercept('localhost', 6543, config.make_wsgi_app)
     if sys.platform == 'win32': # pragma: no cover
         out = open('nul:', 'wb')
     else:
         out = open('/dev/null', 'wb')
     twill.set_output(out)
     testing.setUp(registry=config.registry)
Example #34
def enable_debug(debug):
    if isinstance(debug, bool) or debug is None:
        if debug:
            twillout=sys.stderr
        else:
            twillout=StringIO.StringIO()        
    elif isinstance(debug, str):
        twillout=file(debug, 'a')
    else:
        raise ValueError, debug

    import twill
    twill.set_output(twillout)
    twill.set_errout(twillout)
Example #35
def download_with_login(url,
                        login_url,
                        login=None,
                        password=None,
                        ext='',
                        username_field='username',
                        password_field='password',
                        form_id=1):
    ''' Download a URI from a website using Django by logging in first

        1. Logs in using supplied login & password (if provided)
        2. Creates a temp file on disk using extension if provided
        3. Writes content of URI into file '''

    # log-in to Django site
    if login and password:
        tw.go(login_url)
        tw.formvalue('%s' % form_id, username_field, login)
        tw.formvalue('%s' % form_id, password_field, password)
        tw.submit()

    # retrieve URI
    try:
        tw.go(url)
        tw.code('200')
    except TwillAssertionError:
        code = get_browser().get_code()
        # ensure we don't keep credentials
        tw.reset_browser()
        raise DownloadFailed(u"Unable to download %(url)s. "
                             u"Received HTTP #%(code)s." % {
                                 'url': url,
                                 'code': code
                             })
    buff = StringIO.StringIO()
    twill.set_output(buff)
    try:
        tw.show()
    finally:
        twill.set_output(None)
        tw.reset_browser()

    # write file on disk
    suffix = '.%s' % ext if ext else ''
    fileh, filename = tempfile.mkstemp(suffix=suffix)
    os.write(fileh, buff.getvalue())
    os.close(fileh)
    buff.close()

    return filename
Example #36
def enable_debug(debug):
    if isinstance(debug, bool) or debug is None:
        if debug:
            twillout = sys.stderr
        else:
            twillout = StringIO.StringIO()
    elif isinstance(debug, str):
        twillout = file(debug, 'a')
    else:
        raise ValueError, debug

    import twill
    twill.set_output(twillout)
    twill.set_errout(twillout)
Example #37
 def setUp(self):
     webapp.web.db.open_database("sqlite://")
     cherrypy.config.update(
         {
             "environment": "embedded",
             "global" : {
                 "tools.auth.on" : True,
                 "tools.sessions.on" : True,
                 }
             })
     wsgiApp = cherrypy.tree.mount(webapp.web.root.Root())
     cherrypy.server.start()
     twill.add_wsgi_intercept('localhost', 8080, lambda : wsgiApp)
     self.outp = StringIO()
     twill.set_output(self.outp)
Example #38
    def __init__(self, *args, **kwargs):

        self.db_connection = db.Connection(
            host=settings.SETTINGS['db_host'],
            port=settings.SETTINGS['db_port']
        )
        self.db = self.db_connection[settings.SETTINGS['db_name']]
        self.tc = tc
        self.tc.browser.set_agent_string(
            'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8) '
            'Gecko/20051111 Firefox/1.5 BAVM/1.0.0'
        )
        if not self.debug:
            twill.set_output(StringIO())
        return super(BasicTest, self).__init__(*args, **kwargs)
Example #39
    def pageinfo(self):
        buf = StringIO()
        twill.set_output(buf)
        twillc.info()

        info = {}
        for line in buf.getvalue().split('\n'):
            if len(line.strip()) > 0:
                parts = line.strip().split(': ')
                if len(parts) > 1:
                    info[parts[0].strip()] = string.join(parts[1:], ': ')
                else:
                    info[parts[0].strip()] = ''

        return info
Example #40
 def setUp(self):
     """Setup an authorized user so we can test app functionality"""
     # Handles setting the state once for the all tests in this class
     # http://stackoverflow.com/questions/402483/caching-result-of-setup-using-python-unittest/402492#402492
     if not self.browser:
         twill.set_output(StringIO())
         twill.commands.clear_cookies()
         twill.add_wsgi_intercept(TWILLHOST, 80, lambda:app)
         self.__class__.browser = twill.get_browser()
         # authorize user against Gmail for our app
         self.__class__.browser.go(url_for_twill('/'))
         twill.commands.formvalue(1, 'search', 'test')
         self.__class__.browser.submit()
         twill.commands.formvalue(1, 'Passwd', secrets.TEST_GOOGLE_PASSWORD)
         twill.commands.formvalue(1, 'Email', secrets.TEST_GOOGLE_EMAIL)
         self.__class__.browser.submit()
         self.__class__.browser.submit('allow')
Example #41
def download_with_login(url, login_url, login=None,
                              password=None, ext='',
                              username_field='username',
                              password_field='password',
                              form_id=1):
    ''' Download a URI from a website using Django by logging in first

        1. Logs in using supplied login & password (if provided)
        2. Creates a temp file on disk using extension if provided
        3. Writes content of URI into file '''

    # log-in to Django site
    if login and password:
        tw.go(login_url)
        tw.formvalue('%s' % form_id, username_field, login)
        tw.formvalue('%s' % form_id, password_field, password)
        tw.submit()

    # retrieve URI
    try:
        tw.go(url)
        tw.code('200')
    except TwillAssertionError:
        code = get_browser().get_code()
        # ensure we don't keep credentials
        tw.reset_browser()
        raise DownloadFailed(u"Unable to download %(url)s. "
                             u"Received HTTP #%(code)s."
                             % {'url': url, 'code': code})
    buff = StringIO.StringIO()
    twill.set_output(buff)
    try:
        tw.show()
    finally:
        twill.set_output(None)
        tw.reset_browser()

    # write file on disk
    suffix = '.%s' % ext if ext else ''
    fileh, filename = tempfile.mkstemp(suffix=suffix)
    os.write(fileh, buff.getvalue())
    os.close(fileh)
    buff.close()

    return filename
Example #42
def getResults(opts):
    # Twill prints a lot of noise; we don't want that.
    twill.set_output(StringIO.StringIO())

    # Log into studweb
    tc.go(
        "https://studweb.uio.no/as/WebObjects/studentweb2.woa/3/wa/default?inst=UiO"
    )
    tc.fv("2", "fodselsnr", opts["fnr"])
    tc.fv("2", "pinkode", opts["pin"])
    tc.submit()

    # Navigating to grades. Bad localization for NN users. No loc. for EN :(
    try:
        tc.follow('Se opplysninger om deg')
    except te.TwillAssertionError:
        try:
            # Note: wildcard in linkname.
            # Problems with special NO chars (æøå).
            tc.follow('Sj. opplysningar om deg')
        except te.TwillAssertionError:
            print "Error: Unknown language on site?"
            sys.exit(1)
    tc.follow('Resultater')

    # Storing HTML in var.
    data = tc.show()

    tc.follow('Logg ut')

    # Pulling coursecode and grade out of HTML, converting flunking to
    # 'F', and putting it in an array.
    # Ex. result: [["INF1000", "B"], ["INF1040", "E"]]
    res = re.findall('<tr class="pysj\d">(.*?)</tr>', data)
    ans = {}
    for i in res:
        if not re.search("Ikkje møtt|Ikke møtt", i):
            tmp = re.findall("<td.*?>(.*?)</td>", i)
            if not re.search("[A-E]", tmp[7]):
                tmp[7] = "F"
            if (not ans.has_key(tmp[1])) or (ans.has_key(tmp[1])
                                             and ans[tmp[1]] == "F"):
                ans[tmp[1]] = tmp[7]
    return reduce(lambda x, y: x + [[y, ans[y]]], ans, [])
Example #43
def lookup(sname, syear):
    imdb = twill.commands
    out = StringIO()
    twill.set_output(out)
    res = imdb.go("http://www.imdb.com/find?s=all&q=")
    res = imdb.formvalue(1, "q", sname)
    res = imdb.tidy_ok()
    res = imdb.submit(0)

    soep = BeautifulSoup(imdb.get_browser().get_html())
    u = unicode(sname + " (" + syear + ")")

    if soep.title.contents[0] == u:
        print("superb!")
    links = soep.findAll('a')
    for l in links:
        line = str(l)
        if (line.find("directorlist") > -1):
            print "*************" + line
            print l.contents
Example #44
def getResults(opts):
    # Twill prints a lot of noise; we don't want that.
    twill.set_output(StringIO.StringIO())

    # Log into studweb
    tc.go("https://studweb.uio.no/as/WebObjects/studentweb2.woa/3/wa/default?inst=UiO")
    tc.fv("2", "fodselsnr", opts["fnr"])
    tc.fv("2", "pinkode", opts["pin"])
    tc.submit()

    # Navigating to grades. Bad localization for NN users. No loc. for EN :(
    try:
        tc.follow('Se opplysninger om deg')
    except te.TwillAssertionError:
        try:
            # Note: wildcard in linkname.
            # Problems with special NO chars (æøå).
            tc.follow('Sj. opplysningar om deg')
        except te.TwillAssertionError:
            print "Error: Unknown language on site?"
            sys.exit(1)
    tc.follow('Resultater')

    # Storing HTML in var.
    data = tc.show()

    tc.follow('Logg ut')

    # Pulling coursecode and grade out of HTML, converting flunking to
    # 'F', and putting it in an array.
    # Ex. result: [["INF1000", "B"], ["INF1040", "E"]]
    res = re.findall('<tr class="pysj\d">(.*?)</tr>', data)
    ans = {}
    for i in res:
        if not re.search("Ikkje møtt|Ikke møtt", i):
            tmp = re.findall("<td.*?>(.*?)</td>", i)
            if not re.search("[A-E]", tmp[7]):
                tmp[7] = "F"
            if (not ans.has_key(tmp[1])) or (ans.has_key(tmp[1]) and ans[tmp[1]]== "F"):
                ans[tmp[1]] = tmp[7]
    return reduce(lambda x, y: x + [[y, ans[y]]], ans, [])
Example #45
def annotate(params, proteins, \
             url="http://signalfind.org/tatfind.html", force=False):
    """
    Interfaces with the TatFind web service (http://signalfind.org/tatfind.html)
    to predict if protein sequences contain Twin-Arginine Translocation (Tat)
    signal peptides.
    """
    # set the user-agent so web services can block us if they want ... :/
    python_version = sys.version.split()[0]
    agent("Python-urllib/%s (twill; inmembrane)" % python_version)

    outfn = 'tatfind.out'
    log_stderr("# TatFind(web) %s > %s" % (params['fasta'], outfn))

    if not force and os.path.isfile(outfn):
        log_stderr("# -> skipped: %s already exists" % outfn)
        fh = open(outfn, 'r')
        proteins = parse_tatfind_output(fh, proteins)
        fh.close()
        return proteins

    # dump extraneous output into this blackhole so we don't see it
    if not __DEBUG__: twill.set_output(StringIO.StringIO())

    go(url)
    if __DEBUG__: showforms()
    formfile("1", "seqFile", params["fasta"])
    submit()
    if __DEBUG__: show()

    tatfind_output = show()
    if __DEBUG__: log_stderr(tatfind_output)

    # write raw TatFind output to a file
    fh = open(outfn, 'w')
    fh.write(tatfind_output)
    fh.close()

    proteins = parse_tatfind_output(tatfind_output.split("\n"), proteins)

    return proteins
Example #46
def annotate(params, proteins, \
             url="http://signalfind.org/tatfind.html", force=False):
    """
    Interfaces with the TatFind web service (http://signalfind.org/tatfind.html)
    to predict if protein sequences contain Twin-Arginine Translocation (Tat)
    signal peptides.
    """
    # set the user-agent so web services can block us if they want ... :/
    python_version = sys.version.split()[0]
    agent("Python-urllib/%s (twill; inmembrane)" % python_version)

    outfn = 'tatfind.out'
    log_stderr("# TatFind(web) %s > %s" % (params['fasta'], outfn))

    if not force and os.path.isfile(outfn):
        log_stderr("# -> skipped: %s already exists" % outfn)
        fh = open(outfn, 'r')
        proteins = parse_tatfind_output(fh, proteins)
        fh.close()
        return proteins

    # dump extraneous output into this blackhole so we don't see it
    if not __DEBUG__: twill.set_output(StringIO.StringIO())

    go(url)
    if __DEBUG__: showforms()
    formfile("1", "seqFile", params["fasta"])
    submit()
    if __DEBUG__: show()

    tatfind_output = show()
    if __DEBUG__: log_stderr(tatfind_output)

    # write raw TatFind output to a file
    fh = open(outfn, 'w')
    fh.write(tatfind_output)
    fh.close()

    proteins = parse_tatfind_output(tatfind_output.split("\n"), proteins)

    return proteins
Example #47
def setup():
    if not os.path.exists(AUTHENTIC_SRCDIR):
        print >> sys.stderr, 'Authentic source dir (%s) does not exist' % AUTHENTIC_SRCDIR
        print >> sys.stderr, 'Create it or edit tests/config.py to match your local installation'
        sys.exit(1)

    silent = os.environ.get('NO_SILENT') is None
    twill.commands.reset_browser()
    twill.set_output(file('/dev/null', 'w'))
    base = []
    if os.environ.get('VALGRIND') == '1' and os.path.exists(
            '/usr/bin/valgrind'):
        base = ['./valgrind-wrapper.sh', 'python']

    os.mkdir('/tmp/.tests')
    authentic_command = base + [
        AUTHENTICCTL, 'start', '--app-dir', '/tmp/.tests/authentictests',
        '--data-dir', AUTHENTIC_DATADIR, '--extra',
        os.path.join(AUTHENTIC_SRCDIR, 'extra',
                     'conformance'), '--port', '10001', '--http'
    ]
    if silent:
        authentic_command.append('--silent')
    sp = subprocess.Popen(authentic_command)
    pids.append(sp.pid)
    lcs_command = base + [
        LCSCTL, 'start', '--app-dir', '/tmp/.tests/lcstests', '--data-dir',
        LCS_DATADIR, '--port', '10002', '--http'
    ]
    if silent:
        lcs_command.append('--silent')
    sp = subprocess.Popen(lcs_command)
    pids.append(sp.pid)

    # Wait for the daemons to load themselves
    starttime = time.time()
    waitforport(10001, starttime)
    waitforport(10002, starttime)
Example #48
 def imdb_lookup(self):
     imdb = twill.commands
     out = StringIO()
     twill.set_output(out)
     res = imdb.go("http://www.imdb.com/find?s=all&q=")
     res = imdb.formvalue(1, "q", self.new_name)
     res = imdb.tidy_ok()
     res = imdb.submit(0)
     data = (imdb.get_browser().get_html())
     out = open('tmp', 'w')
     i = 0
     for line in data.split('\n'):
         if len(line.strip()) > 1:
             out.write(line.strip() + '\n')
             if line.find('/title/tt') > -1:
                 if i < 10:
                     i += 1
                     soup = BeautifulSoup(line)
                     imdb_name = soup.findAll('a')[0]
                     try:
                         return (unescape(imdb_name.contents.decode(), {
                             "&#x26;": '&',
                             "&#x27;": "'",
                             '&#x22;': ''
                         }))
                     except:
                         pass
                     soup = BeautifulSoup(line)
                     imdb_name = soup.findAll('td')
                     try:
                         return (unescape(imdb_name.contents, {
                             "&#x26;": '&',
                             "&#x27;": "'",
                             '&#x22;': ''
                         }))
                     except:
                         return (False)
     out.close()
Example #49
 def setUp(self):
     "Run before all tests in this class, sets the output to the console"
     twill.set_output(StringIO())
Example #50
 def __init__(self):
     #ignore twill output, until we need it
     twill.set_output(StringIO())
Example #51
 def setUp(self):
     app = AdminMediaHandler(WSGIHandler())
     twill.add_wsgi_intercept(self.HOST, self.PORT, lambda: app)
     twill.set_output(StringIO())
     self.command = twill.commands
Example #52
from xml.etree import ElementTree

# Be sure to use Galaxy's vanilla pyparsing instead of the older version
# imported by twill.
import pyparsing  # noqa: F401
import twill
import twill.commands as tc
from six import string_types, StringIO
from six.moves.urllib.parse import urlencode, urlparse
from twill.other_packages._mechanize_dist import ClientForm

from base.testcase import FunctionalTestCase  # noqa: I100

# Force twill to log to a buffer -- FIXME: Should this go to stdout and be captured by nose?
buffer = StringIO()
twill.set_output(buffer)
tc.config('use_tidy', 0)

# Dial ClientCookie logging down (very noisy)
logging.getLogger("ClientCookie.cookies").setLevel(logging.WARNING)
log = logging.getLogger(__name__)

DEFAULT_TOOL_TEST_WAIT = os.environ.get("GALAXY_TEST_DEFAULT_WAIT", 86400)


class TwillTestCase(FunctionalTestCase):

    """Class of FunctionalTestCase geared toward HTML interactions using the Twill library."""

    def check_for_strings(self, strings_displayed=[], strings_not_displayed=[]):
        if strings_displayed:
Example #53
path = "dx/"
convertDocxToText(path)

from pylab import *
import urllib2
import twill
from twill.commands import *
import re
import os
import magic
import sys
from docx import *

# Set up dummy non-existent file to suppress twill output
f = open(os.devnull, 'w')
twill.set_output(f)

# Navigate to the website and authenticate
url = 'http://linguistlist.org/confservices/EasyAbs/login.cfm'
go(url)
fv('2', 'emailaddress', '[INSERT EMAIL]')
fv('2', 'password', '[INSERT PASSWORD]')
submit(0)
follow('View/Assign Abstracts')

# Grab the paper IDs (6 digit numbers)
pagetext = show()
allIDs = [
    pagetext[num.start() + 1:num.start() + 7]
    for num in list(re.finditer('t19', pagetext))
]
Example #54
def annotate(params, proteins, \
             force=False):
    """
    DEPRECATED: The TMB-HUNT server appears to be permanently offline.

    Uses the TMB-HUNT web service
    (http://bmbpcu36.leeds.ac.uk/~andy/betaBarrel/AACompPred/aaTMB_Hunt.cgi) to
    predict if proteins are outer membrane beta-barrels.

    NOTE: In my limited testing, TMB-HUNT tends to perform very poorly in
          terms of false positives and false negatives. I'd suggest using only
          BOMP.
    """
    # TODO: automatically split large sets into multiple jobs
    #       TMB-HUNT will only take 10000 seqs at a time
    if len(proteins) >= 10000:
        log_stderr(
            "# ERROR: TMB-HUNT(web): can't take more than 10,000 sequences.")
        return

    # set the user-agent so web services can block us if they want ... :/
    python_version = sys.version.split()[0]
    agent("Python-urllib/%s (twill; inmembrane)" % python_version)

    out = 'tmbhunt.out'
    log_stderr("# TMB-HUNT(web) %s > %s" % (params['fasta'], out))

    if not force and os.path.isfile(out):
        log_stderr("# -> skipped: %s already exists" % out)
        return parse_tmbhunt(proteins, out)

    # dump extraneous output into this blackhole so we don't see it
    if not __DEBUG__: twill.set_output(StringIO.StringIO())

    go("http://bmbpcu36.leeds.ac.uk/~andy/betaBarrel/AACompPred/aaTMB_Hunt.cgi"
       )
    if __DEBUG__: showforms()

    # read up the FASTA format seqs
    fh = open(params['fasta'], 'r')
    fasta_seqs = fh.read()
    fh.close()

    # fill out the form
    fv("1", "sequences", fasta_seqs)

    submit()
    if __DEBUG__: showlinks()

    # small jobs will lead us straight to the results, big jobs
    # go via a 'waiting' page which we skip past if we get it
    job_id = None
    try:
        # we see this with big jobs
        result_table_url = follow(
            "http://www.bioinformatics.leeds.ac.uk/~andy/betaBarrel/AACompPred/tmp/tmp_output.*.html"
        )
        job_id = result_table_url.split('tmp_output')[-1:][0].split('.')[0]
    except:
        # small jobs take us straight to the html results table
        pass

    # parse the job_id from the url, since due to a bug in
    # TMB-HUNT the link on the results page from large jobs is wrong
    if not job_id:
        job_id = follow("Full results").split('/')[-1:][0].split('.')[0]
    log_stderr(
        "# TMB-HUNT(web) job_id is: %s <http://www.bioinformatics.leeds.ac.uk/~andy/betaBarrel/AACompPred/tmp/tmp_output%s.html>"
        % (job_id, job_id))

    # polling until TMB-HUNT finishes
    # TMB-HUNT advises that 4000 sequences take ~10 mins
    # we poll a little faster than that
    polltime = (len(proteins) * 0.1) + 2
    while True:
        log_stderr("# TMB-HUNT(web): waiting another %i sec ..." % (polltime))
        time.sleep(polltime)
        try:
            go("http://bmbpcu36.leeds.ac.uk/~andy/betaBarrel/AACompPred/tmp/%s.txt"
               % (job_id))
            break
        except:
            polltime = polltime * 2

        if polltime >= 7200:  # 2 hours
            log_stderr("# TMB-HUNT error: Taking too long.")
            return

    txt_out = show()

    # write raw TMB-HUNT results
    fh = open(out, 'w')
    fh.write(txt_out)
    fh.close()

    return parse_tmbhunt(proteins, out)
Example #55
 def _twill_quiet():
     # suppress normal output of twill. You don't want to
     # call this if you want an interactive session
     twill.set_output(StringIO())
Example #56
def twill_quiet():
    # suppress normal output of twill. You don't want to
    # call this if you want an interactive session
    if testlib.TWILL_QUIET:
        twill.set_output(StringIO())
Example #57
def annotate(params, proteins, \
                   url="http://psfs.cbrc.jp/tmbeta-net/", \
                   category='OM(barrel)',
                   force=False):
    """
  Uses the TMBETA-NET web service (http://psfs.cbrc.jp/tmbeta-net/) to
  predict strands of outer membrane beta-barrels.
  
  By default, category='BARREL' means prediction will only be run
  on proteins in the set with this category property. To process all
  proteins, change category to None.

  These keys are added to the proteins dictionary: 
    'tmbeta_strands' - a list of lists with paired start and end 
                       residues of each predicted strand. 
                       (eg [[3,9],[14,21], ..etc ])
  """

    # set the user-agent so web services can block us if they want ... :/
    python_version = sys.version.split()[0]
    agent("Python-urllib/%s (twill; inmembrane)" % python_version)

    outfile = 'tmbeta_net.out'
    log_stderr("# TMBETA-NET(web) %s > %s" % (params['fasta'], outfile))

    tmbeta_strands = {}
    if not force and os.path.isfile(outfile):
        log_stderr("# -> skipped: %s already exists" % outfile)
        fh = open(outfile, 'r')
        tmbeta_strands = json.loads(fh.read())
        fh.close()
        for seqid in tmbeta_strands:
            proteins[seqid]['tmbeta_strands'] = tmbeta_strands[seqid]

        return tmbeta_strands

    # dump extraneous output into this blackhole so we don't see it
    if not __DEBUG__: twill.set_output(StringIO.StringIO())

    for seqid in proteins:

        # only run on sequences which match the category filter
        if force or \
           (category == None) or \
           (dict_get(proteins[seqid], 'category') == category):
            pass
        else:
            continue

        go(url)
        if __DEBUG__: showforms()
        fv("1", "sequence", proteins[seqid]['seq'])
        submit()
        log_stderr("# TMBETA-NET: Predicting strands for %s - %s\n" \
                          % (seqid, proteins[seqid]['name']))
        out = show()
        time.sleep(1)

        if ("Some query is already running. Please try again." in out):
            log_stderr("# TMBETA-NET(web) error: %s" % (out))
            return {}

        # parse the web page returned, extract strand boundaries
        proteins[seqid]['tmbeta_strands'] = []
        for l in out.split('\n'):
            if __DEBUG__: log_stderr("## " + l)

            if "<BR>Segment " in l:
                i, j = l.split(":")[1].split("to")
                i = int(i.strip()[1:])
                j = int(j.strip()[1:])
                proteins[seqid]['tmbeta_strands'].append([i, j])

                if __DEBUG__:
                    log_stderr("# TMBETA-NET(web) segments: %s, %s" % (i, j))

        tmbeta_strands[seqid] = proteins[seqid]['tmbeta_strands']

    # we store the parsed strand boundaries in JSON format
    fh = open(outfile, 'w')
    fh.write(json.dumps(tmbeta_strands, separators=(',', ':\n')))
    fh.close()

    return tmbeta_strands