def main():
    login, password = get_credentials()
    # log in to Django site
    if login and password:
        tw.go(LOGIN_URL)
        tw.formvalue('1', 'username', login)
        tw.formvalue('1', 'password', password)
        tw.submit()
    if isinstance(DATA_URL, basestring):
        urls = [DATA_URL]
    else:
        urls = list(DATA_URL)
    # retrieve URIs
    for url in urls:
        try:
            tw.go(url)
            tw.code('200')
            tw.show()
        except TwillAssertionError:
            code = get_browser().get_code()
            print(u"Unable to access %(url)s. "
                  u"Received HTTP #%(code)s."
                  % {'url': url, 'code': code})
    tw.reset_browser()
def list_cars():
    c.go('http://icfpcontest.org/icfp10/score/instanceTeamCount')
    cars = re.findall(all_cars_rx, c.show())
    if not cars:
        sys.stderr.write(c.show())
        sys.stderr.write('Could not find any cars')
    return cars
def add_class(self, unique_number):
    class_url = self.url + '/' + unique_number
    tc.go(class_url)
    html = StringIO.StringIO()
    twill.set_output(html)
    tc.show()
    soup = BeautifulSoup(html.getvalue())
    table = soup.find('table')
    for row in table.findAll('tr')[1:]:
        columns = row.findAll('td')
        unique = columns[0].string
        days = [d.text for d in columns[1].findAll('span')]
        hour = [d.text for d in columns[2].findAll('span')]
        room = [d.text for d in columns[3].findAll('span')]
        instructor = columns[4].span.text
        new_course = Course(unique, days, hour, room, instructor)
        if self._check_planner_to_add(new_course):
            self.course_set.add(new_course)
            days_to_add = new_course.parse_days()
            hours_to_add = new_course.parse_hours()
            for d in range(len(days_to_add)):
                for h in range(hours_to_add[d][0], hours_to_add[d][1]):
                    for day in days_to_add[d]:
                        self.grid[h][day] = new_course
            print("Course successfully added.")
def login(username):
    """Find user for given username and make the browser logged in"""
    global_dict, local_dict = namespaces.get_twill_glocals()

    # Set a global Twill variable to let Twill scripts know the name
    # of the test, e.g. the directory, as well as the community name.
    #global_dict['test_name'] = test_name
    #global_dict['community_name'] = test_name + "-testcase"
    global_dict['cwd'] = os.getcwd()
    hn = global_dict['localhost_url']

    # First log out
    logout()

    # Do a login
    au = global_dict['%s_user' % username]

    # Echo to screen
    dump("Logging into %s as %s" % (hn, au))

    # Continue
    ap = global_dict['%s_password' % username]
    commands.go(hn + '/login.html')
    commands.fv("formLogin", "login", au)
    commands.fv("formLogin", "password", ap)
    commands.submit()

    # Make sure the login succeeded
    commands.show()
    commands.find("My Profile")
def test_simple_index_case(root):
    root.join("FooBar-1.0.zip").write("")
    root.join("FooBar-1.1.zip").write("")
    go("/simple/foobar")
    show()
    links = list(showlinks())
    assert len(links) == 2
def test_nonroot_simple_packages(root):
    root.join("foobar-1.0.zip").write("123")
    for url in ["http://nonroot/priv/packages",
                "http://nonroot/priv/packages/"]:
        go(url)
        show()
        links = list(showlinks())
        assert len(links) == 1
        assert links[0].url == "/priv/packages/foobar-1.0.zip"
def test_simple_index_list(root):
    root.join("foobar-1.0.zip").write("")
    root.join("foobar-1.1.zip").write("")
    root.join("foobarbaz-1.1.zip").write("")
    root.join("foobar.baz-1.1.zip").write("")
    go("/simple/")
    show()
    links = list(showlinks())
    assert len(links) == 3
def test_root_count(root):
    go("/")
    show()
    code(200)
    find("PyPI compatible package index serving 0 packages")
    showlinks()
    root.join("Twisted-11.0.0.tar.bz2").write("")
    reload()
    show()
    find("PyPI compatible package index serving 1 packages")
def test_profile_record():
    """ Test availability of user profile """
    go(SITE + '/accounts/login/')
    code(200)
    show()
    formvalue(1, 'username', 'root')
    formvalue(1, 'password', '1')
    submit()
    code(200)
def test_reset_password():
    """ Test user password reset """
    go(SITE + '/accounts/password/reset/')
    code(200)
    show()
    formvalue(1, 'email', '*****@*****.**')
    submit()
    code(200)
    find('Password reset successful')
    return
def test_success_login():
    """ Test a successful login to the site """
    go(SITE + '/accounts/login/')
    code(200)
    show()
    formvalue(1, 'username', 'test')
    formvalue(1, 'password', '1')
    submit()
    code(200)
    find('<h4 align="center">My profile</h4>')
    return
def test_incorrect_username():
    """ Test logging in to the site with an invalid username """
    go(SITE + '/accounts/login/')
    code(200)
    show()
    formvalue(1, 'username', 'testincorrect')
    formvalue(1, 'password', '1')
    submit()
    code(200)
    find('Please enter a correct username and password.')
    return
def _requestLikes(self, startingLike, startingThumb):
    url = PANDORA_LIKES_URL.substitute(webname=self.webname,
                                       likestart=startingLike,
                                       thumbstart=startingThumb)
    Browser.go(url)
    likesList = self._songHtmlToList(Browser.show())
    soup = BeautifulSoup(Browser.show())
    nextInfoElement = soup.find('div', {'class': 'show_more tracklike'})
    nextStartingLike = self._attributeNumberValueOrZero(
        nextInfoElement, 'data-nextlikestartindex')
    nextStartingThumb = self._attributeNumberValueOrZero(
        nextInfoElement, 'data-nextthumbstartindex')
    return (likesList, nextStartingLike, nextStartingThumb)
def download_with_login(url, login_url, login=None, password=None, ext='',
                        username_field='username', password_field='password',
                        form_id=1):
    '''Download a URI from a Django website, logging in first.

    1. Log in using the supplied login & password (if provided)
    2. Create a temp file on disk, using the extension if provided
    3. Write the content of the URI into the file
    '''
    # log in to Django site
    if login and password:
        tw.go(login_url)
        tw.formvalue('%s' % form_id, username_field, login)
        tw.formvalue('%s' % form_id, password_field, password)
        tw.submit()

    # retrieve URI
    try:
        tw.go(url)
        tw.code('200')
    except TwillAssertionError:
        code = get_browser().get_code()
        # ensure we don't keep credentials
        tw.reset_browser()
        raise DownloadFailed(u"Unable to download %(url)s. "
                             u"Received HTTP #%(code)s."
                             % {'url': url, 'code': code})
    buff = StringIO.StringIO()
    twill.set_output(buff)
    try:
        tw.show()
    finally:
        twill.set_output(None)
        tw.reset_browser()

    # write file on disk
    suffix = '.%s' % ext if ext else ''
    fileh, filename = tempfile.mkstemp(suffix=suffix)
    os.write(fileh, buff.getvalue())
    os.close(fileh)
    buff.close()

    return filename
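# A minimal usage sketch for download_with_login() above. The URLs,
# credentials, and the process() consumer are hypothetical illustrations,
# not part of the original code.
def example_download_report():
    try:
        filename = download_with_login(
            url='http://example.com/reports/latest/',        # hypothetical
            login_url='http://example.com/accounts/login/',  # hypothetical
            login='demo', password='secret', ext='csv')
    except DownloadFailed as e:
        print e
        return
    try:
        with open(filename) as f:
            process(f.read())  # hypothetical consumer of the download
    finally:
        os.unlink(filename)  # the caller owns the temp file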
def test_register():
    """ Test user registration """
    go(SITE + '/accounts/register/')
    code(200)
    show()
    formvalue(1, 'username', 'demouser')
    formvalue(1, 'email', '*****@*****.**')
    formvalue(1, 'password1', '1')
    formvalue(1, 'password2', '1')
    submit()
    code(200)
    find('Confirmation e-mail sent')
    return
def ff():
    """ show current page in firefox """
    # if not os.environ.get('DEBUG_TEST', False):
    #     return
    # turn on debugging to show
    # set_debugging(True)
    filename = '/tmp/_yp_test_%s' % uniq()  # os.tmpnam() raises warnings
    f = open(filename, 'w')
    html = cmd.show()
    # replace home urls with htdocs dir urls
    html = html.replace('"/', '"%s/htdocs/' % SITE_ROOT)
    html = html.replace('url(/', 'url(%s/htdocs/' % SITE_ROOT)
    # delete rX css version prefixes
    for r in range(7, 50):
        html = html.replace('/r%s/' % r, '/')
    f.write(html)
    f.close()
    os.system('/usr/bin/env firefox %s' % filename)
def test_image_processing_library_error(self):
    """ If the image processing library errors while preparing a photo,
    report a helpful message to the user and log the error. The photo is
    not added to the user's profile. """
    # Get a copy of the error log.
    string_log = StringIO.StringIO()
    logger = logging.getLogger()
    my_log = logging.StreamHandler(string_log)
    logger.addHandler(my_log)
    logger.setLevel(logging.ERROR)

    self.login_with_twill()
    tc.go(make_twill_url('http://openhatch.org/people/paulproteus/'))
    tc.follow('photo')
    # This is a special image from issue166 that passes Django's image
    # validation tests but causes an exception during zlib decompression.
    tc.formfile('edit_photo', 'photo', photo('static/images/corrupted.png'))
    tc.submit()
    tc.code(200)
    self.assert_("Something went wrong while preparing this" in tc.show())
    p = Person.objects.get(user__username='******')
    self.assertFalse(p.photo.name)

    # an error message was logged during photo processing.
    self.assert_("zlib.error" in string_log.getvalue())
    logger.removeHandler(my_log)
def leave_all_studentsets_named(studentset_name):
    b = cmd.get_browser()
    cmd.go("/py/teacher/studentset/list/")
    soup = BeautifulSoup(cmd.show())
    for tststudentset in soup.findAll('a', text=studentset_name):
        stsid = int(tststudentset.parent['href'].split('/')[-2])
        studentset_leave(stsid)
def get_building_cost(self, id):
    """ Read the cost for expanding a building """
    commands.go(SERVER + 'build.php?id=' + str(id))
    html = commands.show()
    soup = BeautifulSoup(html)
    cost = {}
    cost['wood'] = int(soup.find('span', {'class': re.compile('.*r1.*')}).text)
    cost['clay'] = int(soup.find('span', {'class': re.compile('.*r2.*')}).text)
    cost['iron'] = int(soup.find('span', {'class': re.compile('.*r3.*')}).text)
    cost['cerial'] = int(soup.find('span', {'class': re.compile('.*r4.*')}).text)
    cost['space'] = int(soup.find('span', {'class': re.compile('.*r5.*')}).text)
    return cost
def get_job_queue():
    """ fetch list of disease model jobs waiting to run from the dismod
    server given in settings.py. """
    dismod_server_login()
    twc.go(DISMOD_LIST_JOBS_URL)
    return json.loads(twc.show())
def student_casual_register(access_key, login):
    cmd.go('/py/student/login/')
    cmd.show()
    cmd.fv(2, 'full_access_key', access_key)
    cmd.submit()
    no = re.search(
        re.compile('(\/py\/student\/login\/\d+-\w+\/join\/register\/)'),
        cmd.show()).groups()[0]
    cmd.go(no)
    cmd.fv(1, 'forename', 'generic')
    cmd.fv(1, 'surname', 'robot')
    cmd.fv(1, 'login', login)
    cmd.fv(1, 'passwd', 'mymy')
    cmd.fv(1, 'passwd2', 'mymy')
    cmd.submit()
    cmd.url('/py/student/')
def getNumberOfLikes(self):
    url = PANDORA_LIKES_COUNT_URL.substitute(webname=self.webname)
    Browser.go(url)
    soup = BeautifulSoup(Browser.show())
    songsDiv = soup.find('div', {'id': 'songs'})
    numberSpan = songsDiv.find('span', {'class': 'section_count'})
    return int(numberSpan.text.strip('()'))
def get_posted_holds(self):
    print '[%s] Checking holds ...' % str(datetime.datetime.now())
    twillc.go('https://my.umcu.org/User/MainAccounts/List')
    self.pgutil.ensure_url('https://my.umcu.org/User/MainAccounts/List')
    soup = BeautifulSoup(twillc.show())
    holds = [re.sub('\s{2,}', '|', row.text.strip())
             for row in soup.select('#HoldsId table tr.BasicLine')]
    return holds
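# A minimal sketch of how get_posted_holds() might be polled to detect
# newly posted holds (the e-mail notification described in
# login_interactive's banner below). notify() and the poll interval are
# assumptions for illustration, not part of the original code.
def watch_holds(self, notify, interval=300):
    import time
    seen = set(self.get_posted_holds())
    while True:
        time.sleep(interval)
        current = set(self.get_posted_holds())
        for hold in current - seen:
            notify(hold)  # e.g. e-mail the newly posted hold
        seen = current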
def assign(ssid, content):
    cmd.go('/py/teacher/assignment/new/')
    cmd.find(content[0])
    soup = BeautifulSoup(cmd.show())
    cmd.go(soup.find('a', text=content[0]).parent['href'])
    soup = BeautifulSoup(cmd.show())
    cmd.go(soup.find('a', text=content[1]).parent['href'])
    soup = BeautifulSoup(cmd.show())
    assignment = soup.find(
        'b', text=content[2]).parent.parent.parent.find('input')['value']
    cmd.fv(2, 'task', assignment)
    cmd.config('readonly_controls_writeable', '+')
    cmd.fv(2, 'mainform_force_next', 'yes')
    cmd.submit()
    b = cmd.get_browser()
    mb = b._browser
    r = mb.open(b.get_url(), 'studentset_id=%d' % ssid)
    cmd.submit()
def get_resourses(self):
    """ Parse resources """
    commands.go(SERVER + 'dorf1.php')
    html = commands.show()
    soup = BeautifulSoup(html)
    self.resourses['wood'] = int(soup.find('span', {'id': 'l1'}).text)
    self.resourses['clay'] = int(soup.find('span', {'id': 'l2'}).text)
    self.resourses['iron'] = int(soup.find('span', {'id': 'l3'}).text)
    self.resourses['cereal'] = int(soup.find('span', {'id': 'l4'}).text)
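# A hedged helper tying get_building_cost() and get_resourses() together to
# decide whether a building upgrade is affordable. Note the differing key
# spellings between the two snippets ('cerial' vs 'cereal'), mapped
# explicitly here; this helper is an illustration, not part of the
# original bot.
def can_afford(self, id):
    self.get_resourses()
    cost = self.get_building_cost(id)
    key_map = {'wood': 'wood', 'clay': 'clay',
               'iron': 'iron', 'cerial': 'cereal'}
    for cost_key, res_key in key_map.items():
        if cost[cost_key] > self.resourses[res_key]:
            return False
    return True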
def get_fields(self):
    """ Get the level of each production field """
    commands.go(SERVER + 'dorf1.php')
    html = commands.show()
    soup = BeautifulSoup(html)
    self.fields['wood'] = [int(f.text) for f in soup.findAll(
        'div', {'class': re.compile('.*gid1.*')})]
    self.fields['clay'] = [int(f.text) for f in soup.findAll(
        'div', {'class': re.compile('.*gid2.*')})]
    self.fields['iron'] = [int(f.text) for f in soup.findAll(
        'div', {'class': re.compile('.*gid3.*')})]
    self.fields['cereal'] = [int(f.text) for f in soup.findAll(
        'div', {'class': re.compile('.*gid4.*')})]
def getVoteCredentials(redditURL):
    t.go(redditURL + "/.compact")
    page = PyQuery(t.show())
    voteHash = page(".link").find(".arrow").eq(0).attr.onclick.split("'")[1]
    storyID = page(".link").attr.class_.split("-")[1]
    modHash = getModHashFromCurrentPage()
    subredditName = redditURL.split("/")[4]
    return (storyID, voteHash, modHash, subredditName)
def remove_from_job_queue(id):
    """ remove a disease model from the job queue on the dismod server
    given in dismod3/settings.py """
    dismod_server_login()
    twc.go(DISMOD_REMOVE_JOB_URL)
    twc.fv('1', 'id', str(id))
    twc.submit()
    return json.loads(twc.show())
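# A minimal sketch of a worker loop built from get_job_queue() and
# remove_from_job_queue(). The shape of the queue JSON and the run_job()
# runner are assumptions for illustration, not part of the original code.
def process_job_queue(poll_seconds=60):
    import time
    while True:
        for job_id in get_job_queue():  # assumes the JSON is a list of ids
            run_job(job_id)             # hypothetical job runner
            remove_from_job_queue(job_id)
        time.sleep(poll_seconds)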
def annotate(params, proteins,
             url="http://signalfind.org/tatfind.html", force=False):
    """
    Interfaces with the TatFind web service at
    (http://signalfind.org/tatfind.html) to predict if protein sequences
    contain Twin-Arginine Translocation (Tat) signal peptides.
    """
    # set the user-agent so web services can block us if they want ... :/
    python_version = sys.version.split()[0]
    agent("Python-urllib/%s (twill; inmembrane)" % python_version)

    outfn = 'tatfind.out'
    log_stderr("# TatFind(web) %s > %s" % (params['fasta'], outfn))

    if not force and os.path.isfile(outfn):
        log_stderr("# -> skipped: %s already exists" % outfn)
        fh = open(outfn, 'r')
        proteins = parse_tatfind_output(fh, proteins)
        fh.close()
        return proteins

    # dump extraneous output into this blackhole so we don't see it
    if not __DEBUG__:
        twill.set_output(StringIO.StringIO())

    go(url)
    if __DEBUG__:
        showforms()
    formfile("1", "seqFile", params["fasta"])
    submit()
    if __DEBUG__:
        show()

    tatfind_output = show()
    if __DEBUG__:
        log_stderr(tatfind_output)

    # write raw TatFind output to a file
    fh = open(outfn, 'w')
    fh.write(tatfind_output)
    fh.close()

    proteins = parse_tatfind_output(tatfind_output.split("\n"), proteins)

    return proteins
def fetch_disease_model(id):
    from twill import set_output
    set_output(open('/dev/null', 'w'))
    dismod_server_login()
    twc.go(DISMOD_DOWNLOAD_URL % id)
    result_json = twc.show()
    # end the connection, so that apache doesn't get upset
    twc.get_browser()._browser._response.close()
    dm = DiseaseJson(result_json)
    return dm
def getTopicsFromCoachReportsPage():
    if s218:
        go("http://129.21.142.218:8008/coachreports/")
    else:
        go("http://129.21.142.118:8008/coachreports/")
    soup = BeautifulSoup(show())
    # get all the topics (like 'addition-subtraction') from the select
    # widget on the page
    selectDivs = soup.findAll('div', {'class': 'selection'})
    divWithTopics = selectDivs[1]
    optionTags = divWithTopics.findAll('option')[1:]  # first one is empty
    return [option['value'] for option in optionTags]
def _songHtmlToList(self, html):
    songList = []
    # the original parsed Browser.show() here, which silently ignored the
    # `html` argument; parse the page that callers actually pass in
    soup = BeautifulSoup(html)
    songs = soup.findAll('div', {'class': 'infobox-body'})
    for song in songs:
        links = song.findAll('a')
        title = links[0].text
        artist = links[1].text
        songList.append([title, artist])
    return songList
def test_change_password():
    """ Test user password change """
    go(SITE + '/accounts/login/')
    code(200)
    show()
    formvalue(1, 'username', 'test')
    formvalue(1, 'password', '1')
    submit()
    code(200)

    go(SITE + '/accounts/password/change/')
    code(200)
    show()
    formvalue(1, 'old_password', '1')
    formvalue(1, 'new_password1', '2')
    formvalue(1, 'new_password2', '2')
    submit()
    code(200)
    find('Password change successful')

    go(SITE + '/accounts/login/')
    code(200)
    show()
    formvalue(1, 'username', 'test')
    formvalue(1, 'password', '2')
    submit()
    code(200)
    return
def campusnet_login(l_username, l_password):
    commands.go('https://campusnet.jacobs-university.de/scripts/mgrqispi.dll?APPNAME=CampusNet&PRGNAME=ACTION&ARGUMENTS=-A9PnS7.Eby4LCWWmmtOcbYKUQ-so-sF48wtHtVNWX9aIeYmoSh5mej--SCbT.jubdlAouHy3dHzwyr-O.ufj3NVAYCNiJr0CFcBNwA3xADclRCTyqC0Oip8drT0F=')
    commands.fv('1', 'usrname', l_username)
    commands.fv('1', 'pass', l_password)
    commands.submit('3')
    # silence twill's output while grabbing the page
    out = sys.stdout
    devnull = open(os.devnull, 'w')
    sys.stdout = devnull
    returned_page = commands.show()
    sys.stdout = out
    return returned_page
def get_student_info(student_id, ssid):
    cmd.go('/py/teacher/studentset/student/%s/edit/studentset/%s/'
           % (student_id, ssid))
    soup = BeautifulSoup(cmd.show())
    return dict(
        id=student_id,
        ssid=ssid,
        login=soup.find('td', {'class': 'first-cell'}, text='Login')
                  .parent.parent.find('td', {'class': 'second-cell'}).string,
        password=soup.find('input', {'id': 'id_passwd'})['value'],
        forename=soup.find('input', {'id': 'id_forename'})['value'],
        surname=soup.find('input', {'id': 'id_surname'})['value'])
def test_processing_simplify():
    """cfg:
    [process_simplify]
    module=Simplify
    class=Simplify
    tolerance_default=.1
    tolerance_locked=no

    [scribble]
    type=SQLite
    file=/var/www/ms_tmp/featureserver.scribble
    gaping_security_hole=yes
    template=template/default-withmap.html
    processes=simplify
    """
    s1 = StringIO()
    twill.set_output(s1)
    tc.go("http://%s/scribble/1.geojson?process_simplify_tolerance=0.001" % full_host)
    tc.show()

    s2 = StringIO()
    twill.set_output(s2)
    tc.go("http://%s/scribble/1.geojson?process_simplify_tolerance=100" % full_host)
    tc.show()

    s1 = s1.getvalue()
    s2 = s2.getvalue()

    # import sys
    # print >>sys.stderr, s1
    # print >>sys.stderr, ''
    # print >>sys.stderr, s2

    assert "processed_by" in s2
    # it's simplified, so it should be shorter ... add a better test...
    assert len(s2) <= len(s1)
def fetchUpToThreeMonths(startDate, endDate, accountID):
    if (endDate - startDate).days <= 90:
        url = "https://venmo.com/transaction-history/" \
              + formatRequest(startDate, endDate) \
              + f"&profileId={accountID}&accountType=personal"
        tw.go(url)
        try:
            df = read_csv(io.StringIO(tw.show()), encoding='utf8', header=2)
            row, col = df.shape
            return df.drop([row - 1])  # drop the last row
        except Exception as e:
            print(f"problem reading response: {e}")
            return None
    else:
        raise Exception("Cannot fetch date range greater than 90 days")
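# A sketch of chaining fetchUpToThreeMonths() to cover a range longer than
# the 90-day limit it enforces; the chunking scheme and pandas.concat usage
# are assumptions for illustration, not part of the original code.
def fetchRange(startDate, endDate, accountID):
    import datetime
    from pandas import concat
    frames = []
    chunkStart = startDate
    while chunkStart < endDate:
        chunkEnd = min(chunkStart + datetime.timedelta(days=90), endDate)
        df = fetchUpToThreeMonths(chunkStart, chunkEnd, accountID)
        if df is not None:
            frames.append(df)
        chunkStart = chunkEnd + datetime.timedelta(days=1)
    return concat(frames, ignore_index=True) if frames else None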
def can_attack(self):
    """ Determine if it is possible to attack """
    if not self.attacking():
        commands.go(SERVER + 'build.php?tt=2&id=39')
        html = commands.show()
        soup = BeautifulSoup(html)
        for troop, num in self.troops.items():
            link = soup.find(
                'a', {'onclick': re.compile('.*\.' + troop + '\..*')})
            if not link or int(link.text) < int(num):
                print "Not enough troops to attack"
                return False
        return True
    else:
        print "This farm is being attacked"
        return False
def getResults(opts):
    # Twill prints a lot of noise we don't want.
    twill.set_output(StringIO.StringIO())

    # Log into studweb
    tc.go("https://studweb.uio.no/as/WebObjects/studentweb2.woa/3/wa/default?inst=UiO")
    tc.fv("2", "fodselsnr", opts["fnr"])
    tc.fv("2", "pinkode", opts["pin"])
    tc.submit()

    # Navigate to grades. Bad localization for NN users. No loc. for EN :(
    try:
        tc.follow('Se opplysninger om deg')
    except te.TwillAssertionError:
        try:
            # Note: wildcard in linkname.
            # Problems with special NO chars (æøå).
            tc.follow('Sj. opplysningar om deg')
        except te.TwillAssertionError:
            print "Error: Unknown language on site?"
            sys.exit(1)
    tc.follow('Resultater')

    # Store the HTML in a variable.
    data = tc.show()
    tc.follow('Logg ut')

    # Pull the course code and grade out of the HTML, convert flunking to
    # 'F', and put it in an array.
    # Ex. result: [["INF1000", "B"], ["INF1040", "E"]]
    res = re.findall('<tr class="pysj\d">(.*?)</tr>', data)
    ans = {}
    for i in res:
        if not re.search("Ikkje møtt|Ikke møtt", i):
            tmp = re.findall("<td.*?>(.*?)</td>", i)
            if not re.search("[A-E]", tmp[7]):
                tmp[7] = "F"
            if (not ans.has_key(tmp[1])) or \
               (ans.has_key(tmp[1]) and ans[tmp[1]] == "F"):
                ans[tmp[1]] = tmp[7]
    return reduce(lambda x, y: x + [[y, ans[y]]], ans, [])
def login_interactive(self):
    print '''
    Cardamon will need your UMCU and GMail login information. The UMCU
    login information will be used to automatically log in to the UMCU
    web portal and monitor your holds. The GMail login information will
    be used to send e-mails when new holds are posted to your account.
    All login information will be stored SECURELY. To re-enter login
    information, start cardamon with the --from-scratch flag:

        ./cardamon.py --from-scratch

    Remember that UMCU freezes weblogins after three unsuccessful
    attempts. If login fails for any reason, please go to
    https://www.umcu.org/ and verify your login information.

    Let's begin now ...
    '''
    twillc.go('https://www.umcu.org/')
    self.username = SecureStorage.get('UMCU UserName', False)
    twillc.fv('4', 'UsernameField', self.username)
    twillc.submit('SubmitNext')

    self.pgutil.ensure_url('https://my.umcu.org/User/AccessSignin/Password')
    self.password = SecureStorage.get('UMCU Password')
    twillc.fv('1', 'PasswordField', self.password)
    twillc.submit('SubmitNext')

    print 'UMCU wants you to answer a security question:'
    self.pgutil.ensure_url('https://my.umcu.org/User/AccessSignin/Challenge')
    soup = BeautifulSoup(twillc.show())
    question = soup.select('#AccessForm td')[2].text.strip() + '\n'
    twillc.fv('1', 'Answer', SecureStorage.get(question, False))
    twillc.fv('1', 'Remember', 'True')
    twillc.submit('SubmitNext')

    self.pgutil.ensure_url('https://my.umcu.org/User/MainAccounts/List')
    print '\nLOGIN SUCCESSFUL\n\n'
def attack(self):
    """ Attack the farm """
    print "Attack (" + self.x + "|" + self.y + "): ",
    if self.can_attack():
        commands.go(SERVER + 'build.php?tt=2&id=39')
        commands.fv('2', 'x', self.x)
        commands.fv('2', 'y', self.y)
        commands.fv('2', 'c', '4')
        for troop, num in self.troops.items():
            commands.fv('2', troop, num)
        commands.submit()
        commands.reload()
        html = commands.show()
        soup = BeautifulSoup(html)
        t = soup.find('div', {'class': 'in'}).text
        t = re.search('[0-9][0-9]?:[0-9]{2}:[0-9]{2}', t).group(0)
        # double each time component (presumably the reported time is the
        # one-way travel time) before scheduling the next attack
        h, m, s = [2 * int(e) for e in t.split(':')]
        wait = datetime.timedelta(seconds=s, minutes=m, hours=h)
        self.next_attack = datetime.datetime.now() + wait
        commands.fv('2', 's1', 'ok')
        commands.submit()
        print "done"
def test_invalid_photo(self):
    """ If the uploaded image is detected as being invalid, report a
    helpful message to the user. The photo is not added to the user's
    profile. """
    bad_image = tempfile.NamedTemporaryFile(delete=False)
    self.login_with_twill()
    try:
        bad_image.write("garbage")
        bad_image.close()
        tc.go(make_twill_url('http://openhatch.org/people/paulproteus/'))
        tc.follow('photo')
        tc.formfile('edit_photo', 'photo', bad_image.name)
        tc.submit()
        tc.code(200)
        self.assert_("The file you uploaded was either not an image or a "
                     "corrupted image" in tc.show())
        p = Person.objects.get(user__username='******')
        self.assertFalse(p.photo.name)
    finally:
        os.unlink(bad_image.name)
def annotate(params, proteins, force=False):
    """
    DEPRECATED: The TMB-HUNT server appears to be permanently offline.

    Uses the TMB-HUNT web service
    (http://bmbpcu36.leeds.ac.uk/~andy/betaBarrel/AACompPred/aaTMB_Hunt.cgi)
    to predict if proteins are outer membrane beta-barrels.

    NOTE: In my limited testing, TMB-HUNT tends to perform very poorly in
    terms of false positives and false negatives. I'd suggest using only
    BOMP.
    """
    # TODO: automatically split large sets into multiple jobs
    # TMB-HUNT will only take 10000 seqs at a time
    if len(proteins) >= 10000:
        log_stderr(
            "# ERROR: TMB-HUNT(web): can't take more than 10,000 sequences.")
        return

    # set the user-agent so web services can block us if they want ... :/
    python_version = sys.version.split()[0]
    agent("Python-urllib/%s (twill; inmembrane)" % python_version)

    out = 'tmbhunt.out'
    log_stderr("# TMB-HUNT(web) %s > %s" % (params['fasta'], out))

    if not force and os.path.isfile(out):
        log_stderr("# -> skipped: %s already exists" % out)
        return parse_tmbhunt(proteins, out)

    # dump extraneous output into this blackhole so we don't see it
    if not __DEBUG__:
        twill.set_output(StringIO.StringIO())

    go("http://bmbpcu36.leeds.ac.uk/~andy/betaBarrel/AACompPred/aaTMB_Hunt.cgi")
    if __DEBUG__:
        showforms()

    # read up the FASTA format seqs
    fh = open(params['fasta'], 'r')
    fasta_seqs = fh.read()
    fh.close()

    # fill out the form
    fv("1", "sequences", fasta_seqs)

    submit()
    if __DEBUG__:
        showlinks()

    # small jobs will lead us straight to the results, big jobs
    # go via a 'waiting' page which we skip past if we get it
    job_id = None
    try:
        # we see this with big jobs
        result_table_url = follow(
            "http://www.bioinformatics.leeds.ac.uk/~andy/betaBarrel/AACompPred/tmp/tmp_output.*.html")
        job_id = result_table_url.split('tmp_output')[-1:][0].split('.')[0]
    except:
        # small jobs take us straight to the html results table
        pass

    # parse the job_id from the url, since due to a bug in
    # TMB-HUNT the link on the results page from large jobs is wrong
    if not job_id:
        job_id = follow("Full results").split('/')[-1:][0].split('.')[0]
    log_stderr(
        "# TMB-HUNT(web) job_id is: %s <http://www.bioinformatics.leeds.ac.uk/~andy/betaBarrel/AACompPred/tmp/tmp_output%s.html>"
        % (job_id, job_id))

    # polling until TMB-HUNT finishes
    # TMB-HUNT advises that 4000 sequences take ~10 mins
    # we poll a little faster than that
    polltime = (len(proteins) * 0.1) + 2
    while True:
        log_stderr("# TMB-HUNT(web): waiting another %i sec ..." % (polltime))
        time.sleep(polltime)
        try:
            go("http://bmbpcu36.leeds.ac.uk/~andy/betaBarrel/AACompPred/tmp/%s.txt"
               % (job_id))
            break
        except:
            polltime = polltime * 2
            if polltime >= 7200:  # 2 hours
                log_stderr("# TMB-HUNT error: Taking too long.")
                return

    txt_out = show()

    # write raw TMB-HUNT results
    fh = open(out, 'w')
    fh.write(txt_out)
    fh.close()

    return parse_tmbhunt(proteins, out)