def test_file_missing_columns(self):
    """Uploading a CSV without the "Id" column must report which column is missing."""
    csv_path = os.path.join(os.path.dirname(__file__),
                            'data/domain_2016-09-16_missing_column_id.csv')
    tc.formfile(2, 'domains_emails', csv_path)
    tc.submit()
    # The view should name the missing column and the offending row.
    tc.find('Missing column "Id" on the row 2.')
def test_image_processing_library_error(self):
    """
    If the image processing library errors while preparing a photo, report
    a helpful message to the user and log the error. The photo is not
    added to the user's profile.
    """
    # Get a copy of the error log by attaching a stream handler to the
    # root logger for the duration of this test.
    string_log = StringIO.StringIO()
    logger = logging.getLogger()
    my_log = logging.StreamHandler(string_log)
    logger.addHandler(my_log)
    logger.setLevel(logging.ERROR)
    try:
        self.login_with_twill()
        tc.go(make_twill_url('http://openhatch.org/people/paulproteus/'))
        tc.follow('photo')
        # This is a special image from issue166 that passes Django's image
        # validation tests but causes an exception during zlib decompression.
        tc.formfile('edit_photo', 'photo',
                    photo('static/images/corrupted.png'))
        tc.submit()
        tc.code(200)
        self.assert_("Something went wrong while preparing this" in tc.show())
        p = Person.objects.get(user__username='******')
        self.assertFalse(p.photo.name)
        # an error message was logged during photo processing.
        self.assert_("zlib.error" in string_log.getvalue())
    finally:
        # BUG FIX: previously the handler was only removed after the final
        # assertion, so any failing assert leaked the handler onto the root
        # logger and polluted subsequent tests. Always detach it.
        logger.removeHandler(my_log)
def upload(self, input):
    """Submit a bed-format file through the upload1 tool runner form."""
    self.get("/tool_runner/index?tool_id=upload1")
    # Fill in the upload form: fixed file type, caller-supplied dbkey
    # (defaulting to the unspecified genome '?'), then the file itself.
    tc.fv("1", "file_type", "bed")
    tc.fv("1", "dbkey", input.get('dbkey', '?'))
    tc.formfile("1", "file_data", input['file_path'])
    tc.submit("runtool_btn")
    tc.code(200)
def test_file_with_invalid_id(self):
    """A CSV whose Id column holds a non-numeric value must be rejected."""
    csv_path = os.path.join(os.path.dirname(__file__),
                            'data/domain_2016-09-16_invalid_id.csv')
    tc.formfile(2, 'domains_emails', csv_path)
    tc.submit()
    # The error message should echo the bad value back to the user.
    tc.find(
        'Invalid value in column Id: "asdf9". It must be a whole number.')
def upload_file(self, fname, ftype='auto', dbkey='hg17'):
    """Uploads a file via the upload1 tool form, then returns home."""
    fname = self.get_fname(fname)
    tc.go("./tool_runner/index?tool_id=upload1")
    # Populate the form fields before attaching the file.
    tc.fv("1", "file_type", ftype)
    tc.fv("1", "dbkey", dbkey)
    tc.formfile("1", "file_data", fname)
    tc.submit("runtool_btn")
    self.home()
def test_set_avatar(self):
    """Uploading a PNG or JPG photo stores exactly that file on the profile."""
    self.login_with_twill()
    for image in (photo("static/sample-photo.png"),
                  photo("static/sample-photo.jpg")):
        url = "http://openhatch.org/people/1/"
        tc.go(make_twill_url(url))
        tc.follow("photo")
        tc.formfile("edit_photo", "photo", image)
        tc.submit()
        # Now check that the photo == what we uploaded.
        p = Person.objects.get(user__username="******")
        # BUG FIX: open(image).read() leaked the file handle on every
        # iteration; use a context manager so it is always closed.
        with open(image) as uploaded:
            self.assert_(p.photo.read() == uploaded.read())
def test_set_avatar_too_wide(self):
    """Photos wider than the limit are scaled down on upload."""
    self.login_with_twill()
    for image in [photo("static/images/too-wide.jpg"),
                  photo("static/images/too-wide.png")]:
        url = "http://openhatch.org/people/1/"
        tc.go(make_twill_url(url))
        tc.follow("photo")
        tc.formfile("edit_photo", "photo", image)
        tc.submit()
        # Verify the stored photo was resized to the expected 260px width.
        p = Person.objects.get(user__username="******")
        image_as_stored = mysite.base.depends.Image.open(p.photo.file)
        w, h = image_as_stored.size
        self.assertEqual(w, 260)
def test_set_avatar(self):
    """Uploading a PNG or JPG photo stores exactly that file on the profile."""
    self.login_with_twill()
    for image in (photo('static/sample-photo.png'),
                  photo('static/sample-photo.jpg')):
        url = 'http://openhatch.org/people/paulproteus/'
        tc.go(make_twill_url(url))
        tc.follow('photo')
        tc.formfile('edit_photo', 'photo', image)
        tc.submit()
        # Now check that the photo == what we uploaded.
        p = Person.objects.get(user__username='******')
        # BUG FIX: open(image).read() leaked the file handle on every
        # iteration; use a context manager so it is always closed.
        with open(image) as uploaded:
            self.assert_(p.photo.read() == uploaded.read())
def test_set_avatar_too_wide(self):
    """Photos wider than 200px are scaled down to 200px on upload."""
    self.login_with_twill()
    for image in [photo('static/images/too-wide.jpg'),
                  photo('static/images/too-wide.png')]:
        url = 'http://openhatch.org/people/paulproteus/'
        tc.go(make_twill_url(url))
        tc.follow('photo')
        tc.formfile('edit_photo', 'photo', image)
        tc.submit()
        # Now check that the photo is 200px wide.
        p = Person.objects.get(user__username='******')
        image_as_stored = Image.open(p.photo.file)
        w, h = image_as_stored.size
        self.assertEqual(w, 200)
def test_file_unicode_in_email(self):
    """An email with stray non-ASCII bytes must be reported as invalid."""
    # Arrange the backend mock to raise the validation error for this email.
    self.notif_mock.set_domain_outzone_unguarded_warning_emails.side_effect = \
        Registry.Notification.DOMAIN_EMAIL_VALIDATION_ERROR(
            [Registry.Notification.DomainEmail(
                domain_id=4, email='[email protected]\xc2\xa0')])
    csv_path = os.path.join(os.path.dirname(__file__),
                            'data/domain_2017-02-01_unicode_in_email.csv')
    tc.formfile(2, 'domains_emails', csv_path)
    tc.submit()
    # The page should list the offending address verbatim.
    tc.find(
        'The file contains these invalid emails: [email protected]\xc2\xa0'
        .decode('utf-8'))
def test_file_ok(self):
    """A valid CSV forwards all domain/email pairs to the backend and reports success."""
    csv_path = os.path.join(os.path.dirname(__file__),
                            'data/domain_2016-09-16_OK.csv')
    tc.formfile(2, 'domains_emails', csv_path)
    tc.submit()
    # Exactly one backend call carrying all three parsed records.
    expected_emails = [
        Registry.Notification.DomainEmail(domain_id=4, email='*****@*****.**'),
        Registry.Notification.DomainEmail(domain_id=9, email='*****@*****.**'),
        Registry.Notification.DomainEmail(domain_id=9, email='*****@*****.**'),
    ]
    assert_equal(self.notif_mock.mock_calls, [
        call.set_domain_outzone_unguarded_warning_emails(expected_emails)
    ])
    tc.find(
        r'Emails have been successfully saved. \(2 domains, 3 emails\)')
def annotate(params, proteins,
             url="http://signalfind.org/tatfind.html", force=False):
    """
    Interfaces with the TatFind web service at
    (http://signalfind.org/tatfind.html) to predict if protein sequences
    contain Twin-Arginine Translocation (Tat) signal peptides.

    Returns the annotated `proteins` mapping. Raw service output is cached
    in 'tatfind.out'; pass force=True to re-query the service.
    """
    # set the user-agent so web services can block us if they want ... :/
    python_version = sys.version.split()[0]
    agent("Python-urllib/%s (twill; inmembrane)" % python_version)

    outfn = 'tatfind.out'
    log_stderr("# TatFind(web) %s > %s" % (params['fasta'], outfn))

    if not force and os.path.isfile(outfn):
        log_stderr("# -> skipped: %s already exists" % outfn)
        # BUG FIX: file handles were opened without a guaranteed close;
        # 'with' closes them even if parsing raises.
        with open(outfn, 'r') as fh:
            proteins = parse_tatfind_output(fh, proteins)
        return proteins

    # dump extraneous output into this blackhole so we don't see it
    if not __DEBUG__:
        twill.set_output(StringIO.StringIO())

    go(url)
    if __DEBUG__:
        showforms()
    formfile("1", "seqFile", params["fasta"])
    submit()
    if __DEBUG__:
        show()

    tatfind_output = show()
    if __DEBUG__:
        log_stderr(tatfind_output)

    # write raw TatFind output to a file
    with open(outfn, 'w') as fh:
        fh.write(tatfind_output)

    proteins = parse_tatfind_output(tatfind_output.split("\n"), proteins)

    return proteins
def test_set_avatar(self):
    """
    Uploading a photo stores the file, exposes its URL to the photo-editor
    template, and generates a 40px-wide thumbnail.
    """
    self.login_with_twill()
    for image in [photo('static/sample-photo.png'),
                  photo('static/sample-photo.jpg')]:
        url = 'http://openhatch.org/people/paulproteus/'
        tc.go(make_twill_url(url))
        tc.follow('photo')
        tc.formfile('edit_photo', 'photo', image)
        tc.submit()
        # Now check that the photo == what we uploaded.
        p = Person.objects.get(user__username='******')
        # BUG FIX: open(image).read() leaked the file handle on every
        # iteration; use a context manager so it is always closed.
        with open(image) as uploaded:
            self.assert_(p.photo.read() == uploaded.read())

        response = self.login_with_client().get(
            reverse(mysite.account.views.edit_photo))
        self.assertEqual(
            response.context[0]['photo_url'], p.photo.url,
            "Test that once you've uploaded a photo via the photo editor, "
            "the template's photo_url variable is correct.")
        self.assert_(p.photo_thumbnail)
        thumbnail_as_stored = mysite.base.depends.Image.open(
            p.photo_thumbnail.file)
        w, h = thumbnail_as_stored.size
        self.assertEqual(w, 40)
def test_invalid_photo(self):
    """
    If the uploaded image is detected as being invalid, report a helpful
    message to the user. The photo is not added to the user's profile.
    """
    bad_image = tempfile.NamedTemporaryFile(delete=False)
    self.login_with_twill()
    try:
        # Write non-image bytes so Django's image validation rejects it.
        bad_image.write("garbage")
        bad_image.close()
        tc.go(make_twill_url("http://openhatch.org/people/1/"))
        tc.follow("photo")
        tc.formfile("edit_photo", "photo", bad_image.name)
        tc.submit()
        tc.code(200)
        self.assert_("The file you uploaded was either not an image or a "
                     "corrupted image" in tc.show())
        # The profile must be left without a photo.
        p = Person.objects.get(user__username="******")
        self.assertFalse(p.photo.name)
    finally:
        os.unlink(bad_image.name)
def upload(self, file):
    """Submit one bed-format file through the upload1 tool runner form."""
    self.get("/tool_runner/index?tool_id=upload1")
    # Fixed file type; attach the caller's file and run the tool.
    tc.fv("1", "file_type", "bed")
    tc.formfile("1", "file_data", file)
    tc.submit("runtool_btn")
    tc.code(200)
def annotate(params, proteins,
             url="http://rbf.bioinfo.tw/" +
                 "~sachen/OMPpredict/" +
                 "TMBETADISC-RBF-Content.html",
             force=False):
    """
    Interfaces with the TMBETADISC-RBF web service at
    (http://rbf.bioinfo.tw/~sachen/OMPpredict/TMBETADISC-RBF.php) to
    predict if protein sequence is likely to be an outer membrane
    beta-barrel.

    Note that the default URL we use it different to the regular form used
    by web browsers, since we need to bypass some AJAX fun.
    """
    # TODO: automatically split large sets into multiple jobs
    #       since TMBETADISC seems to not like more than
    #       ~5000 seqs at a time
    if len(proteins) >= 5000:
        log_stderr("# ERROR: TMBETADISC-RBF(web): tends to fail with > ~5000 sequences.")
        return

    # set the user-agent so web services can block us if they want ... :/
    python_version = sys.version.split()[0]
    agent("Python-urllib/%s (twill; inmembrane)" % python_version)

    outfn = 'tmbetadisc-rbf.out'
    log_stderr("# TMBETADISC-RBF(web) %s > %s" % (params['fasta'], outfn))

    if not force and os.path.isfile(outfn):
        log_stderr("# -> skipped: %s already exists" % outfn)
        with open(outfn, 'r') as fh:
            proteins = parse_tmbetadisc_output(fh.read(), proteins)
        return proteins

    # dump extraneous output into this blackhole so we don't see it
    if not __DEBUG__:
        twill.set_output(StringIO.StringIO())

    go(url)
    if __DEBUG__:
        showforms()
    formfile("1", "userfile", params["fasta"])
    fv("1", "format", "file")

    # set the user defined method
    method_map = {"aa": "Amino Acid Composition",
                  "dp": "Depipetide Composition",
                  "aadp": "Amino Acid & Depipetide Composition",
                  "pssm": "PSSM"}
    # BUG FIX: 'method' used to be undefined (NameError at fv() below)
    # whenever tmbetadisc_rbf_method was not set; default to "aa".
    method = method_map["aa"]
    if dict_get(params, 'tmbetadisc_rbf_method'):
        try:
            method = method_map[params['tmbetadisc_rbf_method']]
        except KeyError:
            # FIX: this message was a garbled, syntactically broken
            # multi-line string in the original source; reconstructed.
            log_stderr("# ERROR: Invalid setting from tmbetadisc_rbf_method. "
                       "Must be set to aa, dp, aadp or pssm.")
            sys.exit()

    fv("1", "select", method)
    submit()

    waiting_page = show()
    if __DEBUG__:
        log_stderr(waiting_page)

    # The waiting page links to the result page via a unique job name;
    # scrape that URL out of the raw HTML.
    for l in waiting_page.split('\n'):
        if l.find("TMBETADISC-RBF-action.php?UniqueName=") != -1:
            result_url = l.split("'")[1]

    time.sleep(5)
    go(result_url)

    output = show()
    if __DEBUG__:
        log_stderr(output)

    # write raw output to a file
    with open(outfn, 'w') as fh:
        fh.write(output)

    proteins = parse_tmbetadisc_output(output, proteins)

    return proteins
def test():
    """Exercise twill's form commands: error cases, regexp/ambiguous matches."""
    url = twilltestlib.get_url()

    # test empty page get_title
    namespaces.new_local_dict()
    twill.commands.reset_browser()
    browser = twill.get_browser()
    try:
        browser.get_title()
        assert 0, "should never get here"
    except BrowserStateError:
        pass

    ### now test a few special cases
    commands.go(url)
    commands.go('/login')
    commands.showforms()

    # test no matching forms
    try:
        commands.fv('2', 'submit', '1')
        assert 0
    except TwillAssertionError:
        pass

    # test regexp match
    commands.fv('1', '.*you', '1')

    # test ambiguous match to value
    commands.go('/testform')
    commands.fv('1', 'selecttest', 'val')
    commands.fv('1', 'selecttest', 'value1')
    commands.fv('1', 'selecttest', 'selvalue1')
    commands.formclear('1')
    try:
        commands.fv('1', 'selecttest', 'value')
        assert 0
    except ClientForm.ItemNotFoundError:
        pass

    # test ambiguous match to name
    commands.go('/testform')
    try:
        commands.fv('1', 'item_', 'value')
    except Exception:
        pass
    else:
        # BUG FIX: 'assert 0' used to sit inside the try block, where the
        # bare 'except Exception' swallowed the AssertionError, silently
        # hiding a failure. The 'else' clause only runs if fv() succeeded.
        assert 0

    try:
        commands.formfile('1', 'selecttest', 'null')
    except Exception:
        pass
    else:
        # BUG FIX: same swallowed-assert problem as above.
        assert 0

    commands.go('http://www.google.com/')
    browser.get_title()

    # test the twill script.
    twilltestlib.execute_twill_script('test-form.twill', initial_url=url)
def test_empty_file_uploaded(self):
    """Submitting an empty file must be rejected as a wrong format."""
    empty_path = os.path.join(os.path.dirname(__file__), 'data/empty_file')
    tc.formfile(2, 'domains_emails', empty_path)
    tc.submit()
    tc.find('Wrong file format.')
def annotate(params, proteins,
             url="http://services.cbu.uib.no/tools/bomp/", force=False):
    """
    Uses the BOMP web service (http://services.cbu.uib.no/tools/bomp/)
    to predict if proteins are outer membrane beta-barrels.

    Returns a {seqid: category} dict and annotates proteins[seqid]['bomp']
    with the integer category, or False when BOMP gave no prediction.
    Results are cached in 'bomp.out'; pass force=True to re-query.
    """
    # set the user-agent so web services can block us if they want ... :/
    python_version = sys.version.split()[0]
    agent("Python-urllib/%s (twill; inmembrane/%s)" %
          (python_version, inmembrane.__version__))

    bomp_out = 'bomp.out'
    log_stderr("# BOMP(web) %s > %s" % (params['fasta'], bomp_out))

    if not force and os.path.isfile(bomp_out):
        log_stderr("# -> skipped: %s already exists" % bomp_out)
        bomp_categories = {}
        # Each cached line ends with the integer category for one sequence.
        with open(bomp_out, 'r') as fh:
            for l in fh:
                words = l.split()
                bomp_category = int(words[-1:][0])
                seqid = parse_fasta_header(l)[0]
                proteins[seqid]['bomp'] = bomp_category
                bomp_categories[seqid] = bomp_category
        return bomp_categories

    # dump extraneous output into this blackhole so we don't see it
    if not __DEBUG__:
        twill.set_output(StringIO.StringIO())

    go(url)
    if __DEBUG__:
        showforms()
    formfile("1", "queryfile", params["fasta"])
    submit()
    if __DEBUG__:
        show()

    # extract the job id from the page
    links = showlinks()
    job_id = None
    for l in links:
        if l.url.find("viewOutput") != -1:
            # grab job id from "viewOutput?id=16745338"
            job_id = int(l.url.split("=")[1])

    if __DEBUG__:
        log_stderr("BOMP job id: %d" % job_id)

    if not job_id:
        # something went wrong
        log_stderr("# BOMP error: Can't find job id")
        return

    # parse the HTML table and extract categories
    go("viewOutput?id=%i" % (job_id))

    polltime = 10
    log_stderr("# Waiting for BOMP to finish .")
    while True:
        try:
            find("Not finished")
            log_stderr(".")
        except:
            # NOTE(review): bare except kept — twill's find() raises on a
            # miss, and the exception class isn't imported here. A miss
            # means the job is finished: pull down the result page.
            log_stderr(". done!\n")
            go("viewOutput?id=%i" % (job_id))
            if __DEBUG__:
                log_stderr(show())
            break

        # Not finished. We keep polling for a time until we give up,
        # doubling the wait each round.
        time.sleep(polltime)
        polltime = polltime * 2
        if polltime >= 7200:  # 2 hours
            log_stderr("# BOMP error: Taking too long.")
            return
        go("viewOutput?id=%i" % (job_id))
        if __DEBUG__:
            log_stderr(show())

    bomp_html = show()
    if __DEBUG__:
        log_stderr(bomp_html)

    # Results are in the only <table> on this page, formatted like:
    # <tr><th>gi|107836852|gb|ABF84721.1<th>5</tr>
    soup = BeautifulSoup(bomp_html)
    bomp_categories = {}  # dictionary of {name, category} pairs
    for tr in soup.findAll('tr')[1:]:
        n, c = tr.findAll('th')
        name = parse_fasta_header(n.text.strip())[0]
        category = int(c.text)
        bomp_categories[name] = category

    # write BOMP results to a tab delimited file
    with open(bomp_out, 'w') as fh:
        for k, v in bomp_categories.iteritems():
            fh.write("%s\t%i\n" % (k, v))

    if __DEBUG__:
        log_stderr(str(bomp_categories))

    # label proteins with bomp classification (int) or False
    for name in proteins:
        if "bomp" not in proteins[name]:
            if name in bomp_categories:
                category = int(bomp_categories[name])
                proteins[name]['bomp'] = category
            else:
                proteins[name]['bomp'] = False

    if __DEBUG__:
        log_stderr(str(proteins))

    return bomp_categories
def upload_list(browser, pagename, uploads): # get the file sizes for later comparison. filesizes = [] for fn in uploads: filesizes.append(os.stat(fn)[stat.ST_SIZE]) filesizes.reverse() # because they get listed newest first. # Upload copy #1. t.go(host + "index.php/Special:Upload") t.formfile("1", "wpUploadFile", uploads[0]) t.fv("1", "wpDestFile", pagename) t.fv("1", "wpUploadDescription", "Uploading %s" % pagename) t.submit("wpUpload") # Verify that we succeeded. t.find("File:%s" % pagename) for fn in uploads[1:]: # propose that we upload a replacement t.go(host + "index.php?title=Special:Upload&wpDestFile=%s&wpForReUpload=1" % pagename) t.formfile("1", "wpUploadFile", fn) t.fv("1", "wpUploadDescription", "Uploading %s as %s" % (fn, pagename)) t.submit("wpUpload") # get the URLs for the thumbnails urls = [] for url in re.finditer( r'<td><a href="([^"]*?)"><img alt="Thumbnail for version .*?" src="(.*?)"', browser.get_html() ): urls.append(url.group(1)) urls.append(url.group(2)) print filesizes for i, url in enumerate(urls): t.go(url) if i % 2 == 0 and len(browser.get_html()) != filesizes[i / 2]: print i, len(browser.get_html()), filesizes[i / 2] t.find("Files differ in size") t.code("200") t.back() # delete all versions t.go(host + "index.php?title=File:%s&action=delete" % pagename) # after we get the confirmation page, commit to the action. t.fv("1", "wpReason", "Test Deleting...") t.submit("mw-filedelete-submit") # make sure that we can't visit their URLs. for i, url in enumerate(urls): t.go(url) if 0 and i % 2 == 1 and i > 0 and browser.get_code() == 200: # bug 30192: the archived file's thumbnail doesn't get deleted. print "special-casing the last URL" continue t.code("404") # restore the current and archived version. t.go(host + "index.php/Special:Undelete/File:%s" % pagename) t.fv("1", "wpComment", "Test Restore") t.submit("restore") # visit the page to make sure that the thumbs get re-rendered properly. 
# when we get the 404 handler working correctly, this won't be needed. t.go(host + "index.php?title=File:%s" % pagename) # make sure that they got restored correctly. for i, url in enumerate(urls): t.go(url) if i % 2 == 0 and len(browser.get_html()) != filesizes[i / 2]: t.find("Files differ in size") t.code("200") t.back() if len(uploads) != 2: return match = re.search(r'"([^"]+?)" title="[^"]+?">revert', browser.get_html()) if not match: t.find("revert") t.go(match.group(1).replace("&", "&"))
def test_data_uploads(self):
    """End-to-end data upload: create a project, upload files, edit, add results, clean up."""
    # data upload test
    name = 'Upload-test-name'
    self.create_project(name)
    tc.follow(name)

    # find the project id from the Edit link's URL
    url = tc.follow('Edit')
    pid = url.split("/")[-2]
    tc.go("/data/upload/simple/%s/" % pid)

    # upload three fixture files in one form submission
    tc.formfile("1", "File1", conf.testdata('short-data.bed'))
    tc.formfile("1", "File2", conf.testdata('short-good-input.gtrack'))
    tc.formfile("1", "File3", conf.testdata('readcounts.png'))
    tc.submit()

    # verify uploads
    tc.find("short-data.bed")
    tc.find("short-good-input.gtrack")
    tc.find("readcounts.png")

    # visit the dataset
    tc.follow("short-good-input.gtrack")
    tc.find("waiting")

    # edit the dataset
    tc.follow("Edit")
    tc.fv("1", "name", "short-good-input.gtrack")
    tc.fv("1", "info", "extra-info")
    tc.submit()
    tc.find("extra-info")

    # upload two results for it
    tc.follow("Add results")
    tc.formfile("1", "content", conf.testdata('short-data.bed'))
    tc.formfile("1", "image", conf.testdata('readcounts.png'))
    tc.submit()
    tc.follow("short-data.bed")
    tc.back()

    # upload one image
    tc.follow("Add results")
    tc.formfile("1", "image", conf.testdata('shift.png'))
    tc.submit()
    tc.follow("shift.png")
    tc.back()

    # back to project view
    tc.follow("Project view")
    self.delete_project(name)