def download_file(url):
    """Download a webm file and return the temporary file object."""
    try:
        u = urlopen(url)
    except URLError as e:
        print("Error: {} while opening webm url: {}".format(e, url))
        raise URLError('Link open error')
    file_size = int(u.getheader("Content-Length"))
    if file_size > MAX_SIZE:
        raise Exception(
            "WEBM size is too big. Allowed: {0}, File size: {1}".format(
                MAX_SIZE, file_size))
    f = NamedTemporaryFile('w+b', suffix=".webm")
    print("Downloading: WEBM: {} Bytes: {}".format(url, file_size))
    file_size_dl = 0
    block_sz = 8192
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        file_size_dl += len(buffer)
        f.write(buffer)
        # status = "{:10d} [{:3.2f}%]".format(file_size_dl, file_size_dl * 100. / file_size)
        # status = status + chr(8) * (len(status) + 1)
        # print(status)
    print("Downloaded WEBM: {}".format(url))
    return f
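# A minimal usage sketch for download_file above, not part of the original
# source. It assumes urlopen/URLError come from urllib.request/urllib.error,
# NamedTemporaryFile from tempfile, and a module-level MAX_SIZE constant;
# the URL below is hypothetical.
if __name__ == "__main__":
    tmp = download_file("https://example.com/clip.webm")  # hypothetical URL
    tmp.seek(0)   # the file position sits at EOF after writing; rewind to read
    payload = tmp.read()
    print("read {} bytes back from the temp file".format(len(payload)))
    tmp.close()   # NamedTemporaryFile deletes the file on close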
def readHTML(self):
    '''
    Parse the html file and store its information on the movie instance.
    @return: no return
    '''
    ### download html file
    html_url = u.catHTML('movie', self.__id)
    status = u.downloadHTML(html_url)
    ### parse html file
    if status:
        movie_info = parseHTML(self.__id)
        self.__title = movie_info['title']
        self.__original_title = movie_info['original_title']
        self.__rating_ave = movie_info['rating_ave']
        self.__rating_count = movie_info['rating_count']
        self.__rating_5 = movie_info['rating_5']
        self.__rating_4 = movie_info['rating_4']
        self.__rating_3 = movie_info['rating_3']
        self.__rating_2 = movie_info['rating_2']
        self.__rating_1 = movie_info['rating_1']
        self.__wish_count = movie_info['wish_count']
        self.__viewed_count = movie_info['viewed_count']
        self.__comment_count = movie_info['comment_count']
        self.__review_count = movie_info['review_count']
        self.__subtype = movie_info['subtype']
        self.__director = movie_info['director']
        self.__pubdate = movie_info['pubdate']
        self.__year = movie_info['year']
        self.__duration = movie_info['duration']
        self.__episode = movie_info['episode']
        self.__country = movie_info['country']
        self.__intro = movie_info['intro']
        self.__update_date = movie_info['update_date']
    else:
        raise URLError('Fetching {} failed.'.format(html_url))
def run(self):
    """
    Downloads, unzips and installs chromedriver.
    If a chromedriver binary is found in PATH it will be copied, otherwise downloaded.
    """
    chromedriver_version = '78.0.3904.11'
    chromedriver_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'chromedriver_binary')
    chromedriver_filename = find_binary_in_path(get_chromedriver_filename())
    if chromedriver_filename and check_version(chromedriver_filename, chromedriver_version):
        print("\nChromedriver already installed at {}...\n".format(chromedriver_filename))
        new_filename = os.path.join(chromedriver_dir, get_chromedriver_filename())
        self.copy_file(chromedriver_filename, new_filename)
    else:
        chromedriver_bin = get_chromedriver_filename()
        chromedriver_filename = os.path.join(chromedriver_dir, chromedriver_bin)
        if not os.path.isfile(chromedriver_filename) or not check_version(chromedriver_filename, chromedriver_version):
            print("\nDownloading Chromedriver...\n")
            if not os.path.isdir(chromedriver_dir):
                os.mkdir(chromedriver_dir)
            url = get_chromedriver_url(version=chromedriver_version)
            try:
                response = urlopen(url)
                if response.getcode() != 200:
                    raise URLError('Not Found')
            except URLError:
                raise RuntimeError('Failed to download chromedriver archive: {}'.format(url))
            archive = BytesIO(response.read())
            with zipfile.ZipFile(archive) as zip_file:
                zip_file.extract(chromedriver_bin, chromedriver_dir)
        else:
            print("\nChromedriver already installed at {}...\n".format(chromedriver_filename))
        if not os.access(chromedriver_filename, os.X_OK):
            os.chmod(chromedriver_filename, 0o744)
    build_py.run(self)
def open(self, opener=urlopen):
    if self.uri is not None:
        if isinstance(opener, OpenerDirector):
            opener = opener.open
        return opener(self.uri)
    else:
        raise URLError(self.uri)
def https_open(self, request):
    try:
        connection = self.get_free_connection()
        while connection:
            response = self.make_request(connection, request)
            if response is not None:
                break
            else:
                connection.close()
                self.remove_connection(connection)
                connection = self.get_free_connection()
        else:
            # no free connection yielded a response: open a fresh one
            connection = self.get_new_connection()
            response = self.make_request(connection, request)
    except (socket.error, HTTPException) as exc:
        raise URLError(exc)
    else:
        if response.raw.will_close:
            self.remove_connection(connection)
        return response
def open_local_file(self, req):
    host = req.get_host()
    file = req.get_selector()
    localfile = urllib.url2pathname(file)
    stats = os.stat(localfile)
    size = stats[stat.ST_SIZE]
    modified = email.utils.formatdate(stats[stat.ST_MTIME])
    mtype = mimetypes.guess_type(file)[0]
    if host:
        host, port = urllib.splitport(host)
        if port or socket.gethostbyname(host) not in self.get_names():
            raise URLError('file not on local host')
    fo = open(localfile, 'rb')
    brange = req.headers.get('Range', None)
    brange = range_header_to_tuple(brange)
    assert brange != ()
    if brange:
        (fb, lb) = brange
        if lb == '':
            lb = size
        if fb < 0 or fb > size or lb > size:
            raise RangeError(9, 'Requested Range Not Satisfiable')
        size = (lb - fb)
        fo = RangeableFileObject(fo, (fb, lb))
    headers = email.message_from_string(
        'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
        (mtype or 'text/plain', size, modified))
    return urllib.addinfourl(fo, headers, 'file:' + file)
def test_unreachable_url(monkeypatch):
    """Test that ValueError is raised when the URL is unreachable, using a mock."""
    mock = MagicMock()
    mock.side_effect = URLError(404)
    monkeypatch.setattr("homework_4.task_2_mock_input.Request", mock)
    with pytest.raises(ValueError, match="Unreachable"):
        count_dots_on_i(mock)
def s3_open(self, req):
    # The implementation was inspired mainly by the code behind
    # urllib.request.FileHandler.file_open().
    #
    # recipe copied from:
    # http://code.activestate.com/recipes/578957-urllib-handler-for-amazon-s3-buckets/
    # converted to boto3
    if version_info[0] < 3:
        bucket_name = req.get_host()
        key_name = url2pathname(req.get_selector())[1:]
    else:
        bucket_name = req.host
        key_name = url2pathname(req.selector)[1:]
    if not bucket_name or not key_name:
        raise URLError('url must be in the format s3://<bucket>/<key>')
    s3 = boto3.resource('s3')
    key = s3.Object(bucket_name, key_name)
    client = boto3.client('s3')
    obj = client.get_object(Bucket=bucket_name, Key=key_name)
    filelike = _FileLikeKey(obj['Body'])
    origurl = 's3://{}/{}'.format(bucket_name, key_name)
    if key is None:
        raise URLError('no such resource: {}'.format(origurl))
    headers = [
        ('Content-type', key.content_type),
        ('Content-encoding', key.content_encoding),
        ('Content-language', key.content_language),
        ('Content-length', key.content_length),
        ('Etag', key.e_tag),
        ('Last-modified', key.last_modified),
    ]
    headers = email.message_from_string('\n'.join(
        '{}: {}'.format(name, value) for name, value in headers
        if value is not None))
    return addinfourl(filelike, headers, origurl)
def default_open(self, req):
    if isinstance(req, Request):
        url = req.full_url
    else:
        url = req
    for prefix, head in self.path_map.items():
        if url.startswith(prefix):
            tail = url[len(prefix):]
            return open(os.path.join(head, tail))
    raise URLError(url)
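# Hedged sketch (not in the source) of how a path-map handler like the one
# above is typically wired into urllib: build_opener() consults each handler's
# default_open in turn. The class name LocalStubHandler and the mapping are
# illustrative assumptions.
from urllib.request import BaseHandler, build_opener

class LocalStubHandler(BaseHandler):
    def __init__(self, path_map):
        self.path_map = path_map  # URL prefix -> local directory
    # the default_open method shown above would be defined here

opener = build_opener(LocalStubHandler({"http://example.com/static/": "/tmp/static/"}))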
def test_negative_case_count_dots_on_i(monkeypatch):
    """
    Passes test if `count_dots_on_i(URL)` raises `ValueError("Unreachable URL")`.
    """
    monkeypatch.setattr(
        "homework_4.tasks.task_2.urlopen",
        MagicMock(side_effect=URLError(404)),
    )
    with pytest.raises(ValueError, match=f"Unreachable {URL}"):
        count_dots_on_i(URL)
def do_auth_capture(request):
    req = create_authnet_checkout_request(request)
    gcontext = ssl.SSLContext()
    try:
        response_xml = urlopen(req, context=gcontext)
        # print(response_xml.read())
    except HTTPError:
        # HTTPError requires (url, code, msg, hdrs, fp), so it cannot be
        # rebuilt from a single argument; re-raise the original instead.
        raise
    except URLError as err:
        raise URLError(err)
    return response_xml
def s3_open(self, req):
    # The implementation was inspired mainly by the code behind
    # urllib.request.FileHandler.file_open().
    bucket_name = req.host
    key_name = url2pathname(req.selector)[1:]
    if not bucket_name or not key_name:
        raise URLError('url must be in the format s3://<bucket>/<key>')
    try:
        conn = self._conn
    except AttributeError:
        conn = self._conn = boto.s3.connection.S3Connection()
    bucket = conn.get_bucket(bucket_name, validate=False)
    key = bucket.get_key(key_name)
    origurl = 's3://{}/{}'.format(bucket_name, key_name)
    if key is None:
        raise URLError('no such resource: {}'.format(origurl))
    headers = [
        ('Content-type', key.content_type),
        ('Content-encoding', key.content_encoding),
        ('Content-language', key.content_language),
        ('Content-length', key.size),
        ('Etag', key.etag),
        ('Last-modified', key.last_modified),
    ]
    headers = email.message_from_string('\n'.join(
        '{}: {}'.format(name, value) for name, value in headers
        if value is not None))
    return addinfourl(_FileLikeKey(key), headers, origurl)
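# Hedged usage sketch, not in the source: urllib dispatches <scheme>_open
# methods by name, so a BaseHandler subclass exposing the s3_open above will
# serve s3:// URLs. The S3Handler name and the bucket/key are assumptions.
from urllib.request import BaseHandler, build_opener

class S3Handler(BaseHandler):
    s3_open = s3_open  # reuse the function above as the handler method (assumed wiring)

opener = build_opener(S3Handler())
# response = opener.open('s3://my-bucket/some/key')  # needs AWS credentials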
def run(self):
    """
    Downloads, unzips and installs geckodriver.
    If a geckodriver binary is found in PATH it will be copied, otherwise downloaded.
    """
    geckodriver_version = "@@GECKODRIVER_VERSION@@"
    geckodriver_dir = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), 'geckodriver_binary')
    geckodriver_filename = find_binary_in_path(get_geckodriver_filename())
    if geckodriver_filename and check_version(geckodriver_filename, geckodriver_version):
        print("\ngeckodriver already installed at {}...\n".format(geckodriver_filename))
        new_filename = os.path.join(geckodriver_dir, get_geckodriver_filename())
        self.copy_file(geckodriver_filename, new_filename)
    else:
        geckodriver_bin = get_geckodriver_filename()
        geckodriver_filename = os.path.join(geckodriver_dir, geckodriver_bin)
        if not os.path.isfile(geckodriver_filename) or not check_version(
                geckodriver_filename, geckodriver_version):
            print("\nDownloading geckodriver...\n")
            if not os.path.isdir(geckodriver_dir):
                os.mkdir(geckodriver_dir)
            url = get_geckodriver_url(version=geckodriver_version)
            try:
                response = urlopen(url)
                if response.getcode() != 200:
                    raise URLError('Not Found')
            except URLError:
                raise RuntimeError('Failed to download geckodriver archive: {}'.format(url))
            archive = BytesIO(response.read())
            if url.endswith(".zip"):
                with zipfile.ZipFile(archive) as zip_file:
                    zip_file.extract(geckodriver_bin, geckodriver_dir)
            elif url.endswith(".tar.gz"):
                with tarfile.open(fileobj=archive, mode="r:gz") as tar_file:
                    tar_file.extractall(path=geckodriver_dir)
        else:
            print("\ngeckodriver already installed at {}...\n".format(geckodriver_filename))
        if not os.access(geckodriver_filename, os.X_OK):
            os.chmod(geckodriver_filename, 0o744)
    build_py.run(self)
def get_file_from_mirrors(file_name, version, schema):
    mirrors = [
        # GitHub enforces HTTPS
        'https://github.com/sciapp/gr/releases/download/v{version}/'.format(version=version),
        '{schema}://gr-framework.org/downloads/'.format(schema=schema)
    ]
    urls = []
    for mirror in mirrors:
        urls.append(mirror + file_name)
    for url in urls:
        try:
            response = urlopen(url)
        except Exception:
            continue
        if response.getcode() == 200:
            return response.read()
    raise URLError('Failed to download file from: ' + ', '.join(urls))
def get_latest_release_for_version(version=None):
    """
    Searches for the latest release (complete version string) for a given major `version`.
    If `version` is None the latest release is returned.
    :param version: Major version number or None
    :return: Latest release for given version
    """
    release_url = "https://chromedriver.storage.googleapis.com/LATEST_RELEASE"
    if version:
        release_url += '_{}'.format(version)
    try:
        response = urlopen(release_url)
        if response.getcode() != 200:
            raise URLError('Not Found')
        return response.read().decode('utf-8').strip()
    except URLError:
        raise RuntimeError('Failed to find release information: {}'.format(release_url))
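# Hedged usage sketch for get_latest_release_for_version above; each call hits
# the chromedriver release endpoint over the network, and the example version
# strings are illustrative only.
latest = get_latest_release_for_version()          # e.g. '78.0.3904.70'
latest_78 = get_latest_release_for_version('78')   # latest release of major version 78
print(latest, latest_78)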
def test_survey(self, mock_get, mock_urlopen, mock_send_job):
    json_file_path = os.path.join(os.path.dirname(__file__), "test_transcriptome_species.json")
    with open(json_file_path, "r") as json_file:
        species_json = json.load(json_file)

    # Insert the organisms into the database so the model doesn't call the
    # taxonomy API to populate them.
    for species in species_json:
        # Account for the subtle difference between the API for
        # the main Ensembl division and the API for the rest of
        # them.
        name_key = "common_name" if "common_name" in species else "name"
        taxonomy_key = "taxonomy_id" if "taxonomy_id" in species else "taxon_id"
        organism = Organism(name=species[name_key].upper(),
                            taxonomy_id=species[taxonomy_key],
                            is_scientific_name=True)
        organism.save()

    mock_get.return_value = Mock(ok=True)
    mock_get.return_value.json.return_value = species_json
    # There are two possible file locations. The correct one is
    # determined by making a request to one to see if it
    # exists. This URLError simulates it not existing.
    mock_urlopen.side_effect = URLError("404 or something")

    surveyor = TranscriptomeIndexSurveyor(self.survey_job)
    surveyor.survey()

    downloader_jobs = DownloaderJob.objects.order_by("id").all()
    self.assertEqual(downloader_jobs.count(), len(species_json))
    send_job_calls = []
    for downloader_job in downloader_jobs:
        send_job_calls.append(call(Downloaders.TRANSCRIPTOME_INDEX, downloader_job.id))
    mock_send_job.assert_has_calls(send_job_calls)

    # There should be 2 Batches for each species (long and short
    # transcriptome lengths).
    batches = Batch.objects.all()
    self.assertEqual(batches.count(), len(species_json) * 2)
    # And each batch has two files: fasta and gtf
    for batch in batches:
        self.assertEqual(len(batch.files), 2)
def validated_mpesa_access_token():
    url = MpesaC2bCredential.API_URL
    auth = base64.b64encode(
        bytes('%s:%s' % (MpesaC2bCredential.consumer_key,
                         MpesaC2bCredential.consumer_secret), 'ascii'))
    req = Request(url)
    req.add_header("Authorization", "Basic %s" % auth.decode('utf-8'))
    try:
        result = urlopen(req).read()
    except HTTPError:
        # HTTPError is a subclass of URLError, so it must be caught first;
        # re-raise as-is rather than rebuilding it from a single argument.
        raise
    except URLError as err:
        raise URLError(err)
    r = result.decode(encoding='utf-8', errors='ignore')
    mpesa_access_token = json.loads(r)
    return mpesa_access_token['access_token']
def run(self):
    """
    Downloads, unzips and installs GKS, GR and GR3 binaries.
    """
    build_py.run(self)
    base_path = os.path.realpath(self.build_lib)
    if runtime_helper.load_runtime(silent=True) is None:
        version = _runtime_version
        operating_system = DownloadBinaryDistribution.detect_os()
        if operating_system is not None:
            arch = DownloadBinaryDistribution.detect_architecture()
            # download binary distribution for system
            file_name = 'gr-{version}-{os}-{arch}.tar.gz'.format(
                version=version, os=operating_system, arch=arch)
            distribution_url = 'http://gr-framework.org/downloads/' + file_name
            response = urlopen(distribution_url)
            if response.getcode() != 200:
                raise URLError('GR runtime not found on: ' + distribution_url)
            # wrap response as file-like object
            tar_gz_data = BytesIO(response.read())
            expected_hash = DownloadBinaryDistribution.get_expected_hash(version, file_name)
            calculated_hash = hashlib.sha512(tar_gz_data.read()).hexdigest()
            tar_gz_data.seek(0)
            if calculated_hash != expected_hash:
                raise RuntimeError("Downloaded binary distribution of GR runtime does not match expected hash")
            # extract shared libraries from downloaded .tar.gz archive
            tar_gz_file = tarfile.open(fileobj=tar_gz_data)
            try:
                for member in tar_gz_file.getmembers():
                    tar_gz_file.extract(member, base_path)
            finally:
                tar_gz_file.close()
        if sys.platform == 'win32':
            search_dir = os.path.join(base_path, 'gr', 'bin')
        else:
            search_dir = os.path.join(base_path, 'gr', 'lib')
        if runtime_helper.load_runtime(search_dirs=[search_dir], silent=False) is None:
            raise RuntimeError("Unable to install GR runtime")
def read_and_report_progress(file_in, file_out, name, content_length, logger,
                             chunk_size=1048576):
    tb = 0
    while True:
        data = file_in.read(chunk_size)  # renamed from "bytes" to avoid shadowing the builtin
        if data:
            file_out.write(data)
        else:
            break
        tb += len(data)
        if content_length:
            msg = 'Fetching %s, %.3g of %.3g Mbytes received' % (name, tb / 1048576, content_length / 1048576)
        else:
            msg = 'Fetching %s, %.3g Mbytes received' % (name, tb / 1048576)
        logger.status(msg)
    if content_length is not None and tb != content_length:
        # In ChimeraX bug #2747 zero bytes were read and no error reported.
        from urllib.error import URLError  # URLError lives in urllib.error
        raise URLError('Got %d bytes when %d were expected' % (tb, content_length))
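# Hedged usage sketch for read_and_report_progress above, using in-memory
# streams and a stub logger; any object with a status(msg) method should work.
# The stream contents and sizes are illustrative only.
from io import BytesIO

class _PrintLogger:
    def status(self, msg):
        print(msg)

src, dst = BytesIO(b"x" * 3000000), BytesIO()
read_and_report_progress(src, dst, "example.bin", 3000000, _PrintLogger(),
                         chunk_size=1048576)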
def get_file_from_mirrors(file_name, version, schema):
    mirrors = [
        '{schema}://gr-framework.org/downloads/'.format(schema=schema)
    ]
    if version != 'latest':
        # GitHub only hosts release builds
        # GitHub should be preferred
        # GitHub enforces HTTPS
        mirrors.insert(
            0, 'https://github.com/sciapp/gr/releases/download/v{version}/'.format(version=version))
    urls = []
    for mirror in mirrors:
        urls.append(mirror + file_name)
    for url in urls:
        try:
            response = urlopen(url)
        except Exception:
            continue
        if response.getcode() == 200:
            return response.read()
    raise URLError('Failed to download file from: ' + ', '.join(urls))
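# Hedged usage sketch for get_file_from_mirrors above; the archive name is
# hypothetical. URLError propagates only after every mirror has been tried,
# so callers can catch it to report all attempted URLs at once.
try:
    payload = get_file_from_mirrors('gr-latest-Linux-x86_64.tar.gz', 'latest', 'https')
except URLError as exc:  # assumes URLError imported from urllib.error
    print('all mirrors failed:', exc)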
def check_if_usl_forecast_exists(config, stid, run, forecast_date):
    """
    Checks the parent USL directory to see if USL has run for specified stid and
    run time. This avoids the server automatically returning a "close enough"
    date instead of an "Error 300: multiple choices."
    """
    run_date = (forecast_date - timedelta(days=1)).replace(hour=int(run))
    run_strtime = run_date.strftime('%Y%m%d_%H')
    api_url = 'http://www.microclimates.org/forecast/{}/'.format(stid)
    req = Request(api_url)
    try:
        response = urlopen(req)
    except HTTPError:
        if config['debug'] > 9:
            print("usl: forecast for %s at run time %s doesn't exist" % (stid, run_date))
        raise
    page = response.read().decode('utf-8', 'ignore')
    # Look for string of USL run time in the home menu for this station ID (equal to -1 if not found)
    if page.find(run_strtime) == -1:
        if config['debug'] > 9:
            print("usl: forecast for %s at run time %s hasn't run yet" % (stid, run_date))
        raise URLError("- usl: no correct date/time choice")
def get_genome_download_link(self, name, mask="soft", **kwargs):
    """
    Return Ensembl ftp link to the genome sequence

    Parameters
    ----------
    name : str
        Genome name. Current implementation will fail if exact name is not found.

    Returns
    -------
    tuple
        (name, link) where name is the Ensembl dbname identifier and
        link is a str with the ftp download link.
    """
    genome_info = self._get_genome_info(name)

    # parse the division
    division = genome_info["division"].lower().replace("ensembl", "")
    if division == "bacteria":
        raise NotImplementedError("bacteria from ensembl not yet supported")

    ftp_site = "ftp://ftp.ensemblgenomes.org/pub"
    if division == "vertebrates":
        ftp_site = "https://ftp.ensembl.org/pub"

    version = self.version
    if kwargs.get("version", None):
        version = kwargs.get("version")
    elif not version:
        version = self.get_version(ftp_site)

    if division != "vertebrates":
        base_url = "/{}/release-{}/fasta/{}/dna/"
        ftp_dir = base_url.format(division, version, genome_info["url_name"].lower())
        url = "{}/{}".format(ftp_site, ftp_dir)
    else:
        base_url = "/release-{}/fasta/{}/dna/"
        ftp_dir = base_url.format(version, genome_info["url_name"].lower())
        url = "{}/{}".format(ftp_site, ftp_dir)

    def get_url(level="toplevel"):
        pattern = "dna.{}".format(level)
        if mask == "soft":
            pattern = "dna_sm.{}".format(level)
        elif mask == "hard":
            pattern = "dna_rm.{}".format(level)
        asm_url = "{}/{}.{}.{}.fa.gz".format(
            url,
            genome_info["url_name"].capitalize(),
            re.sub(r"\.p\d+$", "", self.safe(genome_info["assembly_name"])),
            pattern,
        )
        return asm_url

    # first try the (much smaller) primary assembly, otherwise use the toplevel assembly
    try:
        if kwargs.get("toplevel", False):
            raise URLError("skipping primary assembly check")
        asm_url = get_url("primary_assembly")
        with urlopen(asm_url):
            pass
    except URLError:
        asm_url = get_url()

    return self.safe(genome_info["assembly_name"]), asm_url
def test_read_url_exception(self):
    """Test read_url raises exception."""
    self.url_mock.urlopen.side_effect = URLError("FakeError")
    with self.assertRaises(Exception):
        implementation.read_url("FakeURL")
def http_error_416(self, req, fp, code, msg, hdrs):
    raise URLError('Requested Range Not Satisfiable')
class TestUpload(moves.unittest.TestCase):
    def setUp(self):
        self.package = PackageDescription.from_string("""\
Name: foo
""")
        self.cwd = tempfile.mkdtemp()
        try:
            self.old_cwd = os.getcwd()
            os.chdir(self.cwd)
            filename = op.join(self.cwd, "foo.bin")
            fp = open(filename, "wb")
            try:
                fp.write(six.b("garbage"))
            finally:
                fp.close()
        except:
            shutil.rmtree(self.cwd)
            raise

    def tearDown(self):
        os.chdir(self.old_cwd)
        shutil.rmtree(self.cwd)

    def test_upload_post_data(self):
        post_data = build_upload_post_data("foo.bin", "bdist_dumb", self.package)
        self.assertEqual(post_data[":action"], "file_upload")
        self.assertEqual(post_data["content"], ("foo.bin", six.b("garbage")))

    def test_signing(self):
        self.assertRaises(NotImplementedError, build_upload_post_data,
                          "foo.bin", "bdist_dumb", self.package, True)

    def test_build_request(self):
        repository = "http://localhost"
        post_data = build_upload_post_data("foo.bin", "bdist_dumb", self.package)
        request = build_request(repository, post_data, "dummy_auth")
        r_headers = {
            "Content-type": six.b("multipart/form-data; boundary=--------------GHSKFJDLGDS7543FJKLFHRE75642756743254"),
            "Content-length": "2238",
            "Authorization": "dummy_auth"
        }
        self.assertEqual(request.headers, r_headers)

    @mock.patch("bento.pypi.upload_utils.urlopen", lambda request: MockedResult(200, ""))
    def test_upload(self):
        config = PyPIConfig("john", "password", repository="http://localhost")
        upload("foo.bin", "bdist_dumb", self.package, config)

    @mock.patch("bento.pypi.upload_utils.urlopen",
                my_urlopen_factory(HTTPError("", 404, "url not found", {}, six.moves.StringIO())))
    def test_upload_error_404(self):
        config = PyPIConfig("john", "password", repository="http://localhost")
        self.assertRaises(bento.errors.PyPIError, upload, "foo.bin",
                          "bdist_dumb", self.package, config)

    @mock.patch("bento.pypi.upload_utils.urlopen", my_urlopen_factory(URLError("dummy")))
    def test_upload_error_no_host(self):
        config = PyPIConfig("john", "password", repository="http://llocalhost")
        self.assertRaises(URLError, upload, "foo.bin", "bdist_dumb",
                          self.package, config)

    @mock.patch("bento.pypi.upload_utils.urlopen", lambda request: MockedResult(200, ""))
    def test_upload_auth(self):
        config = PyPIConfig("john", "password", repository="http://localhost")
        self.assertRaises(NotImplementedError, upload, "foo.bin",
                          "bdist_dumb", self.package, config, True)
def ftp_open(self, req):
    host = req.get_host()
    if not host:
        raise IOError('ftp error', 'no host given')
    host, port = splitport(host)
    if port is None:
        port = ftplib.FTP_PORT
    else:
        port = int(port)
    # username/password handling
    user, host = splituser(host)
    if user:
        user, passwd = splitpasswd(user)
    else:
        passwd = None
    host = unquote(host)
    user = unquote(user or '')
    passwd = unquote(passwd or '')
    try:
        host = socket.gethostbyname(host)
    except socket.error as msg:
        raise URLError(msg)
    path, attrs = splitattr(req.get_selector())
    dirs = path.split('/')
    dirs = list(map(unquote, dirs))  # list() so the result can be sliced on Python 3
    dirs, file = dirs[:-1], dirs[-1]
    if dirs and not dirs[0]:
        dirs = dirs[1:]
    try:
        fw = self.connect_ftp(user, passwd, host, port, dirs)
        type = file and 'I' or 'D'
        for attr in attrs:
            attr, value = splitattr(attr)
            if attr.lower() == 'type' and \
                    value in ('a', 'A', 'i', 'I', 'd', 'D'):
                type = value.upper()
        # -- range support modifications start here
        rest = None
        range_tup = range_header_to_tuple(req.headers.get('Range', None))
        assert range_tup != ()
        if range_tup:
            (fb, lb) = range_tup
            if fb > 0:
                rest = fb
        # -- range support modifications end here
        fp, retrlen = fw.retrfile(file, type, rest)
        # -- range support modifications start here
        if range_tup:
            (fb, lb) = range_tup
            if lb == '':
                if retrlen is None or retrlen == 0:
                    raise RangeError(9, 'Requested Range Not Satisfiable due to unobtainable file length.')
                lb = retrlen
                retrlen = lb - fb
                if retrlen < 0:
                    # beginning of range is larger than file
                    raise RangeError(9, 'Requested Range Not Satisfiable')
            else:
                retrlen = lb - fb
                fp = RangeableFileObject(fp, (0, retrlen))
        # -- range support modifications end here
        headers = ""
        mtype = mimetypes.guess_type(req.get_full_url())[0]
        if mtype:
            headers += "Content-Type: %s\n" % mtype
        if retrlen is not None and retrlen >= 0:
            headers += "Content-Length: %d\n" % retrlen
        sf = StringIO(headers)
        headers = mimetools.Message(sf)
        return addinfourl(fp, headers, req.get_full_url())
    except ftplib.all_errors as msg:
        raise IOError('ftp error', msg).with_traceback(sys.exc_info()[2])
def test_http_url_error(self):
    status = self.do_collect_jobs_error(URLError("kaputt"))
    self.assertEqual(RequestStatus.ERROR,
                     status[("api.newrelic.com", "all")].request_status)
def __init__(self, filename, apiurl, buildtype='spec', localpkgs=[]):
    try:
        tree = ET.parse(filename)
    except:
        print('could not parse the buildinfo:', file=sys.stderr)
        print(open(filename).read(), file=sys.stderr)
        sys.exit(1)
    root = tree.getroot()
    self.apiurl = apiurl
    if root.find('error') is not None:
        sys.stderr.write('buildinfo is broken... it says:\n')
        error = root.find('error').text
        sys.stderr.write(error + '\n')
        sys.exit(1)
    if not (apiurl.startswith('https://') or apiurl.startswith('http://')):
        raise URLError('invalid protocol for the apiurl: \'%s\'' % apiurl)
    self.buildtype = buildtype
    # are we building .rpm or .deb?
    # XXX: shouldn't we deliver the type via the buildinfo?
    self.pacsuffix = 'rpm'
    if self.buildtype == 'dsc':
        self.pacsuffix = 'deb'
    if self.buildtype == 'arch':
        self.pacsuffix = 'arch'
    if self.buildtype == 'livebuild':
        self.pacsuffix = 'deb'
    self.buildarch = root.find('arch').text
    if root.find('hostarch') is not None:
        self.hostarch = root.find('hostarch').text
    else:
        self.hostarch = None
    if root.find('release') is not None:
        self.release = root.find('release').text
    else:
        self.release = None
    self.downloadurl = root.get('downloadurl')
    self.debuginfo = 0
    if root.find('debuginfo') is not None:
        try:
            self.debuginfo = int(root.find('debuginfo').text)
        except ValueError:
            pass
    self.deps = []
    self.projects = {}
    self.keys = []
    self.prjkeys = []
    self.pathes = []
    for node in root.findall('bdep'):
        p = Pac(node, self.buildarch, self.pacsuffix, apiurl, localpkgs)
        if p.project:
            self.projects[p.project] = 1
        self.deps.append(p)
    for node in root.findall('path'):
        self.pathes.append(node.get('project') + "/" + node.get('repository'))
    self.vminstall_list = [dep.name for dep in self.deps if dep.vminstall]
    self.preinstall_list = [dep.name for dep in self.deps if dep.preinstall]
    self.runscripts_list = [dep.name for dep in self.deps if dep.runscripts]
    self.noinstall_list = [dep.name for dep in self.deps if dep.noinstall]
    self.installonly_list = [dep.name for dep in self.deps if dep.installonly]
class TestRegisterUtils(unittest.TestCase):
    def test_build_post_data(self):
        r_content = six.b("""----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name="maintainer"\r\n\r\n\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name="name"\r\n\r\n""" \
            """foo\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name="license"\r\n\r\n\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name="author"\r\n\r\n""" \
            """John Doe\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name="url"\r\n\r\n\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name=":action"\r\n\r\n""" \
            """submit\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name="download_url"\r\n\r\n\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name="maintainer_email"\r\n\r\n\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name="author_email"\r\n\r\n\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name="version"\r\n\r\n""" \
            """1.0\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name="long_description"\r\n\r\n\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254\r\n""" \
            """Content-Disposition: form-data; name="description"\r\n\r\n\r\n""" \
            """----------------GHSKFJDLGDS7543FJKLFHRE75642756743254--\r\n""" \
            """""")
        bento_info = """\
Name: foo
Version: 1.0
Author: John Doe
"""
        package = PackageDescription.from_string(bento_info)
        post_data = build_post_data(package, "submit")
        content_type, body = encode_multipart(list(post_data.items()), [])
        self.assertEqual(r_content, body)

    @mock.patch(_OPENER_DIRECTOR, mock.MagicMock())
    def test_register_server(self):
        package = PackageDescription(name="foo")
        repository = 'http://testpypi.python.org/pypi'
        realm = DEFAULT_REALM
        config = PyPIConfig(username="******", password="******",
                            repository=repository, realm=realm)
        auth = HTTPPasswordMgr()
        host = urlparse(config.repository)[0]
        auth.add_password(config.realm, host, config.username, config.password)
        post_data = build_post_data(package, "submit")
        code, msg = post_to_server(post_data, config, auth)
        self.assertEqual(code, 200)
        self.assertEqual(msg, "OK")

    @mock.patch("%s.open" % _OPENER_DIRECTOR,
                mock.MagicMock(side_effect=HTTPError("", 404, "", {}, None)))
    def test_register_server_http_errors(self):
        code, msg = self._test_register_server_errors()
        self.assertEqual(code, 404)
        self.assertEqual(msg, "")

    @mock.patch("%s.open" % _OPENER_DIRECTOR,
                mock.MagicMock(side_effect=URLError("")))
    def test_register_server_url_errors(self):
        code, msg = self._test_register_server_errors()
        self.assertEqual(code, 500)

    def _test_register_server_errors(self):
        package = PackageDescription(name="foo")
        config = PyPIConfig.from_string("""
[distutils]
index-servers = pypi

[pypi]
username = cdavid
password = yoyo
server = http://testpypi.python.org
""")
        post_data = build_post_data(package, "submit")
        return post_to_server(post_data, config)
def __call__(self, _, url):
    self.calls += 1
    print('call {0}: {1}'.format(self.calls, url))
    raise URLError(socket.timeout())