def make_devkit(options):
    import virtualenv
    from urlgrabber.grabber import urlgrab
    from urlgrabber.progress import text_progress_meter

    (path("package") / "devkit" / "share").makedirs()
    pip_bundle("package/devkit/share/geonode-core.pybundle -r shared/devkit.requirements")

    # shutil is added to the embedded imports; setup_jetty() below uses
    # shutil.rmtree but the original script only imported os, subprocess
    # and zipfile.
    script = virtualenv.create_bootstrap_script("""
import os, shutil, subprocess, zipfile

def after_install(options, home_dir):
    if sys.platform == 'win32':
        bin = 'Scripts'
    else:
        bin = 'bin'
    installer_base = os.path.abspath(os.path.dirname(__file__))

    def pip(*args):
        subprocess.call([os.path.join(home_dir, bin, "pip")] + list(args))

    pip("install", os.path.join(installer_base, "share", "geonode-core.pybundle"))
    setup_jetty(source=os.path.join(installer_base, "share"),
                dest=os.path.join(home_dir, "share"))

def setup_jetty(source, dest):
    jetty_zip = os.path.join(source, "jetty-distribution-7.0.2.v20100331.zip")
    jetty_dir = os.path.join(dest, "jetty-distribution-7.0.2.v20100331")
    zipfile.ZipFile(jetty_zip).extractall(dest)
    shutil.rmtree(os.path.join(jetty_dir, "contexts"))
    shutil.rmtree(os.path.join(jetty_dir, "webapps"))
    os.mkdir(os.path.join(jetty_dir, "contexts"))
    os.mkdir(os.path.join(jetty_dir, "webapps"))

    deployments = [
        ('geoserver', 'geoserver-geonode-dev.war'),
        ('geonetwork', 'geonetwork.war'),
        ('media', 'geonode-client.zip')
    ]

    for context, archive in deployments:
        src = os.path.join(source, archive)
        dst = os.path.join(jetty_dir, "webapps", context)
        zipfile.ZipFile(src).extractall(dst)
""")
    with open(path("package") / "devkit" / "go-geonode.py", 'w') as fo:
        fo.write(script)

    urlgrab(
        "http://download.eclipse.org/jetty/7.0.2.v20100331/dist/jetty-distribution-7.0.2.v20100331.zip",
        "package/devkit/share/jetty-distribution-7.0.2.v20100331.zip",
        progress_obj=text_progress_meter()
    )
    urlgrab(
        "http://pypi.python.org/packages/source/p/pip/pip-0.7.1.tar.gz",
        "package/devkit/share/pip-0.7.1.tar.gz",
        progress_obj=text_progress_meter()
    )
    geoserver_target.copy("package/devkit/share")
    geonetwork_target.copy("package/devkit/share")
    geonode_client_target().copy("package/devkit/share")
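# For reference, a minimal sketch of how the generated go-geonode.py
# bootstrap script would be invoked; the target directory name here is an
# illustrative assumption. after_install() then pip-installs the bundled
# geonode-core.pybundle into the new environment and unpacks Jetty plus
# the three webapps under its share/ directory.
import subprocess

subprocess.call(["python", "package/devkit/go-geonode.py", "geonode-env"])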
def test_reference_file(self):
    "download reference file via HTTP"
    filename = tempfile.mktemp()
    grabber.urlgrab(ref_http, filename)
    contents = open(filename, 'rb').read()
    self.assertTrue(contents == reference_data)
def test_reference_file(self):
    "download reference file via HTTP"
    _, filename = tempfile.mkstemp()
    grabber.urlgrab(ref_http, filename)
    fo = open(filename, 'rb' if not six.PY3 else 'r')
    contents = fo.read()
    fo.close()
    self.assert_(contents == reference_data)
def test_reference_file(self):
    "download reference file via HTTP"
    filename = tempfile.mktemp()
    grabber.urlgrab(ref_http, filename)
    fo = file(filename, 'rb')
    contents = fo.read()
    fo.close()
    self.assert_(contents == reference_data)
def insert_pkgtags(self):
    """Download and inject the pkgtags sqlite from fedora-tagger"""
    if config.get('pkgtags_url'):
        # create the temp dir before entering the try block, so that
        # `tempdir` is always defined when the finally clause runs
        tempdir = tempfile.mkdtemp('bodhi')
        try:
            tags_url = config.get('pkgtags_url')
            local_tags = os.path.join(tempdir, 'pkgtags.sqlite')
            log.info('Downloading %s' % tags_url)
            urlgrab(tags_url, filename=local_tags)
            self.modifyrepo(local_tags)
        except Exception:
            log.exception("There was a problem injecting pkgtags")
        finally:
            shutil.rmtree(tempdir)
def grab_unpack(name, options, section, src=path("./src").abspath()):
    """Concurrency-friendly download and unpacker."""
    url = options.conf_get(section, name)
    cache = path('build')
    filename = url.split("/")[-1]
    dl_path = cache.abspath() / filename
    if not dl_path.exists():
        info("Download %s" % name)
        urlgrab(url, dl_path, progress_obj=text_progress_meter())
    eventlet.api.sleep(0)  # yield to other green threads
    source_path = tarball_unpack(dl_path, src, True)
    eventlet.api.sleep(0)
    options.conf_set("sources", name, source_path.abspath())
def insert_pkgtags(self):
    """Download and inject the pkgtags sqlite from fedora-tagger"""
    if config.get('pkgtags_url') not in [None, ""]:
        try:
            tags_url = config.get('pkgtags_url')
            local_tags = '/tmp/pkgtags.sqlite'
            log.info('Downloading %s' % tags_url)
            urlgrab(tags_url, filename=local_tags)
            for arch in os.listdir(self.repo):
                repomd = RepoMetadata(join(self.repo, arch, 'repodata'))
                repomd.add(local_tags)
        except Exception, e:
            log.exception(e)
            log.error("There was a problem injecting pkgtags")
def grab(src, dest):
    from urlgrabber.grabber import urlgrab, URLGrabError
    from urlgrabber.progress import text_progress_meter

    if getattr(options, 'clean', False) and os.path.exists(str(dest)):
        (path(".") / dest).remove()

    try:
        if not os.path.exists(str(dest)):
            urlgrab(
                str(src), str(dest),
                reget='simple',
                progress_obj=text_progress_meter()
            )
    except URLGrabError, e:
        # Eat exceptions with error code 9; these indicate that we had
        # already finished the download.
        if e.errno != 9:
            raise
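# A minimal usage sketch for grab(); the URL and destination here are
# illustrative, and `options` is assumed to be the surrounding paver
# options object. reget='simple' means an interrupted download resumes
# on the next run, and the URLGrabError with errno 9 that urlgrabber
# raises for an already-finished download is swallowed above.
grab(
    "http://pypi.python.org/packages/source/p/pip/pip-0.7.1.tar.gz",
    "build/pip-0.7.1.tar.gz",
)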
def retrievefile(url):
    """do the actual file retrieval to a temp dir, return tempdir+file"""
    tmpdir = tempfile.mkdtemp()
    fn = os.path.basename(url)
    tmpfn = '%s/%s' % (tmpdir, fn)
    # XXX Note - maybe make an option to use wget or curl directly here?
    try:
        loc = grabber.urlgrab(url, filename=tmpfn)
    except grabber.URLGrabError, e:
        error('Error downloading %s: %s' % (url, e))
        return None
    # return the downloaded path on success, as the docstring promises;
    # the original fell off the end and returned None either way
    return loc
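# Hypothetical caller of retrievefile(); the URL is illustrative and
# process() stands in for whatever consumes the downloaded file. Note
# that retrievefile() leaves its mkdtemp() directory behind, so cleanup
# is the caller's responsibility.
import os
import shutil

tmpfn = retrievefile('http://example.com/foo-1.0.src.rpm')
if tmpfn is not None:
    try:
        process(tmpfn)
    finally:
        # remove the temp dir created by tempfile.mkdtemp() above
        shutil.rmtree(os.path.dirname(tmpfn))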
# to address concerns that the overhead from the progress meter
# and throttling slow things down, we do this little test.
#
# using this test, you get the FULL overhead of the progress
# meter and throttling, without the benefit: the meter is directed
# to /dev/null and the throttle bandwidth is set EXTREMELY high.
#
# note: it _is_ even slower to direct the progress meter to a real
# tty or file, but I'm just interested in the overhead from _this_
# module.

# get it nicely cached before we start comparing
if DEBUG:
    print 'pre-caching'
for i in range(100):
    urlgrab(tempsrc, tempdst, copy_local=1, throttle=None, proxies=proxies)

if DEBUG:
    print 'running speed test.'
reps = 500
for i in range(reps):
    if DEBUG:
        print '\r%4i/%-4i' % (i + 1, reps),
        sys.stdout.flush()
    t = time.time()
    urlgrab(tempsrc, tempdst,
            copy_local=1, progress_obj=tpm,
            throttle=throttle, proxies=proxies)
    full_times.append(1000 * (time.time() - t))
for build in args.build:
    buildinfo = remotekojisession.getBuild(build)
    logging.debug("build=%s" % (buildinfo))
    if buildinfo is None:
        logging.critical("build %s doesn't exist" % (build))
        break
    fname = "%s.src.rpm" % buildinfo['nvr']
    url = "%s/packages/%s/%s/%s/src/%s" % (PACKAGEURL,
                                           buildinfo['package_name'],
                                           buildinfo['version'],
                                           buildinfo['release'],
                                           fname)
    if not os.path.isfile(fname):
        grabber.urlgrab(url, progress_obj=pg, text="%s" % (fname))
    serverdir = _unique_path('cli-build')
    logging.info("uploading %s ..." % (build))
    localkojisession.uploadWrapper(fname, serverdir, blocksize=65536)
    source = "%s/%s" % (serverdir, fname)
    if args.scratch:
        opts = {'scratch': True}
    else:
        opts = None
    localkojisession.build(source, args.tag, opts=opts, priority=2)
    logging.info("submitted build: %s" % buildinfo['nvr'])
def speedtest(size):
    setuptemp(size)
    full_times = []
    raw_times = []
    none_times = []
    throttle = 2**40  # throttle to 1 TB/s :)

    try:
        from urlgrabber.progress import text_progress_meter
    except ImportError as e:
        tpm = None
        print('not using progress meter')
    else:
        tpm = text_progress_meter(fo=open('/dev/null', 'w'))

    # to address concerns that the overhead from the progress meter
    # and throttling slow things down, we do this little test.
    #
    # using this test, you get the FULL overhead of the progress
    # meter and throttling, without the benefit: the meter is directed
    # to /dev/null and the throttle bandwidth is set EXTREMELY high.
    #
    # note: it _is_ even slower to direct the progress meter to a real
    # tty or file, but I'm just interested in the overhead from _this_
    # module.

    # get it nicely cached before we start comparing
    if DEBUG:
        print('pre-caching')
    for i in range(100):
        urlgrab(tempsrc, tempdst, copy_local=1, throttle=None,
                proxies=proxies)

    if DEBUG:
        print('running speed test.')
    reps = 500
    for i in range(reps):
        if DEBUG:
            print('\r%4i/%-4i' % (i + 1, reps), end=' ')
            sys.stdout.flush()
        t = time.time()
        urlgrab(tempsrc, tempdst,
                copy_local=1, progress_obj=tpm,
                throttle=throttle, proxies=proxies)
        full_times.append(1000 * (time.time() - t))

        t = time.time()
        urlgrab(tempsrc, tempdst,
                copy_local=1, progress_obj=None,
                throttle=None, proxies=proxies)
        raw_times.append(1000 * (time.time() - t))

        t = time.time()
        # read bytes so the write to the binary-mode file below works on
        # Python 3; the original opened the source in text mode
        in_fo = open(tempsrc, 'rb')
        out_fo = open(tempdst, 'wb')
        while True:
            s = in_fo.read(1024 * 8)
            if not s:
                break
            out_fo.write(s)
        in_fo.close()
        out_fo.close()
        none_times.append(1000 * (time.time() - t))

    if DEBUG:
        print('\r')
    print("%d KB Results:" % (size / 1024))
    print_result('full', full_times)
    print_result('raw', raw_times)
    print_result('none', none_times)
def speedtest(size):
    setuptemp(size)
    full_times = []
    raw_times = []
    none_times = []
    throttle = 2**40  # throttle to 1 TB/s :)

    try:
        from urlgrabber.progress import text_progress_meter
    except ImportError as e:
        tpm = None
        print('not using progress meter')
    else:
        tpm = text_progress_meter(fo=open('/dev/null', 'w'))

    # to address concerns that the overhead from the progress meter
    # and throttling slow things down, we do this little test.
    #
    # using this test, you get the FULL overhead of the progress
    # meter and throttling, without the benefit: the meter is directed
    # to /dev/null and the throttle bandwidth is set EXTREMELY high.
    #
    # note: it _is_ even slower to direct the progress meter to a real
    # tty or file, but I'm just interested in the overhead from _this_
    # module.

    # get it nicely cached before we start comparing
    if DEBUG:
        print('pre-caching')
    for i in range(100):
        urlgrab(tempsrc, tempdst, copy_local=1, throttle=None,
                proxies=proxies)

    if DEBUG:
        print('running speed test.')
    reps = 500
    for i in range(reps):
        if DEBUG:
            six.print_('\r%4i/%-4i' % (i+1, reps), end=' ')
            sys.stdout.flush()
        t = time.time()
        urlgrab(tempsrc, tempdst,
                copy_local=1, progress_obj=tpm,
                throttle=throttle, proxies=proxies)
        full_times.append(1000 * (time.time() - t))

        t = time.time()
        urlgrab(tempsrc, tempdst,
                copy_local=1, progress_obj=None,
                throttle=None, proxies=proxies)
        raw_times.append(1000 * (time.time() - t))

        t = time.time()
        in_fo = open(tempsrc)
        out_fo = open(tempdst, 'wb')
        while 1:
            s = in_fo.read(1024 * 8)
            if not s:
                break
            # the source is read in text mode, so encode before writing
            # to the binary-mode destination on Python 3
            out_fo.write(s if not six.PY3 else s.encode('utf-8'))
        in_fo.close()
        out_fo.close()
        none_times.append(1000 * (time.time() - t))

    if DEBUG:
        print('\r')
    print("%d KB Results:" % (size / 1024))
    print_result('full', full_times)
    print_result('raw', raw_times)
    print_result('none', none_times)
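# A hypothetical driver for speedtest(); the sizes are illustrative and
# the module-level names it relies on (setuptemp, tempsrc, tempdst,
# DEBUG, proxies, print_result) are assumed to be defined as above.
for size in (1024, 10 * 1024, 100 * 1024):
    speedtest(size)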