Пример #1
0
def download_package(destination, product, version, compiler):
  # Fetch a {product}-{version}-{compiler}-{label}.tar.gz tarball from HOST,
  # unpack it into `destination`, and record what was installed.
  # NOTE: Python 2 print statements -- this module targets Python 2.
  remove_existing_package(destination, product, version)

  label = get_release_label()
  file_name = "{0}-{1}-{2}-{3}.tar.gz".format(product, version, compiler, label)
  url_path="/{0}/{1}-{2}/{0}-{1}-{2}-{3}.tar.gz".format(product, version, compiler, label)
  download_path = HOST + url_path

  print "URL {0}".format(download_path)
  print "Downloading {0} to {1}".format(file_name, destination)
  # --no-clobber avoids downloading the file if a file with the name already exists
  sh.wget(download_path, directory_prefix=destination, no_clobber=True)
  print "Extracting {0}".format(file_name)
  sh.tar(z=True, x=True, f=os.path.join(destination, file_name), directory=destination)
  # The archive is no longer needed once extracted.
  sh.rm(os.path.join(destination, file_name))

  if product == "kudu":
    # The Kudu tarball is actually a renamed parcel. Rename the contents to match the
    # naming convention.
    kudu_dirs = glob.glob("{0}/KUDU*{1}*".format(destination, version))
    if not kudu_dirs:
      raise Exception("Could not find contents of Kudu tarball")
    if len(kudu_dirs) > 1:
      raise Exception("Found too many Kudu folders: %s" % (kudu_dirs, ))
    new_dir = "{0}/{1}-{2}".format(destination, product, version)
    if os.path.exists(new_dir):
      # Replace any stale directory left over from a previous run.
      shutil.rmtree(new_dir)
    os.rename(kudu_dirs[0], new_dir)

  write_version_file(destination, product, version, compiler, label)
Пример #2
0
    def download_image(self, image_id):
        """Download the image from the image_url, returning True/False if the operation
        was successful.

        Concurrent callers for the same image_id block on a shared Event until
        the first caller finishes.  NOTE(review): waiting callers return None,
        not True/False -- callers must not rely on the return value when
        another thread performed the download.
        """
        downloading_already = False
        with self._downloading_images_lock:
            downloading_already = (image_id in self._downloading_images)
            if not downloading_already:
                # First caller: publish an Event other threads can wait on.
                e = self._downloading_images[image_id] = threading.Event()
                e.clear()

        if downloading_already:
            self._log.debug(
                "image is already being downloaded, waiting for it to finish")
            self._downloading_images[image_id].wait()
            self._log.debug("image finished downloading")
            return

        image_filename = image_id_to_volume(image_id)
        dest = os.path.join(LIBVIRT_BASE, image_filename)
        self._log.debug("downloading image {} to {}".format(image_id, dest))

        try:
            wget("-q", "-O", dest, self.image_url + "/" + image_filename)
            self._log.debug("downloaded {}".format(image_id))
            return True
        except Exception:
            # Narrowed from a bare `except:` (which also trapped
            # KeyboardInterrupt/SystemExit).
            self._log.error(
                "Could not download image! aborting running this VM")
            return False
        finally:
            # Fix: previously a failed download never set the Event, leaving
            # waiting threads blocked forever and leaking the dict entry.
            self._downloading_images[image_id].set()
            del self._downloading_images[image_id]
Пример #3
0
def seL4_rump(args):
    # Aggregate redis benchmark runs (throughput + CPU utilisation) for the
    # rumprun configuration into a tab-separated generated/redis.dat table.
    pwd = os.getcwd()
    with open("generated/redis.dat", "w") as result_file:

        results = dict()
        averaged = dict()  # NOTE(review): never populated; appears unused.
        result_file.writelines(
            "Hog utilisation\tthroughput (t/s)\tstddev\tidle\tstddev\n")
        # One output row per hog-utilisation level; three runs are averaged.
        for j in [0, 5, 25, 45, 55, 65, 85]:
            throughput = []
            utilisation = []
            for i in range(3):
                # print("%d %d" %(j,i))
                filename = os.path.join(pwd, "redis",
                                        "results-%d-%d.json" % (j, i))
                if args.build_number:
                    # Fetch the result file from CI when a build number is given.
                    sh.wget(RUMP_URL_FORMAT_STRING % (args.build_number, j, i),
                            "-O", filename)
                with open(filename) as json_data:
                    results["%d-%d" % (j, i)] = json.load(json_data)
                # Busy fraction = 1 - idle/total.
                utilisation.append(1 -
                                   (results["%d-%d" %
                                            (j, i)]["utilisation"]["idle"] /
                                    results["%d-%d" %
                                            (j, i)]["utilisation"]["total"]))
                throughput.append(results["%d-%d" % (j, i)]["throughput"])
            # Columns: hog %, mean throughput (kt/s), stddev, mean idle %, stddev.
            result_file.writelines(
                "%d\t%f\t%f\t%f\t%f\n" %
                (j, numpy.mean(throughput) / 1000, numpy.std(throughput) /
                 1000, 100 - numpy.mean(utilisation) * 100,
                 numpy.std(utilisation) * 100))
Пример #4
0
 def prebuild_arch(self, arch):
     # Patch the reportlab sources once per arch, wire in the freetype
     # lib/include paths, and download the standard PDF fonts it needs.
     if not self.is_patched(arch):
         super(ReportLabRecipe, self).prebuild_arch(arch)
         self.apply_patch('patches/fix-setup.patch', arch.arch)
         recipe_dir = self.get_build_dir(arch.arch)
         # Marker file so the patch is only applied once.
         shprint(sh.touch, os.path.join(recipe_dir, '.patched'))
         ft = self.get_recipe('freetype', self.ctx)
         ft_dir = ft.get_build_dir(arch.arch)
         # Freetype paths can be overridden through the environment.
         ft_lib_dir = os.environ.get('_FT_LIB_', os.path.join(ft_dir, 'objs', '.libs'))
         ft_inc_dir = os.environ.get('_FT_INC_', os.path.join(ft_dir, 'include'))
         tmp_dir = os.path.normpath(os.path.join(recipe_dir, "..", "..", "tmp"))
         info('reportlab recipe: recipe_dir={}'.format(recipe_dir))
         info('reportlab recipe: tmp_dir={}'.format(tmp_dir))
         info('reportlab recipe: ft_dir={}'.format(ft_dir))
         info('reportlab recipe: ft_lib_dir={}'.format(ft_lib_dir))
         info('reportlab recipe: ft_inc_dir={}'.format(ft_inc_dir))
         with current_directory(recipe_dir):
             sh.ls('-lathr')
             ensure_dir(tmp_dir)
             # Cache the fonts zip in tmp_dir so rebuilds skip the download.
             pfbfile = os.path.join(tmp_dir, "pfbfer-20070710.zip")
             if not os.path.isfile(pfbfile):
                 sh.wget("http://www.reportlab.com/ftp/pfbfer-20070710.zip", "-O", pfbfile)
             sh.unzip("-u", "-d", os.path.join(recipe_dir, "src", "reportlab", "fonts"), pfbfile)
             if os.path.isfile("setup.py"):
                 # Substitute the real freetype paths into setup.py.
                 # NOTE(review): file is opened in binary mode but the replace()
                 # arguments are str -- on Python 3 this raises TypeError;
                 # presumably written for Python 2. Confirm before reuse.
                 with open('setup.py', 'rb') as f:
                     text = f.read().replace('_FT_LIB_', ft_lib_dir).replace('_FT_INC_', ft_inc_dir)
                 with open('setup.py', 'wb') as f:
                     f.write(text)
Пример #5
0
    def fetch(self, path):
        """Fetch a kernel source tarball from *path* and unpack it.

        The download is skipped when the archive is already on disk.  After
        extraction, ``self._dir`` points at the single extracted source tree.

        Raises:
            RuntimeError: if extraction does not yield exactly one directory.
        """
        mkdir('-p', self._own_workdir)

        # Download the archive unless a previous run already fetched it.
        archive = str(Path(self._workdir).joinpath('kernel.tar.gz'))
        if not Path(archive).is_file():
            wget('-O{}'.format(archive),
                 path,
                 _out=sys.stdout,
                 _err=sys.stderr)

        # Unpack into a dedicated '_extracted' directory.
        unpack_root = str(Path(self._own_workdir).joinpath('_extracted'))
        mkdir('-p', unpack_root)
        tar('-xf',
            archive,
            '-C',
            unpack_root,
            _out=sys.stdout,
            _err=sys.stderr)

        # The tarball is expected to contain exactly one top-level directory.
        candidates = [
            entry for entry in Path(unpack_root).iterdir() if entry.is_dir()
        ]
        if (len(candidates) != 1):
            raise RuntimeError('Linux Kernel sources failed to extract')

        self._dir = candidates[0]
Пример #6
0
def download_package(destination, product, version, compiler):
  remove_existing_package(destination, product, version)

  label = get_release_label()
  file_name = "{0}-{1}-{2}-{3}.tar.gz".format(product, version, compiler, label)
  url_path="/{0}/{1}-{2}/{0}-{1}-{2}-{3}.tar.gz".format(product, version, compiler, label)
  download_path = HOST + url_path

  print "URL {0}".format(download_path)
  print "Downloading {0} to {1}".format(file_name, destination)
  # --no-clobber avoids downloading the file if a file with the name already exists
  sh.wget(download_path, directory_prefix=destination, no_clobber=True)
  print "Extracting {0}".format(file_name)
  sh.tar(z=True, x=True, f=os.path.join(destination, file_name), directory=destination)
  sh.rm(os.path.join(destination, file_name))

  if product == "kudu":
    # The Kudu tarball is actually a renamed parcel. Rename the contents to match the
    # naming convention.
    kudu_dirs = glob.glob("{0}/KUDU*{1}*".format(destination, version))
    if not kudu_dirs:
      raise Exception("Could not find contents of Kudu tarball")
    if len(kudu_dirs) > 1:
      raise Exception("Found too many Kudu folders: %s" % (kudu_dirs, ))
    new_dir = "{0}/{1}-{2}".format(destination, product, version)
    if os.path.exists(new_dir):
      shutil.rmtree(new_dir)
    os.rename(kudu_dirs[0], new_dir)
    print "renamed shit"

  write_version_file(destination, product, version, compiler, label)
Пример #7
0
    def download_image(self, image_id):
        """Download the image from the image_url

        Concurrent callers for the same image_id block on a shared Event until
        the first caller finishes downloading.
        """
        downloading_already = False
        with self._downloading_images_lock:
            downloading_already = (image_id in self._downloading_images)
            if not downloading_already:
                # First caller: publish an Event other threads can wait on.
                e = self._downloading_images[image_id] = threading.Event()
                e.clear()

        if downloading_already:
            self._log.debug(
                "image is already being downloaded, waiting for it to finish")
            self._downloading_images[image_id].wait()
            self._log.debug("image finished downloading")
            return

        image_filename = image_id_to_volume(image_id)
        dest = os.path.join(LIBVIRT_BASE, image_filename)
        self._log.debug("downloading image {} to {}".format(image_id, dest))

        try:
            wget("-q", "-O", dest, self.image_url + "/" + image_filename)
            self._log.debug("downloaded {}".format(image_id))
        finally:
            # Fix: always wake waiters and drop the bookkeeping entry, even if
            # wget raises -- previously an error left other threads blocked on
            # the Event forever and leaked the dict entry.  The exception
            # still propagates to the caller.
            self._downloading_images[image_id].set()
            del self._downloading_images[image_id]
Пример #8
0
def _init_certbot_env():
    """Prepare the certbot directory layout, TLS parameter files, and compose file."""
    logi("Creating certbot directories ...")
    # Same order as before: conf, www, nginx.
    for directory in (CERTBOT_CONF_DIR, CERTBOT_WWW_DIR, CERTBOT_NGINX_DIR):
        if not os.path.isdir(directory):
            mkdir("-p", directory)

    logi("Downloading recommended TLS parameters ...")
    SSL_NGINX_URL = "https://raw.githubusercontent.com/certbot/certbot/master/certbot-nginx/certbot_nginx/options-ssl-nginx.conf"
    SSL_NGINX_FILE = CERTBOT_CONF_DIR + 'options-ssl-nginx.conf'
    # Only fetch files that are not already present.
    if not os.path.exists(SSL_NGINX_FILE):
        wget(SSL_NGINX_URL, "-O", SSL_NGINX_FILE)

    DH_PARAMS_URL = "https://raw.githubusercontent.com/certbot/certbot/master/certbot/ssl-dhparams.pem"
    DH_PARAMS_FILE = CERTBOT_CONF_DIR + 'ssl-dhparams.pem'
    if not os.path.exists(DH_PARAMS_FILE):
        wget(DH_PARAMS_URL, "-O", DH_PARAMS_FILE)

    logi("Copying docker file to  ...")
    cp("-rf", 'templates/docker-compose.yml',
       CERTBOT_BASE_DIR + '/docker-compose.yml')
Пример #9
0
def download(target, temp_dir):
    """Download all CHUNKS, convert each to raw GRAY, and append them to `target`.

    Each chunk is fetched as a zip expected to contain exactly one TIFF,
    converted with ImageMagick, size-checked, and concatenated onto the open
    binary file object `target`.

    Raises:
        ValueError: on unexpected archive contents or converted size.
    """
    zip_path = path.join(temp_dir, "temp.zip")
    tgt_path = path.join(temp_dir, "chunk")

    for chunk in CHUNKS:
        tif_name = TIF_FORMAT.format(chunk)
        tif_path = path.join(temp_dir, tif_name)

        wget(URL_FORMAT.format(chunk), q=True, O=zip_path)

        # Sanity-check the archive before extracting it.
        with zipfile.ZipFile(zip_path, 'r') as pack:
            contents = pack.namelist()
            if contents != [tif_name]:
                # Fix: "{:r}" is an invalid format spec and raised
                # ValueError("Unknown format code 'r'") instead of the
                # intended message; "{!r}" is the repr conversion.
                raise ValueError("Bad archive contents: {!r}".format(contents))

        unzip(zip_path, d=temp_dir)
        os.unlink(zip_path)

        convert(tif_path, '-quiet', 'GRAY:{}'.format(tgt_path))
        os.unlink(tif_path)

        if os.stat(tgt_path).st_size != EXPECT_SIZE:
            raise ValueError("Bad converted size: {}".format(chunk))

        with open(tgt_path, "rb") as f:
            shutil.copyfileobj(f, target)
        os.unlink(tgt_path)
Пример #10
0
def memtest_extract():
    """Download/extract the memtest86 USB image and locate its EFI partition.

    Returns:
        (img_fpath, efi_start_bytes): path of the .img file and the byte
        offset of the EFI System Partition inside it.
    """
    zip_fpath = config.cache_dpath / 'memtest86-usb.zip'
    unzip_fpath = config.cache_dpath / 'memtest86-usb'
    img_fpath = unzip_fpath / 'memtest86-usb.img'
    zip_url = 'https://www.memtest86.com/downloads/memtest86-usb.zip'

    # Both download and extraction are skipped when cached results exist.
    if not zip_fpath.exists():
        print('Downloading:', zip_url, 'to', zip_fpath)
        sh.wget('-O', zip_fpath, zip_url)

    if not img_fpath.exists():
        print('Extracting zip to', unzip_fpath)
        sh.unzip('-d', unzip_fpath, zip_fpath)

    output = sh.sgdisk('-p', img_fpath)
    lines = output.strip().splitlines()
    # Second line should look like:
    #   Sector size (logical): 512 bytes
    # Fix: convert once here and drop the old no-op `sector_size = sector_size`
    # self-assignment.
    sector_size = int(lines[1].split(':')[1].replace('bytes', '').strip())

    json_output = sh.sfdisk('--json', img_fpath)
    partitions = json.loads(str(json_output))['partitiontable']['partitions']
    efi_part = [p for p in partitions if p['name'] == 'EFI System Partition'].pop()
    efi_start_sector = efi_part['start']

    efi_start_bytes = sector_size * efi_start_sector

    return img_fpath, efi_start_bytes
Пример #11
0
    def download_image(self, image_id):
        """Download the image from the image_url, returning True/False if the operation
        was successful.

        Concurrent callers for the same image_id block on a shared Event until
        the first caller finishes.  NOTE(review): waiting callers return None,
        not True/False.
        """
        downloading_already = False
        with self._downloading_images_lock:
            downloading_already = (image_id in self._downloading_images)
            if not downloading_already:
                # First caller: publish an Event other threads can wait on.
                e = self._downloading_images[image_id] = threading.Event()
                e.clear()

        if downloading_already:
            self._log.debug("image is already being downloaded, waiting for it to finish")
            self._downloading_images[image_id].wait()
            self._log.debug("image finished downloading")
            return

        image_filename = image_id_to_volume(image_id)
        dest = os.path.join(LIBVIRT_BASE, image_filename)
        self._log.debug("downloading image {} to {}".format(image_id, dest))

        try:
            wget("-q", "-O", dest, self.image_url + "/" + image_filename)
            self._log.debug("downloaded {}".format(image_id))
            return True
        except Exception:
            # Narrowed from a bare `except:`.
            self._log.error("Could not download image! aborting running this VM")
            return False
        finally:
            # Fix: previously a failed download never set the Event, leaving
            # waiting threads blocked forever and leaking the dict entry.
            self._downloading_images[image_id].set()
            del self._downloading_images[image_id]
Пример #12
0
 def compile( self, source_dir, build_dir, install_dir ):
     # Build Ogitor from source_dir into build_dir and install to install_dir,
     # first fetching its sample media/projects archives when missing.
     package_source_dir = os.path.join( source_dir, self.dirname )
     assert( os.path.exists( package_source_dir ) )
     package_build_dir = os.path.join( build_dir, self.dirname )
     runpath_dir = os.path.join( package_source_dir, 'RunPath' )
     if ( not os.path.exists( os.path.join( runpath_dir, 'media.zip' ) ) ):
         # NOTE(review): sh.cd presumably changes the process-wide working
         # directory; later relative operations depend on this -- verify.
         sh.cd( runpath_dir )
         sh.wget( '--no-check-certificate', 'https://bitbucket.org/jacmoe/ogitor/downloads/media.zip' )
         sh.unzip( 'media.zip' )
     if ( not os.path.exists( os.path.join( runpath_dir, 'projects.zip' ) ) ):
         sh.cd( runpath_dir )
         sh.wget( '--no-check-certificate', 'https://bitbucket.org/jacmoe/ogitor/downloads/projects.zip' )
         sh.unzip( 'projects.zip' )
     sh.mkdir( '-p', package_build_dir )
     sh.cd( package_build_dir )
     if ( platform.system() == 'Darwin' ):
         # macOS: generate an Xcode project and build the Release configuration.
         sh.cmake(
             '-G', 'Xcode',
             '-D', 'CMAKE_INSTALL_PREFIX=%s' % install_dir,
             '-D', 'CMAKE_MODULE_PATH=%s' % os.path.join( install_dir, 'CMake' ),
             package_source_dir,
             _out = sys.stdout )
         sh.xcodebuild( '-configuration', 'Release', _out = sys.stdout )
     else:
         # Other platforms: plain CMake + make install.
         sh.cmake(
             '-D', 'CMAKE_INSTALL_PREFIX=%s' % install_dir,
             '-D', 'CMAKE_MODULE_PATH=%s' % os.path.join( install_dir, 'lib/OGRE/cmake' ),
             package_source_dir,
             _out = sys.stdout )
         sh.make( '-j4', 'VERBOSE=1', _out = sys.stdout )
         sh.make.install( _out = sys.stdout )
    def update_cache(self):
        """Rebuild the repodata cache when validation fails.

        Downloads the repo index file into a fresh temp dir, then every file
        referenced by its <data>/<location> entries.  Any failure flags the
        repo broken instead of raising.
        """
        if not self.test_cache():
            rm(self.cache_dir, '-rf')
            self.cache_dir = mkdtemp()
            self.cache_uuid = uuid4()
            mkdir(os.path.join(self.cache_dir, 'repodata'))

            index_file_url = '/'.join([self.repo_url, self.index_file])
            index_file_path = os.path.join(self.cache_dir, self.index_file)

            try:
                print("Downloading index file '{0}' --> '{1}' ...".format(
                    index_file_url, index_file_path
                ))
                wget(index_file_url, '-O', index_file_path)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed.
                self.broken = True
                return

            try:
                xmlroot = etree.parse(index_file_path).getroot()
                xmlns = xmlroot.nsmap[None]
                for item in xmlroot.findall("{{{0}}}data".format(xmlns)):
                    for subitem in item.findall("{{{0}}}location".format(xmlns)):
                        location = subitem.get('href')
                        url = '/'.join([self.repo_url, location])
                        path = '/'.join([self.cache_dir, location])
                        print("Downloading file '{0}' --> '{1}' ...".format(
                            url, path
                        ))
                        wget(url, '-O', path)
            except Exception:
                # Any parse or download failure leaves the repo flagged broken.
                self.broken = True
Пример #14
0
def download_file(url, path):
    """Fetch *url* into *path* with wget, streaming wget's output to our stdio."""
    # dot:meg progress keeps logs compact for large files.
    wget_args = ['--progress=dot:meg', '-O', path, url]
    wget(*wget_args, _out=sys.stdout, _err=sys.stderr)
Пример #15
0
def wget_and_unpack_package(download_path, file_name, destination, wget_no_clobber):
  # Download `download_path` into `destination`, extract the tarball there,
  # then delete the archive.  (Python 2 print statements.)
  print "URL {0}".format(download_path)
  print "Downloading {0} to {1}".format(file_name, destination)
  # --no-clobber avoids downloading the file if a file with the name already exists
  sh.wget(download_path, directory_prefix=destination, no_clobber=wget_no_clobber)
  print "Extracting {0}".format(file_name)
  sh.tar(z=True, x=True, f=os.path.join(destination, file_name), directory=destination)
  # Remove the archive once extracted.
  sh.rm(os.path.join(destination, file_name))
Пример #16
0
def download_wget(url: str, output_dir: str) -> None:
    """Download *url* into *output_dir* via wget, echoing all output (stderr
    folded into stdout) to our stdout."""
    sh.wget('--directory-prefix', output_dir, url,
            _out=sys.stdout, _err_to_out=True)
Пример #17
0
def wget_and_unpack_package(download_path, file_name, destination, wget_no_clobber):
  # Download `download_path` into `destination`, extract the tarball there,
  # then delete the archive.  (Python 2 print statements.)
  print "URL {0}".format(download_path)
  print "Downloading {0} to {1}".format(file_name, destination)
  # --no-clobber avoids downloading the file if a file with the name already exists
  sh.wget(download_path, directory_prefix=destination, no_clobber=wget_no_clobber)
  print "Extracting {0}".format(file_name)
  sh.tar(z=True, x=True, f=os.path.join(destination, file_name), directory=destination)
  # Remove the archive once extracted.
  sh.rm(os.path.join(destination, file_name))
Пример #18
0
def get_reference_DBs(dbs):
    """Ensure the reference-DB tarball named by *dbs* is present and unpacked.

    Downloads and extracts the archive unless both the tarball and the
    'HMP_ref_dbs' directory already exist.  Returns the tarball's basename.
    """
    fname = dbs
    # Fix: the original `not isfile(...) & exists(...) == False` parsed as
    # `(isfile & exists) != False` due to operator precedence, so the
    # download ran exactly when BOTH artifacts were already present --
    # the inverse of the intent.  Use boolean `and` and negate properly.
    if not (os.path.isfile(os.path.basename(fname))
            and os.path.exists('HMP_ref_dbs')):
        wget(fname)
        with tarfile.open(dbs, 'r:gz') as archive:
            archive.extractall('.')
    return os.path.basename(fname)
Пример #19
0
def try_wget(deployment, args, count=1):
    """Run ``wget`` with *args*, retrying up to *count* more times on failure.

    Args:
        deployment: name used only in log messages.
        args: argument sequence passed straight through to wget.
        count: number of retries remaining.
    """
    try:
        sh.wget(*args)
    except sh.ErrorReturnCode:  # Fix: bare `except:` narrowed to wget's
        # non-zero exit codes, as the original "# Error codes" comment
        # intended; KeyboardInterrupt etc. now propagate.
        log.error("Failed to get %s", deployment)
        # Fix: typo "remainging" -> "remaining" in the log message.
        log.info("Attempts remaining: %s", count)
        if count > 0:
            try_wget(deployment, args, count - 1)
Пример #20
0
 def update_cache(self):
     """Refresh the cached Packages.gz when the current cache is invalid.

     Downloads the repository's Packages.gz into a fresh temp dir; download
     failures flag the repo broken instead of raising.
     """
     if not self.test_cache():
         print("Downloading file ...")
         self.local_packages_gz = mkdtemp() + "/Packages.gz"
         try:
             wget(self.packages_gz_url, '-O', self.local_packages_gz)
         except Exception:
             # Narrowed from a bare `except:` so KeyboardInterrupt and
             # SystemExit are no longer swallowed.
             self.broken = True
Пример #21
0
    def input_dir(self):
        """Return the test input directory, fetching and unpacking the data set on demand."""
        input_path = os.path.join(self.test_data_dir, 'input')
        if os.path.exists(self.test_data_dir):
            return input_path

        # Data directory missing: fetch the tarball (unless cached) and unpack.
        if not os.path.exists(self.test_data_tarball):
            sh.wget('-P', self.test_dir, data_tarball_url)
        sh.tar('zxvf', self.test_data_tarball, '-C', self.test_dir)
        return input_path
Пример #22
0
def install_cmake( build_dir, prefix ):
    """Download, build, and install CMake 2.8.11.2.

    Args:
        build_dir: scratch directory to download and build in.
        prefix: installation prefix passed to ./configure.
    """
    cmake_archive='cmake-2.8.11.2'
    sh.cd( build_dir )
    # -nc: skip the download when the tarball is already present.
    sh.wget( '-nc', 'http://www.cmake.org/files/v2.8/%s.tar.gz' % cmake_archive )
    sh.tar( 'xvzf', '%s.tar.gz' % cmake_archive )
    sh.cd( cmake_archive )
    # Fixes: use the `prefix` parameter (the original ignored it and read a
    # global PREFIX), and drop shell=True -- with a list argument and
    # shell=True, POSIX runs only the first element as the shell command.
    subprocess.check_call( [ './configure', '--prefix', prefix ] )
    sh.make( '-j4' )
    sh.make.install()
Пример #23
0
def download(url, name, destination=str(DATA_DIR), extension='csv'):
    """Download data and load that into a database"""
    os.makedirs(destination, exist_ok=True)
    target = os.path.join(destination, '{}.{}'.format(name, extension))
    print('downloading to {} ...'.format(target))
    # -c resumes partial downloads; -O pins the output filename.
    sh.wget(url, '-c', '-O', target, _out=sys.stdout)
    return target
Пример #24
0
def install_cmake(build_dir, prefix):
    """Download, build, and install CMake 2.8.11.2.

    Args:
        build_dir: scratch directory to download and build in.
        prefix: installation prefix passed to ./configure.
    """
    cmake_archive = 'cmake-2.8.11.2'
    sh.cd(build_dir)
    # -nc: skip the download when the tarball is already present.
    sh.wget('-nc', 'http://www.cmake.org/files/v2.8/%s.tar.gz' % cmake_archive)
    sh.tar('xvzf', '%s.tar.gz' % cmake_archive)
    sh.cd(cmake_archive)
    # Fixes: use the `prefix` parameter (the original ignored it and read a
    # global PREFIX), and drop shell=True -- with a list argument and
    # shell=True, POSIX runs only the first element as the shell command.
    subprocess.check_call(['./configure', '--prefix', prefix])
    sh.make('-j4')
    sh.make.install()
Пример #25
0
def setup_test_input_dir():
    """Make sure the test data set is present and unpacked.

    Returns the path of the extracted data's 'input' subdirectory.
    """
    tarball_url = ('http://s3-ap-southeast-2.amazonaws.com/dp-drop/esmgrids/'
                   'test/test_data.tar.gz')
    tarball_path = os.path.join(test_dir, 'test_data.tar.gz')

    if not os.path.exists(test_data_dir):
        # Fetch the archive only when no local copy exists, then unpack it.
        if not os.path.exists(tarball_path):
            sh.wget('-P', test_dir, tarball_url)
        sh.tar('zxvf', tarball_path, '-C', test_dir)

    return os.path.join(test_data_dir, 'input')
Пример #26
0
def download_dump():
    """Fetch the OSM dump if it is missing and decompress it."""
    makedirs(DATA_DIR, exist_ok=True)

    # Skip everything when the uncompressed dump already exists.
    if not path.isfile(OSM_DUMP_PATH_UNCOMPRESSED):
        click.echo("Downloading OSM dump")
        # -nc: no-clobber; -P: target directory; run in the foreground so
        # wget's progress display stays visible.
        wget('-nc', '-P' + DATA_DIR, OSM_DUMP_URL, _fg=True)

        click.echo("Decompressing downloaded OSM dump")
        click.echo(bzip2('-d', OSM_DUMP_PATH))
    click.secho("OSM dump: " + OSM_DUMP_PATH_UNCOMPRESSED, fg="green")
Пример #27
0
def wget(url, args=None):
    """Run ``sh.wget`` on *url*, optionally with extra *args*.

    Returns the command result, or None when wget raised.
    """
    result = None
    try:
        # Only pass `args` through when the caller supplied it.
        result = sh.wget(url, args) if args is not None else sh.wget(url)
    except Exception:
        print('wget error: ' + url)
        logging.error('wget error: ' + url)
    return result
Пример #28
0
    def input_dir(self):
        """Return the test input directory, fetching the data set on first use."""
        here = os.path.dirname(os.path.realpath(__file__))
        data_dir = os.path.join(here, 'test_data')
        tarball_path = os.path.join(here, data_tarball)

        if not os.path.exists(data_dir):
            # First run: grab the tarball (unless cached) and extract it here.
            if not os.path.exists(tarball_path):
                sh.wget('-P', here, data_tarball_url)
            sh.tar('zxvf', tarball_path, '-C', here)

        return os.path.join(data_dir, 'input')
Пример #29
0
def download(url, out=None):
    """Download `url` to the path `out` with the wget CLI; returns `out`.

    this is not very pythonic -- but it works!
    (tried urllib, urllib2, httplib, wget etc. -- they all
    have problems with proper redirects and cookies support,
    whereas wget quite simply just does what it should). Couldn't
    care less.)
    """
    from sh import wget  # @UnresolvedImport
    # Python 2 print statement; echoes wget's captured output.
    print wget(url, '-O', out)
    return out
Пример #30
0
 def __init__(self):
     """Build the YOLOv3 module list, downloading the darknet cfg on first use."""
     super().__init__(256, 8)
     import os
     import sh
     if not os.path.exists('data/yolov3.cfg'):
         os.makedirs('data', exist_ok=True)
         # Fetch the upstream darknet YOLOv3 configuration file.
         sh.wget(
             '-O', 'data/yolov3.cfg', 'https://raw.githubusercontent.com/'
             'pjreddie/darknet/master/cfg/yolov3.cfg')
     # Parse the cfg into blocks and materialize the network modules.
     self.blocks = parse_cfg('data/yolov3.cfg')
     self.net_info, self.module_list = create_modules(self.blocks)
Пример #31
0
    def get_heroku_client_path() -> str:
        """
        Get the path to the heroku executable client, download a new one if it
        doesn't exist.
        """
        print("Locating heroku...")
        # Install Heroku CLI
        os_name = None
        bit_architecture = None

        # Get the platform we are working on
        if sys.platform == "darwin":  # Mac OS X
            os_name = "darwin"
        elif sys.platform.startswith("linux"):  # Linux
            os_name = "linux"
        else:
            os_name = "windows"

        # Find our architecture
        bit_architecture_info = platform.architecture()[0]
        if "64bit" in bit_architecture_info:
            bit_architecture = "x64"
        else:
            bit_architecture = "x86"

        # Find existing heroku files to use
        existing_heroku_directory_names = glob.glob(
            os.path.join(HEROKU_TMP_DIR, "heroku-cli-*")
        )
        if len(existing_heroku_directory_names) == 0:
            print("Getting heroku")
            # Remove any stale partial download before fetching again.
            if os.path.exists(os.path.join(HEROKU_TMP_DIR, "heroku.tar.gz")):
                os.remove(os.path.join(HEROKU_TMP_DIR, "heroku.tar.gz"))

            # Get the heroku client and unzip
            tar_path = os.path.join(HEROKU_TMP_DIR, "heroku.tar.gz")
            # NOTE(review): a list is passed to sh.wget -- presumably sh
            # expands it into separate CLI arguments; confirm.
            sh.wget(
                shlex.split(
                    "{}-{}-{}.tar.gz -O {}".format(
                        HEROKU_CLIENT_URL, os_name, bit_architecture, tar_path
                    )
                )
            )
            sh.tar(shlex.split(f"-xvzf {tar_path} -C {HEROKU_TMP_DIR}"))

            # Clean up the tar
            if os.path.exists(tar_path):
                os.remove(tar_path)

        # Resolve the unpacked client directory and return its bin/heroku path.
        heroku_directory_name = os.path.basename(
            glob.glob(os.path.join(HEROKU_TMP_DIR, "heroku-cli-*"))[0]
        )
        heroku_directory_path = os.path.join(HEROKU_TMP_DIR, heroku_directory_name)
        return os.path.join(heroku_directory_path, "bin", "heroku")
Пример #32
0
 def backups(self):
     # Ask the HTTP backend for the list of available backups and let the
     # user pick one to download.  (Python 2 print statements.)
     data = self.http.load('backups', {'dato': 1})
     if data:
         dialogo = Alerta_Combo('Lista de Backup', 'backup.png', 'Escoja el backup que desea descargar:', data, liststore=(str, str))
         url = dialogo.iniciar()
         if url:
             print 'Backup'
             print url
             sh.wget(url)
             # NOTE(review): `1/0` raises ZeroDivisionError unconditionally --
             # looks like leftover debug code; the sh.unzip() call below is
             # unreachable (and is missing its archive argument). Verify intent.
             1/0
             sh.unzip()
     print data
Пример #33
0
def _pull_release(release):
    """Download *release* into the local cache unless already present.

    Args:
        release: release name; must be one of RELEASES.

    Raises:
        ValueError: if the release name is unknown.
    """
    # Idiom fix: `release not in` instead of `not release in`.
    if release not in RELEASES:
        raise ValueError('Invalid release')

    filename = release_filename(release)
    if not os.path.isdir( CACHE_DIR ):
        log( "Creating cache dir {} ".format( CACHE_DIR ) )
        mkdir("-p", CACHE_DIR )

    # Skip the download when the cached copy already exists.
    if not os.path.exists( CACHE_DIR + filename ):
        log( "Downloading release {} ".format( filename ) )
        wget( REPO + filename, "-O", CACHE_DIR + filename)
Пример #34
0
def load(url, crs_name):
    # Mirror the course page at `url` into a directory named after the course,
    # keeping only pdf/zip files.
    # NOTE(review): the credentials on the --user line below were redacted by
    # the snippet scraper ("******"); as written that line is not valid Python.
    # Restore `"--user=" + <user>, "--password=" + password` before use.
    try:
        sh.wget("--reject", "html,htm", "--accept", "pdf,zip", "--no-parent",
                "-e", "robots=off", "--continue", "--no-host-directories",
                "--convert-links", "--cut-dirs=4",
                "--directory-prefix=" + crs_name, "--quiet", "--mirror",
                "--user-agent=\"\"", "--user="******"--password=" + password, url)
    except sh.ErrorReturnCode_1:
        # Exit code 1: generic wget failure -- abort the program.
        sys.exit(1)
    except sh.ErrorReturnCode_8:
        # Exit code 8: server issued an error response -- back off briefly.
        time.sleep(1)
Пример #35
0
def _download(path, url, i, h):
    """Download *url* into *path* (item i+1 of h), showing wget's progress bar."""
    from sh import wget
    name = url.split('/')[-1]
    # Foreground execution so wget's progress display reaches the terminal.
    wget('-O',
         f'{path}/{name}',
         url,
         '--continue',
         '-q',
         '--show-progress',
         _fg=True)
    print(f'\nCompleted {name}, {i+1}/{h}')
Пример #36
0
def download_sample(source_url):
    """Fetch an mp3 from *source_url* into a temp file and return the file object.

    Exits the process with status 1 when the URL does not end in '.mp3'.
    """
    if not source_url.endswith('.mp3'):
        print("ERROR: sample doesn't appear to be in mp3 format",
              file=sys.stderr)
        sys.exit(1)

    tmp = tempfile.NamedTemporaryFile(delete=False)
    # Mirror wget's streams onto ours so progress/errors remain visible.
    sh.wget('-O',
            tmp.name,
            source_url,
            _out=open('/dev/stdout', 'wb'),
            _err=open('/dev/stderr', 'wb'))
    return tmp
def download_package(name, destination, compiler=""):
    # Download {name}{-compiler}.tar.gz from the toolchain build artifacts and
    # unpack it into `destination`.  (Python 2 print statements.)
    label = map_release_label()
    if len(compiler) > 0:
        compiler = "-" + compiler
    url = "{0}/{1}/label={2}/artifact/toolchain/build/{3}{4}.tar.gz".format(HOST, BUILD, label, name, compiler)

    # Download the file
    print "Downloading {0}".format(name)
    sh.wget(url, directory_prefix=destination, no_clobber=True)
    # Extract
    print "Extracting {0}".format(name)
    sh.tar(z=True, x=True, f="{0}/{1}{2}.tar.gz".format(destination, name, compiler), directory=destination)
    # Remove the archive once extracted.
    sh.rm("{0}/{1}{2}.tar.gz".format(destination, name, compiler))
Пример #38
0
def download_package(destination, product, version, compiler):
  # Download a {product}-{version}-{compiler}-{label}.tar.gz tarball from HOST
  # and unpack it into `destination`.  (Python 2 print statements.)
  label = get_release_label()
  file_name = "{0}-{1}-{2}-{3}.tar.gz".format(product, version, compiler, label)
  url_path="/{0}/{1}-{2}/{0}-{1}-{2}-{3}.tar.gz".format(product, version, compiler, label)
  download_path = HOST + url_path

  print "URL {0}".format(download_path)
  print "Downloading {0} to {1}".format(file_name, destination)
  # --no-clobber avoids downloading the file if a file with the name already exists
  sh.wget(download_path, directory_prefix=destination, no_clobber=True)
  print "Extracting {0}".format(file_name)
  sh.tar(z=True, x=True, f=os.path.join(destination, file_name), directory=destination)
  # Remove the archive once extracted.
  sh.rm(os.path.join(destination, file_name))
Пример #39
0
def downloadPaper(paper, config):
    '''Download the paper.

    Saves the PDF under ~/ADS as {bibcode}_{first author}.pdf, prompting
    before overwriting an existing file, then opens it in the configured
    viewer.

    Params
    ------
    :arg paper
        A `Paper` instance result given by the ads'''

    def open_file(fname):
        # Open the PDF with the user's configured viewer, non-blocking.
        sh.Command(config['adsquery']['pdf_viewer'])(fname, _bg=True)

    def process_output(line):
        print(line, end='')

    if paper.pub == 'ArXiv e-prints':
        # Get the ArXiv name
        _id = paper.bibcode.split('arXiv')[1][:-1]
        _id = _id[:4] + '.' + _id[4:]
        url = 'https://arxiv.org/pdf/{id}'.format(id=_id)
    else:
        url = ("http://adsabs.harvard.edu/cgi-bin/nph-data_query?"
               "bibcode={paper.bibcode}&link_type=ARTICLE".format(
                   paper=paper))

    print(f'Downloading {url}')

    fname = '{paper.bibcode}_{author}.pdf'.format(
        paper=paper,
        author=paper.first_author.split(',')[0])

    filesDir = os.path.join(os.path.expanduser('~'), 'ADS')
    # create the directory if not existing
    if not os.path.isdir(filesDir):
        # Fix: `os.path.mkdir` does not exist (AttributeError at runtime);
        # os.makedirs creates the directory (and any missing parents).
        os.makedirs(filesDir)

    fname = os.path.join(filesDir, fname)

    if os.path.isfile(fname):
        ans = getInput('File already exists on disk. Overwrite [Y/n]?',
                       lambda e: e.lower() if e.lower() in ['y', 'n', '']
                       else None)
        if ans == 'n':
            open_file(fname)
            return

    sh.wget(url,
            header="User-Agent: Mozilla/5.0 (Windows NT 5.1; rv:23.0) Gecko/20100101 Firefox/23.0",
            O=fname,
            _out=process_output)
    print('Downloaded into %s' % fname)
    open_file(fname)
Пример #40
0
def main():
    """Fetch (or copy) the input data tarball next to this script and unpack it."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    tarball = os.path.join(script_dir, data_filename)

    # Prefer an existing local copy over re-downloading.
    if os.path.exists(data_path):
        sh.cp(data_path, script_dir)
    else:
        sh.wget('-P', script_dir, data_url)

    sh.tar('zxvf', tarball, '-C', script_dir)

    return 0
Пример #41
0
def download_sample(source_url: str) -> AudioSample:
    "Download an mp3 file from the internet."
    metadata = {'source_url': source_url}

    # Only mp3 sources are supported; bail out of the process otherwise.
    if not source_url.endswith('.mp3'):
        print("ERROR: sample doesn't appear to be in mp3 format",
              file=sys.stderr)
        sys.exit(1)

    tmp = tempfile.NamedTemporaryFile(delete=False)
    # Mirror wget's streams onto ours so progress/errors remain visible.
    sh.wget('-O', tmp.name, source_url, _out=open('/dev/stdout', 'wb'),
            _err=open('/dev/stderr', 'wb'))

    return AudioSample(tempfile=tmp, metadata=metadata)
Пример #42
0
def store():
    """
    Stores all directories in the filesystem that contains a .sync file to the configured
    ``root_sync_path``

    """
    # Fetch the official dropbox helper script, then make sure the daemon runs.
    wget('-O', 'dropbox.py', 'https://www.dropbox.com/download?dl=packages/dropbox.py')
    dropbox.start_dropbox()

    paths = _get_sync_paths('/home/cada', excludes | {root_sync_path})
    # Map each source path onto its mirror location under root_sync_path.
    mappings = [(src, Path(root_sync_path) / src.relative_to('/'))
                for src in paths]
    _print_sync_mappings(mappings)
    _sync(mappings)
    dropbox.start_dropbox()
Пример #43
0
	def transfer(self, filePath, remote=False):
		""".. function:: transfer(files, remote)

		      Transfer files, local or remote, to host specified on command line

		      :param filePath: path/address of files to be transferred to host
		      :param remote: boolean dictating if files are remote (true) or local (false)"""

		if remote:
			# -N: only re-download when the remote copy is newer than local.
			sh.wget("-N", "-P", "./files", filePath) #Places files within a local directory named "files"
			scpHost = self.host + ":~"
			scp("-r", "./files", scpHost)
		else:
			scpHost = self.host + ":~"
			scp(filePath, scpHost) #May need to edit command to recursively handle directories
def retrieve_data(where, d):
    """Download a deployment's NetCDF file from the ERDDAP server.

    :param where: base URL/path prefix the deployment lives under
    :param d: deployment identifier appended to ``where``
    """
    ud = where + d
    # The last path component doubles as the deployment's file name stem.
    pathDeployName = ud.split('/')[-1]
    path_arg = ud + "/" + pathDeployName + ".nc3.nc"
    host_arg = SERVER + "/tabledap/" + pathDeployName + ".ncCFMA"
    # print() calls replace the Python-2-only print statements; the output
    # text is identical under both interpreters.
    print("Path Arg %s" % path_arg)
    print("Host Arg %s" % host_arg)
    args = [
        "--no-host-directories",
        "--cut-dirs=2",
        "--output-document=%s" % path_arg,
        host_arg
    ]
    print("Args: %s" % ' '.join(args))
    sh.wget(*args)
    def update_cache(self):
        """Refresh the local index cache if the current one fails validation.

        Wipes the stale cache directory, allocates a fresh temp dir, and
        downloads the repository's index file into it.  On download failure
        the repository is flagged broken instead of raising, so one bad repo
        does not abort the whole run.
        """
        if not self.test_cache():
            # Throw away the stale cache and start from a fresh temp dir.
            rm(self.cache_dir, '-rf')
            self.cache_dir = mkdtemp()

            index_file_url = '/'.join([self.repo_url, self.index_file])
            index_file_path = os.path.join(self.cache_dir, self.index_file)

            try:
                print("Downloading index file '{0}' --> '{1}' ...".format(
                    index_file_url, index_file_path
                ))
                wget(index_file_url, '-O', index_file_path)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit still propagate; any real failure marks the
                # repo broken exactly as before.
                self.broken = True
Пример #46
0
def initialize():
    """Create the working directory tree and fetch/verify the input data.

    When the inputs or intermediates directories are missing, streams the
    data archive from ``data_url`` straight into tar under ``prefix`` and
    verifies the files against ``checksums``.  On any failure the partially
    downloaded inputs/intermediates are deleted before re-raising.  Finally
    ensures the outputs and plots directories exist.
    """
    # noinspection PyUnresolvedReferences
    from sh import wget, tar, rm, shasum
    if not os.path.exists(prefix):
        os.makedirs(prefix)

    if (not os.path.exists(dirs['inputs'])) or (not os.path.exists(dirs['intermediates'])):
        try:
            if not os.path.exists(prefix):
                logger.info("Creating {DIR}".format(DIR=prefix))
                os.makedirs(prefix)
            logger.info("Downloading data from {URL} to {DIR}".format(URL=data_url, DIR=prefix))
            # Pipe the download straight into tar instead of saving the
            # archive to disk first.
            tar(wget(data_url, "-qO-", _piped=True), "xz", _cwd=prefix)
            logger.info("Checking checksums of downloaded files")
            for line in shasum("-c", _cwd=prefix, _in=checksums, _iter=True):
                logger.info(line)
        except Exception as e:
            # Use the exception itself rather than e.message: .message was
            # removed in Python 3 (and deprecated since 2.6), so the old code
            # raised AttributeError while handling the real error.
            logger.info("Error: {}".format(e))
            logger.info("Deleting {DIR}".format(DIR=dirs['inputs']))
            rm(dirs['inputs'], '-rf')
            logger.info("Deleting {DIR}".format(DIR=dirs['intermediates']))
            rm(dirs['intermediates'], '-rf')
            raise

    # make sure all those directories exist
    for d in (dirs['outputs'], dirs['plots']):
        if not os.path.exists(d):
            logger.info("Creating {DIR}".format(DIR=d))
            os.makedirs(d)
Пример #47
0
    def setup_tomcat(self, args):
        """Download and extract Tomcat, deploy the Airavata webapps into it,
        and make the Tomcat scripts executable.

        Each step logs and continues on failure (best-effort install), as
        before, but now catches only Exception so Ctrl-C still aborts.

        NOTE(review): `dict` here is a module-level config mapping that
        shadows the builtin, and `setup_airavata_server` is called without
        `self` -- both pre-existing; confirm against the rest of the module.
        """
        try:
            log("Downloading Tomcat ...")
            result = wget(dict['TOMCAT_DOWNLOAD_URL'])
        except Exception:
            log("Error getting Tomcat from : " + dict['TOMCAT_DOWNLOAD_URL'])

        try:
            log("Extracting Tomcat ...")
            result = tar("xvzf " , dict['TOMCAT_VERSION']+ ".tar.gz")
        except Exception:
            log("Error extracting Tomcat ..." + dict['TOMCAT_VERSION']+ ".tar.gz")

        setup_airavata_server(args)

        try:
            log("Copying the Airavata war files to Tomcat's webapp directory ...")
            result = cp(dict['AIRAVATA_VERSION']+ "/*.war " , dict['TOMCAT_VERSION']+ "/webapps", "-v")
        except Exception:
            log("Error copying the Airavata war files to Tomcat's webapp directory ...")

        try :
            log("Granting executeable permissions to the script")
            result = chmod("a+x" , dict['TOMCAT_VERSION']+ "/*.sh")
        except Exception:
            log("Error granting executable permissions to " + dict['TOMCAT_VERSION']+ "/*.sh")
Пример #48
0
def grade(uniqname, link):
    """Fetch a student's submission, build it, and grade by diffing output.

    Full credit if the program output matches golden.out; flagged as "no
    change" if it matches the unmodified starter's naive.out; otherwise
    routed to hand grading.
    """
    print("Grading {}".format(uniqname))
    with pushd("373-f15-linked-list"):
        submission = "{}.c".format(uniqname)
        # Fresh download and a clean build slate.
        wget(link, "-O", submission)
        rm("-f", "list.c", "list.o", "list")
        ln("-s", submission, "list.c")
        make("run")
        try:
            # diff exits non-zero (ErrorReturnCode_1) when outputs differ.
            diff("list.out", "golden.out")
            perfect_grade(uniqname)
        except sh.ErrorReturnCode_1:
            try:
                diff("list.out", "naive.out")
                no_change(uniqname)
            except sh.ErrorReturnCode_1:
                handgrade(uniqname)
Пример #49
0
def wget_and_unpack_package(download_path, file_name, destination, wget_no_clobber):
  """Download file_name from download_path into destination, with retries.

  Args:
    download_path: full URL; must end with "/" + file_name.
    file_name: expected basename of the downloaded file.
    destination: directory wget downloads into.
    wget_no_clobber: skip the download if the file already exists locally.

  Raises:
    Exception: if download_path does not end with file_name, or if all
      download attempts fail.
  """
  if not download_path.endswith("/" + file_name):
    raise Exception("URL {0} does not match with expected file_name {1}"
        .format(download_path, file_name))
  NUM_ATTEMPTS = 3
  for attempt in range(1, NUM_ATTEMPTS + 1):
    logging.info("Downloading {0} to {1}/{2} (attempt {3})".format(
      download_path, destination, file_name, attempt))
    # --no-clobber avoids downloading the file if a file with the name already exists
    try:
      sh.wget(download_path, directory_prefix=destination, no_clobber=wget_no_clobber)
      break
    # "except Exception, e" was Python-2-only syntax; "as" works on 2.6+ and 3.
    except Exception as e:
      if attempt == NUM_ATTEMPTS:
        raise
      logging.error("Download failed; retrying after sleep: " + str(e))
      time.sleep(10 + random.random() * 5) # Sleep between 10 and 15 seconds.
Пример #50
0
    def update_cache(self):
        """Rebuild the package cache: wipe it, re-create the directory, then
        download and decompress the repository's Packages.gz index.  A failed
        download marks the repo broken rather than raising."""
        if self.test_cache():
            return

        # Start over from an empty cache directory.
        rm(self.path, '-rf')
        mkdir('-p', self.path)

        index_file_url = '/'.join([self.repo_url.url.geturl(), 'Packages.gz'])
        index_file_path = os.path.join(self.path, self.index_file)

        print("Downloading index file '{0}' --> '{1}' ...".format(
            index_file_url, index_file_path
        ))
        try:
            with pushd(self.path):
                gz_name = self.index_file + '.gz'
                wget(index_file_url, '-O', gz_name)
                gzip('-d', gz_name)
        except Exception as err:
            print(str(err))
            self.broken = True
Пример #51
0
def setup_environment(exp):
    """
    Do environment setup like downloading code and inputs.

    Clones the experiment's git repositories, checks each out at its recorded
    hash, downloads the input tarballs, and builds the model code.

    :param exp: experiment object carrying comma-separated code_urls,
        code_hashes, data_urls, data_hashes and a codebase name.
    :returns: True on success.
    """

    # Get the code, we only handle git repos.
    code_urls = exp.code_urls.split(',')
    for u in code_urls:
        assert u[-4:] == '.git'
    code_repos = [re.search(r'(\w+)\.git$', u).group(1) for u in code_urls]
    code_hashes = exp.code_hashes.split(',')
    assert len(code_urls) == len(code_hashes)
    assert len(code_repos) == len(code_urls)

    for c in code_urls:
        git.clone(['--recursive', c])

    # Pin every repo to its recorded commit (and matching submodules).
    orig_dir = os.getcwd()
    for i, c in enumerate(code_hashes):
        os.chdir(code_repos[i])
        git.checkout(code_hashes[i])
        git.submodule('update')
    os.chdir(orig_dir)

    data_urls = exp.data_urls.split(',')
    data_hashes = exp.data_hashes.split(',')
    for u in data_urls:
        assert u[-7:] == '.tar.gz'
    data_dirs = [re.search(r'(\w+)\.tar.gz$', u).group(1) for u in data_urls]
    assert len(data_urls) == len(data_hashes)
    assert len(data_dirs) == len(data_urls)

    # BUG FIX: the original line was missing the trailing ':' (syntax error)
    # and passed the directory name `d` to wget instead of the URL `u`.
    for u, d in zip(data_urls, data_dirs):
        wget(u)

    # Do build, then link in data.
    if exp.codebase == 'MOM6':
        mom6.build()
    elif exp.codebase == 'MOM5':
        mom5.build()
    else:
        assert False

    return True
Пример #52
0
def cached_page (url, cached_filename='', refresh=False):
  '''
  cached_page (url):

  Retrieves url with all assets, using wget, including css, js, etc, all converted to proper extensions.
  Caches this manually (regardless of server caching instructions) into directory named after site, per wget conventions.
  Returns just the cached file if it's already been downloaded previously.

  url: URL to be downloaded with all assets, eg https://www.djangoproject.com/

  cached_filename: Previously downloaded filename (with directory), known once wget runs, eg 'www.djangoproject.com/index.html'

  refresh: call wget to refresh the previously saved directory, even if cached_filename is already present.

  Todo:
    Save cached assets list,
    canonicalize / slugify / hash name from url, and
    dispense with the need for the two-step cached_filename.
  '''
  global wget

  # Fast path: serve straight from the existing cache file.
  if not refresh and cached_filename:
    with open (cached_filename) as f:
      return f.read()

  # "bake in" parameters to retrieve all assets and normalize filenames - see below for details
  # NOTE(review): rebinding the module-level `wget` bakes these flags in again
  # on every call; wget tolerates the duplicate flags, but baking once at
  # module load would be cleaner -- confirm no other caller relies on this.
  wget = wget.bake ('-E -k -p -N'.split())

  # call wget to retrieve file and all assets
  wget (url, _out = 'wget.log', _err='wget.err')

  # wget reports each saved file on stderr as "Saving to: '<name>'".
  with open ('wget.err') as f:
    s = f.read()
    saved_list = [line.split (':')[1].strip ("' `") for line in s.split ('\n') if line.startswith ('Saving to')]

  # print(x) parses as a parenthesised Python 2 print statement too, so this
  # stays 2/3 compatible; the bare `print saved_list` was Python 2 only.
  if trace: print (saved_list)

  #with open (dir_of (url) + '/cached_page_assets.txt') as f:
  #  f.write ('\n'.join (saved_list))

  # The first file wget saved is the page itself; return its contents.
  with open (saved_list[0]) as f:
    return f.read()
Пример #53
0
    def setup_airavata_server(self, args):
        """Download the Airavata web-app zip and extract its war files.

        Failures are logged and swallowed (best-effort), as before, but only
        Exception is caught now so KeyboardInterrupt/SystemExit propagate.

        NOTE(review): `dict` is a module-level config mapping shadowing the
        builtin -- pre-existing; confirm against the rest of the module.
        """
        try:
            log("Downloading Airavata web-app ...")
            result = wget(dict['AIRAVATA_DOWNLOAD_URL']+ ".zip")
        except Exception:
            log("Error Downloading Airavata web-app from: " + dict['AIRAVATA_DOWNLOAD_URL'])

        try:
            log("Extracting Airavata war")
            result = unzip(dict['AIRAVATA_VERSION']+ ".zip")
        except Exception:
            log("Error extracting Airavata war" + dict['AIRAVATA_VERSION']+ ".zip")
Пример #54
0
    def prebuild_arch(self, arch):
        """Prepare the reportlab sources for this arch before building.

        Removes the GPL-licensed DarkGarden font, applies the setup patch,
        downloads the pfbfer font pack into reportlab's fonts directory, and
        substitutes the freetype lib/include paths into setup.py.  Runs only
        once per arch (guarded by the .patched marker file).
        """
        if not self.is_patched(arch):
            super(ReportLabRecipe, self).prebuild_arch(arch)
            recipe_dir = self.get_build_dir(arch.arch)

            # Some versions of reportlab ship with a GPL-licensed font.
            # Remove it, since this is problematic in .apks unless the
            # entire app is GPL:
            font_dir = os.path.join(recipe_dir,
                                    "src", "reportlab", "fonts")
            if os.path.exists(font_dir):
                for l in os.listdir(font_dir):
                    if l.lower().startswith('darkgarden'):
                        os.remove(os.path.join(font_dir, l))

            # Apply patches:
            self.apply_patch('patches/fix-setup.patch', arch.arch)
            shprint(sh.touch, os.path.join(recipe_dir, '.patched'))
            # Locate freetype build artifacts; env vars override the defaults.
            ft = self.get_recipe('freetype', self.ctx)
            ft_dir = ft.get_build_dir(arch.arch)
            ft_lib_dir = os.environ.get('_FT_LIB_', os.path.join(ft_dir, 'objs', '.libs'))
            ft_inc_dir = os.environ.get('_FT_INC_', os.path.join(ft_dir, 'include'))
            tmp_dir = os.path.normpath(os.path.join(recipe_dir, "..", "..", "tmp"))
            info('reportlab recipe: recipe_dir={}'.format(recipe_dir))
            info('reportlab recipe: tmp_dir={}'.format(tmp_dir))
            info('reportlab recipe: ft_dir={}'.format(ft_dir))
            info('reportlab recipe: ft_lib_dir={}'.format(ft_lib_dir))
            info('reportlab recipe: ft_inc_dir={}'.format(ft_inc_dir))
            with current_directory(recipe_dir):
                ensure_dir(tmp_dir)
                # Fetch the font pack once; subsequent builds reuse the zip.
                pfbfile = os.path.join(tmp_dir, "pfbfer-20070710.zip")
                if not os.path.isfile(pfbfile):
                    sh.wget("http://www.reportlab.com/ftp/pfbfer-20070710.zip", "-O", pfbfile)
                sh.unzip("-u", "-d", os.path.join(recipe_dir, "src", "reportlab", "fonts"), pfbfile)
                # Replace the _FT_LIB_/_FT_INC_ placeholders in setup.py with
                # the real freetype paths resolved above.
                if os.path.isfile("setup.py"):
                    with open('setup.py', 'r') as f:
                        text = f.read().replace('_FT_LIB_', ft_lib_dir).replace('_FT_INC_', ft_inc_dir)
                    with open('setup.py', 'w') as f:
                        f.write(text)
Пример #55
0
    def download_image(self, image_id):
        """Download the image from the image_url.

        Exactly one thread performs the download for a given image_id; any
        other thread arriving while it is in flight waits on the shared Event
        instead of downloading again.
        """
        # Claim the download, or discover another thread already has it.
        downloading_already = False
        with self._downloading_images_lock:
            downloading_already = (image_id in self._downloading_images)
            if not downloading_already:
                e = self._downloading_images[image_id] = threading.Event()
                e.clear()

        if downloading_already:
            self._log.debug("image is already being downloaded, waiting for it to finish")
            self._downloading_images[image_id].wait()
            self._log.debug("image finished downloading")
            return

        image_filename = image_id_to_volume(image_id)
        dest = os.path.join(LIBVIRT_BASE, image_filename)
        self._log.debug("downloading image {} to {}".format(image_id, dest))

        try:
            wget("-q", "-O", dest, self.image_url + "/" + image_filename)
            self._log.debug("downloaded {}".format(image_id))
        finally:
            # BUG FIX: always wake waiters and clear the in-progress marker,
            # even when wget raises -- previously a failed download left the
            # Event unset, so waiting threads blocked forever and the stale
            # entry prevented any retry.
            self._downloading_images[image_id].set()
            del self._downloading_images[image_id]
Пример #56
0
def downloaded(url, suffix, method='wget'):
    """Fetch *url* into a temporary file and yield the file's path.

    method='wget' shells out via sh; method='requests' uses the requests
    library.  Raises DownloadError on failure and ValueError for an unknown
    method.  The temp file is removed when the generator is resumed/closed.
    """
    print('      downloading {0}'.format(url))
    with tempfile.NamedTemporaryFile(suffix=suffix) as handle:
        if method == 'wget':
            try:
                sh.wget('-e', 'robots=off',
                        '-U', DEFAULT_USER_AGENT,
                        '-O', handle.name,
                        url)
            except sh.ErrorReturnCode_8:
                # Exit code 8 = server issued an error response.
                raise DownloadError(url)
        elif method == 'requests':
            resp = requests.get(url)
            if resp.status_code != 200:
                raise DownloadError(url)
            with open(handle.name, 'wb') as ostream:
                ostream.write(resp.content)
        else:
            raise ValueError('unsupported method {0}'.format(method))

        yield handle.name
    def fetchEpg(self, filename):
        """Use wget to fetch EPG data into memory"""
        try:
            wgetProc = sh.wget(self.config.epgUrl, "-nv", a=self.config.logFile, O="-", user=self.config.username, password=self.config.password)
        except sh.ErrorReturnCode:
            # Download failed -- signal that with None rather than raising.
            return None

        # File the result under a per-year subdirectory of the data dir.
        year = str(datetime.datetime.today().year)
        targetDir = os.path.join(self.config.dataDir, year)
        if not os.path.exists(targetDir):
            os.mkdir(targetDir)

        filepath = os.path.join(targetDir, filename)
        return EpgFile(self.config, filepath, data=wgetProc.stdout)