Example #1
def extract_data(input_file_name, output_file_names):
    sh.unzip("-o", input_file_name)

    # This also updates timestamps.  Ruffus doesn't recognize these files as complete results unless the
    # timestamp is up to date.
    sh.mv("testdata.manual.2009.06.14.csv", "sentiment140.test.csv")
    sh.mv("training.1600000.processed.noemoticon.csv", "sentiment140.train.csv")

    # Re-encode the files as utf8.  They look like utf8 already (e.g. file thinks they're utf8)
    # but they are actually encoded as latin1.  This doesn't make a difference for the test data
    # (the utf8 and latin1 encoded test data are identical files) but the train data has some
    # byte sequences that are invalid utf8 and this makes simplejson really upset.
    for output_file in output_file_names:
        sh.mv(output_file, "temp")
        sh.iconv("-f", "latin1", "-t", "utf8", "temp", _out=output_file)
        sh.rm("temp")
Example #2
def memtest_extract():
    zip_fpath = config.cache_dpath / 'memtest86-usb.zip'
    unzip_fpath = config.cache_dpath / 'memtest86-usb'
    img_fpath = unzip_fpath / 'memtest86-usb.img'
    zip_url = 'https://www.memtest86.com/downloads/memtest86-usb.zip'

    if not zip_fpath.exists():
        print('Downloading:', zip_url, 'to', zip_fpath)
        sh.wget('-O', zip_fpath, zip_url)

    if not img_fpath.exists():
        print('Extracting zip to', unzip_fpath)
        sh.unzip('-d', unzip_fpath, zip_fpath)

    output = sh.sgdisk('-p', img_fpath)
    lines = output.strip().splitlines()
    # Second line should look like:
    #   Sector size (logical): 512 bytes
    sector_size = lines[1].split(':')[1].replace('bytes', '').strip()

    json_output = sh.sfdisk('--json', img_fpath)
    partitions = json.loads(str(json_output))['partitiontable']['partitions']
    efi_part = [p for p in partitions if p['name'] == 'EFI System Partition'].pop()
    efi_start_sector = efi_part['start']

    efi_start_bytes = int(sector_size) * efi_start_sector

    return img_fpath, efi_start_bytes
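Note: indexing lines[1] breaks if sgdisk ever adds or reorders a banner line. A regex-based sketch (assuming the same "Sector size (logical): 512 bytes" output format) is sturdier:

import re

def parse_sector_size(sgdisk_output):
    # Matches e.g. "Sector size (logical): 512 bytes"
    m = re.search(r'Sector size \(logical\):\s*(\d+)\s*bytes', sgdisk_output)
    if m is None:
        raise ValueError('sector size not found in sgdisk output')
    return int(m.group(1))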
Example #3
def convert_to_geojson(path):
    """
    Receive a path to zip file.
    Unzip the file, convert .shp files within to GeoJSON data files.
    Return a list of paths to the GeoJSON files.
    """
    outdir = os.path.splitext(path)[0]  # not rstrip('.zip'), which strips a character set, not a suffix
    basename = outdir.split('/')[-1]

    if os.path.exists(outdir):  # Delete any existing outdir
        shutil.rmtree(outdir)
    os.makedirs(outdir, exist_ok=True)
    unzip(path, '-d', outdir)

    geojson_files = []

    for filename in os.listdir(outdir):
        if filename.endswith(".shp"):
            shpFile = os.path.join(outdir, filename)
            geojsonFile = shpFile.replace('.shp', '.geojson')
            print(shpFile, geojsonFile)

            ogr_command = 'ogr2ogr -f "GeoJSON" -t_srs crs:84 {outpath} {inpath}'.format(
                outpath=quote(geojsonFile), inpath=quote(shpFile))

            os.popen(ogr_command).read()
            geojson_files.append(geojsonFile)

    return geojson_files
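Note: building the ogr2ogr command as a shell string invites quoting bugs, and os.popen hides failures. A sketch of the same call via subprocess with an argument list (no shell involved):

import subprocess

def shp_to_geojson(shp_path, geojson_path):
    # Same ogr2ogr invocation as above, but passed as an argument
    # list, so no quoting is needed and failures raise an exception.
    subprocess.run(
        ['ogr2ogr', '-f', 'GeoJSON', '-t_srs', 'crs:84', geojson_path, shp_path],
        check=True)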
Example #4
def download(target, temp_dir):
    zip_path = path.join(temp_dir, "temp.zip")
    tgt_path = path.join(temp_dir, "chunk")

    for chunk in CHUNKS:
        tif_name = TIF_FORMAT.format(chunk)
        tif_path = path.join(temp_dir, tif_name)

        wget(URL_FORMAT.format(chunk), q=True, O=zip_path)
        
        with zipfile.ZipFile(zip_path, 'r') as pack:
            contents = pack.namelist()
            if contents != [tif_name]:
                raise ValueError("Bad archive contents: {:r}".format(contents))
        
        unzip(zip_path, d=temp_dir)
        os.unlink(zip_path)

        convert(tif_path, '-quiet', 'GRAY:{}'.format(tgt_path))
        os.unlink(tif_path)

        if os.stat(tgt_path).st_size != EXPECT_SIZE:
            raise ValueError("Bad converted size: {}".format(chunk))

        with open(tgt_path, "rb") as f:
            shutil.copyfileobj(f, target)
        os.unlink(tgt_path)
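Note: the archive is inspected with zipfile and then extracted with the external unzip. A sketch that validates and extracts in one pass using only the standard library (same single-member check as above):

import zipfile

def extract_single_member(zip_path, expected_name, dest_dir):
    with zipfile.ZipFile(zip_path) as pack:
        contents = pack.namelist()
        if contents != [expected_name]:
            raise ValueError("Bad archive contents: {!r}".format(contents))
        pack.extract(expected_name, path=dest_dir)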
Example #5
def prepare_datasets(configs, output_dir, input_dir):
    if input_dir.startswith("http"):
        file_path = download_file(configs, "imagenet_less.zip", output_dir)
        sh.unzip("-o", file_path, "-d", output_dir)
        return output_dir + "/imagenet_less"
    else:
        return input_dir
Example #6
 def prebuild_arch(self, arch):
     if not self.is_patched(arch):
         super(ReportLabRecipe, self).prebuild_arch(arch)
         self.apply_patch('patches/fix-setup.patch', arch.arch)
         recipe_dir = self.get_build_dir(arch.arch)
         shprint(sh.touch, os.path.join(recipe_dir, '.patched'))
         ft = self.get_recipe('freetype', self.ctx)
         ft_dir = ft.get_build_dir(arch.arch)
         ft_lib_dir = os.environ.get('_FT_LIB_', os.path.join(ft_dir, 'objs', '.libs'))
         ft_inc_dir = os.environ.get('_FT_INC_', os.path.join(ft_dir, 'include'))
         tmp_dir = os.path.normpath(os.path.join(recipe_dir, "..", "..", "tmp"))
         info('reportlab recipe: recipe_dir={}'.format(recipe_dir))
         info('reportlab recipe: tmp_dir={}'.format(tmp_dir))
         info('reportlab recipe: ft_dir={}'.format(ft_dir))
         info('reportlab recipe: ft_lib_dir={}'.format(ft_lib_dir))
         info('reportlab recipe: ft_inc_dir={}'.format(ft_inc_dir))
         with current_directory(recipe_dir):
             sh.ls('-lathr')
             ensure_dir(tmp_dir)
             pfbfile = os.path.join(tmp_dir, "pfbfer-20070710.zip")
             if not os.path.isfile(pfbfile):
                 sh.wget("http://www.reportlab.com/ftp/pfbfer-20070710.zip", "-O", pfbfile)
             sh.unzip("-u", "-d", os.path.join(recipe_dir, "src", "reportlab", "fonts"), pfbfile)
             if os.path.isfile("setup.py"):
                 with open('setup.py', 'r') as f:
                     text = f.read().replace('_FT_LIB_', ft_lib_dir).replace('_FT_INC_', ft_inc_dir)
                 with open('setup.py', 'w') as f:
                     f.write(text)
Example #7
    def download_client_config(self, cluster, service):
        """Download the client configuration zip for a particular cluster and service.

        Since cm_api does not provide a way to download the archive we build the URL
        manually and download the file. Once it downloaded the file the archive is
        extracted and its content is copied to the Hadoop configuration directories
        defined by Impala.
        """
        logger.info("Downloading client configuration for {0}".format(
            service.name))
        url = "http://{0}:7180/api/{1}/clusters/{2}/services/{3}/clientConfig".format(
            self.cm_host, CM_API_VERSION, urlquote(cluster.name),
            urlquote(service.name))
        path = mkdtemp()
        sh.curl(url,
                o=os.path.join(path, "clientConfig.zip"),
                _out=tee,
                _err=tee)
        current = os.getcwd()
        os.chdir(path)
        sh.unzip("clientConfig.zip")
        for root, _, file_names in os.walk("."):
            for filename in fnmatch.filter(file_names, "*.xml"):
                src = os.path.join(root, filename)
                dst = os.path.join(self.impala_home, "fe", "src", "test",
                                   "resources")
                logger.debug("Copying {0} to {1}".format(src, dst))
                shutil.copy(src, dst)
        os.chdir(current)
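Note: the chdir + unzip + os.walk dance can be avoided by reading the archive members directly. A sketch with zipfile that copies each *.xml member, flattened, into the resources directory (hypothetical helper, same end result assumed):

import os
import zipfile

def copy_xml_configs(zip_path, dest_dir):
    with zipfile.ZipFile(zip_path) as archive:
        for member in archive.namelist():
            if member.endswith('.xml'):
                target = os.path.join(dest_dir, os.path.basename(member))
                with archive.open(member) as src, open(target, 'wb') as dst:
                    dst.write(src.read())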
Example #8
    def downloadShapeFile(self, fipsCode):
        print(("downloading %s" % fipsCode))
        sFile = self.netShapeFile.replace('XX', "%02d" % fipsCode)
        path = self.netShapePath + sFile
        conn = httplib.HTTPConnection(self.netShapeHost)
        conn.request("GET", path)
        r = conn.getresponse()

        if r.status != 200:
            raise RuntimeError("*** failed download ***\n\t%s, %s, %s" %
                               (sFile, r.status, r.reason))

        tmpDir = mkdtemp()
        sFileFull = os.path.join(tmpDir, sFile)
        with open(sFileFull, "wb") as f:
            data = r.read()
            f.write(data)

        conn.close()

        unzip(sFileFull, d=tmpDir)
        shapeFiles = glob(os.path.join(tmpDir, '*.shp'))
        if len(shapeFiles) != 1:
            raise RuntimeError(
                "There should only be one shape file in %s, failing" % sFile)
        for shapeFile in shapeFiles:
            infoName = self.ogrFileName(fipsCode)
            with gzip.open(infoName, "wb") as f:
                ogrinfo('-al', shapeFile, _out=f)
        rmtree(tmpDir)
Example #9
 def compile( self, source_dir, build_dir, install_dir ):
     package_source_dir = os.path.join( source_dir, self.dirname )
     assert( os.path.exists( package_source_dir ) )
     package_build_dir = os.path.join( build_dir, self.dirname )
     runpath_dir = os.path.join( package_source_dir, 'RunPath' )
     if ( not os.path.exists( os.path.join( runpath_dir, 'media.zip' ) ) ):
         sh.cd( runpath_dir )
         sh.wget( '--no-check-certificate', 'https://bitbucket.org/jacmoe/ogitor/downloads/media.zip' )
         sh.unzip( 'media.zip' )
     if ( not os.path.exists( os.path.join( runpath_dir, 'projects.zip' ) ) ):
         sh.cd( runpath_dir )
         sh.wget( '--no-check-certificate', 'https://bitbucket.org/jacmoe/ogitor/downloads/projects.zip' )
         sh.unzip( 'projects.zip' )
     sh.mkdir( '-p', package_build_dir )
     sh.cd( package_build_dir )
     if ( platform.system() == 'Darwin' ):
         sh.cmake(
             '-G', 'Xcode',
             '-D', 'CMAKE_INSTALL_PREFIX=%s' % install_dir,
             '-D', 'CMAKE_MODULE_PATH=%s' % os.path.join( install_dir, 'CMake' ),
             package_source_dir,
             _out = sys.stdout )
         sh.xcodebuild( '-configuration', 'Release', _out = sys.stdout )
     else:
         sh.cmake(
             '-D', 'CMAKE_INSTALL_PREFIX=%s' % install_dir,
             '-D', 'CMAKE_MODULE_PATH=%s' % os.path.join( install_dir, 'lib/OGRE/cmake' ),
             package_source_dir,
             _out = sys.stdout )
         sh.make( '-j4', 'VERBOSE=1', _out = sys.stdout )
         sh.make.install( _out = sys.stdout )
Example #10
    def html_output(self):
        ext = '.html'
        today = datetime.date.today().isoformat()
        sha = self.test_file + ".html.sha"
        # cannot recover if generating html fails
        options = (['--zip'] + self.options
                   + ['-f', 'html', self.test_file,
                      self.test_out + ext + '.zip'])
        try:
            self.gdoc_to(*options,
                         _err=self.test_err + ".html.log")
            # XXX it hangs without -n, didn't have time to figure out why
            out_dir = os.path.dirname(self.test_out)
            sh.unzip('-n', '-d', out_dir, self.test_out + ext + '.zip')
            sh.sed('-i', '-e', 's/%s/TODAYS_DATE/g' % today,
                   self.test_out + ext)
            test_result = slurp('%s.html' % self.test_out)
        except ErrorReturnCode as e:
            self.say(red("gdoc-to failed: {}. See {}.html.log"),
                     e, self.test_err)
            self.say(red("Ran in {}"), os.getcwd())
            self.failed = True
            sh.rm('-f', sha)
            return
        try:
            html5check(self.test_out + ext,
                       _out=self.test_out + ".html.errors")
        except ErrorReturnCode:
            self.say(red("Test output did not validate as XHTML5!"))
            self.say(red("\tSee {}.html.errors"), self.test_out)
            self.failed = True

        if test_result != slurp(self.test_file + ext):
            # the file changed, but the change might be okay
            spit(self._canonical_body(self.test_out + ext),
                 self.test_out + ".body")
            spit(self._canonical_body(self.test_file + ext),
                 self.test_out + ".canon.body")

            if (slurp(self.test_out + '.body')
                    == slurp(self.test_out + '.canon.body')):
                self.say(yellow("File changed. Updating canonical file."))
                sh.cp(self.test_out + ext, self.test_file + ext)
            else:
                self.say(red("HTML body changed!"))
                self.say(red("\tSee {}.*"), fail_path(self.test_name))
                sh.cp(self.test_out + ext, fail_path(self.test_name + ext))
                sh.diff('-u', self.test_file + ext, self.test_out + ext,
                        _out=fail_path(self.test_name + ".html.diff"),
                        _ok_code=[0, 1])
                sh.cp(self.test_out + ".body",
                      fail_path(self.test_name + ".body"))
                sh.cp(self.test_out + ".canon.body",
                      fail_path(self.test_name + ".body.expected"))
                sh.diff('-u', self.test_out + ".canon.body",
                        self.test_out + ".body",
                        _out=fail_path(self.test_name + '.body.diff'),
                        _ok_code=[0, 1])
                self.failed = True
Example #11
def get_mace(configs, abis, output_dir, build_mace):
    if build_mace:
        sh.bash("tools/build_mace.sh",
                abis,
                os.path.abspath(output_dir),
                _fg=True)
    else:
        file_path = download_file(configs, "libmace.zip", output_dir)
        sh.unzip("-o", file_path, "-d", "third_party/mace")
Example #12
def archiveDownload(url, destination, archiveType):
    logging.info('Now downloading archive file from URL %s to %s' % (url, destination))
    filename = wget.download(url)
    if archiveType == 'zip':
        logging.info('Unzipping zip file from: %s', filename)
        sh.unzip(filename)
    elif archiveType == 'tar.gz':
        logging.info('Untarring tar.gz file from: %s', filename)
        sh.tar('-xvzf', filename )
    logging.info('Removing archive file.')
    sh.rm(filename)
    return
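Note: the if/elif dispatch on archiveType generalizes to a lookup table if more formats are ever added. A sketch covering the same two formats as the function above:

import sh

EXTRACTORS = {
    'zip': lambda filename: sh.unzip(filename),
    'tar.gz': lambda filename: sh.tar('-xvzf', filename),
}

def extract(filename, archive_type):
    # A KeyError here is the analogue of falling through the elif chain.
    EXTRACTORS[archive_type](filename)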
Example #13
 def backups(self):
     data = self.http.load('backups', {'dato': 1})
     if data:
         dialogo = Alerta_Combo('Lista de Backup', 'backup.png', 'Escoja el backup que desea descargar:', data, liststore=(str, str))
         url = dialogo.iniciar()
         if url:
             print('Backup')
             print(url)
             sh.wget(url)
             # wget saves to the URL's basename; extract that archive
             sh.unzip(url.split('/')[-1])
     print(data)
Example #14
def archiveDownload(url, destination, archiveType):
    logging.info('Now downloading archive file from URL %s to %s' %
                 (url, destination))
    filename = wget.download(url)
    if archiveType == 'zip':
        logging.info('Unzipping zip file from: %s', filename)
        sh.unzip(filename)
    elif archiveType == 'tar.gz':
        logging.info('Untarring tar.gz file from: %s', filename)
        sh.tar('-xvzf', filename)
    logging.info('Removing archive file.')
    sh.rm(filename)
    return
Example #15
def extract_data(input_file_name, output_file_names):
    sh.unzip("-o", input_file_name)

    # This also updates timestamps.  Ruffus doesn't recognize these files as complete results unless the
    # timestamp is up to date.
    sh.mv("testdata.manual.2009.06.14.csv", "sentiment140.test.csv")
    sh.mv("training.1600000.processed.noemoticon.csv", "sentiment140.train.csv")

    # Re-encode the files as utf8.  They look like utf8 already (e.g. file thinks they're utf8)
    # but they are actually encoded as latin1.  This doesn't make a difference for the test data
    # (the utf8 and latin1 encoded test data are identical files) but the train data has some
    # byte sequences that are invalid utf8 and this makes simplejson really upset.
    for output_file in output_file_names:
        sh.mv(output_file, "temp")
        sh.iconv("-f", "latin1", "-t", "utf8", "temp", _out=output_file)
        sh.rm("temp")
Example #16
    def install_agentlib(self):
        # Ensure that no non-python modules are built
        os.putenv("CC", "/bin/false")
        os.putenv("CXX", "/bin/false")

        # Use a local development agentlib directory if it exists, else get the latest wheel from the repo
        dev_path = os.path.join(self.inpath, 'agentlib')
        if os.path.exists(dev_path):
            print("Using local dev agentlib/")
            os.chdir(dev_path)
            print(self.python('setup.py', 'bdist_wheel', '--os', 'android'))
            print(
                self.pip(
                    'install', 'dist/agentlib-{}-py3-none-any.whl'.format(
                        AGENTLIB_VERSION)))
        else:
            artifacts = GitlabUtils.get_latest_artifact(AGENTLIB_ID, JOB_ID)
            if artifacts:
                with open('/tmp/wheel.zip', 'wb') as ofile:
                    ofile.write(artifacts)
                print(
                    sh.unzip(
                        '-o', '/tmp/wheel.zip',
                        'wheel/{}/agentlib-{}-py3-none-any.whl'.format(
                            JOB_ID, AGENTLIB_VERSION), '-d', '/tmp/'))
                print(
                    self.pip(
                        'install',
                        '/tmp/wheel/{}/agentlib-{}-py3-none-any.whl'.format(
                            JOB_ID, AGENTLIB_VERSION)))
            else:
                raise RuntimeError("Cannot get agentlib")
Example #17
    def prebuild_arch(self, arch):
        if not self.is_patched(arch):
            super().prebuild_arch(arch)
            recipe_dir = self.get_build_dir(arch.arch)

            # Some versions of reportlab ship with a GPL-licensed font.
            # Remove it, since this is problematic in .apks unless the
            # entire app is GPL:
            font_dir = os.path.join(recipe_dir, "src", "reportlab", "fonts")
            if os.path.exists(font_dir):
                for file in os.listdir(font_dir):
                    if file.lower().startswith('darkgarden'):
                        os.remove(os.path.join(font_dir, file))

            # Apply patches:
            self.apply_patch('patches/fix-setup.patch', arch.arch)
            shprint(sh.touch, os.path.join(recipe_dir, '.patched'))
            ft = self.get_recipe('freetype', self.ctx)
            ft_dir = ft.get_build_dir(arch.arch)
            ft_lib_dir = os.environ.get('_FT_LIB_',
                                        os.path.join(ft_dir, 'objs', '.libs'))
            ft_inc_dir = os.environ.get('_FT_INC_',
                                        os.path.join(ft_dir, 'include'))
            tmp_dir = os.path.normpath(
                os.path.join(recipe_dir, "..", "..", "tmp"))
            info('reportlab recipe: recipe_dir={}'.format(recipe_dir))
            info('reportlab recipe: tmp_dir={}'.format(tmp_dir))
            info('reportlab recipe: ft_dir={}'.format(ft_dir))
            info('reportlab recipe: ft_lib_dir={}'.format(ft_lib_dir))
            info('reportlab recipe: ft_inc_dir={}'.format(ft_inc_dir))
            with current_directory(recipe_dir):
                ensure_dir(tmp_dir)
                pfbfile = os.path.join(tmp_dir, "pfbfer-20070710.zip")
                if not os.path.isfile(pfbfile):
                    sh.wget("http://www.reportlab.com/ftp/pfbfer-20070710.zip",
                            "-O", pfbfile)
                sh.unzip("-u", "-d",
                         os.path.join(recipe_dir, "src", "reportlab", "fonts"),
                         pfbfile)
                if os.path.isfile("setup.py"):
                    with open('setup.py', 'r') as f:
                        text = f.read().replace('_FT_LIB_',
                                                ft_lib_dir).replace(
                                                    '_FT_INC_', ft_inc_dir)
                    with open('setup.py', 'w') as f:
                        f.write(text)
Example #18
	def install_tool(self):
		results = sh.unzip("-o", DOWNLOAD, "-d", TOOL_HOME)
		parts = results.split('\n')
		path = None
		for part in parts:
			if part.find("inflating") > -1:
				path = self._return_path_bit(part.strip().split(" ")[1])
				break
		if path is None:
			raise RuntimeError("no inflated entries found in unzip output")
		sh.rm("-f", AMI_HOME)
		sh.ln("-s", TOOL_HOME + "/" + path, AMI_HOME)
Example #19
    def fetch(self):

        cache = self.workdir('cache', True)
        key = self.key(self.path)

        if key is not None:

            outfile = '%s/%s' % (cache, os.path.basename(key.name))

            with open(outfile, 'wb') as f:
                key.get_file(f)

            unpack_dir = self.workdir('unpacked', True)
            sh.unzip(outfile, _cwd=unpack_dir)
            return (key.etag.strip('"'), unpack_dir)

        return (None, cache)
Example #20
def _install_fancybox(stdout=None):
    """Callback to install necessary library for the Fancybox module"""
    http.dl(FANCYBOX_URI, 'fancybox.zip')
    output = unzip('fancybox.zip')

    if stdout and output:
        stdout.write(str(output).strip() + '\n')

    os.rename('fancyapps-fancyBox-18d1712', 'fancybox')
    rm('fancybox.zip')
Example #21
def _install_predis(stdout=None):
    """Callback to install Predis for the Redis module"""
    http.dl(PREDIS_URI, 'predis.zip')
    output = unzip('predis.zip')

    if stdout and output:
        stdout.write(str(output).strip() + '\n')

    os.rename('nrk-predis-d02e2e1', 'predis')
    rm('predis.zip')
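Note: _install_fancybox and _install_predis (and _install_jquery_colorpicker below) share one shape: download, unzip, rename the versioned folder, remove the archive. A sketch of that common helper (http.dl is the downloader the module already uses; the helper itself is hypothetical):

import os
from sh import rm, unzip

def install_zip(uri, zip_name, extracted_name, final_name):
    http.dl(uri, zip_name)  # the module's own downloader (assumed)
    output = unzip(zip_name)
    os.rename(extracted_name, final_name)
    rm(zip_name)
    return output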
Example #22
    def _get_code(self, nmpi_job, job_desc):
        """
        Obtain the code and place it in the working directory.

        If the experiment description is the URL of a Git repository, try to clone it.
        If it is the URL of a zip or .tar.gz archive, download and unpack it.
        Otherwise, the content of "code" is the code: write it to a file.
        """
        url_candidate = urlparse(nmpi_job['code'])
        logger.debug("Get code: %s %s", url_candidate.netloc,
                     url_candidate.path)
        if url_candidate.scheme and url_candidate.path.endswith(
            (".tar.gz", ".zip", ".tgz")):
            self._create_working_directory(job_desc.working_directory)
            target = os.path.join(job_desc.working_directory,
                                  os.path.basename(url_candidate.path))
            #urlretrieve(nmpi_job['code'], target) # not working via KIP https proxy
            curl(nmpi_job['code'], '-o', target)
            logger.info("Retrieved file from {} to local target {}".format(
                nmpi_job['code'], target))
            if url_candidate.path.endswith((".tar.gz", ".tgz")):
                tar("xzf", target, directory=job_desc.working_directory)
            elif url_candidate.path.endswith(".zip"):
                try:
                    # -o for auto-overwrite
                    unzip('-o', target, d=job_desc.working_directory)
                except:
                    logger.error("Could not unzip file {}".format(target))
        else:
            try:
                # Check the "code" field for a git url (clone it into the workdir) or a script (create a file into the workdir)
                # URL: use git clone
                git.clone('--recursive', nmpi_job['code'],
                          job_desc.working_directory)
                logger.info("Cloned repository {}".format(nmpi_job['code']))
            except (sh.ErrorReturnCode_128, sh.ErrorReturnCode):
                # SCRIPT: create file (in the current directory)
                logger.info("The code field appears to be a script.")
                self._create_working_directory(job_desc.working_directory)
                with codecs.open(job_desc.arguments[0], 'w',
                                 encoding='utf8') as job_main_script:
                    job_main_script.write(nmpi_job['code'])
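Note: the docstring's three cases (archive URL, git URL, inline script) can be made explicit before any side effects run. A condensed classification sketch (hypothetical helper mirroring the branching above; the original distinguishes git from script by letting git clone fail):

from urllib.parse import urlparse

def classify_code_field(code):
    url = urlparse(code)
    if url.scheme and url.path.endswith(('.tar.gz', '.zip', '.tgz')):
        return 'archive'
    if url.scheme:
        return 'git-or-url'
    return 'script'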
Example #23
def _install_jquery_colorpicker(stdout=None):
    """Callback to install necessary library for the module"""
    os.makedirs('./colorpicker')
    http.dl(JQ_COLOR_PICKER_URI, './colorpicker/colorpicker.zip')
    with pushd('./colorpicker'):
        output = unzip('colorpicker.zip')

        if stdout and output:
            stdout.write(str(output).strip() + '\n')

        rm('colorpicker.zip')
Example #24
    def process_layer(self, typename):
        fname_in = '%s.shp' % typename
        fname_out = '%s_rasterized.tiff' % typename
        pwd = str(sh.pwd()).strip()
        try:
            sh.cd('/tmp/layer')
            sh.rm("-rf", sh.glob('*'))
            sh.unzip('../layer.zip')
            saga_cmd.shapes_points("Points Filter", POINTS=fname_in, FIELD="MASA_HUMEDO", FILTER="tmp1.shp", RADIUS=100, MINNUM=25, MAXNUM=200, METHOD=4, PERCENT=15)
            saga_cmd.shapes_points("Points Filter", POINTS="tmp1.shp", FIELD="MASA_HUMEDO", FILTER="tmp2.shp", RADIUS=100, MINNUM=25, MAXNUM=200, METHOD=5, PERCENT=90)
            saga_cmd.grid_gridding("Shapes to Grid", INPUT="tmp2.shp", FIELD="MASA_HUMEDO", MULTIPLE=4, LINE_TYPE=0, GRID_TYPE=3, USER_SIZE=0.0001, TARGET=0, USER_GRID="tmp3.sgrd")
            saga_cmd.grid_tools("Close Gaps", INPUT="tmp3.sgrd", RESULT="tmp4.sgrd")
            saga_cmd.shapes_points("Convex Hull", SHAPES="tmp2.shp", HULLS="tmphull.shp", POLYPOINTS=0)
            saga_cmd.shapes_grid("Clip Grid with Polygon", INPUT="tmp4.sgrd", OUTPUT="tmp5.sgrd", POLYGONS="tmphull.shp")
            saga_cmd.grid_filter("Gaussian Filter", INPUT="tmp5.sgrd", RESULT="tmp6", SIGMA=3, MODE=1, RADIUS=50)
            sh.gdal_translate("-of", "gtiff", "tmp6.sdat", fname_out)
        finally:
            sh.cd(pwd)
        return '/tmp/layer/%s' % fname_out
Example #25
    def fetch(self):

        cache = self.workdir('cache', True)
        r = requests.get(self.url, stream=True)

        if r.status_code == 200:

            outfile = '%s/latest.zip' % cache

            with open(outfile, 'wb') as f:
                for chunk in r.iter_content(8192):
                    f.write(chunk)

            unpack_dir = self.workdir('unpacked', True)
            sh.unzip(outfile, _cwd=unpack_dir)
            return (self.key(r), unpack_dir)

        return (None, cache)
Example #26
    def unpack(self, arch):
        build_dir = self.get_build_container_dir(arch)
        filename = self.versioned_url.split("/")[-1]

        with current_directory(build_dir):
            directory_name = self.get_build_dir(arch)

            if not exists(directory_name) or not isdir(directory_name):
                extraction_filename = join(
                    self.ctx.packages_path, self.name, filename)
                try:
                    sh.unzip(extraction_filename)
                except (sh.ErrorReturnCode_1, sh.ErrorReturnCode_2):
                    # return code 1 means unzipping had
                    # warnings but did complete,
                    # apparently happens sometimes with
                    # github zips
                    pass
                shprint(sh.mv, "jni", directory_name)
            else:
                info("{} is already unpacked, skipping".format(self.name))
Example #27
    def setup_airavata_server(self, args):
        try:
            log("Downloading Airavata web-app ...")
            result = wget(dict['AIRAVATA_DOWNLOAD_URL']+ ".zip")
        except:
            log("Error Downloading Airavata web-app from: " + dict['AIRAVATA_DOWNLOAD_URL'])

        try:
            log("Extracting Airavata war")
            result = unzip(dict['AIRAVATA_VERSION']+ ".zip")
        except:
            log("Error extracting Airavata war" + dict['AIRAVATA_VERSION']+ ".zip")
Example #28
    def prebuild_arch(self, arch):
        if not self.is_patched(arch):
            super(ReportLabRecipe, self).prebuild_arch(arch)
            recipe_dir = self.get_build_dir(arch.arch)

            # Some versions of reportlab ship with a GPL-licensed font.
            # Remove it, since this is problematic in .apks unless the
            # entire app is GPL:
            font_dir = os.path.join(recipe_dir,
                                    "src", "reportlab", "fonts")
            if os.path.exists(font_dir):
                for l in os.listdir(font_dir):
                    if l.lower().startswith('darkgarden'):
                        os.remove(os.path.join(font_dir, l))

            # Apply patches:
            self.apply_patch('patches/fix-setup.patch', arch.arch)
            shprint(sh.touch, os.path.join(recipe_dir, '.patched'))
            ft = self.get_recipe('freetype', self.ctx)
            ft_dir = ft.get_build_dir(arch.arch)
            ft_lib_dir = os.environ.get('_FT_LIB_', os.path.join(ft_dir, 'objs', '.libs'))
            ft_inc_dir = os.environ.get('_FT_INC_', os.path.join(ft_dir, 'include'))
            tmp_dir = os.path.normpath(os.path.join(recipe_dir, "..", "..", "tmp"))
            info('reportlab recipe: recipe_dir={}'.format(recipe_dir))
            info('reportlab recipe: tmp_dir={}'.format(tmp_dir))
            info('reportlab recipe: ft_dir={}'.format(ft_dir))
            info('reportlab recipe: ft_lib_dir={}'.format(ft_lib_dir))
            info('reportlab recipe: ft_inc_dir={}'.format(ft_inc_dir))
            with current_directory(recipe_dir):
                ensure_dir(tmp_dir)
                pfbfile = os.path.join(tmp_dir, "pfbfer-20070710.zip")
                if not os.path.isfile(pfbfile):
                    sh.wget("http://www.reportlab.com/ftp/pfbfer-20070710.zip", "-O", pfbfile)
                sh.unzip("-u", "-d", os.path.join(recipe_dir, "src", "reportlab", "fonts"), pfbfile)
                if os.path.isfile("setup.py"):
                    with open('setup.py', 'r') as f:
                        text = f.read().replace('_FT_LIB_', ft_lib_dir).replace('_FT_INC_', ft_inc_dir)
                    with open('setup.py', 'w') as f:
                        f.write(text)
Example #29
def main():

    # get the inputs
    string_ports = '/mnt/work/input/ports.json'
    input_data = '/mnt/work/input/data'

    # create output directory
    out_path = '/mnt/work/output/data'
    if os.path.exists(out_path) is False:
        os.makedirs(out_path)

    # read the inputs
    with open(string_ports) as ports:
        inputs = json.load(ports)
    resolution = inputs.get('resolution', 20)

    # convert the inputs to the correct dtypes
    resolution = convert_type(resolution, int, 'Int')

    # get the SAFE file in the input folder
    zips = glob.glob1(input_data, '*.zip')
    if len(zips) == 0:
        raise ValueError("No zips found in input data port")
    if len(zips) > 1:
        raise ValueError("Multiple zips found in input data port")
    in_zip = os.path.join(input_data, zips[0])

    # unzip it
    sh.unzip(in_zip, _cwd=os.path.dirname(in_zip))

    # rename to safe
    in_safe = in_zip.replace('.zip', '.SAFE')

    # run the processing
    print("Starting L2A_Process...")
    L2A_process.main([in_safe,
                      out_path,
                      '--resolution', str(resolution)])
Example #30
 def fetch_yaradroid(self):
     with tempfile.TemporaryDirectory() as tmpdir:
         tmpzip = os.path.join(tmpdir, "yaradroid.zip")
         artifacts = GitlabUtils.get_latest_artifact(YARADROID_ID, 'build')
         if artifacts:
             with open(tmpzip, 'wb') as ofile:
                 ofile.write(artifacts)
             print(
                 sh.unzip(
                     '-o', '-j', tmpzip,
                     'app/build/intermediates/cmake/debug/obj/armeabi-v7a/yara.so',
                     '-d', self.CODE_PATH))
         else:
             raise RuntimeError("Cannot download yaradroid")
Example #31
    def download_client_config(self, cluster, service):
        """Download the client configuration zip for a particular cluster and service.

        Since cm_api does not provide a way to download the archive we build the URL
        manually and download the file. Once it downloaded the file the archive is
        extracted and its content is copied to the Hadoop configuration directories
        defined by Impala.
        """
        logger.info("Downloading client configuration for {0}".format(service.name))
        url = "http://{0}:7180/api/{1}/clusters/{2}/services/{3}/clientConfig".format(
            self.cm_host, CM_API_VERSION, urlquote(cluster.name), urlquote(service.name))
        path = mkdtemp()
        sh.curl(url, o=os.path.join(path, "clientConfig.zip"), _out=tee, _err=tee)
        current = os.getcwd()
        os.chdir(path)
        sh.unzip("clientConfig.zip")
        for root, _, file_names in os.walk("."):
            for filename in fnmatch.filter(file_names, "*.xml"):
                src = os.path.join(root, filename)
                dst = os.path.join(self.impala_home, "fe", "src", "test", "resources")
                logger.debug("Copying {0} to {1}".format(src, dst))
                shutil.copy(src, dst)
        os.chdir(current)
Example #32
 def retrieve(self):
     if self.do_retrieve:
         sh.rm("-fr", self.dirname)
         os.mkdir(self.dirname)
         self.pushd(self.dirname)
         retrieved = FileBuildConfiguration.download(self.url)
         if ".tar.gz" in retrieved:
             sh.tar("xvzf", retrieved)
         if ".tar.bz2" in retrieved:
             sh.tar("xjvf", retrieved)
         if ".zip" in retrieved:
             sh.unzip(retrieved)
     else:
         self.pushd(self.dirname)
     # Either one directory *OR* one directory + a README.
     if(len(os.listdir(".")) <= 3):
         # we can assume that we need to chdir before we can build, so set that to the local build path
         for curr in os.listdir("."):
             if(os.path.isdir(curr)):
                 self.buildpath = curr
     if not getattr(self, 'buildpath', None):
         self.buildpath = "."
     self.popd()
Example #33
def copy_src(installDir, release):
    _pull_release(release)
    filename    = CACHE_DIR + release_filename(release)
    extract_dir = CACHE_DIR + release_extract_dir(release)
    if not os.path.isdir( extract_dir ):
        log( "Extracting {} ".format( filename ) )
        unzip("-n", "-q", filename, "-d", extract_dir)

    
    log( "Removing old files ... " )
    rm("-rf", installDir)

    log( "Creating install dir {} ... ".format(installDir) )
    mkdir("-p", installDir )

    log( "Copying files ... " )
    unzip("-n", "-q", extract_dir + '/prestashop.zip' , "-d", installDir)

    log( "Renaming admin as {}".format(ADMIN_DIR) )
    mv(installDir + 'admin', installDir + ADMIN_DIR)

    chown("-R", APP_OWNER, installDir)
    chmod("-R", "777", installDir + 'var/')
Example #34
    def _get_code(self, nmpi_job, job_desc):
        """
        Obtain the code and place it in the working directory.

        If the experiment description is the URL of a Git repository, try to clone it.
        If it is the URL of a zip or .tar.gz archive, download and unpack it.
        Otherwise, the content of "code" is the code: write it to a file.
        """
        url_candidate = urlparse(nmpi_job['code'])
        logger.debug("Get code: %s %s", url_candidate.netloc, url_candidate.path)
        if url_candidate.scheme and url_candidate.path.endswith((".tar.gz", ".zip", ".tgz")):
            self._create_working_directory(job_desc.working_directory)
            target = os.path.join(job_desc.working_directory, os.path.basename(url_candidate.path))
            #urlretrieve(nmpi_job['code'], target) # not working via KIP https proxy
            curl(nmpi_job['code'], '-o', target)
            logger.info("Retrieved file from {} to local target {}".format(nmpi_job['code'], target))
            if url_candidate.path.endswith((".tar.gz", ".tgz")):
                tar("xzf", target, directory=job_desc.working_directory)
            elif url_candidate.path.endswith(".zip"):
                try:
                    # -o for auto-overwrite
                    unzip('-o', target, d=job_desc.working_directory)
                except:
                    logger.error("Could not unzip file {}".format(target))
        else:
            try:
                # Check the "code" field for a git url (clone it into the workdir) or a script (create a file into the workdir)
                # URL: use git clone
                git.clone('--recursive', nmpi_job['code'], job_desc.working_directory)
                logger.info("Cloned repository {}".format(nmpi_job['code']))
            except (sh.ErrorReturnCode_128, sh.ErrorReturnCode):
                # SCRIPT: create file (in the current directory)
                logger.info("The code field appears to be a script.")
                self._create_working_directory(job_desc.working_directory)
                with codecs.open(job_desc.arguments[0], 'w', encoding='utf8') as job_main_script:
                    job_main_script.write(nmpi_job['code'])
Example #35
def get_highlight(rjs_dir):
    if exists("{}/.hljs".format(rjs_dir)):
        return
    touch("{}/.hljs".format(rjs_dir))
    HIGHLIGHT_URL = "https://highlightjs.org/download/"
    html = get(HIGHLIGHT_URL).text.split("\n")
    lang = [
        x.strip().split('"')[3] for x in html
        if "checkbox" in x and "<li>" in x
    ]
    csrf = [x.strip() for x in html if "csrf" in x][0].split("'")[5]
    data = {x: "on" for x in lang}
    data.update({"csrfmiddlewaretoken": csrf})
    headers = {"Referer": "https://highlightjs.org/download/"}
    headers.update({"Cookie": "csrftoken={}".format(csrf)})
    r = post(HIGHLIGHT_URL, data=data, headers=headers, stream=True)
    with open("highlight.zip", "wb") as f:
        for chunk in r.iter_content(4096):
            f.write(chunk)
    unzip("-d", "highlight", "highlight.zip")
    mv("highlight/highlight.pack.js",
       "{}/plugin/highlight/highlight.js".format(rjs_dir))
    mv(glob("highlight/styles/*"), "{}/lib/css/".format(rjs_dir))
    rm("-r", "highlight", "highlight.zip")
Example #36
#!/usr/bin/env python
import simplejson as json
import os
from sh import wget, cd, unzip, rm

C = json.load(open('sources.json'))
mydir = os.path.dirname(os.path.realpath(__file__)) + '/sources/'

for k in C.keys():
  if not os.path.exists(mydir + k):
    print "mkdir %s%s" % (mydir, k)
    os.makedirs(mydir + k)
    for source in C[k]:
      file_name = mydir + k + source.split('/')[-1]
      print "wget -nc -O %s %s" % (file_name, source)
      wget('-nc', '-O', file_name, source)
      print "unzip -d %s %s" % (mydir + k, file_name)
      unzip('-d', mydir + k, file_name)
      print "rm %s" % file_name
      rm(file_name)
Example #37
for year in (2015, 2016, 2017, 2018, 2019, 2020):
    for month in range(1, 12 + 1):
        # future
        if year == 2020 and month > 1:
            continue
        # earliest
        if year == 2015 and month < 6:
            continue

        filename = "{game}-{year}-{month}.zip".format(game=game,
                                                      year=year,
                                                      month=month)
        url = download_template.format(game=game, year=year, month=month)
        print("download url:", url)
        if os.path.exists("./{filename}".format(filename=filename)):
            print("{filename} already exists".format(filename=filename))
            continue
        wget.download(url)
        print("{filename} download finish".format(filename=filename))

for year in (2015, 2016, 2017, 2018, 2019, 2020):
    for month in range(1, 12 + 1):
        filename = "{game}-{year}-{month}".format(game=game,
                                                  year=year,
                                                  month=month)
        if not os.path.exists(filename + ".zip"):
            continue
        print("unzip", filename)
        sh.unzip(filename + ".zip", "-d", filename)
Example #38
    return s[:-len(s.split()[-1])].strip()


def get_last_time(s):
    return int(s.split(':')[-1])


zips = sorted(glob.glob('logs/*.zip'))
dic = {}
for z in zips:
    name = z[5:-4]
    print()
    print(f'Unzip {z}')
    sh.rm(['-rf', 'log.log'])
    sh.rm(['-rf', './__MACOSX'])
    sh.unzip(z)
    print(f'RE {name}')

    ll = open('log.log', 'r').readlines()

    device = ''
    cpuinfo = ''
    hardware = ''
    judgeDevices = ''
    PreviewParameters = ''
    CreateEngine_CONV2D_FILTER_Error = ''
    ModelName = ''
    time = []
    for i in ll:
        if 'VersaAiInitData' in i:
            device = i.strip().split(":")[-3][0:3]
Example #39
def extract_shapefiles(in_file):
    print "Extracting shapefiles..."
    unzip( "-d", "/tmp/%s" % in_file, "/tmp/%s.zip" % in_file)

    return "/tmp/%s" % in_file
Example #40
def index():
    return 'Detected\nunzip:{}\nlftp:{}\nforego:{}'.format(
        unzip("-v", _tty_out=False),
        lftp("-v", _tty_out=False),
        forego("version", _tty_out=False),
    )
Example #41
def extract_shapefiles(in_file):
    print "Extracting shapefiles..."
    unzip("-d", "/tmp/%s" % in_file, "/tmp/%s.zip" % in_file)

    return "/tmp/%s" % in_file
Example #42
def get_mace(configs, output_dir):
    file_path = download_file(configs, "mace-1.0.4.zip", output_dir)
    sh.unzip("-o", file_path, "-d", "third_party/mace")
Example #43
        converted_boms.append(txt)
boms += converted_boms

# Get the output file name
output_name = fab_zip.split("_to_fab")[0] + "_{}.zip".format(args.date)

# Actually make the zip

# Generate the folders we use to organize things
mkdir(FAB_FOLDER)
mkdir(ASSEM_FOLDER)
mkdir(IMAGE_FOLDER)

# Put the contents of the zip files in the folders
# This way we don't have to replicate that logic
unzip(fab_zip, "-d", FAB_FOLDER)
unzip(assem_zip, "-d", ASSEM_FOLDER)

# Put the images in the images folder
for jpg in jpgs:
    cp(jpg, IMAGE_FOLDER)

# Get the filenames for fab
fab_files = glob.glob("{}/*".format(FAB_FOLDER))
assem_files = glob.glob("{}/*".format(ASSEM_FOLDER))
image_files = glob.glob("{}/*".format(IMAGE_FOLDER))

combined = [output_name] + schs + brds + pdfs + dxfs + infos + boms + fab_files + assem_files + image_files

sh.zip(*combined)
Example #44
    def _convert_zipfile(self, source_filename):
        # unzip file
        # take stock of files unzipped
        # if shapefiles exist, then look at unique base-names
        # and create new layers in the output spatialite for each base-name
        # and add projection files for each layer if they don't exist

        _log.info(
            'zipfile was designated for {0}, converting to sqlite'.format(
                self.resource.slug))

        stdout = StringIO()
        stderr = StringIO()

        def _write_shapefile_layer(layer_name, out_filename):
            _log.info('writing layer {0} to {1} for {2}'.format(
                layer_name, out_filename, self.resource.slug))

            if not os.path.exists(layer_name + '.prj'):
                _log.warning(
                    "no projection file for {0}, assuming EPSG:4326".format(
                        self.resource.slug))
                with open(layer_name + '.prj', 'w') as prj:
                    prj.write(e4326)

            saki = now()
            sh.ogr2ogr('-explodecollections',
                       '-skipfailures',
                       '-append',
                       '-gt',
                       '131072',
                       '-t_srs',
                       'epsg:3857',
                       '-f',
                       'SQLite',
                       '-dsco',
                       'SPATIALITE=YES',
                       out_filename,
                       self.cache_path + '/' + layer_name + '.shp',
                       _out=stdout,
                       _err=stderr)
            ima = now()
            _log.info("wrote shapefile layer {0} to {1} in {2}".format(
                layer_name, out_filename, ima - saki))

        e4326 = osr.SpatialReference()
        e4326.ImportFromEPSG(4326)
        e4326 = e4326.ExportToWkt()

        out_filename = self.get_filename('sqlite')
        if not os.path.exists(source_filename):  # say it's stored in S3...
            p, f = os.path.split(source_filename)
            sh.mkdir('-p', p)
            with open(source_filename, 'w') as out:
                out.write(self.resource.original_file.read())

        archive = ZipFile(source_filename)
        names = archive.namelist()
        names = [name for name in sorted(names)
                 if ('.' in name) and (not name.startswith('__MACOSX'))]
        extensions = {os.path.splitext(name)[-1].lower() for name in names}

        layer_name = self._layer_name(sorted(names)[0])
        if '.shp' in extensions:
            written = []
            for name in names:
                xtn = os.path.splitext(name)[-1]
                this_layer_name = self._layer_name(name)
                if os.path.exists(self.cache_path + '/' + this_layer_name +
                                  xtn):
                    os.unlink(self.cache_path + '/' + this_layer_name + xtn)

                archive.extract(name, self.cache_path)
                if name != (this_layer_name + xtn):
                    sh.mv(self.cache_path + '/' + name,
                          self.cache_path + "/" + this_layer_name + xtn)

                written.append(self.cache_path + '/' + this_layer_name + xtn)

                if layer_name != this_layer_name:
                    _write_shapefile_layer(layer_name, out_filename)
                    layer_name = this_layer_name
            _write_shapefile_layer(layer_name, out_filename)
            for name in written:
                os.unlink(name)

        else:
            sh.unzip(source_filename)

            saki = now()
            sh.ogr2ogr('-explodecollections',
                       '-skipfailures',
                       '-overwrite',
                       '-gt',
                       '131072',
                       '-t_srs',
                       'epsg:3857',
                       '-f',
                       'SQLite',
                       '-dsco',
                       'SPATIALITE=YES',
                       out_filename,
                       source_filename.rsplit('.', 1)[0],
                       _out=stdout,
                       _err=stderr)
            ima = now()

            _log.info('wrote dataset {0} to {1} in {2}'.format(
                source_filename, out_filename, ima - saki))

        return out_filename, stdout, stderr
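Note: the layer bookkeeping above hinges on grouping archive members by base name, with each base that has a .shp member becoming one layer. A small sketch of that grouping (hypothetical helper, not from the original):

import os

def shapefile_layers(names):
    layers = {}
    for name in names:
        base, ext = os.path.splitext(name)
        layers.setdefault(base, set()).add(ext.lower())
    return sorted(base for base, exts in layers.items() if '.shp' in exts)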
Example #45
def get_mnn(configs, output_dir):
    file_path = download_file(configs, "MNN-1.1.1.zip", output_dir)
    sh.unzip("-o", file_path, "-d", "third_party/mnn")
Example #46
    def _convert_zipfile(self, source_filename):
        # unzip file
        # take stock of files unzipped
        # if shapefiles exist, then look at unique base-names
        # and create new layers in the output spatialite for each base-name
        # and add projection files for each layer if they don't exist

        _log.info("zipfile was designated for {0}, converting to sqlite".format(self.resource.slug))

        stdout = StringIO()
        stderr = StringIO()

        def _write_shapefile_layer(layer_name, out_filename):
            _log.info("writing layer {0} to {1} for {2}".format(layer_name, out_filename, self.resource.slug))

            if not os.path.exists(layer_name + ".prj"):
                _log.warning("no projection file for {0}, assuming EPSG:4326".format(self.resource.slug))
                with open(layer_name + ".prj", "w") as prj:
                    prj.write(e4326)

            saki = now()
            sh.ogr2ogr(
                "-explodecollections",
                "-skipfailures",
                "-append",
                "-gt",
                "10384",
                "-t_srs",
                "epsg:3857",
                "-f",
                "SQLite",
                "-dsco",
                "SPATIALITE=YES",
                out_filename,
                self.cache_path + "/" + layer_name + ".shp",
                _out=stdout,
                _err=stderr,
            )
            ima = now()
            _log.info("wrote shapefile layer {0} to {1} in {2}".format(layer_name, out_filename, ima - saki))

        e4326 = osr.SpatialReference()
        e4326.ImportFromEPSG(4326)
        e4326 = e4326.ExportToWkt()

        out_filename = self.get_filename("sqlite")
        archive = ZipFile(source_filename)
        names = archive.namelist()
        names = [name for name in sorted(names) if ("." in name) and (not name.startswith("__MACOSX"))]
        extensions = {os.path.splitext(name)[-1].lower() for name in names}

        layer_name = self._layer_name(sorted(names)[0])
        if ".shp" in extensions:
            written = []
            for name in names:
                xtn = os.path.splitext(name)[-1]
                this_layer_name = self._layer_name(name)
                if os.path.exists(self.cache_path + "/" + this_layer_name + xtn):
                    os.unlink(self.cache_path + "/" + this_layer_name + xtn)

                archive.extract(name, self.cache_path)
                if name != (this_layer_name + xtn):
                    sh.mv(self.cache_path + "/" + name, self.cache_path + "/" + this_layer_name + xtn)

                written.append(self.cache_path + "/" + this_layer_name + xtn)

                if layer_name != this_layer_name:
                    _write_shapefile_layer(layer_name, out_filename)
                    layer_name = this_layer_name
            _write_shapefile_layer(layer_name, out_filename)
            for name in written:
                os.unlink(name)

        else:
            sh.unzip(source_filename)

            saki = now()
            sh.ogr2ogr(
                "-explodecollections",
                "-skipfailures",
                "-overwrite",
                "-gt",
                "131072",
                "-t_srs",
                "epsg:3857",
                "-f",
                "SQLite",
                "-dsco",
                "SPATIALITE=YES",
                out_filename,
                source_filename.rsplit(".", 1)[0],
                _out=stdout,
                _err=stderr,
            )
            ima = now()

            _log.info("wrote dataset {0} to {1} in {2}".format(source_filename, out_filename, ima - saki))

        return out_filename, stdout, stderr
Example #47
		converted_boms.append(txt)
boms += converted_boms

# Get the output file name
output_name = fab_zip.split('_to_fab')[0] + '_{}.zip'.format(args.date)

# Actually make the zip

# Generate the folders we use to organize things
mkdir(FAB_FOLDER)
mkdir(ASSEM_FOLDER)
mkdir(IMAGE_FOLDER)

# Put the contents of the zip files in the folders
# This way we don't have to replicate that logic
unzip(fab_zip, '-d', FAB_FOLDER)
unzip(assem_zip, '-d', ASSEM_FOLDER)

# Put the images in the images folder
for jpg in jpgs:
	cp(jpg, IMAGE_FOLDER)

# Get the filenames for fab
fab_files = glob.glob('{}/*'.format(FAB_FOLDER))
assem_files = glob.glob('{}/*'.format(ASSEM_FOLDER))
image_files = glob.glob('{}/*'.format(IMAGE_FOLDER))

combined =  [output_name] + schs + brds + pdfs + dxfs + infos + boms + \
                            fab_files + assem_files + image_files

sh.zip(*combined)
Example #48
    def unpack(self, arch):
        info_main('Unpacking {} for {}'.format(self.name, arch))

        build_dir = self.get_build_container_dir(arch)

        user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower()))
        if user_dir is not None:
            info('P4A_{}_DIR exists, symlinking instead'.format(
                self.name.lower()))
            if exists(self.get_build_dir(arch)):
                return
            shprint(sh.rm, '-rf', build_dir)
            shprint(sh.mkdir, '-p', build_dir)
            shprint(sh.rmdir, build_dir)
            ensure_dir(build_dir)
            shprint(sh.cp, '-a', user_dir, self.get_build_dir(arch))
            return

        if self.url is None:
            info('Skipping {} unpack as no URL is set'.format(self.name))
            return

        filename = shprint(
            sh.basename, self.versioned_url).stdout[:-1].decode('utf-8')
        ma = match(u'^(.+)#md5=([0-9a-f]{32})$', filename)
        if ma:                  # fragmented URL?
            filename = ma.group(1)

        with current_directory(build_dir):
            directory_name = self.get_build_dir(arch)

            if not exists(directory_name) or not isdir(directory_name):
                extraction_filename = join(
                    self.ctx.packages_path, self.name, filename)
                if isfile(extraction_filename):
                    if extraction_filename.endswith('.zip'):
                        try:
                            sh.unzip(extraction_filename)
                        except (sh.ErrorReturnCode_1, sh.ErrorReturnCode_2):
                            # return code 1 means unzipping had
                            # warnings but did complete,
                            # apparently happens sometimes with
                            # github zips
                            pass
                        import zipfile
                        fileh = zipfile.ZipFile(extraction_filename, 'r')
                        root_directory = fileh.filelist[0].filename.split('/')[0]
                        if root_directory != basename(directory_name):
                            shprint(sh.mv, root_directory, directory_name)
                    elif (extraction_filename.endswith('.tar.gz') or
                          extraction_filename.endswith('.tgz') or
                          extraction_filename.endswith('.tar.bz2') or
                          extraction_filename.endswith('.tbz2') or
                          extraction_filename.endswith('.tar.xz') or
                          extraction_filename.endswith('.txz')):
                        sh.tar('xf', extraction_filename)
                        root_directory = shprint(
                            sh.tar, 'tf', extraction_filename).stdout.decode(
                                'utf-8').split('\n')[0].split('/')[0]
                        if root_directory != directory_name:
                            shprint(sh.mv, root_directory, directory_name)
                    else:
                        raise Exception(
                            'Could not extract {} download, it must be .zip, '
                            '.tar.gz or .tar.bz2 or .tar.xz'.format(extraction_filename))
                elif isdir(extraction_filename):
                    mkdir(directory_name)
                    for entry in listdir(extraction_filename):
                        if entry not in ('.git',):
                            shprint(sh.cp, '-Rv',
                                    join(extraction_filename, entry),
                                    directory_name)
                else:
                    raise Exception(
                        'Given path is neither a file nor a directory: {}'
                        .format(extraction_filename))

            else:
                info('{} is already unpacked, skipping'.format(self.name))
Example #49
    def Deploy(self, request, context):

        start_time = time.time()

        script_env = {
            'PROJECT_NAME': request.project,
            'DEPLOYMENT_ENVIRONMENT': request.environment,
        }

        # Return the initial CREATED event
        event = self.new_event(request,
                               deployment_pb2.DeploymentEvent.CREATED,
                               message='New deployment created')
        yield event

        # Start deployment, return INCOMPLETE event
        event = self.new_event(request,
                               deployment_pb2.DeploymentEvent.QUEUED,
                               message='Deployment started')
        yield event

        ###
        # ApplicationStop
        ###

        # run_lifecycle_hook_scripts(deploy_tmp, 'ApplicationStop', script_env, hooks)

        ###
        # DownloadBundle
        ###

        # Download artefact from S3
        event = self.new_event(
            request,
            deployment_pb2.DeploymentEvent.IN_PROGRESS,
            message='Downloading deployment artefact file from S3',
            lifecycle_event='DownloadBundle')
        yield event

        deploy_artefact_tmp = tempfile.mkstemp(prefix='deploy.', suffix='.zip')
        deploy_artefact = deploy_artefact_tmp[1]
        logger.info('Downloading deployment artefact from s3://%s/%s to %s',
                    request.artefact.s3_bucket, request.artefact.s3_key,
                    deploy_artefact)
        download_success = download_from_s3(request.artefact.s3_bucket,
                                            request.artefact.s3_key,
                                            deploy_artefact)
        if not download_success:
            logger.info('Artefact download failed')
            event = self.new_event(request,
                                   deployment_pb2.DeploymentEvent.FAILED,
                                   message='Failed to download build artefact',
                                   lifecycle_event='DownloadBundle')
            yield event
            return

        event = self.new_event(request,
                               deployment_pb2.DeploymentEvent.IN_PROGRESS,
                               message='Extract deployment artefact',
                               lifecycle_event='DownloadBundle')
        yield event

        deploy_tmp = tempfile.mkdtemp()
        logger.info('Extracting deployment artefact %s to %s', deploy_artefact,
                    deploy_tmp)
        try:
            for line in unzip(deploy_artefact, '-d', deploy_tmp, _iter=True):
                print(line)
        except sh.ErrorReturnCode:
            logger.exception("Error when extracting %s archive",
                             deploy_artefact)
            event = self.new_event(
                request,
                deployment_pb2.DeploymentEvent.FAILED,
                message='Failed to extract files from the build archive',
                lifecycle_event='DownloadBundle')
            yield event
            return

        appspec = None
        appspec_file = os.path.join(deploy_tmp, 'appspec.yml')
        logger.info('Reading appspec.yml file from %s', appspec_file)
        try:
            with open(appspec_file, 'r') as s:
                try:
                    appspec = yaml.safe_load(s)
                except yaml.YAMLError:
                    logger.exception("Error when parsing appspec.yml file")
        except IOError:
            logger.exception("I/O error when opening appspec.yml file")

        if not appspec:
            logger.info('Could not load appspec.yml file')
            event = self.new_event(request,
                                   deployment_pb2.DeploymentEvent.FAILED,
                                   message='Could not load appspec.yml file',
                                   lifecycle_event='DownloadBundle')
            yield event
            return

        files = appspec.setdefault('files', [])
        hooks = appspec.setdefault('hooks', dict())

        ###
        # BeforeInstall
        ###

        if hooks.get('BeforeInstall', None):

            event = self.new_event(request,
                                   deployment_pb2.DeploymentEvent.IN_PROGRESS,
                                   message='Run BeforeInstall scripts',
                                   lifecycle_event='BeforeInstall')
            yield event

            scripts_success = run_lifecycle_hook_scripts(
                deploy_tmp, 'BeforeInstall', script_env, hooks)
            if all(scripts_success):
                logger.info('All BeforeInstall hooks completed')
                event = self.new_event(
                    request,
                    deployment_pb2.DeploymentEvent.IN_PROGRESS,
                    message='BeforeInstall hooks completed',
                    lifecycle_event='BeforeInstall')
                yield event
            else:
                logger.warning('Failed to run some BeforeInstall hooks')
                event = self.new_event(
                    request,
                    deployment_pb2.DeploymentEvent.FAILED,
                    message='Failed to run BeforeInstall hooks',
                    lifecycle_event='BeforeInstall')
                yield event
                return

        else:
            logger.info('No BeforeInstall hooks found, skipping...')

        event = self.new_event(request,
                               deployment_pb2.DeploymentEvent.IN_PROGRESS,
                               message='Copy files',
                               lifecycle_event='Install')
        yield event

        if not files:
            event = self.new_event(request,
                                   deployment_pb2.DeploymentEvent.FAILED,
                                   message='No files to install',
                                   lifecycle_event='Install')
            yield event
            return

        files_success = []
        for f in files:

            try:
                source = os.path.join(deploy_tmp, f['source'])
                destination = f['destination']
            except KeyError:
                logger.exception('Invalid appspec.yml files configuration')
                files_success.append(False)
                continue

            logger.info("Copying files from %s to %s", source, destination)
            try:
                if os.path.isdir(source):
                    for line in rsync("-avr",
                                      source + '/',
                                      destination,
                                      _iter=True):
                        print(line)
                else:
                    print(rsync("-av", source, destination))
                files_success.append(True)
            except sh.ErrorReturnCode:
                logger.exception('Copying files failed')
                files_success.append(False)

        if all(files_success):
            logger.info('All files copied')
        else:
            logger.warning('Failed to copy some files')
            event = self.new_event(request,
                                   deployment_pb2.DeploymentEvent.FAILED,
                                   message='Failed to copy files',
                                   lifecycle_event='Install')
            yield event
            return

        if hooks.get('AfterInstall', None):

            event = self.new_event(request,
                                   deployment_pb2.DeploymentEvent.IN_PROGRESS,
                                   message='Running AfterInstall scripts',
                                   lifecycle_event='AfterInstall')
            yield event

            scripts_success = run_lifecycle_hook_scripts(
                deploy_tmp, 'AfterInstall', script_env, hooks)
            if all(scripts_success):
                logger.info('All AfterInstall hooks completed')
                event = self.new_event(
                    request,
                    deployment_pb2.DeploymentEvent.IN_PROGRESS,
                    message='AfterInstall hooks completed',
                    lifecycle_event='AfterInstall')
                yield event
            else:
                logger.warning('Failed to run some AfterInstall hooks')
                event = self.new_event(
                    request,
                    deployment_pb2.DeploymentEvent.FAILED,
                    message='Failed to run AfterInstall hooks',
                    lifecycle_event='AfterInstall')
                yield event
                return

        else:
            logger.info('No AfterInstall hooks found, skipping...')

        logger.info('Cleaning up deployment files...')
        # event = self.new_event(request, deployment_pb2.DeploymentEvent.IN_PROGRESS,
        #                        message='Cleanup deployment files',
        #                        lifecycle_event='End')
        # yield event

        os.remove(deploy_artefact)
        shutil.rmtree(deploy_tmp)

        end_time = time.time()
        duration = pretty_time_delta(end_time - start_time)
        logger.info('Deployment completed in %s', duration)

        event = self.new_event(request,
                               deployment_pb2.DeploymentEvent.SUCCEEDED,
                               message='Deployment completed in %s' % duration,
                               lifecycle_event='End')
        yield event
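The Deploy method above relies on helpers that are not shown here (run_lifecycle_hook_scripts, download_from_s3, pretty_time_delta) and on a CodeDeploy-style appspec.yml in which hooks maps a lifecycle event name to a list of entries carrying a location key relative to the bundle root. A minimal sketch of the hook runner under those assumptions, illustrative only and not the original implementation:

import logging
import os
import subprocess

logger = logging.getLogger(__name__)

def run_lifecycle_hook_scripts(deploy_tmp, lifecycle_event, script_env, hooks):
    # Run every script registered for this lifecycle event and return a list
    # of per-script success flags, matching the all(scripts_success) checks above.
    # Assumes hooks looks like {'BeforeInstall': [{'location': 'scripts/stop.sh'}]}
    # and that each script is executable.
    results = []
    for hook in hooks.get(lifecycle_event, []):
        script = os.path.join(deploy_tmp, hook['location'])
        env = dict(os.environ, **script_env)
        try:
            subprocess.check_call([script], cwd=deploy_tmp, env=env)
            results.append(True)
        except (OSError, subprocess.CalledProcessError):
            logger.exception('Hook %s failed during %s', script, lifecycle_event)
            results.append(False)
    return results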
Ejemplo n.º 50
    def unpack(self, arch):
        info_main('Unpacking {} for {}'.format(self.name, arch))

        build_dir = self.get_build_container_dir(arch)

        user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower()))
        if user_dir is not None:
            info('P4A_{}_DIR exists, symlinking instead'.format(
                self.name.lower()))
            if exists(self.get_build_dir(arch)):
                return
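            # Clear any stale build dir; mkdir -p then rmdir guarantees the
            # parent directories exist while leaving the leaf absent, and
            # ensure_dir recreates it empty before the copy below.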
            shprint(sh.rm, '-rf', build_dir)
            shprint(sh.mkdir, '-p', build_dir)
            shprint(sh.rmdir, build_dir)
            ensure_dir(build_dir)
            shprint(sh.cp, '-a', user_dir, self.get_build_dir(arch))
            return

        if self.url is None:
            info('Skipping {} unpack as no URL is set'.format(self.name))
            return

        filename = shprint(sh.basename,
                           self.versioned_url).stdout[:-1].decode('utf-8')
        ma = match(u'^(.+)#md5=([0-9a-f]{32})$', filename)
        if ma:  # fragmented URL?
            filename = ma.group(1)

        with current_directory(build_dir):
            directory_name = self.get_build_dir(arch)

            if not exists(directory_name) or not isdir(directory_name):
                extraction_filename = join(self.ctx.packages_path, self.name,
                                           filename)
                if isfile(extraction_filename):
                    if extraction_filename.endswith('.zip'):
                        try:
                            sh.unzip(extraction_filename)
                        except (sh.ErrorReturnCode_1, sh.ErrorReturnCode_2):
                            pass  # return code 1 means unzipping had
                            # warnings but did complete,
                            # apparently happens sometimes with
                            # github zips
                        import zipfile
                        fileh = zipfile.ZipFile(extraction_filename, 'r')
                        root_directory = fileh.filelist[0].filename.split(
                            '/')[0]
                        if root_directory != basename(directory_name):
                            shprint(sh.mv, root_directory, directory_name)
                    elif (extraction_filename.endswith('.tar.gz')
                          or extraction_filename.endswith('.tgz')
                          or extraction_filename.endswith('.tar.bz2')
                          or extraction_filename.endswith('.tbz2')
                          or extraction_filename.endswith('.tar.xz')
                          or extraction_filename.endswith('.txz')):
                        sh.tar('xf', extraction_filename)
                        root_directory = shprint(
                            sh.tar, 'tf', extraction_filename).stdout.decode(
                                'utf-8').split('\n')[0].split('/')[0]
                        if root_directory != directory_name:
                            shprint(sh.mv, root_directory, directory_name)
                    else:
                        raise Exception(
                            'Could not extract {} download, it must be .zip, '
                            '.tar.gz, .tar.bz2 or .tar.xz'.format(
                                extraction_filename))
                elif isdir(extraction_filename):
                    mkdir(directory_name)
                    for entry in listdir(extraction_filename):
                        if entry not in ('.git', ):
                            shprint(sh.cp, '-Rv',
                                    join(extraction_filename, entry),
                                    directory_name)
                else:
                    raise Exception(
                        'Given path is neither a file nor a directory: {}'.
                        format(extraction_filename))

            else:
                info('{} is already unpacked, skipping'.format(self.name))
Ejemplo n.º 51
def get_tflite(configs, output_dir):
    file_path = download_file(configs, "tensorflow-1.10.1.zip", output_dir)
    sh.unzip("-o", file_path, "-d", "third_party/tflite")
Ejemplo n.º 52
    def init_dir(self, version='latest', config={}, table_prefix='wp_'):
        """Initialises a new WordPress installation.

        Kwargs:
            version (str): Version number or ``'latest'``.
            config (dict): Configuration. Must have keys ``'db_name'``,
                             ``'db_user'``, ``'db_password'`` at minimum.
            table_prefix (str): Table prefix.

        Raises:
            WordPressError, WordPressConfigurationError

        Optional ``config`` keys (all str):
            db_host: Database host.
            db_charset: Database character set (MySQL).
            db_collate: Database collation ('' for default).
            wplang: Language code.
            auth_key: Authentication key.
            secure_auth_key: Secure authentication key.
            logged_in_key: Logged in key.
            nonce_key: Nonce key.
        """

        if self._is_initialized:
            raise WordPressError('Directory %s already exists' % (self._path))

        uri = self.LATEST_URI
        cache = True

        if version != 'latest':
            uri = self.DL_FORMAT % (version)
            cache = False

        prev_listing = os.listdir('.')
        dir_name = path_join(tempfile.gettempdir(), '__wp__')

        http.dl(uri, '_wp.zip', cache=cache)
        unzip(['-d', dir_name, '_wp.zip'])
        rm('_wp.zip')

        dir_name = path_join(dir_name, 'wordpress')
        os.rename(dir_name, self._path)

        defaults = {
            'db_host': 'localhost',
            'db_charset': 'utf8',
            'db_collate': '',
            'wplang': '',
            'auth_key': ',Qi8F3A:ME>+!G*|a!>zbW!GWe,[email protected]}LR0][j/995U'
                        '+4*3H:i]]DH',
            'secure_auth_key': 'UjN_-SP+Whq/^taB31&lg$fj0-<XSgKy@UzK*B-k-4aiT9'
                               '~m^s_vT[dE,5P;kx(E',
            'logged_in_key': '2dfV^z4rJqrSEdQc.ec)KJC UZv$#)OhJKRY~Vj9+]M-]CIB'
                             'L(RvGZ|[C!S|]MOv',
            'nonce_key': '.Ue WG1NN/cKo^MC53$_U0!V>Mtdw-ar$rP8o+;rawQ)B$9LlAAL'
                         '<@GLoXS_POaa',
        }
        keys = [
            'db_name',
            'db_user',
            'db_password',
            'db_host',
            'db_collate',
            'db_charset',
            'auth_key',
            'secure_auth_key',
            'logged_in_key',
            'nonce_key',
            'wplang',
        ]

        line_format = 'define(%s, %s);'
        wp_config_php = ['<?php']

        for key in keys:
            if key in config:
                value = config[key]
            elif key in defaults:
                value = defaults[key]
            else:
                raise WordPressConfigurationError(
                    'Configuration key %s is required' % (key))

            wp_config_php.append(line_format %
                                 (php.generate_scalar(key.upper()),
                                  php.generate_scalar(value)))

        wp_config_php.append('$table_prefix = %s;' %
                             (php.generate_scalar(table_prefix)))

        lines = '\n'.join(wp_config_php) + '\n'
        lines += '''if (!defined('ABSPATH'))
  define('ABSPATH', dirname(__FILE__) . '/');
require_once(ABSPATH . 'wp-settings.php');'''
        lines += '\n'

        with open(path_join(self._path, 'wp-config.php'), 'w') as f:
            f.write(lines)

        os.remove(path_join(self._path, 'wp-config-sample.php'))
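A hedged usage sketch (the owning class name is assumed here; only the keys the docstring marks as required are supplied):

# Illustrative only: WordPress is an assumed name for the class defining init_dir.
wp = WordPress('/var/www/example')
wp.init_dir(version='4.9.8',
            config={'db_name': 'example',
                    'db_user': 'example',
                    'db_password': 'secret'},
            table_prefix='wp_')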
Ejemplo n.º 53
    def unpack(self, arch):
        info_main('Unpacking {} for {}'.format(self.name, arch))

        build_dir = self.get_build_container_dir(arch)

        # user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower()))
        if self.user_dir is not None:
            info('P4A_{}_DIR exists, symlinking instead'.format(
                self.name.lower()))
            # AND: Currently there's something wrong if I use ln, fix this
            warning('Using cp -a instead of symlink...fix this!')
            if exists(self.get_build_dir(arch)) and not self.force_build:
                info('copy already in place, skipping cp {0}'.format(self.user_dir))
                return
            else:
                info('cleaning dirs')
                shprint(sh.rm, '-rf', build_dir)
                shprint(sh.mkdir, '-p', build_dir)
                shprint(sh.rmdir, build_dir)
                ensure_dir(build_dir)
                info('starting cp {0}'.format(self.user_dir))
                shprint(sh.cp, '-a', self.user_dir, self.get_build_dir(arch))
                return

        if self.url is None:
            info('Skipping {} unpack as no URL is set'.format(self.name))
            return

        filename = shprint(sh.basename,
                           self.versioned_url).stdout[:-1].decode('utf-8')

        with current_directory(build_dir):
            directory_name = self.get_build_dir(arch)

            # AND: Could use tito's get_archive_rootdir here
            if not exists(directory_name) or not isdir(directory_name):
                extraction_filename = join(self.ctx.packages_path, self.name,
                                           filename)
                if isfile(extraction_filename):
                    if extraction_filename.endswith('.zip'):
                        try:
                            sh.unzip(extraction_filename)
                        except (sh.ErrorReturnCode_1, sh.ErrorReturnCode_2):
                            pass  # return code 1 means unzipping had
                            # warnings but did complete,
                            # apparently happens sometimes with
                            # github zips
                        import zipfile
                        fileh = zipfile.ZipFile(extraction_filename, 'r')
                        root_directory = fileh.filelist[0].filename.split(
                            '/')[0]
                        if root_directory != basename(directory_name):
                            shprint(sh.mv, root_directory, directory_name)
                    elif (extraction_filename.endswith('.tar.gz')
                          or extraction_filename.endswith('.tgz')
                          or extraction_filename.endswith('.tar.bz2')
                          or extraction_filename.endswith('.tbz2')
                          or extraction_filename.endswith('.tar.xz')
                          or extraction_filename.endswith('.txz')):
                        sh.tar('xf', extraction_filename)
                        root_directory = shprint(
                            sh.tar, 'tf', extraction_filename).stdout.decode(
                                'utf-8').split('\n')[0].split('/')[0]
                        if root_directory != directory_name:
                            shprint(sh.mv, root_directory, directory_name)
                    else:
                        raise Exception(
                            'Could not extract {} download, it must be .zip, '
                            '.tar.gz, .tar.bz2 or .tar.xz'.format(
                                extraction_filename))
                elif isdir(extraction_filename):
                    mkdir(directory_name)
                    for entry in listdir(extraction_filename):
                        if entry not in ('.git', ):
                            shprint(sh.cp, '-Rv',
                                    join(extraction_filename, entry),
                                    directory_name)
                else:
                    raise Exception(
                        'Given path is neither a file nor a directory: {}'.
                        format(extraction_filename))

            else:
                info('{} is already unpacked, skipping'.format(self.name))
Ejemplo n.º 54
    def unpack(self, arch):
        info_main('Unpacking {} for {}'.format(self.name, arch))

        build_dir = self.get_build_container_dir(arch)

        user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower()))
        if user_dir is not None:
            info('P4A_{}_DIR exists, symlinking instead'.format(
                self.name.lower()))
            # AND: Currently there's something wrong if I use ln, fix this
            warning('Using git clone instead of symlink...fix this!')
            if exists(self.get_build_dir(arch)):
                return
            shprint(sh.rm, '-rf', build_dir)
            shprint(sh.mkdir, '-p', build_dir)
            shprint(sh.rmdir, build_dir)
            ensure_dir(build_dir)
            shprint(sh.git, 'clone', user_dir, self.get_build_dir(arch))
            return

        if self.url is None:
            info('Skipping {} unpack as no URL is set'.format(self.name))
            return

        filename = shprint(sh.basename,
                           self.versioned_url).stdout[:-1].decode('utf-8')

        with current_directory(build_dir):
            directory_name = self.get_build_dir(arch)

            # AND: Could use tito's get_archive_rootdir here
            if not exists(directory_name) or not isdir(directory_name):
                extraction_filename = join(self.ctx.packages_path, self.name,
                                           filename)
                if isfile(extraction_filename):
                    if extraction_filename.endswith('.tar.gz') or \
                       extraction_filename.endswith('.tgz'):
                        sh.tar('xzf', extraction_filename)
                        root_directory = shprint(
                            sh.tar, 'tzf', extraction_filename).stdout.decode(
                                'utf-8').split('\n')[0].split('/')[0]
                        if root_directory != directory_name:
                            shprint(sh.mv, root_directory, directory_name)
                    elif (extraction_filename.endswith('.tar.bz2')
                          or extraction_filename.endswith('.tbz2')):
                        info('Extracting {} at {}'.format(
                            extraction_filename, filename))
                        sh.tar('xjf', extraction_filename)
                        root_directory = sh.tar(
                            'tjf', extraction_filename).stdout.decode(
                                'utf-8').split('\n')[0].split('/')[0]
                        if root_directory != directory_name:
                            shprint(sh.mv, root_directory, directory_name)
                    elif extraction_filename.endswith('.zip'):
                        sh.unzip(extraction_filename)
                        import zipfile
                        fileh = zipfile.ZipFile(extraction_filename, 'r')
                        root_directory = fileh.filelist[0].filename.strip('/')
                        if root_directory != directory_name:
                            shprint(sh.mv, root_directory, directory_name)
                    else:
                        raise Exception(
                            'Could not extract {} download, it must be .zip, '
                            '.tar.gz or .tar.bz2'.format(extraction_filename))
                elif isdir(extraction_filename):
                    mkdir(directory_name)
                    for entry in listdir(extraction_filename):
                        if entry not in ('.git', ):
                            shprint(sh.cp, '-Rv',
                                    join(extraction_filename, entry),
                                    directory_name)
                else:
                    raise Exception(
                        'Given path is neither a file nor a directory: {}'.
                        format(extraction_filename))

            else:
                info('{} is already unpacked, skipping'.format(self.name))
Ejemplo n.º 55
    def unpack(self, arch):
        info_main('Unpacking {} for {}'.format(self.name, arch))

        build_dir = self.get_build_container_dir(arch)

        user_dir = environ.get('P4A_{}_DIR'.format(self.name.lower()))
        if user_dir is not None:
            info('P4A_{}_DIR exists, symlinking instead'.format(
                self.name.lower()))
            # AND: Currently there's something wrong if I use ln, fix this
            warning('Using git clone instead of symlink...fix this!')
            if exists(self.get_build_dir(arch)):
                return
            shprint(sh.rm, '-rf', build_dir)
            shprint(sh.mkdir, '-p', build_dir)
            shprint(sh.rmdir, build_dir)
            ensure_dir(build_dir)
            shprint(sh.git, 'clone', user_dir, self.get_build_dir(arch))
            return

        if self.url is None:
            info('Skipping {} unpack as no URL is set'.format(self.name))
            return

        filename = shprint(
            sh.basename, self.versioned_url).stdout[:-1].decode('utf-8')

        with current_directory(build_dir):
            directory_name = self.get_build_dir(arch)

            # AND: Could use tito's get_archive_rootdir here
            if not exists(directory_name) or not isdir(directory_name):
                extraction_filename = join(
                    self.ctx.packages_path, self.name, filename)
                if isfile(extraction_filename):
                    if extraction_filename.endswith('.tar.gz') or \
                       extraction_filename.endswith('.tgz'):
                        sh.tar('xzf', extraction_filename)
                        root_directory = shprint(
                            sh.tar, 'tzf', extraction_filename).stdout.decode(
                                'utf-8').split('\n')[0].split('/')[0]
                        if root_directory != directory_name:
                            shprint(sh.mv, root_directory, directory_name)
                    elif (extraction_filename.endswith('.tar.bz2') or
                          extraction_filename.endswith('.tbz2')):
                        info('Extracting {} at {}'
                             .format(extraction_filename, filename))
                        sh.tar('xjf', extraction_filename)
                        root_directory = sh.tar(
                            'tjf', extraction_filename).stdout.decode(
                                'utf-8').split('\n')[0].split('/')[0]
                        if root_directory != directory_name:
                            shprint(sh.mv, root_directory, directory_name)
                    elif extraction_filename.endswith('.zip'):
                        sh.unzip(extraction_filename)
                        import zipfile
                        fileh = zipfile.ZipFile(extraction_filename, 'r')
                        root_directory = fileh.filelist[0].filename.strip('/')
                        if root_directory != directory_name:
                            shprint(sh.mv, root_directory, directory_name)
                    else:
                        raise Exception(
                            'Could not extract {} download, it must be .zip, '
                            '.tar.gz or .tar.bz2'.format(extraction_filename))
                elif isdir(extraction_filename):
                    mkdir(directory_name)
                    for entry in listdir(extraction_filename):
                        if entry not in ('.git',):
                            shprint(sh.cp, '-Rv',
                                    join(extraction_filename, entry),
                                    directory_name)
                else:
                    raise Exception(
                        'Given path is neither a file nor a directory: {}'
                        .format(extraction_filename))

            else:
                info('{} is already unpacked, skipping'.format(self.name))
Ejemplo n.º 56
        converted_boms.append(txt)
boms += converted_boms

# Get the output file name
output_name = fab_zip.split('_to_fab')[0] + '_{}.zip'.format(args.date)

# Actually make the zip

# Generate the folders we use to organize things
mkdir(FAB_FOLDER)
mkdir(ASSEM_FOLDER)
mkdir(IMAGE_FOLDER)

# Put the contents of the zip files in the folders
# This way we don't have to replicate that logic
unzip(fab_zip, '-d', FAB_FOLDER)
unzip(assem_zip, '-d', ASSEM_FOLDER)

# Put the images in the images folder
for jpg in jpgs:
    cp(jpg, IMAGE_FOLDER)

# Get the filenames for fab
fab_files = glob.glob('{}/*'.format(FAB_FOLDER))
assem_files = glob.glob('{}/*'.format(ASSEM_FOLDER))
image_files = glob.glob('{}/*'.format(IMAGE_FOLDER))

combined = [output_name] + schs + brds + pdfs + dxfs + infos + boms + \
                            fab_files + assem_files + image_files

sh.zip(*combined)
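Here sh.zip(*combined) expands to the command line zip <output_name> <file> ..., so the first element of combined names the archive and the remaining elements are packed into it.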
Ejemplo n.º 57
    def _convert_zipfile(self, source_filename):
        # unzip file
        # take stock of files unzipped
        # if shapefiles exist, then look at unique base-names
        # and create new layers in the output spatialite for each base-name
        # and add projection files for each layer if they don't exist

        _log.info('zipfile was designated for {0}, converting to sqlite'.format(self.resource.slug))

        stdout = StringIO()
        stderr = StringIO()

        def _write_shapefile_layer(layer_name, out_filename):
            _log.info('writing layer {0} to {1} for {2}'.format(
                layer_name,
                out_filename,
                self.resource.slug
            ))

            prj_path = os.path.join(self.cache_path, layer_name + '.prj')
            if not os.path.exists(prj_path):
                _log.warning("no projection file for {0}, assuming EPSG:4326".format(self.resource.slug))
                with open(prj_path, 'w') as prj:
                    prj.write(e4326)

            saki = now()
            sh.ogr2ogr(
                '-explodecollections',
                '-skipfailures',
                '-append',
                '-gt', '131072',
                '-t_srs', 'epsg:3857',
                '-f', 'SQLite',
                '-dsco', 'SPATIALITE=YES',
                out_filename, self.cache_path + '/' + layer_name + '.shp',
                _out=stdout,
                _err=stderr
            )
            ima = now()
            _log.info("wrote shapefile layer {0} to {1} in {2}".format(layer_name, out_filename, ima-saki))


        e4326 = osr.SpatialReference()
        e4326.ImportFromEPSG(4326)
        e4326 = e4326.ExportToWkt()

        out_filename = self.get_filename('sqlite')
        if not os.path.exists(source_filename):  # say it's stored in S3...
            p, f = os.path.split(source_filename)
            sh.mkdir('-p', p)
            with open(source_filename, 'wb') as out:
                out.write(self.resource.original_file.read())

        archive = ZipFile(source_filename)
        names = archive.namelist()
        names = [name for name in sorted(names)
                 if '.' in name and not name.startswith('__MACOSX')]
        extensions = {os.path.splitext(name)[-1].lower() for name in names}

        layer_name = self._layer_name(sorted(names)[0])
        if '.shp' in extensions:
            written = []
            for name in names:
                xtn = os.path.splitext(name)[-1]
                this_layer_name = self._layer_name(name)
                if os.path.exists(self.cache_path + '/' + this_layer_name + xtn):
                    os.unlink(self.cache_path + '/' + this_layer_name + xtn)

                archive.extract(name, self.cache_path)
                if name != (this_layer_name + xtn):
                    sh.mv(self.cache_path + '/' + name, self.cache_path + "/" + this_layer_name + xtn)

                written.append(self.cache_path + '/' + this_layer_name + xtn)

                if layer_name != this_layer_name:
                    _write_shapefile_layer(layer_name, out_filename)
                    layer_name = this_layer_name
            _write_shapefile_layer(layer_name, out_filename)
            for name in written:
                os.unlink(name)

        else:
            sh.unzip(source_filename)

            saki = now()
            sh.ogr2ogr(
                '-explodecollections',
                '-skipfailures',
                '-overwrite',
                '-gt', '131072',
                '-t_srs', 'epsg:3857',
                '-f', 'SQLite',
                '-dsco', 'SPATIALITE=YES',
                out_filename,
                source_filename.rsplit('.', 1)[0],
                _out=stdout,
                _err=stderr
            )
            ima = now()

            _log.info('wrote dataset {0} to {1} in {2}'.format(
                source_filename,
                out_filename,
                ima-saki
            ))

        return out_filename, stdout, stderr
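The _layer_name helper is not shown; judging from how it is used (mapping an archive member to a shapefile base name), a minimal reconstruction might be:

import os

def _layer_name(self, name):
    # Hypothetical reconstruction: reduce an archive member such as
    # 'folder/parcels.shp' to the layer base name 'parcels'.
    return os.path.splitext(os.path.basename(name))[0]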
Ejemplo n.º 58
    def unzip_files(self):
        for data_type in self.data_types:
            sh.unzip('%s/%s.zip' % (self.directory, data_type), '-d', self.directory)
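A hedged usage sketch, assuming the owning class (name invented here) stores directory and data_types attributes:

# Illustrative only: Dataset stands in for the class that owns unzip_files.
ds = Dataset()
ds.directory = '/tmp/data'
ds.data_types = ['train', 'test']
ds.unzip_files()  # extracts /tmp/data/train.zip and /tmp/data/test.zip into /tmp/data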