Example #1
def unpack(sproj, branch, outdir):
    subprocess.check_call(['git', 'clone', '-b', branch, 'https://github.com/mesonbuild/%s.git' % sproj, outdir])
    usfile = os.path.join(outdir, 'upstream.wrap')
    assert(os.path.isfile(usfile))
    config = configparser.ConfigParser()
    config.read(usfile)
    us_url = config['wrap-file']['source_url']
    us = urllib.request.urlopen(us_url).read()
    h = hashlib.sha256()
    h.update(us)
    dig = h.hexdigest()
    should = config['wrap-file']['source_hash']
    if dig != should:
        print('Incorrect hash on download.')
        print(' expected:', should)
        print(' obtained:', dig)
        return 1
    spdir = os.path.split(outdir)[0]
    ofilename = os.path.join(spdir, config['wrap-file']['source_filename'])
    with open(ofilename, 'wb') as ofile:
        ofile.write(us)
    if 'lead_directory_missing' in config['wrap-file']:
        os.mkdir(outdir)
        shutil.unpack_archive(ofilename, outdir)
    else:
        shutil.unpack_archive(ofilename, spdir)
        extdir = os.path.join(spdir, config['wrap-file']['directory'])
        assert(os.path.isdir(extdir))
        shutil.move(os.path.join(outdir, '.git'), extdir)
        subprocess.check_call(['git', 'reset', '--hard'], cwd=extdir)
        shutil.rmtree(outdir)
        shutil.move(extdir, outdir)
    shutil.rmtree(os.path.join(outdir, '.git'))
    os.unlink(ofilename)
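A side note on the calls above: shutil.unpack_archive infers the archive format from the file name's extension when no explicit format is given. A minimal sketch (file and directory names are hypothetical):

import shutil

# Format is inferred from the ".tar.gz" extension.
shutil.unpack_archive("subproject-1.0.tar.gz", "subprojects")

# Pass the format explicitly when the extension is missing or misleading.
shutil.unpack_archive("download.bin", "subprojects", format="gztar")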
Example #2
def openFile(fileName):
    """
    Open the file fileName with a suitable compression program and return the opened stream.

    @result {Stream | None}
    """
    fileExtension = fileName[fileName.rfind(".") + 1:].lower()
    if fileExtension in UNCOMPRESSED_EXTENSIONS:
        return open(fileName)
    else:
        # get_archive_formats() lists archive *creation* formats; matching the bare
        # extension against the format name only works for names like "zip" or "tar".
        formats = shutil.get_archive_formats()
        for fmt in formats:
            if fileExtension == fmt[0]:
                tempDir = "tempDir"
                if os.path.exists(tempDir):
                    shutil.rmtree(tempDir)

                shutil.unpack_archive(fileName, tempDir)
                onlyfiles = [f for f in os.listdir(tempDir) if os.path.isfile(os.path.join(tempDir, f))]
                if not onlyfiles:
                    return None
                return open(os.path.join(tempDir, onlyfiles[0]))
    return None
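A hedged alternative to the extension check above: shutil.get_unpack_formats() reports the extensions each registered unpacker handles, so the file name can be matched against those instead of against an archive-format name. The helper below is hypothetical and not part of the original snippet:

import os
import shutil
import tempfile

def open_first_member(file_name):
    """Unpack file_name if shutil knows its extension, then open the first file inside."""
    for fmt_name, extensions, _description in shutil.get_unpack_formats():
        if any(file_name.lower().endswith(ext) for ext in extensions):
            temp_dir = tempfile.mkdtemp()
            shutil.unpack_archive(file_name, temp_dir, fmt_name)
            members = [f for f in os.listdir(temp_dir)
                       if os.path.isfile(os.path.join(temp_dir, f))]
            return open(os.path.join(temp_dir, members[0])) if members else None
    return None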
Example #3
    def restore(self, backup_file, message=str(), delay=0):
        """
        Restores the backup of the world from the given *backup_file*. If
        the backup archive contains the configuration and the server, they
        are restored too.

        Raises: Exception
                A lot of things can go wrong here, so catch *Exception* if you
                want to be sure you catch everything.
        """
        # Extract the backup into a temporary directory and then copy everything
        # into the EMSM directories.
        with tempfile.TemporaryDirectory() as temp_dir:
            shutil.unpack_archive(
                filename = backup_file,
                extract_dir = temp_dir
                )

            # Stop the world.
            was_online = self.world.is_online()
            if was_online:
                self.world.send_command("say {}".format(message))
                time.sleep(delay)
                self.world.kill_processes()

            # Restore the world.
            manifest = self._get_manifest(temp_dir)

            self._restore_world(manifest, temp_dir)
            self._restore_world_configuration(manifest, temp_dir)
            self._restore_server(manifest, temp_dir)

        if was_online:
            self.world.start()
        return None
Example #4
    def restore(self, backup_file, message=str(), delay=0):
        """
        Restores the backup of the world from the given *backup_file*. If
        the backup archive contains the server executable it will be restored
        too if necessary.

        Exceptions:
            * WorldStartFailed
            * WorldStopFailed
            * ... shutil.unpack_archive() exceptions ...
        """
        # Extract the backup into a temporary directory and then copy everything
        # into the EMSM directories.
        with tempfile.TemporaryDirectory() as temp_dir:
            shutil.unpack_archive(
                filename = backup_file,
                extract_dir = temp_dir
                )

            # Stop the world.
            was_online = self._world.is_online()
            if was_online:
                self._world.send_command("say {}".format(message))
                time.sleep(delay)
                self._world.kill_processes()

            # Restore the world.
            self._restore_world(temp_dir)
            self._restore_world_conf(temp_dir)

        # Restart the world if it was online before restoring.
        if was_online:
            self._world.start()
        return None
Example #5
 def extract_package(self, package):
     # shutil only registers an xztar unpacker on Python 3.5+; on older
     # versions, register one manually when the lzma module is available.
     if sys.version_info < (3, 5):
         try:
             import lzma
             del lzma
             try:
                 shutil.register_unpack_format('xztar', ['.tar.xz', '.txz'], shutil._unpack_tarfile, [], "xz'ed tar-file")
             except shutil.RegistryError:
                 pass
         except ImportError:
             pass
     target_dir = os.path.join(self.subdir_root, package.get('directory'))
     if os.path.isdir(target_dir):
         return
     extract_dir = self.subdir_root
     # Some upstreams ship packages that do not have a leading directory.
     # Create one for them.
     try:
         package.get('lead_directory_missing')
         os.mkdir(target_dir)
         extract_dir = target_dir
     except KeyError:
         pass
     shutil.unpack_archive(os.path.join(self.cachedir, package.get('source_filename')), extract_dir)
     if package.has_patch():
         shutil.unpack_archive(os.path.join(self.cachedir, package.get('patch_filename')), self.subdir_root)
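The version check above relies on the private shutil._unpack_tarfile helper. A sketch of the same registration using only public APIs, assuming the lzma module is available:

import shutil
import tarfile

def _untar_xz(filename, extract_dir):
    # Public-API unpacker for .tar.xz archives.
    with tarfile.open(filename, 'r:xz') as tar:
        tar.extractall(extract_dir)

try:
    shutil.register_unpack_format('xztar', ['.tar.xz', '.txz'], _untar_xz,
                                  description="xz'ed tar-file")
except shutil.RegistryError:
    pass  # an xztar unpacker is already registered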
Example #6
 def download(self):
     furlo = FBURLopener({})
     try:
         tmpfile, msg = furlo.retrieve(self.url, reporthook=self.rhook)
         print()
     except HTTPError as ex:
         urlcleanup()
         sys.exit(ex)
     except URLError as ex:
         urlcleanup()
         sys.exit(ex)
     if os.path.exists(self.dlpath) and filecmp.cmp(self.dlpath, tmpfile):
         print('You already have the newest version of ' + self.plugin)
         done = True
     else:
         shutil.copyfile(tmpfile, self.dlpath)
         print(self.plugin + ' downloaded.')
         done = False
     urlcleanup()
     if done or self.format == 'jar':
         return
     try:
         shutil.unpack_archive(self.dlpath, self.dest_dir, self.format)
     except ValueError as ex:
         sys.exit('Error: ' + str(ex))
Example #7
    def restore(self, backup_file, message=str(), delay=0):
        """
        Restores the backup backup_file.

        Raises: WorldError
        """
        with tempfile.TemporaryDirectory() as temp_dir:
            shutil.unpack_archive(
                filename = backup_file,
                extract_dir = temp_dir
                )
            
            # Stop the world.
            was_online = self.world.is_online()
            if was_online:
                self.world.send_command("say {}".format(message))
                time.sleep(delay)
                self.world.kill_processes()

            # Delete the world and restore the backup.
            for i in range(5):
                # XXX: Fixes an error with server.log.lck
                # and shutil.rmtree(...).
                try:
                    shutil.rmtree(self.world.directory)
                    break
                except OSError:
                    time.sleep(0.05)
            shutil.copytree(temp_dir, self.world.directory)
            
        if was_online:
            self.world.start()
        return None
Example #8
 def get_treebank_perseus_latin_tar(self):
     """Fetch Persus's Latin treebank files"""
     orig_files_dir_treebank_perseus_latin = \
         os.path.join(self.orig_files_dir, 'treebank_perseus_latin')
     pg_url = 'https://raw.githubusercontent.com/kylepjohnson/' \
              'treebank_perseus_latin/master/treebank_perseus_latin.tar.gz'
     session = requests.Session()
     session.mount(pg_url, SSLAdapter(ssl.PROTOCOL_TLSv1))
     pg_tar = session.get(pg_url, stream=True)
     treebank_perseus_latin_file_name = urlsplit(pg_url).path.split('/')[-1]
     treebank_perseus_latin_file_path = \
         os.path.join(orig_files_dir_treebank_perseus_latin,
                      treebank_perseus_latin_file_name)
     try:
         with open(treebank_perseus_latin_file_path, 'wb') as new_file:
             new_file.write(pg_tar.content)
             logging.info('Finished writing %s.',
                          treebank_perseus_latin_file_name)
     except IOError:
         logging.error('Failed to write file %s',
                       treebank_perseus_latin_file_name)
     try:
         shutil.unpack_archive(treebank_perseus_latin_file_path,
                               self.compiled_files_dir)
         logging.info('Finished unpacking %s',
                      treebank_perseus_latin_file_name)
     except IOError:
         logging.info('Failed to unpack %s.',
                      treebank_perseus_latin_file_name)
Example #9
 def get_treebank_perseus_latin_tar(self):
     """Fetch Perseus's Latin treebank files"""
     compiled_files_dir_treebank_perseus_latin = os.path.join(self.compiled_files_dir, 'treebank_perseus_latin')
     if os.path.isdir(compiled_files_dir_treebank_perseus_latin) is True:
         pass
     else:
         os.mkdir(compiled_files_dir_treebank_perseus_latin)
         logging.info('Made new directory at "%s"', compiled_files_dir_treebank_perseus_latin)
     orig_files_dir_treebank_perseus_latin = \
         os.path.join(self.orig_files_dir, 'treebank_perseus_latin')
     pg_url = 'https://raw.githubusercontent.com/cltk/latin_treebank_perseus/master/latin_treebank_perseus.tar.gz'
     session = requests.Session()
     session.mount(pg_url, SSLAdapter(ssl.PROTOCOL_TLSv1))
     pg_tar = session.get(pg_url, stream=True)
     treebank_perseus_latin_file_name = urlsplit(pg_url).path.split('/')[-1]
     treebank_perseus_latin_file_path = \
         os.path.join(orig_files_dir_treebank_perseus_latin,
                      treebank_perseus_latin_file_name)
     try:
         with open(treebank_perseus_latin_file_path, 'wb') as new_file:
             new_file.write(pg_tar.content)
             logging.info('Finished writing %s.',
                          treebank_perseus_latin_file_name)
     except IOError:
         logging.error('Failed to write file %s',
                       treebank_perseus_latin_file_name)
     try:
         shutil.unpack_archive(treebank_perseus_latin_file_path,
                               compiled_files_dir_treebank_perseus_latin)
         logging.info('Finished unpacking %s',
                      treebank_perseus_latin_file_name)
     except IOError:
         logging.info('Failed to unpack %s.',
                      treebank_perseus_latin_file_name)
Example #10
 def get_lacus_curtius_latin_tar(self):
     """Fetch lacus_curtius_latin_tar"""
     orig_files_dir_lacus_curtius_latin = \
         os.path.join(self.orig_files_dir, 'lacus_curtius_latin')
     lc_url = 'https://raw.githubusercontent.com/cltk/' \
              'latin_corpus_lacus_curtius/master/lacus_curtius.tar.gz'
     session = requests.Session()
     session.mount(lc_url, SSLAdapter(ssl.PROTOCOL_TLSv1))
     ll_tar = session.get(lc_url, stream=True)
     lacus_curtius_latin_file_name = urlsplit(lc_url).path.split('/')[-1]
     lacus_curtius_latin_file_path = \
         os.path.join(orig_files_dir_lacus_curtius_latin,
                      lacus_curtius_latin_file_name)
     try:
         with open(lacus_curtius_latin_file_path, 'wb') as new_file:
             new_file.write(ll_tar.content)
             logging.info('Finished writing %s.',
                          lacus_curtius_latin_file_name)
     except IOError:
         logging.error('Failed to write file %s',
                       lacus_curtius_latin_file_name)
     try:
         shutil.unpack_archive(lacus_curtius_latin_file_path,
                               self.compiled_files_dir)
         logging.info('Finished unpacking %s',
                      lacus_curtius_latin_file_name)
     except IOError:
         logging.info('Failed to unpack %s.', lacus_curtius_latin_file_name)
Example #11
 def get_sentence_tokens_greek_tar(self):
     """Fetch algorithm for Greek sentence tokenization"""
     orig_files_dir_tokens_greek = \
         os.path.join(self.orig_files_dir, 'sentence_tokens_greek')
     # make compiled files dir for tokens_greek
     compiled_files_dir_tokens_greek = \
         os.path.join(self.compiled_files_dir, 'sentence_tokens_greek')
     if os.path.isdir(compiled_files_dir_tokens_greek) is True:
         pass
     else:
         os.mkdir(compiled_files_dir_tokens_greek)
     pg_url = 'https://raw.githubusercontent.com/cltk/' \
              'cltk_greek_sentence_tokenizer/master/greek.tar.gz'
     session = requests.Session()
     session.mount(pg_url, SSLAdapter(ssl.PROTOCOL_TLSv1))
     pg_tar = session.get(pg_url, stream=True)
     tokens_greek_file_name = urlsplit(pg_url).path.split('/')[-1]
     tokens_greek_file_path = os.path.join(orig_files_dir_tokens_greek,
                                           tokens_greek_file_name)
     try:
         with open(tokens_greek_file_path, 'wb') as new_file:
             new_file.write(pg_tar.content)
             logging.info('Finished writing %s.', tokens_greek_file_name)
             try:
                 shutil.unpack_archive(tokens_greek_file_path,
                                       compiled_files_dir_tokens_greek)
                 logging.info('Finished unpacking %s.',
                              tokens_greek_file_name)
             except IOError:
                 logging.info('Failed to unpack %s.',
                              tokens_greek_file_name)
     except IOError:
         logging.error('Failed to write file %s', tokens_greek_file_name)
Example #12
 def get_pos_latin_tar(self):
     """Fetch Latin part-of-speech files"""
     orig_files_dir_pos_latin = os.path.join(self.orig_files_dir,
                                             'pos_latin')
     pg_url = 'https://raw.githubusercontent.com/cltk/pos_latin/' \
              'master/pos_latin.tar.gz'
     session = requests.Session()
     session.mount(pg_url, SSLAdapter(ssl.PROTOCOL_TLSv1))
     pg_tar = session.get(pg_url, stream=True)
     pos_latin_file_name = urlsplit(pg_url).path.split('/')[-1]
     pos_latin_file_path = os.path.join(orig_files_dir_pos_latin,
                                        pos_latin_file_name)
     try:
         with open(pos_latin_file_path, 'wb') as new_file:
             new_file.write(pg_tar.content)
             logging.info('Finished writing %s.', pos_latin_file_name)
     except IOError:
         logging.error('Failed to write file %s', pos_latin_file_name)
     compiled_files_dir_pos_latin = os.path.join(self.compiled_files_dir,
                                             'pos_latin')
     if os.path.isdir(compiled_files_dir_pos_latin) is True:
         pass
     else:
         os.mkdir(compiled_files_dir_pos_latin)
         logging.info('Made new directory "pos_latin" at "%s"',
                      compiled_files_dir_pos_latin)
     try:
         shutil.unpack_archive(pos_latin_file_path, compiled_files_dir_pos_latin)
         logging.info('Finished unpacking %s', pos_latin_file_name)
     except IOError:
         logging.info('Failed to unpack %s.', pos_latin_file_name)
Example #13
def read_gtfs(path):
    """
    Given a path (string or pathlib object) to a (zipped) GTFS feed,
    read the feed and return its corresponding Feed instance.

    NOTES:
        - Ignore files that are not valid GTFS; see https://developers.google.com/transit/gtfs/reference/.
        - Ensure that all ID fields that could be string ('stop_id', 'route_id', etc.) are parsed as strings and not as numbers.    
    """
    path = Path(path)
    
    # Unzip feed into temporary directory
    tmp_dir = tempfile.TemporaryDirectory()
    shutil.unpack_archive(str(path), tmp_dir.name, 'zip')

    # Read valid GTFS files into Pandas data frames
    feed_dict = {}
    dtype = {field: str for field in Feed.str_fields} # ensure some string types
    for p in Path(tmp_dir.name).iterdir():        
        name = p.stem
        if name in Feed.gtfs_tables:
            feed_dict[name] = pd.read_csv(p, dtype=dtype)
        
    # Delete temporary directory
    tmp_dir.cleanup()
    
    return Feed(**feed_dict)
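One remark on the cleanup above: tmp_dir.cleanup() is skipped if unpack_archive or read_csv raises. A sketch of the same logic using TemporaryDirectory as a context manager, assuming the Feed class and its str_fields/gtfs_tables attributes from the snippet:

import shutil
import tempfile
from pathlib import Path

import pandas as pd

def read_gtfs(path):
    path = Path(path)
    with tempfile.TemporaryDirectory() as tmp_dir:
        shutil.unpack_archive(str(path), tmp_dir, 'zip')
        dtype = {field: str for field in Feed.str_fields}  # keep ID fields as strings
        feed_dict = {p.stem: pd.read_csv(p, dtype=dtype)
                     for p in Path(tmp_dir).iterdir()
                     if p.stem in Feed.gtfs_tables}
    return Feed(**feed_dict)  # the temporary directory is removed even if reading fails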
Example #14
 def get_cltk_latin_linguistic_data_tar(self):
     """Get CLTK's ML taggers, tokenizers, etc."""
     orig_files_dir_ling_latin = \
         os.path.join(self.orig_files_dir, 'cltk_latin_linguistic_data')
     latin_dir = os.path.join(self.cltk_data, 'latin')
     if os.path.isdir(latin_dir) is True:
         pass
     else:
         os.mkdir(latin_dir)
     latin_dir_ling = os.path.join(latin_dir, 'cltk_linguistic_data')
     if os.path.isdir(latin_dir_ling) is True:
         pass
     else:
         os.mkdir(latin_dir_ling)
     pg_url = 'https://raw.githubusercontent.com/cltk/cltk_latin_linguistic_data/master/latin.tar.gz'
     session = requests.Session()
     session.mount(pg_url, SSLAdapter(ssl.PROTOCOL_TLSv1))
     pg_tar = session.get(pg_url, stream=True)
     ling_latin_file_name = urlsplit(pg_url).path.split('/')[-1]
     tar_latin_file_path = os.path.join(orig_files_dir_ling_latin,
                                        ling_latin_file_name)
     try:
         with open(tar_latin_file_path, 'wb') as new_file:
             new_file.write(pg_tar.content)
             logging.info('Finished writing %s.', ling_latin_file_name)
             try:
                 shutil.unpack_archive(tar_latin_file_path,
                                       latin_dir_ling)
                 logging.info('Finished unpacking %s.',
                              ling_latin_file_name)
             except IOError:
                 logging.info('Failed to unpack %s.',
                              ling_latin_file_name)
     except IOError:
         logging.error('Failed to write file %s', ling_latin_file_name)
Example #15
def get_file_content( params, target_directory ):
    directory_content = params.get( 'directory_content', [] )
    for content in directory_content:
        target_path = os.path.join( target_directory, content.get( 'subdir', '' ) )
        try:
            os.makedirs( target_path )
        except OSError:
            pass
        if content.get( 'file_source', {}).get( 'file_source_selector', None ) == 'URL':
            ( filename, headers ) = urlretrieve( content.get( 'file_source', {}).get( 'file_URL', None ) )
            try:
                bname = headers['Content-Disposition']
            except KeyError:
                bname = os.path.basename( urllib2.urlparse.urlsplit( content.get( 'file_source', {}).get( 'file_URL', None ) ).path )
        else:
            filename = content.get( 'file_source', {}).get( 'file_history', None )
            bname = os.path.basename( filename )
        file_action = content.get( 'file_action', {}).get( 'file_action_selector', None )
        if file_action == 'unpack':
            unpack_archive( filename, target_path )
        else:
            filename_override = content.get( 'file_action', {}).get( 'filename_override', None )
            if filename_override:
                target_path = os.path.join( target_path, filename_override )
            else:
                target_path = os.path.join( target_path, bname )
            shutil.copyfile( filename, target_path )
    return len( directory_content )
Example #16
 def telecharger_envoi(self, archive):
     """Intégration d'une archive au sein d'un dossier
     """
     tmp = cfg.TMP / motaleatoire(6)
     tmp.mkdir()
     tmp_archive = tmp / archive.filename
     archive.save(str(tmp_archive))
     try:
         os.chdir(str(tmp))
         shutil.unpack_archive(
             str(tmp_archive),
             extract_dir=str(tmp),
             format='zip',
         )
         tmp_archive.unlink()
         dossier = [dss for dss in tmp.iterdir()]
         if len(dossier) != 1:
             return self.afficher(markdown(_(
                 "L'archive doit contenir un et un seul dossier, "
                 "dans lequel doivent se trouver :\n\n"
                 "- les fichiers du projet ;\n"
                 "- éventuellement, le dossier `.git` contenant "
                 "les données de gestion de version."
             )))
         dossier = dossier[0]
         shutil.copytree(str(dossier), str(self.dossier))
         if not (self.dossier / '.git').is_dir():
             self.depot.initialiser()
         b.redirect(i18n_path('/' + self.chemin))
     except shutil.ReadError as err:
         f.traiter_erreur(err)
         return self.afficher(_("Ceci n'est pas une archive zip."))
     finally:
         os.chdir(str(cfg.PWD))
         shutil.rmtree(str(tmp))
Example #17
def import_yaml():
    '''
    import user pre-defined files from ~/xpdUser/Import

    Files can be compressed or .yml; once imported, bt.list() should show the updated acquire object list
    '''
    src_dir = glbl.import_dir
    dst_dir = glbl.yaml_dir
    f_list = os.listdir(src_dir)
    if len(f_list) == 0:
        print('INFO: There are no pre-defined user objects in {}'.format(src_dir))
        return 
    # two possibilities: .yml or compressed files; shutil should handle all compressed cases
    moved_f_list = []
    for f in f_list:
        full_path = os.path.join(src_dir, f)
        (root, ext) = os.path.splitext(f)
        if ext == '.yml':
            shutil.copy(full_path, dst_dir)
            moved_f_list.append(f)
            # FIXME - do we want user confirmation?
            os.remove(full_path)
        else:
            try:
                shutil.unpack_archive(full_path, dst_dir)
                moved_f_list.append(f)
                # FIXME - do we want user confirmation?
                os.remove(full_path)
            except shutil.ReadError:
                print('Unrecognized file type {} is found inside {}'.format(f, src_dir))
    return moved_f_list
Example #18
def fetch_data():
    """
    What it expects:
    ----------------
    That the DATA_DIR has been created

    What it does:
    -------------
    Downloads the zip file from the Guardian
    Saves it to DATA_DIR
    Unzips it into DATA_DIR

    What it returns:
    ----------------
    Nothing
    """

    resp = requests.get(SRC_URL)
    print("Downloading", SRC_URL)
    # save the zip file to disk
    with open(ZIPPED_DATA_FILENAME, 'wb') as f:
        f.write(resp.content)
    # unzip the zip file
    print("Unzipping", ZIPPED_DATA_FILENAME)
    unpack_archive(ZIPPED_DATA_FILENAME, extract_dir=DATA_DIR)
Example #19
    def test_unpack_registery(self):

        formats = get_unpack_formats()

        def _boo(filename, extract_dir, extra):
            self.assertEquals(extra, 1)
            self.assertEquals(filename, 'stuff.boo')
            self.assertEquals(extract_dir, 'xx')

        register_unpack_format('Boo', ['.boo', '.b2'], _boo, [('extra', 1)])
        unpack_archive('stuff.boo', 'xx')

        # trying to register a .boo unpacker again
        self.assertRaises(RegistryError, register_unpack_format, 'Boo2',
                          ['.boo'], _boo)

        # should work now
        unregister_unpack_format('Boo')
        register_unpack_format('Boo2', ['.boo'], _boo)
        self.assertIn(('Boo2', ['.boo'], ''), get_unpack_formats())
        self.assertNotIn(('Boo', ['.boo'], ''), get_unpack_formats())

        # let's leave a clean state
        unregister_unpack_format('Boo2')
        self.assertEquals(get_unpack_formats(), formats)
Example #20
def aurbuild(package_name):
    os.chdir(BUILDFOLDER)
    # Get the filename from the URL.
    url = "https://aur.archlinux.org/cgit/aur.git/snapshot/{0}.tar.gz".format(package_name)
    fileinfo = urllib.parse.urlparse(url)
    filename = urllib.parse.unquote(os.path.basename(fileinfo.path))
    # Get the file.
    urllib.request.urlretrieve(url, filename)

    # Extract the file.
    shutil.unpack_archive(filename, BUILDFOLDER)
    AURFOLDER=BUILDFOLDER+"/"+package_name

    subprocess.run("""
    chmod a+rwx -R {0}
    cd {0}
    su {2} -s /bin/bash -c 'makepkg --noconfirm -A -s'
    pacman -U --noconfirm ./{1}-*.pkg.tar.xz
""".format(AURFOLDER, package_name, USERNAMEVAR), shell=True)

    # Cleanup
    os.chdir(BUILDFOLDER)
    if os.path.isdir(AURFOLDER):
        shutil.rmtree(AURFOLDER)
    os.remove(BUILDFOLDER+"/"+filename)
    return
Example #21
    def setUpClass(cls):
        super(TestImporterAFMAsc, cls).setUpClass()

        testdata = os.path.join(os.path.dirname(__file__), '..', '..', 'testdata')
        filepath = os.path.join(testdata, 'afm.asc.zip')

        cls.tmpdir = tempfile.mkdtemp()
        shutil.unpack_archive(filepath, cls.tmpdir, 'zip')
Example #22
 def apply_patch(self):
     path = self.get_file_internal('patch')
     try:
         shutil.unpack_archive(path, self.subdir_root)
     except Exception:
         with tempfile.TemporaryDirectory() as workdir:
             shutil.unpack_archive(path, workdir)
             self.copy_tree(workdir, self.subdir_root)
Example #23
def main():
    print('Downloading', SOURCE_URL)
    resp = requests.get(SOURCE_URL)
    with open(DATA_ZIP_PATH, 'wb') as zip_file:
        zip_file.write(resp.content)
    unpack_archive(DATA_ZIP_PATH, extract_dir=GENDER_DIR)
    baby_filenames = glob(path.join(GENDER_DIR, '*.txt'))
    print('There are {} txt files'.format(len(baby_filenames)))
Example #24
def process_topography(in_path, out_path, high_definition=False):
    """
    Convert each SRTM HGT topography file in the directory ``in_path`` to a SPLAT! Data File (SDF) file in the directory ``out_path``, creating the directory if it does not exist.
    If ``high_definition``, then assume the input data is high definition.

    INPUT:
        - ``in_path``: string or Path object specifying a directory
        - ``out_path``: string or Path object specifying a directory
        - ``high_definition``: boolean

    OUTPUT:
        None.

    NOTES:
        - Calls SPLAT!'s ``srtm2sdf`` or ``srtm2sdf-hd`` 
          (if ``high_definition``) command to do the work
        - Raises a ``subprocess.CalledProcessError`` if SPLAT! fails to 
          convert a file
        - Each SRTM1 or SRTM3 file must have a name of the form <SRTM tile ID>[.something].hgt.zip or <SRTM tile ID>[.something].hgt, e.g. S36E173.SRTMGL3.hgt.zip 
    """
    in_path = Path(in_path)
    out_path = Path(out_path)
    if not out_path.exists():
        out_path.mkdir(parents=True)

    splat = 'srtm2sdf'
    if high_definition:
        splat += '-hd'

    sdf_pattern = re.compile(r"[\d\w\-\:]+\.sdf")

    for f in in_path.iterdir():
        if not (f.name.endswith('.hgt') or f.name.endswith('.hgt.zip')):
            continue

        # Unzip if necessary
        is_zip = False
        if f.name.endswith('.zip'):
            is_zip = True
            shutil.unpack_archive(str(f), str(f.parent))
            tile_id = f.name.split('.')[0]
            f = f.parent/'{!s}.hgt'.format(tile_id)

        # Convert to SDF
        cp = subprocess.run([splat, f.name], cwd=str(f.parent),
          stdout=subprocess.PIPE, universal_newlines=True, check=True)

        # Get name of output file, which SPLAT! created and which differs
        # from the original name, and move the output to the out path
        m = sdf_pattern.search(cp.stdout)
        name = m.group(0)        
        src = in_path/name
        tgt = out_path/name
        shutil.move(str(src), str(tgt))

        # Clean up
        if is_zip:
            f.unlink()
Example #25
 def pre_benchmark_run(self) -> None:
     """
     When benchmarking, it is better to have a bigger workload; we use cppcheck-1.52 for this purpose.
     """
     shutil.unpack_archive(
         os.path.join(get_global_conf().get("install", "source_directory"), "cppcheck-152/cppcheck-1.52.tar.gz"),
         "/tmp/cppcheck-152"
     )
     self.cmd = " ".join(self.cmd.split(" ")[:-1]) + " /tmp/cppcheck-152/cppcheck-1.52"
Example #26
def install_s3_object(s3_bucket, filename, destination):
    s3 = boto3.resource('s3')
    with tempfile.TemporaryDirectory() as tmpdir:
        saved_file = os.path.join(tmpdir, filename)
        s3.Object(s3_bucket, filename).download_file(saved_file)
        if filename.endswith('.tar'):
            shutil.unpack_archive(saved_file, destination)
        else:
            shutil.copy(saved_file, destination)
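The '.tar' check above misses other archive types shutil can handle (.tar.gz, .zip, and so on). A hypothetical helper, not part of the snippet, that asks shutil which extensions have a registered unpacker:

import shutil

def _is_unpackable(filename):
    # True if shutil has an unpacker registered for this file's extension.
    return any(filename.endswith(ext)
               for _name, extensions, _desc in shutil.get_unpack_formats()
               for ext in extensions)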
Example #27
 def pre_benchmark_run(self) -> None:
     """
     For benchmarking, we need to work on bigger files; we use cppcheck-1.48 for this purpose.
     """
     shutil.unpack_archive(
         os.path.join(get_global_conf().get("install", "source_directory"), "cppcheck-148/cppcheck-1.48.tar.gz"),
         "/tmp/cppcheck-148"
     )
     self.cmd = " ".join(self.cmd.split(" ")[:-1]) + " /tmp/cppcheck-148/cppcheck-1.48"
Example #28
def untar(path, fname, deleteTar=True):
    """Unpacks the given archive file to the same directory, then (by default)
    deletes the archive file.
    """
    print('unpacking ' + fname)
    fullpath = os.path.join(path, fname)
    shutil.unpack_archive(fullpath, path)
    if deleteTar:
        os.remove(fullpath)
Example #29
def test_shallow_clone():
    with TemporaryDirectory() as tmpdir:
        archive_filename = resource_filename(__name__, 'data/test_repository.tar')
        unpack_archive(archive_filename, tmpdir)

        with shallow_clone(os.path.join(tmpdir, 'test_repository')) as working_copy_path:
            assert 'known_file' in os.listdir(working_copy_path)
            with open(os.path.join(working_copy_path, 'known_file')) as known_file:
                assert known_file.read() == 'Hello, World!\n'
Example #30
def unpackFile( downloaddir, filename, workdir ):
    """unpack file specified by 'filename' from 'downloaddir' into 'workdir'"""
    EmergeDebug.debug("unpacking this file: %s" % filename)

    ( shortname, ext ) = os.path.splitext( filename )
    if re.match( "(.*\.tar.*$|.*\.tgz$)", filename ):
        shutil.unpack_archive(os.path.join(downloaddir, filename),workdir)
    elif ext == "":
        return True
    return un7zip( os.path.join( downloaddir, filename ), workdir, ext )
Example #31
#!/usr/bin/python3
"""
    Just a routine practice: unzip a .zip file.
"""
import shutil

output_dir = '/tmp/os_logs'
zip_filename = 'os_logs.zip'

shutil.unpack_archive(zip_filename, output_dir, 'zip')
Example #32
def _setupSSHDImpl(public_key, tunnel, ngrok_token, ngrok_region, is_VNC):
    #apt-get update
    #apt-get upgrade
    my_apt = _MyApt()
    #Following packages are useless because nvidia kernel modules are already loaded and I cannot remove or update it.
    #Uninstall them because upgrading them take long time.
    my_apt.deleteInstalledPkg("nvidia-dkms", "nvidia-kernel-common",
                              "nvidia-kernel-source")
    my_apt.commit()
    my_apt.update_upgrade()
    my_apt.commit()

    subprocess.run(["unminimize"],
                   input="y\n",
                   check=True,
                   universal_newlines=True)

    my_apt.installPkg("openssh-server")
    my_apt.commit()
    my_apt.close()

    #Reset host keys
    for i in pathlib.Path("/etc/ssh").glob("ssh_host_*_key"):
        i.unlink()
    subprocess.run(["ssh-keygen", "-A"], check=True)

    #Prevent ssh session disconnection.
    with open("/etc/ssh/sshd_config", "a") as f:
        f.write("\n\n# Options added by remocolab\n")
        f.write("ClientAliveInterval 120\n")
        if public_key != None:
            f.write("PasswordAuthentication no\n")

    msg = ""
    msg += "ECDSA key fingerprint of host:\n"
    ret = subprocess.run(
        ["ssh-keygen", "-lvf", "/etc/ssh/ssh_host_ecdsa_key.pub"],
        stdout=subprocess.PIPE,
        check=True,
        universal_newlines=True)
    msg += ret.stdout + "\n"

    root_password = secrets.token_urlsafe()
    user_password = secrets.token_urlsafe()
    user_name = "colab"
    msg += "✂️" * 24 + "\n"
    msg += f"root password: {root_password}\n"
    msg += f"{user_name} password: {user_password}\n"
    msg += "✂️" * 24 + "\n"
    subprocess.run(["useradd", "-s", "/bin/bash", "-m", user_name])
    subprocess.run(["adduser", user_name, "sudo"], check=True)
    subprocess.run(["chpasswd"],
                   input=f"root:{root_password}",
                   universal_newlines=True)
    subprocess.run(["chpasswd"],
                   input=f"{user_name}:{user_password}",
                   universal_newlines=True)
    subprocess.run(["service", "ssh", "restart"])
    _set_public_key(user_name, public_key)

    ssh_common_options = "-o UserKnownHostsFile=/dev/null -o VisualHostKey=yes"

    if tunnel == "ngrok":
        pyngrok_config = pyngrok.conf.PyngrokConfig(auth_token=ngrok_token,
                                                    region=ngrok_region)
        url = pyngrok.ngrok.connect(port=22,
                                    proto="tcp",
                                    pyngrok_config=pyngrok_config)
        m = re.match("tcp://(.+):(\d+)", url)
        hostname = m.group(1)
        port = m.group(2)
        ssh_common_options += f" -p {port}"
    elif tunnel == "argotunnel":
        _download(
            "https://bin.equinox.io/c/VdrWdbjqyF/cloudflared-stable-linux-amd64.deb",
            "cloudflared.deb")
        my_apt.installDebPackage("cloudflared.deb")
        print('DEB LINUX')
        shutil.unpack_archive("cloudflared.deb")
        cfd_proc = subprocess.Popen([
            "./cloudflared", "tunnel", "--url", "ssh://*****:*****@{hostname}\n"
    else:
        msg += "Command to connect to the ssh server:\n"
        msg += "✂️" * 24 + "\n"
        msg += f"ssh {ssh_common_options} {user_name}@{hostname}\n"
        msg += "✂️" * 24 + "\n"
    return msg
Example #33
print('')
print('###############|| Post processing ||###############')
print('')
for top in topologies:
    model_name = top['name'] + get_extensions(top['framework'])[0]
    weights_name = top['name'] + get_extensions(top['framework'])[1]
    output = os.path.join(args.output_dir, top['output'])
    path_to_model = os.path.join(output, model_name)
    path_to_weights = os.path.join(output, weights_name)
    for path, subdirs, files in os.walk(output):
        for name in files:
            fname = os.path.join(path, name)
            if fname.endswith('.tar.gz'):
                print('========= Extracting files from %s.tar.gz' %
                      (top['name']))
                shutil.unpack_archive(fname, path)
    if {'model_path_prefix', 'weights_path_prefix'} <= top.keys():
        downloaded_model = os.path.join(output, top['model_path_prefix'])
        downloaded_weights = os.path.join(output, top['weights_path_prefix'])
        if (os.path.exists(downloaded_model)
                and os.path.exists(downloaded_weights)):
            print(
                '========= Moving %s and %s to %s after untarring the archive ========='
                % (model_name, weights_name, output))
            shutil.move(downloaded_model, path_to_model)
            shutil.move(downloaded_weights, path_to_weights)
    elif 'model_path_prefix' in top:
        downloaded_model = os.path.join(output, top['model_path_prefix'])
        if os.path.exists(downloaded_model):
            print(
                '========= Moving %s to %s after untarring the archive ========='
Example #34
import shutil
import sys
from pathlib import Path

import src.utils

repo_dir = Path(__file__).parent.resolve()
assert repo_dir.name == 'hicetnunc-dataset'

cache_dir = repo_dir / 'cache'
export_cache_dir = repo_dir / 'export_cache'
export_cache_archive_file = repo_dir / 'cache.zip'
export_dataset_archive_file = repo_dir / 'dataset.zip'

if not (cache_dir / 'ipfs0').exists():
    if export_cache_archive_file.exists():
        print('Extracting cache.zip...', end=' ')
        sys.stdout.flush()
        shutil.unpack_archive(export_cache_archive_file, cache_dir, 'zip')
        print('done\n')
    else:
        print('You should download cache.zip and put into repository root')
        sys.exit()

ipfs0_dir = cache_dir / 'ipfs0'
ipfs0_dir.mkdir(exist_ok=True)
ipfs1_dir = cache_dir / 'ipfs1'
ipfs1_dir.mkdir(exist_ok=True)
previews_dir = cache_dir / 'previews'
previews_dir.mkdir(exist_ok=True)
previews_dimensions_cache_file = previews_dir / 'dimensions.json'

parsed_transactions_dir = cache_dir / 'parsed_transactions'
parsed_transactions_dir.mkdir(exist_ok=True)
Example #35
for url in urls:

    name = url[-14:-7]

    os.chdir(path)

    dir = os.path.join(f"{name}")
    if not os.path.exists(dir):
        os.mkdir(dir)

    urllib.request.urlretrieve(url, path + f"/{name}/{name}.v4.tar")

    os.chdir(path + f"/{name}")

    shutil.unpack_archive(f"{name}.v4.tar",
                          extract_dir=path + f"/{name}")

print("Files successfully downloaded")

#####################################

# Extract the files

os.chdir(path)

folders = [folder for folder in os.listdir() if "F1" in folder]

dir = os.path.join("Images")
if not os.path.exists(dir):
    os.mkdir(dir)
Example #36
def unzip_batch(batch, ligand_dir):
    targz = str(ligand_dir / batch) + ".tar.gz"
    output_dir = str(ligand_dir)
    shutil.unpack_archive(targz, output_dir, format='gztar')
Example #37
 try:
     open(filepath, "r").read()
 except UnicodeDecodeError:
     stream = open(filepath, "rb").read()
     if len(stream) > 4:
         if stream[0:4] == b"PK\x03\x04":
             print(
                 "[ddcrypt] file: zip archive data [sig:PK\\x03\\x04]"
             )
             filenoformat = filename
             if filename.endswith(".zip") or filename.endswith(
                     ".apk"):
                 filenoformat = filename[0:len(filename) - 4]
                 destdir = destdir + filenoformat + "/"
                 print("[ddcrypt] extracting raw resources...")
                 shutil.unpack_archive(filepath, destdir, "zip")
                 strings = ""
                 try:
                     androidmanifest = open(
                         destdir + "AndroidManifest.xml",
                         "r").read()
                     strings = androidmanifest
                     userdir = destdir + "assets/user"
                     if os.path.isdir(userdir):
                         isfilejs = False
                         for f in os.listdir(userdir):
                             if f.lower().endswith(".js"):
                                 isfilejs = True
                                 break
                         if not isfilejs:
                             print(
Example #38
def trigger_training_job():

    # Define vars <change the vars>.
    # In a production situation, don't put secrets in source code; use secret variables instead,
    # see https://docs.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch#secret-variables
    workspace="<Name of your workspace>"
    subscription_id="<Subscription id>"
    resource_grp="<Name of your resource group where aml service is created>"

    domain = "westeurope.azuredatabricks.net" # change location in case databricks instance is not in westeurope
    DBR_PAT_TOKEN = bytes("<<your Databricks Personal Access Token>>", encoding='utf-8') # adding b'

    notebookRemote = "/3_IncomeNotebookDevops"
    experiment_name = "experiment_model_release"
    model_name_run = datetime.datetime.now().strftime("%Y%m%d%H%M%S")+ "_dbrmod.mml" # in case you want to change the name, keep the .mml extension
    model_name = "databricksmodel.mml" # in case you want to change the name, keep the .mml extension

    #
    # Step 1: Create job and attach it to cluster
    #
    # In this step, secrets are added as parameters (spn_tenant, spn_clientid, spn_clientsecret)
    # Never do this in a production situation, but use secret scope backed by key vault instead
    # See https://docs.azuredatabricks.net/user-guide/secrets/secret-scopes.html#azure-key-vault-backed-scopes
    response = requests.post(
        'https://%s/api/2.0/jobs/create' % domain,
        headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN},
        json={
                "name": "Run AzureDevopsNotebook Job",
                "new_cluster": {
                    "spark_version": "4.0.x-scala2.11",
                    "node_type_id": "Standard_D3_v2",
                    "spark_env_vars": {
                        'PYSPARK_PYTHON': '/databricks/python3/bin/python3',
                    },
                    "autoscale": {
                        "min_workers": 1,
                        "max_workers": 2
                    }
                },
                "libraries": [
                   {
                     "pypi": {
                        "package": "azureml-sdk[databricks]"
                     }
                  }
                ],
                "notebook_task": {
                "notebook_path": notebookRemote,
                "base_parameters": [{"key":"subscription_id", "value":subscription_id}, {"key":"resource_group", "value":resource_grp}, {"key":"workspace_name","value":workspace},
                                    {"key":"model_name", "value":model_name_run}
                                   ]
             }
        }
    )

    if response.status_code != 200:
        print("Error launching cluster: %s: %s" % (response.json()["error_code"], response.json()["message"]))
        exit(2)

    #
    # Step 2: Start job
    #
    databricks_job_id = response.json()['job_id']

    response = requests.post(
        'https://%s/api/2.0/jobs/run-now' % domain,
        headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN},
        json={
            "job_id": + databricks_job_id
        }
    )

    if response.status_code != 200:
        print("Error launching cluster: %s: %s" % (response.json()["error_code"], response.json()["message"]))
        exit(3)

    print(response.json()['run_id'])

    #
    # Step 3: Wait until job is finished
    #
    databricks_run_id = response.json()['run_id']
    scriptRun = 1
    count = 0
    while scriptRun == 1:
        response = requests.get(
            'https://%s/api/2.0/jobs/runs/get?run_id=%s' % (domain, databricks_run_id),
            headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN},
        )

        state = response.json()['state']
        life_cycle_state = state['life_cycle_state']
        print(state)

        if life_cycle_state in ["TERMINATED", "SKIPPED", "INTERNAL_ERROR"]:
            result_state = state['result_state']
            if result_state == "SUCCESS":
                print("run ok")
                scriptRun = 0
            #exit(0)
            else:
                exit(4)
        elif count > 180:
            print("time out occurred after 30 minutes")
            exit(5)
        else:
            count += 1
            time.sleep(30) # wait 30 seconds before next status update

    #
    # Step 4: Retrieve model from dbfs
    #
    mdl, ext = model_name_run.split(".")
    model_zip_run = mdl + ".zip"
    
    response = requests.get(
        'https://%s/api/2.0/dbfs/read?path=/%s' % (domain, model_zip_run),
        headers={'Authorization': b"Bearer " + DBR_PAT_TOKEN}
    )
    if response.status_code != 200:
        print("Error copying dbfs results: %s: %s" % (response.json()["error_code"], response.json()["message"]))
        exit(6)

    model_output = base64.b64decode(response.json()['data'])

    # download model in deploy folder
    os.chdir("deploy")
    with open(model_zip_run, "wb") as outfile:
        outfile.write(model_output)
    print("Downloaded model {} to Project root directory".format(model_name))

    #
    # Step 5: Put model to Azure ML Service
    #
    cli_auth = AzureCliAuthentication()

    ws = Workspace(workspace_name = workspace,
               subscription_id = subscription_id,
               resource_group = resource_grp,
               auth=cli_auth)
    ws.get_details()
    # start a training run by defining an experiment
    myexperiment = Experiment(ws, experiment_name)
    run = myexperiment.start_logging()
    run.upload_file("outputs/" + model_zip_run, model_zip_run)
    run.complete()
    run_id = run.id
    print ("run id:", run_id)

    # unzip file to model_name_run
    shutil.unpack_archive(model_zip_run, model_name_run)

    model = Model.register(
        model_path=model_name_run,  # this points to a local file
        model_name=model_name,  # this is the name the model is registered as
        tags={"area": "spar", "type": "regression", "run_id": run_id},
        description="Medium blog test model",
        workspace=ws,
    )
    print("Model registered: {} \nModel Description: {} \nModel Version: {}".format(model.name, model.description, model.version))

    # Step 6. Finally, writing the registered model details to conf/model.json
    model_json = {}
    model_json["model_name"] = model.name
    model_json["model_version"] = model.version
    model_json["run_id"] = run_id
    model_json["model_name_run"] = model_name_run
    with open("../conf/model.json", "w") as outfile:
        json.dump(model_json, outfile)
Example #39
def add_node_protocol(self):
	'''
	Add the node to the network
	'''
	timeout = False
	print("Add Node protocol started !")
	print("Asking Leader Information from Standard IP's ")
	#send message "add_node" to standard IP's
	send_msg_add_node(self.config_table, self.PORT)

	with self.AN_condition:
		timeout = self.AN_condition.wait(timeout = self.add_node_timeout)	# blocking - Waiting for reply from sponsor node 
	
	if timeout is False:
		print("No reply from any node... Sending request again")
		send_msg_add_node(self.config_table, self.PORT)
		with self.AN_condition:
			timeout = self.AN_condition.wait(timeout = self.add_node_timeout)	# blocking - Waiting for reply from sponsor node 
			
		if timeout is False:
			print("Still no response. Exiting...")
			os._exit(0)



	message = self.thread_msg_qs[self.main_thread_tid].get()
	self.sponsor_host = message._source_host
	self.sponsor_port = message.get_data('port')		# Port on which sponsor server is listening							
	self.ldr_ip = message.get_data('ldr_ip')
	self.ldr_port = message.get_data('ldr_port')
	self.network_dict = message.get_data('network_dict')
	self.ldr_id = message.get_data('id')
	self.network_dict[message._msg_id[0]]=(self.sponsor_host,self.sponsor_port,1)

	self.pause_heartbeat = False

	print("network_dict: ",self.network_dict)
	print("Sponsor_host is : ",self.sponsor_host," Sponsor port is : ",self.sponsor_port)
	print("Message recieved: Leader ip :", self.ldr_ip, "Leader port :",self.ldr_port)
	
	try:
		with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
			s.connect((self.ldr_ip,self.ldr_port))
			send_msg(s, Message(Msg_type['AN_assign_id'], data_dict = {'port': self.PORT }))	# Contacting leader to give a new id
	except:
		exception_handler(self.ldr_ip,self.ldr_port)

	with self.AN_condition:
		timeout = self.AN_condition.wait(timeout = self.add_node_timeout)	# blocking - Waiting for leader to assign ID

	if timeout is False:
		print("Leader not responding. Exiting...")	
		os._exit(0)

	message = self.thread_msg_qs[self.main_thread_tid].get()
	self.node_id = message.get_data('id')

	print("Message recieved: New id assigned :", self.node_id)

	#Get file system from sponsored node
	try:
		with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
			s.connect((self.sponsor_host, self.sponsor_port))
			send_msg(s, Message(Msg_type['AN_FS_data_req'], data_dict = {'port': self.PORT }))
		s.close()
	except:
		print("Not able to connect to sponsor node not responding. Exiting...")
		os._exit(0)
		
	
	with self.AN_condition:
		timeout = self.AN_condition.wait(timeout = 5*self.add_node_timeout)
	
	if timeout is False:
		print("Sponsor node not responding. Exiting...")
		os._exit(0)

	message = self.thread_msg_qs[self.main_thread_tid].get()
	self.file_system_name = message.get_data('name')
	file_system_size = message.get_data('size')
	self.meta_data = message.get_data('meta_data')
	print("File system name is ",self.file_system_name," size is ",file_system_size)

	file_pointer = open(self.file_system_name+".zip",'wb')
	current_size = 0
	chunk_no = 0
	try:
		while True:
			# with self.AN_condition:
			# 	self.AN_condition.wait()
			if self.thread_msg_qs[self.main_thread_tid].empty() is False:
				message = self.thread_msg_qs[self.main_thread_tid].get()
				current_size+=file_pointer.write(message.get_data('data'))
				# current_size+= sys.getsizeof(message.get_data('data'))
				print("Size of chunk_no ",chunk_no," is ",sys.getsizeof(message.get_data('data')))
				print("File size transferred ",current_size)
				if current_size >= file_system_size:
					break
	except:
		print("Connection broken. File System transfer failed. Exiting...")
		os._exit(0)

	file_pointer.close()

	if os.path.exists("./"+self.file_system_name):
		shutil.rmtree("./"+self.file_system_name)
	os.makedirs("./"+self.file_system_name)

	shutil.unpack_archive("./"+self.file_system_name + ".zip",'./'+self.file_system_name)
	# zipfilePath = ("./"+self.file_system_name + ".zip")
	# zip1 = zipfile.ZipFile(zipfilePath)
	# zip1.extractall(".")
	# zip1.close()
	# self.inputs.remove(self.file_system_port)
	# self.file_system_port.close()
	self.add_node = True
	print("Node added successfully !")
	print("DEUBG_MSG: metadata",self.meta_data)
Example #40
 async def extractor(download_path):
     try:
         shutil.unpack_archive(str(download_path), str(directory_path))
     except Exception as error:
         shutil.rmtree(directory_path, onerror=on_error)
         raise DirectoryNotExtractedError(directory_path) from error
Example #41
 def convert_contribution_to_pack(self):
     """Create a Pack in the content repo from the contents of a contribution zipfile"""
     try:
         packs_dir = os.path.join(get_content_path(), 'Packs')
         metadata_dict = {}
         with zipfile.ZipFile(self.contribution) as zipped_contrib:
             with zipped_contrib.open('metadata.json') as metadata_file:
                 click.echo(
                     f'Pulling relevant information from {metadata_file.name}',
                     color=LOG_COLORS.NATIVE)
                 metadata = json.loads(metadata_file.read())
                 # a name passed on the cmd line should take precedence over one pulled
                 # from contribution metadata
                 pack_display_name = self.name or metadata.get(
                     'name', 'ContributionPack')
                 # Strip 'Pack' suffix from pack display name if present
                 pack_display_name = pack_display_name.strip()
                 if pack_display_name.casefold().endswith('pack') and len(
                         pack_display_name) > 4:
                     stripped_pack_display_name = pack_display_name[:-4].strip()
                     pack_display_name = stripped_pack_display_name or pack_display_name
                 pack_name = self.dir_name or self.format_pack_dir_name(
                     metadata.get('name', 'ContributionPack'))
                 # a description passed on the cmd line should take precedence over one pulled
                 # from contribution metadata
                 metadata_dict[
                     'description'] = self.description or metadata.get(
                         'description')
                 metadata_dict['name'] = pack_display_name
                 metadata_dict['author'] = self.author or metadata.get(
                     'author', '')
                 metadata_dict['support'] = 'community'
                 metadata_dict['url'] = metadata.get(
                     'supportDetails',
                     {}).get('url', MARKETPLACE_LIVE_DISCUSSIONS)
                 metadata_dict['categories'] = metadata.get(
                     'categories') if metadata.get('categories') else []
                 metadata_dict['tags'] = metadata.get(
                     'tags') if metadata.get('tags') else []
                 metadata_dict['useCases'] = metadata.get(
                     'useCases') if metadata.get('useCases') else []
                 metadata_dict['keywords'] = metadata.get(
                     'keywords') if metadata.get('keywords') else []
         while os.path.exists(os.path.join(packs_dir, pack_name)):
             click.echo(
                 f'Modifying pack name because pack {pack_name} already exists in the content repo',
                 color=LOG_COLORS.NATIVE)
             if len(pack_name) >= 2 and pack_name[-2].lower(
             ) == 'v' and pack_name[-1].isdigit():
                 # increment by one
                 pack_name = pack_name[:-1] + str(int(pack_name[-1]) + 1)
             else:
                 pack_name += 'V2'
             click.echo(f'New pack name is "{pack_name}"',
                        color=LOG_COLORS.NATIVE)
         pack_dir = os.path.join(packs_dir, pack_name)
         os.mkdir(pack_dir)
         shutil.unpack_archive(filename=self.contribution,
                               extract_dir=pack_dir)
         pack_subdirectories = get_child_directories(pack_dir)
         for pack_subdir in pack_subdirectories:
             basename = os.path.basename(pack_subdir)
             if basename in ENTITY_TYPE_TO_DIR:
                 dst_name = ENTITY_TYPE_TO_DIR.get(basename)
                 src_path = os.path.join(pack_dir, basename)
                 dst_path = os.path.join(pack_dir, dst_name)
                 if os.path.exists(dst_path):
                     # move src folder files to dst folder
                     content_item_dir = dst_path
                     for _, _, files in os.walk(src_path, topdown=False):
                         for name in files:
                             src_file_path = os.path.join(src_path, name)
                             shutil.move(src_file_path, dst_path)
                 else:
                     # replace dst folder with src folder
                     content_item_dir = shutil.move(src_path, dst_path)
                 if basename in {SCRIPT, AUTOMATION, INTEGRATION}:
                     self.content_item_to_package_format(content_item_dir,
                                                         del_unified=True)
         # create pack's base files
         self.full_output_path = pack_dir
         self.create_pack_base_files()
         metadata_dict = Initiator.create_metadata(fill_manually=False,
                                                   data=metadata_dict)
         metadata_path = os.path.join(self.full_output_path,
                                      'pack_metadata.json')
         with open(metadata_path, 'w') as pack_metadata_file:
             json.dump(metadata_dict, pack_metadata_file, indent=4)
         # remove metadata.json file
         os.remove(os.path.join(pack_dir, 'metadata.json'))
         click.echo(
             f'Executing \'format\' on the restructured contribution zip files at "{pack_dir}"'
         )
         from_version = '6.0.0'
         format_manager(input=pack_dir,
                        from_version=from_version,
                        no_validate=True,
                        update_docker=True,
                        assume_yes=True)
     except Exception as e:
         click.echo(
             f'Creating a Pack from the contribution zip failed with error: {e}\n {traceback.format_exc()}',
             color=LOG_COLORS.RED)
     finally:
         if self.contrib_conversion_errs:
             click.echo(
                 'The following errors occurred while converting unified content YAMLs to package structure:'
             )
             click.echo(
                 textwrap.indent('\n'.join(self.contrib_conversion_errs),
                                 '\t'))
Example #42
def load_experiment(args):
    '''load experiment data'''
    package_path = os.path.expanduser(args.path)
    if not os.path.exists(args.path):
        print_error('file path %s does not exist!' % args.path)
        exit(1)
    if args.searchSpacePath and os.path.isdir(args.searchSpacePath):
        print_error(
            'search space path should be a full path with filename, not a directory!'
        )
        exit(1)
    temp_root_dir = generate_temp_dir()
    shutil.unpack_archive(package_path, temp_root_dir)
    print_normal('Loading...')
    # Step1. Validation
    if not os.path.exists(args.codeDir):
        print_error('Invalid: codeDir path does not exist!')
        exit(1)
    if args.logDir:
        if not os.path.exists(args.logDir):
            print_error('Invalid: logDir path does not exist!')
            exit(1)
    experiment_temp_dir = os.path.join(temp_root_dir, 'experiment')
    if not os.path.exists(os.path.join(experiment_temp_dir, 'db')):
        print_error('Invalid archive file: db file does not exist!')
        shutil.rmtree(temp_root_dir)
        exit(1)
    nnictl_temp_dir = os.path.join(temp_root_dir, 'nnictl')
    if not os.path.exists(os.path.join(nnictl_temp_dir, '.experiment')):
        print_error(
            'Invalid archive file: nnictl metadata file does not exist!')
        shutil.rmtree(temp_root_dir)
        exit(1)
    try:
        with open(os.path.join(nnictl_temp_dir, '.experiment'), 'r') as file:
            experiment_metadata = json.load(file)
    except ValueError as err:
        print_error('Invalid nnictl metadata file: %s' % err)
        shutil.rmtree(temp_root_dir)
        exit(1)
    experiments_config = Experiments()
    experiments_dict = experiments_config.get_all_experiments()
    experiment_id = experiment_metadata.get('id')
    if experiment_id in experiments_dict:
        print_error('Invalid: experiment id already exists!')
        shutil.rmtree(temp_root_dir)
        exit(1)
    if not os.path.exists(os.path.join(nnictl_temp_dir, experiment_id)):
        print_error('Invalid: experiment metadata does not exist!')
        shutil.rmtree(temp_root_dir)
        exit(1)

    # Step2. Copy nnictl metadata
    src_path = os.path.join(nnictl_temp_dir, experiment_id)
    dest_path = os.path.join(NNI_HOME_DIR, experiment_id)
    if os.path.exists(dest_path):
        shutil.rmtree(dest_path)
    shutil.copytree(src_path, dest_path)

    # Step3. Copy experiment data
    os.rename(os.path.join(temp_root_dir, 'experiment'),
              os.path.join(temp_root_dir, experiment_id))
    src_path = os.path.join(temp_root_dir, experiment_id)
    experiment_config = Config(experiment_id, temp_root_dir).get_config()
    if args.logDir:
        logDir = args.logDir
        experiment_config['logDir'] = logDir
    else:
        if experiment_config.get('logDir'):
            logDir = experiment_config['logDir']
        else:
            logDir = NNI_HOME_DIR

    dest_path = os.path.join(logDir, experiment_id)
    if os.path.exists(dest_path):
        shutil.rmtree(dest_path)
    shutil.copytree(src_path, dest_path)

    # Step4. Copy code dir
    codeDir = os.path.expanduser(args.codeDir)
    if not os.path.isabs(codeDir):
        codeDir = os.path.join(os.getcwd(), codeDir)
        print_normal('Expand codeDir to %s' % codeDir)
    experiment_config['trial']['codeDir'] = codeDir
    archive_code_dir = os.path.join(temp_root_dir, 'code')
    if os.path.exists(archive_code_dir):
        file_list = os.listdir(archive_code_dir)
        for file_name in file_list:
            src_path = os.path.join(archive_code_dir, file_name)
            target_path = os.path.join(codeDir, file_name)
            if os.path.exists(target_path):
                print_error('Copy %s failed, %s exist!' %
                            (file_name, target_path))
                continue
            if os.path.isdir(src_path):
                shutil.copytree(src_path, target_path)
            else:
                shutil.copy(src_path, target_path)

    # Step5. Create experiment metadata
    experiments_config.add_experiment(
        experiment_id, experiment_metadata.get('port'),
        experiment_metadata.get('startTime'),
        experiment_metadata.get('platform'),
        experiment_metadata.get('experimentName'),
        experiment_metadata.get('endTime'), experiment_metadata.get('status'),
        experiment_metadata.get('tag'), experiment_metadata.get('pid'),
        experiment_metadata.get('webUrl'), logDir)
    print_normal('Loaded experiment %s successfully!' % experiment_id)

    # Step6. Cleanup temp data
    shutil.rmtree(temp_root_dir)
Beispiel #43
0
 def install(self):
     if not self.path.is_dir():
         github_runner = self.get_runner_app()
         self.path.parent.mkdir(exist_ok=True, parents=True)
         shutil.unpack_archive(str(github_runner.absolute()), str(self.path.absolute()))
Beispiel #44
0
# copystat(src, dst): copy permission bits and timestamps (placeholder filenames below)
shutil.copystat('a.txt', 'b.txt')

# copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
#              ignore_dangling_symlinks=False) copies a directory tree and its contents into
#              another directory; ignore accepts one or more filter patterns
# Note: a destination directory must be given, i.e. the contents of folder 'a' are copied
# into 'b' under the parent directory ('b' must not already exist)
shutil.copytree('a', '../b', ignore=shutil.ignore_patterns('*.bat', '*.py'))

# Get the disk usage of a path; returns a 3-tuple (total, used, free)
print(shutil.disk_usage('D:\\'))

# Show the supported archive formats / unpack formats
print(shutil.get_archive_formats())
print(shutil.get_unpack_formats())

# Create an archive: (archive name, format, path to archive)
shutil.make_archive('a_zip', 'zip', 'a')

# Move a file or directory to another path: (source path, destination path)
shutil.move('a', '../')

# Register a new archive / unpack format:
# shutil.register_archive_format(name, function[, extra_args[, description]])
# shutil.register_unpack_format(name, extensions, function[, extra_args[, description]])

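# A concrete (hypothetical) sketch of the registration API above: the 'myzip'
# name and extension are made up, and the callback simply delegates to zipfile.
import zipfile

def _unpack_myzip(filename, extract_dir):
    with zipfile.ZipFile(filename) as zf:
        zf.extractall(extract_dir)

shutil.register_unpack_format('myzip', ['.myzip'], _unpack_myzip)
# shutil.unpack_archive('a.myzip', 'a')  # would now dispatch to _unpack_myzip
shutil.unregister_unpack_format('myzip')
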
# Delete a directory and its contents; ignore_errors controls whether errors are ignored
# (default False: raises if the directory to delete does not exist)
shutil.rmtree('a', ignore_errors=True)
# Unpack an archive: unpack_archive(filename, extract_dir=None, format=None)
shutil.unpack_archive('a.zip', 'a')

# shutil.unregister_archive_format()
# shutil.unregister_unpack_format()
Beispiel #45
0
def unzip_file(a, b):
    shutil.unpack_archive(a, extract_dir=b)
Beispiel #46
0
 parser.add_argument('--ffmpeg_path',type=str,default='ffmpeg')
 parser.add_argument('--sample_rate',type=int,default=8000)
 parser.add_argument('--use_abs_path',default=False,action='store_true',help='Use absolute paths in resulting manifest')
 args = parser.parse_args()
 
 progress_function = lambda gen : gen
 try:
     import tqdm
     progress_function = tqdm.tqdm
 except ImportError:
     print('tqdm not available, will not show progress')
 
 if not os.path.exists(args.extracted_dir):
     os.makedirs(args.extracted_dir,exist_ok=True)
     print('Unpacking archive')
     shutil.unpack_archive(args.zip_file,args.extracted_dir)
 	
 def convert_to_wav(src,dest,ffmpeg_path,sr):
     if os.path.exists(dest):
         return
     conversion_command = r'%s -hide_banner -loglevel warning -i %s -ar %d %s' % (ffmpeg_path,src,sr,dest)
     subprocess.check_output(conversion_command.split(' '))
 
 
 def get_wav_filename(src):
     return os.path.join(args.target_dir,os.path.splitext(os.path.basename(src))[0] + '.wav')
 
 
 def convert_librispeech_audio(src):
     dest = get_wav_filename(src)
     convert_to_wav(src,dest,args.ffmpeg_path,args.sample_rate)
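
 # A hypothetical driver for the helpers above (assumes the --extracted_dir
 # argument from this script and that the unpacked archive contains .flac files):
 import glob
 flac_files = glob.glob(os.path.join(args.extracted_dir, '**', '*.flac'), recursive=True)
 for src_file in progress_function(flac_files):
     convert_librispeech_audio(src_file)
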
# destination folder
destination_folder = Path('data')

# set up some filenames
zip_file = destination_folder.joinpath(zipfile)
shape_file = zip_file.with_name(zipfile.replace('zip', 'shp'))

# download zip if need to
if not Path(zip_file).exists():
    r = requests.get(tm_borders_url)
    with open(zip_file, 'wb') as fp:
        fp.write(r.content)

# extract shp from zip if need to
if force or not Path(shape_file).exists():
    shutil.unpack_archive(zip_file.as_posix(), extract_dir=destination_folder)

# Read the LAI dataset for a given country and year
# read in the LAI data for given country code
from geog0111.process_timeseries import process_timeseries
'''
Note, the saved npz file can be quite large
e.g. 8.1 G for France.

You can override saving it by setting save = False
but if it is saved, it will be faster to access
data the next time you need it.

If you have a slow network, you might set download=False
'''
save = True
Beispiel #48
0
def main() -> None:
    import argparse
    parser = argparse.ArgumentParser(
        description="bootstrapping utility for hadrian.",
        epilog=USAGE,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-d',
                        '--deps',
                        type=Path,
                        help='bootstrap dependency file (plan-bootstrap.json)')
    parser.add_argument('-w', '--with-compiler', type=Path, help='path to GHC')
    parser.add_argument('-s',
                        '--bootstrap-sources',
                        type=Path,
                        help='Path to prefetched bootstrap sources tarball')

    subparsers = parser.add_subparsers(dest="command")

    parser_list = subparsers.add_parser(
        'list-sources', help='list all sources required to download')
    parser_list.add_argument('-o',
                             '--output',
                             type=Path,
                             default='fetch_plan.json')

    parser_fetch = subparsers.add_parser(
        'fetch',
        help='fetch all required sources from hackage (for offline builds)')
    parser_fetch.add_argument('-o',
                              '--output',
                              type=Path,
                              default='bootstrap-sources')
    parser_fetch.add_argument(
        '-p',
        '--fetch-plan',
        type=Path,
        default=None,
        help=
        "A json document that lists the urls required for download (optional)")

    args = parser.parse_args()

    ghc = None

    if args.deps is None:
        if args.bootstrap_sources is None:
            # find appropriate plan in the same directory as the script
            ghc = find_ghc(args.with_compiler)
            args.deps = Path(
                sys.path[0]
            ) / f"plan-bootstrap-{ghc.version.replace('.','_')}.json"
            print(f"defaulting bootstrap plan to {args.deps}")
        # We have a tarball with all the required information; unpack it and use it for the remaining steps
        elif args.bootstrap_sources is not None and args.command != 'list-sources':
            print(f'Unpacking {args.bootstrap_sources} to {TARBALLS}')
            shutil.unpack_archive(args.bootstrap_sources.resolve(), TARBALLS,
                                  'gztar')
            args.deps = TARBALLS / 'plan-bootstrap.json'
            print(
                f"using plan-bootstrap.json ({args.deps}) from {args.bootstrap_sources}"
            )
        else:
            print(
                "We need a bootstrap plan (plan-bootstrap.json) or a tarball containing bootstrap information"
            )
            print("Perhaps pick an appropriate one from: ")
            for child in Path(sys.path[0]).iterdir():
                if child.match('plan-bootstrap-*.json'):
                    print(child)
            sys.exit(1)
    info = read_bootstrap_info(args.deps)

    print(
        dedent("""
        DO NOT use this script if you have another recent cabal-install available.
        This script is intended only for bootstrapping hadrian on new
        architectures.
    """))

    if (args.command == 'fetch'):
        if args.fetch_plan is not None:
            plan = {
                path: FetchInfo(p["url"], p["sha256"])
                for path, p in json.load(args.fetch_plan.open()).items()
            }
        else:
            plan = gen_fetch_plan(info)

        if ghc is None:
            ghc = find_ghc(args.with_compiler)

        # In temporary directory, create a directory which we will archive
        tmpdir = TMPDIR.resolve()
        tmpdir.mkdir(parents=True, exist_ok=True)

        rootdir = Path(tempfile.mkdtemp(dir=tmpdir))

        fetch_from_plan(plan, rootdir)

        shutil.copyfile(args.deps, rootdir / 'plan-bootstrap.json')

        fmt = 'gztar'
        if platform.system() == 'Windows': fmt = 'zip'

        archivename = shutil.make_archive(args.output, fmt, root_dir=rootdir)

        print(f"""
Bootstrap sources saved to {archivename}

Use `bootstrap.py -w {ghc.ghc_path} -s {archivename}` to continue
""")

    elif (args.command == 'list-sources'):
        ghc = find_ghc(args.with_compiler)
        plan = gen_fetch_plan(info)
        with open(args.output, 'w') as out:
            json.dump({path: val._asdict() for path, val in plan.items()}, out)
        print(f"Required hackage sources saved to {args.output}")
        tarfmt = "\n./"
        print(f"""
Download the files listed in {args.output}, copying {args.deps} to plan-bootstrap.json, and save them to a tarball ($TARBALL)

The contents of $TARBALL should look like:

./
./plan-bootstrap.json
./{tarfmt.join(path for path in plan)}

To generate $TARBALL, e.g. files in sources-tarball, `TARBALL=sources-tarball.tar.gz; pushd sources-tarball; tar -zcf ../$TARBALL .; popd`

Then use `bootstrap.py -w {ghc.ghc_path} -s $TARBALL` to continue

Alternatively, you could use `bootstrap.py -w {ghc.ghc_path} -d {args.deps} fetch -o sources-tarball` to download and generate the tarball, skipping this step
""")

    elif args.command is None:
        if ghc is None:
            ghc = find_ghc(args.with_compiler)

        print(
            f'Bootstrapping hadrian with GHC {ghc.version} at {ghc.ghc_path}...'
        )

        if args.bootstrap_sources is None:
            plan = gen_fetch_plan(info)
            fetch_from_plan(plan, TARBALLS)

        bootstrap(info, ghc)
        hadrian_path = (BINDIR / 'hadrian').resolve()

        archive = make_archive(hadrian_path)

        print(
            dedent(f'''
            Bootstrapping finished!

            The resulting hadrian executable can be found at

                {hadrian_path}

            It has been archived for distribution in

                {archive}

            You can use this executable to build GHC.
        '''))
    else:
        print(f"No such command: {args.command}")
Beispiel #49
0
def main(parsec_source):
    BUILD_DIR.mkdir(parents=True, exist_ok=True)

    # Retrieve parsec version
    global_dict = {}
    exec((parsec_source / "parsec/_version.py").read_text(), global_dict)
    parsec_version = global_dict.get("__version__")
    print(f"### Detected Parsec version {parsec_version} ###")

    # Fetch CPython distrib
    if not CPYTHON_DISTRIB_ARCHIVE.is_file():
        print(f"### Fetch CPython {CPYTHON_VERSION} build ###")
        req = urlopen(CPYTHON_DISTRIB_URL)
        CPYTHON_DISTRIB_ARCHIVE.write_bytes(req.read())

    # Bootstrap tools virtualenv
    if not TOOLS_VENV_DIR.is_dir():
        print(f"### Create tool virtualenv ###")
        run(f"python -m venv {TOOLS_VENV_DIR}")
        run(f"{ TOOLS_VENV_DIR / 'Scripts/python' } -m pip install wheel")

    if not WHEELS_DIR.is_dir():
        print(f"### Generate wheels from Parsec&dependencies ###")
        run(
            f"{ TOOLS_VENV_DIR / 'Scripts/python' } -m pip wheel {parsec_source}[core] --wheel-dir {WHEELS_DIR}"
        )

    # Now we actually generate the build target

    target_dir = BUILD_DIR / f"parsec-{parsec_version}-{get_archslug()}"
    if target_dir.exists():
        raise SystemExit(f"{target_dir} already exists, exiting...")

    # Extract CPython distrib
    print(f"### Extracting CPython embedded distribution ###")
    shutil.unpack_archive(str(CPYTHON_DISTRIB_ARCHIVE), extract_dir=str(target_dir))

    # Bootstrap build virtualenv
    build_venv_dir = target_dir / "build_venv"
    print(f"### Installing wheels in temporary virtualenv ###")
    run(f"python -m venv {build_venv_dir}")
    wheels = " ".join(map(str, WHEELS_DIR.glob("*.whl")))
    run(f"{ build_venv_dir / 'Scripts/python' } -m pip install {wheels}")

    # Move build virtualenv's site-packages to the build and patch imports
    print(f"### Move site-packages to embedded distribution ###")
    shutil.move(build_venv_dir / "Lib/site-packages", target_dir / "site-packages")
    shutil.rmtree(build_venv_dir)
    pth_file, = target_dir.glob("*._pth")
    pth_file.write_text(pth_file.read_text() + "site-packages\n")

    # Include LICENSE file
    (target_dir / "LICENSE.txt").write_text((parsec_source / "LICENSE").read_text())

    # Build parsec.exe
    resource_rc = BUILD_DIR / "resource.rc"
    resource_res = BUILD_DIR / "resource.res"
    versioninfo = (*re.match(r"^.*([0-9]+)\.([0-9]+)\.([0-9]+)", parsec_version).groups(), "0")
    escaped_parsec_ico = str(Path("parsec.ico").resolve()).replace("\\", "\\\\")
    escaped_parsec_manifest = str(Path("parsec.manifest").resolve()).replace("\\", "\\\\")
    resource_rc.write_text(
        f"""
#include <windows.h>

1 RT_MANIFEST "{escaped_parsec_manifest}"
2 ICON "{escaped_parsec_ico}"

VS_VERSION_INFO VERSIONINFO
FILEVERSION     {','.join(versioninfo)}
PRODUCTVERSION  {','.join(versioninfo)}
FILEFLAGSMASK 0x3fL
FILEFLAGS 0x0L
FILEOS VOS__WINDOWS32
FILETYPE VFT_APP
FILESUBTYPE 0x0L
BEGIN
    BLOCK "StringFileInfo"
    BEGIN
        BLOCK "000004b0"
        BEGIN
            VALUE "CompanyName",      "Scille SAS\\0"
            VALUE "FileDescription",  "Parsec Secure Cloud Storage\\0"
            VALUE "FileVersion",      "{parsec_version}\\0"
            VALUE "InternalName",     "Parsec GUI Bootstrapper\\0"
            VALUE "LegalCopyright",   "Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2019 Scille SAS\\0"
            VALUE "OriginalFilename", "parsec.exe\\0"
            VALUE "ProductName",      "Parsec\\0"
            VALUE "ProductVersion",   "{parsec_version}\\0"
        END
    END
    BLOCK "VarFileInfo"
    BEGIN
        VALUE "Translation", 0x0, 1200
    END
END
"""
    )
    run(f"rc.exe /i. /fo {resource_res} {resource_rc}")
    # Must make sure /Fo option ends with a "\", otherwise it is not considered as a folder...
    run(f"cl.exe parsec-bootstrap.c /c /I { CPYTHON_DIR / 'include' } /Fo{BUILD_DIR}\\")
    run(
        f"link.exe { BUILD_DIR / 'parsec-bootstrap.obj' } {resource_res} "
        f"/LIBPATH:{ CPYTHON_DIR / 'libs' } /OUT:{ target_dir / 'parsec.exe' } "
        f"/subsystem:windows /entry:mainCRTStartup"
    )

    # Create build info file for NSIS installer
    (BUILD_DIR / "BUILD.tmp").write_text(
        f'target = "{target_dir}"\n'
        f'parsec_version = "{parsec_version}"\n'
        f'python_version = "{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"\n'
        f'platform = "{get_archslug()}"\n'
    )

    # Create the install and uninstall file list for NSIS installer
    target_files = []

    def _recursive_collect_target_files(curr_dir):
        subdirs = []
        for entry in curr_dir.iterdir():
            if entry.is_dir():
                subdirs.append(entry)
            else:
                target_files.append((False, entry.relative_to(target_dir)))
        for subdir in subdirs:
            target_files.append((True, subdir.relative_to(target_dir)))
            _recursive_collect_target_files(subdir)

    _recursive_collect_target_files(target_dir)

    install_files_lines = ["; Files to install", 'SetOutPath "$INSTDIR\\"']
    curr_dir = Path(".")
    for target_is_dir, target_file in target_files:
        if target_is_dir:
            install_files_lines.append(f'SetOutPath "$INSTDIR\\{target_file}"')
            curr_dir = target_file
        else:
            assert curr_dir == target_file.parent
            install_files_lines.append(f'File "${{PARSEC_FREEZE_BUILD_DIR}}\\{target_file}"')
    (BUILD_DIR / "install_files.nsh").write_text("\n".join(install_files_lines))

    uninstall_files_lines = ["; Files to uninstall"]
    for target_is_dir, target_file in reversed(target_files):
        if target_is_dir:
            uninstall_files_lines.append(f'RMDir "$INSTDIR\\{target_file}"')
        else:
            uninstall_files_lines.append(f'Delete "$INSTDIR\\{target_file}"')
    (BUILD_DIR / "uninstall_files.nsh").write_text("\n".join(uninstall_files_lines))
Beispiel #50
0
def update_the_challenge():
    """
    Updates the local copy of The Challenge.
    """

    # Check if the user is online
    if not check_internet_connection():
        print("You are not connected to the internet. Try again later.")
        sys.exit()

    # Get the latest version
    most_recent_version, most_recent_files = get_most_recent_version_and_files(
    )

    # Get local version
    local_version = __version__

    # Check if local version is smaller than the GitHub version
    if version.parse(local_version) < version.parse(most_recent_version):
        print(
            f"There is a new version, {most_recent_version}, available.\n(Installed Version: {__version__})\n"
        )

        while True:
            print("Do you want to update to the new version?")
            want_to_update = input("[Y]es or [N]o: ").upper()

            if want_to_update not in ["Y", "N"]:
                print("Please enter either 'Y' or 'N'.\n")
            elif want_to_update == "N":
                print("Keeping local version. Quitting now.")
                sys.exit()
            else:
                print("Starting update process...")
                break
    else:
        print(
            "You are already on the latest version. Quitting the program now.")
        sys.exit()

    print()

    # Get the latest distribution file
    distribution_url = ""
    for file in most_recent_files:
        if file["path"].find("The-Challenge-Production-Server_") != -1:
            distribution_url = file["url"]
            break

    # Download the latest distribution
    if distribution_url != "":
        print("Downloading latest distribution...")
        download_request = requests.get(distribution_url)
        download_request.raise_for_status()
        print("Done!")

        print("Writing distribution package contents to file...")
        with open("./The-Challenge_Latest-Dist.tar.gz", "wb+") as f:
            f.write(b64decode(json.loads(download_request.text)["content"]))
            f.close()
        print("Done!")

    else:
        print("Can't get the latest distribution. Try again later.")
        sys.exit()

    # Extract the latest distribution
    print("Extracting contents of latest distribution...")
    shutil.unpack_archive("The-Challenge_Latest-Dist.tar.gz", "./extracted")
    os.remove("./The-Challenge_Latest-Dist.tar.gz")
    print("Done!")

    # Recursively try to find the wheel file in the extracted folder
    try:
        latest_wheel_file = [
            f for f in glob.glob("./extracted/" + "**/*.whl", recursive=True)
        ][0]

        # Once found install it using pip
        os.system(f"pip install -U {latest_wheel_file}")

        print("The update was completed successfully.")

    except IndexError:
        print("The latest distribution file cannot be found. Quitting now...")
        sys.exit()

    # Clean up
    shutil.rmtree("./extracted")

    # Offer to automatically restart the service
    print("\n!!! IMPORTANT !!!")
    print(
        "Only answer 'Y' to the following prompt if you (a) are on Ubuntu; (b) have a systemd service that "
        "hosts The Challenge's server; and (c) are an administrator that can use the 'sudo' command."
    )
    print("!!! IMPORTANT !!!\n")

    while True:
        print("Would you like to restart the systemd service?")
        confirm_systemd_name = input("[Y]es or [N]o: ").upper()

        if confirm_systemd_name not in ["Y", "N"]:
            print("Please enter either 'Y' or 'N'.\n")
        elif confirm_systemd_name == "N":
            print("Quitting now.")
            sys.exit()
        else:
            break

    # Ask user to input the systemd service name
    while True:
        print("Please enter the systemd service name.")
        systemd_service_name = input("?> ")

        if systemd_service_name == "":
            print("Please enter the name.")
        else:
            print(
                "\nPlease confirm that you want to restart the systemd service named:"
            )
            print(f"'{systemd_service_name}'")

            while True:
                confirm_systemd_name = input("[Y]es or [N]o: ").upper()

                if confirm_systemd_name not in ["Y", "N"]:
                    print("Please enter either 'Y' or 'N'.\n")
                    print("Please confirm the systemd service name.")
                elif confirm_systemd_name == "N":
                    print(
                        "Disregarding current input of the systemd service name."
                    )
                    break
                else:
                    os.system(f"sudo systemctl restart {systemd_service_name}")
                    print("The systemd service has been restarted. Quitting.")
                    sys.exit()
Beispiel #51
0
    # download via sftp (using sshpass for the password)
    sftp_cmd = "sshpass -p 'burnt' sftp fire@" + url + " " + cfg.get(
        args.source, 'local_path')
    sftp_status = os.system(sftp_cmd)
    # TODO: add a timeout check for the sftp process
    # check sftp_status
    if sftp_status == 0:
        log.info('download finished: ' + localFilename)
        break
    else:
        # if sftp_status != 0, the download failed with some error
        log.info("attempt " + str(attempt) + ': error downloading: ' + url)
        sleep(120)

##### extract
shutil.unpack_archive(localFilename, cfg.get(args.source, 'local_path'))

##### dissolve geometries
import geopandas as gpd
from shapely.geometry.multipolygon import MultiPolygon

burned_area_file = localFilename.replace(".shapefiles.tar.gz", ".shp")
f_in = gpd.read_file(burned_area_file)
f_dissolve = gpd.geoseries.GeoSeries(
    MultiPolygon([geom for geom in f_in.unary_union.geoms]))
f_dissolve.crs = f_in.crs
df_clip = gpd.clip(f_dissolve, gpd.read_file("../shapes/Colombia.shp"))

for f in glob.glob(burned_area_file.replace(".shp", "*")):
    os.remove(f)
df_clip.to_file(burned_area_file)
4. Write to sql
"""

import shutil
import os
import glob
from simpledbf import Dbf5
import sqlite3
import pandas as pd

all_zip_files = glob.glob("data/customs_ru_raw/*.zip")
# file = all_zip_files[0]

for file in all_zip_files:
    # Unzip
    shutil.unpack_archive(file, "data/temp", "zip")

    # Read dbf to DataFrame
    df = Dbf5("data/temp/DATTSVT.dbf", codec="CP866").to_dataframe()

    # Delete the file that was just read
    os.remove("data/temp/DATTSVT.dbf")

    # Rename columns
    df = df.rename(
        columns={
            "Stoim": "usd",
            "Netto": "kg",
            "Kol": "qty",
            "period": "year_month",
            "edizm": "uom",
Beispiel #53
0
def _setupSSHDImpl(ngrok_token, ngrok_region, is_VNC):
    #apt-get update
    #apt-get upgrade
    my_apt = _MyApt()
    #Following packages are useless because nvidia kernel modules are already loaded and I cannot remove or update it.
    #Uninstall them because upgrading them take long time.
    my_apt.deleteInstalledPkg("nvidia-dkms", "nvidia-kernel-common",
                              "nvidia-kernel-source")
    my_apt.commit()
    my_apt.update_upgrade()
    my_apt.commit()

    subprocess.run(["unminimize"],
                   input="y\n",
                   check=True,
                   universal_newlines=True)

    my_apt.installPkg("openssh-server")
    my_apt.commit()
    my_apt.close()

    #Reset host keys
    for i in pathlib.Path("/etc/ssh").glob("ssh_host_*_key"):
        i.unlink()
    subprocess.run(["ssh-keygen", "-A"], check=True)

    #Prevent ssh session disconnection.
    with open("/etc/ssh/sshd_config", "a") as f:
        f.write("\n\nClientAliveInterval 120\n")

    msg = ""
    msg += "ECDSA key fingerprint of host:\n"
    ret = subprocess.run(
        ["ssh-keygen", "-lvf", "/etc/ssh/ssh_host_ecdsa_key.pub"],
        stdout=subprocess.PIPE,
        check=True,
        universal_newlines=True)
    msg += ret.stdout + "\n"

    _download(
        "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip",
        "ngrok.zip")
    shutil.unpack_archive("ngrok.zip")
    pathlib.Path("ngrok").chmod(stat.S_IXUSR)

    root_password = secrets.token_urlsafe()
    user_password = secrets.token_urlsafe()
    user_name = "colab"
    msg += "✂️" * 24 + "\n"
    msg += f"root password: {root_password}\n"
    msg += f"{user_name} password: {user_password}\n"
    msg += "✂️" * 24 + "\n"
    subprocess.run(["useradd", "-s", "/bin/bash", "-m", user_name])
    subprocess.run(["adduser", user_name, "sudo"], check=True)
    subprocess.run(["chpasswd"],
                   input=f"root:{root_password}",
                   universal_newlines=True)
    subprocess.run(["chpasswd"],
                   input=f"{user_name}:{user_password}",
                   universal_newlines=True)
    subprocess.run(["service", "ssh", "restart"])

    if not pathlib.Path('/root/.ngrok2/ngrok.yml').exists():
        subprocess.run(["./ngrok", "authtoken", ngrok_token])

    ngrok_proc = subprocess.Popen(
        ["./ngrok", "tcp", "-region", ngrok_region, "22"])
    time.sleep(2)
    if ngrok_proc.poll() is not None:
        raise RuntimeError("Failed to run ngrok. Return code:" +
                           str(ngrok_proc.returncode) +
                           "\nSee runtime log for more info.")

    with urllib.request.urlopen(
            "http://*****:*****@{hostname}\n"
    else:
        msg += "Command to connect to the ssh server:\n"
        msg += "✂️" * 24 + "\n"
        msg += f"ssh {ssh_common_options} -p {port} {user_name}@{hostname}\n"
        msg += "✂️" * 24 + "\n"
    return msg
Beispiel #54
0
def unpack_archive(path):
    shutil.unpack_archive(path)
Beispiel #55
0
import zipfile
import shutil

f = open("file_one.txt", "w+")
f.write("This is the first file")
f.close()


f = open("file_two.txt", "w+")
f.write("This is the second file")
f.close()

# compress files
comp_file = zipfile.ZipFile("files.zip", "w")
comp_file.write("file_one.txt", compress_type=zipfile.ZIP_DEFLATED)
comp_file.write("file_two.txt", compress_type=zipfile.ZIP_DEFLATED)
comp_file.close()

# decompress files
zip_obj = zipfile.ZipFile("files.zip", "r")
zip_obj.extractall("extracted_files")

# Creating a zip archive
zip_path = "E:\\Workspace\\Complete-Python-3-Bootcamp\\12-Advanced Python Modules"
out_file = "example"
shutil.make_archive(out_file, 'zip', zip_path)

# Extracting a zip archive
shutil.unpack_archive("example.zip", "final_unzip", 'zip')
Beispiel #56
0
    for match in matches:
        url = match
        filename = url[url.rfind("/")+1:]
        file_path = path.join(models_dir, filename)
        dir_name = filename.split(".")[0]
        dir_path = path.join(models_dir, dir_name)

        # if the archived file not exist, download it
        if not os.path.isfile(file_path) and not os.path.isdir(dir_path):
            print("downloading ... {}".format(filename))
            urllib.request.urlretrieve(url, filename=file_path)

        # if the dir already existed, no need to unarchive again
        if not os.path.isdir(dir_path):
            print("unarchiving ... {}".format(filename))
            shutil.unpack_archive(file_path, dir_path)

        # if remove is flagged and archive exist, remove if after unarchiving
        if os.path.isfile(file_path):
            os.remove(file_path)

        # https://github.com/tensorflow/models/tree/master/research/slim#exporting-the-inference-graph
        for f in os.listdir(dir_path):
            if f.endswith(".ckpt"):
                ckpt = f
                break
        if ckpt = "inception_resnet_v2_2016_08_30.ckpt":
            ckpt = "inception_resnet_v2.ckpt"
        pb = ckpt.split(".")[0] + ".pb"
        cmd = "python " + os.environ["GOPATH"]+"/src/github.com/tensorflow/models/research/slim/export_inference_graph.py --alsologtostderr --model_name=" + ckpt.split(".")[
            0] + " --output_file=" + path.join(dir_path, pb)
Beispiel #57
0
    def _prepare_task(self, identifier, description):
        """
        Prepare a working directory before starting the solution.

        :param identifier: Verification task identifier.
        :param description: Dictionary with task description.
        :raise SchedulerException: If a task cannot be scheduled or preparation failed.
        """
        # Prepare working directory
        task_work_dir = os.path.join(self.work_dir, "tasks", identifier)
        task_data_dir = os.path.join(task_work_dir, "data")
        job_id = description['job id']

        self.logger.debug(
            "Make directory for the task to solve {!r}".format(task_data_dir))
        os.makedirs(task_data_dir.encode("utf8"), exist_ok=True)

        # This method can be called several times to adjust resource limitations but we should avoid extra downloads
        # from the server
        if identifier not in self.__tasks:
            archive = os.path.join(task_work_dir, "task.zip")
            self.logger.debug(
                "Pull from the verification gateway archive {!r}".format(
                    archive))
            ret = self.server.pull_task(identifier, archive)
            if not ret:
                self.logger.info(
                    "Seems that the task data cannot be downloaded because of a respected reason, "
                    "so we have nothing to do there")
                os._exit(1)
            self.logger.debug("Unpack archive {!r} to {!r}".format(
                archive, task_data_dir))
            shutil.unpack_archive(archive, task_data_dir)

            # Update description
            description.update(self.__get_credentials(job_id))

        # TODO: Add more exceptions handling to make code more reliable
        with open(os.path.join(
                os.path.join(self.work_dir, "tasks", identifier), "task.json"),
                  "w",
                  encoding="utf8") as fp:
            json.dump(description,
                      fp,
                      ensure_ascii=False,
                      sort_keys=True,
                      indent=4)

        # Prepare command to submit
        self.logger.debug(
            "Prepare arguments of the task {!r}".format(identifier))
        task_data_dir = os.path.join(self.work_dir, "tasks", identifier,
                                     "data")
        try:
            assert description["priority"] in ["LOW", "IDLE"]
            run = Run(task_data_dir, description)
        except Exception as err:
            raise schedulers.SchedulerException(
                'Cannot prepare task description on base of given benchmark.xml: {}'
                .format(err))

        self.__track_task(job_id, run, identifier)
        return True
Beispiel #58
0
def unpack(zipfile: str):
    filename = datapath(zipfile)
    rootdir = datapath(zipfile[:-4])
    shutil.unpack_archive(filename, extract_dir=rootdir, format="zip")
Beispiel #59
0
    def _process_task_result(self, identifier, future, description):
        """
        Process result and send results to the server.

        :param identifier: Task identifier string.
        :param future: Future object.
        :return: status of the task after solution: FINISHED.
        :raise SchedulerException: in case of ERROR status.
        """
        run = self.__tasks[identifier]
        self.__drop_task(identifier)

        task_work_dir = os.path.join(self.work_dir, "tasks", identifier)
        solution_file = os.path.join(task_work_dir, "solution.zip")
        self.logger.debug(
            "Save solution to the disk as {}".format(solution_file))
        try:
            result = future.result()
        except Exception as err:
            error_msg = "Task {} has been finished but no data has been received: {}".format(
                identifier, err)
            self.logger.warning(error_msg)
            raise schedulers.SchedulerException(error_msg)

        # Save result
        with open(solution_file, 'wb') as sa:
            sa.write(result)

        # Unpack results
        task_solution_dir = os.path.join(task_work_dir, "solution")
        self.logger.debug(
            "Make directory for the solution to extract {0}".format(
                task_solution_dir))
        os.makedirs(task_solution_dir.encode("utf8"), exist_ok=True)
        self.logger.debug("Extract results from {} to {}".format(
            solution_file, task_solution_dir))
        shutil.unpack_archive(solution_file, task_solution_dir)
        # Process results and convert RunExec output to result description
        # TODO: what will happen if there will be several input files?
        # Simulate BenchExec behaviour when one input file is provided.
        os.makedirs(os.path.join(task_solution_dir, "output",
                                 "benchmark.logfiles").encode("utf8"),
                    exist_ok=True)
        shutil.move(
            os.path.join(task_solution_dir, 'output.log'),
            os.path.join(
                task_solution_dir, "output", "benchmark.logfiles",
                "{}.log".format(os.path.basename(run.run.sourcefiles[0]))))

        try:
            solution_identifier, solution_description = self.__extract_description(
                task_solution_dir)
            self.logger.debug(
                "Successfully extracted solution {} for task {}".format(
                    solution_identifier, identifier))
        except Exception as err:
            self.logger.warning(
                "Cannot extract results from a solution: {}".format(err))
            raise err

        # Make fake BenchExec XML report
        self.__make_fake_benchexec(
            solution_description,
            os.path.join(task_work_dir, 'solution', 'output',
                         "benchmark.results.xml"))

        # Add actual restrictions
        solution_description['resource limits'] = description[
            "resource limits"]

        # Make archive
        solution_archive = os.path.join(task_work_dir, "solution")
        self.logger.debug(
            "Make archive {} with a solution of the task {}.zip".format(
                solution_archive, identifier))
        shutil.make_archive(solution_archive, 'zip', task_solution_dir)
        solution_archive += ".zip"

        # Push result
        self.logger.debug(
            "Upload solution archive {} of the task {} to the verification gateway"
            .format(solution_archive, identifier))
        try:
            utils.submit_task_results(self.logger, self.server,
                                      self.scheduler_type(), identifier,
                                      solution_description,
                                      os.path.join(task_work_dir, "solution"))
        except Exception as err:
            error_msg = "Cannot submit solution results of task {}: {}".format(
                identifier, err)
            self.logger.warning(error_msg)
            raise schedulers.SchedulerException(error_msg)

        if "keep working directory" not in self.conf["scheduler"] or \
                not self.conf["scheduler"]["keep working directory"]:
            self.logger.debug("Clean task working directory {} for {}".format(
                task_work_dir, identifier))
            shutil.rmtree(task_work_dir)

        self.logger.debug(
            "Task {} has been processed successfully".format(identifier))
        return "FINISHED"
Beispiel #60
0
#######################################################################################
#  In the name of God, the Compassionate, the Merciful
#  Pyabr (c) 2020 Mani Jamali. GNU General Public License v3.0
#
#  Programmer & Creator:    Mani Jamali <*****@*****.**>
#  Telegram or Gap channel: @pyabr
#  Telegram or Gap group:   @pyabr_community
#  Git source:              github.com/manijamali2003/pyabr
#
#######################################################################################

import shutil, os, sys

if not os.path.isdir("build-packs"): os.mkdir("build-packs")
if not os.path.isdir("wheel/src"): os.mkdir("wheel/src")
shutil.unpack_archive('wheel/setup.zip', 'wheel/setup',
                      'zip')  # Unpack setup wheel package

## Copy all files and dirs into wheel/src ##

list = os.listdir('.')
list.remove('.git')
list.remove('.idea')
list.remove('wheel')
list.remove('latest')

for i in list:
    if os.path.isdir(i):
        shutil.copytree(i, 'wheel/src/' + i)
    else:
        shutil.copyfile(i, 'wheel/src/' + i)