Example #1
def dump_db(db_name, stream, backup_format='zip'):
    """Dump database `db` into file-like object `stream` if stream is None
    return a file object with the dump """

    _logger.info('DUMP DB: %s format %s', db_name, backup_format)

    cmd = ['pg_dump', '--no-owner']
    cmd.append(db_name)

    if backup_format == 'zip':
        with odoo.tools.osutil.tempdir() as dump_dir:
            filestore = odoo.tools.config.filestore(db_name)
            if os.path.exists(filestore):
                shutil.copytree(filestore, os.path.join(dump_dir, 'filestore'))
            with open(os.path.join(dump_dir, 'manifest.json'), 'w') as fh:
                db = odoo.sql_db.db_connect(db_name)
                with db.cursor() as cr:
                    json.dump(dump_db_manifest(cr), fh, indent=4)
            cmd.insert(-1, '--file=' + os.path.join(dump_dir, 'dump.sql'))
            odoo.tools.exec_pg_command(*cmd)
            if stream:
                odoo.tools.osutil.zip_dir(dump_dir, stream, include_dir=False, fnct_sort=lambda file_name: file_name != 'dump.sql')
            else:
                t = tempfile.TemporaryFile()
                odoo.tools.osutil.zip_dir(dump_dir, t, include_dir=False, fnct_sort=lambda file_name: file_name != 'dump.sql')
                t.seek(0)
                return t
    else:
        cmd.insert(-1, '--format=c')
        stdin, stdout = odoo.tools.exec_pg_command_pipe(*cmd)
        if stream:
            shutil.copyfileobj(stdout, stream)
        else:
            return stdout
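A minimal usage sketch for dump_db, assuming a configured Odoo server environment (the odoo helpers, _logger and dump_db_manifest come from the surrounding module); the database name and paths are placeholders:

# Hypothetical usage -- 'mydb' and the target path are illustrative.
with open('/tmp/mydb.zip', 'wb') as stream:
    dump_db('mydb', stream)                        # zip backup written into `stream`

pipe = dump_db('mydb', None, backup_format='dump')  # custom-format dump; returns the pg_dump stdout pipe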
Example #2
    def copy_linter(self, name):
        """Copy the template linter to a new linter with the given name."""

        self.name = name
        self.fullname = 'SublimeLinter-contrib-{}'.format(name)
        self.dest = os.path.join(sublime.packages_path(), self.fullname)

        if os.path.exists(self.dest):
            sublime.error_message('The plugin “{}” already exists.'.format(self.fullname))
            return

        src = os.path.join(sublime.packages_path(), persist.PLUGIN_DIRECTORY, 'linter-plugin-template')
        self.temp_dir = None

        try:
            self.temp_dir = tempfile.mkdtemp()
            self.temp_dest = os.path.join(self.temp_dir, self.fullname)
            shutil.copytree(src, self.temp_dest)

            self.get_linter_language(name, self.configure_linter)

        except Exception as ex:
            if self.temp_dir and os.path.exists(self.temp_dir):
                shutil.rmtree(self.temp_dir)

            sublime.error_message('An error occurred while copying the template plugin: {}'.format(str(ex)))
Example #3
def cp_r(src, dest):
    src = expandPath(src)
    dest = expandPath(dest)
    if os.path.isdir(src):
        shutil.copytree(src, dest)
    else:
        shutil.copy(src, dest)
Example #4
  def _InstrumentExecutables(self):
    build_dir = self._build_dir
    work_dir = self._work_dir
    _LOGGER.info('Build dir "%s".', build_dir)

    # Copy all unittest executables, DLLs, PDBs and test_data from the
    # build directory into the work directory.
    for pattern in ('*_unittests.exe', '*.dll', '*.pdb', 'test_data'):
      files = glob.glob(os.path.join(build_dir, pattern))
      for path in files:
        _LOGGER.info('Copying "%s" to "%s".', path, work_dir)
        if os.path.isdir(path):
          # If the source file is a directory, do a recursive copy.
          dst = os.path.join(work_dir, os.path.basename(path))
          shutil.copytree(path, dst)
        else:
          shutil.copy(path, work_dir)

    # Instrument all EXEs in the work dir.
    for exe in glob.glob(os.path.join(work_dir, '*.exe')):
      self._InstrumentOneFile(exe)

    # And the DLLs we've specified.
    for dll in _DLLS_TO_INSTRUMENT:
      self._InstrumentOneFile(os.path.join(work_dir, dll))
Example #5
def cleanupFiles():
    # First get rid of modified files
    for l in ["l1", "l2", "l3"]:
        arcpy.Delete_management(l)

    for f in glob.glob("C:\\Arctmp\\*"):
        try:
            shutil.rmtree(f)
        except:
            print "UNABLE TO REMOVE:", f
    # Find an unused workspace directory name
    for i in xrange(0, 1000000):
        new_workspace = "C:\\Arctmp\\workspace." + str(i)
        if not os.path.exists(new_workspace):
            break
    print "TESTING USING WORKSPACE", new_workspace
    # Now move in fresh copies
    shutil.copytree("C:\\Arcbase", new_workspace)
    print "CONTENTS:"
    arcpy.env.workspace = new_workspace
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.shp")):
        print f
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.lyr")):
        print f
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.gdb")):
        print f
Example #6
    def test_build_debian(self):
        from . import pkgbuild

        tmpdir = tempfile.mkdtemp()
        try:
            source = PackageSource.objects.get(id=1)
            br = BuildRecord(source=source, build_counter=10, sha='e65b55054c5220321c56bb3dfa96fbe5199f329c')
            br.save()

            basedir = os.path.join(tmpdir, 'd')
            shutil.copytree(os.path.join(os.path.dirname(__file__), 'test_data', 'debian'), basedir)

            orig_stdout = sys.stdout
            sys.stdout = StringIO()
            try:
                pkgbuild.main(['--basedir', basedir, 'version', self.live_server_url + br.get_absolute_url()])
                self.assertEquals(sys.stdout.getvalue(), '0.1+10')
                sys.stdout = StringIO()

                pkgbuild.main(['--basedir', basedir, 'name', self.live_server_url + br.get_absolute_url()])
                self.assertEquals(sys.stdout.getvalue(), 'buildsvctest')

                pkgbuild.main(['--basedir', basedir, 'build', self.live_server_url + br.get_absolute_url()])
            finally:
                sys.stdout = orig_stdout

            self.assertTrue(os.path.exists(os.path.join(basedir, 'buildsvctest_0.1+10_source.changes')))
            self.assertTrue(os.path.exists(os.path.join(basedir, 'buildsvctest_0.1+10_amd64.changes')))
        finally:
            shutil.rmtree(tmpdir)
Example #7
 def install(self, spec, prefix):
     # cppcheck does not have a configure script
     make("CFGDIR=%s" % os.path.join(prefix, 'cfg'))
     # manually install the final cppcheck binary
     mkdirp(prefix.bin)
     install('cppcheck', prefix.bin)
     shutil.copytree('cfg', os.path.join(prefix, 'cfg'))
Example #8
 def setUpClass(cls):
     """Run before all tests"""
     cls.port = QGIS_SERVER_WFST_PORT
     # Create tmp folder
     cls.temp_path = tempfile.mkdtemp()
     cls.testdata_path = cls.temp_path + '/' + 'wfs_transactional' + '/'
     copytree(unitTestDataPath('wfs_transactional') + '/',
              cls.temp_path + '/' + 'wfs_transactional')
     cls.project_path = cls.temp_path + '/' + 'wfs_transactional' + '/' + \
         'wfs_transactional.qgs'
     assert os.path.exists(cls.project_path), "Project not found: %s" % \
         cls.project_path
     # Clean env just to be sure
     env_vars = ['QUERY_STRING', 'QGIS_PROJECT_FILE']
     for ev in env_vars:
         try:
             del os.environ[ev]
         except KeyError:
             pass
     # Clear all test layers
     for ln in ['test_point', 'test_polygon', 'test_linestring']:
         cls._clearLayer(ln)
     os.environ['QGIS_SERVER_PORT'] = str(cls.port)
     server_path = os.path.dirname(os.path.realpath(__file__)) + \
         '/qgis_wrapped_server.py'
     cls.server = subprocess.Popen([sys.executable, server_path],
                                   env=os.environ, stdout=subprocess.PIPE)
     line = cls.server.stdout.readline()
     cls.port = int(re.findall(rb':(\d+)', line)[0])
     assert cls.port != 0
     # Wait for the server process to start
     assert waitServer('http://127.0.0.1:%s' % cls.port), "Server is not responding!"
Example #9
    def copy_template():
        config_prompt(template)
        shutil.copytree(template, name)

        if os.path.exists('%s/%s' % (name, 'config.yaml')):
            os.remove('%s/%s' % (name, 'config.yaml'))

        for dirname, dirnames, files in os.walk(name):
            for d in dirnames:
                if d == options.template:
                    shutil.copytree('%s/%s' % (dirname, d), '%s/%s' % (dirname, name))
                    shutil.rmtree('%s/%s' % (dirname, d))

        for dirname, dirnames, files in os.walk(name):
            for filename in files:
                f = open('%s/%s' % (dirname, filename), 'r')
                lines = f.readlines()
                f.close()

                first_pass = [re.sub(r'{{\s*(\w+)\s*}}', replace_variable, line) for line in lines]
                new_lines = [re.sub(r'__config_(\w+)__', replace_variable, line) for line in first_pass]

                f = open('%s/%s' % (dirname, filename), 'w')
                f.write(''.join(new_lines))
                f.close()
Example #10
def _copy_contents(dst_dir, contents):
    items = {"dirs": set(), "files": set()}

    for path in contents:
        if isdir(path):
            items['dirs'].add(path)
        elif isfile(path):
            items['files'].add(path)

    dst_dir_name = basename(dst_dir)

    if dst_dir_name == "src" and len(items['dirs']) == 1:
        copytree(list(items['dirs']).pop(), dst_dir, symlinks=True)
    else:
        makedirs(dst_dir)
        for d in items['dirs']:
            copytree(d, join(dst_dir, basename(d)), symlinks=True)

    if not items['files']:
        return

    if dst_dir_name == "lib":
        dst_dir = join(dst_dir, mkdtemp(dir=dst_dir))

    for f in items['files']:
        copyfile(f, join(dst_dir, basename(f)))
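A hypothetical illustration of the special case in _copy_contents above: when the destination is named "src" and there is exactly one source directory, that directory's contents become the destination itself; otherwise each directory is copied underneath it (all paths are placeholders):

# One directory into a dst named "src": lib/foo is mirrored directly as project/src.
_copy_contents('project/src', ['lib/foo'])
# Several items into any other dst: copied as project/deps/foo, project/deps/bar, ...
_copy_contents('project/deps', ['lib/foo', 'lib/bar', 'lib/README.txt'])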
Example #11
def share_static(main):
    """Makes the sphinx _static folder shared.

    Can be run multiple times to dedup newly added modules.
    """

    rewrite_static_links(main)

    roots = []
    for entry in os.listdir(main):
        if entry.startswith(("_", ".")) or "-" not in entry:
            continue
        path = os.path.join(main, entry)
        if not os.path.isdir(path):
            continue
        roots.append(path)

    shared = os.path.join(main, "_static")

    if not os.path.exists(shared):
        # copy one to the root
        shutil.rmtree(shared, ignore_errors=True)
        shutil.copytree(os.path.join(roots[0], "_static"), shared)

    # remove all others
    for root in roots:
        static = os.path.join(root, "_static")
        shutil.rmtree(static, ignore_errors=True)
Example #12
 def run_check(*args):
     # create a fresh tree for the profile work everytime.
     # do this, so that it's always a unique pathway- this sidesteps
     # any potential issues of ProfileNode instance caching.
     path = pjoin(self.dir, 'foo', str(counter.next()))
     shutil.copytree(pjoin(self.dir, 'foo'), path, symlinks=True)
     return self.process_check(path, list(args))
Example #13
def mvtree(simstr,comp_end=None):
    """
    Moves an entire run tree without changing file names.
    """
    # Set constants
    usrcomp=mmlinfo.hostname2compid()
    complist=mmlinfo.complist()
    usrindx=complist['compid'].index(usrcomp)
    # Get location of sim
    comp_beg=simstr['memcomp']
    # Get computer memory info
    if comp_beg not in complist['memlist'][usrindx]:
        raise Exception('Cannot access computer {} from the current one {}. Switch to bender.'.format(comp_beg,usrcomp))
    print 'Current tree location: {}'.format(comp_beg)
    if comp_end not in complist['memlist'][usrindx]:
        comp_end=mmlio.askselect('What computer should the tree be moved to?',complist['memlist'][usrindx])
    # Get filenames
    files_beg=simstr.mkfiledict(memcomp=comp_beg,checkaccess=True)
    files_end=simstr.mkfiledict(memcomp=comp_end,checkaccess=True)
    # Copy the tree
    print 'Source      run tree: '+files_beg['rundir']
    print 'Destination run tree: '+files_end['rundir']
    if mmlio.yorn('Continue moving tree?'):
        mmlfiles.mkdirs(files_end['simdir'])
        shutil.copytree(files_beg['rundir'],files_end['rundir'])
    else:
        return
    # Remove the old tree
    if mmlio.yorn('Remove old run tree?'):
        shutil.rmtree(files_beg['rundir'])
    # Update the run files
    if mmlio.yorn('Update the run files?'):
        simstr['memcomp']=comp_end
        mmlparam.savepar('mmlyt.simlist','galsim',dict(simstr),overwrite=True)
    return
Example #14
 def clone_from(self, source_url):
     '''Initialize a repo as a clone of another'''
     self._repo.set_status('cloning')
     log.info('Initialize %r as a clone of %s',
              self._repo, source_url)
     try:
         fullname = self._setup_paths(create_repo_dir=False)
         if os.path.exists(fullname):
             shutil.rmtree(fullname)
         if self.can_hotcopy(source_url):
             shutil.copytree(source_url, fullname)
             post_receive = os.path.join(
                 self._repo.full_fs_path, 'hooks', 'post-receive')
             if os.path.exists(post_receive):
                 os.rename(post_receive, post_receive + '-user')
             repo = git.Repo(fullname)
         else:
             repo = git.Repo.clone_from(
                 source_url,
                 to_path=fullname,
                 bare=True)
         self.__dict__['_git'] = repo
         self._setup_special_files(source_url)
     except:
         self._repo.set_status('ready')
         raise
Example #15
 def run(self):
     dst = self.config.get_dst_folder()
     cdv_dst = self.config.get_cordova_dst_folder(self.key)
     if os.path.exists(cdv_dst):
         names = os.listdir(cdv_dst)
         for name in names:
             if not name.startswith('.'):
                 name = os.path.join(cdv_dst, name)
                 if os.path.isfile(name):
                     os.remove(name)
                 else:
                     shutil.rmtree(name)
     names = os.listdir(dst)
     for name in names:
         if not name.startswith('.'):
             src = os.path.join(dst, name)
             copy = os.path.join(cdv_dst, name)
             if os.path.isfile(src):
                 shutil.copy(src, copy)
             else:
                 shutil.copytree(src, copy, ignore=shutil.ignore_patterns('.*'))
     for r, d, f in os.walk(cdv_dst):
         for files in filter(lambda x: x.endswith('.html'), f):
             p = os.path.join(r, files)
             self.replace_cordova_tag(p)
     self.copy_icons(dst)
     self.copy_splash(dst)
Example #16
def deploy_wnmp():
    os.chdir(os.path.join(BASE_DIR, 'wnmp'))
    git_export('wnmp', TARGET_DIR)

    # PHP
    wget('http://windows.php.net/downloads/releases/'
            'php-5.4.5-Win32-VC9-x86.zip',
        sha1='028eb12e09fe011e20097c82064d6c550bf896c4')
    logging.info('Extracting PHP...')
    path = os.path.join(BASE_DIR, '_tmp', 'php')
    makedirs(path, exist_ok=True)
    ar = zipfile.ZipFile(
        os.path.join(BASE_DIR, 'php-5.4.5-Win32-VC9-x86.zip'))
    ar.extractall(path)
    shutil.rmtree(os.path.join(TARGET_DIR, 'php'))
    shutil.copytree(path, os.path.join(TARGET_DIR, 'php'))

    # nginx
    wget('http://nginx.org/download/nginx-1.2.2.zip',
        sha1='0a5dfbb766bfefa238207db25d7b64b69aa37908')
    logging.info('Extracting nginx...')
    path = os.path.join(BASE_DIR, '_tmp')
    makedirs(path, exist_ok=True)
    ar = zipfile.ZipFile(
        os.path.join(BASE_DIR, 'nginx-1.2.2.zip'))
    ar.extractall(path)
    shutil.rmtree(os.path.join(TARGET_DIR, 'nginx'))
    shutil.copytree(os.path.join(path, 'nginx-1.2.2'),
        os.path.join(TARGET_DIR, 'nginx'))
    shutil.move(os.path.join(TARGET_DIR, 'example.nginx.conf'),
        os.path.join(TARGET_DIR, 'nginx', 'conf', 'nginx.conf'))

    # cleanup
    shutil.rmtree(os.path.join(BASE_DIR, '_tmp'))
Example #17
def copyDir():
    for file in os.listdir("."):
        if file in file_list:
            shutil.copy(file, dest)
        elif file in dir_list:
            destDir = dest + "/" + file
            shutil.copytree(file, destDir)
Example #18
 def copy(self, user):
     """ Override the copy method to make sure the marxan files get copied """
     orig = self.outdir
     copy = super(Scenario, self).copy(user)
     shutil.copytree(orig, copy.outdir, symlinks=True)
     copy.save(rerun=False)
     return copy
Example #19
def _get_temp_catalog_for_testing(subpath_test_files='test_context_enerpi',
                                  raw_file='enerpi_data_test.h5', check_integrity=True):
    """
    Copy example ENERPI files & sets common data catalog for testing.

    """
    print('TEST DEBUGGING: in get_temp_catalog_for_testing')
    dir_config = os.path.join(BASE_PATH, 'config')
    path_default_datapath = os.path.join(dir_config, '.enerpi_data_path')
    before_tests = open(path_default_datapath).read()

    # Prepare the files:
    path_files_test = os.path.abspath(os.path.join(BASE_PATH, '..', 'tests', 'rsc', subpath_test_files))
    tmp_dir = tempfile.TemporaryDirectory(prefix='ENERPIDATA_test')
    data_path = tmp_dir.name
    open(path_default_datapath, 'w').write(data_path)
    try:
        shutil.copytree(path_files_test, data_path)
    except FileExistsError:
        tmp_dir.cleanup()  # shutil.rmtree(data_path)
        shutil.copytree(path_files_test, data_path)
    # with patch('builtins.input', return_value='1'):
    #     from enerpi.base import reload_config
    #     from enerpi.api import enerpi_data_catalog
    #     cat = enerpi_data_catalog(base_path=data_path, raw_file=raw_file,
    # check_integrity=check_integrity, verbose=True)

    from enerpi.base import reload_config
    reload_config()
    from enerpi.api import enerpi_data_catalog
    cat = enerpi_data_catalog(base_path=data_path, raw_file=raw_file, check_integrity=check_integrity,
                              verbose=True, test_mode=True)

    return tmp_dir, data_path, cat, path_default_datapath, before_tests
Example #20
def walk(src, dest):
    print '****************************************************************'
    print dest
    print '****************************************************************'
    dirCmp = filecmp.dircmp(src, dest, ignore=['Thumbs.db'])
    for destFile in dirCmp.right_only:
        destFilePath = dest+'/'+destFile
        if os.path.isfile(destFilePath):
            print u'Deleting file\n', destFilePath
            os.remove(destFilePath)
        else:
            print u'Deleting directory\n', destFilePath
#            os.rmdir(destFilePath)
            shutil.rmtree(destFilePath)
    for srcFile in dirCmp.left_only:
        srcFilePath = src+'/'+srcFile
        destFilePath = dest+'/'+srcFile
        if os.path.isfile(srcFilePath):
            print u'Copying file\n', destFilePath
            shutil.copy2(srcFilePath, dest)
        else:
            print u'Copying directory\n', destFilePath
            shutil.copytree(srcFilePath, destFilePath)
    for srcFile in dirCmp.diff_files:
        srcFilePath = src+'/'+srcFile
        destFilePath = dest+'/'+srcFile
        print u'Syncing file\n', destFilePath
        shutil.copy2(srcFilePath, dest)
    subDirs = set(os.listdir(src))-set(dirCmp.left_only)
    targetDirs = [subDir for subDir in subDirs if os.path.isdir(src+'/'+subDir)]
    for targetDir in targetDirs:
        walk(src+'/'+targetDir, dest+'/'+targetDir)
Example #21
 def copy_drafts():
     """
     Copy drafts directory from the old archive structure to the new.
     """
     draft_dir = path(source_dir) / course_name / DRAFT_DIR
     if draft_dir.isdir():
         shutil.copytree(draft_dir, copy_root / DRAFT_DIR)
Example #22
 def setup(self):
     self.temp_dir = tempfile.mkdtemp(prefix='setup.cfg-test-')
     self.package_dir = os.path.join(self.temp_dir, 'testpackage')
     shutil.copytree(os.path.join(os.path.dirname(__file__), 'testpackage'),
                     self.package_dir)
     self.oldcwd = os.getcwd()
     os.chdir(self.package_dir)
Example #23
def _run_install(self):
    """
    The definition of the "run" method for the CustomInstallCommand metaclass.
    """
    # Get paths
    tethysapp_dir = get_tethysapp_directory()
    destination_dir = os.path.join(tethysapp_dir, self.app_package)

    # Notify user
    print('Copying App Package: {0} to {1}'.format(self.app_package_dir, destination_dir))

    # Copy files
    try:
        shutil.copytree(self.app_package_dir, destination_dir)

    except:
        try:
            shutil.rmtree(destination_dir)
        except:
            os.remove(destination_dir)

        shutil.copytree(self.app_package_dir, destination_dir)

    # Install dependencies
    for dependency in self.dependencies:
        subprocess.call(['pip', 'install', dependency])

    # Run the original install command
    install.run(self)
Example #24
def convert(input_file_path, output_file_path):

    temp_dir = tempfile.mkdtemp()    

    shutil.copytree(template_dir, temp_dir+'/template')
    shutil.copy(input_file_path, temp_dir+'/template/customXml/item1.xml')

    output_file = zipfile.ZipFile(output_file_path, mode='w', compression=zipfile.ZIP_DEFLATED)

    pwd = os.path.abspath('.')
    os.chdir(temp_dir+'/template')

    files_to_ignore = ['.DS_Store']

    for dir_path, dir_names, file_names in os.walk('.'):
        for file_name in file_names:

            if file_name in files_to_ignore:
                continue

            template_file_path = os.path.join(dir_path, file_name)
            output_file.write(template_file_path, template_file_path[2:])

    output_file.close()
    os.chdir(pwd)
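A hypothetical invocation of convert above; template_dir is a module-level path to an unpacked OOXML template that the snippet assumes is defined elsewhere:

# Placeholder paths: inject the custom XML into the template and rezip it as a document.
convert('data/item1.xml', 'out/report.docx')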
Example #25
def install(host, src, dstdir):
    if isLocal(host):
        if not exists(host, src):
            util.output("file does not exist: %s" % src)
            return False

        dst = os.path.join(dstdir, os.path.basename(src))
        if exists(host, dst):
            # Do not clobber existing files/dirs (this is not an error)
            return True

        util.debug(1, "cp %s %s" % (src, dstdir))

        try:
            if os.path.isfile(src):
                shutil.copy2(src, dstdir)
            elif os.path.isdir(src):
                shutil.copytree(src, dst)
        except OSError:
            # Python 2.6 has a bug where this may fail on NFS. So we just
            # ignore errors.
            pass

    else:
        util.error("install() not yet supported for remote hosts")

    return True
Example #26
def copy_packages(packages_names, dest, create_links=False, extra_ignores=None):
    """Copy python packages ``packages_names`` to ``dest``, spurious data.

    Copy will happen without tests, testdata, mercurial data or C extension module source with it.
    ``py2app`` include and exclude rules are **quite** funky, and doing this is the only reliable
    way to make sure we don't end up with useless stuff in our app.
    """
    if ISWINDOWS:
        create_links = False
    if not extra_ignores:
        extra_ignores = []
    ignore = shutil.ignore_patterns('.hg*', 'tests', 'testdata', 'modules', 'docs', 'locale', *extra_ignores)
    for package_name in packages_names:
        if op.exists(package_name):
            source_path = package_name
        else:
            mod = __import__(package_name)
            source_path = mod.__file__
            if mod.__file__.endswith('__init__.py'):
                source_path = op.dirname(source_path)
        dest_name = op.basename(source_path)
        dest_path = op.join(dest, dest_name)
        if op.exists(dest_path):
            if op.islink(dest_path):
                os.unlink(dest_path)
            else:
                shutil.rmtree(dest_path)
        print("Copying package at {0} to {1}".format(source_path, dest_path))
        if create_links:
            os.symlink(op.abspath(source_path), dest_path)
        else:
            if op.isdir(source_path):
                shutil.copytree(source_path, dest_path, ignore=ignore)
            else:
                shutil.copy(source_path, dest_path)
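A minimal usage sketch for copy_packages, assuming the module-level ISWINDOWS flag and the op alias for os.path that the function relies on; package and directory names are illustrative:

# Copy two packages into a bundle directory, dropping tests/docs via the ignore patterns.
copy_packages(['hscommon', 'core'], 'build/app/pkgs')
# On non-Windows platforms, symlink instead of copying (handy for dev builds).
copy_packages(['hscommon', 'core'], 'build/app/pkgs', create_links=True)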
Example #27
    def testPluginPath(self):
        for t in ['test_plugins', 'test plugins', 'test_pluginsé€']:

            # get a unicode test dir
            if sys.version_info.major == 2:
                t = t.encode(locale.getpreferredencoding())
            testDir = os.path.join(self.TMP_DIR, t)

            # copy from testdata
            if not os.path.exists(testDir):
                os.mkdir(testDir)
            test_plug_dir = os.path.join(TEST_DATA_DIR, 'test_plugin_path')
            for item in os.listdir(test_plug_dir):
                shutil.copytree(os.path.join(test_plug_dir, item),
                                os.path.join(testDir, item))

            # we use here a minimal plugin that writes to 'plugin_started.txt'
            # when it is started. if QGIS_PLUGINPATH is correctly parsed, this
            # plugin is executed and the file is created
            self.doTestStartup(
                option="--optionspath",
                testDir=testDir,
                testFile="plugin_started.txt",
                timeOut=360,
                loadPlugins=True,
                env={'QGIS_PLUGINPATH': testDir})
Example #28
def copytree(src, dest, dry_run=None, echo=True):
    dry_run = _coerce_dry_run(dry_run)
    if dry_run or echo:
        _echo_command(dry_run, ['cp', '-r', src, dest])
    if dry_run:
        return
    shutil.copytree(src, dest)
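A hedged usage sketch for the dry-run wrapper above; _coerce_dry_run and _echo_command are helpers assumed to exist in the surrounding build module:

copytree('assets', 'build/assets', dry_run=True)   # only echoes: cp -r assets build/assets
copytree('assets', 'build/assets', dry_run=False)  # echoes the command, then actually copies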
Example #29
    def exportIndex(self, name, dir="."):
        if self.indexes.has_section(name):
            print "Exporting index properties for %s" % name

            global tempdir
            global tarName
            global extension
            
            tempdir = tempfile.mkdtemp()
            exportConfig = ConfigParser.ConfigParser()
            exportConfig.add_section(name)
            for opt in self.indexes.options(name):
                exportConfig.set(name, opt, self.getIndexProp(name, opt))
                
            exportConfig.set(name, INDEX_DIR, "indexes")
            exportConfig.set(name, CONTENT_DIR, "contents")
                
            exportConfig.write(open(os.path.join(tempdir, "indexes.cfg"), "w"))
            
            print "Copying index files (this may take a while)..."
            import shutil
            shutil.copytree(self.getIndexProp(name, INDEX_DIR), os.path.join(tempdir, INDEX_DIR))
            
            print "Copying content files (this may take a while)..."
            shutil.copytree(self.getIndexProp(name, CONTENT_DIR), os.path.join(tempdir, CONTENT_DIR))
            
            tarName = utils.createSafeFilename(name)
            archive = tarfile.open(os.path.join(dir, tarName + extension), "w:bz2")
            os.path.walk(tempdir, walker, archive)
            archive.close()
            
            shutil.rmtree(tempdir)
Example #30
def get_pulsar_binary():
    binary = "pulsar" + (PLATFORM["os"] == "windows" and ".exe" or "")

    platform = PLATFORM.copy()
    if platform["os"] == "darwin": # 64 bits anyway on Darwin
        platform["arch"] = "x64"
    elif platform["os"] == "windows": # 32 bits anyway on Windows
        platform["arch"] = "x86"

    binary_dir = os.path.join(ADDON.getAddonInfo("path"), "resources", "bin", "%(os)s_%(arch)s" % platform)
    if platform["os"] == "android":
        app_id = android_get_current_appid()
        xbmc_data_path = os.path.join("/data", "data", app_id)
        if os.path.exists(xbmc_data_path) and uid == os.stat(xbmc_data_path).st_uid:
            binary_dir = os.path.join(xbmc_data_path, "files", ADDON_ID)
    else:
        dest_binary_dir = os.path.join(xbmc.translatePath(ADDON.getAddonInfo("profile")), "bin", "%(os)s_%(arch)s" % platform)

    binary_path = os.path.join(binary_dir, binary)
    dest_binary_path = os.path.join(dest_binary_dir, binary)

    # Testing for size to see if update is needed. This is a poor test indeed, but it's sufficient.
    if not os.path.exists(dest_binary_path) or os.path.getsize(dest_binary_path) != os.path.getsize(binary_path):
        log.info("pulsar daemon is outdated, updating...")
        import shutil
        try:
            os.makedirs(dest_binary_dir)
        except OSError:
            pass
        shutil.rmtree(dest_binary_dir)
        shutil.copytree(binary_dir, dest_binary_dir)

    return dest_binary_dir, ensure_exec_perms(dest_binary_path)
Example #31
import glob
import shutil
'''
Last modified
2018/07/19

Program description
Copies everything on a USB drive into a chosen destination folder.
'''
# The path of the USB drive to read should be given as the first argument
a=glob.glob("D:\\*")
b=a[:]
i=0
if a:
    while i < len(a):
        a[i]=a[i].replace("D:\\","C:\\USBCOPY\\")
        if a[i] == 'C:\\USBCOPY\\System Volume Information':
            del a[i]
            del b[i]
            continue
        i=i+1
    print (a)
    i=0
    while i < len(a):
        try:
            shutil.copy(b[i],a[i])
        except:
            shutil.copytree(b[i],a[i])
        finally:
            i=i+1
else:
    print ("usb가 인식되어 있지 않습니다.")
Example #32
    def _initialize_chain(self):
        """Initialize a pre-mined blockchain for use by the test.

        Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
        Afterward, create num_nodes copies from the cache."""

        assert self.num_nodes <= MAX_NODES
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                create_cache = True
                break

        if create_cache:
            self.log.debug("Creating data directories from cached datadir")

            # find and delete old cache directories if any exist
            for i in range(MAX_NODES):
                if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                    shutil.rmtree(get_datadir_path(self.options.cachedir, i))

            # Create cache directories, run bitcoinds:
            for i in range(MAX_NODES):
                datadir = initialize_datadir(self.options.cachedir, i)
                args = [os.getenv("MANGACOIND", "mangacoind"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
                if i > 0:
                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
                self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
                self.nodes[i].args = args
                self.start_node(i)

            # Wait for RPC connections to be ready
            for node in self.nodes:
                node.wait_for_rpc_connection()

            # Create a 200-block-long chain; each of the 4 first nodes
            # gets 25 mature blocks and 25 immature.
            # Note: To preserve compatibility with older versions of
            # initialize_chain, only 4 nodes will generate coins.
            #
            # blocks are created with timestamps 10 minutes apart
            # starting from 2010 minutes in the past
            self.enable_mocktime()
            block_time = self.mocktime - (201 * 10 * 60)
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generate(1)
                        block_time += 10 * 60
                    # Must sync before next peer starts generating blocks
                    sync_blocks(self.nodes)

            # Shut them down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []
            self.disable_mocktime()

            def cache_path(n, *paths):
                return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)

            for i in range(MAX_NODES):
                for entry in os.listdir(cache_path(i)):
                    if entry not in ['wallets', 'chainstate', 'blocks']:
                        os.remove(cache_path(i, entry))

        for i in range(self.num_nodes):
            from_dir = get_datadir_path(self.options.cachedir, i)
            to_dir = get_datadir_path(self.options.tmpdir, i)
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i)  # Overwrite port/rpcport in bitcoin.conf
Example #33
##    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
##    GNU General Public License for more details.
##
##    You should have received a copy of the GNU General Public License
##    along with this program.  If not, see <http://www.gnu.org/licenses/>.
##
##    you can contact me [email protected]

''' % (FULL_TITLE, COPYRIGHT)

import py2exe, shutil, sys, os
from distutils.core import setup
from zipfile import ZipFile

shutil.copytree('.', srcpath,
                ignore=shutil.ignore_patterns(
                    '*.pyc', '*.pp', 'error.txt', 'log.txt', 'downloaded', '*.zip'))
z = ZipFile(srcpath+'.zip', 'w')
for root, dirs, files in os.walk(srcpath):
    for name in files:
        path = os.path.join(root, name)
        if os.path.splitext(name)[1] == '.py':
            text = open(path).read()
            f = open(path, 'w')
            f.write(gplcomment)
            f.write(text)
            f.close()
        z.write(path)
z.close()

shutil.rmtree(srcpath)
Example #34
    def _initialize_chain(self):
        """Initialize a pre-mined blockchain for use by the test.

        Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
        Afterward, create num_nodes copies from the cache."""

        assert self.num_nodes <= MAX_NODES
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                create_cache = True
                break

        if create_cache:
            self.log.debug("Creating data directories from cached datadir")

            # find and delete old cache directories if any exist
            for i in range(MAX_NODES):
                if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                    shutil.rmtree(get_datadir_path(self.options.cachedir, i))

            # Create cache directories, run bitcoinds:
            for i in range(MAX_NODES):
                datadir = initialize_datadir(self.options.cachedir, i)
                self.nodes.append(TestNode(
                    i,
                    get_datadir_path(self.options.cachedir, i),
                    extra_conf=["bind=127.0.0.1"],
                    extra_args=[],
                    host=None,
                    rpc_port=rpc_port(i),
                    p2p_port=p2p_port(i),
                    timewait=self.rpc_timeout,
                    bitcoind=self.options.bitcoind,
                    bitcoin_cli=self.options.bitcoincli,
                    mocktime=self.mocktime,
                    coverage_dir=None
                ))
                self.nodes[i].clear_default_args()
                self.nodes[i].extend_default_args(["-datadir=" + datadir])
                self.nodes[i].extend_default_args(["-disablewallet"])
                if i > 0:
                    self.nodes[i].extend_default_args(
                        ["-connect=127.0.0.1:" + str(p2p_port(0))])
                if self.options.gravitonactivation:
                    self.nodes[i].extend_default_args(
                        ["-gravitonactivationtime={}".format(TIMESTAMP_IN_THE_PAST)])
                self.start_node(i)

            # Wait for RPC connections to be ready
            for node in self.nodes:
                node.wait_for_rpc_connection()

            # For backward compatibility of the python scripts with previous
            # versions of the cache, set mocktime to Jan 1,
            # 2014 + (201 * 10 * 60)
            self.mocktime = 1388534400 + (201 * 10 * 60)

            # Create a 200-block-long chain; each of the 4 first nodes
            # gets 25 mature blocks and 25 immature.
            # Note: To preserve compatibility with older versions of
            # initialize_chain, only 4 nodes will generate coins.
            #
            # blocks are created with timestamps 10 minutes apart
            # starting from 2010 minutes in the past
            block_time = self.mocktime - (201 * 10 * 60)
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generatetoaddress(
                            1, self.nodes[peer].get_deterministic_priv_key()[0])
                        block_time += 10 * 60
                    # Must sync before next peer starts generating blocks
                    sync_blocks(self.nodes)

            # Shut them down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []
            self.mocktime = 0

            def cache_path(n, *paths):
                return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)

            for i in range(MAX_NODES):
                # Remove empty wallets dir
                os.rmdir(cache_path(i, 'wallets'))
                for entry in os.listdir(cache_path(i)):
                    if entry not in ['chainstate', 'blocks']:
                        os.remove(cache_path(i, entry))

        for i in range(self.num_nodes):
            from_dir = get_datadir_path(self.options.cachedir, i)
            to_dir = get_datadir_path(self.options.tmpdir, i)
            shutil.copytree(from_dir, to_dir)
            # Overwrite port/rpcport in bitcoin.conf
            initialize_datadir(self.options.tmpdir, i)
Example #35
    def setUp(self):
        if (os.path.exists(self.msfile)):
            shutil.rmtree(self.msfile)

        shutil.copytree(datapath+self.msfile, self.msfile, symlinks=True)
Example #36
package_archive = os.path.join(output_dir, package_name + '.' + extension)
package_dir = package_name

# remove existing package with the same name
try:
    if os.path.exists(package_archive):
        os.remove(package_archive)
    if os.path.exists(package_dir):
        shutil.rmtree(package_dir)
except Exception as ex:
    sys.stderr.write('Failed to clean up old package files: ' + str(ex) + '\n')
    sys.exit(1)

# create temporary package dir
try:
    shutil.copytree(install_dir, package_dir)

    for f in os.listdir(package_dir):
        if f.startswith('makes'):
            os.remove(os.path.join(package_dir, f))
except Exception as ex:
    sys.stderr.write('Failed to copy install directory: ' + str(ex) + '\n')
    sys.exit(1)

# create archive
try:
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    archive_env = os.environ.copy()
Example #37
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    """

    if (not os.path.isdir(os.path.join("cache","node0"))
        or not os.path.isdir(os.path.join("cache","node1"))
        or not os.path.isdir(os.path.join("cache","node2"))
        or not os.path.isdir(os.path.join("cache","node3"))):

        #find and delete old cache directories if any exist
        for i in range(4):
            if os.path.isdir(os.path.join("cache","node"+str(i))):
                shutil.rmtree(os.path.join("cache","node"+str(i)))

        # Create cache directories, run onexds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("ONEXD", "onexd"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: onexd started, waiting for RPC to come up"
            wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
            if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: RPC succesfully started"

        rpcs = []
        for i in range(4):
            try:
                rpcs.append(get_rpc_proxy(rpc_url(i), i))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)

        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 156 seconds apart
        # starting from 31356 seconds in the past
        enable_mocktime()
        block_time = get_mocktime() - (201 * 156)
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += 156
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)

        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        disable_mocktime()
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))

    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir,  "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in onex.conf
Example #38
def copyDirFromDev(dr):
  print "Copying "+dr+" from DEV"
  shutil.copytree(srcdir+dr,dstdir+dr)
Example #39
        try:
            os.mkdir(TEMP_PATH)
        except:
            print("Temp path for symlink to parent already exists {0}".format(
                TEMP_PATH),
                  file=sys.stderr)
            sys.exit(-1)
        flink_ml_version = VERSION.replace(".dev0", "-SNAPSHOT")
        FLINK_ML_ROOT = os.path.abspath("..")

        EXAMPLES_PATH = os.path.join(this_directory, "pyflink/examples")

        try:
            os.symlink(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
        except BaseException:  # pylint: disable=broad-except
            copytree(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)

    PACKAGES = [
        'pyflink', 'pyflink.ml', 'pyflink.ml.core', 'pyflink.ml.lib',
        'pyflink.ml.util', 'pyflink.examples'
    ]

    PACKAGE_DIR = {'pyflink.examples': TEMP_PATH + '/examples'}

    PACKAGE_DATA = {'pyflink.examples': ['*.py', '*/*.py']}

    setup(
        name='apache-flink-ml',
        version=VERSION,
        packages=PACKAGES,
        include_package_data=True,
Example #40
    def do_install(self, name, data):
        if name in data:
            utils.makedirs(self.output_dir)
            LOGGER.notice('Downloading: ' + data[name])
            zip_file = BytesIO()
            zip_file.write(requests.get(data[name]).content)
            LOGGER.notice('Extracting: {0} into plugins'.format(name))
            utils.extract_all(zip_file, 'plugins')
            dest_path = os.path.join('plugins', name)
        else:
            try:
                plugin_path = utils.get_plugin_path(name)
            except:
                LOGGER.error("Can't find plugin " + name)
                return False

            utils.makedirs(self.output_dir)
            dest_path = os.path.join(self.output_dir, name)
            if os.path.exists(dest_path):
                LOGGER.error("{0} is already installed".format(name))
                return False

            LOGGER.notice('Copying {0} into plugins'.format(plugin_path))
            shutil.copytree(plugin_path, dest_path)

        reqpath = os.path.join(dest_path, 'requirements.txt')
        print(reqpath)
        if os.path.exists(reqpath):
            LOGGER.notice('This plugin has Python dependencies.')
            LOGGER.notice('Installing dependencies with pip...')
            try:
                subprocess.check_call(('pip', 'install', '-r', reqpath))
            except subprocess.CalledProcessError:
                LOGGER.error('Could not install the dependencies.')
                print('Contents of the requirements.txt file:\n')
                with codecs.open(reqpath, 'rb', 'utf-8') as fh:
                    print(indent(fh.read(), 4 * ' '))
                print('You have to install those yourself or through a '
                      'package manager.')
            else:
                LOGGER.notice('Dependency installation succeeded.')
        reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')
        if os.path.exists(reqnpypath):
            LOGGER.notice('This plugin has third-party '
                          'dependencies you need to install '
                          'manually.')
            print('Contents of the requirements-nonpy.txt file:\n')
            with codecs.open(reqnpypath, 'rb', 'utf-8') as fh:
                for l in fh.readlines():
                    i, j = l.split('::')
                    print(indent(i.strip(), 4 * ' '))
                    print(indent(j.strip(), 8 * ' '))
                    print()

            print('You have to install those yourself or through a package '
                  'manager.')
        confpypath = os.path.join(dest_path, 'conf.py.sample')
        if os.path.exists(confpypath):
            LOGGER.notice('This plugin has a sample config file.')
            print('Contents of the conf.py.sample file:\n')
            with codecs.open(confpypath, 'rb', 'utf-8') as fh:
                print(
                    indent(
                        pygments.highlight(fh.read(), PythonLexer(),
                                           TerminalFormatter()), 4 * ' '))
        return True
Example #41
    def _run_test_module(self, module, results_dir, gisdbase, location):
        """Run one test file."""
        self.testsuite_dirs[module.tested_dir].append(module.name)
        cwd = os.path.join(results_dir, module.tested_dir, module.name)
        data_dir = os.path.join(module.file_dir, "data")
        if os.path.exists(data_dir):
            # TODO: link dir instead of copy tree and remove link afterwards
            # (removing is good because of testsuite dir in samplecode)
            # TODO: use different dir name in samplecode and test if it works
            shutil.copytree(
                data_dir,
                os.path.join(cwd, "data"),
                ignore=shutil.ignore_patterns("*.svn*"),
            )
        ensure_dir(os.path.abspath(cwd))
        # TODO: put this to constructor and copy here again
        env = os.environ.copy()
        mapset, mapset_dir = self._create_mapset(gisdbase, location, module)
        gisrc = gsetup.write_gisrc(gisdbase, location, mapset)

        # here is special setting of environmental variables for running tests
        # some of them might be set from outside in the future and if the list
        # will be long they should be stored somewhere separately

        # use custom gisrc, not current session gisrc
        env["GISRC"] = gisrc
        # percentage in plain format is 0...10...20... ...100
        env["GRASS_MESSAGE_FORMAT"] = "plain"

        stdout_path = os.path.join(cwd, "stdout.txt")
        stderr_path = os.path.join(cwd, "stderr.txt")

        self.reporter.start_file_test(module)
        # TODO: we might clean the directory here before test if non-empty

        if module.file_type == "py":
            # ignoring shebang line to use current Python
            # and also pass parameters to it
            # add also '-Qwarn'?
            if sys.version_info.major >= 3:
                args = [sys.executable, "-tt", module.abs_file_path]
            else:
                args = [sys.executable, "-tt", "-3", module.abs_file_path]
            p = subprocess.Popen(
                args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
        elif module.file_type == "sh":
            # ignoring shebang line to pass parameters to shell
            # expecting system to have sh or something compatible
            # TODO: add some special checks for MS Windows
            # using -x to see commands in stderr
            # using -e to terminate fast
            # from dash manual:
            # -e errexit     If not interactive, exit immediately if any
            #                untested command fails.  The exit status of a com‐
            #                mand is considered to be explicitly tested if the
            #                command is used to control an if, elif, while, or
            #                until; or if the command is the left hand operand
            #                of an '&&' or '||' operator.
            p = subprocess.Popen(
                ["sh", "-e", "-x", module.abs_file_path],
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
        else:
            p = subprocess.Popen(
                [module.abs_file_path],
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
        stdout, stderr = p.communicate()
        returncode = p.returncode
        encodings = [_get_encoding(), "utf8", "latin-1", "ascii"]

        def try_decode(data, encodings):
            """Try to decode data (bytes) using one of encodings

            Falls back to decoding as UTF-8 with replacement for bytes.
            Strings are returned unmodified.
            """
            for encoding in encodings:
                try:
                    return decode(data, encoding=encoding)
                except UnicodeError:
                    pass
            if isinstance(data, bytes):
                return data.decode(encoding="utf-8", errors="replace")
            return data

        stdout = try_decode(stdout, encodings=encodings)
        stderr = try_decode(stderr, encodings=encodings)

        with open(stdout_path, "w") as stdout_file:
            stdout_file.write(stdout)
        with open(stderr_path, "w") as stderr_file:
            if type(stderr) == "bytes":
                stderr_file.write(decode(stderr))
            else:
                if isinstance(stderr, str):
                    stderr_file.write(stderr)
                else:
                    stderr_file.write(stderr.encode("utf8"))
        self._file_anonymizer.anonymize([stdout_path, stderr_path])

        test_summary = update_keyval_file(
            os.path.join(os.path.abspath(cwd), "test_keyvalue_result.txt"),
            module=module,
            returncode=returncode,
        )
        self.reporter.end_file_test(
            module=module,
            cwd=cwd,
            returncode=returncode,
            stdout=stdout_path,
            stderr=stderr_path,
            test_summary=test_summary,
        )
        # TODO: add some try-except or with for better error handling
        os.remove(gisrc)
        # TODO: only if clean up
        if self.clean_mapsets:
            shutil.rmtree(mapset_dir)
Example #42
def makelib_dir(src, dst):
    def ignore(dirname, files):
        return [f for f in files if prune(f)]
    shutil.copytree(src, dst, ignore=ignore)
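The ignore callable in makelib_dir above (prune is a predicate assumed to be defined nearby) receives each visited directory plus its entries and returns the names to skip; shutil ships an equivalent factory for pattern-based filtering. A sketch with illustrative paths and patterns:

import shutil

# Same shape of filtering via the stock helper instead of a custom prune().
shutil.copytree('libsrc', 'build/lib',
                ignore=shutil.ignore_patterns('.git', '*.pyc', '*~'))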
Example #43
subprocess.call(
    ["conda", "run", "-n", "geodecision",
     "python", "accessibility_measures.py", input_config]
)

if os.path.isfile(input_config):
    with open(input_config) as f:
        params = json.load(f)
        output_dir = params["output_folder"]
        target_output_dir = os.path.join("/Output", output_dir)

    if os.path.isdir(target_output_dir):
        target_output_dir = target_output_dir + "_" + re.sub(
            r"[-: ]", "_",
            str(datetime.datetime.now()).split(".")[0]
        )
    shutil.copytree(output_dir, target_output_dir)
else:
    raise FileNotFoundError(
        "ERROR: Please check if {} exists".format(input_config)
    )
Example #44
 def _copy(src, dest):
     print("copying %s to %s..." % (src, dest))
     if os.path.exists(dest):
         shutil.rmtree(dest)
     shutil.copytree(src, dest)
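On Python 3.8+, the remove-then-copy idiom in _copy above can be collapsed with copytree's dirs_exist_ok flag; a sketch, not the original author's code. Note it merges into an existing tree rather than replacing it, so files present only in the destination survive; the rmtree is still needed for a true replacement:

import shutil

# Python 3.8+: copy over an existing tree without deleting it first (paths illustrative).
shutil.copytree('tools/site', 'build/site', dirs_exist_ok=True)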
Example #45
 def run(
     self, network, antecedents, out_attributes, user_options, num_cores,
     out_path):
     import shutil
     in_data = antecedents
     shutil.copytree(in_data.identifier, out_path)
Example #46
def staging(input, samplename, alleles, epitope_lengths, prediction_algorithms,
            peptide_sequence_length, gene_expn_file, transcript_expn_file,
            normal_snvs_coverage_file, normal_indels_coverage_file,
            tdna_snvs_coverage_file, tdna_indels_coverage_file,
            trna_snvs_coverage_file, trna_indels_coverage_file,
            net_chop_method, netmhc_stab, top_result_per_mutation,
            top_score_metric, binding_threshold, minimum_fold_change,
            normal_cov, tdna_cov, trna_cov, normal_vaf, tdna_vaf, trna_vaf,
            expn_val, net_chop_threshold, fasta_size, iedb_retries,
            iedb_install_dir, downstream_sequence_length, keep_tmp_files,
            force):
    """Stage input for a new pVAC-Seq run.  Generate a unique output directory and \
    save uploaded files to temporary locations (and give pVAC-Seq the filepaths). \
    Then forward the command to start()"""
    input_file = input
    data = current_app.config['storage']['loader']()
    samplename = re.sub(r'[^\w\s.]', '_', samplename)
    list_input()  #update the manifest stored in current_app
    # input_manifest = current_app.config['storage']['manifest']
    current_path = os.path.join(current_app.config['files']['data-dir'],
                                'results', samplename)
    if os.path.exists(current_path):
        i = 1
        while os.path.exists(current_path + "_" + str(i)):
            i += 1
        current_path += "_" + str(i)

    temp_path = tempfile.TemporaryDirectory()

    input_path = resolve_filepath(input_file)
    if not input_path:
        return ({
            'code': 400,
            'message': 'Unable to locate the given file: %s' % input_file,
            'fields': 'input'
        }, 400)

    additional_input_file_list = open(
        os.path.join(temp_path.name, "additional_input_file_list.yml"), 'w')

    if gene_expn_file:
        gene_expn_file_path = resolve_filepath(gene_expn_file)
        if not gene_expn_file_path:
            return ({
                'code': 400,
                'message':
                'Unable to locate the given file: %s' % gene_expn_file,
                'fields': 'gene_expn_file'
            }, 400)
        if os.path.getsize(gene_expn_file_path):
            yaml.dump({"gene_expn_file": gene_expn_file_path},
                      additional_input_file_list,
                      default_flow_style=False)

    if transcript_expn_file:
        transcript_expn_file_path = resolve_filepath(transcript_expn_file)
        if not transcript_expn_file_path:
            return ({
                'code': 400,
                'message':
                'Unable to locate the given file: %s' % transcript_expn_file,
                'fields': 'transcript_expn_file'
            }, 400)
        if os.path.getsize(transcript_expn_file_path):
            yaml.dump({"transcript_expn_file": transcript_expn_file_path},
                      additional_input_file_list,
                      default_flow_style=False)

    if normal_snvs_coverage_file:
        normal_snvs_coverage_file_path = resolve_filepath(
            normal_snvs_coverage_file)
        if not normal_snvs_coverage_file_path:
            return ({
                'code': 400,
                'message': 'Unable to locate the given file: %s' %
                normal_snvs_coverage_file,
                'fields': 'normal_snvs_coverage_file'
            }, 400)
        if os.path.getsize(normal_snvs_coverage_file_path):
            yaml.dump(
                {"normal_snvs_coverage_file": normal_snvs_coverage_file_path},
                additional_input_file_list,
                default_flow_style=False)

    if normal_indels_coverage_file:
        normal_indels_coverage_file_path = resolve_filepath(
            normal_indels_coverage_file)
        if not normal_indels_coverage_file_path:
            return ({
                'code': 400,
                'message': 'Unable to locate the given file: %s' %
                normal_indels_coverage_file,
                'fields': 'normal_indels_coverage_file'
            }, 400)

        if os.path.getsize(normal_indels_coverage_file_path):
            yaml.dump(
                {
                    "normal_indels_coverage_file":
                    normal_indels_coverage_file_path
                },
                additional_input_file_list,
                default_flow_style=False)

    if tdna_snvs_coverage_file:
        tdna_snvs_coverage_file_path = resolve_filepath(
            tdna_snvs_coverage_file)
        if not tdna_snvs_coverage_file_path:
            return ({
                'code': 400,
                'message': 'Unable to locate the given file: %s' %
                tdna_snvs_coverage_file,
                'fields': 'tdna_snvs_coverage_file'
            }, 400)
        if os.path.getsize(tdna_snvs_coverage_file_path):
            yaml.dump(
                {"tdna_snvs_coverage_file": tdna_snvs_coverage_file_path},
                additional_input_file_list,
                default_flow_style=False)

    if tdna_indels_coverage_file:
        tdna_indels_coverage_file_path = resolve_filepath(
            tdna_indels_coverage_file)
        if not tdna_indels_coverage_file_path:
            return ({
                'code': 400,
                'message': 'Unable to locate the given file: %s' %
                tdna_indels_coverage_file,
                'fields': 'tdna_indels_coverage_file'
            }, 400)

        if os.path.getsize(tdna_indels_coverage_file_path):
            yaml.dump(
                {"tdna_indels_coverage_file": tdna_indels_coverage_file_path},
                additional_input_file_list,
                default_flow_style=False)

    if trna_snvs_coverage_file:
        trna_snvs_coverage_file_path = resolve_filepath(
            trna_snvs_coverage_file)
        if not trna_snvs_coverage_file_path:
            return ({
                'code': 400,
                'message': 'Unable to locate the given file: %s' %
                trna_snvs_coverage_file,
                'fields': 'trna_snvs_coverage_file'
            }, 400)

        if os.path.getsize(trna_snvs_coverage_file_path):
            yaml.dump(
                {"trna_snvs_coverage_file": trna_snvs_coverage_file_path},
                additional_input_file_list,
                default_flow_style=False)

    if trna_indels_coverage_file:
        trna_indels_coverage_file_path = resolve_filepath(
            trna_indels_coverage_file)
        if not trna_indels_coverage_file_path:
            return ({
                'code': 400,
                'message': 'Unable to locate the given file: %s' %
                trna_indels_coverage_file,
                'fields': 'trna_indels_coverage_file'
            }, 400)

        if os.path.getsize(trna_indels_coverage_file_path):
            yaml.dump(
                {"trna_indels_coverage_file": trna_indels_coverage_file_path},
                additional_input_file_list,
                default_flow_style=False)

    additional_input_file_list.flush()

    configObj = {
        'input': input_path,
        'samplename': samplename,
        'alleles': alleles.split(','),
        'output': current_path,
        'epitope_lengths': [int(item) for item in epitope_lengths.split(',')],
        'prediction_algorithms': prediction_algorithms.split(','),
        'peptide_sequence_length': peptide_sequence_length,
        # Empty string if nothing was written to the additional-input file.
        'additional_input_file_list': (
            additional_input_file_list.name
            if additional_input_file_list.tell() else ''
        ),
        'net_chop_method': net_chop_method,
        'netmhc_stab': bool(netmhc_stab),
        'top_result_per_mutation': bool(top_result_per_mutation),
        'top_score_metric': top_score_metric,
        'binding_threshold': binding_threshold,
        'minimum_fold_change': minimum_fold_change,
        'normal_cov': normal_cov,
        'tdna_cov': tdna_cov,
        'trna_cov': trna_cov,
        'normal_vaf': normal_vaf,
        'tdna_vaf': tdna_vaf,
        'trna_vaf': trna_vaf,
        'expn_val': expn_val,
        'net_chop_threshold': net_chop_threshold,
        'fasta_size': fasta_size,
        'iedb_retries': iedb_retries,
        'iedb_install_dir': iedb_install_dir,
        'keep_tmp_files': bool(keep_tmp_files),
        'downstream_sequence_length': downstream_sequence_length
    }
    checkOK = precheck(configObj, data) if not force else None
    if checkOK is None:
        copytree(temp_path.name, current_path)
        if configObj['additional_input_file_list']:
            configObj['additional_input_file_list'] = os.path.join(
                current_path,
                os.path.basename(additional_input_file_list.name))
        with open(os.path.join(os.path.abspath(current_path),
                               'config.json'), 'w') as writer:
            json.dump(configObj, writer, indent='\t')
        temp_path.cleanup()
        new_id = start(**configObj)

        return ({
            'code': 201,
            'message': "Process started.",
            'processid': new_id
        }, 201)

    return ({
        'code': 400,
        'message': "The given parameters match process %d" % checkOK,
        'fields': "N/A"
    }, 400)
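
The eight optional-input blocks above are structurally identical, differing only in the field name. A hedged refactor sketch (assuming the same resolve_filepath helper and error-tuple convention used by this handler) folds them into a single loop:

import os
import yaml

def collect_optional_inputs(named_inputs, out_fh):
    """Resolve optional inputs; dump non-empty ones to out_fh as YAML.

    named_inputs maps a field name (e.g. 'gene_expn_file') to the
    user-supplied value, which may be falsy when the input was omitted.
    Returns None on success or an (error-dict, 400) tuple on failure.
    """
    for field, value in named_inputs.items():
        if not value:
            continue
        path = resolve_filepath(value)  # helper assumed from the handler above
        if not path:
            return ({
                'code': 400,
                'message': 'Unable to locate the given file: %s' % value,
                'fields': field
            }, 400)
        if os.path.getsize(path):
            yaml.dump({field: path}, out_fh, default_flow_style=False)
    return None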
Exemple #47
0
def copy(src, dst):
    shutil.rmtree(dst, ignore_errors=True)
    shutil.copytree(src, dst)
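
On Python 3.8+, the rmtree-then-copytree pattern can often be replaced by copytree's dirs_exist_ok flag; note the semantics differ slightly, since merging leaves behind files that exist only in dst:

import shutil

def copy(src, dst):
    # Merge src into dst, creating dst if needed (Python 3.8+).
    # Unlike rmtree-then-copytree, stale files already in dst are kept.
    shutil.copytree(src, dst, dirs_exist_ok=True)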
Exemple #48
0
def copy(src, dst):
    shutil.copytree(src,
                    dst,
                    ignore=shutil.ignore_patterns("*.pyc", "__pycache__"))
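
The ignore argument also accepts any callable of the form f(directory, names) -> names_to_skip, so filtering can go beyond glob patterns; a minimal sketch with a hypothetical size cutoff:

import os
import shutil

def ignore_large_files(directory, names, limit=10 * 1024 * 1024):
    """Skip regular files larger than `limit` bytes (hypothetical policy)."""
    return [
        name for name in names
        if os.path.isfile(os.path.join(directory, name))
        and os.path.getsize(os.path.join(directory, name)) > limit
    ]

# Usage: shutil.copytree(src, dst, ignore=ignore_large_files)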
Exemple #49
0
    def run_test(self):
        self.nodes[0].generatetoaddress(101, self.nodes[0].getnewaddress())

        sync_blocks(self.nodes)

        # Sanity check the test framework:
        res = self.nodes[self.num_nodes - 1].getblockchaininfo()
        assert_equal(res['blocks'], 101)

        node_master = self.nodes[self.num_nodes - 5]
        node_v19 = self.nodes[self.num_nodes - 4]
        node_v18 = self.nodes[self.num_nodes - 3]
        node_v17 = self.nodes[self.num_nodes - 2]
        node_v16 = self.nodes[self.num_nodes - 1]

        self.log.info("Test wallet backwards compatibility...")
        # Create a number of wallets and open them in older versions:

        # w1: regular wallet, created on master: update this test when default
        #     wallets can no longer be opened by older versions.
        node_master.rpc.createwallet(wallet_name="w1")
        wallet = node_master.get_wallet_rpc("w1")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] > 0
        # Create a confirmed transaction, receiving coins
        address = wallet.getnewaddress()
        self.nodes[0].sendtoaddress(address, 10)
        sync_mempools(self.nodes)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        # Create a conflicting transaction using RBF
        return_address = self.nodes[0].getnewaddress()
        tx1_id = self.nodes[1].sendtoaddress(return_address, 1)
        tx2_id = self.nodes[1].bumpfee(tx1_id)["txid"]
        # Confirm the transaction
        sync_mempools(self.nodes)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        # Create another conflicting transaction using RBF
        tx3_id = self.nodes[1].sendtoaddress(return_address, 1)
        tx4_id = self.nodes[1].bumpfee(tx3_id)["txid"]
        # Abandon transaction, but don't confirm
        self.nodes[1].abandontransaction(tx3_id)

        # w1_v19: regular wallet, created with v0.19
        node_v19.rpc.createwallet(wallet_name="w1_v19")
        wallet = node_v19.get_wallet_rpc("w1_v19")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] > 0
        # Use addmultisigaddress (see #18075)
        address_18075 = wallet.rpc.addmultisigaddress(1, [
            "0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52",
            "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073"
        ], "", "legacy")["address"]
        assert wallet.getaddressinfo(address_18075)["solvable"]

        # w1_v18: regular wallet, created with v0.18
        node_v18.rpc.createwallet(wallet_name="w1_v18")
        wallet = node_v18.get_wallet_rpc("w1_v18")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] > 0

        # w2: wallet with private keys disabled, created on master: update this
        #     test when default wallets private keys disabled can no longer be
        #     opened by older versions.
        node_master.rpc.createwallet(wallet_name="w2",
                                     disable_private_keys=True)
        wallet = node_master.get_wallet_rpc("w2")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled'] == False
        assert info['keypoolsize'] == 0

        # w2_v19: wallet with private keys disabled, created with v0.19
        node_v19.rpc.createwallet(wallet_name="w2_v19",
                                  disable_private_keys=True)
        wallet = node_v19.get_wallet_rpc("w2_v19")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled'] == False
        assert info['keypoolsize'] == 0

        # w2_v18: wallet with private keys disabled, created with v0.18
        node_v18.rpc.createwallet(wallet_name="w2_v18",
                                  disable_private_keys=True)
        wallet = node_v18.get_wallet_rpc("w2_v18")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled'] == False
        assert info['keypoolsize'] == 0

        # w3: blank wallet, created on master: update this
        #     test when default blank wallets can no longer be opened by older versions.
        node_master.rpc.createwallet(wallet_name="w3", blank=True)
        wallet = node_master.get_wallet_rpc("w3")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] == 0

        # w3_v19: blank wallet, created with v0.19
        node_v19.rpc.createwallet(wallet_name="w3_v19", blank=True)
        wallet = node_v19.get_wallet_rpc("w3_v19")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] == 0

        # w3_v18: blank wallet, created with v0.18
        node_v18.rpc.createwallet(wallet_name="w3_v18", blank=True)
        wallet = node_v18.get_wallet_rpc("w3_v18")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] == 0

        # Copy the wallets to older nodes:
        node_master_wallets_dir = os.path.join(node_master.datadir,
                                               "regtest/wallets")
        node_v19_wallets_dir = os.path.join(node_v19.datadir,
                                            "regtest/wallets")
        node_v18_wallets_dir = os.path.join(node_v18.datadir,
                                            "regtest/wallets")
        node_v17_wallets_dir = os.path.join(node_v17.datadir,
                                            "regtest/wallets")
        node_v16_wallets_dir = os.path.join(node_v16.datadir, "regtest")
        node_master.unloadwallet("w1")
        node_master.unloadwallet("w2")
        node_v19.unloadwallet("w1_v19")
        node_v19.unloadwallet("w2_v19")
        node_v18.unloadwallet("w1_v18")
        node_v18.unloadwallet("w2_v18")

        # Copy wallets to v0.16
        for wallet in os.listdir(node_master_wallets_dir):
            shutil.copytree(os.path.join(node_master_wallets_dir, wallet),
                            os.path.join(node_v16_wallets_dir, wallet))

        # Copy wallets to v0.17
        for wallet in os.listdir(node_master_wallets_dir):
            shutil.copytree(os.path.join(node_master_wallets_dir, wallet),
                            os.path.join(node_v17_wallets_dir, wallet))
        for wallet in os.listdir(node_v18_wallets_dir):
            shutil.copytree(os.path.join(node_v18_wallets_dir, wallet),
                            os.path.join(node_v17_wallets_dir, wallet))

        # Copy wallets to v0.18
        for wallet in os.listdir(node_master_wallets_dir):
            shutil.copytree(os.path.join(node_master_wallets_dir, wallet),
                            os.path.join(node_v18_wallets_dir, wallet))

        # Copy wallets to v0.19
        for wallet in os.listdir(node_master_wallets_dir):
            shutil.copytree(os.path.join(node_master_wallets_dir, wallet),
                            os.path.join(node_v19_wallets_dir, wallet))

        # Open the wallets in v0.19
        node_v19.loadwallet("w1")
        wallet = node_v19.get_wallet_rpc("w1")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] > 0
        txs = wallet.listtransactions()
        assert_equal(len(txs), 5)
        assert_equal(txs[1]["txid"], tx1_id)
        assert_equal(txs[2]["walletconflicts"], [tx1_id])
        assert_equal(txs[1]["replaced_by_txid"], tx2_id)
        assert not (txs[1]["abandoned"])
        assert_equal(txs[1]["confirmations"], -1)
        assert_equal(txs[2]["blockindex"], 1)
        assert txs[3]["abandoned"]
        assert_equal(txs[4]["walletconflicts"], [tx3_id])
        assert_equal(txs[3]["replaced_by_txid"], tx4_id)
        assert "blockindex" not in txs[3]

        node_v19.loadwallet("w2")
        wallet = node_v19.get_wallet_rpc("w2")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled'] == False
        assert info['keypoolsize'] == 0

        node_v19.loadwallet("w3")
        wallet = node_v19.get_wallet_rpc("w3")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] == 0

        # Open the wallets in v0.18
        node_v18.loadwallet("w1")
        wallet = node_v18.get_wallet_rpc("w1")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] > 0
        txs = wallet.listtransactions()
        assert_equal(len(txs), 5)
        assert_equal(txs[1]["txid"], tx1_id)
        assert_equal(txs[2]["walletconflicts"], [tx1_id])
        assert_equal(txs[1]["replaced_by_txid"], tx2_id)
        assert not (txs[1]["abandoned"])
        assert_equal(txs[1]["confirmations"], -1)
        assert_equal(txs[2]["blockindex"], 1)
        assert txs[3]["abandoned"]
        assert_equal(txs[4]["walletconflicts"], [tx3_id])
        assert_equal(txs[3]["replaced_by_txid"], tx4_id)
        assert "blockindex" not in txs[3]

        node_v18.loadwallet("w2")
        wallet = node_v18.get_wallet_rpc("w2")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled'] == False
        assert info['keypoolsize'] == 0

        node_v18.loadwallet("w3")
        wallet = node_v18.get_wallet_rpc("w3")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] == 0

        # Open the wallets in v0.17
        node_v17.loadwallet("w1_v18")
        wallet = node_v17.get_wallet_rpc("w1_v18")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] > 0

        node_v17.loadwallet("w1")
        wallet = node_v17.get_wallet_rpc("w1")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled']
        assert info['keypoolsize'] > 0

        node_v17.loadwallet("w2_v18")
        wallet = node_v17.get_wallet_rpc("w2_v18")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled'] == False
        assert info['keypoolsize'] == 0

        node_v17.loadwallet("w2")
        wallet = node_v17.get_wallet_rpc("w2")
        info = wallet.getwalletinfo()
        assert info['private_keys_enabled'] == False
        assert info['keypoolsize'] == 0

        # RPC loadwallet failure causes bitcoind to exit, in addition to the RPC
        # call failure, so the following test won't work:
        # assert_raises_rpc_error(-4, "Wallet loading failed.", node_v17.loadwallet, 'w3_v18')

        # Instead, we stop node and try to launch it with the wallet:
        self.stop_node(4)
        node_v17.assert_start_raises_init_error(
            ["-wallet=w3_v18"],
            "Error: Error loading w3_v18: Wallet requires newer version of Bitcoin Core")
        node_v17.assert_start_raises_init_error(
            ["-wallet=w3"],
            "Error: Error loading w3: Wallet requires newer version of Bitcoin Core")
        self.start_node(4)

        # Open most recent wallet in v0.16 (no loadwallet RPC)
        self.stop_node(5)
        self.start_node(5, extra_args=["-wallet=w2"])
        wallet = node_v16.get_wallet_rpc("w2")
        info = wallet.getwalletinfo()
        assert info['keypoolsize'] == 1

        # Create upgrade wallet in v0.16
        self.stop_node(-1)
        self.start_node(-1, extra_args=["-wallet=u1_v16"])
        wallet = node_v16.get_wallet_rpc("u1_v16")
        v16_addr = wallet.getnewaddress('', "bech32")
        v16_info = wallet.validateaddress(v16_addr)
        v16_pubkey = v16_info['pubkey']
        self.stop_node(-1)

        self.log.info("Test wallet upgrade path...")
        # u1: regular wallet, created with v0.17
        node_v17.rpc.createwallet(wallet_name="u1_v17")
        wallet = node_v17.get_wallet_rpc("u1_v17")
        address = wallet.getnewaddress("bech32")
        v17_info = wallet.getaddressinfo(address)
        hdkeypath = v17_info["hdkeypath"]
        pubkey = v17_info["pubkey"]

        # Copy the 0.16 wallet to the last Bitcoin Core version and open it:
        shutil.copyfile(os.path.join(node_v16_wallets_dir, "wallets/u1_v16"),
                        os.path.join(node_master_wallets_dir, "u1_v16"))
        load_res = node_master.loadwallet("u1_v16")
        # Make sure this wallet opens without warnings. See https://github.com/bitcoin/bitcoin/pull/19054
        assert_equal(load_res['warning'], '')
        wallet = node_master.get_wallet_rpc("u1_v16")
        info = wallet.getaddressinfo(v16_addr)
        descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[
            1:] + "]" + v16_pubkey + ")"
        assert_equal(info["desc"], descsum_create(descriptor))

        # Now copy that same wallet back to 0.16 to make sure no automatic upgrade breaks it
        os.remove(os.path.join(node_v16_wallets_dir, "wallets/u1_v16"))
        shutil.copyfile(os.path.join(node_master_wallets_dir, "u1_v16"),
                        os.path.join(node_v16_wallets_dir, "wallets/u1_v16"))
        self.start_node(-1, extra_args=["-wallet=u1_v16"])
        wallet = node_v16.get_wallet_rpc("u1_v16")
        info = wallet.validateaddress(v16_addr)
        assert_equal(info, v16_info)

        # Copy the 0.17 wallet to the last Bitcoin Core version and open it:
        node_v17.unloadwallet("u1_v17")
        shutil.copytree(os.path.join(node_v17_wallets_dir, "u1_v17"),
                        os.path.join(node_master_wallets_dir, "u1_v17"))
        node_master.loadwallet("u1_v17")
        wallet = node_master.get_wallet_rpc("u1_v17")
        info = wallet.getaddressinfo(address)
        descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[
            1:] + "]" + pubkey + ")"
        assert_equal(info["desc"], descsum_create(descriptor))

        # Now copy that same wallet back to 0.17 to make sure no automatic upgrade breaks it
        node_master.unloadwallet("u1_v17")
        shutil.rmtree(os.path.join(node_v17_wallets_dir, "u1_v17"))
        shutil.copytree(os.path.join(node_master_wallets_dir, "u1_v17"),
                        os.path.join(node_v17_wallets_dir, "u1_v17"))
        node_v17.loadwallet("u1_v17")
        wallet = node_v17.get_wallet_rpc("u1_v17")
        info = wallet.getaddressinfo(address)
        assert_equal(info, v17_info)

        # Copy the 0.19 wallet to the last Bitcoin Core version and open it:
        shutil.copytree(os.path.join(node_v19_wallets_dir, "w1_v19"),
                        os.path.join(node_master_wallets_dir, "w1_v19"))
        node_master.loadwallet("w1_v19")
        wallet = node_master.get_wallet_rpc("w1_v19")
        assert wallet.getaddressinfo(address_18075)["solvable"]

        # Now copy that same wallet back to 0.19 to make sure no automatic upgrade breaks it
        node_master.unloadwallet("w1_v19")
        shutil.rmtree(os.path.join(node_v19_wallets_dir, "w1_v19"))
        shutil.copytree(os.path.join(node_master_wallets_dir, "w1_v19"),
                        os.path.join(node_v19_wallets_dir, "w1_v19"))
        node_v19.loadwallet("w1_v19")
        wallet = node_v19.get_wallet_rpc("w1_v19")
        assert wallet.getaddressinfo(address_18075)["solvable"]
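
The per-version copy loops earlier in this test repeat one pattern; a hedged helper (same layout assumption as the test: one subdirectory per wallet) could centralize it:

import os
import shutil

def copy_wallets(src_wallets_dir, dst_wallets_dir):
    """Copy every wallet directory from src_wallets_dir into dst_wallets_dir."""
    for wallet in os.listdir(src_wallets_dir):
        shutil.copytree(os.path.join(src_wallets_dir, wallet),
                        os.path.join(dst_wallets_dir, wallet))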
Exemple #50
0
    def setUp(self):
        self.temp_dir = tempfile.mkdtemp(prefix='gstools_test')
        self.base_path = os.path.join(self.temp_dir, 'test_files')
        shutil.copytree(os.path.join(TEST_DIR, 'gstools'), self.base_path)
Exemple #51
0
rgidf = rgidf.sort_values('Area', ascending=False)

log.info('Starting run for RGI reg: ' + rgi_region)
log.info('Number of glaciers: {}'.format(len(rgidf)))

# Go - initialize working directories
# -----------------------------------
gdirs = workflow.init_glacier_regions(rgidf)

if RUN_GIS_mask:
    execute_entity_task(tasks.glacier_masks, gdirs)

# Replace the Columbia glacier dir with the one that contains the ITMIX DEM
shutil.rmtree(os.path.join(WORKING_DIR,
                           'per_glacier/RGI60-01/RGI60-01.10/RGI60-01.10689'))
shutil.copytree(Columbia_dir,
                os.path.join(WORKING_DIR,
                             'per_glacier/RGI60-01/RGI60-01.10/RGI60-01.10689'))

# Pre-processing tasks
task_list = [
    tasks.compute_centerlines,
    tasks.initialize_flowlines,
    tasks.catchment_area,
    tasks.catchment_intersections,
    tasks.catchment_width_geom,
    tasks.catchment_width_correction,
]

if RUN_GIS_PREPRO:
    for task in task_list:
        execute_entity_task(task, gdirs)
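
The rmtree/copytree pair above performs a destructive directory swap; a small hedged helper makes the intent explicit and tolerates a missing destination:

import os
import shutil

def replace_dir(src, dst):
    """Replace dst with a fresh copy of src; dst need not exist yet."""
    if os.path.exists(dst):
        shutil.rmtree(dst)
    shutil.copytree(src, dst)

# replace_dir(Columbia_dir, os.path.join(
#     WORKING_DIR, 'per_glacier/RGI60-01/RGI60-01.10/RGI60-01.10689'))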
Exemple #52
0
    pkg_path = os.path.join(
        temp_path,
        '{package}_{version}'.format(**config))

    debian_path = os.path.join(pkg_path, 'debian')

    pkg_src_path = os.path.join(pkg_path, 'src')

    debian_source_path = os.path.join(debian_path, 'source')

    target_path = os.path.join(pkg_src_path, 'usr', 'lib', 'siridb', 'server')

    os.makedirs(target_path)
    os.makedirs(debian_source_path)

    shutil.copy2(source_path, os.path.join(target_path, config['package']))
    shutil.copytree('help', os.path.join(target_path, 'help'))

    db_path = os.path.join(pkg_src_path, 'var', 'lib', 'siridb')
    os.makedirs(db_path)

    cfg_path = os.path.join(pkg_src_path, 'etc', 'siridb')
    os.makedirs(cfg_path)
    shutil.copy('siridb.conf', cfg_path)

    systemd_path = os.path.join(target_path, 'systemd')
    os.makedirs(systemd_path)
    with open(os.path.join(
            systemd_path, '{package}.service'.format(**config)), 'w') as f:
        f.write(SYSTEMD)

    with open(os.path.join(debian_path, 'postinst'), 'w') as f:
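
Debian maintainer scripts such as postinst must also be executable; a hedged continuation of the pattern above (POSTINST standing in for a hypothetical template string, analogous to SYSTEMD) might look like:

import os
import stat

postinst_path = os.path.join(debian_path, 'postinst')
with open(postinst_path, 'w') as f:
    f.write(POSTINST)  # hypothetical template string, like SYSTEMD above
os.chmod(postinst_path,
         os.stat(postinst_path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)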
Exemple #53
0
def make_package(args):
    # If no launcher is specified, require a main.py/main.pyo:
    if (get_bootstrap_name() != "sdl" or args.launcher is None) and \
            get_bootstrap_name() not in ["webview", "service_library"]:
        # (webview doesn't need an entrypoint, apparently)
        if args.private is None or (
                not exists(join(realpath(args.private), 'main.py'))
                and not exists(join(realpath(args.private), 'main.pyo'))):
            print(
                '''BUILD FAILURE: No main.py(o) found in your app directory. This
file must exist to act as the entry point for your app. If your app is
started by a file with a different name, rename it to main.py or add a
main.py that loads it.''')
            sys.exit(1)

    assets_dir = "src/main/assets"

    # Delete the old assets.
    shutil.rmtree(assets_dir, ignore_errors=True)
    ensure_dir(assets_dir)

    # Add extra environment variable file into tar-able directory:
    env_vars_tarpath = tempfile.mkdtemp(prefix="p4a-extra-env-")
    with open(os.path.join(env_vars_tarpath, "p4a_env_vars.txt"), "w") as f:
        if hasattr(args, "window"):
            f.write("P4A_IS_WINDOWED=" + str(args.window) + "\n")
        if hasattr(args, "orientation"):
            f.write("P4A_ORIENTATION=" + str(args.orientation) + "\n")
        f.write("P4A_NUMERIC_VERSION=" + str(args.numeric_version) + "\n")
        f.write("P4A_MINSDK=" + str(args.min_sdk_version) + "\n")

    # Package up the private data (public not supported).
    use_setup_py = get_dist_info_for("use_setup_py",
                                     error_if_missing=False) is True
    tar_dirs = [env_vars_tarpath]
    _temp_dirs_to_clean = []
    try:
        if args.private:
            if not use_setup_py or (
                    not exists(join(args.private, "setup.py"))
                    and not exists(join(args.private, "pyproject.toml"))):
                print('No setup.py/pyproject.toml used, copying '
                      'full private data into .apk.')
                tar_dirs.append(args.private)
            else:
                print("Copying main.py's ONLY, since other app data is "
                      "expected in site-packages.")
                main_py_only_dir = tempfile.mkdtemp()
                _temp_dirs_to_clean.append(main_py_only_dir)

                # Check all main.py files we need to copy:
                copy_paths = ["main.py", join("service", "main.py")]
                for copy_path in copy_paths:
                    variants = [
                        copy_path,
                        copy_path.partition(".")[0] + ".pyc",
                        copy_path.partition(".")[0] + ".pyo",
                    ]
                    # Check in all variants with all possible endings:
                    for variant in variants:
                        if exists(join(args.private, variant)):
                            # Make sure the surrounding directory exists:
                            dir_path = os.path.dirname(variant)
                            if (len(dir_path) > 0 and not exists(
                                    join(main_py_only_dir, dir_path))):
                                os.mkdir(join(main_py_only_dir, dir_path))
                            # Copy actual file:
                            shutil.copyfile(
                                join(args.private, variant),
                                join(main_py_only_dir, variant),
                            )

                # Append directory with all main.py's to result apk paths:
                tar_dirs.append(main_py_only_dir)
        for python_bundle_dir in ('private', '_python_bundle'):
            if exists(python_bundle_dir):
                tar_dirs.append(python_bundle_dir)
        if get_bootstrap_name() == "webview":
            tar_dirs.append('webview_includes')

        for asset in args.assets:
            asset_src, asset_dest = asset.split(":")
            if isfile(realpath(asset_src)):
                ensure_dir(dirname(join(assets_dir, asset_dest)))
                shutil.copy(realpath(asset_src), join(assets_dir, asset_dest))
            else:
                shutil.copytree(realpath(asset_src),
                                join(assets_dir, asset_dest))

        if args.private or args.launcher:
            make_tar(join(assets_dir, 'private.mp3'),
                     tar_dirs,
                     args.ignore_path,
                     optimize_python=args.optimize_python)
    finally:
        for directory in _temp_dirs_to_clean:
            shutil.rmtree(directory)

    # Remove extra env vars tar-able directory:
    shutil.rmtree(env_vars_tarpath)

    # Prepare some variables for templating process
    res_dir = "src/main/res"
    default_icon = 'templates/kivy-icon.png'
    default_presplash = 'templates/kivy-presplash.jpg'
    shutil.copy(args.icon or default_icon, join(res_dir, 'drawable/icon.png'))
    if get_bootstrap_name() != "service_only":
        shutil.copy(args.presplash or default_presplash,
                    join(res_dir, 'drawable/presplash.jpg'))

    # If extra Java jars were requested, copy them into the libs directory
    jars = []
    if args.add_jar:
        for jarname in args.add_jar:
            if not exists(jarname):
                print('Requested jar does not exist: {}'.format(jarname))
                sys.exit(-1)
            shutil.copy(jarname, 'src/main/libs')
            jars.append(basename(jarname))

    # If extra aar were requested, copy them into the libs directory
    aars = []
    if args.add_aar:
        ensure_dir("libs")
        for aarname in args.add_aar:
            if not exists(aarname):
                print('Requested aar does not exist: {}'.format(aarname))
                sys.exit(-1)
            shutil.copy(aarname, 'libs')
            aars.append(basename(aarname).rsplit('.', 1)[0])

    versioned_name = (args.name.replace(' ', '').replace('\'', '') + '-' +
                      args.version)

    version_code = 0
    if not args.numeric_version:
        # Set version code in format (arch-minsdk-app_version)
        arch = get_dist_info_for("archs")[0]
        arch_dict = {
            "x86_64": "9",
            "arm64-v8a": "8",
            "armeabi-v7a": "7",
            "x86": "6"
        }
        arch_code = arch_dict.get(arch, '1')
        min_sdk = args.min_sdk_version
        for i in args.version.split('.'):
            version_code *= 100
            version_code += int(i)
        args.numeric_version = "{}{}{}".format(arch_code, min_sdk,
                                               version_code)
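        # Worked example: arch "arm64-v8a" (code 8), min_sdk 21 and app
        # version "1.2.3" give version_code 10203 and numeric_version "82110203".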

    if args.intent_filters:
        with open(args.intent_filters) as fd:
            args.intent_filters = fd.read()

    if not args.add_activity:
        args.add_activity = []

    if not args.activity_launch_mode:
        args.activity_launch_mode = ''

    if args.extra_source_dirs:
        esd = []
        for spec in args.extra_source_dirs:
            if ':' in spec:
                specdir, specincludes = spec.split(':')
                print(
                    'WARNING: Currently gradle builds only support including source '
                    'directories, so when building using gradle all files in '
                    '{} will be included.'.format(specdir))
            else:
                specdir = spec
                specincludes = '**'
            esd.append((realpath(specdir), specincludes))
        args.extra_source_dirs = esd
    else:
        args.extra_source_dirs = []

    service = False
    if args.private:
        service_main = join(realpath(args.private), 'service', 'main.py')
        if exists(service_main) or exists(service_main + 'o'):
            service = True

    service_names = []
    for sid, spec in enumerate(args.services):
        spec = spec.split(':')
        name = spec[0]
        entrypoint = spec[1]
        options = spec[2:]

        foreground = 'foreground' in options
        sticky = 'sticky' in options

        service_names.append(name)
        service_target_path =\
            'src/main/java/{}/Service{}.java'.format(
                args.package.replace(".", "/"),
                name.capitalize()
            )
        render(
            'Service.tmpl.java',
            service_target_path,
            name=name,
            entrypoint=entrypoint,
            args=args,
            foreground=foreground,
            sticky=sticky,
            service_id=sid + 1,
        )

    # Find the SDK directory and target API
    with open('project.properties', 'r') as fileh:
        target = fileh.read().strip()
    android_api = target.split('-')[1]
    try:
        int(android_api)
    except (ValueError, TypeError):
        raise ValueError("failed to extract the Android API level from " +
                         "project.properties. expected int, got: '" +
                         str(android_api) + "'")
    with open('local.properties', 'r') as fileh:
        sdk_dir = fileh.read().strip()
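    # local.properties holds a "sdk.dir=<path>" line; slicing off the first
    # 8 characters drops the "sdk.dir=" key and keeps only the path.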
    sdk_dir = sdk_dir[8:]

    # Try to build with the newest available build tools
    ignored = {".DS_Store", ".ds_store"}
    build_tools_versions = [
        x for x in listdir(join(sdk_dir, 'build-tools')) if x not in ignored
    ]
    build_tools_versions = sorted(build_tools_versions, key=LooseVersion)
    build_tools_version = build_tools_versions[-1]

    # Folder name for launcher (used by SDL2 bootstrap)
    url_scheme = 'kivy'

    # Render out android manifest:
    manifest_path = "src/main/AndroidManifest.xml"
    render_args = {
        "args": args,
        "service": service,
        "service_names": service_names,
        "android_api": android_api,
        "debug": "debug" in args.build_mode,
    }
    if get_bootstrap_name() == "sdl2":
        render_args["url_scheme"] = url_scheme
    render('AndroidManifest.tmpl.xml', manifest_path, **render_args)

    # Copy the AndroidManifest.xml to the dist root dir so that ant
    # can also use it
    if exists('AndroidManifest.xml'):
        remove('AndroidManifest.xml')
    shutil.copy(manifest_path, 'AndroidManifest.xml')

    # gradle build templates
    render(
        'build.tmpl.gradle',
        'build.gradle',
        args=args,
        aars=aars,
        jars=jars,
        android_api=android_api,
        build_tools_version=build_tools_version,
        debug_build="debug" in args.build_mode,
        is_library=(get_bootstrap_name() == 'service_library'),
    )

    # ant build templates
    render('build.tmpl.xml',
           'build.xml',
           args=args,
           versioned_name=versioned_name)

    # String resources:
    render_args = {"args": args, "private_version": str(time.time())}
    if get_bootstrap_name() == "sdl2":
        render_args["url_scheme"] = url_scheme
    render('strings.tmpl.xml', join(res_dir, 'values/strings.xml'),
           **render_args)

    if exists(join("templates", "custom_rules.tmpl.xml")):
        render('custom_rules.tmpl.xml', 'custom_rules.xml', args=args)

    if get_bootstrap_name() == "webview":
        render('WebViewLoader.tmpl.java',
               'src/main/java/org/kivy/android/WebViewLoader.java',
               args=args)

    if args.sign:
        render('build.properties', 'build.properties')
    else:
        if exists('build.properties'):
            os.remove('build.properties')

    # Apply java source patches if any are present:
    if exists(join('src', 'patches')):
        print("Applying Java source code patches...")
        for patch_name in os.listdir(join('src', 'patches')):
            patch_path = join('src', 'patches', patch_name)
            print("Applying patch: " + str(patch_path))

            # -N: insist this is FORWARD patch, don't reverse apply
            # -p1: strip first path component
            # -t: batch mode, don't ask questions
            patch_command = ["patch", "-N", "-p1", "-t", "-i", patch_path]

            try:
                # Use a dry run to establish whether the patch is already applied.
                # If we don't check this, the patch may be partially applied (which is bad!)
                subprocess.check_output(patch_command + ["--dry-run"])
            except subprocess.CalledProcessError as e:
                if e.returncode == 1:
                    # Return code 1 means not all hunks could be applied, this usually
                    # means the patch is already applied.
                    print(
                        "Warning: failed to apply patch (exit code 1), "
                        "assuming it is already applied: ", str(patch_path))
                else:
                    raise e
            else:
                # The dry run worked, so do the real thing
                subprocess.check_output(patch_command)
Exemple #54
0
def copy(src, dst):
    return shutil.copytree(src, dst, True)
Exemple #55
0
    def do_copy(self, src, dest):
        """Copy the src dir to the dest dir omitting the self.coursedir.ignore globs."""
        shutil.copytree(src, dest, ignore=shutil.ignore_patterns(*self.coursedir.ignore))
Exemple #56
0
    'uint32'  :   ('uint32_t',          4, PrimitiveDataType, []),
    'int64'   :   ('int64_t',           8, PrimitiveDataType, []),
    'uint64'  :   ('uint64_t',          8, PrimitiveDataType, []),
    'float32' :   ('float',             4, PrimitiveDataType, []),
    'float64' :   ('double',            8, PrimitiveDataType, []),
    'time'    :   ('ros::Time',         8, TimeDataType, ['ros/time']),
    'duration':   ('ros::Duration',     8, TimeDataType, ['ros/duration']),
    'string'  :   ('char*',             0, StringDataType, []),
    'Header'  :   ('std_msgs::Header',  0, MessageDataType, ['std_msgs/Header'])
}

# need correct inputs
if len(sys.argv) < 2:
    print(__usage__)
    sys.exit(1)

# get output path
path = sys.argv[1]
output_path = os.path.join(sys.argv[1], "ros_lib")
print("\nExporting to %s" % output_path)

rospack = rospkg.RosPack()

# copy ros_lib stuff in
shutil.rmtree(output_path, ignore_errors=True)
shutil.copytree(os.path.join(rospack.get_path(THIS_PACKAGE), "src", "ros_lib"), output_path)
rosserial_client_copy_files(rospack, output_path)

# generate messages
rosserial_generate(rospack, output_path, ROS_TO_EMBEDDED_TYPES)
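
Each table entry maps a ROS primitive to (embedded C type, serialized size in bytes, handler class, required headers); a quick hedged illustration of reading the table:

# 'float64' serializes as an 8-byte C double with no extra headers.
c_type, size, handler_cls, includes = ROS_TO_EMBEDDED_TYPES['float64']
assert (c_type, size, includes) == ('double', 8, [])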
Exemple #57
0
# Get the username of the logged-in user.
username = getpass.getuser()

# Read where to copy icons from, and which places to copy to, from the .ini
config_file = 'dir.ini'
icons_folder, options = read_config_file(config_file)
icons = os.listdir(icons_folder)

# Things we won't try to copy as files.
fails = ['$RECYCLE.BIN', 'Applications by Subject']

for option in options:
    for icon in icons:
        if icon not in fails:
            try:
                shutil.copy(os.path.join(icons_folder, icon), option)
            except OSError as err:
                print('failed on', icon, '-', err)

    try:
        shutil.copytree(os.path.join(icons_folder, 'Applications by Subject'),
                        os.path.join(option, 'Applications by Subject'))
    except OSError as err:
        print('failed to copy "Applications by Subject" -', err)

# Report.
root = Tk()
root.title('Desktop Icon Repair')
mainapp = App(root)

root.mainloop()
Exemple #58
0
def generate(sources_dir):
    """Generates the markdown files for the documentation.

    # Arguments
        sources_dir: Where to put the markdown files.
    """
    template_dir = os.path.join(str(keras_dir), 'docs', 'templates')

    if K.backend() != 'tensorflow':
        raise RuntimeError('The documentation must be built '
                           'with the TensorFlow backend because this '
                           'is the only backend with docstrings.')

    print('Cleaning up existing sources directory.')
    if os.path.exists(sources_dir):
        shutil.rmtree(sources_dir)

    print('Populating sources directory with templates.')
    shutil.copytree(template_dir, sources_dir)

    readme = read_file(os.path.join(str(keras_dir), 'README.md'))
    index = read_file(os.path.join(template_dir, 'index.md'))
    index = index.replace('{{autogenerated}}', readme[readme.find('##'):])
    with open(os.path.join(sources_dir, 'index.md'), 'w') as f:
        f.write(index)

    print('Generating docs for Keras %s.' % keras.__version__)
    for page_data in PAGES:
        classes = read_page_data(page_data, 'classes')

        blocks = []
        for element in classes:
            if not isinstance(element, (list, tuple)):
                element = (element, [])
            cls = element[0]
            subblocks = []
            signature = get_class_signature(cls)
            subblocks.append('<span style="float:right;">' +
                             class_to_source_link(cls) + '</span>')
            if element[1]:
                subblocks.append('## ' + cls.__name__ + ' class\n')
            else:
                subblocks.append('### ' + cls.__name__ + '\n')
            subblocks.append(code_snippet(signature))
            docstring = cls.__doc__
            if docstring:
                subblocks.append(process_docstring(docstring))
            methods = collect_class_methods(cls, element[1])
            if methods:
                subblocks.append('\n---')
                subblocks.append('## ' + cls.__name__ + ' methods\n')
                subblocks.append('\n---\n'.join([
                    render_function(method, method=True) for method in methods
                ]))
            blocks.append('\n'.join(subblocks))

        methods = read_page_data(page_data, 'methods')

        for method in methods:
            blocks.append(render_function(method, method=True))

        functions = read_page_data(page_data, 'functions')

        for function in functions:
            blocks.append(render_function(function, method=False))

        if not blocks:
            raise RuntimeError('Found no content for page ' +
                               page_data['page'])

        mkdown = '\n----\n\n'.join(blocks)
        # Save module page.
        # Either insert content into existing page,
        # or create page otherwise.
        page_name = page_data['page']
        path = os.path.join(sources_dir, page_name)
        if os.path.exists(path):
            template = read_file(path)
            if '{{autogenerated}}' not in template:
                raise RuntimeError('Template found for ' + path +
                                   ' but missing {{autogenerated}}'
                                   ' tag.')
            mkdown = template.replace('{{autogenerated}}', mkdown)
            print('...inserting autogenerated content into template:', path)
        else:
            print('...creating new page with autogenerated content:', path)
        subdir = os.path.dirname(path)
        if not os.path.exists(subdir):
            os.makedirs(subdir)
        with open(path, 'w') as f:
            f.write(mkdown)

    shutil.copyfile(os.path.join(str(keras_dir), 'CONTRIBUTING.md'),
                    os.path.join(str(sources_dir), 'contributing.md'))
    copy_examples(os.path.join(str(keras_dir), 'examples'),
                  os.path.join(str(sources_dir), 'examples'))
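
The page loop relies on a simple convention: template pages carry an {{autogenerated}} marker that is replaced with the rendered blocks. A minimal sketch of that mechanism in isolation:

def fill_template(template_text, generated_md, marker='{{autogenerated}}'):
    """Insert generated markdown at the marker, as the loop above does."""
    if marker not in template_text:
        raise RuntimeError('template is missing the %s tag' % marker)
    return template_text.replace(marker, generated_md)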
Exemple #59
0
def copytree(src, dst):
    LOG.debug("Copying full tree: %r => %r" % (src, dst))
    if not is_dry_run():
        shutil.copytree(src, dst)
    return dst
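
The helper assumes a module-level LOG and an is_dry_run() toggle; one hedged way to wire those up:

import logging

LOG = logging.getLogger(__name__)
_DRY_RUN = False  # hypothetical flag, e.g. set from a --dry-run CLI option

def set_dry_run(enabled):
    global _DRY_RUN
    _DRY_RUN = enabled

def is_dry_run():
    return _DRY_RUN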
Exemple #60
0

// ************************************************************************* //''' % (
        1.5492 * water_percent)

    for acc in (5.0, 8.0):  # in m/s^2
        for speed in (10, 24):  # in m/s
            s0 = time.time()
            speed_down_time = round(speed / acc, 1)  # in s
            stop_time = 10
            total_time = speed_down_time + stop_time
            path = '{}/py/water={}_acc={}_speed={}'.format(
                working_dir, water_percent, acc, speed)
            clear_dir(path)
            shutil.copytree(working_dir + r'/0.0', path + r'/0.0')
            shutil.copytree(working_dir + r'/constant', path + r'/constant')
            shutil.copytree(working_dir + r'/system', path + r'/system')

            #setFieldDict
            with open(path + r'/system/setFieldsDict', 'w') as f:
                f.write(setFieldDict)
            #g
            gFile = foamfile.FoamFile.fromFile(path + r'/constant/g')
            g = '(-' + str(acc) + ' -9.81 0)'
            gDict = {'value': g}
            gFile.updateValues(gDict)
            gFile.save(path)

            #controlDict
            c = controlDict.ControlDict.fromFile(path + r'/system/controlDict')