Example #1
 def setUp(self):
     super(ArvPutUploadJobTest, self).setUp()
     run_test_server.authorize_with('active')
     # Temp files creation
     self.tempdir = tempfile.mkdtemp()
     subdir = os.path.join(self.tempdir, 'subdir')
     os.mkdir(subdir)
     data = "x" * 1024 # 1 KB
     for i in range(1, 5):
         with open(os.path.join(self.tempdir, str(i)), 'w') as f:
             f.write(data * i)
     with open(os.path.join(subdir, 'otherfile'), 'w') as f:
         f.write(data * 5)
     # Large temp file for resume test
     _, self.large_file_name = tempfile.mkstemp()
     fileobj = open(self.large_file_name, 'w')
     # Make sure to write just a little more than one block
     for _ in range((arvados.config.KEEP_BLOCK_SIZE>>20)+1):
         data = random.choice(['x', 'y', 'z']) * 1024 * 1024 # 1 MiB
         fileobj.write(data)
     fileobj.close()
     # Temp dir containing small files to be repacked
     self.small_files_dir = tempfile.mkdtemp()
     data = 'y' * 1024 * 1024 # 1 MB
     for i in range(1, 70):
         with open(os.path.join(self.small_files_dir, str(i)), 'w') as f:
             f.write(data + str(i))
     self.arvfile_write = getattr(arvados.arvfile.ArvadosFileWriter, 'write')
     # Temp dir to hold a symlink to other temp dir
     self.tempdir_with_symlink = tempfile.mkdtemp()
     os.symlink(self.tempdir, os.path.join(self.tempdir_with_symlink, 'linkeddir'))
     os.symlink(os.path.join(self.tempdir, '1'),
                os.path.join(self.tempdir_with_symlink, 'linkedfile'))
Example #2
def find_bowtie2_index(r, path_to_bowtie2='bowtie2'):
    """check for bowtie2 index as given.
    return True if found, else return False
    """
    args = [path_to_bowtie2 + '-inspect', '-v', '-s', r]
    debug(' '.join(args))
    P = Popen(args, stdout=open(devnull, 'w'), stderr=PIPE, cwd=mkdtemp())
    stderr = P.communicate()[1].splitlines()
    if not stderr[0].startswith('Could not locate'):
        for line in stderr:
            if line.startswith('Opening'):
                index_bt2 = line[(1 + line.find('"')):line.rfind('"')]
                index_basename = index_bt2[0:index_bt2.find('.1.bt2')]
                return index_basename
    for d in [getcwd(), os.path.split(path_to_bowtie2)[0],
              join(os.path.split(path_to_bowtie2)[0], 'indexes')]:
        rprime = join(d, r)
        args = [path_to_bowtie2 + '-inspect', '-v', '-s', rprime]
        debug(' '.join(args))
        P = Popen(args, stdout=open(devnull, 'w'), stderr=PIPE, cwd=mkdtemp())
        stderr = P.communicate()[1].splitlines()
        if not stderr[0].startswith('Could not locate'):
            for line in stderr:
                if line.startswith('Opening'):
                    index_bt2 = line[(1 + line.find('"')):line.rfind('"')]
                    index_basename = index_bt2[0:index_bt2.find('.1.bt2')]
                    return index_basename
    return None
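Each probe above runs bowtie2-inspect with cwd=mkdtemp() and never removes that scratch directory, so every lookup leaks a temp dir. A minimal sketch of the same probe with cleanup (hypothetical helper, assuming the same bowtie2-inspect invocation):

import shutil
import tempfile
from os import devnull
from subprocess import Popen, PIPE

def inspect_index(path_to_bowtie2, ref):
    # run bowtie2-inspect in a scratch dir and always remove it afterwards
    scratch = tempfile.mkdtemp()
    try:
        args = [path_to_bowtie2 + '-inspect', '-v', '-s', ref]
        with open(devnull, 'w') as null:
            proc = Popen(args, stdout=null, stderr=PIPE, cwd=scratch)
            return proc.communicate()[1].splitlines()
    finally:
        shutil.rmtree(scratch, ignore_errors=True)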
Example #3
    def __init__(self, userdata, tmp_installroot=False):
        self._scenario_data = {}

        self.tempdir = tempfile.mkdtemp(prefix="dnf_ci_tempdir_")
        if tmp_installroot:
            # some tests need to be run inside the installroot, it can be set
            # per scenario by using @force_tmp_installroot decorator
            self.installroot = tempfile.mkdtemp(dir=self.tempdir, prefix="tmp_installroot_")
            self.delete_installroot = True
        else:
            if "installroot" in userdata:
                self.installroot = userdata["installroot"]
                # never delete user defined installroot - this allows running tests on /
                self.delete_installroot = False
            else:
                self.installroot = tempfile.mkdtemp(prefix="dnf_ci_installroot_")
                self.delete_installroot = True

        self.dnf_command = userdata.get("dnf_command", DEFAULT_DNF_COMMAND)
        self.config = userdata.get("config", DEFAULT_CONFIG)
        self.releasever = userdata.get("releasever", DEFAULT_RELEASEVER)
        self.module_platform_id = userdata.get("module_platform_id", DEFAULT_PLATFORM_ID)
        self.reposdir = userdata.get("reposdir", DEFAULT_REPOSDIR)
        self.repos_location = userdata.get("repos_location", DEFAULT_REPOS_LOCATION)
        self.fixturesdir = FIXTURES_DIR
        self.disable_plugins = True
        self.disable_repos_option = "--disablerepo='*'"
        self.assumeyes_option = "-y"

        # temporarily use DNF0 for substituting fixturesdir in repo files;
        # in the future this could become a named environment variable like
        # DNF_VAR_FIXTURES_DIR
        os.environ['DNF0'] = self.fixturesdir
Example #4
 def test_extract_dev(self):
     st = create_setting()
     st.validate = False
     # create tmp dirs for extraction output
     st.inst_dir = tempfile.mkdtemp()
     st.true_dir = tempfile.mkdtemp()
     
     extract(st)
     
     # check no of files
     self.assertEqual(len(st.dev_true_fns), 
                      len(st.dev_part_fns))
     self.assertEqual(len(st.dev_inst_fns), 
                      len(st.dev_part_fns))
     
     # test loading a corpus file
     corpus = ParallelGraphCorpus(inf=st.dev_true_fns[0])
     
     # test loading an instances file
     inst = CorpusInst()
     inst.loadtxt(st.dev_inst_fns[0],
                  st.descriptor.dtype)
     self.assertEqual(len(corpus), len(inst))
     
     clean_inst(st)
     clean_true(st)
Example #5
    def setUp(self):

        self.host_log_dir = tempfile.mkdtemp(prefix='host_log_dir.')
        self.volume = tempfile.mkdtemp(prefix='volume.')
        for logf in ['test1.log', 'test2.log']:
            with open(os.path.join(self.volume, logf), 'w') as logp:
                logp.write(logf)
Example #6
 def test_repository_creation(self, preserve=False):
     if not SKIP_SLOW_TESTS:
         with Context() as finalizers:
             config_dir = tempfile.mkdtemp()
             repo_dir = tempfile.mkdtemp()
             if not preserve:
                 finalizers.register(shutil.rmtree, config_dir)
                 finalizers.register(shutil.rmtree, repo_dir)
             from deb_pkg_tools import config
             config.user_config_directory = config_dir
             with open(os.path.join(config_dir, config.repo_config_file), 'w') as handle:
                 handle.write('[test]\n')
                 handle.write('directory = %s\n' % repo_dir)
                 handle.write('release-origin = %s\n' % TEST_REPO_ORIGIN)
             self.test_package_building(repo_dir)
             update_repository(repo_dir, release_fields=dict(description=TEST_REPO_DESCRIPTION), cache=self.package_cache)
             self.assertTrue(os.path.isfile(os.path.join(repo_dir, 'Packages')))
             self.assertTrue(os.path.isfile(os.path.join(repo_dir, 'Packages.gz')))
             self.assertTrue(os.path.isfile(os.path.join(repo_dir, 'Release')))
             with open(os.path.join(repo_dir, 'Release')) as handle:
                 fields = Deb822(handle)
                 self.assertEqual(fields['Origin'], TEST_REPO_ORIGIN)
                 self.assertEqual(fields['Description'], TEST_REPO_DESCRIPTION)
             if not apt_supports_trusted_option():
                 self.assertTrue(os.path.isfile(os.path.join(repo_dir, 'Release.gpg')))
             return repo_dir
Example #7
    def setUp(self):
        super(ImportTestCase, self).setUp()
        self.url = reverse_course_url('import_handler', self.course.id)
        self.content_dir = path(tempfile.mkdtemp())

        def touch(name):
            """ Equivalent to shell's 'touch'"""
            with file(name, 'a'):
                os.utime(name, None)

        # Create tar test files -----------------------------------------------
        # OK course:
        good_dir = tempfile.mkdtemp(dir=self.content_dir)
        os.makedirs(os.path.join(good_dir, "course"))
        with open(os.path.join(good_dir, "course.xml"), "w+") as f:
            f.write('<course url_name="2013_Spring" org="EDx" course="0.00x"/>')

        with open(os.path.join(good_dir, "course", "2013_Spring.xml"), "w+") as f:
            f.write('<course></course>')

        self.good_tar = os.path.join(self.content_dir, "good.tar.gz")
        with tarfile.open(self.good_tar, "w:gz") as gtar:
            gtar.add(good_dir)

        # Bad course (no 'course.xml' file):
        bad_dir = tempfile.mkdtemp(dir=self.content_dir)
        touch(os.path.join(bad_dir, "bad.xml"))
        self.bad_tar = os.path.join(self.content_dir, "bad.tar.gz")
        with tarfile.open(self.bad_tar, "w:gz") as btar:
            btar.add(bad_dir)

        self.unsafe_common_dir = path(tempfile.mkdtemp(dir=self.content_dir))
Example #8
    def __init__(self, flags=None):
        super(Agent, self).__init__()

        if Agent.count > 0:
            raise CLIException("Creating more than one agent"
                               " is currently not possible")

        if flags is None:
            flags = {}

        if "ip" not in flags:
            flags["ip"] = TEST_AGENT_IP
        if "port" not in flags:
            flags["port"] = TEST_AGENT_PORT
        if "master" not in flags:
            flags["master"] = "{ip}:{port}".format(
                ip=TEST_MASTER_IP,
                port=TEST_MASTER_PORT)
        if "work_dir" not in flags:
            flags["work_dir"] = tempfile.mkdtemp()
        if "runtime_dir" not in flags:
            flags["runtime_dir"] = tempfile.mkdtemp()
        # Disabling systemd support on Linux to run without sudo.
        if "linux" in sys.platform and "systemd_enable_support" not in flags:
            flags["systemd_enable_support"] = "false"

        self.flags = flags
        self.name = "agent"
        self.addr = "{ip}:{port}".format(ip=flags["ip"], port=flags["port"])
        self.executable = os.path.join(
            CLITestCase.MESOS_BUILD_DIR,
            "bin",
            "mesos-{name}.sh".format(name=self.name))
        self.shell = True
Example #9
    def setUp(self):
        # Setup workspace
        tmpdir1 = os.path.realpath(tempfile.mkdtemp())
        tmpdir2 = os.path.realpath(tempfile.mkdtemp())
        os.makedirs(os.path.join(tmpdir1, 'swift'))

        self.workspace = Workspace(source_root=tmpdir1,
                                   build_root=tmpdir2)

        # Setup toolchain
        self.toolchain = host_toolchain()
        self.toolchain.cc = '/path/to/cc'
        self.toolchain.cxx = '/path/to/cxx'

        # Setup args
        self.args = argparse.Namespace(
            enable_tsan_runtime=False,
            compiler_vendor='none',
            swift_compiler_version=None,
            clang_compiler_version=None,
            swift_user_visible_version=None,
            darwin_deployment_version_osx="10.9",
            benchmark=False,
            benchmark_num_onone_iterations=3,
            benchmark_num_o_iterations=3,
            enable_sil_ownership=False)

        # Setup shell
        shell.dry_run = True
        self._orig_stdout = sys.stdout
        self._orig_stderr = sys.stderr
        self.stdout = StringIO()
        self.stderr = StringIO()
        sys.stdout = self.stdout
        sys.stderr = self.stderr
Example #10
def build_partial_mar(to_mar_url, to_mar_hash, from_mar_url, from_mar_hash,
                      identifier, channel_id, product_version):
    """ Function that returns the partial MAR file to transition from the mar
        given by from_mar_url to to_mar_url
    """
    log.debug('Creating temporary working directories')
    TMP_MAR_STORAGE = tempfile.mkdtemp(prefix='mar_')
    TMP_WORKING_DIR = tempfile.mkdtemp(prefix='wd_')
    log.debug('MAR storage: %s', TMP_MAR_STORAGE)
    log.debug('Working dir storage: %s', TMP_WORKING_DIR)

    to_mar = os.path.join(TMP_MAR_STORAGE, 'new.mar')
    from_mar = os.path.join(TMP_MAR_STORAGE, 'old.mar')

    log.info('Looking up the complete MARs required')
    get_complete_mar(to_mar_url, to_mar_hash, to_mar)
    get_complete_mar(from_mar_url, from_mar_hash, from_mar)

    log.info('Creating cache connections')
    try:
        partial_file = generate_partial_mar(
            to_mar, from_mar, channel_id, product_version,
            working_dir=TMP_WORKING_DIR)
        log.debug('Partial MAR generated at %s', partial_file)
    except:
        cache.delete('partial', identifier)
        raise

    log.info('Saving partial MAR %s to cache with key %s', partial_file,
             identifier)
    cache.save(partial_file, 'partial', identifier, isfilename=True)
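The two mkdtemp() directories created above (TMP_MAR_STORAGE and TMP_WORKING_DIR) are never removed, so each call leaves scratch data behind. A hedged sketch of the same setup with guaranteed cleanup:

import shutil
import tempfile

tmp_mar_storage = tempfile.mkdtemp(prefix='mar_')
tmp_working_dir = tempfile.mkdtemp(prefix='wd_')
try:
    pass  # ... fetch the complete MARs and generate the partial MAR here ...
finally:
    # always remove both scratch directories, even if generation fails
    shutil.rmtree(tmp_mar_storage, ignore_errors=True)
    shutil.rmtree(tmp_working_dir, ignore_errors=True)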
Example #11
def test_load_fail():
    # 1. test bad file path
    # 2. test non-json file
    # 3. test bad extensions
    # 4. test bad codecs

    def __test(filename, fmt):
        jams.load(filename, fmt=fmt)

    # Make a non-existent file
    tdir = tempfile.mkdtemp()
    yield raises(IOError)(__test), os.path.join(tdir, 'nonexistent.jams'), 'jams'
    os.rmdir(tdir)

    # Make a non-json file
    tdir = tempfile.mkdtemp()
    badfile = os.path.join(tdir, 'nonexistent.jams')
    with open(badfile, mode='w') as fp:
        fp.write('some garbage')
    yield raises(ValueError)(__test), os.path.join(tdir, 'nonexistent.jams'), 'jams'
    os.unlink(badfile)
    os.rmdir(tdir)

    tdir = tempfile.mkdtemp()
    for ext in ['txt', '']:
        badfile = os.path.join(tdir, 'nonexistent')
        yield raises(jams.ParameterError)(__test), '{:s}.{:s}'.format(badfile, ext), 'auto'
        yield raises(jams.ParameterError)(__test), '{:s}.{:s}'.format(badfile, ext), ext
        yield raises(jams.ParameterError)(__test), '{:s}.jams'.format(badfile), ext
    os.rmdir(tdir)
Example #12
    def test_prep_sffs_in_dir_FLX(self):
        """test_prep_sffs_in_dir should convert to FLX read lengths."""
        output_dir = tempfile.mkdtemp()
        gz_output_dir = tempfile.mkdtemp()

        prep_sffs_in_dir(
            self.sff_dir, output_dir, make_flowgram=True, convert_to_flx=True)
        prep_sffs_in_dir(
            self.gz_sff_dir, gz_output_dir, make_flowgram=True, convert_to_flx=True)

        fna_fp = os.path.join(output_dir, 'test_FLX.fna')
        fna_gz_fp = os.path.join(gz_output_dir, 'test_gz_FLX.fna')
        self.assertEqual(open(fna_fp).read(), fna_txt)
        self.assertEqual(open(fna_gz_fp).read(), fna_txt)

        qual_fp = os.path.join(output_dir, 'test_FLX.qual')
        qual_gz_fp = os.path.join(gz_output_dir, 'test_gz_FLX.qual')
        self.assertEqual(open(qual_fp).read(), qual_txt)
        self.assertEqual(open(qual_gz_fp).read(), qual_txt)

        flow_fp = os.path.join(output_dir, 'test_FLX.txt')
        flow_gz_fp = os.path.join(gz_output_dir, 'test_gz_FLX.txt')
        self.assertEqual(open(flow_fp).read(), flx_flow_txt)
        self.assertEqual(open(flow_gz_fp).read(), flx_flow_txt)

        shutil.rmtree(output_dir)
        shutil.rmtree(gz_output_dir)
Example #13
    def test_prep_sffs_in_dir_no_trim(self):
        """test_prep_sffs_in_dir should use the no_trim option only if sffinfo exists."""
        output_dir = tempfile.mkdtemp()
        gz_output_dir = tempfile.mkdtemp()

        try:
            check_sffinfo()
            perform_test = True
        except:
            perform_test = False

        if perform_test:
            prep_sffs_in_dir(self.sff_dir, output_dir, make_flowgram=False,
                             convert_to_flx=False, use_sfftools=True,
                             no_trim=True)

            fna_fp = os.path.join(output_dir, 'test.fna')

            self.assertEqual(open(fna_fp).read(), fna_notrim_txt)

            qual_fp = os.path.join(output_dir, 'test.qual')
            self.assertEqual(open(qual_fp).read(), qual_notrim_txt)

            self.assertRaisesRegexp(TypeError, "gzipped SFF", prep_sffs_in_dir,
                                    self.gz_sff_dir, gz_output_dir, make_flowgram=False,
                                    convert_to_flx=False, use_sfftools=True,
                                    no_trim=True)

            shutil.rmtree(output_dir)
            shutil.rmtree(gz_output_dir)
Example #14
    def testSendOnlyToLitleSFTP_WithPreviouslyCreatedFile(self):
        requestFileName = "litleSdk-testBatchFile-testSendOnlyToLitleSFTP_WithPreviouslyCreatedFile.xml"
        request = litleBatchFileRequest(requestFileName)
        requestFile = request.requestFile.name
        self.assertTrue(os.path.exists(requestFile))
        configFromFile = request.config
        self.assertEqual('prelive.litle.com', configFromFile.batchHost)
        self.assertEqual('15000', configFromFile.batchPort)
        requestDir = configFromFile.batchRequestFolder
        responseDir = configFromFile.batchResponseFolder
        self.prepareTestRequest(request)
        request.prepareForDelivery()
        self.assertTrue(os.path.exists(requestFile))
        self.assertTrue(os.path.getsize(requestFile) > 0)

        newRequestDir = os.path.join(tempfile.gettempdir(), 'request')
        if not os.path.exists(newRequestDir):
            os.makedirs(newRequestDir)
        newRequestFileName = 'litle.xml'
        shutil.copyfile(requestFile, newRequestDir + '/' + newRequestFileName)
        configForRequest2 = copy.deepcopy(configFromFile)
        configForRequest2.batchRequestFolder = newRequestDir

        request2 = litleBatchFileRequest(newRequestFileName, configForRequest2)
        request2.sendRequestOnlyToSFTP(True)

        request3 = litleBatchFileRequest(newRequestFileName, configForRequest2)
        response = request3.retrieveOnlyFromSFTP()

        self.assertPythonApi(request3, response)

        self.assertGeneratedFiles(newRequestDir, responseDir, newRequestFileName, request3)
Example #15
    def test_ensure_tree(self):

        # Create source tree.

        # Create a pair of tempfile.
        tempdir_1, tempdir_2 = tempfile.mkdtemp(), tempfile.mkdtemp()

        with open(tempdir_1 + '/file1', 'w') as fd:
            fd.write('content1')

        os.mkdir(tempdir_1 + '/dir')
        with open(tempdir_1 + '/dir/file2', 'w') as fd:
            fd.write('content2')

        from espresso.helpers import fs

        # Ensure tree.
        fs.ensure_tree(tempdir_2, tempdir_1)

        # Assert for existence of paths.
        self.assertTrue(os.path.exists(tempdir_2 + '/dir'))
        self.assertTrue(os.path.exists(tempdir_2 + '/file1'))
        self.assertTrue(os.path.exists(tempdir_2 + '/dir/file2'))

        # Assert files and dirs
        self.assertTrue(os.path.isdir(tempdir_2 + '/dir'))
        self.assertTrue(os.path.isfile(tempdir_2 + '/file1'))
        self.assertTrue(os.path.isfile(tempdir_2 + '/dir/file2'))

        # Assert for content in files.
        with open(tempdir_2 + '/file1') as fd:
            self.assertEqual(fd.read(), 'content1')

        with open(tempdir_2 + '/dir/file2') as fd:
            self.assertEqual(fd.read(), 'content2')
Example #16
    def runTest(self):
        # Run all tests in same environment, mounting and umounting
        # just takes too long otherwise

        self.mkfs()
        self.mount()
        self.tst_chown()
        self.tst_link()
        self.tst_mkdir()
        self.tst_mknod()
        self.tst_readdir()
        self.tst_statvfs()
        self.tst_symlink()
        self.tst_truncate()
        self.tst_truncate_nocache()
        self.tst_write()
        self.umount()
        self.fsck()
        
        # Empty cache
        shutil.rmtree(self.cache_dir)
        self.cache_dir = tempfile.mkdtemp()
        
        self.mount()
        self.umount()
        
        # Empty cache
        shutil.rmtree(self.cache_dir)
        self.cache_dir = tempfile.mkdtemp()
        self.fsck()
Example #17
 def setUp(self):
     self.temp_dir = tempfile.mkdtemp()
     self.storage = self.storage_class(location=self.temp_dir,
         base_url='/test_media_url/')
     # Set up a second temporary directory which is ensured to have a mixed
     # case name.
     self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
Example #18
    def setUp(self):

        from invenio_utils.vcs.svn import svn_exists, get_which_svn
        if not svn_exists():
            from unittest import SkipTest
            raise SkipTest("SVN not found. It probably needs installing.")

        self.which_svn = get_which_svn()

        self.svntest = mkdtemp(dir=cfg['CFG_TMPDIR'])
        self.repo = path.join(self.svntest, 'temprepo', '')
        self.src = path.join(self.svntest, 'tempsrc', '')
        self.archive_dir = path.join(mkdtemp(dir=cfg['CFG_TMPDIR']), '')
        self.archive_path = path.join(self.archive_dir, 'test.tar.gz')

        chdir(self.svntest)
        call([which('svnadmin'), '--fs-type', 'fsfs', 'create', self.repo])
        call([self.which_svn, 'co', 'file://' + self.repo, self.src])
        chdir(self.src)
        call([self.which_svn, 'mkdir', 'trunk', 'tags', 'branches'])
        call([self.which_svn, 'commit', '-m', "'Initial import'"])
        chdir(self.svntest)
        chdir(self.src + 'trunk')
        call(['touch', 'test.txt'])
        call([self.which_svn, 'add', 'test.txt'])
        call([self.which_svn, 'commit', '-m', "'test.txt added'"])
        call([self.which_svn, 'copy', 'file://' + self.repo + 'trunk',
              'file://' + self.repo + 'tags/release-1', '-m', "'release1'"])
        chdir(self.src + 'trunk')
        call([self.which_svn, 'update'])
        call(['touch', 'test2.txt'])
        call([self.which_svn, 'add', 'test2.txt'])
        call([self.which_svn, 'commit', '-m', "'2nd version'"])
        call([self.which_svn, 'copy', 'file://' + self.repo + 'trunk',
              'file://' + self.repo + 'tags/release-2', '-m', "'release2'"])
Example #19
def install_from_source(setuptools_source, pip_source):
    setuptools_temp_dir = tempfile.mkdtemp('-setuptools', 'ptvs-')
    pip_temp_dir = tempfile.mkdtemp('-pip', 'ptvs-')
    cwd = os.getcwd()

    try:
        os.chdir(setuptools_temp_dir)
        print('Downloading setuptools from ' + setuptools_source)
        sys.stdout.flush()
        setuptools_package, _ = urlretrieve(setuptools_source, 'setuptools.tar.gz')

        package = tarfile.open(setuptools_package)
        try:
            safe_members = [m for m in package.getmembers() if not m.name.startswith(('..', '\\'))]
            package.extractall(setuptools_temp_dir, members=safe_members)
        finally:
            package.close()

        extracted_dirs = [d for d in os.listdir(setuptools_temp_dir) if os.path.exists(os.path.join(d, 'setup.py'))]
        if not extracted_dirs:
            raise OSError("Failed to find setuptools's setup.py")
        extracted_dir = extracted_dirs[0]

        print('\nInstalling from ' + extracted_dir)
        sys.stdout.flush()
        os.chdir(extracted_dir)
        subprocess.check_call(
            EXECUTABLE + ['setup.py', 'install', '--single-version-externally-managed', '--record', 'setuptools.txt']
        )

        os.chdir(pip_temp_dir)
        print('Downloading pip from ' + pip_source)
        sys.stdout.flush()
        pip_package, _ = urlretrieve(pip_source, 'pip.tar.gz')

        package = tarfile.open(pip_package)
        try:
            safe_members = [m for m in package.getmembers() if not m.name.startswith(('..', '\\'))]
            package.extractall(pip_temp_dir, members=safe_members)
        finally:
            package.close()

        extracted_dirs = [d for d in os.listdir(pip_temp_dir) if os.path.exists(os.path.join(d, 'setup.py'))]
        if not extracted_dirs:
            raise OSError("Failed to find pip's setup.py")
        extracted_dir = extracted_dirs[0]

        print('\nInstalling from ' + extracted_dir)
        sys.stdout.flush()
        os.chdir(extracted_dir)
        subprocess.check_call(
            EXECUTABLE + ['setup.py', 'install', '--single-version-externally-managed', '--record', 'pip.txt']
        )

        print('\nInstallation Complete')
        sys.stdout.flush()
    finally:
        os.chdir(cwd)
        shutil.rmtree(setuptools_temp_dir, ignore_errors=True)
        shutil.rmtree(pip_temp_dir, ignore_errors=True)
Example #20
def create_temp_dir(prefix=None):
    """ Create temporary directory with optional prefix.
    """
    if prefix is None:
        return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME))
    else:
        return tempfile.mkdtemp(prefix=prefix)
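tempfile.mkdtemp() never deletes what it creates, so callers of this helper own the cleanup. A hypothetical usage sketch:

import shutil

build_dir = create_temp_dir()
try:
    pass  # ... write build artifacts into build_dir ...
finally:
    shutil.rmtree(build_dir, ignore_errors=True)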
Example #21
 def setUp(self):
     super(TestPelican, self).setUp()
     self.temp_path = mkdtemp(prefix='pelicantests.')
     self.temp_cache = mkdtemp(prefix='pelican_cache.')
     self.maxDiff = None
     self.old_locale = locale.setlocale(locale.LC_ALL)
     locale.setlocale(locale.LC_ALL, str('C'))
Example #22
    def test_transform_function_serializer_failure(self):
        inputd = tempfile.mkdtemp()
        self.cpd = tempfile.mkdtemp("test_transform_function_serializer_failure")

        def setup():
            conf = SparkConf().set("spark.default.parallelism", 1)
            sc = SparkContext(conf=conf)
            ssc = SnappyStreamingContext(sc, 0.5)

            # A function that cannot be serialized
            def process(time, rdd):
                sc.parallelize(range(1, 10))

            ssc.textFileStream(inputd).foreachRDD(process)
            return ssc

        self.ssc = SnappyStreamingContext.getOrCreate(self.cpd, setup)
        try:
            self.ssc.start()
        except:
            import traceback
            failure = traceback.format_exc()
            self.assertTrue(
                    "It appears that you are attempting to reference SparkContext" in failure)
            return

        self.fail("using SparkContext in process should fail because it's not Serializable")
Example #23
 def _runEvaluator(self, predFilePath, goldPath, tempDir=None):
     evaluatorDir = os.path.join(Settings.DATAPATH, "tools", "evaluators", "ChemProtEvaluator")
     removeTemp = False
     if tempDir is None:
         tempDir = tempfile.mkdtemp()
         removeTemp = True
     print >> sys.stderr, "Using temporary evaluation directory", tempDir
     evaluatorTempDir = os.path.join(tempDir, "ChemProtEvaluator")
     shutil.copytree(evaluatorDir, evaluatorTempDir)
     currentDir = os.getcwd()
     os.chdir(evaluatorTempDir)
     command = "java -cp bc6chemprot_eval.jar org.biocreative.tasks.chemprot.main.Main " + os.path.abspath(predFilePath) + " " + goldPath
     print >> sys.stderr, "Running CP17 evaluator: " + command
     p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     for s in ["".join(x.readlines()).strip() for x in (p.stderr, p.stdout)]:
         if s != "":
             print >> sys.stderr, s
     os.chdir(currentDir)
     results = {}
     with open(os.path.join(evaluatorTempDir, "out", "eval.txt"), "rt") as f:
         for line in f:
             if ":" in line:
                 print >> sys.stderr, line.strip()
                 key, value = [x.strip() for x in line.split(":")]
                 value = float(value) if ("." in value or value == "NaN") else int(value)
                 assert key not in results
                 results[key] = value
     if removeTemp:
         print >> sys.stderr, "Removing temporary evaluation directory", tempDir
         shutil.rmtree(tempDir)
     return results
Example #24
    def test_unarchive(self):
        import zipfile, tarfile

        good_archives = (
            ('good.zip', zipfile.ZipFile, 'r', 'namelist'),
            ('good.tar', tarfile.open, 'r', 'getnames'),
            ('good.tar.gz', tarfile.open, 'r:gz', 'getnames'),
            ('good.tar.bz2', tarfile.open, 'r:bz2', 'getnames'),
        )
        bad_archives = ('bad.zip', 'bad.tar', 'bad.tar.gz', 'bad.tar.bz2')

        for name, cls, mode, lister in good_archives:
            td = tempfile.mkdtemp()
            archive = None
            try:
                name = os.path.join(HERE, name)
                unarchive(name, td)
                archive = cls(name, mode)
                names = getattr(archive, lister)()
                for name in names:
                    p = os.path.join(td, name)
                    self.assertTrue(os.path.exists(p))
            finally:
                shutil.rmtree(td)
                if archive:
                    archive.close()

        for name in bad_archives:
            name = os.path.join(HERE, name)
            td = tempfile.mkdtemp()
            try:
                self.assertRaises(ValueError, unarchive, name, td)
            finally:
                shutil.rmtree(td)
Example #25
def rm_rf(path, verbose=False):
    if not on_win and islink(path):
        # Note that we have to check if the destination is a link because
        # exists('/path/to/dead-link') will return False, although
        # islink('/path/to/dead-link') is True.
        if verbose:
            print "Removing: %r (link)" % path
        os.unlink(path)

    elif isfile(path):
        if verbose:
            print "Removing: %r (file)" % path
        if on_win:
            try:
                os.unlink(path)
            except (WindowsError, IOError):
                os.rename(path, join(tempfile.mkdtemp(), basename(path)))
        else:
            os.unlink(path)

    elif isdir(path):
        if verbose:
            print "Removing: %r (directory)" % path
        if on_win:
            try:
                shutil.rmtree(path)
            except (WindowsError, IOError):
                os.rename(path, join(tempfile.mkdtemp(), basename(path)))
        else:
            shutil.rmtree(path)
Example #26
    def begin(self):
        #
        # We monkey-patch javabridge.start_vm here in order to
        # set up the ImageJ event bus (actually 
        # org.bushe.swing.event.ThreadSafeEventService) to not start
        # its cleanup thread, which is semi-buggy: it hangs around
        # forever and prevents Java from exiting.
        #
        def patch_start_vm(*args, **kwargs):
            result = start_vm(*args, **kwargs)
            if javabridge.get_env() is not None:
                try:
                    event_service_cls = javabridge.JClassWrapper(
                        "org.bushe.swing.event.ThreadSafeEventService")
                    event_service_cls.CLEANUP_PERIOD_MS_DEFAULT = None
                except:
                    pass
            return result
        patch_start_vm.func_globals["start_vm"] = javabridge.start_vm
        javabridge.start_vm = patch_start_vm
        if "CP_EXAMPLEIMAGES" in os.environ:
            self.temp_exampleimages = None
        else:
            self.temp_exampleimages = tempfile.mkdtemp(prefix="cpexampleimages")

        if "CP_TEMPIMAGES" in os.environ:
            self.temp_images = None
        else:
            self.temp_images = tempfile.mkdtemp(prefix="cptempimages")
Example #27
def test_switch_cache():
    """Test changing cache directory while extension is active."""
    global _counter

    dir1 = tempfile.mkdtemp(prefix='mdp-tmp-joblib-cache.',
                            dir=py.test.mdp_tempdirname)
    dir2 = tempfile.mkdtemp(prefix='mdp-tmp-joblib-cache.',
                            dir=py.test.mdp_tempdirname)
    x = mdp.numx.array([[10]], dtype='d')

    mdp.caching.activate_caching(cachedir=dir1)

    node = _CounterNode()
    _counter = 0
    node.execute(x)
    assert _counter == 1
    node.execute(x)
    assert _counter == 1

    # now change path
    mdp.caching.set_cachedir(cachedir=dir2)
    node.execute(x)
    assert _counter == 2
    node.execute(x)
    assert _counter == 2

    mdp.caching.deactivate_caching()
Example #28
    def setUp(self):
        # Setup workspace
        tmpdir1 = os.path.realpath(tempfile.mkdtemp())
        tmpdir2 = os.path.realpath(tempfile.mkdtemp())
        os.makedirs(os.path.join(tmpdir1, "llvm"))

        self.workspace = Workspace(source_root=tmpdir1, build_root=tmpdir2)

        # Setup toolchain
        self.toolchain = host_toolchain()
        self.toolchain.cc = "/path/to/cc"
        self.toolchain.cxx = "/path/to/cxx"

        # Setup args
        self.args = argparse.Namespace(
            llvm_targets_to_build="X86;ARM;AArch64;PowerPC;SystemZ",
            llvm_assertions="true",
            compiler_vendor="none",
            clang_compiler_version=None,
            clang_user_visible_version=None,
            darwin_deployment_version_osx="10.9",
        )

        # Setup shell
        shell.dry_run = True
        self._orig_stdout = sys.stdout
        self._orig_stderr = sys.stderr
        self.stdout = StringIO()
        self.stderr = StringIO()
        sys.stdout = self.stdout
        sys.stderr = self.stderr
Example #29
 def test_extract_val_binary(self):
     st = create_setting()
     st.develop = False
     # create tmp dirs for extraction output
     st.inst_dir = tempfile.mkdtemp()
     st.true_dir = tempfile.mkdtemp()
     st.binary = True
     
     extract(st)
     
     # check no of files
     self.assertEqual(len(st.val_true_fns), 
                      len(st.val_part_fns))
     self.assertEqual(len(st.val_inst_fns), 
                      len(st.val_part_fns))
     
     # test loading a corpus file
     corpus = ParallelGraphCorpus(inf=st.val_true_fns[0])
     
     # test loading an instances file
     inst = CorpusInst()
     inst.loadbin(st.val_inst_fns[0])
     self.assertEqual(len(corpus), len(inst))
     
     clean_inst(st)
     clean_true(st)
Example #30
    def test_subreadset_consolidate(self):
        log.debug("Test methods directly")
        aln = SubreadSet(data.getXml(10), data.getXml(13))
        self.assertEqual(len(aln.toExternalFiles()), 2)
        outdir = tempfile.mkdtemp(suffix="dataset-unittest")
        outfn = os.path.join(outdir, 'merged.bam')
        consolidateBams(aln.toExternalFiles(), outfn, filterDset=aln)
        self.assertTrue(os.path.exists(outfn))
        consAln = SubreadSet(outfn)
        self.assertEqual(len(consAln.toExternalFiles()), 1)
        for read1, read2 in zip(sorted(list(aln)), sorted(list(consAln))):
            self.assertEqual(read1, read2)
        self.assertEqual(len(aln), len(consAln))

        log.debug("Test through API")
        aln = SubreadSet(data.getXml(10), data.getXml(13))
        self.assertEqual(len(aln.toExternalFiles()), 2)
        outdir = tempfile.mkdtemp(suffix="dataset-unittest")
        outfn = os.path.join(outdir, 'merged.bam')
        aln.consolidate(outfn)
        self.assertTrue(os.path.exists(outfn))
        self.assertEqual(len(aln.toExternalFiles()), 1)
        nonCons = SubreadSet(data.getXml(10), data.getXml(13))
        self.assertEqual(len(nonCons.toExternalFiles()), 2)
        for read1, read2 in zip(sorted(list(aln)), sorted(list(nonCons))):
            self.assertEqual(read1, read2)
        self.assertEqual(len(aln), len(nonCons))
Example #31
    def _install_addon(self, path, unpack=False):
        addons = [path]

        # if path is not an add-on, try to install all contained add-ons
        try:
            self.addon_details(path)
        except AddonFormatError as e:
            module_logger.warning("Could not install %s: %s" % (path, str(e)))

            # If the path doesn't exist, then we don't really care, just return
            if not os.path.isdir(path):
                return

            addons = [
                os.path.join(path, x) for x in os.listdir(path)
                if self.is_addon(os.path.join(path, x))
            ]
            addons.sort()

        # install each addon
        for addon in addons:
            # determine the addon id
            addon_details = self.addon_details(addon)
            addon_id = addon_details.get("id")

            # if the add-on has to be unpacked force it now
            # note: we might want to let Firefox do it in case of addon details
            orig_path = None
            if os.path.isfile(addon) and (unpack or addon_details["unpack"]):
                orig_path = addon
                addon = tempfile.mkdtemp()
                mozfile.extract(orig_path, addon)

            # copy the addon to the profile
            extensions_path = os.path.join(self.profile, "extensions")
            addon_path = os.path.join(extensions_path, addon_id)

            if os.path.isfile(addon):
                addon_path += ".xpi"

                # move existing xpi file to backup location to restore later
                if os.path.exists(addon_path):
                    self.backup_dir = self.backup_dir or tempfile.mkdtemp()
                    shutil.move(addon_path, self.backup_dir)

                # copy new add-on to the extension folder
                if not os.path.exists(extensions_path):
                    os.makedirs(extensions_path)
                shutil.copy(addon, addon_path)
            else:
                # move existing folder to backup location to restore later
                if os.path.exists(addon_path):
                    self.backup_dir = self.backup_dir or tempfile.mkdtemp()
                    shutil.move(addon_path, self.backup_dir)

                # copy new add-on to the extension folder
                shutil.copytree(addon, addon_path, symlinks=True)

            # if we had to extract the addon, remove the temporary directory
            if orig_path:
                mozfile.remove(addon)
                addon = orig_path

            self._addons.append(addon_id)
            self.installed_addons.append(addon)
Example #32
    def testTrtGraphConverter_Int8Conversion_v2(self):

        np_input1, np_input2 = self._RandomInput([4, 1, 1])

        # Create a model and save it.
        input_saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
        root = self._GetModelForV2()
        expected_output = root.run(np_input1, np_input2)
        save.save(root, input_saved_model_dir,
                  {_SAVED_MODEL_SIGNATURE_KEY: root.run})

        # Run TRT conversion.
        converter = self._CreateConverterV2(
            input_saved_model_dir,
            precision_mode=trt_convert.TrtPrecisionMode.INT8,
            maximum_cached_engines=3)

        # Convert and perform INT8 calibration
        def _CalibrationInputFn():
            yield np_input1, np_input2

        converter.convert(calibration_input_fn=_CalibrationInputFn)

        trt_engine_name = self._GetUniqueTRTEngineOp(
            converter._converted_graph_def).name

        def _CheckFn(node):
            self.assertTrue(len(node.attr["calibration_data"].s), node.name)

        # Verify the converted GraphDef.
        self._CheckTrtOps(converter._converted_func, _CheckFn)  # pylint: disable=protected-access

        # Build another engine with different batch size.
        def _InputFn():
            yield self._RandomInput([5, 1, 1])

        converter.build(input_fn=_InputFn)

        # Save the converted model.
        # TODO(laigd): check that it should contain two engines.
        output_saved_model_dir = self.mkdtemp()
        converter.save(output_saved_model_dir)
        expected_asset_file = os.path.join(
            output_saved_model_dir,
            "assets/trt-serialized-engine." + trt_engine_name)
        self.assertTrue(os.path.exists(expected_asset_file))
        self.assertTrue(os.path.getsize(expected_asset_file))

        del converter
        gc.collect()  # Force GC to destroy the TRT engine cache.

        # Load and verify the converted model.
        root_with_trt = load.load(output_saved_model_dir)
        converted_signature = root_with_trt.signatures[
            _SAVED_MODEL_SIGNATURE_KEY]
        self._CheckTrtOps(converted_signature, _CheckFn)
        output_with_trt = converted_signature(
            inp1=ops.convert_to_tensor(np_input1),
            inp2=ops.convert_to_tensor(np_input2))
        self.assertEqual(1, len(output_with_trt))
        # The output of running the converted signature is a dict due to
        # compatibility reasons with V1 SavedModel signature mechanism.
        self.assertAllClose(expected_output,
                            list(output_with_trt.values())[0],
                            atol=1e-6,
                            rtol=1e-6)

        # Run with an input of different batch size. It should build a new engine
        # using calibration table.
        # TODO(laigd): check that it should contain three engines.
        np_input1, np_input2 = self._RandomInput([6, 1, 1])
        converted_signature(inp1=ops.convert_to_tensor(np_input1),
                            inp2=ops.convert_to_tensor(np_input2))

        del root_with_trt
        gc.collect()  # Force GC to destroy the TRT engine cache.
Example #33
 def setUp(self):
     self.tmp_folder = mkdtemp()
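This setUp only creates the directory; nothing shown here removes it. A hypothetical matching tearDown (assuming shutil is imported) would be:

 def tearDown(self):
     # remove the directory created in setUp
     shutil.rmtree(self.tmp_folder, ignore_errors=True)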
Example #34
 def setUp(self):
     super(self.__class__, self).setUp()
     self.tempdir = tempfile.mkdtemp('_green_pipe_test')
Example #35
def build(runas,
          tgt,
          dest_dir,
          spec,
          sources,
          deps,
          env,
          template,
          saltenv='base',
          log_dir='/var/log/salt/pkgbuild'):  # pylint: disable=unused-argument
    '''
    Given the package destination directory, the tarball containing debian files (e.g. control)
    and package sources, use pbuilder to safely build the platform package

    CLI Example:

    **Debian**

    .. code-block:: bash

        salt '*' pkgbuild.make_src_pkg deb-8-x86_64 /var/www/html https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/deb/python-libnacl.control https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz

    This example command should build the libnacl package for Debian using pbuilder
    and place it in /var/www/html/ on the minion
    '''
    ret = {}
    try:
        os.makedirs(dest_dir)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    dsc_dir = tempfile.mkdtemp()
    try:
        dscs = make_src_pkg(dsc_dir, spec, sources, env, template, saltenv)
    except Exception as exc:
        shutil.rmtree(dsc_dir)
        log.error('Failed to make src package: {0}'.format(exc))
        return ret

    cmd = 'pbuilder --create'
    __salt__['cmd.run'](cmd, runas=runas, python_shell=True)

    # use default /var/cache/pbuilder/result
    results_dir = '/var/cache/pbuilder/result'

    # dscs should only contain salt orig and debian tarballs and dsc file
    for dsc in dscs:
        afile = os.path.basename(dsc)
        adist = os.path.join(dest_dir, afile)

        if dsc.endswith('.dsc'):
            dbase = os.path.dirname(dsc)
            try:
                __salt__['cmd.run']('chown {0} -R {1}'.format(runas, dbase))

                cmd = 'pbuilder --update --override-config'
                __salt__['cmd.run'](cmd, runas=runas, python_shell=True)

                cmd = 'pbuilder --build {0}'.format(dsc)
                __salt__['cmd.run'](cmd, runas=runas, python_shell=True)

                # ignore local deps generated package file
                for bfile in os.listdir(results_dir):
                    if bfile != 'Packages':
                        full = os.path.join(results_dir, bfile)
                        bdist = os.path.join(dest_dir, bfile)
                        shutil.copy(full, bdist)
                        ret.setdefault('Packages', []).append(bdist)

            except Exception as exc:
                log.error('Error building from {0}: {1}'.format(dsc, exc))

    # remove any Packages file created for local dependency processing
    for pkgzfile in os.listdir(dest_dir):
        if pkgzfile == 'Packages':
            pkgzabsfile = os.path.join(dest_dir, pkgzfile)
            os.remove(pkgzabsfile)

    shutil.rmtree(dsc_dir)
    return ret
Example #36
def _mk_tree():
    '''
    Create the debian build area
    '''
    basedir = tempfile.mkdtemp()
    return basedir
Example #37
 def setUp(self):
     """Overriding setUp function to create temp workspace directory."""
     # this lets us delete the workspace after it's done no matter
     # the test result
     self.workspace_dir = tempfile.mkdtemp()
Example #38
 def __init__(self):
     self.temp_dir = tempfile.mkdtemp()
     self._rmtree = shutil.rmtree
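Saving a reference to shutil.rmtree on the instance is a common defensive idiom: cleanup that runs during interpreter shutdown (for example from __del__) may find module globals already cleared, but the saved reference still works. A hypothetical companion method using it:

 def __del__(self):
     # use the cached reference; the shutil global may be gone by now
     self._rmtree(self.temp_dir, ignore_errors=True)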
Example #39
    zip_f = zipfile.ZipFile(temp_zip_path)
    zip_f.extractall(temp_dir)

    plugin_temp_path = path.join(temp_dir, '%s-master' % plugin_name)

    # Remove the current plugin and replace it with the extracted
    plugin_dest_path = path.join(source_dir, plugin_name)

    try:
        shutil.rmtree(plugin_dest_path)
    except OSError:
        pass

    shutil.move(plugin_temp_path, plugin_dest_path)

    print 'Updated %s' % plugin_name


if __name__ == '__main__':
    temp_directory = tempfile.mkdtemp()

    try:
        for line in PLUGINS.splitlines():
            name, github_url = line.split(' ')
            zip_path = GITHUB_ZIP % github_url
            download_extract_replace(name, zip_path, temp_directory,
                                     SOURCE_DIR)
    finally:
        shutil.rmtree(temp_directory)
Example #40
from qgis.testing import (
    start_app,
    unittest,
)
from qgis.PyQt.QtCore import (
    QEventLoop,
    QUrl,
)

try:
    QGIS_SERVER_ENDPOINT_PORT = os.environ['QGIS_SERVER_ENDPOINT_PORT']
except KeyError:
    QGIS_SERVER_ENDPOINT_PORT = '0'  # Auto

QGIS_AUTH_DB_DIR_PATH = tempfile.mkdtemp()

os.environ['QGIS_AUTH_DB_DIR_PATH'] = QGIS_AUTH_DB_DIR_PATH

qgis_app = start_app()


class TestAuthManager(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Run before all tests:
        Creates an auth configuration"""
        cls.port = QGIS_SERVER_ENDPOINT_PORT
        # Clean env just to be sure
        env_vars = ['QUERY_STRING', 'QGIS_PROJECT_FILE']
        for ev in env_vars:
Example #41
def update_firmware(node):
    """Performs hpsum firmware update on the node.

    This method performs hpsum firmware update by mounting the
    SPP ISO on the node. It performs firmware update on all or
    some of the firmware components.

    :param node: A node object of type dict.
    :returns: Operation Status string.
    :raises: HpsumOperationError, when the vmedia device is not found or
        when the mount operation fails or when the image validation fails.
    :raises: IloConnectionError, when the iLO connection fails.
    :raises: IloError, when vmedia eject or insert operation fails.
    """
    hpsum_update_iso = node['clean_step']['args']['firmware_images'][0].get(
        'url')

    # Validates the http image reference for hpsum update ISO.
    try:
        utils.validate_href(hpsum_update_iso)
    except exception.ImageRefValidationFailed as e:
        raise exception.HpsumOperationError(reason=e)

    # Ejects the CDROM device in the iLO and inserts the hpsum update ISO
    # to the CDROM device.
    info = node.get('driver_info')
    ilo_object = client.IloClient(info.get('ilo_address'),
                                  info.get('ilo_username'),
                                  info.get('ilo_password'))

    ilo_object.eject_virtual_media('CDROM')
    ilo_object.insert_virtual_media(hpsum_update_iso, 'CDROM')

    # Waits for the OS to detect the disk and update the label file. SPP ISO
    # is identified by matching its label.
    time.sleep(5)
    vmedia_device_dir = "/dev/disk/by-label/"
    for file in os.listdir(vmedia_device_dir):
        if fnmatch.fnmatch(file, 'SPP*'):
            vmedia_device_file = os.path.join(vmedia_device_dir, file)

    if not os.path.exists(vmedia_device_file):
        msg = "Unable to find the virtual media device for HPSUM"
        raise exception.HpsumOperationError(reason=msg)

    # Validates the SPP ISO image for any file corruption using the checksum
    # of the ISO file.
    expected_checksum = node['clean_step']['args']['firmware_images'][0].get(
        'checksum')
    try:
        utils.verify_image_checksum(vmedia_device_file, expected_checksum)
    except exception.ImageRefValidationFailed as e:
        raise exception.HpsumOperationError(reason=e)

    # Mounts SPP ISO on a temporary directory.
    vmedia_mount_point = tempfile.mkdtemp()
    try:
        try:
            processutils.execute("mount", vmedia_device_file,
                                 vmedia_mount_point)
        except processutils.ProcessExecutionError as e:
            msg = ("Unable to mount virtual media device %(device)s: "
                   "%(error)s" % {'device': vmedia_device_file, 'error': e})
            raise exception.HpsumOperationError(reason=msg)

        # Executes the hpsum based firmware update by passing the default hpsum
        # executable path and the components specified, if any.
        hpsum_file_path = os.path.join(vmedia_mount_point, HPSUM_LOCATION)
        components = node['clean_step']['args']['firmware_images'][0].get(
            'component')
        if components:
            components = components.strip().split(',')

        result = _execute_hpsum(hpsum_file_path, components=components)

        processutils.trycmd("umount", vmedia_mount_point)
    finally:
        shutil.rmtree(vmedia_mount_point, ignore_errors=True)

    return result
Example #42
def pytest_sessionstart(session):
    pytest.workdir = tempfile.mkdtemp()
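The session-scoped workdir above is created once per pytest run but never removed in this snippet. A hypothetical matching teardown hook (assuming shutil is imported):

def pytest_sessionfinish(session, exitstatus):
    # remove the scratch directory created in pytest_sessionstart
    shutil.rmtree(pytest.workdir, ignore_errors=True)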
Example #43
        hradius = hlength * 0.6

    if gap:
        diff = cpv.scale(normal, gap)
        xyz1 = cpv.sub(xyz1, diff)
        xyz2 = cpv.add(xyz2, diff)

    xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)

    obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + [
        cgo.CONE
    ] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + [1.0, 0.0]
    return obj


dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
with zipfile.ZipFile(zip_dir) as hs_zip:
    hs_zip.extractall(dirpath)

cmd.load(join(dirpath, "protein.pdb"), "protein")
cmd.show("cartoon", "protein")

if dirpath:
    f = join(dirpath, "label_threshold_10.mol2")
else:
    f = "label_threshold_10.mol2"

cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
Example #44
 def setUp(self):
     self.note = notify.Notify()
     self.dirPath = tempfile.mkdtemp()
Example #45
def main(user_args=None):
    """Execute all InVEST models using a pool of available processes."""
    if not user_args:
        user_args = sys.argv[1:]

    # Don't use a default CPU count of less than 1.
    default_cpu_count = max(multiprocessing.cpu_count()-1, 1)
    parser = argparse.ArgumentParser(
        prog='invest-autotest.py',
        description=(
            'Run through each InVEST model to verify it completes. '
            'This script is for testing purposes only.'))
    parser.add_argument(
        '--max-cpus',
        default=default_cpu_count,
        type=int,
        help=('The number of CPUs to use. '
              'Defaults to %s.') % default_cpu_count)
    parser.add_argument(
        '--binary',
        default='invest',
        help=('The path to the InVEST binary to call.  Defaults to whatever '
              'is on the PATH.'))
    parser.add_argument(
        '--cwd',
        default='sample_data',
        help=('The CWD from which to execute the models. '
              'If executing from a checked-out InVEST repo, this will probably '
              'be ./data/invest-sample-data/ or a directory at the same '
              'level. If executing from a built InVEST binary, this will be '
              'the sample_data directory.  Default value: "sample_data"'
             ))
    parser.add_argument(
        '--workspace',
        default=tempfile.mkdtemp(),
        help=('Where the output workspaces for all model runs should be '
              'stored. Default value is a new temporary directory.'))
    parser.add_argument(
        '--prefix',
        default='',
        help=('If provided, only those models that start with this value will '
              'be run.  If not provided, all models will be run.'))
    args = parser.parse_args(user_args)
    LOGGER.debug(args)
    LOGGER.info('Writing all model workspaces to %s', args.workspace)
    LOGGER.info('Running on %s CPUs', args.max_cpus)

    pairs = []
    for name, datastacks in DATASTACKS.items():
        if not name.startswith(args.prefix):
            continue

        for datastack_index, datastack in enumerate(datastacks):
            pairs.append((name, datastack, datastack_index))

    pool = multiprocessing.Pool(processes=args.max_cpus)  # cpu_count()-1
    processes = []
    for modelname, datastack, datastack_index in pairs:
        datastack = os.path.join(args.cwd, datastack)

        for headless in (True, False):
            headless_string = ''
            if headless:
                headless_string = 'headless'
            else:
                headless_string = 'gui'
            workspace = os.path.join(os.path.abspath(args.workspace),
                                     'autorun_%s_%s_%s' % (modelname,
                                                           headless_string,
                                                           datastack_index))
            process = pool.apply_async(run_model, (modelname,
                                                   args.binary,
                                                   workspace,
                                                   datastack,
                                                   headless))
            processes.append((process, datastack, headless, workspace))

    # get() blocks until the result is ready.
    model_results = {}
    for _process, _datastack, _headless, _workspace in processes:
        result = _process.get()
        model_results[(result[0], _datastack, _headless, _workspace)] = result[1:]

    # add 10 for ' (headless)'
    max_width = max([len(key[0])+11 for key in model_results.keys()])
    failures = 0

    datastack_width = max([len(key[1]) for key in model_results.keys()])

    # record all statuses, sorted by the modelname, being sure to start on a
    # new line.
    status_messages = ''
    status_messages += '\n%s %s %s\n' % (
        'MODELNAME'.ljust(max_width+1),
        'EXIT CODE'.ljust(10),  # len('EXIT CODE')+1
        'DATASTACK')
    for (modelname, datastack, headless, _), exitcode in sorted(
            model_results.items(), key=lambda x: x[0]):
        if headless:
            modelname += ' (headless)'
        status_messages += "%s %s %s\n" % (
            modelname.ljust(max_width+1),
            str(exitcode[0]).ljust(10),
            datastack)
        if exitcode[0] > 0:
            failures += 1

    if failures > 0:
        status_messages += '\n********FAILURES********\n'
        status_messages += '%s %s %s %s\n' % (
            'MODELNAME'.ljust(max_width+1),
            'EXIT CODE'.ljust(10),
            'DATASTACK'.ljust(datastack_width),
            'WORKSPACE'
        )
        for (modelname, datastack, headless, workspace), exitcode in sorted(
                [(k, v) for (k, v) in model_results.items()
                 if v[0] != 0],
                key=lambda x: x[0]):
            if headless:
                modelname += ' (headless)'
            status_messages += "%s %s %s %s\n" % (
                modelname.ljust(max_width+1),
                str(exitcode[0]).ljust(10),
                datastack.ljust(datastack_width),
                workspace
            )

    print(status_messages)
    with open(os.path.join(args.workspace, 'model_results.txt'), 'w') as log:
        log.write(status_messages)
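One subtlety in the argument setup above: default=tempfile.mkdtemp() is evaluated when add_argument() runs, so a temporary directory is created on every invocation, even when --workspace is passed explicitly. A sketch that defers creation until the default is actually needed:

    parser.add_argument(
        '--workspace',
        default=None,
        help=('Where the output workspaces for all model runs should be '
              'stored. Default value is a new temporary directory.'))
    args = parser.parse_args(user_args)
    if args.workspace is None:
        # only create the temp dir when no workspace was supplied
        args.workspace = tempfile.mkdtemp()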
Example #46
def search_mz(smp_id, celery_obj):

    smp = SearchMzParam.objects.get(id=smp_id)
    # if smp.mass_type=='mz':
    # loop through masses

    # c_peak level (ms1)

    masses = smp.masses.split('\r\n')
    # rules = [i['id'] for i in list(smp.adduct_rule.all().values('id'))]
    polarities = [i['id'] for i in list(smp.polarity.all().values('id'))]
    ms_levels_ids = [i['id'] for i in list(smp.ms_level.all().values('id'))]
    ms_levels = [i['ms_level'] for i in list(smp.ms_level.all().values('ms_level'))]
    ppm_target_tolerance = smp.ppm_target_tolerance
    ppm_library_tolerance = smp.ppm_library_tolerance

    dirpth = tempfile.mkdtemp()
    first = True
    sr = SearchResult()
    sr.searchmzparam = smp

    if 1 in ms_levels and sum(ms_levels) > 1:
        total_time = len(masses)*2
    else:
        total_time = len(masses)
    c = 0
    hc = 0
    if 1 in ms_levels:

        fnm = 'single_mz_search_result_chrom.csv'
        tmp_pth = os.path.join(dirpth, fnm)

        with open(tmp_pth, 'w') as csvfile:
            writer = csv.writer(csvfile)
            for m in masses:
                if celery_obj:
                    celery_obj.update_state(state='RUNNING',
                                            meta={'current': c,
                                                  'total': total_time,
                                                  'status':'Searching for masses (ms1 chrom)'
                                                  })
                c += 1
                hc += search_mz_chrom(float(m),
                                      float(ppm_target_tolerance),
                                      float(ppm_library_tolerance),
                                      polarities,
                                      writer,
                                      first)
                first = False
        if hc:
            sr.matches = True
        else:
            sr.matches = False

        with open(tmp_pth) as f:
            sr.chrom.save(fnm, File(f))

    if sum(ms_levels) > 1:
        first = True
        fnm = 'single_mz_search_result_frag.csv'
        tmp_pth = os.path.join(dirpth, fnm)

        with open(tmp_pth, 'w') as csvfile:
            writer = csv.writer(csvfile)
            for m in masses:
                if celery_obj:
                    celery_obj.update_state(state='RUNNING',
                                            meta={'current': c,
                                                  'total': total_time,
                                                  'status': 'Searching for masses (>ms2 scans)'})
                c += 1
                hc += search_mz_scans(float(m),
                                      float(ppm_target_tolerance),
                                      float(ppm_library_tolerance),
                                      polarities,
                                      ms_levels_ids,
                                      writer,
                                      first)
                first = False
        if hc:
            sr.matches = True
        else:
            sr.matches = False
        with open(tmp_pth) as f:
            sr.scans.save(fnm, File(f))
Exemple #47
0
 def setUp(self):
     self.temp_content = mkdtemp()
     self.temp_output = mkdtemp()
Exemple #48
0
 def mkdtemp(self, **kwargs):
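     """Create a temporary directory under self.tempdir and return its
     normalized absolute path."""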
     return self.normAbsolutePath(
         tempfile.mkdtemp(dir=self.tempdir, **kwargs))
Exemple #49
0
 def setUp(self):
     self.working_directory = tempfile.mkdtemp()
Exemple #50
0
    def test_export_dmg_distributions(self):
        cfg = helpers.demo_file("scenario_damage_risk/config.gem")
        export_target_dir = tempfile.mkdtemp()

        try:
            ret_code = helpers.run_job(cfg)
            self.assertEqual(0, ret_code)

            job = models.OqJob.objects.latest("id")

            [oasset] = models.Output.objects.filter(
                oq_job=job.id, output_type="dmg_dist_per_asset")

            [otaxon] = models.Output.objects.filter(
                oq_job=job.id, output_type="dmg_dist_per_taxonomy")

            [ototal] = models.Output.objects.filter(
                oq_job=job.id, output_type="dmg_dist_total")

            [omap] = models.Output.objects.filter(
                oq_job=job.id, output_type="collapse_map")

            calcs = helpers.prepare_cli_output(subprocess.check_output(
                ["openquake/bin/oqscript.py", "--list-calculations"]))

            # we have the calculation...
            check_list_calcs(self, calcs, job.id)

            outputs = helpers.prepare_cli_output(
                subprocess.check_output(
                    ["openquake/bin/oqscript.py", "--list-outputs",
                     str(job.id)]))

            # the damage distributions and collapse map are among the outputs...
            check_list_outputs(self, outputs, oasset.id, "dmg_dist_per_asset")
            check_list_outputs(self, outputs, ototal.id, "dmg_dist_total")
            check_list_outputs(self, outputs, omap.id, "collapse_map")
            check_list_outputs(self, outputs, otaxon.id,
                    "dmg_dist_per_taxonomy")

            # and we correctly exported the damage distribution per asset,
            exports = helpers.prepare_cli_output(
                subprocess.check_output(
                    ["openquake/bin/oqscript.py", "--export",
                    str(oasset.id), export_target_dir]))

            expected_file = os.path.join(export_target_dir,
                    "dmg-dist-asset-%s.xml" % job.id)

            self.assertEqual([expected_file], exports)

            # and per taxonomy
            exports = helpers.prepare_cli_output(
                subprocess.check_output(["openquake/bin/oqscript.py",
                    "--export", str(otaxon.id), export_target_dir]))

            expected_file = os.path.join(export_target_dir,
                    "dmg-dist-taxonomy-%s.xml" % job.id)

            self.assertEqual([expected_file], exports)

            # and total damage distribution
            exports = helpers.prepare_cli_output(
                subprocess.check_output(["openquake/bin/oqscript.py",
                    "--export", str(ototal.id), export_target_dir]))

            expected_file = os.path.join(export_target_dir,
                    "dmg-dist-total-%s.xml" % job.id)

            self.assertEqual([expected_file], exports)

            # and collapse map
            exports = helpers.prepare_cli_output(
                subprocess.check_output(["openquake/bin/oqscript.py",
                    "--export", str(omap.id), export_target_dir]))

            expected_file = os.path.join(export_target_dir,
                    "collapse-map-%s.xml" % job.id)

            self.assertEqual([expected_file], exports)
        finally:
            shutil.rmtree(export_target_dir)
Exemple #51
0
def _path_factory():
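    """Create a hidden temporary directory inside the current working directory."""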
    return tempfile.mkdtemp(dir=os.path.abspath(os.getcwd()), prefix='.tmp')
Exemple #52
0
 def setUp(self):
     self.origdir = os.getcwd()
     self.dirname = tempfile.mkdtemp("testdir")
     os.chdir(self.dirname)
Exemple #53
0
def setUpModule():
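    """Create a temporary test layer once for the whole module and register
    it with bitbake-layers."""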
    global templayerdir
    templayerdir = tempfile.mkdtemp(prefix='recipetoolqa')
    create_temp_layer(templayerdir, 'selftestrecipetool')
    runCmd('bitbake-layers add-layer %s' % templayerdir)
Exemple #54
0
 def __enter__(self):
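     """Reuse an externally supplied temp dir when available; otherwise
     create a fresh one under self.dir."""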
     if self.super_temp is not None:
         return self.super_temp
     self.temp_dir = tempfile.mkdtemp(dir=self.dir)
     return self.temp_dir
Exemple #55
0
def download_volume(manga_to_download, volume, path_to_download_to, loading_bar, app_root, label, button, button_vol):
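    """Download every chapter of the given volume from guya.moe, convert each
    chapter to a PDF, and merge them into a single PDF at path_to_download_to.

    The Tk widgets passed in (loading_bar, label, and the two buttons) are
    updated to report progress and to block re-entry while a download runs.
    """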
    if path_to_download_to == "":
        print("No argument was given for path_to_download_to")
        return
    dirpath = tempfile.mkdtemp()
    print(dirpath)
    button.config(state="disabled")
    button_vol.config(state="disabled")
    # If there is no connection, display an error
    try:
        merger = PdfFileMerger()
        chapter_list = get_volume_list(manga_to_download, True)[int(volume) - 1]
        for i in range(len(chapter_list)):
            r = requests.get("https://guya.moe/api/download_chapter/" + manga_to_download + "/" + chapter_list[i].replace(".", "-") + "/", stream=True)
            file_size = r.headers.get("content-length")

            with open(dirpath + "/chapter.zip", "wb") as file:
                if file_size is None:
                    print("No file size header found, cannot display progress")
                    file.write(r.content)
                else:
                    downloaded_data = 0
                    file_size = int(file_size)
                    for data in r.iter_content(chunk_size=32768):
                        downloaded_data += len(data)
                        file.write(data)
                        progress = int(100 * downloaded_data / file_size)
                        loading_bar["value"] = progress
                        label.configure(text=f"{str(progress)}% ({i + 1}/{len(chapter_list)})")
                        app_root.update_idletasks()
                        app_root.update()

            # Extract the zip file
            with zipfile.ZipFile(dirpath + "/chapter.zip", 'r') as zip_ref:
                zip_ref.extractall(dirpath)
            # Create the PDF file
            file_path = dirpath + f"/{chapter_list[i].replace('.', '-')}.pdf"
            pdf_maker.make_pdf(dirpath, file_path)
            # Append the created file to the volume
            print(f"Appended file {file_path}")
            merger.append(file_path)
    except Exception as e:
        mbox.showerror("An error occurred", "Unable to establish a connection; check your internet settings")
        print("Error: " + str(e))
        # Clean up the temp directory and re-enable the UI before bailing out.
        shutil.rmtree(dirpath)
        button.config(state="normal")
        button_vol.config(state="normal")
        return

    if not path_to_download_to.endswith(".pdf"):
        path_to_download_to += ".pdf"
    # Appending each chapter PDF as soon as it is created keeps the volume in
    # reading order; collecting and sorting the files afterwards in
    # pdf_maker.py ordered them lexicographically (e.g. chapter 1, then 10-5,
    # then 10, and only then 2).
    merger.write(path_to_download_to)
    merger.close()

    shutil.rmtree(dirpath)
    label.configure(text="Ready")
    loading_bar["value"] = 0
    button.config(state="normal")
    button_vol.config(state="normal")
Exemple #56
0
    def testUpdateMode(self):
        """ Test that on-the-fly re-opening in update/read-only mode works """

        tmpdir = tempfile.mkdtemp()
        self.dirs_to_cleanup.append(tmpdir)
        srcpath = os.path.join(TEST_DATA_DIR, 'provider')
        for file in glob.glob(os.path.join(srcpath, 'shapefile.*')):
            shutil.copy(os.path.join(srcpath, file), tmpdir)
        datasource = os.path.join(tmpdir, 'shapefile.shp')

        vl = QgsVectorLayer('{}|layerid=0'.format(datasource), 'test', 'ogr')
        caps = vl.dataProvider().capabilities()
        self.assertTrue(caps & QgsVectorDataProvider.AddFeatures)
        self.assertTrue(caps & QgsVectorDataProvider.DeleteFeatures)
        self.assertTrue(caps & QgsVectorDataProvider.ChangeAttributeValues)
        self.assertTrue(caps & QgsVectorDataProvider.AddAttributes)
        self.assertTrue(caps & QgsVectorDataProvider.DeleteAttributes)
        self.assertTrue(caps & QgsVectorDataProvider.CreateSpatialIndex)
        self.assertTrue(caps & QgsVectorDataProvider.SelectAtId)
        self.assertTrue(caps & QgsVectorDataProvider.ChangeGeometries)
        # self.assertTrue(caps & QgsVectorDataProvider.ChangeFeatures)

        # We should be really opened in read-only mode even if write capabilities are declared
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-only")

        # Unbalanced call to leaveUpdateMode()
        self.assertFalse(vl.dataProvider().leaveUpdateMode())

        # Test that startEditing() / commitChanges() plays with enterUpdateMode() / leaveUpdateMode()
        self.assertTrue(vl.startEditing())
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-write")
        self.assertTrue(vl.dataProvider().isValid())

        self.assertTrue(vl.commitChanges())
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-only")
        self.assertTrue(vl.dataProvider().isValid())

        # Manual enterUpdateMode() / leaveUpdateMode() with 2 depths
        self.assertTrue(vl.dataProvider().enterUpdateMode())
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-write")
        caps = vl.dataProvider().capabilities()
        self.assertTrue(caps & QgsVectorDataProvider.AddFeatures)

        f = QgsFeature()
        f.setAttributes([200])
        f.setGeometry(QgsGeometry.fromWkt('Point (2 49)'))
        (ret, feature_list) = vl.dataProvider().addFeatures([f])
        self.assertTrue(ret)
        fid = feature_list[0].id()

        features = [f_iter for f_iter in vl.getFeatures(QgsFeatureRequest().setFilterFid(fid))]
        values = [f_iter['pk'] for f_iter in features]
        self.assertEqual(values, [200])

        got_geom = [f_iter.geometry() for f_iter in features][0].constGet()
        self.assertEqual((got_geom.x(), got_geom.y()), (2.0, 49.0))

        self.assertTrue(vl.dataProvider().changeGeometryValues({fid: QgsGeometry.fromWkt('Point (3 50)')}))
        self.assertTrue(vl.dataProvider().changeAttributeValues({fid: {0: 100}}))

        features = [f_iter for f_iter in vl.getFeatures(QgsFeatureRequest().setFilterFid(fid))]
        values = [f_iter['pk'] for f_iter in features]

        got_geom = [f_iter.geometry() for f_iter in features][0].constGet()
        self.assertEqual((got_geom.x(), got_geom.y()), (3.0, 50.0))

        self.assertTrue(vl.dataProvider().deleteFeatures([fid]))

        # Check that it has really disappeared
        osgeo.gdal.PushErrorHandler('CPLQuietErrorHandler')
        features = [f_iter for f_iter in vl.getFeatures(QgsFeatureRequest().setFilterFid(fid))]
        osgeo.gdal.PopErrorHandler()
        self.assertEqual(features, [])

        self.assertTrue(vl.dataProvider().addAttributes([QgsField("new_field", QVariant.Int, "integer")]))
        self.assertTrue(vl.dataProvider().deleteAttributes([len(vl.dataProvider().fields()) - 1]))

        self.assertTrue(vl.startEditing())
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-write")

        self.assertTrue(vl.commitChanges())
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-write")

        self.assertTrue(vl.dataProvider().enterUpdateMode())
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-write")

        self.assertTrue(vl.dataProvider().leaveUpdateMode())
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-write")

        self.assertTrue(vl.dataProvider().leaveUpdateMode())
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-only")

        # Test that update mode is implicitly enabled when performing an
        # action that requires it
        (ret, _) = vl.dataProvider().addFeatures([QgsFeature()])
        self.assertTrue(ret)
        self.assertEqual(vl.dataProvider().property("_debug_open_mode"), "read-write")
Exemple #57
0
  def test_model_custom_sink(self):
    tempdir_name = tempfile.mkdtemp()

    class SimpleKV(object):
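      """Minimal fake key-value sink, backed by one file per table in a
      temporary directory, used to exercise snippets.model_custom_sink."""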
      def __init__(self, tmp_dir):
        self._dummy_token = 'dummy_token'
        self._tmp_dir = tmp_dir

      def connect(self, url):
        return self._dummy_token

      def open_table(self, access_token, table_name):
        assert access_token == self._dummy_token
        file_name = self._tmp_dir + os.sep + table_name
        assert not os.path.exists(file_name)
        open(file_name, 'wb').close()
        return table_name

      def write_to_table(self, access_token, table_name, key, value):
        assert access_token == self._dummy_token
        file_name = self._tmp_dir + os.sep + table_name
        assert os.path.exists(file_name)
        with open(file_name, 'ab') as f:
          content = (key + ':' + value + os.linesep).encode('utf-8')
          f.write(content)

      def rename_table(self, access_token, old_name, new_name):
        assert access_token == self._dummy_token
        old_file_name = self._tmp_dir + os.sep + old_name
        new_file_name = self._tmp_dir + os.sep + new_name
        assert os.path.isfile(old_file_name)
        assert not os.path.exists(new_file_name)

        os.rename(old_file_name, new_file_name)

    snippets.model_custom_sink(
        SimpleKV(tempdir_name),
        [('key' + str(i), 'value' + str(i)) for i in range(100)],
        'final_table_no_ptransform',
        'final_table_with_ptransform')

    expected_output = [
        'key' + str(i) + ':' + 'value' + str(i) for i in range(100)
    ]

    glob_pattern = tempdir_name + os.sep + 'final_table_no_ptransform*'
    output_files = glob.glob(glob_pattern)
    assert output_files

    received_output = []
    for file_name in output_files:
      with open(file_name) as f:
        for line in f:
          received_output.append(line.rstrip(os.linesep))

    self.assertCountEqual(expected_output, received_output)

    glob_pattern = tempdir_name + os.sep + 'final_table_with_ptransform*'
    output_files = glob.glob(glob_pattern)
    assert output_files

    received_output = []
    for file_name in output_files:
      with open(file_name) as f:
        for line in f:
          received_output.append(line.rstrip(os.linesep))

    self.assertCountEqual(expected_output, received_output)
Exemple #58
0
def _save_as_saved_model(model):
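  """Save the model to a fresh temporary SavedModel directory and return its path."""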
  saved_model_dir = tempfile.mkdtemp()
  model.save(saved_model_dir)
  return saved_model_dir
Exemple #59
0
 def setUp(self):
     self.cache_dir = tempfile.mkdtemp("cwltool_cache")
Exemple #60
0
import os
import tempfile

from qgis.testing import start_app, unittest
from qgis.PyQt.QtNetwork import QSslCertificate

from plugins.db_manager.db_plugins import supportedDbTypes, createDbPlugin
from plugins.db_manager.db_plugins.plugin import TableField

from utilities import unitTestDataPath

QGIS_POSTGRES_SERVER_PORT = os.environ.get('QGIS_POSTGRES_SERVER_PORT',
                                           '55432')
QGIS_POSTGRES_EXECUTABLE_PATH = os.environ.get('QGIS_POSTGRES_EXECUTABLE_PATH',
                                               '/usr/lib/postgresql/9.4/bin')

assert os.path.exists(QGIS_POSTGRES_EXECUTABLE_PATH)

QGIS_AUTH_DB_DIR_PATH = tempfile.mkdtemp()

# Postgres test path
QGIS_PG_TEST_PATH = tempfile.mkdtemp()

os.environ['QGIS_AUTH_DB_DIR_PATH'] = QGIS_AUTH_DB_DIR_PATH

qgis_app = start_app()

QGIS_POSTGRES_CONF_TEMPLATE = """
hba_file = '%(tempfolder)s/pg_hba.conf'
listen_addresses = '*'
port = %(port)s
max_connections = 100
unix_socket_directories = '%(tempfolder)s'
ssl = true