def install_step(self):
        """Install by copying library and header files to install directory."""

        try:
            # copy library
            targetdir = os.path.join(self.installdir, "lib")
            os.mkdir(targetdir)
            
            # not documented AFAIK, but real
            if LooseVersion(self.version) < LooseVersion('1.6.0'):
                fn = "libgrib2c.a"
            else:
                fn = "libg2c_v%s.a" % self.version

            shutil.copyfile(os.path.join(self.cfg['start_dir'], fn),
                            os.path.join(targetdir, fn))

            # make link so other software still finds libgrib2c.a
            if LooseVersion(self.version) >= LooseVersion('1.6.0'):
                os.symlink("libg2c_v%s.a" % self.version, os.path.join(targetdir,"libgrib2c.a"))

            # copy header files
            targetdir = os.path.join(self.installdir, "include")
            os.mkdir(targetdir)
            for fn in glob.glob('*.h'):
                shutil.copyfile(os.path.join(self.cfg['start_dir'], fn),
                                os.path.join(targetdir, fn))

        except OSError as err:
            raise EasyBuildError("Failed to copy files to install dir: %s", err)
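The pattern above installs a version-suffixed static library and then symlinks the legacy name to it so dependent software keeps building. A minimal standalone sketch of the same idea (the library name, paths, and helper are hypothetical, not EasyBuild API):

import os
import shutil

def install_versioned_lib(src, libdir, version):
    """Copy a version-suffixed library and keep a legacy-name symlink."""
    versioned = "libfoo_v%s.a" % version  # hypothetical naming scheme
    os.makedirs(libdir, exist_ok=True)
    shutil.copyfile(src, os.path.join(libdir, versioned))
    legacy = os.path.join(libdir, "libfoo.a")
    if not os.path.lexists(legacy):
        # a relative target keeps the link valid if libdir is relocated
        os.symlink(versioned, legacy)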
Example #2
    def symlink(env, target, source):
        trgt = str(target[0])
        src = str(source[0])

        if os.path.islink(trgt) or os.path.exists(trgt):
            os.remove(trgt)
        os.symlink(os.path.basename(src), trgt)
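Note that this SCons helper links to os.path.basename(src), i.e. a target relative to the link's own directory, so the link survives moving the whole tree. A short sketch contrasting absolute and relative targets (the scratch directory is arbitrary):

import os
import tempfile

d = tempfile.mkdtemp()
open(os.path.join(d, "file.txt"), "w").close()
os.symlink(os.path.join(d, "file.txt"), os.path.join(d, "abs-link"))  # absolute target
os.symlink("file.txt", os.path.join(d, "rel-link"))  # relative: resolved against d
print(os.readlink(os.path.join(d, "rel-link")))  # -> 'file.txt'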
Example #3
  def _fetch_pkg(self, gopath, pkg, rev):
    """Fetch the package and setup symlinks."""
    fetcher = self._get_fetcher(pkg)
    root = fetcher.root()
    root_dir = os.path.join(self.workdir, 'fetches', root, rev)

    # Only fetch each remote root once.
    if not os.path.exists(root_dir):
      with temporary_dir() as tmp_fetch_root:
        fetcher.fetch(dest=tmp_fetch_root, rev=rev)
        safe_mkdir(root_dir)
        for path in os.listdir(tmp_fetch_root):
          shutil.move(os.path.join(tmp_fetch_root, path), os.path.join(root_dir, path))

    # TODO(John Sirois): Circle back and get rid of this symlink tree.
    # GoWorkspaceTask will further symlink a single package from the tree below into a
    # target's workspace when it could just be linking from the fetch_dir.  The only thing
    # standing in the way is a determination of what we want to artifact cache.  If we don't
    # want to cache fetched zips, linking straight from the fetch_dir works simply.  Otherwise
    # thought needs to be applied to using the artifact cache directly or synthesizing a
    # canonical owner target for the fetched files that 'child' targets (subpackages) can
    # depend on and share the fetch from.
    dest_dir = os.path.join(gopath, 'src', root)
    # We may have been `invalidate`d and not `clean-all`ed so we need a new empty symlink
    # chroot to avoid collision; thus `clean=True`.
    safe_mkdir(dest_dir, clean=True)
    for path in os.listdir(root_dir):
      os.symlink(os.path.join(root_dir, path), os.path.join(dest_dir, path))
Example #4
def changeConfigFile(change1, change2):
    subdir = "LambdaZ_" + str(change1) + "_dKgamma_" + str(change2)
    dirName = my3DayArea + subdir
    child1 = os.mkdir(dirName)
    command = "cp ${PWD}/*.* " + myMCFM
    child2 = os.system(command)
    child3 = os.chdir(myMCFM)
    child4 = os.symlink(myMCFM + "Pdfdata", dirName + "/Pdfdata")
    child5 = os.symlink(myMCFM + "process.DAT", dirName + "/process.DAT")
    fin = open(thisDirectory + ConfigFile)
    pset_cfg = subdir + ".DAT"
    fout = open(pset_cfg, "w")
    for line in fin.readlines():
        if line.find("Lambda(Z)") != -1:
            line = line.replace("0.0d0", str(change1) + "d0")
        if line.find("Lambda(gamma)") != -1:
            line = line.replace("0.0d0", str(change1) + "d0")
        if line.find("Delta_K(gamma)") != -1:
            line = line.replace("0.0d0", str(change2) + "d0")
        if line.find("Delta_K(Z)") != -1:
            line = line.replace("0.0d0", str(-0.2864 * change2) + "d0")
        fout.write(line)
    print(pset_cfg + " has been written.\n")
    command = "./submitToCondor.py -c " + pset_cfg
    print("Submitting job: " + command + "\n")
    child9 = os.system(command)
Example #5
def generatePara(filename):
	_config = ConfigParser.ConfigParser()
	_config.optionxform=str
	_config.read(filename)
	_filenames = []
	sec = 'modelABrc'
	Vals, sizes, = {}, []
	for k in KEYS:
		Vals[k] = _config.get(sec,k)
	_path = 'c'+Vals['chiN']+'m'+Vals['miuN']+'b'+Vals['Beta']
	if not os.path.exists(_path):
		os.makedirs(_path)
	with open(Vals['listName'],'r') as f:
		for l in f.readlines():
			sizes = json.loads(l)
	count = len(glob.glob(os.path.join(_path,'Para*.ini')))
	for k in sizes:
		for name in sz:
			_config.set(sec,name,k[name])
		_fn = 'Para_' + str(count) + '.ini'
		count += 1
		with open(_path + '//' + _fn,'wb') as f:
			_config.write(f)
		_filenames.append(_fn)
	for ff in FILELIST:
		if not os.path.isfile(os.path.join(_path,ff)):
			#shutil.copy('.//'+ff,_path+'//'+ff)
			os.symlink(os.path.join(os.path.realpath('.'),ff), \
					os.path.join(_path,ff))

	#if not os.path.isfile(_path+'//scfRigid'):
	#	shutil.copy('.//scfRigid',_path+'//scfRigid')
	return _path, _filenames
Example #6
def createSoftLinks(pair1, pair2, orientation, type, currentLibraryNumber):
    pair1NewName = _new_name(pair1, orientation, type, currentLibraryNumber, 1)
    pair2NewName = _new_name(pair2, orientation, type, currentLibraryNumber, 2)
    os.symlink(pair1, pair1NewName)
    if pair2NewName is not None:
         os.symlink(pair2, pair2NewName)
    return pair1NewName, pair2NewName
Example #7
 def _create_link(self, src_path, dest_path, transaction):
     with _storagefailure_wrapper():
         try:
             os.symlink(src_path, dest_path)
         except OSError as e:
             if e.errno != errno.EEXIST:
                 raise
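Tolerating errno.EEXIST as above makes creation idempotent, but it silently keeps an existing link even when it points somewhere else. A hedged variant that also repairs a stale link (the helper name is ours):

import errno
import os

def ensure_symlink(src_path, dest_path):
    try:
        os.symlink(src_path, dest_path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
        # already present: re-point it if it is a link to a different target
        if os.path.islink(dest_path) and os.readlink(dest_path) != src_path:
            os.unlink(dest_path)
            os.symlink(src_path, dest_path)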
Example #8
def do_build(package):
    print("Build started for %s" % package.name)

    sphinx_args = [package.path, package.build_path]
    copy_env = os.environ.copy()

    if package.devhelp:
        sphinx_args = ["-b", "devhelpfork"] + sphinx_args
        copy_env["PGIDOCGEN_TARGET_PREFIX"] = DEVHELP_PREFIX
    else:
        sphinx_args = ["-b", "html"] + sphinx_args
        copy_env["PGIDOCGEN_TARGET_PREFIX"] = ""

    copy_env["PGIDOCGEN_TARGET_BASE_PATH"] = \
        os.path.dirname(package.build_path)

    subprocess.check_call(
        [sys.executable, "-m", "sphinx", "-n", "-q", "-a", "-E"] + sphinx_args,
        env=copy_env)

    # we don't rebuild, remove all caches
    shutil.rmtree(os.path.join(package.build_path, ".doctrees"))
    os.remove(os.path.join(package.build_path, ".buildinfo"))

    # remove some pages we don't need
    os.remove(os.path.join(package.build_path, "genindex.html"))
    os.remove(os.path.join(package.build_path, "search.html"))

    if os.name != "nt":
        for d in ["structs", "unions", "interfaces", "iface-structs",
                  "class-structs"]:
            os.symlink("classes", os.path.join(package.build_path, d))

    return package
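The loop above exposes one built directory under several names without duplicating its contents. The aliasing pattern in isolation, under hypothetical names (POSIX only, matching the os.name guard):

import os

os.makedirs("build/classes", exist_ok=True)  # hypothetical layout
for alias in ["structs", "unions", "interfaces"]:
    path = os.path.join("build", alias)
    if not os.path.lexists(path):
        os.symlink("classes", path)  # relative target, resolved inside build/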
Example #9
def move_symlink(src, dst):
    """Create or overwrite a symlink"""
    try:
        os.unlink(dst)
    except OSError:
        pass
    os.symlink(src, dst)
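The unlink-then-symlink sequence above leaves a brief window in which dst does not exist. On POSIX the replacement can be made atomic by creating the link under a temporary name and renaming it over dst; a sketch (the .tmp suffix is arbitrary):

import os

def replace_symlink_atomically(src, dst):
    tmp = dst + ".tmp"  # arbitrary scratch name next to dst
    if os.path.lexists(tmp):
        os.unlink(tmp)
    os.symlink(src, tmp)
    os.replace(tmp, dst)  # atomic rename, even if dst already exists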
Example #10
def copy_python(src, dst, symlinks=False):
    " Copies just Python source files "
    import shutil
    names = os.listdir(src)
    try:
        os.makedirs(dst)
    except OSError:
        pass
    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                if not name.startswith("."):
                    copy_python(srcname, dstname, symlinks)
            else:
                if name.endswith(".py"):
                    print "create", dstname
                    shutil.copy2(srcname, dstname)
            # XXX What about devices, sockets etc.?
        except (IOError, os.error) as why:
            errors.append((srcname, dstname, str(why)))
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
Example #11
def linkBestCurrentSolution(dpath):
	alldsz = glob.glob(dpath + '/*dsz')
	currentLink = None
	bestDsz = None
	bestN = None
	stu = None
	for x in alldsz:
		if x.endswith('current.dsz'):
			assert os.path.islink(x)
			currentLink = os.readlink(x)
			continue
		fname = os.path.basename(x)
		m = CURSOL_RE_.match(fname)
		assert m is not None, ('failed to parse %s' % fname)
		nstu = m.group(1).upper()
		if stu is None:
			stu = nstu
		assert nstu == stu
		nth = int(m.group(2))
		if (bestDsz is None) or (nth > bestN):
			bestN = nth
			bestDsz = fname
	if bestDsz is None:
		return None
	currentDsz = os.path.join(dpath, stu + 'current.dsz')
	if os.path.islink(currentDsz):
		odsz = os.readlink(currentDsz)
		if odsz == bestDsz:
			return bestDsz
		os.unlink(currentDsz)
	os.symlink(bestDsz, currentDsz)
	return bestDsz
Example #12
    def start_emulator(self):
        '''
        Starts the emulator
        '''
        if 'emulator_url' in self.config or 'emulator_manifest' in self.config or 'tools_manifest' in self.config:
            self.install_emulator()

        if not os.path.isfile(self.adb_path):
            self.fatal("The adb binary '%s' is not a valid file!" % self.adb_path)
        self._restart_adbd()

        if not self.config.get("developer_mode"):
            # We kill compiz because it sometimes prevents us from starting the emulator
            self._kill_processes("compiz")
            self._kill_processes("xpcshell")

        # We add a symlink for libGL.so because the emulator dlopen()s it by that name
        # even though the installed library on most systems without dev packages is
        # libGL.so.1
        linkfile = os.path.join(self.abs_dirs['abs_work_dir'], "libGL.so")
        self.info("Attempting to establish symlink for %s" % linkfile)
        try:
            os.unlink(linkfile)
        except OSError:
            pass
        for libdir in ["/usr/lib/x86_64-linux-gnu/mesa",
                       "/usr/lib/i386-linux-gnu/mesa",
                       "/usr/lib/mesa"]:
            libfile = os.path.join(libdir, "libGL.so.1")
            if os.path.exists(libfile):
                self.info("Symlinking %s -> %s" % (linkfile, libfile))
                self.mkdir_p(self.abs_dirs['abs_work_dir'])
                os.symlink(libfile, linkfile)
                break
        self.emulator_proc = self._launch_emulator()
Example #13
def symlink(src, target):
    """ symlink file if possible """
    if sys.platform.startswith('win'):  # 'win' in sys.platform would also match 'darwin'
        shutil.copy(src, target)
        os.chmod(target, stat.S_IRWXU)
    else:
        os.symlink(src, target)
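A variant of the same idea that asks forgiveness instead of sniffing the platform: in Python 3, os.symlink exists on Windows but raises OSError unless the process has symlink privileges, so falling back on failure covers both cases (a sketch, not the original helper):

import os
import shutil
import stat

def symlink_or_copy(src, target):
    try:
        os.symlink(src, target)
    except (OSError, NotImplementedError):
        # e.g. Windows without Developer Mode or the symlink privilege
        shutil.copy(src, target)
        os.chmod(target, stat.S_IRWXU)
Example #14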
    def install_step(self):
        """
        Install LAPACK: copy all .a files to lib dir in install directory
        """

        if self.cfg['test_only']:
            self.log.info('Only testing, so skipping make install.')
            return

        srcdir = self.cfg['start_dir']
        destdir = os.path.join(self.installdir, 'lib')

        try:
            os.makedirs(destdir)

            # copy all .a files
            os.chdir(srcdir)
            for lib in glob.glob('*.a'):
                srcfile = os.path.join(srcdir, lib)
                self.log.debug("Copying file %s to dir %s" % (srcfile, destdir))
                shutil.copy2(srcfile, destdir)

            # symlink libraries to sensible names, if they aren't renamed already
            for (fromfile, tofile) in [('liblapack_LINUX.a', 'liblapack.a'),
                                       ('tmglib_LINUX.a', 'libtmglib.a')]:
                frompath = os.path.join(destdir, fromfile)
                topath = os.path.join(destdir, tofile)
                if os.path.isfile(frompath) and not os.path.isfile(topath):
                    self.log.debug("Symlinking %s to %s" % (fromfile, tofile))
                    os.symlink(frompath, topath)

        except OSError as err:
            self.log.error("Copying %s to installation dir %s failed: %s" % (srcdir, destdir, err))
Example #15
    def _persist_symlink(self, abspath):
        """Persist symbolic link and bind mount it back to its current location
        """
        persisted_path = self._config_path(abspath)
        current_target = os.readlink(abspath)
        if os.path.exists(persisted_path):
            stored_target = os.readlink(persisted_path)
            if stored_target == current_target:
                self._logger.warn('Symlink "%s" had already been persisted',
                                  abspath)
                return
            else:
                # Write the new symlink to an alternate location and atomically
                # rename
                self._prepare_dir(abspath, persisted_path)
                tmp_path = persisted_path + '.ovirtnode.atom'
                try:
                    os.symlink(current_target, tmp_path)
                except Exception:
                    raise
                else:
                    os.rename(tmp_path, persisted_path)
        else:
            self._prepare_dir(abspath, persisted_path)
            os.symlink(current_target, persisted_path)

        self.copy_attributes(abspath, persisted_path)
        self._logger.info('Symbolic link "%s" successfully persisted', abspath)
        self._add_path_entry(abspath)
Example #16
def build_file_from_blob(blob, mode, target_path, honor_filemode=True):
    """Build a file or symlink on disk based on a Git object.

    :param blob: The git blob object
    :param mode: File mode
    :param target_path: Path to write to
    :param honor_filemode: An optional flag to honor core.filemode setting in
        config file, default is core.filemode=True, change executable bit
    """
    if stat.S_ISLNK(mode):
        # FIXME: This will fail on Windows. What should we do instead?
        src_path = blob.as_raw_string()
        try:
            os.symlink(src_path, target_path)
        except OSError as e:
            if e.errno == errno.EEXIST:
                os.unlink(target_path)
                os.symlink(src_path, target_path)
            else:
                raise
    else:
        with open(target_path, 'wb') as f:
            # Write out file
            f.write(blob.as_raw_string())

        if honor_filemode:
            os.chmod(target_path, mode)
Example #17
def initializeInitD(ownerName):
    if (os.path.isdir(initdDirName)):
        fn = join(RANGER_USERSYNC_HOME, initdProgramName)
        initdFn = join(initdDirName, initdProgramName)
        shutil.copy(fn, initdFn)
        if (ownerName != 'ranger'):
            f = open(initdFn, 'r')
            filedata = f.read()
            f.close()
            find_str = "LINUX_USER=ranger"
            replace_str = "LINUX_USER="******"/etc/rc2.d", "/etc/rc3.d", "/etc/rc.d/rc2.d", "/etc/rc.d/rc3.d"]
        for rcDir in rcDirList:
            if (os.path.isdir(rcDir)):
                for prefix in initPrefixList:
                    scriptFn = prefix + initdProgramName
                    scriptName = join(rcDir, scriptFn)
                    if isfile(scriptName) or os.path.islink(scriptName):
                        os.remove(scriptName)
                    os.symlink(initdFn, scriptName)
        userSyncScriptName = "ranger-usersync-services.sh"
        localScriptName = os.path.abspath(join(RANGER_USERSYNC_HOME, userSyncScriptName))
        ubinScriptName = join("/usr/bin", initdProgramName)
        if isfile(ubinScriptName) or os.path.islink(ubinScriptName):
            os.remove(ubinScriptName)
        os.symlink(localScriptName, ubinScriptName)
Example #18
def postInstall(fromVersion, fromRelease, toVersion, toRelease):
    os.system("/usr/sbin/alternatives \
                --install /usr/lib32/libGL.so.1.2 libGL-32bit /usr/lib32/mesa/libGL.so.1.2 80 \
                --slave /usr/lib32/xorg/modules/volatile xorg-modules-volatile-32bit /var/empty")

    if not os.path.lexists("/usr/lib32/libGL.so.1"):
        os.symlink("libGL.so.1.2", "/usr/lib32/libGL.so.1")
Example #19
 def add_link(self, source, link_name):
     dest = self.dest_path(link_name)
     self._check_path(dest)
     if not os.path.lexists(dest):
         os.symlink(source, dest)
     self.log_debug("added symlink at '%s' to '%s' in FileCacheArchive '%s'"
                    % (dest, source, self._archive_root))
Example #20
		def copytree(src, dst, symlinks=False, ignore=None):
			names = os.listdir(src)
			if ignore is not None:
				ignored_names = ignore(src, names)
			else:
				ignored_names = set()
			
			if not os.path.exists(dst): os.makedirs(dst)
			errors = []
			for name in names:
				if name in ignored_names:
					continue
				srcname = os.path.join(src, name)
				dstname = os.path.join(dst, name)
				try:
					if symlinks and os.path.islink(srcname):
						linkto = os.readlink(srcname)
						os.symlink(linkto, dstname)
					elif os.path.isdir(srcname):
						copytree(srcname, dstname, symlinks, ignore)
					else:
						shutil.copy2(srcname, dstname)
					# XXX What about devices, sockets etc.?
				except (IOError, os.error) as why:
					errors.append((srcname, dstname, str(why)))
				# catch the Error from the recursive copytree so that we can
				# continue with other files
				except Error as err:
					errors.extend(err.args[0])
Example #21
    def test_which_follows_symlink(self):
        " which() follows symlinks and returns its path. "
        fname = 'original'
        symname = 'extra-crispy'
        bin_dir = tempfile.mkdtemp()
        bin_path = os.path.join(bin_dir, fname)
        sym_path = os.path.join(bin_dir, symname)
        save_path = os.environ['PATH']
        try:
            # setup
            os.environ['PATH'] = bin_dir
            with open(bin_path, 'w') as fp:
                pass
            os.chmod(bin_path, 0o400)
            os.symlink(bin_path, sym_path)

            # should not be found because symlink points to non-executable
            assert pexpect.which(symname) is None

            # but now it should -- because it is executable
            os.chmod(bin_path, 0o700)
            assert pexpect.which(symname) == sym_path

        finally:
            # restore,
            os.environ['PATH'] = save_path

            # destroy scratch files, symlinks, and folders,
            if os.path.exists(sym_path):
                os.unlink(sym_path)
            if os.path.exists(bin_path):
                os.unlink(bin_path)
            if os.path.exists(bin_dir):
                os.rmdir(bin_dir)
Example #22
    def store_file(self):
        """Store a copy of the file being analyzed."""
        if not os.path.exists(self.task.target):
            log.error("The file to analyze does not exist at path \"%s\", "
                      "analysis aborted", self.task.target)
            return False

        sha256 = File(self.task.target).get_sha256()
        self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries", sha256)

        if os.path.exists(self.binary):
            log.info("File already exists at \"%s\"", self.binary)
        else:
            # TODO: do we really need to abort the analysis in case we are not
            # able to store a copy of the file?
            try:
                shutil.copy(self.task.target, self.binary)
            except (IOError, shutil.Error) as e:
                log.error("Unable to store file from \"%s\" to \"%s\", "
                          "analysis aborted", self.task.target, self.binary)
                return False

        try:
            new_binary_path = os.path.join(self.storage, "binary")

            if hasattr(os, "symlink"):
                os.symlink(self.binary, new_binary_path)
            else:
                shutil.copy(self.binary, new_binary_path)
        except (AttributeError, OSError) as e:
            log.error("Unable to create symlink/copy from \"%s\" to "
                      "\"%s\": %s", self.binary, self.storage, e)

        return True
Example #23
def _run_develop(self):
    """
    The definition of the "run" method for the CustomDevelopCommand metaclass.
    """
    # Get paths
    tethysapp_dir = get_tethysapp_directory()
    destination_dir = os.path.join(tethysapp_dir, self.app_package)

    # Notify user
    print('Creating Symbolic Link to App Package: {0} to {1}'.format(self.app_package_dir, destination_dir))

    # Create symbolic link
    try:
        os.symlink(self.app_package_dir, destination_dir)

    except OSError:
        try:
            shutil.rmtree(destination_dir)
        except OSError:
            os.remove(destination_dir)

        os.symlink(self.app_package_dir, destination_dir)

    # Install dependencies
    for dependency in self.dependencies:
        subprocess.call(['pip', 'install', dependency])

    # Run the original develop command
    develop.run(self)
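The develop command above boils down to "make the installed package a symlink back to the working copy". The link-with-cleanup step in isolation (directory names are hypothetical):

import os
import shutil

def link_app_package(src_dir, dest_dir):
    """Symlink src_dir at dest_dir, replacing whatever is already there."""
    if os.path.islink(dest_dir) or os.path.isfile(dest_dir):
        os.remove(dest_dir)
    elif os.path.isdir(dest_dir):
        shutil.rmtree(dest_dir)
    os.symlink(src_dir, dest_dir)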
Example #24
 def linkoutdir(src, tgt):
     # Need to make the link to the staged file (may be inside
     # the container)
     for _, item in self.pathmapper.items():
         if src == item.resolved:
             os.symlink(item.target, tgt)
             break
Example #25
    def test_architecture_via_symlink(self): # issue3762
        # On Windows, the EXE needs to know where pythonXY.dll and *.pyd is at
        # so we add the directory to the path, PYTHONHOME and PYTHONPATH.
        env = None
        if sys.platform == "win32":
            env = {k.upper(): os.environ[k] for k in os.environ}
            env["PATH"] = "{};{}".format(
                os.path.dirname(sys.executable), env.get("PATH", ""))
            env["PYTHONHOME"] = os.path.dirname(sys.executable)
            if sysconfig.is_python_build(True):
                env["PYTHONPATH"] = os.path.dirname(os.__file__)

        def get(python, env=None):
            cmd = [python, '-c',
                'import platform; print(platform.architecture())']
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, env=env)
            r = p.communicate()
            if p.returncode:
                print(repr(r[0]))
                print(repr(r[1]), file=sys.stderr)
                self.fail('unexpected return code: {0} (0x{0:08X})'
                          .format(p.returncode))
            return r

        real = os.path.realpath(sys.executable)
        link = os.path.abspath(support.TESTFN)
        os.symlink(real, link)
        try:
            self.assertEqual(get(real), get(link, env=env))
        finally:
            os.remove(link)
Example #26
def copy_packages(packages_names, dest, create_links=False, extra_ignores=None):
    """Copy python packages ``packages_names`` to ``dest``, spurious data.

    Copy will happen without tests, testdata, mercurial data or C extension module source with it.
    ``py2app`` include and exclude rules are **quite** funky, and doing this is the only reliable
    way to make sure we don't end up with useless stuff in our app.
    """
    if ISWINDOWS:
        create_links = False
    if not extra_ignores:
        extra_ignores = []
    ignore = shutil.ignore_patterns('.hg*', 'tests', 'testdata', 'modules', 'docs', 'locale', *extra_ignores)
    for package_name in packages_names:
        if op.exists(package_name):
            source_path = package_name
        else:
            mod = __import__(package_name)
            source_path = mod.__file__
            if mod.__file__.endswith('__init__.py'):
                source_path = op.dirname(source_path)
        dest_name = op.basename(source_path)
        dest_path = op.join(dest, dest_name)
        if op.exists(dest_path):
            if op.islink(dest_path):
                os.unlink(dest_path)
            else:
                shutil.rmtree(dest_path)
        print("Copying package at {0} to {1}".format(source_path, dest_path))
        if create_links:
            os.symlink(op.abspath(source_path), dest_path)
        else:
            if op.isdir(source_path):
                shutil.copytree(source_path, dest_path, ignore=ignore)
            else:
                shutil.copy(source_path, dest_path)
Example #27
def merge_js_index(app):
    """
    Merge the JS indexes of the sub-docs into the main JS index
    """
    app.info('')
    app.info(bold('Merging js index files...'))
    mapping = app.builder.indexer._mapping
    for curdoc in app.env.config.multidocs_subdoc_list:
        app.info("    %s:"%curdoc, nonl=1)
        fixpath = lambda path: os.path.join(curdoc, path)
        index = get_js_index(app, curdoc)
        if index is not None:
            # merge the mappings
            app.info(" %s js index entries"%(len(index._mapping)))
            for (ref, locs) in index._mapping.iteritems():
                newmapping = set(map(fixpath, locs))
                if ref in mapping:
                    newmapping = mapping[ref] | newmapping
                mapping[unicode(ref)] = newmapping
            # merge the titles
            titles = app.builder.indexer._titles
            for (res, title) in index._titles.iteritems():
                titles[fixpath(res)] = title
            # TODO: merge indexer._objtypes, indexer._objnames as well

            # Setup source symbolic links
            dest = os.path.join(app.outdir, "_sources", curdoc)
            if not os.path.exists(dest):
                os.symlink(os.path.join("..", curdoc, "_sources"), dest)
    app.info('... done (%s js index entries)'%(len(mapping)))
    app.info(bold('Writing js search indexes...'), nonl=1)
    return [] # no extra page to setup
Example #28
 def upload_results(self, url, token):
     """ Renames and uploads all filesets and updates shock info """
     new_sets = []
     rank = 1
     for i,fset in enumerate(self.results):
         if fset.type == 'contigs' or fset.type == 'scaffolds':
             fset.add_tag('rank-' + str(rank))
             rank += 1
         new_files = []
         for j, f in enumerate(fset['file_infos']):
             if len(fset['file_infos']) > 1:
                 file_suffix = '_{}'.format(j+1)
             else: file_suffix = ''
             ext = f['local_file'].split('.')[-1]
             if not f['keep_name']:
                 new_file = '{}/{}.{}{}.{}'.format(os.path.dirname(f['local_file']),
                                                   i+1, fset.name, file_suffix, ext)
                 os.symlink(f['local_file'], new_file)
             else: new_file = f['local_file']
             res = self.upload_file(url, self['user'], token, new_file, filetype=fset.type)
             f.update({'shock_url': url, 'shock_id': res['data']['id'], 
                       'filename': os.path.basename(new_file)})
             new_files.append(f)
         fset.update_fileinfo(new_files)
         new_sets.append(fset)
     self['result_data'] = new_sets
     return new_sets
Example #29
def link(target, link):
    """
    Create a link to a target file or a folder.
    For simplicity's sake, both target and link must be absolute paths and must
    include the filename of the file or folder.
    Also do not include any trailing slash.

    e.g. link('/path/to/file', '/path/to/link')

    But not: link('/path/to/file', 'path/to/')
    or link('/path/to/folder/', '/path/to/link')

    Args:
        target (str): file or folder the link will point to
        link (str): Link to create
    """
    assert isinstance(target, str)
    assert os.path.exists(target)
    assert isinstance(link, str)

    # Create the path to the link if it does not exist
    abs_path = os.path.dirname(os.path.abspath(link))
    if not os.path.isdir(abs_path):
        os.makedirs(abs_path)

    # Make sure the file or folder recursively has the good mode
    chmod(target)

    # Create the link to target
    os.symlink(target, link)
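Given the docstring's contract (absolute paths, no trailing slash), a usage sketch with hypothetical paths:

link('/opt/app/releases/1.2.3', '/opt/app/current')        # link to a folder
link('/opt/app/shared/config.yml', '/opt/app/config.yml')  # link to a file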
Example #30
def storeFile(tmpFile, copyLocation, symLocation):
    shutil.copyfile(tmpFile, copyLocation)
    try:
        os.remove(symLocation)
    except OSError:
        pass
    os.symlink(copyLocation, symLocation)
Example #31
def zipball_extract(zipball, path):
    """Retrieve a zip archive from Keep and extract it to a local
    directory.  Return the absolute path where the archive was
    extracted. If the top level of the archive contained just one
    file or directory, return the absolute path of that single
    item.

    zipball -- collection locator
    path -- where to extract the archive: absolute, or relative to job tmp
    """
    if not re.search('^/', path):
        path = os.path.join(arvados.current_job().tmpdir, path)
    lockfile = open(path + '.lock', 'w')
    fcntl.flock(lockfile, fcntl.LOCK_EX)
    try:
        os.stat(path)
    except OSError:
        os.mkdir(path)
    already_have_it = False
    try:
        if os.readlink(os.path.join(path, '.locator')) == zipball:
            already_have_it = True
    except OSError:
        pass
    if not already_have_it:

        # emulate "rm -f" (i.e., if the file does not exist, we win)
        try:
            os.unlink(os.path.join(path, '.locator'))
        except OSError:
            if os.path.exists(os.path.join(path, '.locator')):
                os.unlink(os.path.join(path, '.locator'))

        for f in CollectionReader(zipball).all_files():
            if not re.search(r'\.zip$', f.name()):
                raise arvados.errors.NotImplementedError(
                    "zipball_extract cannot handle filename %s" % f.name())
            zip_filename = os.path.join(path, os.path.basename(f.name()))
            zip_file = open(zip_filename, 'wb')
            while True:
                buf = f.read(2**20)
                if len(buf) == 0:
                    break
                zip_file.write(buf)
            zip_file.close()

            p = subprocess.Popen(["unzip",
                                  "-q", "-o",
                                  "-d", path,
                                  zip_filename],
                                 stdout=None,
                                 stdin=None, stderr=sys.stderr,
                                 shell=False, close_fds=True)
            p.wait()
            if p.returncode != 0:
                lockfile.close()
                raise arvados.errors.CommandFailedError(
                    "unzip exited %d" % p.returncode)
            os.unlink(zip_filename)
        os.symlink(zipball, os.path.join(path, '.locator'))
    tld_extracts = [f for f in os.listdir(path) if f != '.locator']
    lockfile.close()
    if len(tld_extracts) == 1:
        return os.path.join(path, tld_extracts[0])
    return path
Example #32
def main():

    parser = OptionParser(usage="usage: %prog [options] (/path/to/file | stdin)")
    parser.add_option("-d", "--debug",
                      action="store_true",
                      dest="debug",
                      help="enable debug messages to the console.")
    parser.add_option("-r", "--remove-limit",
                      action="store_true",
                      dest="nolimit",
                      help="disable 20mb size limit (be careful!)")
    parser.add_option("-t", "--timeout",
                      action="store", type="int",
                      dest="timeout",
                      help="adjust request timeout period (in seconds)")
    parser.add_option("-c", "--config-path",
                      action="store", type="string",
                      dest="config_path",
                      help="specify a path to si-cloudscan.conf.")
    parser.add_option("-a", "--address",
                      action="store", type="string",
                      dest="broker_host",
                      help="specify an IP and port to connect to the broker")
    parser.add_option("-f", "--file-list",
                      action="store", type="string",
                      dest="file_list",
                      help="Specify a list of files to scan")
    parser.add_option("-s", "--ssh-host",
                      action="store", type="string",
                      dest="ssh_host",
                      help="specify a host for the SSH tunnel")
    parser.add_option("-p", "--num-procs",
                      action="store", type="int", default=6,
                      dest="num_procs",
                      help="Specify the number of processors to use for recursion")
    parser.add_option("-u", "--source",
                      action="store", type="string",
                      dest="source",
                      help="specify a custom source")
    parser.add_option("--ssh",
                      action="store_true",
                      default=False,
                      dest="use_ssh",
                      help="Use SSH tunneling")
    parser.add_option("-l", "--level",
                      action="store", type="string",
                      dest="return_level",
                      help="Return Level: minimal, metadata, full [default: metadata]")
    parser.add_option("-o", "--out-path",
                      action="store", type="string",
                      dest="save_path",
                      help="If Return Level Full has been specified, provide a path to "
                            "save the results to [default: current directory]")
    parser.add_option("-b", "--buffer",
                      action="store_true",
                      dest="stdin_buffer",
                      help="Specify to allow a buffer to be collected by stdin.")
    parser.add_option("-e", "--ephID",
                      action="store", type="string",
                      dest="ephID", default="",
                      help="Specify an ephID to send to Laika.")
    parser.add_option("-m", "--ext-metadata",
                      action="store",
                      dest="ext_metadata",
                      help="Specify external metadata to be passed into the scanner.")
    parser.add_option("-z", "--log",
                      action="store_true",
                      dest="log_db",
                      help="Specify to turn on logging results.")
    parser.add_option("-R", "--recursive",
                      action="store_true",
                      default=False,
                      dest="recursive",
                      help="Enable recursive directory scanning. If enabled, all files "
                            "in the specified directory will be scanned. Results will "
                            "be output to si-cloudscan.log in the current directory.")
    (options, args) = parser.parse_args()


    # Define default configuration location
    CONFIG_PATH = "/etc/si-cloudscan/si-cloudscan.conf"

    if options.config_path:
        CONFIG_PATH = options.config_path
    
    Config = ConfigParser.ConfigParser()
    Config.read(CONFIG_PATH)

    # Parse through the config file and append each section to a single dictionary
    global configs
    for section in Config.sections():
        configs.update(dict(Config.items(section)))

    # Set the working path; this will be used for file output if another
    # path is not specified
    WORKING_PATH = os.getcwd()

    if options.use_ssh:
        USE_SSH = True
    else: 
        if strtobool(getConfig('use_ssh')):
            USE_SSH = True
        else:
            USE_SSH = False

    if options.ssh_host:
        SSH_HOST = options.ssh_host
    else:
        SSH_HOST = getConfig('ssh_host')
        
    if options.broker_host:
        BROKER_HOST = options.broker_host
    else:
        BROKER_HOST = getConfig('broker_host')
 
    if options.debug:
        logging.basicConfig(level=logging.DEBUG)

    logging.debug("Host: %s" % BROKER_HOST)

    if options.return_level:
        RETURN_LEVEL = options.return_level
    else:
        RETURN_LEVEL = getConfig('return_level')

    if options.source:
        SOURCE = options.source
    else:
        SOURCE = "si-cloudscan"

    if not options.log_db:
        SOURCE += "-nolog"
     
    if options.save_path:
        SAVE_PATH = options.save_path
    else:
        SAVE_PATH = WORKING_PATH
    
    if options.num_procs:
        num_procs = int(options.num_procs)
    else:
        num_procs = int(getConfig('num_procs'))

    if options.timeout:
        logging.debug("default timeout changed to %i" % options.timeout)
        REQUEST_TIMEOUT = options.timeout * 1000
    else:
        REQUEST_TIMEOUT = int(getConfig('request_timeout'))

    if options.ext_metadata:
        try:
            if os.path.exists(options.ext_metadata):
                with open(options.ext_metadata) as metafile:
                    ext_metadata = json.loads(metafile.read())
            else:
                ext_metadata = json.loads(options.ext_metadata)
            assert isinstance(ext_metadata, dict)
        except:
            print "External Metadata must be a dictionary!"
            sys.exit(0)
    else:
        ext_metadata = dict()

    REQUEST_RETRIES = int(getConfig('request_retries'))
    
    # Attempt to get the hostname
    try:
        hostname = gethostname().split('.')[0] 
    except:
        hostname = "none"

    
    # Attempt to set the return level, throw an error if it doesn't exist.
    try:
        return_level = globals()["level_%s" % RETURN_LEVEL]
    except KeyError as e:
        print "Please specify a valid return level: minimal, metadata or full"
        sys.exit(1)

    if not options.recursive:
        try:
            file_buffer = ''
            # Try to read the file

            if len(args) > 0:
                file_buffer = open(args[0], 'rb').read()
                file_len = len(file_buffer)
                logging.debug("opened file %s with len %i" % (args[0], file_len))
            else:
                while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                    line = sys.stdin.readline()
                    if not line:
                        break
                    else:
                        file_buffer += line

                if not file_buffer:
                    parser.print_usage()
                    sys.exit(1)
                
                file_len = len(file_buffer)

            if file_len > 20971520 and not options.nolimit:
                print "You're trying to scan a file larger than 20mb.. Are you sure?"
                print "Use the --remove-limit flag if you really want to do this."
                sys.exit(1)
        except IOError as e:
            print "\nERROR: The file does not exist: %s\n" % (args[0],)
            sys.exit(1)
    else:
        try:
            fileList = []
            if options.file_list:
                fileList = open(options.file_list).read().splitlines()
            else:
                if len(args) > 0:
                    rootdir = args[0]
                    for root, subFolders, files in os.walk(rootdir):
                        for fname in files:
                            fileList.append(os.path.join(root, fname))
                else:
                    while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                        line = sys.stdin.readline()
                        if not line:
                            break
                        else:
                            fileList.append(line)
                    if not fileList:
                        parser.print_usage()
                        sys.exit(1)

            
            if len(fileList) > 1000 and not options.nolimit:
                print "You're trying to scan over 1000 files... Are you sure?"
                print "Use the --remove-limit flag if you really want to do this."
                sys.exit(1)

        except IOError as e:
            print "\nERROR: Directory does not exist: %s\n" % (args[0],)
            sys.exit(1)


   
    if not options.recursive: 
        # Construct the object to be sent for scanning
        if args:
            filename = args[0]
        else:
            filename = "stdin"

        ext_metadata['server'] = hostname
        ext_metadata['user'] = getpass.getuser()
        externalObject = ExternalObject(buffer=file_buffer, 
                                        externalVars=ExternalVars(filename=filename, 
                                                                  ephID=options.ephID,
                                                                  extMetaData=ext_metadata,
                                                                  source="%s-%s-%s" % (SOURCE,
                                                                         hostname,
                                                                         getpass.getuser())),
                                        level=return_level)
    try:
        if not options.recursive:
            # Set up ZMQ context 
            if USE_SSH:
                try:
                    logging.debug("attempting to connect to broker at %s and SSH host %s" % (BROKER_HOST, SSH_HOST))
                    client = Client(BROKER_HOST, useSSH=True, sshHost=SSH_HOST, useGevent=True)
                except RuntimeError as e:
                    logging.exception("could not set up SSH tunnel to %s" % SSH_HOST)
                    sys.exit(1)
            else:
                logging.debug("SSH has been disabled.")
                client = Client(BROKER_HOST, useGevent=True)

            starttime = time.time()
            result = client.send(externalObject, retry=REQUEST_RETRIES, timeout=REQUEST_TIMEOUT)
            logging.debug("got reply in %s seconds" % str(time.time() - starttime))
            rootObject = getRootObject(result)
            try:
                jsonResult = getJSON(result)
                print(jsonResult)
            except:
                logging.exception("error occurred collecting results")
                return
            if return_level == level_full:
                SAVE_PATH = "%s/%s" % (SAVE_PATH, get_scanObjectUID(rootObject))
                if not os.path.exists(SAVE_PATH):
                    try:
                        os.makedirs(SAVE_PATH)
                        print "\nWriting results to %s...\n" % SAVE_PATH
                    except (OSError, IOError) as e:
                        print "\nERROR: unable to write to %s...\n" % SAVE_PATH
                        return
                else:
                    print "\nOutput folder already exists! Skipping results output...\n"
                    return
                for uid, scanObject in result.files.iteritems():
                    f = open("%s/%s" % (SAVE_PATH, uid), "wb")
                    f.write(scanObject.buffer)
                    f.close()
                    try:
                        if scanObject.filename and scanObject.parent:
                            linkPath = "%s/%s" % (SAVE_PATH, scanObject.filename.replace("/","_"))
                            if not os.path.lexists(linkPath):
                                os.symlink("%s" % (uid), linkPath)
                        elif scanObject.filename:
                            filenameParts = scanObject.filename.split("/")
                            os.symlink("%s" % (uid), "%s/%s" % (SAVE_PATH, filenameParts[-1]))
                    except:
                        print "Unable to create symlink for %s" % (uid)

                f = open("%s/%s" % (SAVE_PATH, "results.log"), "wb")
                f.write(jsonResult)
                f.close()
                sys.exit(1)
        else:
            try:
                fh = open('si-cloudscan.log', 'w')
                fh.close()
            except:
                pass

            for fname in fileList:
                job_queue.put(fname)

            for i in range(num_procs):
                job_queue.put("STOP")

            print "File list length: %s" % len(fileList)

            for i in range(num_procs):
                Process(target=worker, args=(options.nolimit, REQUEST_RETRIES, REQUEST_TIMEOUT, SAVE_PATH, SOURCE, return_level, hostname, USE_SSH, BROKER_HOST, SSH_HOST,ext_metadata,options.ephID,)).start()
   
            results_processed = 0
            while results_processed < len(fileList):
                logging.debug("Files left: %s" % ((len(fileList) - results_processed)))
                resultText = result_queue.get()
                try:
                    # Process results
                    fh = open('si-cloudscan.log', 'ab')
                    fh.write('%s\n' % resultText)
                    fh.close()
                    results_processed += 1
                except Exception as e:
                    raise

            print('Wrote results to si-cloudscan.log')

    except KeyboardInterrupt:
        print "Interrupted by user, exiting..."
        sys.exit(1)
Example #33
def worker(nolimit, REQUEST_RETRIES, REQUEST_TIMEOUT, SAVE_PATH, SOURCE, return_level, hostname, USE_SSH, BROKER_HOST, SSH_HOST, ext_metadata, ephID):
    # Set up ZMQ context 
    if USE_SSH:
        try:
            logging.debug("attempting to connect to broker at %s and SSH host %s" % (BROKER_HOST, SSH_HOST))
            client = Client(BROKER_HOST, useSSH=True, sshHost=SSH_HOST)
        except RuntimeError as e:
            logging.exception("could not set up SSH tunnel to %s" % SSH_HOST)
            sys.exit(1)
    else:
        logging.debug("SSH has been disabled.")
        client = Client(BROKER_HOST)

    
    randNum = randint(1, 10000)
    
    for fname in iter(job_queue.get, 'STOP'):
        print "Worker %s: Starting new request" % randNum
        try:
            # Try to read the file
            file_buffer = open(fname, 'rb').read()
            file_len = len(file_buffer)
            logging.debug("opened file %s with len %i" % (fname, file_len))
            if file_len > 20971520 and not nolimit:
                print "You're trying to scan a file larger than 20mb.. Are you sure?"
                print "Use the --remove-limit flag if you really want to do this."
                print "File has not been scanned: %s" % fname
                result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile has not been scanned due to size: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
                continue
        except IOError as e:
            print "\nERROR: The file does not exist: %s\n" % (fname,)
            print "Moving to next file..."
            result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile has not been scanned due to an IO Error: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
            continue

        try:
            # Construct the object to be sent for scanning
            externalObject = ExternalObject(buffer=file_buffer,
                                            externalVars=ExternalVars(filename=fname,
                                                                      ephID=ephID,
                                                                      extMetaData=ext_metadata,
                                                                      source="%s-%s-%s" % (SOURCE,
                                                                                           hostname,
                                                                                           getpass.getuser())),
                                        level=return_level)

            starttime = time.time()
            result = client.send(externalObject, retry=REQUEST_RETRIES, timeout=REQUEST_TIMEOUT)
            if not result:
                result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile timed out in the scanner: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
                continue

            logging.debug("got reply in %s seconds" % str(time.time() - starttime))
            rootObject = getRootObject(result)

            jsonResult = getJSON(result)
            resultText = '%s\n' % jsonResult

            if return_level == level_full:
                FILE_SAVE_PATH = "%s/%s" % (SAVE_PATH, get_scanObjectUID(rootObject))
                if not os.path.exists(FILE_SAVE_PATH):
                    try:
                        os.makedirs(FILE_SAVE_PATH)
                        print "Writing results to %s..." % FILE_SAVE_PATH
                    except (OSError, IOError) as e:
                        print "\nERROR: unable to write to %s...\n" % FILE_SAVE_PATH
                        return
                else:
                    print "\nOutput folder already exists! Skipping results output...\n"
                    return
                for uid, scanObject in result.files.iteritems():
                    f = open("%s/%s" % (FILE_SAVE_PATH, uid), "wb")
                    f.write(scanObject.buffer)
                    f.close()
                    if scanObject.filename and scanObject.depth != 0:
                        linkPath = "%s/%s" % (FILE_SAVE_PATH, scanObject.filename.replace("/","_"))
                        if not os.path.lexists(linkPath):
                            os.symlink("%s" % (uid), linkPath)
                    elif scanObject.filename:
                        filenameParts = scanObject.filename.split("/")
                        linkPath = "%s/%s" % (FILE_SAVE_PATH, filenameParts[-1])
                        if not os.path.lexists(linkPath):
                            os.symlink("%s" % (uid), linkPath)
                f = open("%s/%s" % (FILE_SAVE_PATH, "results.json"), "wb")
                f.write(jsonResult)
                f.close()
            
            result_queue.put(resultText)
        except:
            # logging.exception("error occurred collecting results")
            result_queue.put("~~~~~~~~~~~~~~~~~~~~\nUNKNOWN ERROR OCCURRED: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
            continue
Example #34
# Create a directory (the mode argument is octal)
os.mkdir("pythonDir", 0o777)

# Remove the directory
os.rmdir("pythonDir")

# Change the current directory to "/home/newdir"
# os.chdir("/home/newdir")

# Print the current working directory
print(os.getcwd())

# Print the list of files and folders contained in the given path
print(os.listdir("/home/alic/WorkSpace/Python/StudyPython"))

# Create a symbolic link
# os.mkdir("/home/alic/WorkSpace/Python/StudyPython/src")
# os.mkdir("/home/alic/WorkSpace/Python/StudyPython/dst")
src = "/home/alic/WorkSpace/Python/StudyPython/src"  # link target
dst = "/home/alic/WorkSpace/Python/StudyPython/dst"  # link location
os.symlink(src, dst)
print('symlink created successfully')
# Remove the symbolic link
os.remove(dst)
print("symlink removed successfully")

# Change file permissions
file = open("demoFile", "w+")
os.chmod(file.name, 0o777)
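Beyond creating and removing links, the os module can also inspect them; a short sketch reusing the demoFile created above:

os.symlink("demoFile", "demoLink")
print(os.path.islink("demoLink"))   # True: the path itself is a symlink
print(os.readlink("demoLink"))      # 'demoFile': the stored target, unresolved
print(os.path.lexists("demoLink"))  # True even if the target goes missing
os.remove("demoLink")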
Example #35
    def _create(cls,
                dom,
                imgUUID,
                volUUID,
                capacity,
                volFormat,
                preallocate,
                volParent,
                srcImgUUID,
                srcVolUUID,
                volPath,
                initial_size=None):
        """
        Class specific implementation of volumeCreate. All the exceptions are
        properly handled and logged in volume.create()
        """

        lv_size = cls.calculate_volume_alloc_size(preallocate, capacity,
                                                  initial_size)
        lv_size_mb = (utils.round(lv_size, constants.MEGAB) // constants.MEGAB)

        lvm.createLV(dom.sdUUID,
                     volUUID,
                     lv_size_mb,
                     activate=True,
                     initialTags=(sc.TAG_VOL_UNINIT, ))

        fileutils.rm_file(volPath)
        lvPath = lvm.lvPath(dom.sdUUID, volUUID)
        cls.log.info("Creating volume symlink from %r to %r", lvPath, volPath)
        os.symlink(lvPath, volPath)

        if not volParent:
            cls.log.info("Request to create %s volume %s with capacity = %s",
                         sc.type2name(volFormat), volPath, capacity)
            if volFormat == sc.COW_FORMAT:
                operation = qemuimg.create(volPath,
                                           size=capacity,
                                           format=sc.fmt2str(volFormat),
                                           qcow2Compat=dom.qcow2_compat())
                operation.run()
        else:
            # Create hardlink to template and its meta file
            cls.log.info(
                "Request to create snapshot %s/%s of volume %s/%s "
                "with capacity %s", imgUUID, volUUID, srcImgUUID, srcVolUUID,
                capacity)
            volParent.clone(volPath, volFormat, capacity)

        with dom.acquireVolumeMetadataSlot(volUUID) as slot:
            mdTags = [
                "%s%s" % (sc.TAG_PREFIX_MD, slot),
                "%s%s" % (sc.TAG_PREFIX_PARENT, srcVolUUID),
                "%s%s" % (sc.TAG_PREFIX_IMAGE, imgUUID)
            ]
            lvm.changeLVTags(dom.sdUUID,
                             volUUID,
                             delTags=[sc.TAG_VOL_UNINIT],
                             addTags=mdTags)

        try:
            lvm.deactivateLVs(dom.sdUUID, [volUUID])
        except se.CannotDeactivateLogicalVolume:
            cls.log.warn("Cannot deactivate new created volume %s/%s",
                         dom.sdUUID,
                         volUUID,
                         exc_info=True)

        return (dom.sdUUID, slot)
Example #36
 DMG_TMP = WORK_DIR + "pack_tmp.dmg"
 DMG_RES_DIR = RES_DIR + "dmg_resource/"
 VOL_NAME = "HostsUtl"
 DMG_NAME = VOL_NAME + "-mac-gpl-" + VERSION + ".dmg"
 DMG_PATH = WORK_DIR + DMG_NAME
 # Clean work space before pack up
 if os.path.exists(VDMG_DIR):
     shutil.rmtree(VDMG_DIR)
 if os.path.isfile(DMG_TMP):
     os.remove(DMG_TMP)
 if os.path.isfile(DMG_PATH):
     os.remove(DMG_PATH)
 # Prepare files in DMG package
 os.mkdir(VDMG_DIR)
 shutil.move(APP_PATH, VDMG_DIR)
 os.symlink("/Applications", VDMG_DIR + " ")
 shutil.copy2(DMG_RES_DIR + "background.png", VDMG_DIR + ".background.png")
 shutil.copy2(DMG_RES_DIR + "DS_Store_dmg", VDMG_DIR + ".DS_Store")
 # Make DMG file
 print " Making DMG Package ".center(78, '=')
 MK_CMD = (
     "hdiutil makehybrid -hfs -hfs-volume-name %s "
     "-hfs-openfolder %s %s -o %s" % (
         VOL_NAME, VDMG_DIR, VDMG_DIR, DMG_TMP))
 PACK_CMD = "hdiutil convert -format UDZO %s -o %s" % (DMG_TMP, DMG_PATH)
 os.system(MK_CMD)
 os.system(PACK_CMD)
 # Clean work directory after make DMG package
 shutil.rmtree(VDMG_DIR)
 os.remove(DMG_TMP)
 # Move DMG file to release directory
Example #37
def collection_extract(collection, path, files=[], decompress=True):
    """Retrieve a collection from Keep and extract it to a local
    directory.  Return the absolute path where the collection was
    extracted.

    collection -- collection locator
    path -- where to extract: absolute, or relative to job tmp
    """
    matches = re.search(r'^([0-9a-f]+)(\+[\w@]+)*$', collection)
    if matches:
        collection_hash = matches.group(1)
    else:
        collection_hash = hashlib.md5(collection).hexdigest()
    if not re.search('^/', path):
        path = os.path.join(arvados.current_job().tmpdir, path)
    lockfile = open(path + '.lock', 'w')
    fcntl.flock(lockfile, fcntl.LOCK_EX)
    try:
        os.stat(path)
    except OSError:
        os.mkdir(path)
    already_have_it = False
    try:
        if os.readlink(os.path.join(path, '.locator')) == collection_hash:
            already_have_it = True
    except OSError:
        pass

    # emulate "rm -f" (i.e., if the file does not exist, we win)
    try:
        os.unlink(os.path.join(path, '.locator'))
    except OSError:
        if os.path.exists(os.path.join(path, '.locator')):
            os.unlink(os.path.join(path, '.locator'))

    files_got = []
    for s in CollectionReader(collection).all_streams():
        stream_name = s.name()
        for f in s.all_files():
            if (files == [] or
                ((f.name() not in files_got) and
                 (f.name() in files or
                  (decompress and f.decompressed_name() in files)))):
                outname = f.decompressed_name() if decompress else f.name()
                files_got += [outname]
                if os.path.exists(os.path.join(path, stream_name, outname)):
                    continue
                mkdir_dash_p(os.path.dirname(os.path.join(path, stream_name, outname)))
                outfile = open(os.path.join(path, stream_name, outname), 'wb')
                for buf in (f.readall_decompressed() if decompress
                            else f.readall()):
                    outfile.write(buf)
                outfile.close()
    if len(files_got) < len(files):
        raise arvados.errors.AssertionError(
            "Wanted files %s but only got %s from %s" %
            (files, files_got,
             [z.name() for z in CollectionReader(collection).all_files()]))
    os.symlink(collection_hash, os.path.join(path, '.locator'))

    lockfile.close()
    return path
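Both Keep extractors above use a '.locator' symlink as a cheap cache marker: the link's target string records which collection the directory currently holds, without creating any real file. The pattern in isolation (the '.marker' name is ours):

import os

def cache_is_current(path, key):
    """True if path/.marker is a symlink whose target string equals key."""
    try:
        return os.readlink(os.path.join(path, ".marker")) == key
    except OSError:
        return False

def mark_cache(path, key):
    marker = os.path.join(path, ".marker")
    if os.path.lexists(marker):
        os.unlink(marker)
    os.symlink(key, marker)  # the target need not exist; it is just a tag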
Example #38
def copyfile(
    originalfile,
    newfile,
    copy=False,
    create_new=False,
    use_hardlink=True,
    copy_related_files=True,
):
    """
    Copy or link files.

    If ``use_hardlink`` is True, and the file can be hard-linked, then a
    link is created, instead of copying the file.

    If a hard link is not created and ``copy`` is False, then a symbolic
    link is created.

    .. admonition:: Copy options for existing files

        * symlink

            * to regular file originalfile            (keep if symlinking)
            * to same dest as symlink originalfile    (keep if symlinking)
            * to other file                           (unlink)

        * regular file

            * hard link to originalfile               (keep)
            * copy of file (same hash)                (keep)
            * different file (diff hash)              (unlink)

    .. admonition:: Copy options for new files

        * ``use_hardlink`` & ``can_hardlink`` => hardlink
        * ``~hardlink`` & ``~copy`` & ``can_symlink`` => symlink
        * ``~hardlink`` & ``~symlink`` => copy

    Parameters
    ----------
    originalfile : :obj:`str`
        full path to original file
    newfile : :obj:`str`
        full path to new file
    copy : Bool
        specifies whether to copy or symlink files
        (default=False); symlinks are only attempted on POSIX systems
    create_new : Bool
        if True, never overwrite ``newfile``; pick a fresh
        ``_cNNNN``-suffixed name instead (default=False)
    use_hardlink : Bool
        specifies whether to hard-link files, when able
        (default=True), taking precedence over copy
    copy_related_files : Bool
        specifies whether to also operate on related files, as defined in
        ``related_filetype_sets``

    Returns
    -------
    newfile : :obj:`str`
        the path actually written (may differ from the input when
        ``create_new`` picks a new name)

    """
    newhash = None
    orighash = None
    logger.debug(newfile)

    if create_new:
        while op.exists(newfile):
            base, fname, ext = split_filename(newfile)
            s = re.search("_c[0-9]{4}$", fname)
            i = 0
            if s:
                i = int(s.group()[2:]) + 1
                fname = fname[:-6] + "_c%04d" % i
            else:
                fname += "_c%04d" % i
            newfile = base + os.sep + fname + ext

    # Don't try creating symlinks on CIFS
    if copy is False and on_cifs(newfile):
        copy = True

    keep = False
    if op.lexists(newfile):
        if op.islink(newfile):
            if all(
                (
                    os.readlink(newfile) == op.realpath(originalfile),
                    not use_hardlink,
                    not copy,
                )
            ):
                keep = True
        elif posixpath.samefile(newfile, originalfile):
            keep = True
        else:
            newhash = hash_file(newfile)
            logger.debug("File: %s already exists,%s, copy:%d", newfile, newhash, copy)
            orighash = hash_file(originalfile)
            keep = newhash == orighash
        if keep:
            logger.debug(
                "File: %s already exists, not overwriting, copy:%d", newfile, copy
            )
        else:
            os.unlink(newfile)

    if not keep and use_hardlink:
        try:
            logger.debug("Linking File: %s->%s", newfile, originalfile)
            # Use realpath to avoid hardlinking symlinks
            os.link(op.realpath(originalfile), newfile)
        except OSError:
            use_hardlink = False  # Disable hardlink for associated files
        else:
            keep = True

    if not keep and not copy and os.name == "posix":
        try:
            logger.debug("Symlinking File: %s->%s", newfile, originalfile)
            os.symlink(originalfile, newfile)
        except OSError:
            copy = True  # Disable symlink for associated files
        else:
            keep = True

    if not keep:
        try:
            logger.debug("Copying File: %s->%s", newfile, originalfile)
            shutil.copyfile(originalfile, newfile)
        except shutil.Error as e:
            logger.warning(str(e))

    # Associated files
    if copy_related_files:
        related_file_pairs = (
            get_related_files(f, include_this_file=False)
            for f in (originalfile, newfile)
        )
        for alt_ofile, alt_nfile in zip(*related_file_pairs):
            if op.exists(alt_ofile):
                copyfile(
                    alt_ofile,
                    alt_nfile,
                    copy,
                    use_hardlink=use_hardlink,
                    copy_related_files=False,
                )

    return newfile
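# A stripped-down sketch of the hardlink -> symlink -> copy fallback
# chain the docstring describes; link_or_copy is hypothetical and none
# of the existing-file bookkeeping above is reproduced:
import os
import shutil

def link_or_copy(src, dst):
    try:
        os.link(os.path.realpath(src), dst)  # realpath: don't hardlink a symlink
        return
    except OSError:
        pass
    try:
        os.symlink(src, dst)
        return
    except OSError:
        pass
    shutil.copyfile(src, dst)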
Exemple #39
0
    def ingest(self, infile, outfile, mode="move", dryrun=False):
        """Ingest a file into the image repository.

        This is copied from LSST 18.1.0, with the addition of commit
        ``7d27e3e8694b5f62b6ae8ef1ae6a7ad35f2829ee`` to detect when we are
        re-linking the same file.

        Parameters
        ----------
        infile : `str`
            Name of input file.
        outfile : `str`
            Name of output file (file in repository).
        mode : `str`
            Mode of ingest (copy/link/move/skip).
        dryrun : `bool`
            Only report what would occur, rather than actually doing anything?

        Returns
        -------
        success : `bool`
            Whether the file was successfully ingested.
        """
        if mode == "skip":
            return True
        if dryrun:
            self.log.info("Would %s from %s to %s" % (mode, infile, outfile))
            return True
        try:
            outdir = os.path.dirname(outfile)
            if not os.path.isdir(outdir):
                try:
                    os.makedirs(outdir)
                except OSError:
                    # Silently ignore mkdir failures due to race conditions
                    if not os.path.isdir(outdir):
                        raise
            if os.path.lexists(outfile):
                if self.config.clobber:
                    os.unlink(outfile)
                else:
                    raise RuntimeError(
                        "File %s already exists; consider --config clobber=True"
                        % outfile)

            if mode == "copy":
                lsst.pipe.tasks.ingest.assertCanCopy(infile, outfile)
                shutil.copyfile(infile, outfile)
            elif mode == "link":
                if os.path.exists(outfile):
                    if os.path.samefile(infile, outfile):
                        self.log.debug("Already linked %s to %s: ignoring" %
                                       (infile, outfile))
                    else:
                        self.log.warn(
                            "%s already has a file at the target location (%s): ignoring "
                            "(set clobber=True to overwrite)" %
                            (infile, outfile))
                    return False
                os.symlink(os.path.abspath(infile), outfile)
            elif mode == "move":
                lsst.pipe.tasks.ingest.assertCanCopy(infile, outfile)
                os.rename(infile, outfile)
            else:
                raise AssertionError("Unknown mode: %s" % mode)
            self.log.info("%s --<%s>--> %s" % (infile, mode, outfile))
        except Exception as e:
            self.log.warn("Failed to %s %s to %s: %s" %
                          (mode, infile, outfile, e))
            if not self.config.allowError:
                raise
            return False
        return True
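# The makedirs block above tolerates a concurrent creator; a
# hypothetical condensation of that race-safe idiom:
import os

def makedirs_racesafe(path):
    if not os.path.isdir(path):
        try:
            os.makedirs(path)
        except OSError:
            # swallow the failure only if someone else won the race
            if not os.path.isdir(path):
                raise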
Exemple #40
0
 def newsymlink(src, dst):
     print("newsymlink", src, dst)
     # os.path.exists returns False for broken symlinks, so check
     # islink and drop any stale link before re-creating it
     if os.path.islink(dst):
         os.remove(dst)
     os.symlink(src, dst)
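# A hypothetical variant that also replaces an existing regular file,
# using os.path.lexists -- which, unlike os.path.exists, is True for
# broken symlinks as well:
import os

def force_symlink(src, dst):
    if os.path.lexists(dst):
        os.remove(dst)
    os.symlink(src, dst)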
Exemple #41
0
def installCustomIndicatorIcons(installMethod):
    if not os.path.exists(destDir):
        os.makedirs(destDir)
    else:
        try:
            shutil.rmtree(destDir)
        except OSError:
            exit(1)
        os.makedirs(destDir)

    if installMethod == "copy":
        try:
            for icon in os.listdir(iconDir):
                shutil.copy(iconDir + icon, destDir + icon)
            symlink(destDir + "ico_22_0.png", destDir + "icomute_22_0.png")
            for i in range(2, 100):
                symlink(destDir + "ico_22_1.png",
                        destDir + "ico_22_" + str(i) + ".png")
            for i in range(1, 100):
                symlink(destDir + "ico_22_1.png",
                        destDir + "icomute_22_" + str(i) + ".png")
        except (IOError, OSError):
            exit(1)
    elif installMethod == "link":
        try:
            for icon in os.listdir(iconDir):
                symlink(iconDir + icon, destDir + icon)
            symlink(iconDir + "ico_22_0.png", destDir + "icomute_22_0.png")
            for i in range(2, 100):
                symlink(iconDir + "ico_22_1.png",
                        destDir + "ico_22_" + str(i) + ".png")
            for i in range(1, 100):
                symlink(iconDir + "ico_22_1.png",
                        destDir + "icomute_22_" + str(i) + ".png")
        except (IOError, OSError):
            exit(1)
    else:
        print "Invalid operation!"

    open(destDir + "elementaryPlus.installed", "w").close()
Exemple #42
0
class PushPCAction(Action):
    """
    Action that pushes a config from one pipeline configuration up to its parent
    """
    def __init__(self):
        Action.__init__(self, "push_configuration", Action.TK_INSTANCE, (
            "Pushes any configuration changes made here to another configuration. "
            "This is typically used when you have cloned your production configuration "
            "into a staging sandbox, updated the apps in this sandbox and want to push "
            "those updates back to your production configuration."),
                        "Configuration")

    def run_interactive(self, log, args):

        # get list of all PCs for this project
        if self.tk.pipeline_configuration.is_site_configuration():
            raise TankError("You can't push the site configuration.")

        project_id = self.tk.pipeline_configuration.get_project_id()

        current_pc_name = self.tk.pipeline_configuration.get_name()
        current_pc_id = self.tk.pipeline_configuration.get_shotgun_id()
        pipeline_configs = self.tk.shotgun.find(
            constants.PIPELINE_CONFIGURATION_ENTITY,
            [["project", "is", {
                "type": "Project",
                "id": project_id
            }]], ["code", "linux_path", "windows_path", "mac_path"])

        if len(args) == 1 and args[0] == "--symlink":
            use_symlink = True
        else:
            use_symlink = False

        if len(pipeline_configs) == 1:
            raise TankError(
                "Only one pipeline configuration for this project! Need at least two "
                "configurations in order to push. Please start by cloning a pipeline "
                "configuration inside of Shotgun.")

        log.info(
            "This command will push the configuration in the current pipeline configuration "
            "('%s') to another pipeline configuration in the project. By default, the data "
            "will be copied to the target config folder. If pass a --symlink parameter, it will "
            "create a symlink instead." % current_pc_name)

        log.info("")
        log.info("Your existing configuration will be backed up.")

        if use_symlink:
            log.info("")
            log.info("A symlink will be used.")

        log.info("")

        log.info(
            "The following pipeline configurations are available to push to:")
        path_hash = {}
        for pc in pipeline_configs:
            # skip self
            if pc["id"] == current_pc_id:
                continue
            local_path = pc.get(SG_LOCAL_STORAGE_OS_MAP[sys.platform])
            path_hash[pc["id"]] = local_path
            log.info(" - [%d] %s (%s)" % (pc["id"], pc["code"], local_path))
        log.info("")

        answer = raw_input(
            "Please type in the id of the configuration to push to (ENTER to exit): "
        )
        if answer == "":
            raise TankError("Aborted by user.")
        try:
            target_pc_id = int(answer)
        except ValueError:
            raise TankError("Please enter a number!")

        if target_pc_id not in [x["id"] for x in pipeline_configs]:
            raise TankError("Id was not found in the list!")

        target_pc_path = path_hash[target_pc_id]
        target_pc = PipelineConfiguration(target_pc_path)

        # check that both pcs are using the same core version
        target_core_version = target_pc.get_associated_core_version()
        source_core_version = self.tk.pipeline_configuration.get_associated_core_version(
        )

        if target_core_version != source_core_version:
            raise TankError(
                "The configuration you are pushing to is using Core API %s and "
                "the configuration you are pushing from is using Core API %s. "
                "This is not supported - before pushing the changes, make sure "
                "that both configurations are using the "
                "same Core API!" % (target_core_version, source_core_version))

        # check that there are no dev descriptors
        dev_desc = None
        for env_name in self.tk.pipeline_configuration.get_environments():
            env = self.tk.pipeline_configuration.get_environment(env_name)
            for eng in env.get_engines():
                desc = env.get_engine_descriptor(eng)
                if isinstance(desc, TankDevDescriptor):
                    dev_desc = desc
                    break
                for app in env.get_apps(eng):
                    desc = env.get_app_descriptor(eng, app)
                    if isinstance(desc, TankDevDescriptor):
                        dev_desc = desc
                        break
        if dev_desc:
            log.warning(
                "Looks like you have one or more dev locations set up in your "
                "configuration! We strongly recommend that you do not use dev locations "
                "in any production based configs. Dev descriptors are for development "
                "purposes only. You can easily switch a dev location using the "
                "'tank switch_app' command.")
            if not console_utils.ask_yn_question("Okay to proceed?"):
                raise TankError("Aborted.")

        date_suffix = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")

        source_path = os.path.join(self.tk.pipeline_configuration.get_path(),
                                   "config")
        target_tmp_path = os.path.join(target_pc_path,
                                       "config.tmp.%s" % date_suffix)
        symlink_path = os.path.join(target_pc_path, "config.%s" % date_suffix)
        target_path = os.path.join(target_pc_path, "config")
        target_backup_path = os.path.join(target_pc_path,
                                          "config.bak.%s" % date_suffix)

        log.debug("Will push the config from %s to %s" %
                  (source_path, target_path))
        log.info("Hold on, pushing config...")

        ##########################################################################################
        # I/O phase
        old_umask = os.umask(0)
        try:

            # copy to temp location
            try:
                # copy everything!
                log.debug("Copying %s -> %s" % (source_path, target_tmp_path))
                util._copy_folder(log, source_path, target_tmp_path)

                # If the source and target configurations are both localized, then also copy the
                # core-related api files to the target config. Otherwise, skip them.
                copy_core_related_files = (
                    self.tk.pipeline_configuration.is_localized()
                    and target_pc.is_localized())

                # CORE_PC_FILES are specific to the pipeline configuration so we shouldn't copy them
                if copy_core_related_files:
                    core_files_to_remove = CORE_PC_FILES
                else:
                    core_files_to_remove = CORE_API_FILES + CORE_PC_FILES

                if self.tk.pipeline_configuration.is_localized(
                ) and not target_pc.is_localized():
                    log.warning(
                        "The source configuration contains a local core but the target "
                        "configuration uses a shared core. The following core-related api "
                        "files will not be copied to the target configuration: "
                        "%s" % CORE_API_FILES)

                # unlock and remove all the special core files from the temp dir so they aren't
                # copied to the target
                for core_file in core_files_to_remove:
                    path = os.path.join(target_tmp_path, "core", core_file)
                    if os.path.exists(path):
                        os.chmod(path, 0666)
                        log.debug("Removing system file %s" % path)
                        os.remove(path)

                # copy the pc specific special core files from target config to new config temp dir
                # in order to preserve them
                for core_file in CORE_PC_FILES:
                    curr_config_path = os.path.join(target_path, "core",
                                                    core_file)
                    new_config_path = os.path.join(target_tmp_path, "core",
                                                   core_file)
                    log.debug("Copying PC system file %s -> %s" %
                              (curr_config_path, new_config_path))
                    shutil.copy(curr_config_path, new_config_path)

            except Exception, e:
                raise TankError(
                    "Could not copy into temporary target folder '%s'. The target config "
                    "has not been altered. Check permissions and try again! "
                    "Error reported: %s" % (target_tmp_path, e))

            # backup original config
            try:
                if os.path.islink(target_path):
                    # if we are symlinked, no need to back up
                    # just delete the current symlink
                    os.remove(target_path)
                    created_backup = False
                else:
                    # move data to backup folder
                    shutil.move(target_path, target_backup_path)
                    created_backup = True
            except Exception, e:
                raise TankError(
                    "Could not move target folder from '%s' to '%s'. "
                    "Error reported: %s" %
                    (target_path, target_backup_path, e))

            # lastly, move new config into place
            if use_symlink:
                try:
                    shutil.move(target_tmp_path, symlink_path)
                    os.symlink(os.path.basename(symlink_path), target_path)
                except Exception, e:
                    raise TankError(
                        "Could not move new config folder from '%s' to '%s' or create symlink."
                        "Error reported: %s" %
                        (target_tmp_path, symlink_path, e))
Exemple #43
0
def move_repos_into_place(src, dst):
    # Find all the stuff in src/*, move it to a freshly-created
    # directory beside dst, then play some games with symlinks so that
    # dst names the new stuff and dst+".old" names the previous
    # one.  This feels like a lot of hooey for something so trivial.

    # First, make a crispy fresh new directory to put the stuff in.
    i = 0
    while True:
        date_suffix = time.strftime("%Y-%m-%d")
        dname = dst + ".%s.%d" % (date_suffix, i)
        try:
            os.mkdir(dname)
            break
        except OSError:
            exc = sys.exc_value
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise exc
        i = i + 1

    # Put the stuff in our new directory.
    for r in os.listdir(src):
        sysassert(["cp", "-rv", src + "/" + r, dname])

    # Make a symlink to the new directory; the symlink will be renamed
    # to dst shortly.
    i = 0
    while True:
        tmpnam = dst + ".TMP.%d" % i
        try:
            os.symlink(dname, tmpnam)
            break
        except OSError:  # as exc: # Python >2.5
            exc = sys.exc_value
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise exc
        i = i + 1

    # Make a symlink to the old directory; this symlink will be
    # renamed shortly, too.
    oldnam = None
    if os.path.exists(dst):
        i = 0
        while True:
            oldnam = dst + ".old.%d" % i
            try:
                os.symlink(os.readlink(dst), oldnam)
                break
            except OSError:  # as exc: # Python >2.5
                exc = sys.exc_value
                if exc.errno == errno.EEXIST:
                    pass
                else:
                    raise exc
            i = i + 1

    os.rename(tmpnam, dst)
    if oldnam:
        os.rename(oldnam, dst + ".old")
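# The numbered-suffix loops above only pick unused temporary names; the
# swap itself relies on rename(2) being atomic.  A minimal sketch of
# that idiom with hypothetical names:
import os

def replace_symlink_atomically(target, linkname, tmpname):
    os.symlink(target, tmpname)   # build the new link out of the way
    os.rename(tmpname, linkname)  # atomic: readers never see a gap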
Exemple #44
0
 def create_symlink(self):
     # CMake compatibility: see https://github.com/spack/spack/pull/6240
     with working_dir(self.prefix.bin):
         os.symlink('swig', 'swig{0}'.format(self.spec.version.up_to(2)))
Exemple #45
0
def junctionlink(src, dest):
    if os.name == 'nt':
        _junctionlink_windows(src, dest)
    else:
        os.symlink(src, dest)
Exemple #46
0
def _copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy a directory tree using copy2().

    If exception(s) occur, an Error is raised with a list of reasons.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():

        callable(src, names) -> ignored_names

    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.

    XXX Consider this example code rather than the ultimate tool.

    """
    names = _os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()

    if not exists(dst):
        _os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = _os.path.join(src, name)
        dstname = _os.path.join(dst, name)
        try:
            if symlinks and _os.path.islink(srcname):
                linkto = _os.readlink(srcname)
                _os.symlink(linkto, dstname)
            elif _os.path.isdir(srcname):
                _copytree(srcname, dstname, symlinks, ignore)
            else:
                # Will raise a SpecialFileError for unsupported file types
                _shutil.copy2(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except _shutil.Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        _shutil.copystat(src, dst)
    except OSError as why:
        if _shutil.WindowsError is not None and isinstance \
               (why, _shutil.WindowsError):
            # Copying file access times may fail on Windows
            pass
        else:
            errors.append((src, dst, str(why)))
    if errors:
        raise _shutil.Error(errors)
Exemple #47
0
def _CreateRelativeSymlink(target_path, link_path):
    link_dir = os.path.dirname(link_path)
    relpath = os.path.relpath(target_path, link_dir)
    logging.debug('Creating symlink %s -> %s', link_path, relpath)
    os.symlink(relpath, link_path)
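# Hypothetical usage: the stored target is relative to the link's own
# directory, so the whole tree can be moved without breaking the link.
#   _CreateRelativeSymlink('/data/out/build/app', '/data/out/latest/app')
#   os.readlink('/data/out/latest/app')  -> '../build/app'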
Exemple #48
0
    def fetch(self, fetcher, progress):

        fetcher.reset()

        self._compareurl = self._hdlurl

        hdlbaseurl, basename = os.path.split(self._hdlurl)
        md5url = posixpath.join(hdlbaseurl, "MD5SUM")
        item = fetcher.enqueue(md5url)
        fetcher.run(progress=progress)
        hdlmd5 = None
        failed = item.getFailedReason()
        if not failed:
            self._compareurl = md5url
            digest = getFileDigest(item.getTargetPath())
            if digest == self._digest:
                progress.add(2)
                return True

            basename = posixpath.basename(self._hdlurl)
            for line in open(item.getTargetPath()):
                md5, name = line.split()
                if name == basename:
                    hdlmd5 = md5
                    break

        fetcher.reset()
        hdlitem = fetcher.enqueue(self._hdlurl, md5=hdlmd5, uncomp=True)

        if self._hdlurl.endswith("/list"):
            listitem = None
        else:
            m = re.compile(r"/(?:synthesis\.)?hdlist(.*)\.") \
                  .search(self._hdlurl)
            suffix = m and m.group(1) or ""
            listurl = posixpath.join(hdlbaseurl, "list%s" % suffix)
            listitem = fetcher.enqueue(listurl, uncomp=True)

        fetcher.run(progress=progress)

        if hdlitem.getStatus() == FAILED:
            failed = hdlitem.getFailedReason()
            if fetcher.getCaching() is NEVER:
                lines = [_("Failed acquiring information for '%s':") % self,
                         u"%s: %s" % (hdlitem.getURL(), failed)]
                raise Error, "\n".join(lines)
            return False
        else:
            localpath = hdlitem.getTargetPath()
            digestpath = None
            if listitem and listitem.getStatus() == SUCCEEDED:
                if self._compareurl == self._hdlurl:
                    self._compareurl = listurl
                    digestpath = localpath
                listpath = listitem.getTargetPath()
            else:
                listpath = None
                if self._compareurl == self._hdlurl:
                    digestpath = localpath
            if digestpath:
                digest = getFileDigest(digestpath)
                if digest == self._digest:
                    return True
            self.removeLoaders()
            if localpath.endswith(".cz"):
                if (not os.path.isfile(localpath[:-3]) or
                    fetcher.getCaching() != ALWAYS):
                    linkpath = fetcher.getLocalPath(hdlitem)
                    linkpath = linkpath[:-2]+"gz"
                    if not os.access(os.path.dirname(linkpath), os.W_OK):
                        dirname = os.path.join(sysconf.get("user-data-dir"),
                                               "channels")
                        basename = os.path.basename(linkpath)
                        if not os.path.isdir(dirname):
                            os.makedirs(dirname)
                        linkpath = os.path.join(dirname, basename)
                    if os.path.isfile(linkpath):
                        os.unlink(linkpath)
                    os.symlink(localpath, linkpath)
                    localpath = linkpath
                    uncompressor = fetcher.getUncompressor()
                    uncomphandler = uncompressor.getHandler(linkpath)
                    try:
                        uncomphandler.uncompress(linkpath)
                    except Error, e:
                        # cz file has trailing information which breaks
                        # current gzip module logic.
                        if "Not a gzipped file" not in e[0]:
                            os.unlink(linkpath)
                            raise
                    os.unlink(linkpath)
                localpath = localpath[:-3]

            if open(localpath).read(4) == "\x8e\xad\xe8\x01":
                loader = URPMILoader(localpath, self._baseurl, listpath)
            else:
                loader = URPMISynthesisLoader(localpath, self._baseurl, listpath)

            loader.setChannel(self)
            self._loaders.append(loader)
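# The .cz handling above works around extension-keyed uncompressor
# dispatch by symlinking a .gz-named alias at the file.  A hypothetical
# sketch of that trick in isolation:
import os

def alias_for_handler(path, wanted_suffix=".gz"):
    alias = os.path.splitext(path)[0] + wanted_suffix
    if os.path.isfile(alias):
        os.unlink(alias)
    os.symlink(path, alias)  # suffix-keyed handlers now accept it
    return alias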
Exemple #49
0
    def test_NormalOp_1(self):
        """
        Synopsis:
        Test normal operation of the NG/AMS Archive Client.

        Description:
        It is tested that a file can be archived via a link in the Archive
        Queue.

        Expected Result:
        After the archiving the link is moved to the Archive Files
        Area. It is also checked that the NG/AMS XML Status Document is
        created in the Archived Files Area. After the expiration time for
        keeping archived files has expired, the archived file and the XML
        status document should be deleted.

        Test Steps:
        - Start NG/AMS Server.
        - Start instance of the NG/AMS Archive Client.
        - Create a link from a legal test (FITS) file into the Archive Queue.
        - Test that the file is archived within 20s and moved to the Archived
          Files Area.
        - Test that the XML Status Document from NG/AMS is stored in the
          Archived Files Area.
        - Check that after the given expiration time for the Archived Files
          Area, that the archived file + the XML Status Document are removed.
        - Stop the Archive Client.

        Remarks:
        ...
        """
        self.prepExtSrv()

        # Make sure the queue subdir exists before we launch the client;
        # otherwise the client and this test might find themselves in a race
        # condition and the test might fail
        d = os.path.abspath(os.path.join(arcCliDir, 'queue'))
        if not os.path.exists(d):
            os.makedirs(d)

        self.startArchiveClient()

        # Archive a file as copy and link.
        # Make sure at least the queue dir is already created
        srcFile = os.path.abspath("src/SmallFile.fits")
        shutil.copy(srcFile, os.path.join(arcCliDir, 'queue'))
        os.symlink(srcFile, os.path.join(arcCliDir, 'queue', 'Test.fits'))

        # Check that files are being archived (within 20s) + NG/AMS Status
        # Documents created.
        file1Pat = arcCliDir + "/archived/*___SmallFile.fits"
        file1StatPat = file1Pat + "___STATUS.xml"
        file2Pat = arcCliDir + "/archived/*___Test.fits"
        file2StatPat = file2Pat + "___STATUS.xml"
        startTime = time.time()
        filesFound = 0
        while ((time.time() - startTime) < 20):
            globFile1Pat = glob.glob(file1Pat)
            globFile1StatPat = glob.glob(file1StatPat)
            globFile2Pat = glob.glob(file2Pat)
            globFile2StatPat = glob.glob(file2StatPat)
            if ((len(globFile1Pat) == 1) and (len(globFile1StatPat) == 1)
                    and (len(globFile2Pat) == 1)
                    and (len(globFile2StatPat) == 1)):
                filesFound = 1
                break
        if (not filesFound):
            if (not len(globFile1Pat)):
                errMsg = "Did not find status file: " + file1Pat
            elif (not len(globFile1StatPat)):
                errMsg = "Did not find status XML document: " + file1StatPat
            elif (not len(globFile2Pat)):
                errMsg = "Did not find status file: " + file2Pat
            else:
                # (not len(globFile2StatPat)):
                errMsg = "Did not find status XML document: " + file2StatPat
            self.fail(errMsg)

        # Check the contents of one of the status documents.
        statObj = ngamsStatus.ngamsStatus().load(globFile1StatPat[0])
        refStatFile = "ref/ngamsArchiveClientTest_test_NormalOp_1_1_ref"
        tmpStatFile = saveInFile(
            None,
            filterDbStatus1(statObj.dumpBuf(), [
                "BytesStored:", "NumberOfFiles:", "FileName:", "FileVersion:"
            ]))
        self.checkFilesEq(refStatFile, tmpStatFile,
                          "Incorrect info in Archive Command " +\
                          "XML Status Document")

        # Check that the status documents are removed within 20s.
        filesRemoved = 0
        startTime = time.time()
        while ((time.time() - startTime) < 20):
            globFile1Pat = glob.glob(file1Pat)
            globFile1StatPat = glob.glob(file1StatPat)
            globFile2Pat = glob.glob(file2Pat)
            globFile2StatPat = glob.glob(file2StatPat)
            if ((len(globFile1Pat) == 0) and (len(globFile1StatPat) == 0)
                    and (len(globFile2Pat) == 0)
                    and (len(globFile2StatPat) == 0)):
                filesRemoved = 1
                break
        if (not filesRemoved):
            if (len(globFile1Pat)):
                errMsg = "Did not remove status file: " + globFile1Pat[0]
            elif (len(globFile1StatPat)):
                errMsg = "Did not remove status XML document: " +\
                         globFile1StatPat[0]
            elif (len(globFile2Pat)):
                errMsg = "Did not remove status file: " + file2Pat[0]
            else:
                # (len(globFile2StatPat)):
                errMsg = "Did not remove status XML document: " + file2StatPat[
                    0]
            self.fail(errMsg)
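# Both polling loops above spin without sleeping; a condensed,
# hypothetical helper that checks the same glob patterns with a small
# delay between attempts (want=1 for appearance, want=0 for removal):
import glob
import time

def wait_for_globs(patterns, timeout=20, want=1, interval=0.5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if all(len(glob.glob(p)) == want for p in patterns):
            return True
        time.sleep(interval)
    return False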
Exemple #50
0
def dirlink(src, dest):
    if os.name == 'nt':
        _dirlink_windows(src, dest)
    else:
        os.symlink(src, dest)
Exemple #51
0
        scriptLen = len(scriptName)
        if scriptName not in simpleCopyMask and pythonScriptRE.match(
                scriptName):
            newScriptName = scriptName[:-3].replace('_', '-')
            if DEBUG:
                print " Wrapping %s as %s" % (scriptName, newScriptName)
            fakeScriptPath = os.path.join(targetScriptsPath, newScriptName)

            # Either create the symlink or write the wrapper script
            if useSymlinks:
                # We may overwrite already existing links (in an extension, for example).
                # os.symlink will not allow that, so remove the existing link first
                if os.path.exists(fakeScriptPath):
                    os.remove(fakeScriptPath)
                # Create the symlink
                os.symlink(os.path.join(rootPath, scriptPath), fakeScriptPath)
            else:
                with open(fakeScriptPath, "w") as fd:
                    fd.write(
                        wrapperTemplate.replace('$SCRIPTLOCATION$',
                                                scriptPath))

            os.chmod(fakeScriptPath, gDefaultPerms)
        else:
            if DEBUG:
                print " Copying %s" % scriptName
            shutil.copy(os.path.join(rootPath, scriptPath), targetScriptsPath)
            copyPath = os.path.join(targetScriptsPath, scriptName)
            if platform.system() == 'Darwin':
                with open(copyPath, 'r+') as script:
                    scriptStr = script.read()
Exemple #52
0
    def _create_via_common_rec(self, path, create_symlinks=True):
        if not self.mode:
            raise ApplyError('no metadata - cannot create path ' + path)

        # If the path already exists and is a dir, try rmdir.
        # If the path already exists and is anything else, try unlink.
        st = None
        try:
            st = xstat.lstat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        if st:
            if stat.S_ISDIR(st.st_mode):
                try:
                    os.rmdir(path)
                except OSError as e:
                    if e.errno in (errno.ENOTEMPTY, errno.EEXIST):
                        msg = 'refusing to overwrite non-empty dir ' + path
                        raise Exception(msg)
                    raise
            else:
                os.unlink(path)

        if stat.S_ISREG(self.mode):
            assert (self._recognized_file_type())
            fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL, 0o600)
            os.close(fd)
        elif stat.S_ISDIR(self.mode):
            assert (self._recognized_file_type())
            os.mkdir(path, 0o700)
        elif stat.S_ISCHR(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFCHR, self.rdev)
        elif stat.S_ISBLK(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFBLK, self.rdev)
        elif stat.S_ISFIFO(self.mode):
            assert (self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFIFO)
        elif stat.S_ISSOCK(self.mode):
            try:
                os.mknod(path, 0o600 | stat.S_IFSOCK)
            except OSError as e:
                if e.errno in (errno.EINVAL, errno.EPERM):
                    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                    s.bind(path)
                else:
                    raise
        elif stat.S_ISLNK(self.mode):
            assert (self._recognized_file_type())
            if self.symlink_target and create_symlinks:
                # on MacOS, symlink() permissions depend on umask, and there's
                # no way to chown a symlink after creating it, so we have to
                # be careful here!
                oldumask = os.umask((self.mode & 0o777) ^ 0o777)
                try:
                    os.symlink(self.symlink_target, path)
                finally:
                    os.umask(oldumask)
        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        else:
            assert (not self._recognized_file_type())
            add_error('not creating "%s" with unrecognized mode "0x%x"\n' %
                      (path, self.mode))
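# The macOS note in the S_ISLNK branch generalizes: symlink(2) honors
# the umask on some platforms, and a symlink cannot be chmod'ed after
# creation.  A hypothetical helper isolating that trick:
import os

def symlink_with_mode(target, path, mode):
    old = os.umask((mode & 0o777) ^ 0o777)  # let exactly `mode` through
    try:
        os.symlink(target, path)
    finally:
        os.umask(old)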
Exemple #53
0
    def readFile(self, jobStoreFileID, localFilePath, symlink=False):
        self._checkJobStoreFileID(jobStoreFileID)
        jobStoreFilePath = self._getAbsPath(jobStoreFileID)
        localDirPath = os.path.dirname(localFilePath)

        if not symlink and os.path.islink(localFilePath):
            # We had a symlink and want to clobber it with a hardlink or copy.
            os.unlink(localFilePath)

        if os.path.exists(localFilePath) and os.path.samefile(jobStoreFilePath, localFilePath):
            # The files are already the same: same name, hardlinked, or
            # symlinked. There is nothing to do, and trying to shutil.copyfile
            # one over the other will fail.
            return

        if symlink:
            # If the reader will accept a symlink, give them one.
            # There's less that can go wrong.
            try:
                os.symlink(jobStoreFilePath, localFilePath)
                # It worked!
                return
            except OSError as e:
                if e.errno == errno.EEXIST:
                    # Overwrite existing file, emulating shutil.copyfile().
                    os.unlink(localFilePath)
                    # It would be very unlikely to fail again for same reason but possible
                    # nonetheless in which case we should just give up.
                    os.symlink(jobStoreFilePath, localFilePath)

                    # Now we succeeded and don't need to copy
                    return
                else:
                    raise

        # If we get here, symlinking isn't an option.
        if os.stat(jobStoreFilePath).st_dev == os.stat(localDirPath).st_dev:
            # It is possible that we can hard link the file.
            # Note that even if the device numbers match, we can end up trying
            # to create a "cross-device" link.

            try:
                os.link(jobStoreFilePath, localFilePath)
                # It worked!
                return
            except OSError as e:
                if e.errno == errno.EEXIST:
                    # Overwrite existing file, emulating shutil.copyfile().
                    os.unlink(localFilePath)
                    # It would be very unlikely to fail again for same reason but possible
                    # nonetheless in which case we should just give up.
                    os.link(jobStoreFilePath, localFilePath)

                    # Now we succeeded and don't need to copy
                    return
                elif e.errno == errno.EXDEV:
                    # It's a cross-device link even though it didn't appear to be.
                    # Just keep going and hit the file copy case.
                    pass
                else:
                    logger.critical('Unexpected OSError when reading file from job store')
                    logger.critical('jobStoreFilePath: ' + jobStoreFilePath + ' ' + str(os.path.exists(jobStoreFilePath)))
                    logger.critical('localFilePath: ' + localFilePath + ' ' + str(os.path.exists(localFilePath)))
                    raise

        # If we get here, neither a symlink nor a hardlink will work.
        # Make a complete copy.
        shutil.copyfile(jobStoreFilePath, localFilePath)
Exemple #54
0
def share_dir(dir_to_share):
    dir_to_share = os.path.expanduser(dir_to_share)
    dir_to_share = utils.unfuck_path(dir_to_share)
    dir_to_share = os.path.abspath(dir_to_share)

    workspace_name = os.path.basename(dir_to_share)
    floo_workspace_dir = os.path.join(G.COLAB_DIR, G.USERNAME, workspace_name)

    if os.path.isfile(dir_to_share):
        return msg.error('give me a directory please')

    if not os.path.isdir(dir_to_share):
        return msg.error('The directory %s doesn\'t appear to exist' %
                         dir_to_share)

    floo_file = os.path.join(dir_to_share, '.floo')
    # look for the .floo file for hints about previous behavior
    info = {}
    try:
        floo_info = open(floo_file, 'rb').read().decode('utf-8')
        info = json.loads(floo_info)
    except (IOError, OSError):
        pass
    except Exception:
        msg.warn("couldn't read the floo_info file: %s" % floo_file)

    workspace_url = info.get('url')
    if workspace_url:
        try:
            result = utils.parse_url(workspace_url)
        except Exception as e:
            msg.error(str(e))
        else:
            workspace_name = result['workspace']
            floo_workspace_dir = os.path.join(G.COLAB_DIR, result['owner'],
                                              result['workspace'])
            # they have previously joined the workspace
            if os.path.realpath(floo_workspace_dir) == os.path.realpath(
                    dir_to_share):
                # it could have been deleted, try to recreate it if possible
                # TODO: org or something here?
                if result['owner'] == G.USERNAME:
                    try:
                        api.create_workspace({'name': workspace_name})
                        msg.debug('Created workspace %s' % workspace_url)
                    except Exception as e:
                        msg.debug('Tried to create workspace' + str(e))
                # they wanted to share the dir, so always share it
                return join_workspace(
                    workspace_url,
                    lambda x: agent.protocol.create_buf(dir_to_share,
                                                        force=True))

    # link to what they want to share
    try:
        utils.mkdir(os.path.dirname(floo_workspace_dir))
        os.symlink(dir_to_share, floo_workspace_dir)
    except OSError as e:
        if e.errno != 17:  # 17 == errno.EEXIST; an existing link is fine
            raise
    except Exception as e:
        return msg.error("Couldn't create symlink from %s to %s: %s" %
                         (dir_to_share, floo_workspace_dir, str(e)))

    # make & join workspace
    create_workspace(workspace_name, floo_workspace_dir, dir_to_share)
Exemple #55
0
def main():
    global DOCKER_CONFIG_PATH
    setup_env()
    args = parse_args()

    use_docker_pulls = not args.no_pull

    # run utility commands (docker_clean) if specified
    if args.command == 'docker_clean':
        docker_containers = subprocess.check_output("{} docker ps -a -q".format(SUDO_CMD), shell=True).decode("utf-8").split('\n')
        docker_images = subprocess.check_output("{} docker images -q".format(SUDO_CMD), shell=True).decode("utf-8").split('\n')
        for container in docker_containers:
            if not container:
                continue
            os.system("{} docker rm {}".format(SUDO_CMD, container))
        for image in docker_images:
            if not image:
                continue
            os.system("{} docker rmi {}".format(SUDO_CMD, image))
        sys.exit(1)

    # for all other commands
    # if config doesn't exist, only the 'install' command may be run
    config_existed = os.path.exists(FEDNODE_CONFIG_PATH)
    config = configparser.ConfigParser()
    if not config_existed:
        if args.command != 'install':
            print("config file {} does not exist. Please run the 'install' command first".format(FEDNODE_CONFIG_FILE))
            sys.exit(1)

        # write default config
        config.add_section('Default')
        config.set('Default', 'branch', args.branch)
        config.set('Default', 'config', args.config)
        write_config(config)

    # load and read config
    assert os.path.exists(FEDNODE_CONFIG_PATH)
    config.read(FEDNODE_CONFIG_PATH)
    build_config = config.get('Default', 'config')
    docker_config_file = "docker-compose.{}.yml".format(build_config)
    DOCKER_CONFIG_PATH = os.path.join(SCRIPTDIR, docker_config_file)
    repo_branch = config.get('Default', 'branch')
    os.environ['FEDNODE_RELEASE_TAG'] = 'latest' if repo_branch == 'master' else repo_branch
    os.environ['HOSTNAME_BASE'] = socket.gethostname()
    os.environ['MONGODB_HOST_INTERFACE'] = getattr(args, 'mongodb_interface', "127.0.0.1")

    # perform action for the specified command
    if args.command == 'install':
        if config_existed:
            print("Cannot install, as it appears a configuration already exists. Please run the 'uninstall' command first")
            sys.exit(1)

        # check port usage
        for port in HOST_PORTS_USED[build_config]:
            if is_port_open(port):
                print("Cannot install, as it appears a process is already listening on host port {}".format(port))
                sys.exit(1)

        # check out the necessary source trees (don't use submodules due to detached HEAD and other problems)
        REPOS = REPOS_BASE if build_config == 'base' else (REPOS_COUNTERBLOCK if build_config == 'counterblock' else REPOS_FULL)
        for repo in REPOS:
            repo_url = REPO_BASE_SSH.format(repo) if args.use_ssh_uris else REPO_BASE_HTTPS.format(repo)
            repo_dir = os.path.join(SCRIPTDIR, "src", repo)
            if not os.path.exists(repo_dir):
                git_cmd = "git clone -b {} {} {}".format(repo_branch, repo_url, repo_dir)
                if not IS_WINDOWS:  # make sure to check out the code as the original user, so the permissions are right
                    os.system("{} -u {} bash -c \"{}\"".format(SUDO_CMD, SESSION_USER, git_cmd))
                else:
                    os.system(git_cmd)

        # make sure we have the newest image for each service
        if use_docker_pulls:
            run_compose_cmd("pull --ignore-pull-failures")
        else:
            print("skipping docker pull command")


        # copy over the configs from .default to active versions, if they don't already exist
        for default_config in glob.iglob(os.path.join(SCRIPTDIR, 'config', '**/*.default'), recursive=True):
            active_config = default_config.replace('.default', '')
            if not os.path.exists(active_config):
                print("Generating config from defaults at {} ...".format(active_config))
                shutil.copy2(default_config, active_config)
                default_config_stat = os.stat(default_config)
                if not IS_WINDOWS:
                    os.chown(active_config, default_config_stat.st_uid, default_config_stat.st_gid)

        # create symlinks to the data volumes (for ease of use)
        if not IS_WINDOWS:
            data_dir = os.path.join(SCRIPTDIR, "data")
            if not os.path.exists(data_dir):
                os.mkdir(data_dir)

            for volume in VOLUMES_USED[build_config]:
                symlink_path = os.path.join(data_dir, volume.replace('-data', ''))
                volume_name = "{}_{}".format(PROJECT_NAME, volume)
                mountpoint_path = get_docker_volume_path(volume_name)
                if mountpoint_path is not None and not os.path.lexists(symlink_path):
                    os.symlink(mountpoint_path, symlink_path)
                    print("For convenience, symlinking {} to {}".format(mountpoint_path, symlink_path))

        # launch
        run_compose_cmd("up -d")
    elif args.command == 'uninstall':
        run_compose_cmd("down")
        os.remove(FEDNODE_CONFIG_PATH)
    elif args.command == 'start':
        run_compose_cmd("start {}".format(' '.join(args.services)))
    elif args.command == 'stop':
        run_compose_cmd("stop {}".format(' '.join(args.services)))
    elif args.command == 'restart':
        run_compose_cmd("restart {}".format(' '.join(args.services)))
    elif args.command == 'reparse':
        run_compose_cmd("stop {}".format(args.service))
        run_compose_cmd("run -e COMMAND=reparse {}".format(args.service))
    elif args.command == 'rollback':
        run_compose_cmd("stop {}".format(args.service))
        run_compose_cmd("run -e COMMAND='rollback {}' {}".format(args.block_index, args.service))
    elif args.command == 'vacuum':
        run_compose_cmd("stop {}".format(args.service))
        run_compose_cmd("run -e COMMAND=vacuum {}".format(args.service))
    elif args.command == 'tail':
        run_compose_cmd("logs -f --tail={} {}".format(args.num_lines, ' '.join(args.services)))
    elif args.command == 'logs':
        run_compose_cmd("logs {}".format(' '.join(args.services)))
    elif args.command == 'ps':
        run_compose_cmd("ps")
    elif args.command == 'exec':
        if len(args.cmd) == 1 and re.match("['\"].*?['\"]", args.cmd[0]):
            cmd = args.cmd
        else:
            cmd = '"{}"'.format(' '.join(args.cmd).replace('"', '\\"'))
        os.system("{} docker exec -i -t federatednode_{}_1 bash -c {}".format(SUDO_CMD, args.service, cmd))
    elif args.command == 'shell':
        container_running = is_container_running(args.service)
        if container_running:
            os.system("{} docker exec -i -t federatednode_{}_1 bash".format(SUDO_CMD, args.service))
        else:
            print("Container is not running -- creating a transient container with a 'bash' shell entrypoint...")
            run_compose_cmd("run --no-deps --rm --entrypoint bash {}".format(args.service))
    elif args.command == 'update':
        # validate
        if args.services != ['', ]:
            for service in args.services:
                if service not in UPDATE_CHOICES:
                    print("Invalid service: {}".format(service))
                    sys.exit(1)

        services_to_update = copy.copy(UPDATE_CHOICES) if not len(args.services) else args.services
        git_has_updated = []
        while services_to_update:
            # update source code
            service = services_to_update.pop(0)
            service_base = service.replace('-testnet', '')
            if service_base not in git_has_updated:
                git_has_updated.append(service_base)
                if service_base == 'counterparty':  # special case
                    service_dirs = [os.path.join(SCRIPTDIR, "src", "counterparty-lib"), os.path.join(SCRIPTDIR, "src", "counterparty-cli")]
                else:
                    service_dirs = [service_base,]
                for service_dir in service_dirs:
                    service_dir_path = os.path.join(SCRIPTDIR, "src", service_dir)
                    if not os.path.exists(service_dir_path):
                        continue
                    service_branch = subprocess.check_output("cd {};git symbolic-ref --short -q HEAD;cd {}".format(service_dir_path, CURDIR), shell=True).decode("utf-8").strip()
                    if not service_branch:
                        print("Unknown service git branch name, or repo in detached state")
                        sys.exit(1)
                    git_cmd = "cd {}; git pull origin {}; cd {}".format(service_dir_path, service_branch, CURDIR)
                    if not IS_WINDOWS:  # make sure to update the code as the original user, so the permissions are right
                        os.system("{} -u {} bash -c \"{}\"".format(SUDO_CMD, SESSION_USER, git_cmd))
                    else:
                        os.system(git_cmd)

                    # delete installed egg (to force egg recreate and deps re-check on next start)
                    if service_base in ('counterparty', 'counterblock', 'armory-utxsvr'):
                        for path in glob.glob(os.path.join(service_dir_path, "*.egg-info")):
                            print("Removing egg path {}".format(path))
                            if not IS_WINDOWS:  # have to use root
                                os.system("{} bash -c \"rm -rf {}\"".format(SUDO_CMD, path))
                            else:
                                shutil.rmtree(path)

                if service_base == 'counterwallet' and os.path.exists(os.path.join(SCRIPTDIR, "src", "counterwallet")):  # special case
                    transifex_cfg_path = os.path.join(os.path.expanduser("~"), ".transifex")
                    if os.path.exists(transifex_cfg_path):
                        os.system("{} docker cp {} federatednode_counterwallet_1:/root/.transifex".format(SUDO_CMD, transifex_cfg_path))
                    os.system("{} docker exec -i -t federatednode_counterwallet_1 bash -c \"cd /counterwallet/src ".format(SUDO_CMD) +
                              "&& bower --allow-root update && cd /counterwallet && npm update && grunt build\"")
                    if not os.path.exists(transifex_cfg_path):
                        print("NOTE: Did not update locales because there is no .transifex file in your home directory")
                        print("If you want locales compiled, sign up for transifex and create this file to" +
                              " contain 'your_transifex_username:your_transifex_password'")

            # and restart container
            if not args.no_restart:
                run_compose_cmd("restart {}".format(service))
    elif args.command == 'configcheck':
        config_check(build_config)
    elif args.command == 'rebuild':
        if use_docker_pulls:
            run_compose_cmd("pull --ignore-pull-failures {}".format(' '.join(args.services)))
        else:
            print("skipping docker pull command")
        run_compose_cmd("up -d --build --force-recreate --no-deps {}".format(' '.join(args.services)))
Exemple #56
0
def enable(name, start=False, **kwargs):
    '''
    Start service ``name`` at boot.
    Returns ``True`` if operation is successful

    name
        the service's name

    start : False
        If ``True``, start the service once enabled.

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable <name> [start=True]
    '''

    # non-existent service
    if not available(name):
        return False

    # if service is aliased, refuse to enable it
    alias = get_svc_alias()
    if name in alias:
        log.error('This service is aliased, enable its alias instead')
        return False

    # down_file: file that disables sv autostart
    svc_realpath = _get_svc_path(name)[0]
    down_file = os.path.join(svc_realpath, 'down')

    # if the service is already enabled, remove down_file to
    # let the service start on boot (as requested)
    if enabled(name):
        if os.path.exists(down_file):
            try:
                os.unlink(down_file)
            except OSError:
                log.error('Unable to remove file %s', down_file)
                return False
        return True

    # let's enable the service

    if not start:
        # create a temp 'down' file BEFORE enabling service.
        # will prevent sv from starting this service automatically.
        log.trace('need a temporary file %s', down_file)
        if not os.path.exists(down_file):
            try:
                salt.utils.files.fopen(down_file, "w").close()  # pylint: disable=resource-leakage
            except IOError:
                log.error('Unable to create file {0}'.format(down_file))
                return False

    # enable the service
    try:
        os.symlink(svc_realpath, _service_path(name))

    except OSError:
        # (attempt to) remove temp down_file anyway
        log.error('Unable to create symlink {0}'.format(_service_path(name)))
        if not start:
            os.unlink(down_file)
        return False

    # ensure sv is aware of this new service before continuing.
    # if not, down_file might be removed too quickly, before
    # 'sv' has had time to take notice of it.
    # Documentation indicates that a change is handled within 5 seconds.
    cmd = 'sv status {0}'.format(_service_path(name))
    retcode_sv = 1
    count_sv = 0
    while retcode_sv != 0 and count_sv < 10:
        time.sleep(0.5)
        count_sv += 1
        call = __salt__['cmd.run_all'](cmd)
        retcode_sv = call['retcode']

    # remove the temp down_file in any case.
    if (not start) and os.path.exists(down_file):
        try:
            os.unlink(down_file)
        except OSError:
            log.error('Unable to remove temp file %s', down_file)
            retcode_sv = 1

    # if an error happened, revert our changes
    if retcode_sv != 0:
        os.unlink(_service_path(name))
        return False
    return True
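For contrast, here is a minimal sketch of the complementary operation built on the same down-file mechanism. It reuses the helpers from the example above (available, _get_svc_path, _service_path, log, salt.utils.files, __salt__) and is only a sketch, not the module's actual disable implementation:

def disable(name, stop=False, **kwargs):
    '''
    Sketch: keep service ``name`` from starting at boot by dropping a
    'down' file into its service directory, which sv honors on boot.
    '''
    if not available(name):
        return False
    if stop:
        # optionally stop the running service first (assumed behavior)
        __salt__['cmd.run_all']('sv down {0}'.format(_service_path(name)))
    down_file = os.path.join(_get_svc_path(name)[0], 'down')
    try:
        salt.utils.files.fopen(down_file, 'w').close()  # pylint: disable=resource-leakage
    except IOError:
        log.error('Unable to create file %s', down_file)
        return False
    return True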
Example #57
    def run_android_test(self, **kwargs):
        """Runs a reftest, in an Android application."""

        args = Namespace(**kwargs)
        if args.suite not in ('reftest', 'crashtest', 'jstestbrowser'):
            raise Exception('None or unrecognized reftest suite type.')

        self._setup_objdir(args)
        import remotereftest

        default_manifest = {
            "reftest": (self.topsrcdir, "layout", "reftests", "reftest.list"),
            "crashtest":
            (self.topsrcdir, "testing", "crashtest", "crashtests.list"),
            "jstestbrowser": ("jsreftest", "tests", "jstests.list")
        }

        if not args.tests:
            args.tests = [os.path.join(*default_manifest[args.suite])]

        args.extraProfileFiles.append(
            os.path.join(self.topsrcdir, "mobile", "android", "fonts"))

        hyphenation_path = os.path.join(self.topsrcdir, "intl", "locales")

        for (dirpath, dirnames, filenames) in os.walk(hyphenation_path):
            for filename in filenames:
                if filename.endswith('.dic'):
                    args.extraProfileFiles.append(
                        os.path.join(dirpath, filename))

        if not args.httpdPath:
            args.httpdPath = os.path.join(self.tests_dir, "modules")
        if not args.symbolsPath:
            args.symbolsPath = os.path.join(self.topobjdir,
                                            "crashreporter-symbols")
        if not args.xrePath:
            args.xrePath = os.environ.get("MOZ_HOST_BIN")
        if not args.app:
            args.app = "org.mozilla.geckoview.test"
        if not args.utilityPath:
            args.utilityPath = args.xrePath
        args.ignoreWindowSize = True
        args.printDeviceInfo = False

        from mozrunner.devices.android_device import grant_runtime_permissions, get_adb_path
        grant_runtime_permissions(self,
                                  args.app,
                                  device_serial=args.deviceSerial)

        if not args.adb_path:
            args.adb_path = get_adb_path(self)

        if 'geckoview' not in args.app:
            args.e10s = False
            print("using e10s=False for non-geckoview app")

        # A symlink and some path manipulations are required so that test
        # manifests can be found both locally and remotely (via a url)
        # using the same relative path.
        if args.suite == "jstestbrowser":
            staged_js_dir = os.path.join(self.topobjdir, "dist", "test-stage",
                                         "jsreftest")
            tests = os.path.join(self.reftest_dir, 'jsreftest')
            if not os.path.isdir(tests):
                os.symlink(staged_js_dir, tests)
            args.extraProfileFiles.append(
                os.path.join(staged_js_dir, "tests", "user.js"))
        else:
            tests = os.path.join(self.reftest_dir, "tests")
            if not os.path.isdir(tests):
                os.symlink(self.topsrcdir, tests)
            for i, path in enumerate(args.tests):
                # Non-absolute paths are relative to the packaged directory, which
                # has an extra tests/ at the start
                if os.path.exists(os.path.abspath(path)):
                    path = os.path.relpath(path, self.topsrcdir)
                args.tests[i] = os.path.join('tests', path)

        self.log_manager.enable_unstructured()
        try:
            rv = remotereftest.run_test_harness(parser, args)
        finally:
            self.log_manager.disable_unstructured()

        return rv
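The symlink-plus-relative-path trick used above can be illustrated standalone; all paths below are made up for the sketch:

import os

# Hypothetical layout: expose <srcdir> under <harness_dir>/tests so that a
# manifest path like 'tests/layout/reftests/reftest.list' resolves the same
# way on the local filesystem and through the harness's web server.
srcdir = '/work/mozilla-central'             # assumption
harness_dir = '/work/objdir/_tests/reftest'  # assumption
link = os.path.join(harness_dir, 'tests')
if not os.path.isdir(link):
    os.symlink(srcdir, link)
manifest = os.path.join('tests', 'layout', 'reftests', 'reftest.list')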
Example #58
def gen_rank_table_file(server_id=None, visible_devices=None, hccn_conf_file=None):
    hccn_conf_file = HCCN_CONF_FILE if hccn_conf_file is None else hccn_conf_file

    def get_device_ips(cfg_file):
        rst = {}
        with open(cfg_file, 'r') as f:
            print("hccn_conf_file:", cfg_file)
            for line in f.readlines():
                if not line.startswith('address_'):
                    continue
                device_id, device_ip = line.strip().split('=')
                device_id = device_id.split('_')[1]
                rst[device_id] = device_ip
                print('device_id:{}, device_ip:{}'.format(device_id, device_ip))
        return rst

    if os.path.exists(hccn_conf_file):
        device_ips = get_device_ips(hccn_conf_file)
    else:
        print("Warning: Please specify the param `hccn_conf_file` so that we can get the right device ip.")
        device_ips = {
            '0': '192.168.101.101',
            '1': '192.168.102.101',
            '2': '192.168.103.101',
            '3': '192.168.104.101',
            '4': '192.168.101.102',
            '5': '192.168.102.102',
            '6': '192.168.103.102',
            '7': '192.168.104.102',
        }

    visible_devices = [str(x) for x in range(DEVICE_NUM)] if visible_devices is None else visible_devices
    visible_devices = visible_devices.split(',') if isinstance(visible_devices, str) else visible_devices
    device_num = len(visible_devices)

    # rank_table template
    rank_table = {
        'version': '1.0',
        'server_count': '1',
        'server_list': [],  # set later
        'status': 'completed'
    }
    server_list = [
        {
            'server_id': str(SERVER_ID if server_id is None else server_id),
            'device': [],  # set later
            'host_nic_ip': 'reserve'
        }
    ]
    rank_table['server_list'] = server_list

    device = []
    for i in range(device_num):
        instance = {
            'rank_id': str(i),
            'device_id': visible_devices[i],
            'device_ip': device_ips[visible_devices[i]],
        }
        device.append(instance)
    server_list[0]['device'] = device

    rank_table_file = os.path.join(os.getcwd(), 'rank_table_{}p_{}.json'.format(device_num, '-'.join(visible_devices)))
    with open(rank_table_file, 'w') as table_fp:
        json.dump(rank_table, table_fp, indent=4)

    soft_link_file = 'rank_table_{}p.json'.format(device_num)
    # os.path.exists() returns False for a broken symlink, so check islink() too
    if os.path.islink(soft_link_file) or os.path.exists(soft_link_file):
        os.remove(soft_link_file)
    os.symlink(rank_table_file, soft_link_file)

    return rank_table_file
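A minimal usage sketch; the server id, device list, and conf path are illustrative values, not ones taken from the snippet:

if __name__ == '__main__':
    # hypothetical invocation: four visible devices on a single server
    table = gen_rank_table_file(server_id='10.0.0.1',
                                visible_devices='0,1,2,3',
                                hccn_conf_file='/etc/hccn.conf')
    print('rank table written to', table)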
Example #59
if not args.app:
    # Maybe we are being run from within the bundle
    us = Path(__file__)
    for d in us.parents:
        if d.name == 'Contents':
            if d.parent.suffix == '.app':
                args.app = d.parent
                print(
                    f"[+] We're in a bundle, defaulting app directory to {args.app}"
                )
                break

with tempfile.TemporaryDirectory() as tmp_dir:
    shutil.copytree('Ghidra.app', os.path.join(tmp_dir, 'Ghidra.app'))
    os.symlink('/Applications', os.path.join(tmp_dir, 'Applications'))

    if args.app:
        out_app = args.app
    else:
        out_app = Path(tmp_dir, 'Ghidra.app')
    contents_path = out_app.joinpath('Contents')
    resources_path = contents_path.joinpath('Resources')
    ghidra_content = None
    ghidra_zip_name = None

    if not args.url and not args.path and not args.version:
        print("[!] No URL or path provided, getting latest from github")
        releases = get_ghidra_releases()
        release = releases[0]  # The latest release
        args.version = release["name"].split(" ")[1]
Example #60
    def run_test(self):
        node = self.nodes[0]

        data_dir = lambda *p: os.path.join(node.datadir, 'regtest', *p)
        wallet_dir = lambda *p: data_dir('wallets', *p)
        wallet = lambda name: node.get_wallet_rpc(name)

        assert_equal(set(node.listwallets()), {"w1", "w2", "w3", "w"})

        self.stop_nodes()

        self.assert_start_raises_init_error(
            0, ['-walletdir=wallets'],
            'Error: Specified -walletdir "wallets" does not exist')
        self.assert_start_raises_init_error(
            0, ['-walletdir=wallets'],
            'Error: Specified -walletdir "wallets" is a relative path',
            cwd=data_dir())
        self.assert_start_raises_init_error(
            0, ['-walletdir=debug.log'],
            'Error: Specified -walletdir "debug.log" is not a directory',
            cwd=data_dir())

        # should not initialize if there are duplicate wallets
        self.assert_start_raises_init_error(
            0, ['-wallet=w1', '-wallet=w1'],
            'Error loading wallet w1. Duplicate -wallet filename specified.')

        # should not initialize if wallet file is a directory
        os.mkdir(wallet_dir('w11'))
        self.assert_start_raises_init_error(
            0, ['-wallet=w11'],
            'Error loading wallet w11. -wallet filename must be a regular file.'
        )

        # should not initialize if one wallet is a copy of another
        shutil.copyfile(wallet_dir('w2'), wallet_dir('w22'))
        self.assert_start_raises_init_error(0, ['-wallet=w2', '-wallet=w22'],
                                            'duplicates fileid')

        # should not initialize if wallet file is a symlink
        os.symlink(wallet_dir('w1'), wallet_dir('w12'))
        self.assert_start_raises_init_error(
            0, ['-wallet=w12'],
            'Error loading wallet w12. -wallet filename must be a regular file.'
        )

        # should not initialize if the specified walletdir does not exist
        self.assert_start_raises_init_error(
            0, ['-walletdir=bad'],
            'Error: Specified -walletdir "bad" does not exist')
        # should not initialize if the specified walletdir is not a directory
        not_a_dir = wallet_dir('notadir')
        open(not_a_dir, 'a').close()
        self.assert_start_raises_init_error(
            0, ['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' +
            not_a_dir + '" is not a directory')

        self.log.info("Do not allow -zapwallettxes with multiwallet")
        self.assert_start_raises_init_error(
            0, ['-zapwallettxes', '-wallet=w1', '-wallet=w2'],
            "Error: -zapwallettxes is only allowed with a single wallet file")
        self.assert_start_raises_init_error(
            0, ['-zapwallettxes=1', '-wallet=w1', '-wallet=w2'],
            "Error: -zapwallettxes is only allowed with a single wallet file")
        self.assert_start_raises_init_error(
            0, ['-zapwallettxes=2', '-wallet=w1', '-wallet=w2'],
            "Error: -zapwallettxes is only allowed with a single wallet file")

        self.log.info("Do not allow -salvagewallet with multiwallet")
        self.assert_start_raises_init_error(
            0, ['-salvagewallet', '-wallet=w1', '-wallet=w2'],
            "Error: -salvagewallet is only allowed with a single wallet file")
        self.assert_start_raises_init_error(
            0, ['-salvagewallet=1', '-wallet=w1', '-wallet=w2'],
            "Error: -salvagewallet is only allowed with a single wallet file")

        self.log.info("Do not allow -upgradewallet with multiwallet")
        self.assert_start_raises_init_error(
            0, ['-upgradewallet', '-wallet=w1', '-wallet=w2'],
            "Error: -upgradewallet is only allowed with a single wallet file")
        self.assert_start_raises_init_error(
            0, ['-upgradewallet=1', '-wallet=w1', '-wallet=w2'],
            "Error: -upgradewallet is only allowed with a single wallet file")

        # if wallets/ doesn't exist, datadir should be the default wallet dir
        wallet_dir2 = data_dir('walletdir')
        os.rename(wallet_dir(), wallet_dir2)
        self.start_node(0, ['-wallet=w4', '-wallet=w5'])
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        w5.generate(1)

        # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
        os.rename(wallet_dir2, wallet_dir())
        self.restart_node(
            0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()])
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        w5_info = w5.getwalletinfo()
        assert_equal(w5_info['immature_balance'], 50)

        competing_wallet_dir = os.path.join(self.options.tmpdir,
                                            'competing_walletdir')
        os.mkdir(competing_wallet_dir)
        self.restart_node(0, ['-walletdir=' + competing_wallet_dir])
        self.assert_start_raises_init_error(
            1, ['-walletdir=' + competing_wallet_dir],
            'Error initializing wallet database environment')

        self.restart_node(0, self.extra_args[0])

        w1 = wallet("w1")
        w2 = wallet("w2")
        w3 = wallet("w3")
        w4 = wallet("w")
        wallet_bad = wallet("bad")

        w1.generate(1)

        # accessing invalid wallet fails
        assert_raises_rpc_error(
            -18, "Requested wallet does not exist or is not loaded",
            wallet_bad.getwalletinfo)

        # accessing wallet RPC without using wallet endpoint fails
        assert_raises_rpc_error(-19, "Wallet file not specified",
                                node.getwalletinfo)

        # check w1 wallet balance
        w1_info = w1.getwalletinfo()
        assert_equal(w1_info['immature_balance'], 50)
        w1_name = w1_info['walletname']
        assert_equal(w1_name, "w1")

        # check w2 wallet balance
        w2_info = w2.getwalletinfo()
        assert_equal(w2_info['immature_balance'], 0)
        w2_name = w2_info['walletname']
        assert_equal(w2_name, "w2")

        w3_name = w3.getwalletinfo()['walletname']
        assert_equal(w3_name, "w3")

        w4_name = w4.getwalletinfo()['walletname']
        assert_equal(w4_name, "w")

        w1.generate(101)
        assert_equal(w1.getbalance(), 100)
        assert_equal(w2.getbalance(), 0)
        assert_equal(w3.getbalance(), 0)
        assert_equal(w4.getbalance(), 0)

        w1.sendtoaddress(w2.getnewaddress(), 1)
        w1.sendtoaddress(w3.getnewaddress(), 2)
        w1.sendtoaddress(w4.getnewaddress(), 3)
        w1.generate(1)
        assert_equal(w2.getbalance(), 1)
        assert_equal(w3.getbalance(), 2)
        assert_equal(w4.getbalance(), 3)

        batch = w1.batch([
            w1.getblockchaininfo.get_request(),
            w1.getwalletinfo.get_request()
        ])
        assert_equal(batch[0]["result"]["chain"], "regtest")
        assert_equal(batch[1]["result"]["walletname"], "w1")