Example #1
import os
import sys

# Path and TempDirPath are eXe's path helpers (exact import location assumed).
from exe.engine.path import Path, TempDirPath

def main():
    if len(sys.argv) < 2:
        print 'Usage: %s [version] [--install] [--local|username password]' % sys.argv[0]
        print 'Where [version] is the branch you want to checkout'
        print 'and username and password are for your eduforge account'
        print 'Eg. %s 0.7 --local' % sys.argv[0]
    else:
        version = sys.argv[1]
        branch = 'http://exe.cfdl.auckland.ac.nz/svn/exe/branches/%s' % version
        origDir = Path(sys.argv[0]).abspath().dirname()
        tmp = TempDirPath()
        os.chdir(tmp)
        os.system('svn export %s exe' % branch)
        (origDir/'../../exe/webui/firefox').copytree(tmp/'exe/exe/webui/firefox')
        os.chdir(tmp/'exe')
        tarball = Path('../exe-%s-source.tgz' % version).abspath()
        os.system('tar czf %s *' % tarball)
        os.chdir(tmp)
        if '--local' not in sys.argv:
            try:
                from paramiko import Transport
            except ImportError:
                print 'To upload you need to install paramiko python library from:'
                print 'http://www.lag.net/paramiko'
                sys.exit(2)
            from socket import socket, gethostbyname
            s = socket()
            s.connect((gethostbyname('shell.eduforge.org'), 22))
            t = Transport(s)
            t.connect()
            t.auth_password(sys.argv[-2], sys.argv[-1])
            f = t.open_sftp_client()
            f.chdir('/home/pub/exe')
            f.put(tarball.encode('utf8'), tarball.basename().encode('utf8'))
        if os.getuid() == 0:
            tarball.copyfile('/usr/portage/distfiles/' + tarball.basename())
        os.chdir(tmp/'exe/installs/gentoo')
        newEbuildFilename = Path('exe-%s.ebuild' % version).abspath()
        if not newEbuildFilename.exists():
            Path('exe-0.7.ebuild').copy(newEbuildFilename)
        if os.getuid() == 0:
            ebuildDir = Path('/usr/local/portage/dev-python/exe')
            if ebuildDir.exists():
                ebuildDir.rmtree()
            ebuildDir.makedirs()
            os.chdir(ebuildDir)
            newEbuildFilename.copy(ebuildDir)
            filesDir = ebuildDir/'files'
            filesDir.makedirs()
            Path(tmp/'exe/installs/gentoo/all-config.patch').copy(filesDir)
            if '--local' not in sys.argv:
                oldTarball = Path('/usr/portage/distfiles/')/tarball.basename()
                if oldTarball.exists():
                    oldTarball.remove()
                os.environ['GENTOO_MIRRORS']=''
                os.system('ebuild %s fetch' % newEbuildFilename.basename())
            os.system('ebuild %s manifest' % newEbuildFilename.basename())
            os.system('ebuild %s digest' % newEbuildFilename.basename())
            if '--install' in sys.argv:
                os.system('ebuild %s install' % newEbuildFilename.basename())
Example #2
    def getattr(self, path, fh=None):
        try:
            content = self.cache[path]
        except KeyError:
            node = self.get_node(path)
            has_perm = bool(self.view.get_key(path))
            if node.entry.action == node.entry.MKDIR:
                mode = stat.S_IFDIR | (0o0750 if has_perm else 0o0550)
            else:
                mode = stat.S_IFREG | (0o0640 if has_perm else 0o0440)
            return {
                'st_atime': node.entry.timestamp,
                'st_ctime': node.entry.ctime,
                'st_gid': os.getgid(),
                'st_mode': mode,
                'st_mtime': node.entry.timestamp,
                'st_nlink': 1,
                'st_size': len(node.content),
                'st_uid': os.getuid(),
            }
        else:
            import time
            return {
                'st_atime': time.time(),
                'st_ctime': time.time(),
                'st_gid': os.getgid(),
                'st_mode': stat.S_IFREG | 0o0640,
                'st_mtime': time.time(),
                'st_nlink': 1,
                'st_size': len(content),
                'st_uid': os.getuid(),
            }
Example #3
def assertPermissions():
    stat_info = os.stat('.')
    if stat_info.st_uid != os.getuid():
        print(utils.lightred("Will not load modules/extensions in tests."))
        print(utils.lightred("Repository owner (%d) executer (%d) mismatch" % (
            stat_info.st_uid, os.getuid())))
        exit(1)
Example #4
def maybe_drop_privileges(uid=None, gid=None):
    """Change process privileges to new user/group.

    If both UID and GID are specified, the real user/group is changed.

    If only UID is specified, the real user is changed, and the group is
    changed to the user's primary group.

    If only GID is specified, only the group is changed.
    """
    if sys.platform == 'win32':
        return
    if os.geteuid():
        # no point trying to setuid unless we're root.
        if not os.getuid():
            raise SecurityError('contact support')
    uid = uid and parse_uid(uid)
    gid = gid and parse_gid(gid)

    if uid:
        _setuid(uid, gid)
    else:
        gid and setgid(gid)

    if uid and not os.getuid() and not os.geteuid():
        raise SecurityError('Still root uid after drop privileges!')
    if gid and not os.getgid() and not os.getegid():
        raise SecurityError('Still root gid after drop privileges!')
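A minimal usage sketch for a helper like maybe_drop_privileges (celery.platforms ships a function with this name and shape; the daemon hooks below are hypothetical):

import os

def start_daemon(uid='nobody', gid='nogroup'):
    bind_privileged_sockets()              # hypothetical: do root-only work first
    maybe_drop_privileges(uid=uid, gid=gid)
    assert os.geteuid() != 0               # from here on we must not be root
    serve_forever()                        # hypothetical main loop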
Example #5
def drop(work_dir, new_uid='root', new_gid='root'):
    starting_uid = os.getuid()
    starting_gid = os.getgid()

    if os.getuid() != 0:
        return
    if starting_uid == 0:

        # Special handling for OS X < 10.9: grp.getgrnam has trouble with the
        # negative gid (-2) used for "nobody", so hard-code it there.
        if platform.mac_ver()[0] and \
                tuple(int(x) for x in platform.mac_ver()[0].split('.')[:2]) < (10, 9):
            wanted_gid = -2
        else:
            wanted_gid = grp.getgrnam(new_gid)[2]

        run_uid = pwd.getpwnam(new_uid)[2]
        run_gid = wanted_gid
        try:
            recursive_chown(work_dir, run_uid, run_gid)
        except OSError as e:
            logger.exception("Could not change file owner: {0}".format(e))
        try:
            os.setgid(run_gid)
        except OSError as e:
            logger.exception("Could not set new group: {0}".format(e))

        try:
            os.setuid(run_uid)
        except OSError as e:
            logger.exception("Could not set net user: {0}".format(e))

        new_umask = 0o066
        try:
            os.umask(new_umask)
        except Exception as e:
            logger.error("Failed to change umask: {0}".format(e))
Example #6
    def _create_bench_dir(self):
        """Create the directory for a benchmark."""
        # Get group_id if available (given by JUBE_GROUP_NAME)
        group_id = jube2.util.check_and_get_group_id()
        # Check if outpath exists
        if not (os.path.exists(self._outpath) and
                os.path.isdir(self._outpath)):
            os.makedirs(self._outpath)
            if group_id is not None:
                os.chown(self._outpath, os.getuid(), group_id)
        # Generate unique ID in outpath
        if self._id < 0:
            self._id = jube2.util.get_current_id(self._outpath) + 1
        if os.path.exists(self.bench_dir):
            raise RuntimeError("Benchmark directory \"{0}\" already exist"
                               .format(self.bench_dir))

        os.makedirs(self.bench_dir)
        # If JUBE_GROUP_NAME is given, set GID-Bit and change group
        if group_id is not None:
            os.chmod(self.bench_dir,
                     os.stat(self.bench_dir).st_mode | stat.S_ISGID)
            os.chown(self.bench_dir, os.getuid(), group_id)
        self.write_benchmark_configuration(
            os.path.join(self.bench_dir, jube2.conf.CONFIGURATION_FILENAME))
        jube2.util.update_timestamps(os.path.join(self.bench_dir,
                                                  jube2.conf.TIMESTAMPS_INFO),
                                     "start", "change")
Example #7
    def getattr(self, path, fh):
        now = time() # FIXME:
        uid = pwd.getpwuid(os.getuid()).pw_uid
        gid = pwd.getpwuid(os.getuid()).pw_gid
        if self.vdb.is_dir(path):
            try:
                size = self.vdb.size(path)
            except:
                raise OSError(ENOENT, "")

            if platform.system() == "Darwin":
                st_nlink = size
            elif platform.system() == "Linux":
                st_nlink = size + 2
                
            return dict(st_mode=(S_IFDIR|0766), st_ctime=now, st_mtime=now, st_atime=now, st_nlink=st_nlink, st_uid=uid, st_gid=gid)
        else:
            try:
                data = self.vdb.read(path)
            except:
                raise OSError(ENOENT, "")

            if data == "null":
                raise OSError(ENOENT, "")
                
            return dict(st_mode=(S_IFREG | 0o666), st_size=len(data), st_ctime=now,
                        st_mtime=now, st_atime=now, st_nlink=1, st_uid=uid, st_gid=gid)
Example #8
def get_username():
    comment = pwd.getpwuid(os.getuid())[4]
    name = comment.split(',')[0]
    if name == "":
        return pwd.getpwuid(os.getuid())[0]

    return name
Example #9
    def check(require, check_argv=True):
        if require:
            # If they're not root, tell them to run with root!
            if os.getuid() != RootCheck.ids["root"]:
                sys.stdout.write(
                    "This script requires root (id={0}), and you're currently id={1}.\n".format(
                        RootCheck.ids["root"], os.getuid()
                    )
                )
                sys.stdout.write("Please re-run the script as root (id={0})".format(RootCheck.ids["root"]))
                sys.exit(1)
                # If we're checking argv,
            if check_argv and "--requires-root" not in sys.argv:
                sys.stdout.write("To run this script, you must append '--requires-root' to the args.\n")
                sys.stdout.write("This is so that you can't say you didn't know that using root is a bad idea.\n")
                sys.stdout.write("Please re-run the script with '--requires-root'.")
                sys.exit(1)
                # Should we berate them? A warning is enough, really.
            sys.stdout.write("[WARNING!] You've run this script as root, which is bad.\n")

        else:  # down with root, down with root!
            if os.getuid() == RootCheck.ids["root"]:
                sys.stdout.write("This script does not require root, but you've given it that anyway.\n")
                sys.stdout.write("It is very poor practice to run a script with more privilege than it needs.\n")
                sys.stdout.write("Please re-run the script without root access")
                sys.exit(1)
Example #10
def setuid(user):
    if not hasattr(os, 'getuid'):
        return

    # People can remove this check if they're really determined
    if user is None:
        if os.getuid():
            return
        raise ValueError, _("Can't run as root!")

    if os.getuid():
        print _('WARNING: ignoring "-u" argument, not root')
        return

    try:
        import pwd
    except ImportError:
        raise ValueError, _("Can't change users - no pwd module")
    try:
        try:
            uid = int(user)
        except ValueError:
            uid = pwd.getpwnam(user)[2]
        else:
            pwd.getpwuid(uid)
    except KeyError:
        raise ValueError, _("User %(user)s doesn't exist")%locals()
    os.setuid(uid)
Example #11
    def testStartCopy(self):
        run_name = '000000_RUNDIR_1234_ABCDEFG'
        source_run_root = os.path.join(self.run_root, 'source')
        source_rundir = os.path.join(source_run_root, run_name)
        os.makedirs(source_rundir)
        testfile = 'test.txt'
        with open(os.path.join(source_rundir, testfile), 'w') as f:
            f.write("Hello")

        dest_run_root = os.path.join(self.run_root, 'dest')
        dest_host = 'localhost'
        dest_group = grp.getgrgid(pwd.getpwuid(os.getuid()).pw_gid).gr_name
        dest_user = pwd.getpwuid(os.getuid()).pw_name
        os.makedirs(dest_run_root)

        config = {
            'COPY_DEST_HOST': dest_host,
            'COPY_DEST_USER': dest_user,
            'COPY_DEST_GROUP': dest_group,
            'COPY_DEST_RUN_ROOT': dest_run_root,
            'COPY_SOURCE_RUN_ROOTS': [source_run_root],
        }

        # Initialize autocopy and create the source root
        a = Autocopy(log_file=self.tmp_file.name, no_email=True, test_mode_lims=True, config=config, errors_to_terminal=DEBUG)
        a.update_rundirs_monitored()
        rundir = a.get_rundir(dirname=run_name)
        a.start_copy(rundir)
        retcode = rundir.copy_proc.wait()
        self.assertEqual(retcode, 0)

        with open(os.path.join(dest_run_root, run_name, testfile), 'r') as f:
            text = f.read()
        self.assertTrue(re.search("Hello", text))
        a.cleanup()
Example #12
def startPipes():
    global tofile
    global fromfile
    global EOL
    if sys.platform == 'win32':
        print("pipe-test.py, running on windows")
        toname = '\\\\.\\pipe\\ToSrvPipe'
        fromname = '\\\\.\\pipe\\FromSrvPipe'
        EOL = '\r\n\0'
    else:
        print("pipe-test.py, running on linux or mac")
        toname = '/tmp/audacity_script_pipe.to.' + str(os.getuid())
        fromname = '/tmp/audacity_script_pipe.from.' + str(os.getuid())
        EOL = '\n'

    print("Write to  \"" + toname + "\"")
    if not os.path.exists(toname):
        print(" ..does not exist.  Ensure Audacity is running with mod-script-pipe.")
        sys.exit()

    print("Read from \"" + fromname + "\"")
    if not os.path.exists(fromname):
        print(" ..does not exist.  Ensure Audacity is running with mod-script-pipe.")
        sys.exit()

    print("-- Both pipes exist.  Good.")

    tofile = open(toname, 'wt+')
    print("-- File to write to has been opened")
    fromfile = open(fromname, 'rt')
    print("-- File to read from has now been opened too\r\n")
Example #13
    def test_create_op(self, base_dir):
        destination = base_dir / "my_new_item"
        for dry_run in range(2):
            for content in (None, bytes(b"hello world")):
                for mode in (0o755,  None):
                    for gid in (None, os.getgid()):
                        for uid in (None, os.getuid()):
                            for dest_exists in range(2):
                                assert not destination.exists()

                                t = Transaction(log, dry_run=dry_run)
                                co = TestCreateFSItemOperation(
                                    t, str(destination), content, mode=mode, uid=uid, gid=gid)

                                if dest_exists:
                                    # Will ignore existing items, but cares about the type
                                    destination.mkdir()
                                    assert t.apply().succeeded() == (content is None)
                                    destination.rmdir()
                                else:
                                    t.apply()
                                    if not ((gid or uid) and os.getuid() != 0 and type(t.exception()) is OSError):
                                        assert t.succeeded()
                                        assert destination.exists() != dry_run
                                    # end ignore non-root permissions issues
                                    assert not t.rollback().succeeded()
                                    assert not destination.exists()
Example #14
    def demote(self, uid):
        try:
            username = pwd.getpwuid(uid).pw_name
            gid = pwd.getpwuid(uid).pw_gid
        except KeyError:
            username = None
            gid = uid

        if os.getuid() == uid:
            return
        else:
            if os.getuid() != 0:
                logging.warning('Running as a limited user, setuid() unavailable!')
                return

        logging.info(
            'Worker %s is demoting to UID %s / GID %s...',
            os.getpid(),
            uid,
            gid
        )

        groups = [
            g.gr_gid
            for g in grp.getgrall()
            if username in g.gr_mem or g.gr_gid == gid
        ]
        os.setgroups(groups)
        os.setgid(gid)
        os.setuid(uid)
        logging.info(
            '...done, new EUID %s EGID %s',
            os.geteuid(),
            os.getegid()
        )
Example #15
    def test_008_file_copy(self):
        """test clush (file copy)"""
        content = "%f" % time.time()
        f = make_temp_file(content)
        self._clush_t(["-w", HOSTNAME, "-c", f.name], None, "")
        f.seek(0)
        self.assertEqual(f.read(), content)
        # test --dest option
        f2 = tempfile.NamedTemporaryFile()
        self._clush_t(["-w", HOSTNAME, "-c", f.name, "--dest", f2.name],
                      None, "")
        f2.seek(0)
        self.assertEqual(f2.read(), content)
        # test --user option
        f2 = tempfile.NamedTemporaryFile()
        self._clush_t(["--user", pwd.getpwuid(os.getuid())[0], "-w",
                       HOSTNAME, "--copy", f.name, "--dest", f2.name], None, "")
        f2.seek(0)
        self.assertEqual(f2.read(), content)
        # test --rcopy
        self._clush_t(["--user", pwd.getpwuid(os.getuid())[0], "-w",
                       HOSTNAME, "--rcopy", f.name, "--dest",
                       os.path.dirname(f.name)], None, "")
        f2.seek(0)
        self.assertEqual(open("%s.%s" % (f.name, HOSTNAME)).read(), content)
Example #16
def init_directory(dirname, force_ownership=True):
    try:
        os.makedirs(dirname, 0o700)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(dirname):
            # directory is already created, check ownership
            stat = os.stat(dirname)
            if (force_ownership and stat.st_uid == 0 and
                os.getuid() != stat.st_uid):
                print ('%s is already created and owned by root. Please change '
                       'ownership and try again.' % dirname)
                sys.exit(1)
        elif e.errno == errno.EACCES:
            logging.info("Can't create directory %s." % dirname)
            return None
        else:
            raise
    finally:
        uid, gid = get_current_user()
        if uid != 0 and os.getuid() == 0:
            try:
                os.chown(dirname, uid, gid)
            except Exception, ex:
                if force_ownership:
                    print ('Unable to change owner of %s. Please fix ownership'
                           ' manually and try again.' % dirname)
                    sys.exit(1)
Example #17
def drop(new_uid='nobody', new_gid='nogroup'):
    starting_uid = os.getuid()
    starting_gid = os.getgid()

    if os.getuid() != 0:
        return
    if starting_uid == 0:
        run_uid = pwd.getpwnam(new_uid)[2]
        run_gid = grp.getgrnam(new_gid)[2]
        try:
            os.chown("files", run_uid, run_gid)
            os.chown("db", run_uid, run_gid)
            os.chown("log", run_uid, run_gid)
            recursive_chown("files", run_uid, run_gid)
            recursive_chown("db", run_uid, run_gid)
            recursive_chown("log", run_uid, run_gid)
            os.chown("modules/handlers/emulators/dork_list/pages",
                     run_uid, run_gid)
            os.chown("modules/handlers/emulators/dork_list/comments.txt",
                     run_uid, run_gid)
        except OSError, e:
            print "Could not change file owner: %s" % (e)
        try:
            os.setgid(run_gid)
        except OSError, e:
            print "Could not set new group: %s" % (e)
Example #18
    def setUp(self):
        self.metadata = obnamlib.Metadata()
        self.metadata.st_atime_sec = 12765
        self.metadata.st_atime_nsec = 0
        self.metadata.st_mode = 42 | stat.S_IFREG
        self.metadata.st_mtime_sec = 10**9
        self.metadata.st_mtime_nsec = 0
        # make sure the uid/gid magic numbers aren't the current user's
        self.metadata.st_uid = os.getuid() + 1234
        self.metadata.st_gid = 5678
        while self.metadata.st_gid in os.getgroups():
            self.metadata.st_gid += 1

        fd, self.filename = tempfile.mkstemp()
        os.close(fd)
        # On some systems (e.g. FreeBSD) /tmp is apparently setgid and
        # default gid of files is therefore not the user's gid.
        os.chown(self.filename, os.getuid(), os.getgid())

        self.fs = obnamlib.LocalFS('/')
        self.fs.connect()

        self.uid_set = None
        self.gid_set = None
        self.fs.lchown = self.fake_lchown

        obnamlib.set_metadata(self.fs, self.filename, self.metadata)

        self.st = os.stat(self.filename)
Example #19
def test_request_and_spawn(capfd, request_and_spawn):
    request_and_spawn()

    captured = capfd.readouterr()
    print('**************\n%s\n**************' % captured.err)

    if request_and_spawn.kind != 'running':
        assert '%s:%s' % (pwd.getpwuid(os.getuid())[0], os.getpid()) in captured.err
        assert 'completed. Passing back results to' in captured.err
        assert 'Queues => 0 workspaces' in captured.err

    request_and_spawn(wait=False)
    request_and_spawn(wait=False)
    request_and_spawn(wait=False)
    request_and_spawn(wait=False)
    request_and_spawn()

    # wait for process list to settle (eg: there might be one or two extra processes that will exit because the lock
    # is already acquired - see StampedeStub)
    start = time.time()
    while len(get_children()) > 1 and time.time() - start < TIMEOUT:
        time.sleep(0.1)

    children = get_children()
    assert len(children) == 1
    for child in children:
        child.kill()

    captured = capfd.readouterr()
    print('##############\n%s\n##############' % captured.err)
    if request_and_spawn.kind != 'running':
        assert '%s:%s' % (pwd.getpwuid(os.getuid())[0], os.getpid()) in captured.err
        assert 'completed. Passing back results to' in captured.err
        assert 'Queues => 0 workspaces' in captured.err
Example #20
def is_admin():
    if os.getuid() == 0:
        say(os.getuid(), "r00tness!")
        return True
    else:
        say(os.getuid(), "I cannot run as a mortal. Sorry.")
        return False
Example #21
    def test_chown(self):
        # raise an OSError if the file does not exist
        os.unlink(support.TESTFN)
        self.assertRaises(OSError, posix.chown, support.TESTFN, -1, -1)

        # re-create the file
        open(support.TESTFN, 'w').close()
        if os.getuid() == 0:
            try:
                # Many linux distros have a nfsnobody user as MAX_UID-2
                # that makes a good test case for signedness issues.
                #   http://bugs.python.org/issue1747858
                # This part of the test only runs when run as root.
                # Only scary people run their tests as root.
                ent = pwd.getpwnam('nfsnobody')
                posix.chown(support.TESTFN, ent.pw_uid, ent.pw_gid)
            except KeyError:
                pass
        else:
            # non-root cannot chown to root, raises OSError
            self.assertRaises(OSError, posix.chown,
                              support.TESTFN, 0, 0)

        # test a successful chown call
        posix.chown(support.TESTFN, os.getuid(), os.getgid())
Example #22
    def _test_all_chown_common(self, chown_func, first_param):
        """Common code for chown, fchown and lchown tests."""
        # test a successful chown call
        chown_func(first_param, os.getuid(), os.getgid())

        if os.getuid() == 0:
            try:
                # Many linux distros have a nfsnobody user as MAX_UID-2
                # that makes a good test case for signedness issues.
                #   http://bugs.python.org/issue1747858
                # This part of the test only runs when run as root.
                # Only scary people run their tests as root.
                ent = pwd.getpwnam('nfsnobody')
                chown_func(first_param, ent.pw_uid, ent.pw_gid)
            except KeyError:
                pass
        elif platform.system() in ('HP-UX', 'SunOS'):
            # HP-UX and Solaris can allow a non-root user to chown() to root
            # (issue #5113)
            raise unittest.SkipTest("Skipping because of non-standard chown() "
                                    "behavior")
        else:
            # non-root cannot chown to root, raises OSError
            self.assertRaises(OSError, chown_func,
                              first_param, 0, 0)
Example #23
    def install(self):
        # TODO in production installer.py should be removed
        if not os.access(os.path.split(os.path.abspath(installdir))[0], os.W_OK|os.X_OK):
            if not freeze:
                proc = popen_root((sys.executable, os.path.abspath("installer.py"),
                            os.path.abspath(installdir), str(os.getuid())),)
                if proc is None:
                    wizard.back()
                    msgbox = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Critical, "Error", "The installation directory is only accessible by the root user and no method for starting a GUI authentication dialog has been found. Try changing the installation directory to a location accessible by you, or modifying the installation directory's permissions to allow you to access it, or install gksudo/gksu/kdesudo/kdesu.")
                    msgbox.exec()
                    raise PermissionError
            if freeze:
                proc = popen_root((sys.executable,
                                os.path.abspath(installdir), str(os.getuid())),)
            wizard.setEnabled(False)
            wizard.back()
            proc.readyRead.connect(self.readyRead)
            proc.finished.connect(lambda:wizard.setEnabled(True))
            return proc

        else:
            try:
                os.makedirs(self.map_dir)
            except FileExistsError:
                pass # User may be continuing download
Example #24
    def init_default_configs(self):
        conf = config.config()
        dic = {}
        dic[conf.TEMP_DIR] = "/home/%s/.paralite-tmp" % (pwd.getpwuid(os.getuid())[0])
        dic[conf.LOG_DIR] = "/home/%s/.paralite-log" % (pwd.getpwuid(os.getuid())[0])
        dic[conf.BLOCK_SIZE] = 0
        return dic
Example #25
    def _process_bridge_mode(self):
        nettype_setting = 'config vt.qemu.nettype'
        if not self.options.vt_config:
            # Let's select reasonable defaults depending on vt_type
            if not self.options.vt_nettype:
                if self.options.vt_type == 'qemu':
                    self.options.vt_nettype = ("bridge" if os.getuid() == 0
                                               else "user")
                elif self.options.vt_type == 'spice':
                    self.options.vt_nettype = "none"
                else:
                    self.options.vt_nettype = "bridge"

            if self.options.vt_nettype not in SUPPORTED_NET_TYPES:
                raise ValueError("Invalid %s '%s'. "
                                 "Valid values: (%s)" %
                                 (nettype_setting,
                                  self.options.vt_nettype,
                                  ", ".join(SUPPORTED_NET_TYPES)))
            if self.options.vt_nettype == 'bridge':
                if os.getuid() != 0:
                    raise ValueError("In order to use %s '%s' you "
                                     "need to be root" % (nettype_setting,
                                                          self.options.vt_nettype))
                self.cartesian_parser.assign("nettype", "bridge")
                self.cartesian_parser.assign("netdst", self.options.vt_netdst)
            elif self.options.vt_nettype == 'user':
                self.cartesian_parser.assign("nettype", "user")
        else:
            logging.info("Config provided, ignoring %s", nettype_setting)
Example #26
def system_model():
    """Get manufacturer and model number.

    On older Linux kernel versions without /sys/class/dmi/id this
    requires root access.
    """
    mdata = {}
    man = None
    pn = None
    try:
        # This might be
        # sys_vendor, bios_vendor, board_vendor, or chassis_vendor
        man = open('/sys/class/dmi/id/sys_vendor').read().strip()
    except:
        if os.getuid() == 0:
            for line in os.popen('/usr/sbin/dmidecode -s system-manufacturer'):
                man = line.strip()
    try:
        pn = open('/sys/class/dmi/id/product_name').read().strip()
    except:
        if os.getuid() == 0:
            for line in os.popen('/usr/sbin/dmidecode -s system-product-name'):
                pn = line.strip()
    if man is not None:
        mdata['System_manufacturer'] = man
    if pn is not None:
        mdata['System_product_name'] = pn
    return mdata
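A short usage sketch for system_model(); both keys are optional, so read them defensively:

mdata = system_model()
print('manufacturer: %s, model: %s' % (
    mdata.get('System_manufacturer', 'unknown'),
    mdata.get('System_product_name', 'unknown')))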
Example #27
def before():
    cred = request.authorization
    if not cred:
        return basic_auth_response()

    authclass = app.config["AUTHENTICATION"]
    if authclass not in globals():
        log.error("Unknown authentication method: %s", authclass)
        return basic_auth_response()

    Authentication = globals()[authclass]
    auth = Authentication(cred.username, cred.password)
    if not auth.authenticate():
        log.error("Invalid login: %s", cred.username)
        return basic_auth_response()

    g.user = auth.get_user()
    g.username = g.user.username
    g.master_db_url = g.user.get_master_db_url()

    # If required, set uid and gid of handler process
    if os.getuid() != g.user.uid:
        if os.getuid() != 0:
            log.error("Pegasus service must run as root to enable multi-user access")
            return basic_auth_response()

        os.setgid(g.user.gid)
        os.setuid(g.user.uid)
Example #28
def drop_privileges(uid_name='nobody', gid_name='nogroup'):
    if os.getuid() != 0:
        # We're not root so, like, whatever dude
        return

    # Get the uid/gid from the name
    running_uid = pwd.getpwnam(uid_name).pw_uid
    running_gid = grp.getgrnam(gid_name).gr_gid

    # Remove group privileges
    os.setgroups([])

    # Try setting the new uid/gid
    os.setgid(running_gid)
    os.setuid(running_uid)

    # Ensure a very conservative umask
    new_umask = 0o077
    old_umask = os.umask(new_umask)

    print 'drop_privileges: Old mask: %s, new umask :%s' % \
        (oct(old_umask), oct(new_umask))

    final_uid = os.getuid()
    final_gid = os.getgid()

    print 'drop_privileges: running as %s/%s' % \
        (pwd.getpwuid(final_uid)[0],
         grp.getgrgid(final_gid)[0])
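Typical call order for drop_privileges(): claim root-only resources first, then drop, so a later compromise cannot regain them. A minimal sketch (the socket is illustrative):

import socket

listener = socket.socket()
listener.bind(('', 80))               # privileged port; requires root
listener.listen(5)
drop_privileges('nobody', 'nogroup')  # irreversible: no way back to uid 0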
Example #29
def isFileOpen(fName):
  fName = os.path.realpath(fName)
  pids=os.listdir('/proc')
  for pid in sorted(pids):
    try:  
      if not pid.isdigit():
        continue
    
      if os.stat(os.path.join('/proc',pid)).st_uid != os.getuid():
        continue
      
      uid = os.stat(os.path.join('/proc',pid)).st_uid
      fd_dir=os.path.join('/proc', pid, 'fd')
      if os.stat(fd_dir).st_uid != os.getuid():
        continue
        
      for f in os.listdir(fd_dir):
        fdName = os.path.join(fd_dir, f)
        if os.path.islink(fdName) :       
            link=os.readlink(fdName)
            if link == fName:
              return True
    except:
      continue
          
  return False
Example #30
    def test_add_license_key_backend(self):
        self._finished = False
        # add repo
        deb_line = "deb https://mvo:[email protected]/canonical-isd-hackers/internal-qa/ubuntu oneiric main"
        signing_key_id = "F5410BE0"
        app = Application("Test app1", self.PKGNAME)
        # install only when running as root, as we require polkit prompts
        # otherwise
        # FIXME: provide InstallBackendSimulate()
        if os.getuid() == 0:
            backend = get_install_backend()
            backend.ui = Mock()
            backend.connect("transaction-finished",
                            self._on_transaction_finished)
            # simulate repos becomes available for the public 20 s later
            GLib.timeout_add_seconds(20, self._add_pw_to_commercial_repo)
            # run it
            backend.add_repo_add_key_and_install_app(deb_line,
                                                     signing_key_id,
                                                     app,
                                                     "icon",
                                                     self.LICENSE_KEY)
            # wait until the pkg is installed
            while not self._finished:
                do_events_with_sleep()

        if os.getuid() == 0:
            self.assertTrue(os.path.exists(self.LICENSE_KEY_PATH))
            self.assertEqual(open(self.LICENSE_KEY_PATH).read(), self.LICENSE_KEY)
Example #31
def create_commands(machines,
                    links,
                    options,
                    metadata,
                    path,
                    execbash=False,
                    no_machines_tmp=False):
    docker = DOCKER_BIN

    # deciding machine and network prefix in order to avoid conflicts with other users and containers
    if PLATFORM != WINDOWS:
        prefix = 'netkit_' + str(os.getuid()) + '_'
    else:
        prefix = 'netkit_nt_'

    # generating network create command and network names separated by spaces for the temp file
    lab_links_text = ''
    lab_machines_text = ''

    create_network_template = docker + ' network create '
    create_network_commands = []
    network_counter = 0
    for link in links:
        create_network_commands.append(create_network_template + prefix +
                                       link + " --subnet=172." +
                                       str(18 + network_counter) +
                                       ".0.0/16 --gateway=172." +
                                       str(18 + network_counter) + ".0.1")
        lab_links_text += prefix + link + ' '
        network_counter += 1

    # writing the network list in the temp file
    if not execbash:
        if not PRINT:
            u.write_temp(lab_links_text,
                         u.generate_urlsafe_hash(path) + '_links',
                         PLATFORM,
                         file_mode="w+")

    # generating commands for running the containers, copying the config folder and executing the terminals connected to the containers
    if PLATFORM != WINDOWS:
        create_machine_template = docker + ' run -tid --privileged=true --name ' + prefix + '{machine_name} --hostname={machine_name} --network=' + prefix + '{first_link} {machine_options} {image_name}'
    else:
        create_machine_template = docker + ' run --volume="' + os.path.expanduser(
            '~'
        ) + '":/hosthome -tid --privileged=true --name ' + prefix + '{machine_name} --hostname={machine_name} --network=' + prefix + '{first_link} {machine_options} {image_name}'
    # we could use -ti -a stdin -a stdout and then /bin/bash -c "commands;bash",
    # but that would execute commands like ifconfig BEFORE all the networks are linked
    create_machine_commands = []

    create_connection_template = docker + ' network connect ' + prefix + '{link} ' + prefix + '{machine_name}'
    create_bridge_connection_template = docker + ' network connect {link} ' + prefix + '{machine_name}'
    create_connection_commands = []
    create_bridge_connection_commands = []

    copy_folder_template = docker + ' cp "' + path + '{machine_name}/{folder_or_file}" ' + prefix + '{machine_name}:/{dest}'
    copy_folder_commands = []

    exec_template = docker + ' exec {params} -i --privileged=true ' + prefix + '{machine_name} {command}'
    exec_commands = []
    startup_commands = []

    count = 0

    for machine_name, interfaces in machines.items():
        this_image = DOCKER_HUB_PREFIX + IMAGE_NAME

        # copying the hostlab directory
        if not execbash:
            copy_folder_commands.append(docker + ' cp "' + path + '" ' +
                                        prefix + machine_name + ':/hostlab')

        # applying docker patch for /proc and icmp
        repls = ('{machine_name}', machine_name), (
            '{command}',
            'bash -c "sysctl net.ipv4.conf.all.rp_filter=0"'), ('{params}', '')
        startup_commands.insert(0,
                                u.replace_multiple_items(repls, exec_template))
        repls = ('{machine_name}', machine_name), (
            '{command}',
            'bash -c "sysctl net.ipv4.conf.default.rp_filter=0"'), ('{params}',
                                                                    '')
        startup_commands.insert(1,
                                u.replace_multiple_items(repls, exec_template))
        repls = ('{machine_name}', machine_name), (
            '{command}',
            'bash -c "sysctl net.ipv4.conf.lo.rp_filter=0"'), ('{params}', '')
        startup_commands.insert(2,
                                u.replace_multiple_items(repls, exec_template))
        repls = ('{machine_name}', machine_name), (
            '{command}',
            'bash -c "sysctl net.ipv4.conf.eth0.rp_filter=0"'), ('{params}',
                                                                 '')
        startup_commands.insert(2,
                                u.replace_multiple_items(repls, exec_template))

        # Parsing options from lab.conf
        machine_option_string = " "
        if options.get(machine_name):
            for opt, val in options[machine_name]:
                if opt in ('mem', 'M'):
                    machine_option_string += '--memory=' + val.upper() + ' '
                if opt in ('image', 'i', 'model-fs', 'm', 'f', 'filesystem'):
                    this_image = DOCKER_HUB_PREFIX + val
                if opt == 'eth':
                    app = val.split(":")
                    create_network_commands.append(create_network_template +
                                                   prefix + app[1])
                    repls = ('{link}', app[1]), ('{machine_name}',
                                                 machine_name)
                    create_connection_commands.append(
                        u.replace_multiple_items(repls,
                                                 create_connection_template))
                    if not PRINT:
                        u.write_temp(" " + prefix + app[1],
                                     u.generate_urlsafe_hash(path) + '_links',
                                     PLATFORM)
                    repls = ('{machine_name}', machine_name), (
                        '{command}', 'bash -c "sysctl net.ipv4.conf.eth' +
                        str(app[0]) + '.rp_filter=0"'), ('{params}', '')
                    startup_commands.insert(
                        4, u.replace_multiple_items(repls, exec_template))
                if opt == 'bridged':
                    repls = ('{link}', "bridge"), ('{machine_name}',
                                                   machine_name)
                    create_bridge_connection_commands.append(
                        u.replace_multiple_items(
                            repls, create_bridge_connection_template))
                if opt in ('e', 'exec'):
                    repls = ('{machine_name}', machine_name), (
                        '{command}',
                        'bash -c "' + val.strip().replace('\\', r'\\').replace(
                            '"', r'\\"').replace("'", r"\\'") +
                        '"'), ('{params}', '-d')
                    startup_commands.append(
                        u.replace_multiple_items(repls, exec_template))
        repls = ('{machine_name}', machine_name), ('{number}', str(count)), (
            '{first_link}',
            interfaces[0][0]), ('{image_name}',
                                this_image), ('{machine_options}',
                                              machine_option_string)
        create_machine_commands.append(
            u.replace_multiple_items(repls, create_machine_template))
        count += 1
        eth_cnt = 1
        for link, _ in interfaces[1:]:
            repls = ('{link}', link), ('{machine_name}', machine_name)
            create_connection_commands.append(
                u.replace_multiple_items(repls, create_connection_template))
            repls = ('{machine_name}',
                     machine_name), ('{command}',
                                     'bash -c "sysctl net.ipv4.conf.eth' +
                                     str(eth_cnt) +
                                     '.rp_filter=0"'), ('{params}', '')
            startup_commands.insert(
                4, u.replace_multiple_items(repls, exec_template))
            eth_cnt += 1
        # convoluted method to copy MACHINE_NAME/etc folder to the etc of the container
        if os.path.exists(os.path.join(path, machine_name)) and not execbash:
            for folder_or_file in os.listdir(os.path.join(path, machine_name)):
                if folder_or_file == 'etc':
                    repls = ('{machine_name}',
                             machine_name), ('{machine_name}', machine_name), (
                                 '{folder_or_file}',
                                 folder_or_file), ('{dest}', 'temp_etc')
                    repls2 = ('{machine_name}', machine_name), (
                        '{command}',
                        'bash -c "cp -rf /temp_etc/* /etc/; rm -rf /temp_etc"'
                    ), ('{params}', '')
                    startup_commands.insert(
                        0, u.replace_multiple_items(repls2, exec_template))
                else:
                    repls = ('{machine_name}',
                             machine_name), ('{machine_name}', machine_name), (
                                 '{folder_or_file}',
                                 folder_or_file), ('{dest}', '')
                copy_folder_commands.append(
                    u.replace_multiple_items(repls, copy_folder_template))
        if PLATFORM == WINDOWS:
            repls = ('{machine_name}',
                     machine_name), ('{command}',
                                     'bash -c "echo -ne \'\033]0;' +
                                     machine_name +
                                     '\007\'; bash"'), ('{params}',
                                                        '-t -e TERM=vt100')
        else:
            repls = ('{machine_name}',
                     machine_name), ('{command}', 'bash'), ('{params}',
                                                            '-t -e TERM=vt100')
        exec_commands.append(u.replace_multiple_items(repls, exec_template))
        lab_machines_text += prefix + machine_name + ' '

    # writing the container list in the temp file
    if not no_machines_tmp:
        if not execbash:
            if not PRINT:
                u.write_temp(lab_machines_text,
                             u.generate_urlsafe_hash(path) + '_machines',
                             PLATFORM)

    # For each machine, read its machine.startup file and insert every non-empty
    # line into the startup exec commands, escaping backslashes and quotes.
    for machine_name, _ in machines.items():
        startup_file = os.path.join(path, machine_name + '.startup')
        if os.path.exists(startup_file):
            with open(startup_file, 'r') as f:
                for line in f:
                    if line.strip() and line.strip() not in ['\n', '\r\n']:
                        repls = ('{machine_name}', machine_name), (
                            '{command}', 'bash -c "' +
                            line.strip().replace('\\', r'\\').replace(
                                '"', r'\"').replace("'", r"\'") +
                            '"'), ('{params}', '-d')
                        startup_commands.append(
                            u.replace_multiple_items(repls, exec_template))

    commands = create_network_commands + create_machine_commands + create_connection_commands + create_bridge_connection_commands + copy_folder_commands

    return commands, startup_commands, exec_commands
Example #32
    if _xwindow:
        from stsci.tools import capable
        capable.OF_GRAPHICS = False

    for img in imglist:
        ######################################
        #        if os.path.isfile(re.sub('.fits','.clean.fits',img)):
        #            img=re.sub('.fits','.clean.fits',img)
        #            print 'use the clean'
        ######################################
        if '.fits' in img: 
            img = img[:-5]
        if os.path.exists(img + '.sn2.fits') and not option.redo:
            print img + ': psf already calculated'
        else:
            ds9 = os.system("ps -U" + str(os.getuid()) + "|grep -v grep | grep ds9")
            if option.interactive and ds9 != 0:
                pid = subprocess.Popen(['ds9']).pid
                time.sleep(2)
                ds9 = 0

            result, fwhm = ecpsf(img, option.fwhm, option.threshold, option.psfstars,
                                 option.distance, option.interactive, ds9, psffun, fixaperture,_catalog,_datamax)
            print '\n### ' + str(result)
            if option.show:
                agnkey.util.marksn2(img + '.fits', img + '.sn2.fits', 1, '')
                iraf.delete('tmp.psf.fit?', verify=False)
                iraf.seepsf(img + '.psf', '_psf.psf')
                iraf.surface('_psf.psf')
                aa = raw_input('>>>good psf [[y]/n] ? ')
                if not aa: aa = 'y'
Example #33
    
    
    # json.dumps(args.logfile, indent=6, sort_keys=True)
    # return 0

if __name__ == "__main__":
    if args.json:
        print "json Argument received"
        jsonDump(args.logfile)

    retCode = daemonization()
    procParams = """
    return code = %s
    process ID = %s
    parent process ID = %s
    process group ID = %s
    session ID = %s
    user ID = %s
    effective user ID = %s
    real group ID = %s
    effective group ID = %s
    """ % (retCode, os.getpid(), os.getppid(), os.getpgrp(), os.getsid(0),os.getuid(),os.geteuid(),os.getgid(),os.getegid())
    with open(args.logfile, "w") as daemonLog:
        daemonLog.write(procParams + "\n")
   
    


    
Example #34
    def _mountShare(self, sMountPoint, sType, sServer, sShare, sUser,
                    sPassword, sMountOpt, sWhat):
        """
        Mounts the specified share if needed.
        Raises exception on failure.
        """
        # Only mount if the type is specified.
        if sType is None:
            return True

        # Test if already mounted.
        sTestFile = os.path.join(sMountPoint + os.path.sep,
                                 sShare + '-new.txt')
        if os.path.isfile(sTestFile):
            return True

        #
        # Platform specific mount code.
        #
        sHostOs = utils.getHostOs()
        if sHostOs in ('darwin', 'freebsd'):
            if sMountOpt != '':
                sMountOpt = ',' + sMountOpt
            utils.sudoProcessCall(['/sbin/umount', sMountPoint])
            utils.sudoProcessCall(['/bin/mkdir', '-p', sMountPoint])
            utils.sudoProcessCall(
                ['/usr/sbin/chown',
                 str(os.getuid()), sMountPoint])
            # pylint: disable=E1101
            if sType == 'cifs':
                # Note! no smb://server/share stuff here, 10.6.8 didn't like it.
                utils.processOutputChecked([
                    '/sbin/mount_smbfs', '-o',
                    'automounted,nostreams,soft,noowners,noatime,rdonly' +
                    sMountOpt, '-f', '0555', '-d', '0555',
                    '//%s:%s@%s/%s' % (sUser, sPassword, sServer, sShare),
                    sMountPoint
                ])
            else:
                raise TestBoxScriptException('Unsupported server type %s.' %
                                             (sType, ))

        elif sHostOs == 'linux':
            if sMountOpt != '':
                sMountOpt = ',' + sMountOpt
            utils.sudoProcessCall(['/bin/umount', sMountPoint])
            utils.sudoProcessCall(['/bin/mkdir', '-p', sMountPoint])
            if sType == 'cifs':
                utils.sudoProcessOutputChecked([
                    '/bin/mount',
                    '-t',
                    'cifs',
                    '-o',
                    'user=' + sUser + ',password=' + sPassword + ',sec=ntlmv2' + ',uid=' + str(os.getuid())  # pylint: disable=E1101
                    + ',gid=' + str(os.getgid())  # pylint: disable=E1101
                    + ',nounix,file_mode=0555,dir_mode=0555,soft,ro' +
                    sMountOpt,
                    '//%s/%s' % (sServer, sShare),
                    sMountPoint
                ])
            elif sType == 'nfs':
                utils.sudoProcessOutputChecked([
                    '/bin/mount', '-t', 'nfs', '-o', 'soft,ro' + sMountOpt,
                    '%s:%s' % (sServer, sShare if sShare.find('/') >= 0 else
                               ('/export/' + sShare)), sMountPoint
                ])

            else:
                raise TestBoxScriptException('Unsupported server type %s.' %
                                             (sType, ))

        elif sHostOs == 'solaris':
            if sMountOpt != '':
                sMountOpt = ',' + sMountOpt
            utils.sudoProcessCall(['/sbin/umount', sMountPoint])
            utils.sudoProcessCall(['/bin/mkdir', '-p', sMountPoint])
            if sType == 'cifs':
                ## @todo This stuff doesn't work on wei01-x4600b.de.oracle.com running 11.1. FIXME!
                oPasswdFile = tempfile.TemporaryFile()
                oPasswdFile.write(sPassword + '\n')
                oPasswdFile.flush()
                utils.sudoProcessOutputChecked(
                    [
                        '/sbin/mount',
                        '-F',
                        'smbfs',
                        '-o',
                        'user=' + sUser + ',uid=' + str(os.getuid())  # pylint: disable=E1101
                        + ',gid=' + str(os.getgid())  # pylint: disable=E1101
                        + ',fileperms=0555,dirperms=0555,noxattr,ro' +
                        sMountOpt,
                        '//%s/%s' % (sServer, sShare),
                        sMountPoint
                    ],
                    stdin=oPasswdFile)
                oPasswdFile.close()
            elif sType == 'nfs':
                utils.sudoProcessOutputChecked([
                    '/sbin/mount', '-F', 'nfs', '-o', 'noxattr,ro' + sMountOpt,
                    '%s:%s' % (sServer, sShare if sShare.find('/') >= 0 else
                               ('/export/' + sShare)), sMountPoint
                ])

            else:
                raise TestBoxScriptException('Unsupported server type %s.' %
                                             (sType, ))

        elif sHostOs == 'win':
            if sType != 'cifs':
                raise TestBoxScriptException(
                    'Only CIFS mounts are supported on Windows.')
            utils.processCall(['net', 'use', sMountPoint, '/d'])
            utils.processOutputChecked([
                'net',
                'use',
                sMountPoint,
                '\\\\' + sServer + '\\' + sShare,
                sPassword,
                '/USER:' + sUser,
            ])
        else:
            raise TestBoxScriptException('Unsupported host %s' % (sHostOs, ))

        #
        # Re-test.
        #
        if not os.path.isfile(sTestFile):
            raise TestBoxException(
                'Failed to mount %s (%s[%s]) at %s: %s not found' %
                (sWhat, sServer, sShare, sMountPoint, sTestFile))

        return True
Example #35
    def __init__(self, oOptions):
        """
        Initialize internals
        """
        self._oOptions = oOptions
        self._sTestBoxHelper = None

        # Signed-on state
        self._cSignOnAttempts = 0
        self._fSignedOn = False
        self._fNeedReSignOn = False
        self._fFirstSignOn = True
        self._idTestBox = None
        self._sTestBoxName = ''
        self._sTestBoxUuid = self.ksNullUuid  # convenience, assigned below.

        # Command processor.
        self._oCommand = TestBoxCommand(self)

        #
        # Scratch dir setup.  Use /var/tmp instead of /tmp because we may need
        # many many GBs for some test scenarios and /tmp can be backed by swap
        # or be a fast+small disk of some kind, while /var/tmp is normally
        # larger, if slower.  /var/tmp is generally not cleaned up on reboot,
        # /tmp often is, this would break host panic / triple-fault detection.
        #
        if self._oOptions.sScratchRoot is None:
            if utils.getHostOs() in ('win', 'os2', 'haiku', 'dos'):
                # We need *lots* of space, so avoid /tmp as it may be a memory
                # file system backed by the swap file, or worse.
                self._oOptions.sScratchRoot = tempfile.gettempdir()
            else:
                self._oOptions.sScratchRoot = '/var/tmp'
            sSubDir = 'testbox'
            try:
                sSubDir = '%s-%u' % (sSubDir, os.getuid())  # pylint: disable=E1101
            except:
                pass
            self._oOptions.sScratchRoot = os.path.join(
                self._oOptions.sScratchRoot, sSubDir)

        self._sScratchSpill = os.path.join(self._oOptions.sScratchRoot,
                                           'scratch')
        self._sScratchScripts = os.path.join(self._oOptions.sScratchRoot,
                                             'scripts')
        self._sScratchState = os.path.join(self._oOptions.sScratchRoot,
                                           'state')
        # persistent storage.

        for sDir in [
                self._oOptions.sScratchRoot, self._sScratchSpill,
                self._sScratchScripts, self._sScratchState
        ]:
            if not os.path.isdir(sDir):
                os.makedirs(sDir, 0o700)

        # We count consecutive reinitScratch failures and will reboot the
        # testbox after a while in the hope that it will correct the issue.
        self._cReinitScratchErrors = 0

        #
        # Mount builds and test resources if requested.
        #
        self.mountShares()

        #
        # Sign-on parameters: Packed into list of records of format:
        # { <Parameter ID>: { <Current value>, <Check function> } }
        #
        self._ddSignOnParams = \
        {
            constants.tbreq.ALL_PARAM_TESTBOX_UUID:        { self.VALUE: self._getHostSystemUuid(),    self.FN: None },
            constants.tbreq.SIGNON_PARAM_OS:               { self.VALUE: utils.getHostOs(),            self.FN: None },
            constants.tbreq.SIGNON_PARAM_OS_VERSION:       { self.VALUE: utils.getHostOsVersion(),     self.FN: None },
            constants.tbreq.SIGNON_PARAM_CPU_ARCH:         { self.VALUE: utils.getHostArch(),          self.FN: None },
            constants.tbreq.SIGNON_PARAM_CPU_VENDOR:       { self.VALUE: self._getHostCpuVendor(),     self.FN: None },
            constants.tbreq.SIGNON_PARAM_CPU_NAME:         { self.VALUE: self._getHostCpuName(),       self.FN: None },
            constants.tbreq.SIGNON_PARAM_CPU_REVISION:     { self.VALUE: self._getHostCpuRevision(),   self.FN: None },
            constants.tbreq.SIGNON_PARAM_HAS_HW_VIRT:      { self.VALUE: self._hasHostHwVirt(),        self.FN: None },
            constants.tbreq.SIGNON_PARAM_HAS_NESTED_PAGING:{ self.VALUE: self._hasHostNestedPaging(),  self.FN: None },
            constants.tbreq.SIGNON_PARAM_HAS_64_BIT_GUEST: { self.VALUE: self._can64BitGuest(),        self.FN: None },
            constants.tbreq.SIGNON_PARAM_HAS_IOMMU:        { self.VALUE: self._hasHostIoMmu(),         self.FN: None },
            #constants.tbreq.SIGNON_PARAM_WITH_RAW_MODE:    { self.VALUE: self._withRawModeSupport(),   self.FN: None },
            constants.tbreq.SIGNON_PARAM_SCRIPT_REV:       { self.VALUE: self._getScriptRev(),         self.FN: None },
            constants.tbreq.SIGNON_PARAM_REPORT:           { self.VALUE: self._getHostReport(),        self.FN: None },
            constants.tbreq.SIGNON_PARAM_PYTHON_VERSION:   { self.VALUE: self._getPythonHexVersion(),  self.FN: None },
            constants.tbreq.SIGNON_PARAM_CPU_COUNT:        { self.VALUE: None,     self.FN: multiprocessing.cpu_count },
            constants.tbreq.SIGNON_PARAM_MEM_SIZE:         { self.VALUE: None,     self.FN: self._getHostMemSize },
            constants.tbreq.SIGNON_PARAM_SCRATCH_SIZE:     { self.VALUE: None,     self.FN: self._getFreeScratchSpace },
        }
        for sItem in self._ddSignOnParams:
            if self._ddSignOnParams[sItem][self.FN] is not None:
                self._ddSignOnParams[sItem][self.VALUE] = \
                    self._ddSignOnParams[sItem][self.FN]()

        testboxcommons.log('Starting Test Box script (%s)' %
                           (self._getScriptRev(), ))
        testboxcommons.log(
            'Test Manager URL: %s' % self._oOptions.sTestManagerUrl, )
        testboxcommons.log(
            'Scratch root path: %s' % self._oOptions.sScratchRoot, )
        for sItem in self._ddSignOnParams:
            testboxcommons.log(
                'Sign-On value %18s: %s' %
                (sItem, self._ddSignOnParams[sItem][self.VALUE]))

        #
        # The System UUID is the primary identification of the machine, so
        # refuse to cooperate if it's NULL.
        #
        self._sTestBoxUuid = self.getSignOnParam(
            constants.tbreq.ALL_PARAM_TESTBOX_UUID)
        if self._sTestBoxUuid == self.ksNullUuid:
            raise TestBoxScriptException(
                'Couldn\'t determine the System UUID, please use --system-uuid to specify it.'
            )

        #
        # Export environment variables, clearing any we don't know yet.
        #
        for sEnvVar in self._oOptions.asEnvVars:
            iEqual = sEnvVar.find('=')
            if iEqual == -1:  # No '=', remove it.
                if sEnvVar in os.environ:
                    del os.environ[sEnvVar]
            elif iEqual > 0:  # Set it.
                os.environ[sEnvVar[:iEqual]] = sEnvVar[iEqual + 1:]
            else:  # Starts with '=', bad user.
                raise TestBoxScriptException('Invalid -E argument: "%s"' %
                                             (sEnvVar, ))

        os.environ['TESTBOX_PATH_BUILDS'] = self._oOptions.sBuildsPath
        os.environ['TESTBOX_PATH_RESOURCES'] = self._oOptions.sTestRsrcPath
        os.environ['TESTBOX_PATH_SCRATCH'] = self._sScratchSpill
        os.environ['TESTBOX_PATH_SCRIPTS'] = self._sScratchScripts
        os.environ['TESTBOX_PATH_UPLOAD'] = self._sScratchSpill
        ## @todo drop the UPLOAD dir?
        os.environ['TESTBOX_HAS_HW_VIRT'] = self.getSignOnParam(
            constants.tbreq.SIGNON_PARAM_HAS_HW_VIRT)
        os.environ['TESTBOX_HAS_NESTED_PAGING'] = self.getSignOnParam(
            constants.tbreq.SIGNON_PARAM_HAS_NESTED_PAGING)
        os.environ['TESTBOX_HAS_IOMMU'] = self.getSignOnParam(
            constants.tbreq.SIGNON_PARAM_HAS_IOMMU)
        os.environ['TESTBOX_SCRIPT_REV'] = self.getSignOnParam(
            constants.tbreq.SIGNON_PARAM_SCRIPT_REV)
        os.environ['TESTBOX_CPU_COUNT'] = self.getSignOnParam(
            constants.tbreq.SIGNON_PARAM_CPU_COUNT)
        os.environ['TESTBOX_MEM_SIZE'] = self.getSignOnParam(
            constants.tbreq.SIGNON_PARAM_MEM_SIZE)
        os.environ['TESTBOX_SCRATCH_SIZE'] = self.getSignOnParam(
            constants.tbreq.SIGNON_PARAM_SCRATCH_SIZE)
        #TODO: os.environ['TESTBOX_WITH_RAW_MODE']     = self.getSignOnParam(constants.tbreq.SIGNON_PARAM_WITH_RAW_MODE);
        os.environ['TESTBOX_WITH_RAW_MODE'] = str(self._withRawModeSupport())
        os.environ['TESTBOX_MANAGER_URL'] = self._oOptions.sTestManagerUrl
        os.environ['TESTBOX_UUID'] = self._sTestBoxUuid
        os.environ['TESTBOX_REPORTER'] = 'remote'
        os.environ['TESTBOX_NAME'] = ''
        os.environ['TESTBOX_ID'] = ''
        os.environ['TESTBOX_TEST_SET_ID'] = ''
        os.environ['TESTBOX_TIMEOUT'] = '0'
        os.environ['TESTBOX_TIMEOUT_ABS'] = '0'

        if utils.getHostOs() == 'win':
            os.environ['COMSPEC'] = os.path.join(os.environ['SystemRoot'],
                                                 'System32', 'cmd.exe')
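Each sign-on record above either carries a literal value or defers to a check function; anything with a non-None FN entry is resolved exactly once before sign-on. A minimal sketch of that value-or-callable table pattern (keys and names here are illustrative, not the script's):

import multiprocessing

VALUE, FN = 'value', 'fn'
ddParams = {
    'CPU_COUNT': {VALUE: None,    FN: multiprocessing.cpu_count},
    'OS':        {VALUE: 'linux', FN: None},
}
for sKey in ddParams:
    if ddParams[sKey][FN] is not None:
        ddParams[sKey][VALUE] = ddParams[sKey][FN]()  # resolve deferred values once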
Example #36
0
def run_subscription_manager(module, arguments):
    # Execute subscription-manager with arguments and handle common errors
    rhsm_bin = module.get_bin_path('subscription-manager')
    if not rhsm_bin:
        module.fail_json(msg='The executable file subscription-manager was not found in PATH')

    rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)))

    if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0):
        module.fail_json(msg='The executable file subscription-manager must be run using root privileges')
    elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
        module.fail_json(msg='This system has no repositories available through subscriptions')
    elif rc == 1:
        module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
    else:
        return rc, out, err
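The same guard-then-run shape works outside Ansible as well; a hedged sketch using plain subprocess (simplified error handling, not the module's behaviour):

import os
import shutil
import subprocess

def run_subscription_manager(*arguments):
    rhsm_bin = shutil.which('subscription-manager')
    if rhsm_bin is None:
        raise RuntimeError('subscription-manager was not found in PATH')
    if os.getuid() != 0:
        raise RuntimeError('subscription-manager must be run as root')
    proc = subprocess.run([rhsm_bin, *arguments], capture_output=True, text=True)
    return proc.returncode, proc.stdout, proc.stderr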
Example #37
0
def test_apprise_asset(tmpdir):
    """
    API: AppriseAsset() object

    """
    a = AppriseAsset(theme=None)
    # Default theme
    assert(a.theme == 'default')

    a = AppriseAsset(
        theme='dark',
        image_path_mask='/{THEME}/{TYPE}-{XY}{EXTENSION}',
        image_url_mask='http://localhost/{THEME}/{TYPE}-{XY}{EXTENSION}',
    )

    a.default_html_color = '#abcabc'
    a.html_notify_map[NotifyType.INFO] = '#aaaaaa'

    assert(a.color('invalid', tuple) == (171, 202, 188))
    assert(a.color(NotifyType.INFO, tuple) == (170, 170, 170))

    assert(a.color('invalid', int) == 11258556)
    assert(a.color(NotifyType.INFO, int) == 11184810)

    assert(a.color('invalid', None) == '#abcabc')
    assert(a.color(NotifyType.INFO, None) == '#aaaaaa')
    # None is the default
    assert(a.color(NotifyType.INFO) == '#aaaaaa')

    # Invalid Type
    try:
        a.color(NotifyType.INFO, dict)
        # We should not get here (exception should be thrown)
        assert(False)

    except ValueError:
        # The exception we expect since dict is not supported
        assert(True)

    except Exception:
        # Any other exception is not good
        assert(False)

    assert(a.image_url(NotifyType.INFO, NotifyImageSize.XY_256) ==
           'http://localhost/dark/info-256x256.png')

    assert(a.image_path(
        NotifyType.INFO,
        NotifyImageSize.XY_256,
        must_exist=False) == '/dark/info-256x256.png')

    # This path doesn't exist so image_raw will fail (since we just
    # randomly picked it for testing)
    assert(a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is None)

    assert(a.image_path(
        NotifyType.INFO,
        NotifyImageSize.XY_256,
        must_exist=True) is None)

    # Create a new object (with our default settings)
    a = AppriseAsset()

    # Our default configuration can access our file
    assert(a.image_path(
        NotifyType.INFO,
        NotifyImageSize.XY_256,
        must_exist=True) is not None)

    assert(a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is not None)

    # Create a temporary directory
    sub = tmpdir.mkdir("great.theme")

    # Write a file
    sub.join("{0}-{1}.png".format(
        NotifyType.INFO,
        NotifyImageSize.XY_256,
    )).write("the content doesn't matter for testing.")

    # Create an asset that will reference our file we just created
    a = AppriseAsset(
        theme='great.theme',
        image_path_mask='%s/{THEME}/{TYPE}-{XY}.png' % dirname(sub.strpath),
    )

    # We'll be able to read file we just created
    assert(a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is not None)

    # We can retrieve the filename at this point even with must_exist set
    # to True
    assert(a.image_path(
        NotifyType.INFO,
        NotifyImageSize.XY_256,
        must_exist=True) is not None)

    # If we make the file unreadable, however, we won't be able to read it.
    # This test just shows that we won't throw an exception.
    if getuid() == 0:
        # Root always overrides 0o000 permission settings, making the tests
        # below futile
        pytest.skip('The root user cannot run file permission tests.')

    chmod(dirname(sub.strpath), 0o000)
    assert(a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is None)

    # Our path doesn't exist anymore using this logic
    assert(a.image_path(
        NotifyType.INFO,
        NotifyImageSize.XY_256,
        must_exist=True) is None)

    # Return our permission so we don't have any problems with our cleanup
    chmod(dirname(sub.strpath), 0o700)

    # Our content is retrievable again
    assert(a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is not None)

    # our file path is accessible again too
    assert(a.image_path(
        NotifyType.INFO,
        NotifyImageSize.XY_256,
        must_exist=True) is not None)

    # We do the same test, but set the permission on the file
    chmod(a.image_path(NotifyType.INFO, NotifyImageSize.XY_256), 0o000)

    # our path will still exist in this case
    assert(a.image_path(
        NotifyType.INFO,
        NotifyImageSize.XY_256,
        must_exist=True) is not None)

    # but we will not be able to open it
    assert(a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is None)

    # Restore our permissions
    chmod(a.image_path(NotifyType.INFO, NotifyImageSize.XY_256), 0o640)

    # Disable all image references
    a = AppriseAsset(image_path_mask=False, image_url_mask=False)

    # We always return None in these calls now
    assert(a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is None)
    assert(a.image_url(NotifyType.INFO, NotifyImageSize.XY_256) is None)
    assert(a.image_path(NotifyType.INFO, NotifyImageSize.XY_256,
           must_exist=False) is None)
    assert(a.image_path(NotifyType.INFO, NotifyImageSize.XY_256,
           must_exist=True) is None)

    # Test our default extension out
    a = AppriseAsset(
        image_path_mask='/{THEME}/{TYPE}-{XY}{EXTENSION}',
        image_url_mask='http://localhost/{THEME}/{TYPE}-{XY}{EXTENSION}',
        default_extension='.jpeg',
    )
    assert(a.image_path(
        NotifyType.INFO,
        NotifyImageSize.XY_256,
        must_exist=False) == '/default/info-256x256.jpeg')

    assert(a.image_url(
        NotifyType.INFO,
        NotifyImageSize.XY_256) == 'http://localhost/'
                                   'default/info-256x256.jpeg')

    # extension support
    assert(a.image_path(
        NotifyType.INFO,
        NotifyImageSize.XY_128,
        must_exist=False,
        extension='.ico') == '/default/info-128x128.ico')

    assert(a.image_url(
        NotifyType.INFO,
        NotifyImageSize.XY_256,
        extension='.test') == 'http://localhost/'
                              'default/info-256x256.test')
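The color assertions in this test are plain hex arithmetic: '#abcabc' splits into 0xab = 171, 0xca = 202, 0xbc = 188, and reads whole as 0xabcabc = 11258556. A minimal converter showing the mapping (illustrative, not Apprise's implementation):

def hex_color(value, kind=None):
    """Return '#rrggbb' as a tuple, as an int, or unchanged."""
    rgb = value.lstrip('#')
    if kind is tuple:
        return tuple(int(rgb[i:i + 2], 16) for i in (0, 2, 4))
    if kind is int:
        return int(rgb, 16)
    return value

assert hex_color('#abcabc', tuple) == (171, 202, 188)
assert hex_color('#abcabc', int) == 11258556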
Example #38
0
File: willie.py Project: dkg/willie
def main(argv=None):
    global homedir
    # Step One: Parse The Command Line
    try:
        parser = argparse.ArgumentParser(description='Willie IRC Bot',
                                         usage='%(prog)s [options]')
        parser.add_argument('-c',
                            '--config',
                            metavar='filename',
                            help='use a specific configuration file')
        parser.add_argument("-d",
                            '--fork',
                            action="store_true",
                            dest="deamonize",
                            help="Deamonize willie")
        parser.add_argument("-q",
                            '--quit',
                            action="store_true",
                            dest="quit",
                            help="Gracefully quit Willie")
        parser.add_argument("-k",
                            '--kill',
                            action="store_true",
                            dest="kill",
                            help="Kill Willie")
        parser.add_argument('--exit-on-error',
                            action="store_true",
                            dest="exit_on_error",
                            help=("Exit immediately on every error instead of "
                                  "trying to recover"))
        parser.add_argument("-l",
                            '--list',
                            action="store_true",
                            dest="list_configs",
                            help="List all config files found")
        parser.add_argument("-m",
                            '--migrate',
                            action="store_true",
                            dest="migrate_configs",
                            help="Migrate config files to the new format")
        parser.add_argument('--quiet',
                            action="store_true",
                            dest="quiet",
                            help="Supress all output")
        parser.add_argument('-w',
                            '--configure-all',
                            action='store_true',
                            dest='wizard',
                            help='Run the configuration wizard.')
        parser.add_argument(
            '--configure-modules',
            action='store_true',
            dest='mod_wizard',
            help=('Run the configuration wizard, but only for the '
                  'module configuration options.'))
        parser.add_argument(
            '--configure-database',
            action='store_true',
            dest='db_wizard',
            help=('Run the configuration wizard, but only for the '
                  'database configuration options.'))
        parser.add_argument('-v',
                            '--version',
                            action="store_true",
                            dest="version",
                            help="Show version number and exit")
        opts = parser.parse_args()

        try:
            if os.getuid() == 0 or os.geteuid() == 0:
                stderr('Error: Do not run Willie with root privileges.')
                sys.exit(1)
        except AttributeError:
            # Windows doesn't have os.getuid/os.geteuid
            pass

        if opts.version:
            py_ver = '%s.%s.%s' % (sys.version_info.major,
                                   sys.version_info.minor,
                                   sys.version_info.micro)
            print('Willie %s (running on python %s)' % (__version__, py_ver))
            print('http://willie.dftba.net/')
            return
        elif opts.wizard:
            wizard('all', opts.config)
            return
        elif opts.mod_wizard:
            wizard('mod', opts.config)
            return
        elif opts.db_wizard:
            wizard('db', opts.config)
            return

        if opts.list_configs:
            configs = enumerate_configs()
            print('Config files in ~/.willie:')
            if not configs:
                print('\tNone found')
            else:
                for config in configs:
                    print('\t%s' % config)
            print('-------------------------')
            return

        config_name = opts.config or 'default'

        configpath = find_config(config_name)
        if not os.path.isfile(configpath):
            print(
                "Welcome to Willie!\nI can't seem to find the configuration file, so let's generate it!\n"
            )
            if not configpath.endswith('.cfg'):
                configpath = configpath + '.cfg'
            create_config(configpath)
            configpath = find_config(config_name)
        try:
            config_module = Config(configpath)
        except ConfigurationError as e:
            stderr(e)
            sys.exit(2)

        if config_module.core.not_configured:
            stderr('Bot is not configured, can\'t start')
            # exit with code 2 to prevent auto restart on fail by systemd
            sys.exit(2)

        if not config_module.has_option('core', 'homedir'):
            config_module.dotdir = homedir
            config_module.homedir = homedir
        else:
            homedir = config_module.core.homedir
            config_module.dotdir = config_module.core.homedir

        if not config_module.core.logdir:
            config_module.core.logdir = os.path.join(homedir, 'logs')
        logfile = os.path.join(config_module.logdir, 'stdio.log')
        if not os.path.isdir(config_module.logdir):
            os.mkdir(config_module.logdir)

        config_module.exit_on_error = opts.exit_on_error
        config_module._is_deamonized = opts.deamonize

        sys.stderr = tools.OutputRedirect(logfile, True, opts.quiet)
        sys.stdout = tools.OutputRedirect(logfile, False, opts.quiet)

        # Handle --quit, --kill and saving the PID to file
        pid_dir = config_module.core.pid_dir or homedir
        if opts.config is None:
            pid_file_path = os.path.join(pid_dir, 'willie.pid')
        else:
            basename = os.path.basename(opts.config)
            if basename.endswith('.cfg'):
                basename = basename[:-4]
            pid_file_path = os.path.join(pid_dir, 'willie-%s.pid' % basename)
        if os.path.isfile(pid_file_path):
            with open(pid_file_path, 'r') as pid_file:
                try:
                    old_pid = int(pid_file.read())
                except ValueError:
                    old_pid = None
            if old_pid is not None and tools.check_pid(old_pid):
                if not opts.quit and not opts.kill:
                    stderr(
                        'There\'s already a Willie instance running with this config file'
                    )
                    stderr('Try using the --quit or the --kill options')
                    sys.exit(1)
                elif opts.kill:
                    stderr('Killing the willie')
                    os.kill(old_pid, signal.SIGKILL)
                    sys.exit(0)
                elif opts.quit:
                    stderr('Signaling Willie to stop gracefully')
                    if hasattr(signal, 'SIGUSR1'):
                        os.kill(old_pid, signal.SIGUSR1)
                    else:
                        os.kill(old_pid, signal.SIGTERM)
                    sys.exit(0)
            elif not tools.check_pid(old_pid) and (opts.kill or opts.quit):
                stderr('Willie is not running!')
                sys.exit(1)
        elif opts.quit or opts.kill:
            stderr('Willie is not running!')
            sys.exit(1)
        if opts.deamonize:
            child_pid = os.fork()
            if child_pid != 0:
                sys.exit()
        with open(pid_file_path, 'w') as pid_file:
            pid_file.write(str(os.getpid()))
        config_module.pid_file_path = pid_file_path

        # Step Five: Initialise And Run willie
        run(config_module)
    except KeyboardInterrupt:
        print("\n\nInterrupted")
        os._exit(1)
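The stale-PID handling above hinges on a check_pid() helper; a common POSIX implementation sends signal 0, which performs only the existence/permission probe (a sketch, not necessarily willie's tools.check_pid):

import os

def check_pid(pid):
    """Return True if a process with this PID currently exists."""
    try:
        os.kill(pid, 0)  # signal 0: nothing is delivered, just a probe
    except ProcessLookupError:
        return False
    except PermissionError:
        return True  # it exists, it just belongs to someone else
    return True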
Example #39
0

def latexcmd(file):
    return "pdflatex " + file


def end_file(file):
    tex = open(file + ".tex", 'a')
    tex.write("\\end{document}")
    tex.close()
    os.system(latexcmd(file))
    sys.exit(100)


##############
user = pwd.getpwuid(os.getuid())[0]
ltime = time.asctime(time.localtime(time.time()))

ftex = "results"

today = datetime.date.today()
ftex += '_%s' % user
ftex += '_%s' % today
#print ftex

peak_label = ["", "nominal", "2s", "3s"]

modename = [
    "$d\\sigma/d\\pt, |y|:(0,2)$", "$d\\sigma/d\\pt~|y|:(0,1),(1,2)$",
    "$d\\sigma/d|y|$"
]
Example #40
0
    def runLinux(self):
        """The steps required to handle a frame under linux"""
        frameInfo = self.frameInfo
        runFrame = self.runFrame

        self.__createEnvVariables()
        self.__writeHeader()
        if rqd.rqconstants.RQD_CREATE_USER_IF_NOT_EXISTS:
            rqd.rqutil.permissionsHigh()
            rqd.rqutil.checkAndCreateUser(runFrame.user_name)
            rqd.rqutil.permissionsLow()

        tempStatFile = "%srqd-stat-%s-%s" % (self.rqCore.machine.getTempPath(),
                                             frameInfo.frameId,
                                             time.time())
        self._tempLocations.append(tempStatFile)
        tempCommand = []
        if self.rqCore.machine.isDesktop():
            tempCommand += [rqconstants.PATH_NICE_CMD]
        tempCommand += [rqconstants.PATH_TIME_CMD, "-p", "-o", tempStatFile]

        if 'CPU_LIST' in runFrame.attributes:
            tempCommand += [rqconstants.PATH_TASKSET_CMD, '-c', runFrame.attributes['CPU_LIST']]

        rqd.rqutil.permissionsHigh()
        try:
            if os.getuid() != 0:
                tempCommand += ['"' + self._createCommandFile(runFrame.command) + '"']
                log.debug("tempCommand : "+ str(tempCommand))
            else:
                tempCommand += ["/bin/su", runFrame.user_name, rqconstants.SU_ARGUEMENT,
                                '"' + self._createCommandFile(runFrame.command) + '"']

            frameInfo.forkedCommand = subprocess.Popen(" ".join(tempCommand),
                                                       env=self.frameEnv,
                                                       cwd=self.rqCore.machine.getTempPath(),
                                                       stdin=subprocess.PIPE,
                                                       stdout=self.rqlog,
                                                       stderr=self.rqlog,
                                                       close_fds=True,
                                                       shell=rqconstants.USE_SHELL,
                                                       preexec_fn=os.setsid)
        except Exception:
            log.critical(str(sys.exc_info()))

            # Actual cwd is set by /shots/SHOW/home/perl/etc/qwrap.cuerun
        finally:
            rqd.rqutil.permissionsLow()

        frameInfo.pid = frameInfo.forkedCommand.pid

        if not self.rqCore.updateRssThread.is_alive():
            self.rqCore.updateRssThread = threading.Timer(rqd.rqconstants.RSS_UPDATE_INTERVAL,
                                                           self.rqCore.updateRss)
            self.rqCore.updateRssThread.start()

        returncode = frameInfo.forkedCommand.wait()

        # Find exitStatus and exitSignal
        if returncode < 0:
            # Exited with a signal
            frameInfo.exitStatus = 1
            frameInfo.exitSignal = -returncode
        else:
            frameInfo.exitStatus = returncode
            frameInfo.exitSignal = 0

        try:
            with open(tempStatFile, "r") as statFile:
                frameInfo.realtime = statFile.readline().split()[1]
                frameInfo.utime = statFile.readline().split()[1]
                frameInfo.stime = statFile.readline().split()[1]
        except Exception:
            pass  # This happens when frames are killed

        self.__writeFooter()
        self.__cleanup()
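The tempStatFile parsing at the end relies on the fixed layout of `time -p -o <file>`: three lines, `real N`, `user N` and `sys N` in that order, which is why each readline().split()[1] picks out the number. The same parse as a sketch:

# `time -p` output looks like:
#   real 12.34
#   user 10.01
#   sys 1.23
with open(tempStatFile) as statFile:
    realtime, utime, stime = (line.split()[1] for line in statFile)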
Example #41
0
def deapply_dns_workaround():
    if os.getuid() == 0:
        commands.getoutput(
            'echo "nameserver 85.214.20.141" > /etc/resolv.conf')
Example #42
0
def main():
    log = logging.getLogger("zulip-provisioner")
    # TODO: support other architectures
    if platform.architecture()[0] == '64bit':
        arch = 'amd64'
    else:
        log.critical("Only amd64 is supported.")

    vendor, version, codename = platform.dist()

    if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
        log.critical("Unsupported platform: {} {}".format(vendor, codename))

    with sh.sudo:
        sh.apt_get.update(**LOUD)

        sh.apt_get.install(*APT_DEPENDENCIES["trusty"], assume_yes=True, **LOUD)

    temp_deb_path = sh.mktemp("package_XXXXXX.deb", tmpdir=True)

    sh.wget(
        "{}/{}_{}_{}.deb".format(
            TSEARCH_URL_BASE,
            TSEARCH_PACKAGE_NAME["trusty"],
            TSEARCH_VERSION,
            arch,
        ),
        output_document=temp_deb_path,
        **LOUD
    )

    with sh.sudo:
        sh.dpkg("--install", temp_deb_path, **LOUD)

    with sh.sudo:
        PHANTOMJS_PATH = "/srv/phantomjs"
        PHANTOMJS_TARBALL = os.path.join(PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64.tar.bz2")
        sh.mkdir("-p", PHANTOMJS_PATH, **LOUD)
        sh.wget("https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-1.9.8-linux-x86_64.tar.bz2",
                output_document=PHANTOMJS_TARBALL, **LOUD)
        sh.tar("xj", directory=PHANTOMJS_PATH, file=PHANTOMJS_TARBALL, **LOUD)
        sh.ln("-sf", os.path.join(PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64", "bin", "phantomjs"),
              "/usr/local/bin/phantomjs", **LOUD)

    with sh.sudo:
        sh.rm("-rf", VENV_PATH, **LOUD)
        sh.mkdir("-p", VENV_PATH, **LOUD)
        sh.chown("{}:{}".format(os.getuid(), os.getgid()), VENV_PATH, **LOUD)

    sh.virtualenv(VENV_PATH, **LOUD)

    # Add the ./tools and ./scripts/setup directories inside the repository root to
    # the system path; we'll reference them later.
    orig_path = os.environ["PATH"]
    os.environ["PATH"] = os.pathsep.join((
            os.path.join(ZULIP_PATH, "tools"),
            os.path.join(ZULIP_PATH, "scripts", "setup"),
            orig_path
    ))


    # Put Python virtualenv activation in our .bash_profile.
    with open(os.path.expanduser('~/.bash_profile'), 'w+') as bash_profile:
        bash_profile.writelines([
            "source .bashrc\n",
            "source %s\n" % (os.path.join(VENV_PATH, "bin", "activate"),),
        ])

    # Switch current Python context to the virtualenv.
    activate_this = os.path.join(VENV_PATH, "bin", "activate_this.py")
    execfile(activate_this, dict(__file__=activate_this))

    sh.pip.install(requirement=os.path.join(ZULIP_PATH, "requirements.txt"), **LOUD)

    with sh.sudo:
        sh.cp(REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH, **LOUD)

    # Add additional node packages for test-js-with-node.
    with sh.sudo:
        sh.npm.install(*NPM_DEPENDENCIES["trusty"], g=True, prefix="/usr", **LOUD)

    # Management commands expect to be run from the root of the project.
    os.chdir(ZULIP_PATH)

    os.system("tools/download-zxcvbn")
    os.system("tools/emoji_dump/build_emoji")
    os.system("generate_secrets.py -d")
    if "--travis" in sys.argv:
        os.system("sudo service rabbitmq-server restart")
        os.system("sudo service redis-server restart")
        os.system("sudo service memcached restart")
    sh.configure_rabbitmq(**LOUD)
    sh.postgres_init_dev_db(**LOUD)
    sh.do_destroy_rebuild_database(**LOUD)
    sh.postgres_init_test_db(**LOUD)
    sh.do_destroy_rebuild_test_database(**LOUD)
def main():
    for ML in [config.MEASUREDBOOT_ML, config.IMA_ML]:
        if not os.access(ML, os.F_OK):
            logger.warning("Measurement list path %s not accessible by agent. Any attempt to instruct it to access this path - via \"keylime_tenant\" CLI - will result in agent process dying", ML)

    if config.get('cloud_agent', 'agent_uuid') == 'dmidecode':
        if os.getuid() != 0:
            raise RuntimeError('agent_uuid is configured to use dmidecode, '
                               'but current process is not running as root.')
        cmd = ['which', 'dmidecode']
        ret = cmd_exec.run(cmd, raiseOnError=False)
        if ret['code'] != 0:
            raise RuntimeError('agent_uuid is configured to use dmidecode, '
                           'but it is not found on the system.')

    # Instantiate TPM class

    instance_tpm = tpm()
    # get params for initialization
    registrar_ip = config.get('cloud_agent', 'registrar_ip')
    registrar_port = config.get('cloud_agent', 'registrar_port')

    # get params for the verifier to contact the agent
    contact_ip = os.getenv("KEYLIME_AGENT_CONTACT_IP", None)
    if contact_ip is None and config.has_option('cloud_agent', 'agent_contact_ip'):
        contact_ip = config.get('cloud_agent', 'agent_contact_ip')
    contact_port = os.getenv("KEYLIME_AGENT_CONTACT_PORT", None)
    if contact_port is None and config.has_option('cloud_agent', 'agent_contact_port'):
        contact_port = config.get('cloud_agent', 'agent_contact_port', fallback="invalid")

    # initialize the tmpfs partition to store keys if it isn't already available
    secdir = secure_mount.mount()

    # change dir to working dir
    config.ch_dir(config.WORK_DIR, logger)

    # initialize tpm
    (ekcert, ek_tpm, aik_tpm) = instance_tpm.tpm_init(self_activate=False, config_pw=config.get(
        'cloud_agent', 'tpm_ownerpassword'))  # this tells initialize not to self activate the AIK
    virtual_agent = instance_tpm.is_vtpm()

    if ekcert is None:
        if virtual_agent:
            ekcert = 'virtual'
        elif instance_tpm.is_emulator():
            ekcert = 'emulator'

    # now we need the UUID
    try:
        agent_uuid = config.get('cloud_agent', 'agent_uuid')
    except configparser.NoOptionError:
        agent_uuid = None
    if agent_uuid == 'openstack':
        agent_uuid = openstack.get_openstack_uuid()
    elif agent_uuid == 'hash_ek':
        agent_uuid = hashlib.sha256(ek_tpm).hexdigest()
    elif agent_uuid == 'generate' or agent_uuid is None:
        agent_uuid = str(uuid.uuid4())
    elif agent_uuid == 'dmidecode':
        cmd = ['dmidecode', '-s', 'system-uuid']
        ret = cmd_exec.run(cmd)
        sys_uuid = ret['retout'][0].decode('utf-8')
        agent_uuid = sys_uuid.strip()
        try:
            uuid.UUID(agent_uuid)
        except ValueError as e:
            raise RuntimeError("The UUID returned from dmidecode is invalid: %s" % e)  # pylint: disable=raise-missing-from
    elif agent_uuid == 'hostname':
        agent_uuid = socket.getfqdn()
    if config.STUB_VTPM and config.TPM_CANNED_VALUES is not None:
        # Use canned values for stubbing
        jsonIn = config.TPM_CANNED_VALUES
        if "add_vtpm_to_group" in jsonIn:
            # The value we're looking for has been canned!
            agent_uuid = jsonIn['add_vtpm_to_group']['retout']
        else:
            # Our command hasn't been canned!
            raise Exception("Command %s not found in canned json!" %
                            ("add_vtpm_to_group"))

    logger.info("Agent UUID: %s", agent_uuid)

    # register it and get back a blob
    keyblob = registrar_client.doRegisterAgent(
        registrar_ip, registrar_port, agent_uuid, ek_tpm, ekcert, aik_tpm, contact_ip, contact_port)

    if keyblob is None:
        instance_tpm.flush_keys()
        raise Exception("Registration failed")

    # get the ephemeral registrar key
    key = instance_tpm.activate_identity(keyblob)

    if key is None:
        instance_tpm.flush_keys()
        raise Exception("Activation failed")

    # tell the registrar server we know the key
    retval = False
    retval = registrar_client.doActivateAgent(
        registrar_ip, registrar_port, agent_uuid, key)

    if not retval:
        instance_tpm.flush_keys()
        raise Exception("Registration failed on activate")

    serveraddr = (config.get('cloud_agent', 'cloudagent_ip'),
                  config.getint('cloud_agent', 'cloudagent_port'))
    server = CloudAgentHTTPServer(serveraddr, Handler, agent_uuid)
    serverthread = threading.Thread(target=server.serve_forever)

    logger.info("Starting Cloud Agent on %s:%s with API version %s. Use <Ctrl-C> to stop", serveraddr[0], serveraddr[1], keylime_api_version.current_version())
    serverthread.start()

    # want to listen for revocations?
    if config.getboolean('cloud_agent', 'listen_notfications'):
        cert_path = config.get('cloud_agent', 'revocation_cert')
        if cert_path == "default":
            cert_path = os.path.join(secdir,
                                     "unzipped/RevocationNotifier-cert.crt")
        elif cert_path[0] != '/':
            # if it is a relative, convert to absolute in work_dir
            cert_path = os.path.abspath(
                os.path.join(config.WORK_DIR, cert_path))

        def perform_actions(revocation):
            actionlist = []

            # load the actions from inside the keylime module
            actionlisttxt = config.get('cloud_agent', 'revocation_actions')
            if actionlisttxt.strip() != "":
                actionlist = actionlisttxt.split(',')
                actionlist = ["revocation_actions.%s" % i for i in actionlist]

            # load actions from unzipped
            action_list_path = os.path.join(secdir, "unzipped/action_list")
            if os.path.exists(action_list_path):
                with open(action_list_path, encoding="utf-8") as f:
                    actionlisttxt = f.read()
                if actionlisttxt.strip() != "":
                    localactions = actionlisttxt.strip().split(',')
                    for action in localactions:
                        if not action.startswith('local_action_'):
                            logger.warning("Invalid local action: %s. Must start with local_action_", action)
                        else:
                            actionlist.append(action)

                    uzpath = "%s/unzipped" % secdir
                    if uzpath not in sys.path:
                        sys.path.append(uzpath)

            for action in actionlist:
                logger.info("Executing revocation action %s", action)
                try:
                    module = importlib.import_module(action)
                    execute = getattr(module, 'execute')
                    asyncio.get_event_loop().run_until_complete(execute(revocation))
                except Exception as e:
                    logger.warning("Exception during execution of revocation action %s: %s", action, e)
        try:
            while True:
                try:
                    revocation_notifier.await_notifications(
                        perform_actions, revocation_cert_path=cert_path)
                except Exception as e:
                    logger.exception(e)
                    logger.warning("No connection to revocation server, retrying in 10s...")
                    time.sleep(10)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            instance_tpm.flush_keys()
            server.shutdown()
    else:
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            instance_tpm.flush_keys()
            server.shutdown()
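perform_actions() above reduces to a small dynamic-dispatch core: import the action module by name, fetch its execute attribute, and drive the coroutine to completion. A stripped-down sketch (assuming, as the agent does, that every action module exposes an async execute(revocation); the module name here is illustrative):

import asyncio
import importlib

def run_revocation_action(action_name, revocation):
    module = importlib.import_module(action_name)  # e.g. 'revocation_actions.<name>'
    execute = getattr(module, 'execute')
    asyncio.get_event_loop().run_until_complete(execute(revocation))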
def main(argv):
    """
    Run the program.
    """

    # Set up logging. See the logging module docs.
    logging.basicConfig(format="[%(asctime)s] %(levelname)s: %(message)s",
                        level=logging.INFO)

    # Parse command-line arguments. Make sure to give our docstring as program
    # help.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument("configFile",
                        type=argparse.FileType("r"),
                        help="configuration file of hosts to read")
    parser.add_argument("--noWait",
                        action="store_true",
                        help="look up dynamic peers once and exit")
    parser.add_argument("--adminInfo",
                        help="use this file to load the cjdns admin password")

    # Parse all the command-line arguments
    options = parser.parse_args(argv[1:])

    while True:
        try:
            # Connect to the router, using the specified admin info file, if
            # given.
            cjdns = connectWithAdminInfo(path=options.adminInfo)
            break
        except socket.error:
            # Connection probably refused. Retry in a bit
            logging.error("Error connecting to cjdns. Retrying in 1 minute...")
            time.sleep(60)

    # Drop root if we have it. We can't do it before we load the admin info
    # file, for the use case where that file is root-only.
    try:

        # Switch group to nogroup
        os.setgid(grp.getgrnam("nogroup").gr_gid)
        # Switch user to nobody
        os.setuid(pwd.getpwnam("nobody").pw_uid)

        # Announce we dropped privs
        logging.info("Dropped privileges: running as {}:{}".format(
            pwd.getpwuid(os.getuid())[0],
            grp.getgrgid(os.getgid())[0]))
    except (OSError, KeyError):
        # Complain we couldn't drop privs right
        logging.warning("Could not drop privileges: running as {}:{}".format(
            pwd.getpwuid(os.getuid())[0],
            grp.getgrgid(os.getgid())[0]))

    # Now we can load the config file. It is now required.

    # Make a new parser to parse the config file
    parsedConfig = ConfigParser.SafeConfigParser()

    # Be case sensitive
    parsedConfig.optionxform = str

    # Read the config from the file
    parsedConfig.readfp(options.configFile)

    # Make a new watcher on the cjdroute connection, with the config from the
    # config file. This automatically looks up all the peers and tries to
    # connect to them once.
    watcher = DynamicEndpointWatcher(cjdns, parsedConfig)

    if options.noWait or os.environ.get('nowait', False):
        # We're not supposed to wait. Quit while we're ahead
        sys.exit(0)
    else:
        # Monitor for unresponsive nodes. This will loop until cjdns restarts,
        # at which point it will throw an exception.
        watcher.run()
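Note the ordering in the privilege drop above: the group is switched first, because once setuid() has dropped to nobody the process no longer has the right to call setgid(). As a reusable sketch:

import grp
import os
import pwd

def drop_privileges(user='nobody', group='nogroup'):
    # setgid() must come first; after setuid() away from root it would fail
    os.setgid(grp.getgrnam(group).gr_gid)
    os.setuid(pwd.getpwnam(user).pw_uid)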
Example #45
0
def run(argv):
    if len(argv) < 2:
        show_usage()
        return 2

    extra_context = {
        'PATH': os.environ.get('PATH', ''),
        'username': pwd.getpwuid(os.getuid()).pw_name
    }

    if 'TZ' in os.environ:
        extra_context['TZ'] = os.environ['TZ']

    client = SimpleSentryClient.new_from_environment()

    full_command, command_ws, shell = get_command(argv[1:])
    extra_context['command'] = command_ws
    extra_context['shell'] = shell

    # if we couldn't configure sentry, just pass through
    if client is None:
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
        os.execv(shell, full_command)
        eprint('Unable to execv({0}, {1})'.format(shell, repr(full_command)))
        return 1

    working_dir = None
    p = None

    def passthrough(signum, frame):
        if p is not None:
            p.send_signal(signum)
        else:
            raise ValueError('received signal %d without a child; bailing' %
                             signum)

    def reset_signals():
        for sig in (signal.SIGTERM, signal.SIGQUIT, signal.SIGINT,
                    signal.SIGPIPE):
            signal.signal(sig, signal.SIG_DFL)

    def run_and_monitor(working_dir):
        stdout_path = working_dir / 'stdout'
        stderr_path = working_dir / 'stderr'
        with stdout_path.open('w+b') as stdout, \
                stderr_path.open('w+b') as stderr:
            start_time = time.time()

            p = subprocess.Popen(full_command,
                                 stdout=stdout,
                                 stderr=stderr,
                                 shell=False,
                                 preexec_fn=reset_signals)

            extra_context['start_time'] = start_time
            extra_context['load_average_at_exit'] = ' '.join(
                map(str, os.getloadavg()))
            extra_context['working_directory'] = os.getcwd()

            def print_all():
                stderr.seek(0)
                x = stderr.read().decode(sys.stderr.encoding)
                if x:
                    print(x, file=sys.stderr, end="")
                stdout.seek(0)
                x = stdout.read().decode(sys.stdout.encoding)
                if x:
                    print(x, end="")

            if p.wait() == 0:
                print_all()
                return 0

            else:
                end_time = time.time()
                extra_context['duration'] = end_time - start_time

                code = p.returncode
                extra_context['returncode'] = code

                stderr_head, stderr_is_all = read_snippet(stderr, 700)
                message = f'Command `{command_ws}` failed with code {code}.\n'
                if stderr_head:
                    if stderr_is_all:
                        message += '\nstderr:\n'
                    else:
                        message += '\nExcerpt of stderr:\n'
                    message += stderr_head
                stdout_head, stdout_is_all = read_snippet(
                    stdout, 200 + (700 - len(stderr_head)))
                if stdout_head:
                    if stdout_is_all:
                        message += '\nstdout:\n'
                    else:
                        message += '\nExcerpt of stdout:\n'
                    message += stdout_head
                client.send_event(
                    message=message,
                    level='error',
                    fingerprint=[socket.gethostname(), command_ws],
                    extra_context=extra_context,
                )
                print_all()
                return code

    for sig in (signal.SIGTERM, signal.SIGQUIT, signal.SIGINT):
        signal.signal(sig, passthrough)
    with tempfile.TemporaryDirectory() as working_dir:
        return run_and_monitor(PosixPath(working_dir))
Example #46
0
#!/usr/bin/env python

import sys
import os
import pwd
import time
from Pegasus.DAX3 import *

# The name of the DAX file is the first argument
if len(sys.argv) != 2:
    sys.stderr.write("Usage: %s DAXFILE\n" % sys.argv[0])
    sys.exit(1)

daxfile = sys.argv[1]
USER = pwd.getpwuid(os.getuid())[0]

dax = ADAG("hic_wf")
dax.metadata("name", "HIC")
dax.metadata("creator", "%s@%s" % (USER, os.uname()[1]))
dax.metadata("created", time.ctime())

events_exec = Executable("run_events")

# if you need multiple runs just add the job in a for loop
# replace XYZ with the unique identifier
# and change the name of the input and args files
# eg.

args_conf = File("args.conf")
results_in = File("Results.tar.gz")
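A hedged sketch of the loop the comment above describes, using DAX3 calls as commonly seen (the run count and identifiers are illustrative):

# one job per run; XYZ becomes the run index
for xyz in range(3):
    job = Job(name="run_events", id="events_%03d" % xyz)
    job.uses(args_conf, link=Link.INPUT)
    job.uses(results_in, link=Link.INPUT)
    dax.addJob(job)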
Example #47
0
 def __exit__(self, exc_type, exc_value, traceback):
     os.seteuid(os.getuid())
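Only __exit__ is shown, but it implies the rest of a privilege-switching context manager: seteuid() flips the effective UID on entry, and the exit hook reverts to the real UID. A sketch of the assumed shape:

import os

class switched_euid(object):
    def __init__(self, euid):
        self._euid = euid

    def __enter__(self):
        os.seteuid(self._euid)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        os.seteuid(os.getuid())  # back to the real UID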
Example #48
0
def admin_control():
    if os.getuid() != 0:
        print('Please run as root!')
        exit(1)
Example #49
0
import os
import sys

VERSION = "threshold v3.2"

# if user is checking version
try:
    if sys.argv[1] in ('-v', '--version'):
        print(VERSION)
        sys.exit()
except IndexError:
    pass

# Check if user is sudo
if os.getuid() != 0:
    print('\nPermission ERROR: You must run this tool as a superuser\n')
    sys.exit()

import re
import signal
import shutil
import sqlite3
import subprocess
import time
import threading
import logging
import getpass
from datetime import datetime

PORT = 0
Example #50
0
def prepare_environment(
    flags: Optional[list[str]] = typer.Argument(None),
    editable: Optional[bool] = None,
    extra_module: Optional[list[str]] = None,
    release_var: Optional[str] = None,
):
    """Prepare the local environment for installing DIRAC."""

    _check_containers_running(is_up=False)
    if editable is None:
        editable = sys.stdout.isatty()
        typer.secho(
            f"No value passed for --[no-]editable, automatically detected: {editable}",
            fg=c.YELLOW,
        )
    typer.echo(f"Preparing environment")

    modules = DEFAULT_MODULES | dict(f.split("=", 1) for f in extra_module or [])
    modules = {k: Path(v).absolute() for k, v in modules.items()}

    flags = dict(f.split("=", 1) for f in flags or [])
    docker_compose_env = _make_env(flags)
    server_flags = {}
    client_flags = {}
    for key, value in flags.items():
        if key.startswith("SERVER_"):
            server_flags[key[len("SERVER_"):]] = value
        elif key.startswith("CLIENT_"):
            client_flags[key[len("CLIENT_"):]] = value
        else:
            server_flags[key] = value
            client_flags[key] = value
    server_config = _make_config(modules, server_flags, release_var, editable)
    client_config = _make_config(modules, client_flags, release_var, editable)

    typer.secho("Running docker-compose to create containers", fg=c.GREEN)
    with _gen_docker_compose(modules) as docker_compose_fn:
        subprocess.run(
            ["docker-compose", "-f", docker_compose_fn, "up", "-d"],
            check=True,
            env=docker_compose_env,
        )

    typer.secho("Creating users in server and client containers", fg=c.GREEN)
    for container_name in ["server", "client"]:
        if os.getuid() == 0:
            continue
        cmd = _build_docker_cmd(container_name, use_root=True, cwd="/")
        gid = str(os.getgid())
        uid = str(os.getuid())
        ret = subprocess.run(cmd + ["groupadd", "--gid", gid, "dirac"],
                             check=False)
        if ret.returncode != 0:
            typer.secho(f"Failed to add add group dirac with id={gid}",
                        fg=c.YELLOW)
        subprocess.run(
            cmd + [
                "useradd",
                "--uid",
                uid,
                "--gid",
                gid,
                "-s",
                "/bin/bash",
                "-d",
                "/home/dirac",
                "dirac",
            ],
            check=True,
        )
        subprocess.run(cmd + ["chown", "dirac", "/home/dirac"], check=True)

    typer.secho("Creating MySQL user", fg=c.GREEN)
    cmd = [
        "docker", "exec", "mysql", "mysql", f"--password={DB_ROOTPWD}", "-e"
    ]
    # It sometimes takes a while for MySQL to be ready so wait for a while if needed
    for _ in range(10):
        ret = subprocess.run(
            cmd +
            [f"CREATE USER '{DB_USER}'@'%' IDENTIFIED BY '{DB_PASSWORD}';"],
            check=False,
        )
        if ret.returncode == 0:
            break
        typer.secho("Failed to connect to MySQL, will retry in 10 seconds",
                    fg=c.YELLOW)
        time.sleep(10)
    else:
        raise Exception(ret)
    subprocess.run(
        cmd + [
            f"CREATE USER '{DB_USER}'@'localhost' IDENTIFIED BY '{DB_PASSWORD}';"
        ],
        check=True,
    )
    subprocess.run(
        cmd +
        [f"CREATE USER '{DB_USER}'@'mysql' IDENTIFIED BY '{DB_PASSWORD}';"],
        check=True,
    )

    typer.secho("Copying files to containers", fg=c.GREEN)
    for name, config in [("server", server_config), ("client", client_config)]:
        if path := config.get("DIRACOS_TARBALL_PATH"):
            path = Path(path)
            if config["USE_PYTHON3"]:
                config["DIRACOS_TARBALL_PATH"] = f"/{path.name}"
                subprocess.run(
                    [
                        "docker", "cp",
                        str(path), f"{name}:/{config['DIRACOS_TARBALL_PATH']}"
                    ],
                    check=True,
                )
            else:
                md5_fn = Path(str(path).replace(".tar.gz", ".md5"))
                if not md5_fn.exists():
                    typer.secho(
                        "Failed to find MD5 filename for DIRACOS_TARBALL_PATH. "
                        f"Expected at: {md5_fn}",
                        err=True,
                        fg=c.RED,
                    )
                    raise typer.Exit(1)
                subprocess.run(
                    ["docker", "cp",
                     str(path), f"{name}:/{path.name}"],
                    check=True)
                subprocess.run(
                    ["docker", "cp",
                     str(md5_fn), f"{name}:/{md5_fn.name}"],
                    check=True)
                config["DIRACOS_TARBALL_PATH"] = "/"
                config["DIRACOSVER"] = md5_fn.stem.split("-", 1)[1]

        config_as_shell = _dict_to_shell(config)
        typer.secho(f"## {name.title()} config is:",
                    fg=c.BRIGHT_WHITE,
                    bg=c.BLACK)
        typer.secho(config_as_shell)

        with tempfile.TemporaryDirectory() as tmpdir:
            path = Path(tmpdir) / "CONFIG"
            path.write_text(config_as_shell)
            subprocess.run(
                ["docker", "cp",
                 str(path), f"{name}:/home/dirac"],
                check=True,
            )
Example #51
0
import os
import stat
import time
import grp
import threading
import binascii

from fuse import Operations, FuseOSError
from evdev import UInput, ecodes

from pybot.lcd.ansi import ANSITerm

__author__ = 'Eric Pascual'

_file_timestamp = int(time.time())
_uid = os.getuid()
_gid = grp.getgrnam('lcdfs').gr_gid


class FSEntryDescriptor(object):
    """ Descriptor of the file system entries.

    It bundles the file stats (atime and mtime) and the handler of the file content.
    """
    def __init__(self, handler, mtime=_file_timestamp, atime=_file_timestamp):
        self.handler = handler
        self.mtime = mtime
        self.atime = atime


class FileHandler(object):
 def test_archive_to_staging_set_group(self):
     """archive: test copying to staging archive dir and set group
     """
     # Find groups for current user
     current_user = pwd.getpwuid(os.getuid()).pw_name
     groups = [g.gr_gid
               for g in grp.getgrall()
               if current_user in g.gr_mem]
     if len(groups) < 2:
         raise unittest.SkipTest("user '%s' must be in at least "
                                 "two groups for this test" %
                                 current_user)
     # Find a group to set archived files to
     current_gid = os.stat(self.dirn).st_gid
     new_group = None
     for gid in groups:
         if gid != current_gid:
             new_group = gid
             break
     self.assertTrue(new_group is not None)
     # Make a mock auto-process directory
     mockdir = MockAnalysisDirFactory.bcl2fastq2(
         '170901_M00879_0087_000000000-AGEW9',
         'miseq',
         metadata={ "instrument_datestamp": "170901" },
         top_dir=self.dirn)
     mockdir.create()
     # Make a mock archive directory
     archive_dir = os.path.join(self.dirn,"archive")
     final_dir = os.path.join(archive_dir,
                              "2017",
                              "miseq")
     os.makedirs(final_dir)
     self.assertTrue(os.path.isdir(final_dir))
     self.assertEqual(len(os.listdir(final_dir)),0)
     # Make autoprocess instance and set required metadata
     ap = AutoProcess(analysis_dir=mockdir.dirn,
                      settings=self.settings)
     ap.set_metadata("source","testing")
     ap.set_metadata("run_number","87")
     # Do archiving op
     status = archive(ap,
                      archive_dir=archive_dir,
                      year='2017',platform='miseq',
                      read_only_fastqs=False,
                      group=new_group,
                      final=False)
     self.assertEqual(status,0)
     # Check that staging dir exists
     staging_dir = os.path.join(
         final_dir,
         "__170901_M00879_0087_000000000-AGEW9_analysis.pending")
     self.assertTrue(os.path.exists(staging_dir))
     self.assertEqual(len(os.listdir(final_dir)),1)
     # Check group of staging dir
     self.assertEqual(os.stat(staging_dir).st_gid,new_group)
     # Check contents
     dirs = ("AB","CDE","logs","undetermined")
     for d in dirs:
         d = os.path.join(staging_dir,d)
         self.assertTrue(os.path.exists(d))
         self.assertEqual(os.stat(d).st_gid,new_group)
     files = ("auto_process.info",
              "custom_SampleSheet.csv",
              "metadata.info",
              "projects.info",
              "SampleSheet.orig.csv")
     for f in files:
         f = os.path.join(staging_dir,f)
         self.assertTrue(os.path.exists(f))
         self.assertEqual(os.stat(f).st_gid,new_group)
def main():
    '''Do the main thing here'''
    if os.getuid() != 0:
        sys.exit('This command requires root (to install packages), so please '
                 'run again with sudo or as root.')

    parser = argparse.ArgumentParser()
    parser.add_argument('--seedprogram', default='',
                        help='Which Seed Program catalog to use. Valid values '
                        'are %s.' % ', '.join(get_seeding_programs()))
    parser.add_argument('--catalogurl', default='',
                        help='Software Update catalog URL. This option '
                        'overrides any seedprogram option.')
    parser.add_argument('--workdir', metavar='path_to_working_dir',
                        default='.',
                        help='Path to working directory on a volume with over '
                        '10G of available space. Defaults to current working '
                        'directory.')
    parser.add_argument('--compress', action='store_true',
                        help='Output a read-only compressed disk image with '
                        'the Install macOS app at the root. This is now the '
                        'default. Use --raw to get a read-write sparse image '
                        'with the app in the Applications directory.')
    parser.add_argument('--raw', action='store_true',
                        help='Output a read-write sparse image '
                        'with the app in the Applications directory. Requires '
                        'less available disk space and is faster.')
    parser.add_argument('--ignore-cache', action='store_true',
                        help='Ignore any previously cached files.')
    args = parser.parse_args()

    if args.catalogurl:
        su_catalog_url = args.catalogurl
    elif args.seedprogram:
        su_catalog_url = get_seed_catalog(args.seedprogram)
        if not su_catalog_url:
            print('Could not find a catalog url for seed program %s'
                  % args.seedprogram, file=sys.stderr)
            print('Valid seeding programs are: %s'
                  % ', '.join(get_seeding_programs()), file=sys.stderr)
            exit(-1)
    else:
        su_catalog_url = get_default_catalog()
        if not su_catalog_url:
            print('Could not find a default catalog url for this OS version.',
                  file=sys.stderr)
            exit(-1)

    # download sucatalog and look for products that are for macOS installers
    catalog = download_and_parse_sucatalog(
        su_catalog_url, args.workdir, ignore_cache=args.ignore_cache)
    product_info = os_installer_product_info(
        catalog, args.workdir, ignore_cache=args.ignore_cache)

    if not product_info:
        print('No macOS installer products found in the sucatalog.',
              file=sys.stderr)
        exit(-1)

    # display a menu of choices (some seed catalogs have multiple installers)
    print('%2s %12s %10s %8s %11s  %s'
          % ('#', 'ProductID', 'Version', 'Build', 'Post Date', 'Title'))
    for index, product_id in enumerate(product_info):
        print('%2s %12s %10s %8s %11s  %s' % (
            index + 1,
            product_id,
            product_info[product_id]['version'],
            product_info[product_id]['BUILD'],
            product_info[product_id]['PostDate'].strftime('%Y-%m-%d'),
            product_info[product_id]['title']
        ))

    answer = get_input(
        '\nChoose a product to download (1-%s): ' % len(product_info))
    try:
        index = int(answer) - 1
        if index < 0:
            raise ValueError
        product_id = list(product_info.keys())[index]
    except (ValueError, IndexError):
        print('Exiting.')
        exit(0)

    # download all the packages for the selected product
    replicate_product(
        catalog, product_id, args.workdir, ignore_cache=args.ignore_cache)

    # generate a name for the sparseimage
    volname = ('Install_macOS_%s-%s'
               % (product_info[product_id]['version'],
                  product_info[product_id]['BUILD']))
    sparse_diskimage_path = os.path.join(args.workdir, volname + '.sparseimage')
    if os.path.exists(sparse_diskimage_path):
        os.unlink(sparse_diskimage_path)

    # make an empty sparseimage and mount it
    print('Making empty sparseimage...')
    sparse_diskimage_path = make_sparse_image(volname, sparse_diskimage_path)
    mountpoint = mountdmg(sparse_diskimage_path)
    if mountpoint:
        # install the product to the mounted sparseimage volume
        success = install_product(
            product_info[product_id]['DistributionPath'],
            mountpoint)
        if not success:
            print('Product installation failed.', file=sys.stderr)
            unmountdmg(mountpoint)
            exit(-1)
        # add the seeding program xattr to the app if applicable
        seeding_program = get_seeding_program(su_catalog_url)
        if seeding_program:
            installer_app = find_installer_app(mountpoint)
            if installer_app:
                print("Adding seeding program %s extended attribute to app"
                      % seeding_program)
                xattr.setxattr(installer_app, 'SeedProgram', seeding_program)
        print('Product downloaded and installed to %s' % sparse_diskimage_path)
        if args.raw:
            unmountdmg(mountpoint)
        else:
            # if the --raw option was not given, create a read-only
            # compressed disk image containing the Install macOS app
            compressed_diskimagepath = os.path.join(
                args.workdir, volname + '.dmg')
            if os.path.exists(compressed_diskimagepath):
                os.unlink(compressed_diskimagepath)
            app_path = find_installer_app(mountpoint)
            if app_path:
                make_compressed_dmg(app_path, compressed_diskimagepath)
            # unmount sparseimage
            unmountdmg(mountpoint)
            # delete sparseimage since we don't need it any longer
            os.unlink(sparse_diskimage_path)
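
Note: mountdmg() and unmountdmg() are not shown in this excerpt; they are assumed to wrap Apple's hdiutil command-line tool. A minimal sketch of the attach side (hypothetical helper, not necessarily the exact implementation this script uses) could look like:

import plistlib
import subprocess

def mountdmg_sketch(dmgpath):
    """Sketch only: attach a disk image and return its first mount point."""
    cmd = ['/usr/bin/hdiutil', 'attach', dmgpath,
           '-mountRandom', '/tmp', '-nobrowse', '-plist']
    proc = subprocess.run(cmd, capture_output=True)
    if proc.returncode:
        return None
    # hdiutil -plist prints a property list describing the attached entities.
    plist = plistlib.loads(proc.stdout)
    for entity in plist.get('system-entities', []):
        if 'mount-point' in entity:
            return entity['mount-point']
    return None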
Example #54
0
def zranger_chdir_handler(signum, frame):
    # The shell side writes the target directory to a per-user handoff file
    # and then signals us; cd ranger there and remove the file.
    tmpfile = "/tmp/zranger-cwd-{}".format(os.getuid())
    with open(tmpfile, "r") as f:
        Command.fm.cd(f.readline().strip())
        os.unlink(tmpfile)
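
This handler only fires if something registers it; in a ranger plugin it would typically be bound to SIGUSR1. A sketch, assuming the surrounding plugin already provides Command.fm:

import signal

# Run zranger_chdir_handler whenever the shell side signals this process.
signal.signal(signal.SIGUSR1, zranger_chdir_handler)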
Example #55
0
def set_directory_ownership(path, username, groupname):
    # chown to arbitrary users requires root; silently do nothing otherwise.
    if os.getuid() == 0:
        uid = pwd.getpwnam(username).pw_uid
        gid = grp.getgrnam(groupname).gr_gid
        os.chown(path, uid, gid)
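
A short usage sketch, applying the helper over a whole directory tree (the path, user, and group names here are illustrative only):

import os

# Illustrative names; substitute your own tree and account.
for dirpath, dirnames, filenames in os.walk('/var/lib/myapp'):
    set_directory_ownership(dirpath, 'myapp', 'myapp')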
Example #56
0
    def __init__(
        self,
        root: Path,
        backend: ProjectBackend,
        filesystem_watcher: util.FileWatcher,
        build_identifiers: BuildIdentifierSet,
    ) -> None:
        root = root.resolve(strict=True)
        self.config, config_diagnostics = ProjectConfig.open(root)
        self.targets = TargetDatabase.load(self.config)

        if config_diagnostics:
            backend.on_diagnostics(
                FileId(self.config.config_path.relative_to(root)),
                config_diagnostics)

        self.parser = rstparser.Parser(self.config, JSONVisitor)
        self.backend = backend
        self.filesystem_watcher = filesystem_watcher
        self.build_identifiers = build_identifiers

        self.postprocessor = (
            DevhubPostprocessor(self.config, self.targets)
            if self.config.default_domain
            else Postprocessor(self.config, self.targets))

        self.yaml_mapping: Dict[str, GizaCategory[Any]] = {
            "steps": gizaparser.steps.GizaStepsCategory(self.config),
            "extracts": gizaparser.extracts.GizaExtractsCategory(self.config),
            "release": gizaparser.release.GizaReleaseSpecificationCategory(self.config),
        }

        # For each repo-wide substitution, parse the string and save to our project config
        inline_parser = rstparser.Parser(self.config, InlineJSONVisitor)
        substitution_nodes: Dict[str, List[n.InlineNode]] = {}
        for k, v in self.config.substitutions.items():
            page, substitution_diagnostics = parse_rst(inline_parser, root, v)
            substitution_nodes[k] = [
                deepcopy(child) for child in page.ast.children  # type: ignore
            ]

            if substitution_diagnostics:
                backend.on_diagnostics(
                    self.get_fileid(self.config.config_path),
                    substitution_diagnostics)

        self.config.substitution_nodes = substitution_nodes

        username = pwd.getpwuid(os.getuid()).pw_name
        branch = subprocess.check_output(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            cwd=root,
            encoding="utf-8").strip()
        self.prefix = [self.config.name, username, branch]

        self.pages: Dict[FileId, Page] = {}

        self.asset_dg: "networkx.DiGraph[FileId]" = networkx.DiGraph()
        self.expensive_operation_cache = Cache()

        published_branches, published_branches_diagnostics = self.get_parsed_branches()
        if published_branches:
            self.backend.on_update_metadata(
                self.prefix,
                self.build_identifiers,
                {"publishedBranches": published_branches.serialize()},
            )

        if published_branches_diagnostics:
            backend.on_diagnostics(self.get_fileid(self.config.config_path),
                                   published_branches_diagnostics)
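
For orientation, the prefix computed above combines the project name, the invoking user, and the current git branch. A standalone illustration (the project name here is hypothetical):

import os
import pwd
import subprocess

username = pwd.getpwuid(os.getuid()).pw_name
branch = subprocess.check_output(
    ["git", "rev-parse", "--abbrev-ref", "HEAD"], encoding="utf-8").strip()
prefix = ["my-project", username, branch]  # e.g. ['my-project', 'alice', 'master']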
Example #57
0
def main():
    if getuid() != 0:
        print('Please run this script as root')
        print('SHUTTING DOWN')
        exit()

    global db_path, log_file_out, redis_ip, redis_instance, syslog_path, hq_ip
    global continents_tracked, countries_tracked, ips_tracked, postal_codes_tracked, event_count, unknowns, ip_to_code, country_to_code

    #args = menu()

    # Connect to Redis
    redis_instance = connect_redis(redis_ip)

    # Find HQ lat/long
    hq_dict = find_hq_lat_long(hq_ip)

    # Follow/parse/format/publish syslog data
    with io.open(syslog_path, "r", encoding='ISO-8859-1') as syslog_file:
        # Fast-forward to EOF so only newly appended lines are processed.
        syslog_file.readlines()
        while True:
            # Emulate `tail -f`: poll for new lines, rewinding on EOF.
            where = syslog_file.tell()
            line = syslog_file.readline()
            if not line:
                sleep(0.1)
                syslog_file.seek(where)
            else:
                syslog_data_dict = parse_syslog(line)
                if syslog_data_dict:
                    ip_db_unclean = parse_maxminddb(db_path,
                                                    syslog_data_dict['src_ip'])
                    if ip_db_unclean:
                        event_count += 1
                        ip_db_clean = clean_db(ip_db_unclean)

                        msg_type = {'msg_type': get_msg_type()}
                        msg_type2 = {
                            'msg_type2': syslog_data_dict['type_attack']
                        }
                        msg_type3 = {
                            'msg_type3': syslog_data_dict['cve_attack']
                        }

                        proto = {
                            'protocol':
                            get_tcp_udp_proto(syslog_data_dict['src_port'],
                                              syslog_data_dict['dst_port'])
                        }
                        super_dict = merge_dicts(hq_dict, ip_db_clean,
                                                 msg_type, msg_type2,
                                                 msg_type3, proto,
                                                 syslog_data_dict)

                        # Track Stats
                        track_stats(super_dict, continents_tracked,
                                    'continent')
                        track_stats(super_dict, countries_tracked, 'country')
                        track_stats(super_dict, ips_tracked, 'src_ip')
                        event_time = strftime("%d-%m-%Y %H:%M:%S",
                                              localtime())  # local time
                        #event_time = strftime("%Y-%m-%d %H:%M:%S", gmtime()) # UTC time
                        track_flags(super_dict, country_to_code, 'country',
                                    'iso_code')
                        track_flags(super_dict, ip_to_code, 'src_ip',
                                    'iso_code')

                        # Append stats to super_dict
                        super_dict['event_count'] = event_count
                        super_dict['continents_tracked'] = continents_tracked
                        super_dict['countries_tracked'] = countries_tracked
                        super_dict['ips_tracked'] = ips_tracked
                        super_dict['unknowns'] = unknowns
                        super_dict['event_time'] = event_time
                        super_dict['country_to_code'] = country_to_code
                        super_dict['ip_to_code'] = ip_to_code

                        json_data = json.dumps(super_dict)
                        redis_instance.publish('attack-map-production',
                                               json_data)

                        #if args.verbose:
                        #    print(ip_db_unclean)
                        #    print('------------------------')
                        #    print(json_data)
                        #    print('Event Count: {}'.format(event_count))
                        #    print('------------------------')

                        print('Event Count: {}'.format(event_count))
                        print('------------------------')

                    else:
                        continue
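
On the consuming side, the published events can be read back with a plain redis-py subscriber. A minimal sketch, with the channel name taken from the code above (host and port are illustrative defaults):

import redis

r = redis.StrictRedis(host='127.0.0.1', port=6379)  # illustrative connection
pubsub = r.pubsub()
pubsub.subscribe('attack-map-production')
for message in pubsub.listen():
    if message['type'] == 'message':
        print(message['data'])  # the JSON blob published by main()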
Example #58
0
    def __init__(self):
        # command line parsing utils
        self.parser = OptionParser()
        self.cmdline_options = None

        # version
        self.version_string = __version__

        # testing
        # This flag lets unit tests hook into or bypass code paths.
        self.testing = False

        # daemonize the process
        self.nodaemon = False

        self.bind_address = '0.0.0.0'
        self.bind_remote_ports = [80, 443]
        self.bind_local_ports = [8082, 8083]

        self.db_type = 'sqlite'

        # debug defaults
        self.orm_debug = False

        # files and paths
        self.src_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..'))
        self.backend_script = os.path.abspath(
            os.path.join(self.src_path, 'globaleaks/backend.py'))

        self.pid_path = '/var/run/globaleaks'
        self.working_path = '/var/globaleaks'

        # TODO(bug-fix-italian-style) why is this set to the 2nd entry in the possible
        # client paths...? please fix.
        self.client_path = '/usr/share/globaleaks/client'
        for path in possible_client_paths:
            if os.path.exists(path):
                self.client_path = path
                break

        self.authentication_lifetime = 3600

        self.accept_submissions = True

        # statistics for the latest period,
        # reset by the session_management scheduler
        self.failed_login_attempts = 0

        # static file rules
        self.staticfile_regexp = r'(.*)'
        self.staticfile_overwrite = False

        self.local_hosts = ['127.0.0.1', 'localhost']

        self.onionservice = None

        # Default request time uniform value
        self.side_channels_guard = 150

        # SOCKS default
        self.socks_host = "127.0.0.1"
        self.socks_port = 9050

        self.key_bits = 4096
        self.csr_sign_bits = 512

        self.notification_limit = 30
        self.jobs_operation_limit = 20

        self.user = getpass.getuser()
        self.group = getpass.getuser()

        self.uid = os.getuid()
        self.gid = os.getgid()

        self.devel_mode = False
        self.disable_swap = False
        self.enable_csp = True

        # Number of failed login enough to generate an alarm
        self.failed_login_alarm = 5

        # Number of minutes in which a user is prevented to login in case of triggered alarm
        self.failed_login_block_time = 5

        # Limits for log sizes and number of log files
        # https://github.com/globaleaks/GlobaLeaks/issues/1578
        self.log_size = 10000000  # 10MB
        self.log_file_size = 1000000  # 1MB
        # Integer division: the file count must be an int, not a float.
        self.num_log_files = self.log_size // self.log_file_size

        self.AES_file_regexp = r'(.*)\.aes'
        self.AES_keyfile_prefix = "aeskey-"

        self.exceptions_email_hourly_limit = 20

        self.enable_input_length_checks = True

        self.mail_timeout = 15  # seconds
        self.mail_attempts_limit = 3  # per mail limit

        self.acme_directory_url = 'https://acme-v02.api.letsencrypt.org/directory'

        self.enable_api_cache = True

        self.eval_paths()
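
A quick sanity check of the log-rotation arithmetic above (values copied from the defaults):

log_size = 10000000       # 10MB total budget
log_file_size = 1000000   # 1MB per file
assert log_size // log_file_size == 10  # // keeps the count an integer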
Example #59
0
def main():
    username = '******'

    if os.name != 'nt':
        import pwd
        username = pwd.getpwuid(os.getuid()).pw_name

    parser = argparse.ArgumentParser(prog="infrabox")
    parser.add_argument("--url",
                        required=False,
                        default=os.environ.get('INFRABOX_URL', None),
                        help="Address of the API server")
    parser.add_argument(
        "--ca-bundle",
        required=False,
        default=os.environ.get('INFRABOX_CA_BUNDLE', None),
        help="Path to a CA_BUNDLE file or directory with certificates of trusted CAs")
    parser.add_argument("-f",
                        dest='infrabox_file',
                        required=False,
                        type=str,
                        help="Path to an infrabox.json or infrabox.yaml file")
    sub_parser = parser.add_subparsers(help='sub-command help')

    # version
    version_init = sub_parser.add_parser('version',
                                         help='Show the current version')
    version_init.set_defaults(version=version)

    # init
    parser_init = sub_parser.add_parser('init', help='Create a simple project')
    parser_init.set_defaults(is_init=True)
    parser_init.set_defaults(func=init)

    # push
    parser_push = sub_parser.add_parser(
        'push', help='Push a local project to InfraBox')
    parser_push.add_argument("--show-console",
                             action='store_true',
                             required=False,
                             help="Show the console output of the jobs")
    parser_push.set_defaults(show_console=False)
    parser_push.set_defaults(validate_only=False)
    parser_push.set_defaults(func=push)

    # pull
    parser_pull = sub_parser.add_parser('pull', help='Pull a remote job')
    parser_pull.set_defaults(is_pull=True)
    parser_pull.add_argument("--job-id", required=True)
    parser_pull.add_argument(
        "--no-container",
        required=False,
        dest='pull_container',
        action='store_false',
        help="Download only the inputs, not the actual container. Implies --no-run.")
    parser_pull.set_defaults(pull_container=True)

    parser_pull.add_argument("--no-run",
                             required=False,
                             dest='run_container',
                             action='store_false',
                             help="The container will not be run.")
    parser_pull.set_defaults(run_container=True)
    parser_pull.set_defaults(func=pull)

    # graph
    parser_graph = sub_parser.add_parser(
        'graph', help='Generate a graph of your local jobs')
    parser_graph.set_defaults(func=graph)

    # validate
    validate_graph = sub_parser.add_parser(
        'validate', help='Validate infrabox.json or infrabox.yaml')
    validate_graph.set_defaults(func=validate)

    # list
    list_job = sub_parser.add_parser('list', help='List all available jobs')
    list_job.set_defaults(func=list_jobs)

    # install
    install = sub_parser.add_parser('install', help='Setup InfraBox')
    install.set_defaults(is_install=True)
    install.set_defaults(func=install_infrabox)

    # run
    parser_run = sub_parser.add_parser('run', help='Run your jobs locally')
    parser_run.add_argument("job_name",
                            nargs="?",
                            type=str,
                            help="Job name to execute")
    parser_run.add_argument(
        "--no-rm",
        action='store_true',
        required=False,
        help="Does not run 'docker-compose rm' before building")
    parser_run.add_argument("--build-arg",
                            required=False,
                            type=str,
                            nargs='?',
                            help="Set docker build arguments",
                            action='append')
    parser_run.add_argument("--env",
                            required=False,
                            type=str,
                            nargs='?',
                            help="Override environment variables",
                            action='append')
    parser_run.add_argument(
        "--env-file",
        required=False,
        type=str,
        default=None,
        help="Environment file to override environment values")
    parser_run.add_argument("-t",
                            dest='tag',
                            required=False,
                            type=str,
                            help="Docker image tag")
    parser_run.add_argument("-c",
                            "--children",
                            action='store_true',
                            help="Also run children of a job")
    parser_run.add_argument(
        "--local-cache",
        required=False,
        type=str,
        default="/tmp/{}/infrabox/local-cache".format(username),
        help="Path to the local cache")
    parser_run.add_argument("--memory",
                            required=False,
                            type=float,
                            help="Override a memory limit for your job")
    parser_run.add_argument("--cpu",
                            required=False,
                            type=float,
                            help="Override a cpu limit for your job")
    parser_run.add_argument("--unlimited",
                            action='store_true',
                            required=False,
                            help="Do not apply cpu and mem limits.")
    parser_run.set_defaults(no_rm=False)
    parser_run.set_defaults(func=run)

    # Project
    parser_project = sub_parser.add_parser('project',
                                           help='Manage your project')
    parser_project.add_argument('--project-name',
                                dest='remote_project_name',
                                required=False,
                                type=str)
    parser_project.set_defaults(project_command=True)
    sub_project = parser_project.add_subparsers(dest='project')

    # Project list
    parser_projects_list = sub_project.add_parser(
        'list', help='Get a list of all your projects')
    parser_projects_list.add_argument('--verbose',
                                      required=False,
                                      default=True,
                                      type=str2bool)
    parser_projects_list.set_defaults(func=project.list_projects)

    # Project status
    parser_projects_list = sub_project.add_parser(
        'status', help='Get some info about your current project')
    parser_projects_list.add_argument('--verbose',
                                      required=False,
                                      default=True,
                                      type=str2bool)
    parser_projects_list.set_defaults(func=project.print_status)

    # Create project
    parser_project_create = sub_project.add_parser('create',
                                                   help='Create a new project')
    parser_project_create.add_argument(
        '--name',
        required=True,
        type=str,
        help='Name of the project you want to create')
    parser_project_create.add_argument(
        '--type',
        required=True,
        type=str,
        help='Type of the project you want to create: upload, github or gerrit')
    parser_project_create.add_argument('--public',
                                       required=False,
                                       default=False,
                                       action='store_true',
                                       help='Make your project public')
    parser_project_create.add_argument('--private',
                                       required=False,
                                       default=False,
                                       action='store_true',
                                       help='Make your project private')
    parser_project_create.set_defaults(func=project.create_project)

    parser_project_delete = sub_project.add_parser('delete',
                                                   help='Delete a project')
    parser_project_delete.add_argument(
        '--name',
        required=False,
        type=str,
        help='Name of the project you want to delete')
    parser_project_delete.add_argument(
        '--id',
        required=False,
        type=str,
        help='Id of the project you want to delete')
    parser_project_delete.set_defaults(func=project.delete_project)

    # Collaborators
    parser_collaborators = sub_project.add_parser(
        'collaborators', help='Add or remove collaborators for your project')
    sub_collaborators = parser_collaborators.add_subparsers()

    parser_list_collaborators = sub_collaborators.add_parser(
        'list', help='Show collaborators list')
    parser_list_collaborators.add_argument('--verbose',
                                           required=False,
                                           default=True,
                                           type=str2bool)
    parser_list_collaborators.set_defaults(func=project.list_collaborators)

    parser_add_collaborator = sub_collaborators.add_parser(
        'add', help='Add a collaborator')
    parser_add_collaborator.add_argument(
        '--username',
        required=True,
        type=str,
        help='Username of the collaborator you want to add')
    parser_add_collaborator.set_defaults(func=project.add_collaborator)

    parser_remove_collaborator = sub_collaborators.add_parser(
        'remove', help='Remove a collaborator')
    parser_remove_collaborator.add_argument(
        '--username',
        required=True,
        type=str,
        help='Username of the collaborator you want to remove')
    parser_remove_collaborator.set_defaults(func=project.remove_collaborator)

    # Secrets
    parser_secrets = sub_project.add_parser('secrets',
                                            help='Create or delete secrets')
    sub_secrets = parser_secrets.add_subparsers()

    parser_list_secrets = sub_secrets.add_parser('list',
                                                 help='Show all your secrets')
    parser_list_secrets.add_argument('--verbose',
                                     required=False,
                                     default=True,
                                     type=str2bool)
    parser_list_secrets.set_defaults(func=project.list_secrets)

    parser_create_secret = sub_secrets.add_parser('create',
                                                  help='Create a secret')
    parser_create_secret.add_argument('--name',
                                      required=True,
                                      type=str,
                                      help='Name of the secret')
    parser_create_secret.add_argument('--value',
                                      required=True,
                                      type=str,
                                      help='Value of the secret')
    parser_create_secret.set_defaults(func=project.add_secret)

    parser_delete_secret = sub_secrets.add_parser('delete',
                                                  help='Delete a secret')
    parser_delete_secret.add_argument(
        '--name',
        required=False,
        type=str,
        help='Name of the secret you want to delete')
    parser_delete_secret.add_argument(
        '--id',
        required=False,
        type=str,
        help='Id of the secret you want to delete')
    parser_delete_secret.set_defaults(func=project.delete_secret)

    # Tokens
    parsers_project_tokens = sub_project.add_parser(
        'tokens', help='Manage your project tokens')
    sub_project_tokens = parsers_project_tokens.add_subparsers()

    parser_list_project_tokens = sub_project_tokens.add_parser(
        'list', help='Show all your project tokens')
    parser_list_project_tokens.add_argument('--verbose',
                                            required=False,
                                            default=True,
                                            type=str2bool)
    parser_list_project_tokens.set_defaults(func=project.list_project_tokens)

    parser_add_project_token = sub_project_tokens.add_parser(
        'create', help='Create a project token')
    parser_add_project_token.add_argument(
        '--description',
        required=True,
        type=str,
        help='Description of the project token you want to create')
    #TODO<Steffen> when scope push/pull functionality is implemented, uncomment following 2 lines
    #parser_add_project_token.add_argument('--scope_push', required=False, default=True, type=str2bool, help='Scope push')
    #parser_add_project_token.add_argument('--scope_pull', required=False, default=True, type=str2bool, help='Scope pull')
    parser_add_project_token.set_defaults(func=project.add_project_token)

    parser_remove_project_token = sub_project_tokens.add_parser(
        'delete', help='Delete a project token')
    parser_remove_project_token.add_argument(
        '--id',
        required=False,
        type=str,
        help='Id of the project token you want to delete')
    parser_remove_project_token.add_argument(
        '--description',
        required=False,
        type=str,
        help='Description of the project token you want to delete')
    parser_remove_project_token.set_defaults(func=project.delete_project_token)

    # Login
    parser_login = sub_parser.add_parser('login', help='Login to infrabox')
    parser_login.add_argument('remote_url',
                              nargs='?',
                              type=str,
                              help='Name of remote')
    parser_login.add_argument('--email',
                              required=False,
                              default=None,
                              type=str,
                              help='Email of the user')
    parser_login.add_argument('--password',
                              required=False,
                              default=None,
                              type=str,
                              help='Password of the user')
    parser_login.set_defaults(func=user.login)

    # Logout
    parser_logout = sub_parser.add_parser('logout',
                                          help='Logout from current remote')
    parser_logout.set_defaults(func=user.logout)

    # Config
    parser_config = sub_parser.add_parser('config',
                                          help='Configure your infrabox')
    sub_config = parser_config.add_subparsers(dest='config')

    parser_config_current_project = sub_config.add_parser(
        'set-current-project', help='Set new current project')
    parser_config_current_project.add_argument('project_name',
                                               nargs='?',
                                               type=str,
                                               help='Name of the project')
    parser_config_current_project.set_defaults(
        func=local_config.set_current_project_name)

    # Remotes
    parser_remotes = sub_parser.add_parser('remotes', help='Current remotes')
    sub_remotes = parser_remotes.add_subparsers()
    parser_remotes_list = sub_remotes.add_parser('list',
                                                 help='Show all your remotes')
    parser_remotes_list.add_argument('--verbose',
                                     required=False,
                                     default=True,
                                     type=str2bool)
    parser_remotes_list.set_defaults(func=remotes.list_remotes)

    # Parse args
    args = parser.parse_args()

    if 'version' in args:
        print('infraboxcli %s' % version)
        return

    if "DOCKER_HOST" in os.environ:
        logger.error("DOCKER_HOST is set")
        logger.error("infrabox can't be used to run jobs on a remote machine")
        sys.exit(1)

    if args.ca_bundle:
        if args.ca_bundle.lower() == "false":
            args.ca_bundle = False
            # according to: https://stackoverflow.com/a/28002687/131120
            import requests.packages.urllib3 as urllib3
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        else:
            if not os.path.exists(args.ca_bundle):
                logger.error("INFRABOX_CA_BUNDLE: %s not found" %
                             args.ca_bundle)
                sys.exit(1)

    if args.infrabox_file:
        if not os.path.exists(args.infrabox_file):
            logger.error('%s does not exist' % args.infrabox_file)
            sys.exit(1)

        p = os.path.abspath(args.infrabox_file)

        args.project_root = p[0:p.rfind('/')]
        args.infrabox_file_path = p
        args.project_name = os.path.basename(p)
    else:
        # Find infrabox.json
        p = os.getcwd()

        while p:
            tb = os.path.join(p, 'infrabox.json')
            if not os.path.exists(tb):
                tb = os.path.join(p, 'infrabox.yaml')
            if not os.path.exists(tb):
                p = p[0:p.rfind('/')]
            else:
                args.project_root = p
                args.infrabox_file_path = tb
                args.project_name = os.path.basename(p)
                break

    if 'job_name' not in args:
        args.children = True

    if 'project_root' not in args and 'is_init' not in args and 'is_pull' not in args and 'is_install' not in args:
        logger.error(
            "infrabox.json or infrabox.yaml not found in current or any parent directory"
        )
        sys.exit(1)

    # Run command
    args.func(args)
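
str2bool is used as an argparse type above but is not part of this excerpt; a common recipe (an assumption, not necessarily the helper this project ships) is:

import argparse

def str2bool(v):
    # Map common truthy/falsy spellings onto real booleans for argparse.
    # Common recipe; the project's actual helper may differ.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')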
Example #60
0
    def create_dir(self, destroy_dir=True):
        """Create the instance directory and all the needed files there."""

        if destroy_dir:
            self.destroy_dir()
        elif p.exists(self.path):
            return

        os.makedirs(self.path)

        configs_dir = p.abspath(p.join(self.path, 'configs'))
        os.mkdir(configs_dir)

        shutil.copy(p.join(self.base_configs_dir, 'config.xml'), configs_dir)
        shutil.copy(p.join(self.base_configs_dir, 'users.xml'), configs_dir)

        config_d_dir = p.abspath(p.join(configs_dir, 'config.d'))
        users_d_dir = p.abspath(p.join(configs_dir, 'users.d'))
        os.mkdir(config_d_dir)
        os.mkdir(users_d_dir)

        shutil.copy(p.join(HELPERS_DIR, 'common_instance_config.xml'),
                    config_d_dir)

        # Generate and write macros file
        macros = self.macros.copy()
        macros['instance'] = self.name
        with open(p.join(config_d_dir, 'macros.xml'), 'w') as macros_config:
            macros_config.write(self.dict_to_xml({"macros": macros}))

        # Put ZooKeeper config
        if self.with_zookeeper:
            shutil.copy(self.zookeeper_config_path, config_d_dir)

        # Copy config dir
        if self.custom_config_dir:
            distutils.dir_util.copy_tree(self.custom_config_dir, configs_dir)

        # Copy config.d configs
        for path in self.custom_main_config_paths:
            shutil.copy(path, config_d_dir)

        # Copy users.d configs
        for path in self.custom_user_config_paths:
            shutil.copy(path, users_d_dir)

        db_dir = p.abspath(p.join(self.path, 'database'))
        os.mkdir(db_dir)
        if self.clickhouse_path_dir is not None:
            distutils.dir_util.copy_tree(self.clickhouse_path_dir, db_dir)

        logs_dir = p.abspath(p.join(self.path, 'logs'))
        os.mkdir(logs_dir)

        depends_on = []

        if self.with_mysql:
            depends_on.append("mysql1")

        if self.with_kafka:
            depends_on.append("kafka1")

        if self.with_zookeeper:
            depends_on.append("zoo1")
            depends_on.append("zoo2")
            depends_on.append("zoo3")

        env_file = _create_env_file(os.path.dirname(self.docker_compose_path),
                                    self.env_variables)

        odbc_ini_path = ""
        if self.odbc_ini_path:
            self._create_odbc_config_file()
            odbc_ini_path = '- ' + self.odbc_ini_path

        with open(self.docker_compose_path, 'w') as docker_compose:
            docker_compose.write(
                DOCKER_COMPOSE_TEMPLATE.format(
                    image=self.image,
                    name=self.name,
                    hostname=self.hostname,
                    uid=os.getuid(),
                    binary_path=self.server_bin_path,
                    configs_dir=configs_dir,
                    config_d_dir=config_d_dir,
                    db_dir=db_dir,
                    logs_dir=logs_dir,
                    depends_on=str(depends_on),
                    env_file=env_file,
                    odbc_ini_path=odbc_ini_path,
                ))
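
self.dict_to_xml() above is not included in this excerpt. For the flat macros case it handles, a minimal sketch built on the standard library (hypothetical, not the project's actual helper) could be:

import xml.etree.ElementTree as ET

def dict_to_xml(d):
    # Sketch only: one root key whose value is a flat dict of tag -> text.
    (root_tag, children), = d.items()
    root = ET.Element(root_tag)
    for key, value in children.items():
        ET.SubElement(root, key).text = str(value)
    return ET.tostring(root, encoding='unicode')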