Example #1
def new_global_journal_with_path(path, **metadata):
  """Creates a global journal persisted at the provided path.

  Args:
    path: [string] The path to the journal to open.
    metadata: [kwargs] The journal metadata to write into the journal.
  """
  global _global_journal
  global _added_atexit
  _global_lock.acquire(True)
  try:
    if _global_journal is not None:
      raise ValueError('Global journal was already set.')

    if not _added_atexit:
      atexit.register(_atexit_handler)
      _added_atexit = True

    journal_file = open(path, 'w')
    os.fchmod(journal_file.fileno(), 0o600)  # Protect sensitive data.
    journal = Journal()
    journal.open_with_file(journal_file, **metadata)

    _global_journal = journal
  finally:
    _global_lock.release()

  return journal
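Note on the pattern above: os.fchmod() is called on the still-open file object, so the journal is restricted to its owner before any sensitive data is written. A minimal standalone sketch of the same idea (the path and payload here are made up):

import os

def open_private(path):
    """Open path for writing, owner-only, before any data lands on disk."""
    f = open(path, 'w')
    os.fchmod(f.fileno(), 0o600)  # rw-------
    return f

with open_private('journal.json') as f:
    f.write('{"secret": "value"}\n')

Strictly speaking, open() first creates the file with umask-derived permissions, so for a zero-window guarantee you would pass the mode to os.open() directly, as Example #20 below does.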
Example #2
    def _inject_multi_node_api(self, mntdir, target):
        shell = target.deployment_data['lava_test_sh_cmd']

        # Generic scripts
        scripts_to_copy = glob(os.path.join(LAVA_MULTI_NODE_TEST_DIR, 'lava-*'))

        for fname in scripts_to_copy:
            with open(fname, 'r') as fin:
                foutname = os.path.basename(fname)
                with open('%s/bin/%s' % (mntdir, foutname), 'w') as fout:
                    fout.write("#!%s\n\n" % shell)
                    # Target-specific scripts (add ENV to the generic ones)
                    if foutname == LAVA_GROUP_FILE:
                        fout.write('LAVA_GROUP="\n')
                        if 'roles' in self.context.group_data:
                            for client_name in self.context.group_data['roles']:
                                fout.write(r"\t%s\t%s\n" % (client_name, self.context.group_data['roles'][client_name]))
                        else:
                            logging.debug("group data MISSING")
                        fout.write('"\n')
                    elif foutname == LAVA_ROLE_FILE:
                        fout.write("TARGET_ROLE='%s'\n" % self.context.test_data.metadata['role'])
                    elif foutname == LAVA_SELF_FILE:
                        fout.write("LAVA_HOSTNAME='%s'\n" % self.context.test_data.metadata['target.hostname'])
                    else:
                        fout.write("LAVA_TEST_BIN='%s/bin'\n" %
                                   target.lava_test_dir)
                        fout.write("LAVA_MULTI_NODE_CACHE='%s'\n" % LAVA_MULTI_NODE_CACHE_FILE)
                        logging_level = self.context.test_data.metadata.get(
                            'logging_level', None)
                        if logging_level and logging_level == 'DEBUG':
                            fout.write("LAVA_MULTI_NODE_DEBUG='yes'\n")
                    fout.write(fin.read())
                    os.fchmod(fout.fileno(), XMOD)
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output", metavar="FILE", required=True, type=argparse.FileType(mode="wb"))
    ns = parser.parse_args()
    dos_header = pe.IMAGE_DOS_HEADER()
    dos_header.e_lfanew = sizeof(dos_header)
    nt_headers = pe.IMAGE_NT_HEADERS()
    nt_headers.FileHeader.Machine = pe.IMAGE_FILE_MACHINE_I386
    nt_headers.FileHeader.Characteristics = pe.IMAGE_FILE_EXECUTABLE_IMAGE
    nt_headers.OptionalHeader.Magic = pe.MAGIC_PE32
    nt_headers.OptionalHeader.AddressOfEntryPoint = 0x140
    nt_headers.OptionalHeader.ImageBase = 0x400000
    nt_headers.OptionalHeader.SectionAlignment = 1
    nt_headers.OptionalHeader.FileAlignment = 1
    nt_headers.OptionalHeader.MajorSubsystemVersion = 4
    nt_headers.OptionalHeader.SizeOfImage = 0x160
    nt_headers.OptionalHeader.SizeOfHeaders = 0x140
    nt_headers.OptionalHeader.Subsystem = pe.IMAGE_SUBSYSTEM_WINDOWS_CUI

    code = emit_code((MOV, EAX, imm32(42)), (ADD, EAX, 1), (RET,))
    with ns.output as stream:
        stream.write(dos_header)
        stream.write(nt_headers)
        stream.seek(nt_headers.OptionalHeader.SizeOfHeaders)
        stream.write(code)
        stream.seek(nt_headers.OptionalHeader.SizeOfImage)
        stream.truncate(nt_headers.OptionalHeader.SizeOfImage)
        assert stream.tell() == 0x160, hex(stream.tell())
        if sys.platform != "win32":
            os.fchmod(stream.fileno(), 0o755)
Example #4
    def restore_attrs(self, path, item, symlink=False, fd=None):
        xattrs = item.get(b'xattrs')
        if xattrs:
            for k, v in xattrs.items():
                try:
                    xattr.setxattr(fd or path, k, v)
                except OSError as e:
                    if e.errno != errno.ENOTSUP:
                        raise
        uid = gid = None
        if not self.numeric_owner:
            uid = user2uid(item[b'user'])
            gid = group2gid(item[b'group'])
        uid = uid or item[b'uid']
        gid = gid or item[b'gid']
        # This code is a bit of a mess due to os specific differences
        try:
            if fd:
                os.fchown(fd, uid, gid)
            else:
                os.lchown(path, uid, gid)
        except OSError:
            pass
        if fd:
            os.fchmod(fd, item[b'mode'])
        elif not symlink:
            os.chmod(path, item[b'mode'])
        elif has_lchmod:  # Not available on Linux
            os.lchmod(path, item[b'mode'])
        if fd and utime_supports_fd:  # Python >= 3.3
            os.utime(fd, None, ns=(item[b'mtime'], item[b'mtime']))
        elif utime_supports_fd:  # Python >= 3.3
            os.utime(path, None, ns=(item[b'mtime'], item[b'mtime']), follow_symlinks=False)
        elif not symlink:
            os.utime(path, (item[b'mtime'] / 10**9, item[b'mtime'] / 10**9))
Example #5
    def save(self):
        if self.read_only:
            raise StandardError("Tried to save read only file!")
        else:
            # write back to the buffer
            self.imageMeta.write()
            if type(self.outfile) == file:
                self.outfile.write(self.imageMeta.buffer)
            else:
                path = MetaDataCollection.absolute_path(self.outfile)
                dirname = os.path.dirname(path)
                (outfd, tmpname) = mkstemp(dir=dirname, prefix='.tmp')
                try:
                    outfile = os.fdopen(outfd, "w")
                    outfile.write(self.imageMeta.buffer)

                    if self.infile != sys.stdin:
                        os.fchmod(outfd, set_perms(os.stat(self.infile.name).st_mode))
                    else:
                        os.fchmod(outfd, set_perms())

                    outfile.close()
                    os.rename(tmpname, path)
                except Exception as e:
                    os.remove(tmpname)
                    raise e
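Example #5 above combines mkstemp(), os.fchmod() and os.rename() so readers of the target path never observe a half-written file. A condensed sketch of that write-temp-then-rename pattern (the helper name and default mode are my own):

import os
from tempfile import mkstemp

def atomic_write(path, data, mode=0o644):
    # The temp file must live in the same directory for the rename to be atomic.
    fd, tmp = mkstemp(dir=os.path.dirname(path) or '.', prefix='.tmp')
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(data)
            os.fchmod(f.fileno(), mode)  # mkstemp creates the file as 0o600
        os.rename(tmp, path)
    except BaseException:
        os.remove(tmp)
        raise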
Example #6
 def powerOn(self):
     user = pwd.getpwuid(os.getuid())[0]
     tf, tfn = tempfile.mkstemp()
     os.fchmod(tf, 0o755)
     os.write(tf, "#!/bin/sh\nifconfig $1 up\n")
     os.write(tf, "brctl addif %s $1\n" % self.controlBridge())
     os.close(tf)
     try:
         bigtest.sudo(
             ["kvm",
              "-runas", user,
              "-daemonize",
              "-nographic",
              "-pidfile", os.path.join(self.statedir_, "pid"),
              "-vnc", ":%d" % (self.vncPort()-5900),
              "-qmp", "unix:%s,server,nowait" % os.path.join(self.statedir_, "monitor"),
              "-serial", "unix:%s,server,nowait" % os.path.join(self.statedir_, "console"),
              "-net", "tap,ifname=%s,script=%s,downscript=no" % (self.name(), tfn)]
             + list(self.vmArgs()))
     finally:
         for fn in ["pid", "monitor", "console"]:
             p = os.path.join(self.statedir_, fn)
             if os.path.exists(p):
                 bigtest.sudo(["chown", user, p])
     os.unlink(tfn)
Example #7
def write_load_config(repo_dir, saved_state_path, changed_files=[]):
    """
    Writes a .hhconfig that allows hh_client to launch hh_server from a saved
    state archive.

    repo_dir: Repository to run hh_server on
    saved_state_path: Path to file containing saved server state
    changed_files: list of strings
    """
    fd, recheck_fn = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as f:
        f.write("\n".join(changed_files))

    with open(os.path.join(repo_dir, 'server_options.sh'), 'w') as f:
        f.write(r"""
#! /bin/sh
echo --load \"%s %s\"
        """ % (saved_state_path, recheck_fn))
        os.fchmod(f.fileno(), 0o700)

    with open(os.path.join(repo_dir, '.hhconfig'), 'w') as f:
        # we can't just write 'echo ...' inline because Hack server will
        # be passing this command some command-line options
        f.write(r"""
# some comment
server_options_cmd = %s
        """ % os.path.join(repo_dir, 'server_options.sh'))
Example #8
    def ui_command_saveconfig(self, savefile=default_save_file):
        '''
        Saves the current configuration to a file so that it can be restored
        on next boot.
        '''
        self.assert_root()

        savefile = os.path.expanduser(savefile)

        backupfile = savefile + ".backup"
        try:
            shutil.move(savefile, backupfile)
            self.shell.log.info("Existing file %s backed up to %s" % \
                                    (savefile, backupfile.split('/')[-1]))
        except IOError:
            pass

        with open(savefile+".temp", "w+") as f:
            os.fchmod(f.fileno(), stat.S_IRUSR | stat.S_IWUSR)
            f.write(json.dumps(RTSRoot().dump(), sort_keys=True, indent=2))
            f.write("\n")
            os.fsync(f.fileno())

        os.rename(savefile+".temp", savefile)

        self.shell.log.info("Configuration saved to %s" % savefile)
Example #9
 def test_failure(self):
   testScript = os.path.join(self.tempDir, "test")
   with open(testScript, "w") as f:
     f.write("#!/bin/bash\n\necho TESTING\nexit 1")
     os.fchmod(f.fileno(), 0o700)
   self.assertFalse(cronbackoff.execute([testScript]))
   os.unlink(testScript)
Example #10
 def test_no_file_write_perms(self):
   with open(self.state.filePath, 'w') as f:
     os.fchmod(f.fileno(), 0o400)
   with self.assertRaises(cronbackoff.CronBackoffException) as ctx:
     self.state._lock()
   self.assertEqual(ctx.exception.errno, errno.EACCES)
   os.unlink(self.state.filePath)
Example #11
    def run(self):
        """
            Imports the data and syncs dependent system components, such as GeoServer to the imported data.
        """

        with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
            self.passwordfile = f.name

            for connection in uniq(self.connections):
                f.write("{0}\n".format(connection))

            # A pgpass file must have permissions of 0600 (or less permissive):
            # http://www.postgresql.org/docs/9.3/static/libpq-pgpass.html
            os.fchmod(f.fileno(), stat.S_IRUSR | stat.S_IWUSR)

        # Set the ENV variable to use this file
        os.environ['PGPASSFILE'] = self.passwordfile
        logger.info("Created password file at %s" % self.passwordfile)

        logger.info("Importing data")

        create_schema_command = "psql {0} -c 'CREATE SCHEMA IF NOT EXISTS {1}; GRANT ALL ON SCHEMA {1} TO public;'".format(
            self.target_database_connection,
            settings.IMPORT_SCHEMA
        )
        results = self.command_execution.run(create_schema_command)
        if results.returncode:
            raise Exception(results.stderr.text)

        self.import_data()

        os.remove(self.passwordfile)
        del os.environ['PGPASSFILE']
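libpq rejects a password file whose mode is more permissive than 0600, which is why Example #11 tightens the temporary file with os.fchmod() before exporting PGPASSFILE. The same pattern in isolation (the connection entry is a made-up placeholder):

import os
import stat
import tempfile

with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
    os.fchmod(f.fileno(), stat.S_IRUSR | stat.S_IWUSR)  # 0o600
    f.write('localhost:5432:mydb:myuser:secret\n')  # hypothetical .pgpass line
    pgpass = f.name

os.environ['PGPASSFILE'] = pgpass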
Example #12
def main():
    args = parse_args()
    genPyHello(args.outFilename[0])
    # Give execute rights to everyone.
    fd = os.open(args.outFilename[0], os.O_RDONLY)
    os.fchmod(fd, 0o775)
    os.close(fd)
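Example #12 opens the file read-only purely to obtain a descriptor: os.fchmod() changes the mode of whatever the descriptor refers to, not whatever the path happens to name at that instant. A small helper capturing this, with try/finally so the descriptor cannot leak:

import os

def chmod_via_fd(path, mode):
    fd = os.open(path, os.O_RDONLY)
    try:
        os.fchmod(fd, mode)  # applies to the open file, immune to path swaps
    finally:
        os.close(fd)

On Python 3.3+ the same effect is available as os.chmod(fd, mode) wherever os.chmod in os.supports_fd is true.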
Example #13
    def install_sshd(self):
        self._install_package_atoms(['net-misc/openssh'])

        init_script_path = os.path.join(self._abs_mountpoint, 'etc/init.d/sshd-need-root')
        with open(init_script_path, 'w') as f:
            print(dedent("""\
                    #!/sbin/runscript
                    # Workaround to ensure that sshd has a writable root file system
                    # during key generation
                    # https://bugs.gentoo.org/show_bug.cgi?id=554804
                    #
                    # Copyright (C) 2015 Sebastian Pipping <*****@*****.**>
                    # Licensed under AGPL v3 or later

                    depend() {
                        if ! ls /etc/ssh/ssh_host_*_key 1>/dev/null 2>/dev/null; then
                            need root
                        fi
                        before sshd
                    }

                    start() { :; }
                    stop() { :; }
                    """), file=f)
            os.fchmod(f.fileno(), 0o755)
Example #14
  def StartUploadServer(self):
    logging.info('StartUploadServer: started')
    try:
      filepath = self._file_op[1]
      dirname = os.path.dirname(filepath)
      if not os.path.exists(dirname):
        try:
          os.makedirs(dirname)
        except Exception:
          pass

      with open(filepath, 'wb') as f:
        if self._file_op[2]:
          os.fchmod(f.fileno(), self._file_op[2])

        f.write(self._sock.RecvBuf())

        while True:
          rd, unused_wd, unused_xd = select.select([self._sock], [], [])
          if self._sock in rd:
            buf = self._sock.Recv(_BLOCK_SIZE)
            if len(buf) == 0:
              break
            f.write(buf)
    except socket.error as e:
      logging.error('StartUploadServer: socket error: %s', e)
    except Exception as e:
      logging.error('StartUploadServer: %s', e)
    finally:
      self._sock.Close()

    logging.info('StartUploadServer: terminated')
    sys.exit(0)
Example #15
def writerdepscript(job, config):
    # Populate the list of rdeps
    rdeps = []
    for p in job.packageList:
        rdeps = rdeps + stablerdeps(p, config)
    if len(rdeps) == 0:
        print("No stable rdeps for " + job.name)
        return

    # If there are rdeps, write the script
    try:
        rdepheaderfile = open(config['template-dir'] + "revdep-header", 'r')
    except IOError:
        print("revdep-header not found in " + config['template-dir'])
        sys.exit(1)
    rdepheader = rdepheaderfile.read().replace("@@JOB@@", job.name)
    outfilename = job.name + "-rdeps.sh"
    reportname = job.name + ".report"
    if os.path.isfile(outfilename):
        print("WARNING: Will overwrite " + outfilename)
    outfile = open(outfilename, 'w')
    outfile.write(rdepheader)

    for r in rdeps:
        # Todo: remove duplicates
        localsnippet = rdepTestString(r, config)
        outfile.write(localsnippet.replace("@@REPORTFILE@@", reportname))
    os.fchmod(outfile.fileno(), 0o744)  # 484 decimal == 0o744 (rwxr--r--)
    outfile.close()
Example #16
def exec_func_shell(function, d, runfile, logfile, cwd=None, fakeroot=False):
    """Execute a shell function from the metadata

    Note on directory behavior.  The 'dirs' varflag should contain a list
    of the directories you need created prior to execution.  The last
    item in the list is where we will chdir/cd to.
    """

    # Don't let the emitted shell script override PWD
    d.delVarFlag('PWD', 'export')

    with open(runfile, 'w') as script:
        script.write('#!/bin/sh -e\n')
        data.emit_func(function, script, d)

        script.write("set -x\n")
        script.write("%s\n" % function)
        os.fchmod(script.fileno(), 0o775)

    if fakeroot:
        cmd = ['fakeroot', runfile]
    else:
        cmd = runfile

    if logger.isEnabledFor(logging.DEBUG):
        logfile = LogTee(logger, logfile)

    try:
        bb.process.run(cmd, cwd=cwd, shell=False, stdin=NULL, log=logfile)
    except bb.process.CmdError:
        raise FuncFailed(function, logfile.name)
Example #17
def write_xar(fn, hdr, tocdata, heap, keep_old=False):
    ztocdata = zlib.compress(tocdata)
    digest = toc_digest(hdr, ztocdata)
    newhdr = dict(hdr,
                  toc_length_uncompressed=len(tocdata),
                  toc_length_compressed=len(ztocdata))
    outf = NamedTemporaryFile(prefix='.' + os.path.basename(fn),
                              dir=os.path.dirname(fn),
                              delete=False)
    try:
        st_mode = os.stat(fn).st_mode
        if os.fstat(outf.fileno()).st_mode != st_mode:
            os.fchmod(outf.fileno(), st_mode)
    except OSError:
        pass
    try:
        outf.writelines([HEADER.pack(newhdr),
                         ztocdata,
                         digest])
        copyfileobj(heap, outf)
        outf.close()
    except:
        outf.close()
        os.unlink(outf.name)
        raise
    if keep_old:
        oldfn = fn + '.old'
        if os.path.exists(oldfn):
            os.unlink(oldfn)
        os.link(fn, oldfn)
    os.rename(outf.name, fn)
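Example #17 stats the original archive and reapplies its mode to the replacement before the rename. The comparison is easiest to get right by extracting just the permission bits with stat.S_IMODE(); a small helper along those lines (names are my own):

import os
import stat

def copy_mode(src_f, dst_f):
    # Copy only the permission bits between two open files.
    mode = stat.S_IMODE(os.fstat(src_f.fileno()).st_mode)
    os.fchmod(dst_f.fileno(), mode)

Example #34 below does the equivalent inline, feeding os.fstat() of the input file straight into os.fchmod() on the output file.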
Example #18
def read_conf(key=None, prompt=True):
    """ Read settings from the config file
    :param key:
    :param prompt:
    """
    # Use the right raw_input in python3
    if 'raw_input' not in dir(__builtins__):
        raw_input = input

    try:
        conf = json.load(open(os.path.expanduser('~/.sshmap.conf'), 'r'))
    except IOError:
        conf = conf_defaults
    if key:
        try:
            return conf[key].encode('ascii')
        except KeyError:
            pass
    else:
        return conf
    if key and prompt:
        conf[key] = raw_input(conf_desc[key] + ': ')
        fh = open(os.path.expanduser('~/.sshmap2.conf'), 'w')
        os.fchmod(fh.fileno(), stat.S_IRUSR | stat.S_IWUSR)
        json.dump(conf, fh)
        fh.close()
        return conf[key]
    else:
        return None
Example #19
    def acquire(self):
        """Acquire an exclusive lock

        Returns immediately if lock was already acquired within this process.

        Raises:
            LockfileLockedError: The lockfile is in use by another process.
            LockfileEstablishError: The lock could not be established for
                some other reason.
        """
        if self.fd:
            return
        if self.has_exlock():
            raise LockfileLockedError(
                'Lock file already locked by PID %s' % self.get_pid())

        pwent = pwd.getpwnam(self.user)

        if not os.path.exists(self.dir):
            os.mkdir(self.dir)
        os.chown(self.dir, pwent.pw_uid, pwent.pw_gid)
        os.chmod(self.dir, 0o755)

        self.fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
        os.fchown(self.fd, pwent.pw_uid, pwent.pw_gid)
        os.fchmod(self.fd, 0o644)

        try:
            self.lock = fcntl.lockf(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(self.fd)
            self.fd = None
            raise LockfileEstablishError("Could not establish lock %s" %
                                        self.filename)
        os.write(self.fd, ("%s\n" % os.getpid()).encode())
Example #20
def write_file(fn, contents, perms=None, owner=None, group=None):
    if perms:
        perms2 = perms
    else:
        perms2 = 0o666

    fd = os.open(fn, os.O_CREAT | os.O_RDWR, perms2)

    # Bypass umask
    if perms:
        os.fchmod(fd, perms)

    if owner or group:
        if owner:
            pw = pwd.getpwnam(owner)
            uid = pw.pw_uid
        else:
            uid = -1

        if group:
            gr = grp.getgrnam(group)
            gid = gr.gr_gid
        else:
            gid = -1

        os.fchown(fd, uid, gid)

    f = os.fdopen(fd, 'w')
    f.write(contents)

    f.close()
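The "# Bypass umask" comment in Example #20 is the point of the whole function: the mode argument to os.open() is filtered through the process umask, while os.fchmod() sets the bits exactly as given. A short demonstration (hypothetical path; assumes the file does not already exist):

import os

os.umask(0o077)  # simulate a restrictive umask
fd = os.open('demo.txt', os.O_CREAT | os.O_RDWR, 0o664)
print(oct(os.fstat(fd).st_mode & 0o777))  # 0o600: umask stripped group/other bits
os.fchmod(fd, 0o664)                      # fchmod is not subject to the umask
print(oct(os.fstat(fd).st_mode & 0o777))  # 0o664
os.close(fd)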
Example #21
    def generateShellScript(self, data):
        print("Generating shell script", os.path.join(self.output, "script.sh"))
        f = codecs.open(os.path.join(self.output, "script.sh"), "w", "utf-8")
        f.write(SCRIPT_SH % (data, ""))
        os.fchmod(f.fileno(), 0o755)
        f.close()
        return True
Example #22
    def test_fchmod(self):
        os = self.posix
        f = open(self.path, "w")
        os.fchmod(f.fileno(), 0o200)
        assert (os.fstat(f.fileno()).st_mode & 0o777) == 0o200
        f.close()
        assert (os.stat(self.path).st_mode & 0o777) == 0o200
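Example #22 (note the os = self.posix indirection, suggesting an interpreter test suite) asserts the new mode both through the descriptor (os.fstat) and through the path (os.stat). The same round-trip check as a self-contained pytest function using the tmp_path fixture:

import os
import stat

def test_fchmod_roundtrip(tmp_path):
    p = tmp_path / 'f'
    with open(p, 'w') as f:
        os.fchmod(f.fileno(), 0o640)
        assert stat.S_IMODE(os.fstat(f.fileno()).st_mode) == 0o640
    assert stat.S_IMODE(os.stat(p).st_mode) == 0o640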
Example #23
    def test_no_state_found(self):
        error_msg = 'No such rev'
        with open(os.path.join(self.repo_dir, 'server_options.sh'), 'w') as f:
            f.write("#! /bin/sh\n")
            f.write("echo %s\n" % shlex.quote(json.dumps({
                'error': error_msg,
                })))
            os.fchmod(f.fileno(), 0o700)

        self.write_local_conf()
        self.write_hhconfig('server_options.sh')

        (output, _) = self.proc_call([
            hh_client,
            'check',
            '--retries',
            '20',
            self.repo_dir
            ],
            env={'HH_LOCALCONF_PATH': self.repo_dir})

        self.assertEqual(output.strip(), 'No errors!')

        log_file = self.proc_call([
            hh_client, '--logname', self.repo_dir
            ])[0].strip()
        with open(log_file) as f:
            logs = f.read()
            self.assertIn('Could not load mini state', logs)
            self.assertIn(error_msg, logs)
Example #24
def _write_gcp_snap_config(component):
    # gcp requires additional credentials setup
    gcp = endpoint_from_flag('endpoint.gcp.ready')
    creds_path = _gcp_creds_path(component)
    with creds_path.open('w') as fp:
        os.fchmod(fp.fileno(), 0o600)
        fp.write(gcp.credentials)

    # create a cloud-config file that sets token-url to nil to make the
    # services use the creds env var instead of the metadata server, as
    # well as making the cluster multizone
    cloud_config_path = _cloud_config_path(component)
    cloud_config_path.write_text('[Global]\n'
                                 'token-url = nil\n'
                                 'multizone = true\n')

    daemon_env_path = _daemon_env_path(component)
    if daemon_env_path.exists():
        daemon_env = daemon_env_path.read_text()
        if not daemon_env.endswith('\n'):
            daemon_env += '\n'
    else:
        daemon_env = ''
    if gcp_creds_env_key not in daemon_env:
        daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
        daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
        daemon_env_path.write_text(daemon_env)
Example #25
def write_load_config(repo_dir, saved_state_path, changed_files=None):
    """
    Writes a .hhconfig that allows hh_client to launch hh_server from a saved
    state archive.

    repo_dir: Repository to run hh_server on
    saved_state_path: Path to file containing saved server state
    changed_files: list of strings
    """
    if changed_files is None:
        changed_files = []
    with open(os.path.join(repo_dir, "server_options.sh"), "w") as f:
        f.write(
            r"""
#! /bin/sh
echo {0!s}
""".format(
                saved_state_path
            )
        )
        for fn in changed_files:
            f.write("echo {0!s}\n".format(fn))
        os.fchmod(f.fileno(), 0o700)

    with open(os.path.join(repo_dir, ".hhconfig"), "w") as f:
        # we can't just write 'echo ...' inline because Hack server will
        # be passing this command some command-line options
        f.write(
            r"""
# some comment
load_script = {0!s}
        """.format(
                os.path.join(repo_dir, "server_options.sh")
            )
        )
Example #26
    def _unpack_resource(self, resource_path, resource_name, resource_executable):
        if not pkg_resources.resource_exists(__name__, resource_name):
            return

        if pkg_resources.resource_isdir(__name__, resource_name):
            self.__create_dir(resource_path)
            for f in pkg_resources.resource_listdir(__name__, resource_name):
                if f == "":
                    # TODO(beng): Figure out why this happens
                    continue
                # TODO: Handle executable resources in directory
                self._unpack_resource(
                    os.path.join(resource_path, f),
                    os.path.join(resource_name, f),
                    False,
                )
        else:
            with closable_named_temporary_file(
                prefix=resource_path + os.extsep
            ) as outf:
                outf.write(pkg_resources.resource_string(__name__, resource_name))
                if resource_executable and hasattr(os, "fchmod"):
                    st = os.fstat(outf.fileno())
                    os.fchmod(outf.fileno(), st.st_mode | stat.S_IXUSR)
                outf.close()
                shutil.copy(outf.name, resource_path)
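Rather than assigning a fixed mode, Example #26 reads the current bits with os.fstat() and ORs in stat.S_IXUSR, leaving everything else untouched. As a reusable helper (name is my own):

import os
import stat

def add_owner_exec(f):
    # Preserve the existing permission bits; only add owner-execute.
    st = os.fstat(f.fileno())
    os.fchmod(f.fileno(), stat.S_IMODE(st.st_mode) | stat.S_IXUSR)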
Example #27
def write_load_config(repo_dir, saved_state_path, changed_files=[]):
    """
    Writes a .hhconfig that allows hh_client to launch hh_server from a saved
    state archive.

    repo_dir: Repository to run hh_server on
    saved_state_path: Path to file containing saved server state
    changed_files: list of strings
    """
    with open(os.path.join(repo_dir, 'server_options.sh'), 'w') as f:
        f.write(r"""
#! /bin/sh
echo %s
""" % saved_state_path)
        for fn in changed_files:
            f.write("echo %s\n" % fn)
        os.fchmod(f.fileno(), 0o700)

    with open(os.path.join(repo_dir, '.hhconfig'), 'w') as f:
        # we can't just write 'echo ...' inline because Hack server will
        # be passing this command some command-line options
        f.write(r"""
# some comment
load_script = %s
        """ % os.path.join(repo_dir, 'server_options.sh'))
Example #28
 def run(self, connection, args=None):
     """
     Check if a lava-test-shell has been requested, implement the overlay
     * create test runner directories beneath the temporary location
     * copy runners into test runner directories
     """
     self.data[self.name].setdefault('location', mkdtemp())
     self.logger.debug("Preparing overlay tarball in %s", self.data[self.name]['location'])
     if 'lava_test_results_dir' not in self.data:
         self.logger.error("Unable to identify lava test results directory - missing OS type?")
         return connection
     lava_path = os.path.abspath("%s/%s" % (self.data[self.name]['location'], self.data['lava_test_results_dir']))
     for runner_dir in ['bin', 'tests', 'results']:
         # avoid os.path.join as lava_test_results_dir startswith / so location is *dropped* by join.
         path = os.path.abspath("%s/%s" % (lava_path, runner_dir))
         if not os.path.exists(path):
             os.makedirs(path, 0o755)
             self.logger.debug("makedir: %s", path)
     for fname in self.scripts_to_copy:
         with open(fname, 'r') as fin:
             output_file = '%s/bin/%s' % (lava_path, os.path.basename(fname))
             self.logger.debug("Creating %s", output_file)
             with open(output_file, 'w') as fout:
                 fout.write("#!%s\n\n" % self.parameters['deployment_data']['lava_test_sh_cmd'])
                 fout.write(fin.read())
                 os.fchmod(fout.fileno(), self.xmod)
     connection = super(OverlayAction, self).run(connection, args)
     return connection
Example #29
    def test_options_cmd(self):
        """
        Make sure we are invoking the server_options_cmd with the right flags
        """
        args_file = os.path.join(self.saved_state_dir, 'cmd_args')
        with open(os.path.join(self.repo_dir, 'server_options.sh'), 'w') as f:
            f.write(r"""
#! /bin/sh
echo "$1" > {out}
echo "$2" >> {out}
            """.format(out=args_file))
            os.fchmod(f.fileno(), 0o700)

        with open(os.path.join(self.repo_dir, '.hhconfig'), 'w') as f:
            f.write(r"""
# some comment
load_script = %s
            """ % os.path.join(self.repo_dir, 'server_options.sh'))

        proc_call([
            self.hh_client,
            'start',
            self.repo_dir
        ])

        version = proc_call([
            self.hh_server,
            '--version'
        ])

        with open(args_file) as f:
            self.assertEqual(f.read().splitlines(), [self.repo_dir, version])
Example #30
    def test_get_changes_failure(self):
        error_msg = 'hg is not playing nice today'
        with open(os.path.join(self.repo_dir, 'server_options.sh'), 'w') as f:
            f.write("#! /bin/sh\n")
            write_echo_json(f, {
                'state': self.saved_state_path(),
                'is_cached': True,
                'deptable': self.saved_state_path() + '.deptable',
                })
            write_echo_json(f, {
                'error': error_msg,
                })
            os.fchmod(f.fileno(), 0o700)

        self.write_local_conf()
        self.write_hhconfig('server_options.sh')
        self.write_watchman_config()

        (output, _, _) = self.run_check()

        self.assertEqual(output.strip(), 'No errors!')

        logs = self.get_server_logs()
        self.assertIn('Could not load mini state', logs)
        self.assertIn(error_msg, logs)
Example #31
    def _reconfig_ssh(self):
        temp_files = []
        ssh_options = []

        # ssh_config
        ssh_config_fname = self.ssh_config_file
        ssh_config = self.get_store("ssh_config")
        if ssh_config is not None or ssh_config_fname is None:
            if not ssh_config:
                ssh_config = DEFAULT_SSH_CONFIG
            f = tempfile.NamedTemporaryFile(prefix='ceph-mgr-ssh-conf-')
            os.fchmod(f.fileno(), 0o600)
            f.write(ssh_config.encode('utf-8'))
            f.flush()  # make visible to other processes
            temp_files += [f]
            ssh_config_fname = f.name
        if ssh_config_fname:
            if not os.path.isfile(ssh_config_fname):
                raise Exception("ssh_config \"{}\" does not exist".format(
                    ssh_config_fname))
            ssh_options += ['-F', ssh_config_fname]

        # identity
        ssh_key = self.get_store("ssh_identity_key")
        ssh_pub = self.get_store("ssh_identity_pub")
        tpub = None
        tkey = None
        if ssh_key and ssh_pub:
            tkey = tempfile.NamedTemporaryFile(prefix='ceph-mgr-ssh-identity-')
            tkey.write(ssh_key.encode('utf-8'))
            os.fchmod(tkey.fileno(), 0o600)
            tkey.flush()  # make visible to other processes
            tpub = open(tkey.name + '.pub', 'w')
            os.fchmod(tpub.fileno(), 0o600)
            tpub.write(ssh_pub)
            tpub.flush()  # make visible to other processes
            temp_files += [tkey, tpub]
            ssh_options += ['-i', tkey.name]

        self._temp_files = temp_files
        if ssh_options:
            self._ssh_options = ' '.join(ssh_options)
        else:
            self._ssh_options = None
        self.log.info('ssh_options %s' % ssh_options)
Example #32
def ensure_traefik_config(creds_path: pathlib.Path, config_path: pathlib.Path,
                          state_dir: pathlib.Path):
    """
    Render the traefik.toml config file
    """
    with open(creds_path) as f:
        creds = json.load(f)
        # generate htpassword that works with traefik
        ht = HtpasswdFile()
        ht.set_password(creds['username'], creds['password'])
        hashed_password = str(ht.to_string()).split(":")[1][:-3]
        basic_auth = f'{creds["username"]}:{hashed_password}'

    config = {
        'traefik_api': {
            'basic_auth': basic_auth,
            'port': 8099,
            'ip': '127.0.0.1'
        },
        'http': {
            'port': 8181
        },
        'https': {
            'port': 443,
            'enabled': False
        }
    }

    with open(HERE / "traefik.toml.tpl") as f:
        template = Template(f.read())

    new_toml = template.render(config)

    with open(config_path, 'w') as f:
        os.fchmod(f.fileno(), 0o600)
        f.write(new_toml)

    with open(state_dir / "rules.toml", "w") as f:
        os.fchmod(f.fileno(), 0o600)

    # ensure acme.json exists and is private
    with open(state_dir / "acme.json", "a") as f:
        os.fchmod(f.fileno(), 0o600)
Example #33
def test_read_cache_permission_error(tmpdir):
    cache = DependencyCache(cache_dir=tmpdir)
    with open(cache._cache_file, "w") as fp:
        os.fchmod(fp.fileno(), 0o000)
    with pytest.raises(IOError, match="Permission denied"):
        cache.cache
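Examples #10 and #33 use os.fchmod() to fabricate permission failures for negative tests. The trick in isolation (assumes the tests do not run as root, since root bypasses mode checks):

import os
import pytest

def test_unreadable_file(tmp_path):
    p = tmp_path / 'cache.json'
    with open(p, 'w') as f:
        os.fchmod(f.fileno(), 0o000)  # nobody may read or write
    with pytest.raises(PermissionError):
        p.read_text()
    os.chmod(p, 0o600)  # restore access so the file can be cleaned up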
Example #34
    if not os.path.exists(DisBatchPath):
        print('Unable to find myself; set DisBatchPath and ImportDir manually at the top of disBatch.py.', file=sys.stderr)
        sys.exit(1)
    DisBatchDir = os.path.dirname(DisBatchPath)
    with open(DisBatchPath, 'r') as fi:
        with tempfile.NamedTemporaryFile('w', prefix='disBatch.py.', dir=DisBatchDir, delete=False) as fo:
            found = False
            for l in fi:
                if l.startswith('DisBatchPath, ImportDir, PathsFixed ='):
                    assert not found
                    found = True
                    l = 'DisBatchPath, ImportDir, PathsFixed = %r, %r, True\n'%(DisBatchPath, DisBatchDir)
                    print("Changing path info to %r"%l, file=sys.stderr)
                fo.write(l)
            assert found
            os.fchmod(fo.fileno(), os.fstat(fi.fileno()).st_mode)
    os.rename(DisBatchPath, DisBatchPath+'.prev')
    os.rename(fo.name, DisBatchPath)
    sys.exit(0)

if not PathsFixed:
    # Try to guess
    DisBatchPath = os.path.realpath(__file__)
    ImportDir = os.path.dirname(DisBatchPath)

PythonPath = os.getenv('PYTHONPATH', '')
if ImportDir:
    # to find kvsstcp:
    sys.path.append(ImportDir)
    # for subprocesses:
    PythonPath = PythonPath + ':' + ImportDir if PythonPath else ImportDir
Example #35
    if len(sys.argv) > 1:
        setup_analytical = bool(int(sys.argv[1]))

    print("setup_analytical: " + str(setup_analytical))

    if setup_analytical:
        file_ext = "_analytical_1"
    else:
        file_ext = "_analytical_0"

    # Create dummy run.sh script in this folder for automized job processing
    os.makedirs("job_benchref_solution" + file_ext, exist_ok=True)
    with open("job_benchref_solution" + file_ext + "/run.sh", "w") as rfile:
        rfile.write("#!/bin/bash\necho \"Dummy\"")
        os.fchmod(rfile.fileno(),
                  stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH)

    output_file_name = "job_benchref_solution" + file_ext + "/output_{:s}_t{:020.8f}.csv"

    def savefile(data, name, t):
        d = x.spectogrd(data)
        d = np.flip(d, 0)
        os.makedirs("job_benchref_solution" + file_ext, exist_ok=True)
        np.savetxt(output_file_name.format(name, t / (60 * 60)),
                   d,
                   delimiter="\t")

    def savefileg(data, name, t):
        d = np.flip(data, 0)
        os.makedirs("job_benchref_solution" + file_ext, exist_ok=True)
        np.savetxt(output_file_name.format(name, t / (60 * 60)),
                   d,
                   delimiter="\t")
Example #36
    def run(self, connection, max_end_time):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
        if self.role is None:
            self.logger.debug("skipped %s", self.name)
            return connection
        lava_test_results_dir = self.get_namespace_data(
            action='test', label='results', key='lava_test_results_dir')
        shell = self.get_namespace_data(action='test',
                                        label='shared',
                                        key='lava_test_sh_cmd')
        location = self.get_namespace_data(action='test',
                                           label='shared',
                                           key='location')
        if not location:
            raise LAVABug("Missing lava overlay location")
        if not os.path.exists(location):
            raise LAVABug("Unable to find overlay location")

        # the roles list can only be populated after the devices have been assigned
        # therefore, cannot be checked in validate which is executed at submission.
        if 'roles' not in self.job.parameters['protocols'][self.protocol]:
            raise LAVABug(
                "multinode definition without complete list of roles after assignment"
            )

        # Generic scripts
        lava_path = os.path.abspath("%s/%s" %
                                    (location, lava_test_results_dir))
        scripts_to_copy = glob.glob(
            os.path.join(self.lava_multi_node_test_dir, 'lava-*'))
        self.logger.debug(self.lava_multi_node_test_dir)
        self.logger.debug("lava_path: %s", lava_path)
        self.logger.debug("scripts to copy %s", scripts_to_copy)

        for fname in scripts_to_copy:
            with open(fname, 'r') as fin:
                foutname = os.path.basename(fname)
                output_file = '%s/bin/%s' % (lava_path, foutname)
                self.logger.debug("Creating %s", output_file)
                with open(output_file, 'w') as fout:
                    fout.write("#!%s\n\n" % shell)
                    # Target-specific scripts (add ENV to the generic ones)
                    if foutname == 'lava-group':
                        fout.write('LAVA_GROUP="\n')
                        for client_name in self.job.parameters['protocols'][
                                self.protocol]['roles']:
                            if client_name == 'yaml_line':
                                continue
                            role_line = self.job.parameters['protocols'][
                                self.protocol]['roles'][client_name]
                            self.logger.debug("group roles:\t%s\t%s",
                                              client_name, role_line)
                            fout.write(r"\t%s\t%s\n" %
                                       (client_name, role_line))
                        fout.write('"\n')
                    elif foutname == 'lava-role':
                        fout.write("TARGET_ROLE='%s'\n" %
                                   self.job.parameters['protocols'][
                                       self.protocol]['role'])
                    elif foutname == 'lava-self':
                        fout.write("LAVA_HOSTNAME='%s'\n" % self.job.job_id)
                    else:
                        fout.write("LAVA_TEST_BIN='%s/bin'\n" %
                                   lava_test_results_dir)
                        fout.write("LAVA_MULTI_NODE_CACHE='%s'\n" %
                                   self.lava_multi_node_cache_file)
                        # always write out full debug logs
                        fout.write("LAVA_MULTI_NODE_DEBUG='yes'\n")
                    fout.write(fin.read())
                    os.fchmod(fout.fileno(), self.xmod)
        self.call_protocols()
        return connection
Example #37
    def run(self, connection, max_end_time):
        """
        Writes out file contents from lists, across multiple lines
        VAR="VAL1\n\
        VAL2\n\
        "
        The \n and \ are used to avoid unwanted whitespace, so are escaped.
        \n becomes \\n, \ becomes \\, which itself then needs \n to output:
        VAL1
        VAL2
        """
        if not self.params:
            self.logger.debug("skipped %s", self.name)
            return connection
        location = self.get_namespace_data(action='test',
                                           label='shared',
                                           key='location')
        lava_test_results_dir = self.get_namespace_data(
            action='test', label='results', key='lava_test_results_dir')
        shell = self.get_namespace_data(action='test',
                                        label='shared',
                                        key='lava_test_sh_cmd')
        if not location:
            raise LAVABug("Missing lava overlay location")
        if not os.path.exists(location):
            raise LAVABug("Unable to find overlay location")

        lava_path = os.path.abspath("%s/%s" %
                                    (location, lava_test_results_dir))
        scripts_to_copy = glob.glob(
            os.path.join(self.lava_vland_test_dir, 'lava-*'))
        self.logger.debug(self.lava_vland_test_dir)
        self.logger.debug({"lava_path": lava_path, "scripts": scripts_to_copy})

        for fname in scripts_to_copy:
            with open(fname, 'r') as fin:
                foutname = os.path.basename(fname)
                output_file = '%s/bin/%s' % (lava_path, foutname)
                self.logger.debug("Creating %s", output_file)
                with open(output_file, 'w') as fout:
                    fout.write("#!%s\n\n" % shell)
                    # Target-specific scripts (add ENV to the generic ones)
                    if foutname == 'lava-vland-self':
                        fout.write(r'LAVA_VLAND_SELF="')
                        for line in self.sysfs:
                            fout.write(r"%s\n" % line)
                    elif foutname == 'lava-vland-names':
                        fout.write(r'LAVA_VLAND_NAMES="')
                        for line in self.names:
                            fout.write(r"%s\n" % line)
                    elif foutname == 'lava-vland-tags':
                        fout.write(r'LAVA_VLAND_TAGS="')
                        if not self.tags:
                            fout.write(r"\n")
                        else:
                            for line in self.tags:
                                fout.write(r"%s\n" % line)
                    fout.write('"\n\n')
                    fout.write(fin.read())
                    os.fchmod(fout.fileno(), self.xmod)
        self.call_protocols()
        return connection
Example #38
class UpdateCommand(Command):
    """Implementation of gsutil update command."""

    # Command specification. See base class for documentation.
    command_spec = Command.CreateCommandSpec(
        'update',
        command_name_aliases=['refresh'],
        usage_synopsis=_SYNOPSIS,
        min_args=0,
        max_args=1,
        supported_sub_args='fn',
        file_url_ok=True,
        provider_url_ok=False,
        urls_start_arg=0,
        gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
        gs_default_api=ApiSelector.JSON,
    )
    # Help specification. See help_provider.py for documentation.
    help_spec = Command.HelpSpec(
        help_name='update',
        help_name_aliases=['refresh'],
        help_type='command_help',
        help_one_line_summary='Update to the latest gsutil release',
        help_text=_DETAILED_HELP_TEXT,
        subcommand_help_text={},
    )

    def _DisallowUpdateIfDataInGsutilDir(self):
        """Disallows the update command if files not in the gsutil distro are found.

        This prevents users from losing data if they are in the habit of running
        gsutil from the gsutil directory and leaving data in that directory.

        This will also detect someone attempting to run gsutil update from a git
        repo, since the top-level directory will contain git files and dirs (like
        .git) that are not distributed with gsutil.

        Raises:
          CommandException: if files other than those distributed with gsutil found.
        """
        # Manifest includes recursive-includes of gslib. Directly add
        # those to the list here so we will skip them in os.listdir() loop without
        # having to build deeper handling of the MANIFEST file here. Also include
        # 'third_party', which isn't present in manifest but gets added to the
        # gsutil distro by the gsutil submodule configuration; and the MANIFEST.in
        # and CHANGES.md files.
        manifest_lines = ['gslib', 'third_party', 'MANIFEST.in', 'CHANGES.md']

        try:
            with open(os.path.join(gslib.GSUTIL_DIR, 'MANIFEST.in'),
                      'r') as fp:
                for line in fp:
                    if line.startswith('include '):
                        manifest_lines.append(line.split()[-1])
        except IOError:
            self.logger.warn(
                'MANIFEST.in not found in %s.\nSkipping user data '
                'check.\n', gslib.GSUTIL_DIR)
            return

        # Look just at top-level directory. We don't try to catch data dropped into
        # subdirs (like gslib) because that would require deeper parsing of
        # MANIFEST.in, and most users who drop data into gsutil dir do so at the top
        # level directory.
        for filename in os.listdir(gslib.GSUTIL_DIR):
            if filename.endswith('.pyc'):
                # Ignore compiled code.
                continue
            if filename not in manifest_lines:
                raise CommandException('\n'.join(
                    textwrap.wrap(
                        'A file (%s) that is not distributed with gsutil was found in '
                        'the gsutil directory. The update command cannot run with user '
                        'data in the gsutil directory.' %
                        os.path.join(gslib.GSUTIL_DIR, filename))))

    def _ExplainIfSudoNeeded(self, tf, dirs_to_remove):
        """Explains what to do if sudo needed to update gsutil software.

    Happens if gsutil was previously installed by a different user (typically if
    someone originally installed in a shared file system location, using sudo).

    Args:
      tf: Opened TarFile.
      dirs_to_remove: List of directories to remove.

    Raises:
      CommandException: if errors encountered.
    """
        # If running under Windows or Cygwin we don't need (or have) sudo.
        if IS_CYGWIN or IS_WINDOWS:
            return

        user_id = os.getuid()
        if os.stat(gslib.GSUTIL_DIR).st_uid == user_id:
            return

        # Won't fail - this command runs after main startup code that insists on
        # having a config file.
        config_file_list = GetBotoConfigFileList()
        config_files = ' '.join(config_file_list)
        self._CleanUpUpdateCommand(tf, dirs_to_remove)

        # Pick current protection of each boto config file for command that restores
        # protection (rather than fixing at 600) to support use cases like how GCE
        # installs a service account with an /etc/boto.cfg file protected to 644.
        chmod_cmds = []
        for config_file in config_file_list:
            mode = oct(stat.S_IMODE((os.stat(config_file)[stat.ST_MODE])))
            chmod_cmds.append('\n\tsudo chmod %s %s' % (mode, config_file))

        raise CommandException('\n'.join(
            textwrap.wrap(
                'Since it was installed by a different user previously, you will need '
                'to update using the following commands. You will be prompted for your '
                'password, and the install will run as "root". If you\'re unsure what '
                'this means please ask your system administrator for help:')
        ) + ('\n\tsudo chmod 0644 %s\n\tsudo env BOTO_CONFIG="%s" %s update'
             '%s') % (config_files, config_files, self.gsutil_path,
                      ' '.join(chmod_cmds)),
                               informational=True)

    # This list is checked during gsutil update by doing a lowercased
    # slash-left-stripped check. For example "/Dev" would match the "dev" entry.
    unsafe_update_dirs = [
        'applications',
        'auto',
        'bin',
        'boot',
        'desktop',
        'dev',
        'documents and settings',
        'etc',
        'export',
        'home',
        'kernel',
        'lib',
        'lib32',
        'library',
        'lost+found',
        'mach_kernel',
        'media',
        'mnt',
        'net',
        'null',
        'network',
        'opt',
        'private',
        'proc',
        'program files',
        'python',
        'root',
        'sbin',
        'scripts',
        'srv',
        'sys',
        'system',
        'tmp',
        'users',
        'usr',
        'var',
        'volumes',
        'win',
        'win32',
        'windows',
        'winnt',
    ]

    def _EnsureDirsSafeForUpdate(self, dirs):
        """Raises Exception if any of dirs is known to be unsafe for gsutil update.

    This provides a fail-safe check to ensure we don't try to overwrite
    or delete any important directories. (That shouldn't happen given the
    way we construct tmp dirs, etc., but since the gsutil update cleanup
    uses shutil.rmtree() it's prudent to add extra checks.)

    Args:
      dirs: List of directories to check.

    Raises:
      CommandException: If unsafe directory encountered.
    """
        for d in dirs:
            if not d:
                d = 'null'
            if d.lstrip(os.sep).lower() in self.unsafe_update_dirs:
                raise CommandException(
                    'EnsureDirsSafeForUpdate: encountered unsafe '
                    'directory (%s); aborting update' % d)

    def _CleanUpUpdateCommand(self, tf, dirs_to_remove):
        """Cleans up temp files etc. from running update command.

    Args:
      tf: Opened TarFile, or None if none currently open.
      dirs_to_remove: List of directories to remove.

    """
        if tf:
            tf.close()
        self._EnsureDirsSafeForUpdate(dirs_to_remove)
        for directory in dirs_to_remove:
            try:
                shutil.rmtree(directory)
            except OSError:
                # Ignore errors while attempting to remove old dirs under Windows. They
                # happen because of Windows exclusive file locking, and the update
                # actually succeeds but just leaves the old versions around in the
                # user's temp dir.
                if not IS_WINDOWS:
                    raise

    def RunCommand(self):
        """Command entry point for the update command."""

        if gslib.IS_PACKAGE_INSTALL:
            raise CommandException(
                'The update command is only available for gsutil installed from a '
                'tarball. If you installed gsutil via another method, use the same '
                'method to update it.')

        if os.environ.get('CLOUDSDK_WRAPPER') == '1':
            raise CommandException(
                'The update command is disabled for Cloud SDK installs. Please run '
                '"gcloud components update" to update it. Note: the Cloud SDK '
                'incorporates updates to the underlying tools approximately every 2 '
                'weeks, so if you are attempting to update to a recently created '
                'release / pre-release of gsutil it may not yet be available via '
                'the Cloud SDK.')

        https_validate_certificates = CERTIFICATE_VALIDATION_ENABLED
        if not https_validate_certificates:
            raise CommandException(
                'Your boto configuration has https_validate_certificates = False.\n'
                'The update command cannot be run this way, for security reasons.'
            )

        self._DisallowUpdateIfDataInGsutilDir()

        force_update = False
        no_prompt = False
        if self.sub_opts:
            for o, unused_a in self.sub_opts:
                if o == '-f':
                    force_update = True
                if o == '-n':
                    no_prompt = True

        dirs_to_remove = []
        tmp_dir = tempfile.mkdtemp()
        dirs_to_remove.append(tmp_dir)
        os.chdir(tmp_dir)

        if not no_prompt:
            self.logger.info('Checking for software update...')
        if self.args:
            update_from_url_str = self.args[0]
            if not update_from_url_str.endswith('.tar.gz'):
                raise CommandException(
                    'The update command only works with tar.gz files.')
            for i, result in enumerate(
                    self.WildcardIterator(update_from_url_str)):
                if i > 0:
                    raise CommandException(
                        'Invalid update URL. Must name a single .tar.gz file.')
                storage_url = result.storage_url
                if storage_url.IsFileUrl() and not storage_url.IsDirectory():
                    if not force_update:
                        raise CommandException((
                            '"update" command does not support "file://" URLs without the '
                            '-f option.'))
                elif not (storage_url.IsCloudUrl() and storage_url.IsObject()):
                    raise CommandException(
                        'Invalid update object URL. Must name a single .tar.gz file.'
                    )
        else:
            update_from_url_str = GSUTIL_PUB_TARBALL

        # Try to retrieve version info from tarball metadata; failing that; download
        # the tarball and extract the VERSION file. The version lookup will fail
        # when running the update system test, because it retrieves the tarball from
        # a temp file rather than a cloud URL (files lack the version metadata).
        tarball_version = LookUpGsutilVersion(self.gsutil_api,
                                              update_from_url_str)
        if tarball_version:
            tf = None
        else:
            tf = self._FetchAndOpenGsutilTarball(update_from_url_str)
            tf.extractall()
            with open(os.path.join('gsutil', 'VERSION'), 'r') as ver_file:
                tarball_version = ver_file.read().strip()

        if not force_update and gslib.VERSION == tarball_version:
            self._CleanUpUpdateCommand(tf, dirs_to_remove)
            if self.args:
                raise CommandException('You already have %s installed.' %
                                       update_from_url_str,
                                       informational=True)
            else:
                raise CommandException(
                    'You already have the latest gsutil release '
                    'installed.',
                    informational=True)

        if not no_prompt:
            (_, major) = CompareVersions(tarball_version, gslib.VERSION)
            if major:
                print('\n'.join(
                    textwrap.wrap(
                        'This command will update to the "%s" version of gsutil at %s. '
                        'NOTE: This is a major new version, so it is strongly recommended '
                        'that you review the release note details at %s before updating to '
                        'this version, especially if you use gsutil in scripts.'
                        % (tarball_version, gslib.GSUTIL_DIR,
                           RELEASE_NOTES_URL))))
            else:
                print(
                    'This command will update to the "%s" version of\ngsutil at %s'
                    % (tarball_version, gslib.GSUTIL_DIR))
        self._ExplainIfSudoNeeded(tf, dirs_to_remove)

        if no_prompt:
            answer = 'y'
        else:
            answer = raw_input('Proceed? [y/N] ')
        if not answer or answer.lower()[0] != 'y':
            self._CleanUpUpdateCommand(tf, dirs_to_remove)
            raise CommandException('Not running update.', informational=True)

        if not tf:
            tf = self._FetchAndOpenGsutilTarball(update_from_url_str)

        # Ignore keyboard interrupts during the update to reduce the chance someone
        # hitting ^C leaves gsutil in a broken state.
        RegisterSignalHandler(signal.SIGINT, signal.SIG_IGN)

        # gslib.GSUTIL_DIR lists the path where the code should end up (like
        # /usr/local/gsutil), which is one level down from the relative path in the
        # tarball (since the latter creates files in ./gsutil). So, we need to
        # extract at the parent directory level.
        gsutil_bin_parent_dir = os.path.normpath(
            os.path.join(gslib.GSUTIL_DIR, '..'))

        # Extract tarball to a temporary directory in a sibling to GSUTIL_DIR.
        old_dir = tempfile.mkdtemp(dir=gsutil_bin_parent_dir)
        new_dir = tempfile.mkdtemp(dir=gsutil_bin_parent_dir)
        dirs_to_remove.append(old_dir)
        dirs_to_remove.append(new_dir)
        self._EnsureDirsSafeForUpdate(dirs_to_remove)
        try:
            tf.extractall(path=new_dir)
        except Exception as e:
            self._CleanUpUpdateCommand(tf, dirs_to_remove)
            raise CommandException('Update failed: %s.' % e)

        # For enterprise mode (shared/central) installation, users with
        # different user/group than the installation user/group must be
        # able to run gsutil so we need to do some permissions adjustments
        # here. Since enterprise mode is not supported for Windows
        # users, we can skip this step when running on Windows, which
        # avoids the problem that Windows has no find or xargs command.
        if not IS_WINDOWS:
            # Make all files and dirs in updated area owner-RW and world-R, and make
            # all directories owner-RWX and world-RX.
            for dirname, subdirs, filenames in os.walk(new_dir):
                for filename in filenames:
                    fd = os.open(os.path.join(dirname, filename), os.O_RDONLY)
                    os.fchmod(
                        fd, stat.S_IWRITE | stat.S_IRUSR | stat.S_IRGRP
                        | stat.S_IROTH)
                    os.close(fd)
                for subdir in subdirs:
                    fd = os.open(os.path.join(dirname, subdir), os.O_RDONLY)
                    os.fchmod(
                        fd, stat.S_IRWXU | stat.S_IXGRP | stat.S_IXOTH
                        | stat.S_IRGRP | stat.S_IROTH)
                    os.close(fd)

            # Make main gsutil script owner-RWX and world-RX.
            fd = os.open(os.path.join(new_dir, 'gsutil', 'gsutil'),
                         os.O_RDONLY)
            os.fchmod(
                fd, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH
                | stat.S_IXOTH)
            os.close(fd)

        # Move old installation aside and new into place.
        os.rename(gslib.GSUTIL_DIR, os.path.join(old_dir, 'old'))
        os.rename(os.path.join(new_dir, 'gsutil'), gslib.GSUTIL_DIR)
        self._CleanUpUpdateCommand(tf, dirs_to_remove)
        RegisterSignalHandler(signal.SIGINT, signal.SIG_DFL)
        self.logger.info('Update complete.')
        return 0
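
The example above stages the new tree in a sibling temporary directory and then
swaps it into place with two renames. A condensed sketch of that swap pattern
(hypothetical names; both directories must live on the same filesystem, which
is what keeps each rename atomic):

import os
import tempfile

def swap_into_place(install_dir, staged_tree):
    """Move the old install aside, then move the staged tree into place."""
    # A sibling directory keeps both renames on one filesystem, so each
    # rename is a single atomic operation.
    parent = os.path.normpath(os.path.join(install_dir, '..'))
    old_dir = tempfile.mkdtemp(dir=parent)
    os.rename(install_dir, os.path.join(old_dir, 'old'))
    os.rename(staged_tree, install_dir)
    return old_dir  # the caller removes this once the swap has succeeded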
Example #39
0
    def make_runner(self, parser, dry_run=False, setarith=None):
        """!Creates a Rocoto workflow for the given arguments.

        @param parser The produtil.testing.parse.Parser containing all
        needed information.  This is used to get the list of runnable
        tasks and builds, the sets of tasks and builds, and all
        configuration information.

        @param dry_run If True, the make_runner only logs what is to
        be done without actually doing it.

        @param setarith Optional: a string recognized by
        produtil.testing.setarith.arithparse().  This is used to
        generate the list of Tasks and Builds to run.  If no setarith
        is given, all Tests and Builds with "run" blocks are run."""
        dry_run = bool(dry_run)
        work = None
        con = None
        runset = parser.setarith(setarith)
        logger = parser.logger
        mode = parser.run_mode
        for runcon in runset:
            runme, raw_con = runcon.as_tuple
            runme_context = produtil.testing.script.runner_context_for(raw_con)
            if work is None:
                work = RocotoWorkflow('rt', runme.defscopes[-1], mode)
            work.run(runme, runme_context)
            if con is None:
                con = runme_context

        if work is None:
            raise ValueError('ERROR: No "run" statements seen; '
                             'nothing to do.\n')
        assert (con is not None)

        def here(path):
            here = os.path.normpath(os.path.join(work.install_dir(con), path))
            dir = os.path.normpath(os.path.dirname(here))
            if dir != here and not dry_run and not os.path.isdir(dir):
                logger.info('%s: make directory' % (dir, ))
                produtil.fileop.makedirs(dir)
            logger.info('%s: write file' % (here, ))
            return here

        target = here('rocoto/workflow.xml')
        if not dry_run:
            with open(target, 'wt') as f:
                work.generate_xml(f, con)
        target = here('src/install.sh')
        if not dry_run:
            with open(target, 'wt') as f:
                f.write(work.generate_install_script())
                os.fchmod(f.fileno(), 0o755)
        target = here('src/uninstall.sh')
        if not dry_run:
            with open(target, 'wt') as f:
                f.write(work.generate_uninstall_script())
                os.fchmod(f.fileno(), 0o755)
        if mode is BASELINE:
            target = here('ush/prep_baseline.sh')
            if not dry_run:
                with open(target, 'wt') as f:
                    f.write(work.make_prep_baseline_sh(con))
                    os.fchmod(f.fileno(), 0o755)
        for name, test in work.iter_tests():
            target = here(test.j_job_name(work, con))
            if not dry_run:
                with open(target, 'wt') as f:
                    f.write(test.j_job_contents(work, con))
                    os.fchmod(f.fileno(), 0o755)
            target = here(test.ex_script_name(work, con))
            if not dry_run:
                with open(target, 'wt') as f:
                    f.write(test.ex_script_contents(work, con))
                    os.fchmod(f.fileno(), 0o755)
        target = here('ush/functions.bash')
        if not dry_run:
            with open(target, 'wt') as f:
                f.write('# DO NOT EDIT THIS SCRIPT; '
                        'IT IS AUTOMATICALLY GENERATED\n')
                f.write('# These are bash functions used '
                        'by the ex-scripts.\n\n')
                f.write(bash_functions)
Example #40
0
def init_client_mmap(mmap_group=None,
                     socket_filename=None,
                     size=128 * 1024 * 1024,
                     filename=None):
    """
        Initializes an mmap area, writes the token in it and returns:
            (success flag, delete flag, mmap_area, mmap_size, temp_file, mmap_filename)
        The caller must keep hold of temp_file to ensure it does not get deleted!
        This is used by the client.
    """
    def rerr():
        return False, False, None, 0, None, None

    log("init_mmap%s", (mmap_group, socket_filename, size, filename))
    mmap_filename = filename
    mmap_temp_file = None
    delete = True

    def validate_size(size):
        assert size >= 64 * 1024 * 1024, "mmap size is too small: %sB (minimum is 64MB)" % std_unit(
            size)
        assert size <= 4 * 1024 * 1024 * 1024, "mmap is too big: %sB (maximum is 4GB)" % std_unit(
            size)

    try:
        import mmap
        unit = max(4096, mmap.PAGESIZE)
        #add 8 bytes for the mmap area control header zone:
        mmap_size = roundup(size + 8, unit)
        if WIN32:
            validate_size(mmap_size)
            if not filename:
                from xpra.net.crypto import get_hex_uuid
                filename = "xpra-%s" % get_hex_uuid()
            mmap_filename = filename
            mmap_area = mmap.mmap(0, mmap_size, filename)
            #not a real file:
            delete = False
            mmap_temp_file = None
        else:
            assert POSIX
            if filename:
                if os.path.exists(filename):
                    fd = os.open(filename, os.O_EXCL | os.O_RDWR)
                    mmap_size = os.path.getsize(mmap_filename)
                    validate_size(mmap_size)
                    #mmap_size = 4*1024*1024    #size restriction needed with ivshmem
                    delete = False
                    log.info("Using existing mmap file '%s': %sMB",
                             mmap_filename, mmap_size // 1024 // 1024)
                else:
                    validate_size(mmap_size)
                    import errno
                    flags = os.O_CREAT | os.O_EXCL | os.O_RDWR
                    try:
                        fd = os.open(filename, flags)
                        mmap_temp_file = None  #os.fdopen(fd, 'w')
                        mmap_filename = filename
                    except OSError as e:
                        if e.errno == errno.EEXIST:
                            log.error(
                                "Error: the mmap file '%s' already exists",
                                filename)
                            return rerr()
                        raise
            else:
                validate_size(mmap_size)
                import tempfile
                from xpra.platform.paths import get_mmap_dir
                mmap_dir = get_mmap_dir()
                subs = os.environ.copy()
                subs.update({
                    "UID": os.getuid(),
                    "GID": os.getgid(),
                    "PID": os.getpid(),
                })
                mmap_dir = shellsub(mmap_dir, subs)
                if mmap_dir and not os.path.exists(mmap_dir):
                    os.mkdir(mmap_dir, 0o700)
                if not mmap_dir or not os.path.exists(mmap_dir):
                    raise Exception("mmap directory %s does not exist!" %
                                    mmap_dir)
                #create the mmap file, the mkstemp that is called via NamedTemporaryFile ensures
                #that the file is readable and writable only by the creating user ID
                try:
                    temp = tempfile.NamedTemporaryFile(prefix="xpra.",
                                                       suffix=".mmap",
                                                       dir=mmap_dir)
                except OSError as e:
                    log.error("Error: cannot create mmap file:")
                    log.error(" %s", e)
                    return rerr()
                #keep a reference to it so it does not disappear!
                mmap_temp_file = temp
                mmap_filename = temp.name
                fd = temp.file.fileno()
            #set the group permissions and gid if the mmap-group option is specified
            if mmap_group and isinstance(socket_filename, str) and os.path.exists(
                    socket_filename):
                from stat import S_IRUSR, S_IWUSR, S_IRGRP, S_IWGRP
                s = os.stat(socket_filename)
                os.fchown(fd, -1, s.st_gid)
                os.fchmod(fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP)
            log("using mmap file %s, fd=%s, size=%s", mmap_filename, fd,
                mmap_size)
            os.lseek(fd, mmap_size - 1, os.SEEK_SET)
            assert os.write(fd, b'\x00')
            os.lseek(fd, 0, os.SEEK_SET)
            mmap_area = mmap.mmap(fd, length=mmap_size)
        return True, delete, mmap_area, mmap_size, mmap_temp_file, mmap_filename
    except Exception as e:
        log("failed to setup mmap: %s", e, exc_info=True)
        log.error("Error: mmap setup failed:")
        log.error(" %s", e)
        clean_mmap(mmap_filename)
        return rerr()
Example #41
0
def _make_password_file(password):
    fd, path = tempfile.mkstemp()
    os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR)
    with os.fdopen(fd, "w") as f:
        f.write(password)
    return path
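
A minimal usage sketch for the helper above (the consumer function is
hypothetical; the caller owns the temporary file and must delete it):

import os

path = _make_password_file('s3cr3t')  # created 0600 before the secret is written
try:
    run_tool_with_password_file(path)  # hypothetical consumer
finally:
    os.unlink(path)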
Example #42
0
 def test_build(self):
     self.use_temp_dir()
     scratch = os.path.join(self.temp_dir, "scratch")
     with mkfile(os.path.join(scratch, "bin", "foo")) as f:
         f.write("test /bin/foo\n")
     os.symlink("foo", os.path.join(scratch, "bin", "bar"))
     touch(os.path.join(scratch, ".git", "config"))
     with mkfile(os.path.join(scratch, "toplevel")) as f:
         f.write("test /toplevel\n")
     os.symlink("file-does-not-exist",
                os.path.join(scratch, "broken-symlink"))
     with mkfile(os.path.join(scratch, "manifest.json")) as f:
         json.dump(
             {
                 "name": "com.example.test",
                 "version": "1.0",
                 "maintainer": "Foo Bar <*****@*****.**>",
                 "title": "test title",
                 "architecture": "all",
                 "framework": "ubuntu-sdk-13.10",
             }, f)
         # build() overrides this back to 0o644
         os.fchmod(f.fileno(), 0o600)
     self.builder.add_file(scratch, "/")
     path = os.path.join(self.temp_dir, "com.example.test_1.0_all.click")
     self.assertEqual(path, self.builder.build(self.temp_dir))
     self.assertTrue(os.path.exists(path))
     for key, value in (
         ("Package", "com.example.test"),
         ("Version", "1.0"),
         ("Click-Version", "0.4"),
         ("Architecture", "all"),
         ("Maintainer", "Foo Bar <*****@*****.**>"),
         ("Description", "test title"),
     ):
         self.assertEqual(value, self.extract_field(path, key))
     self.assertNotEqual("", self.extract_field(path, "Installed-Size"))
     control_path = os.path.join(self.temp_dir, "control")
     subprocess.check_call(["dpkg-deb", "-e", path, control_path])
     manifest_path = os.path.join(control_path, "manifest")
     self.assertEqual(0o644, stat.S_IMODE(os.stat(manifest_path).st_mode))
     with open(os.path.join(scratch, "manifest.json")) as source, \
             open(manifest_path) as target:
         source_json = json.load(source)
         target_json = json.load(target)
         self.assertNotEqual("", target_json["installed-size"])
         del target_json["installed-size"]
         self.assertEqual(source_json, target_json)
     with open(os.path.join(control_path, "md5sums")) as md5sums:
         self.assertRegex(
             md5sums.read(), r"^"
             r"eb774c3ead632b397d6450d1df25e001  bin/bar\n"
             r"eb774c3ead632b397d6450d1df25e001  bin/foo\n"
             r"49327ce6306df8a87522456b14a179e0  toplevel\n"
             r"$")
     with open(os.path.join(control_path, "preinst")) as preinst:
         self.assertEqual(static_preinst, preinst.read())
     contents = subprocess.check_output(["dpkg-deb", "-c", path],
                                        universal_newlines=True)
     self.assertRegex(contents, r"^drwxr-xr-x root/root         0 .* \./\n")
     self.assertRegex(
         contents,
         r"\nlrwxrwxrwx root/root         0 .* \./bin/bar -> foo\n")
     self.assertRegex(contents,
                      r"\n-rw-r--r-- root/root        14 .* \./bin/foo\n")
     self.assertRegex(contents,
                      r"\n-rw-r--r-- root/root        15 .* \./toplevel\n")
     extract_path = os.path.join(self.temp_dir, "extract")
     subprocess.check_call(["dpkg-deb", "-x", path, extract_path])
     for rel_path in (
             os.path.join("bin", "foo"),
             "toplevel",
     ):
         with open(os.path.join(scratch, rel_path)) as source, \
                 open(os.path.join(extract_path, rel_path)) as target:
             self.assertEqual(source.read(), target.read())
     self.assertTrue(
         os.path.islink(os.path.join(extract_path, "bin", "bar")))
     self.assertEqual("foo",
                      os.readlink(os.path.join(extract_path, "bin", "bar")))
Example #43
0
def nag():
    """
    Check if pre-commits should be installed for this repository.
    If they are not and should be then annoy the developer.
    To be called in libtbx_refresh.py
    """
    if os.name == "nt" or not stat:  # unsupported
        return
    # Determine the name of the calling module, and thus the internal module name
    # of the libtbx_refresh file. Use exception trick to pick up the current frame.
    try:
        raise Exception()
    except Exception:
        frame = sys.exc_info()[2].tb_frame.f_back
    # Extract the caller name
    caller = frame.f_globals["__name__"]
    if caller == "__main__":
        # well that is not very informative, is it.
        caller = os.path.abspath(
            frame.f_code.co_filename
        )  # Get the full path of the libtbx_refresh.py file.
        refresh_file, _ = os.path.splitext(caller)
        if not refresh_file.endswith("libtbx_refresh"):
            raise RuntimeError(
                "pre-commit nagging can only be done from within libtbx_refresh.py"
            )
        # the name of the parent directory of libtbx_refresh.py is the caller name
        caller = os.path.basename(os.path.dirname(refresh_file))
    else:
        if not caller.endswith(".libtbx_refresh"):
            raise RuntimeError(
                "pre-commit nagging can only be done from within libtbx_refresh.py"
            )
        caller = caller[:-15]

    try:
        import libtbx.load_env
    except Exception as e:
        print("error on importing libtbx environment for pre-commit nagging:",
              e)
        return
    try:
        path = libtbx.env.dist_path(caller)
    except Exception as e:
        print(
            "error on obtaining module path for %s for pre-commit nagging:" %
            caller, e)
        return

    if not os.path.isdir(os.path.join(path, ".git")):
        return  # not a developer installation

    precommit_python = abs(libtbx.env.build_path / "precommitbx" / "bin" /
                           "python3")
    hookfile = os.path.join(path, ".git", "hooks", "pre-commit")
    if os.path.isfile(hookfile) and os.access(hookfile, os.X_OK):
        with open(hookfile, "r") as fh:
            precommit = fh.read()
        if "precommitbx" in precommit and os.path.exists(precommit_python):
            return  # libtbx.precommit hook is fine
        if "precommitbx for conda" in precommit:
            return  # libtbx.precommit hook with conda environment is fine
        if "generated by pre-commit" in precommit and "libtbx" not in precommit:
            return  # genuine pre-commit hook is also fine

    try:
        with open(hookfile, "w") as fh:
            fh.write("""#!/bin/bash
echo
echo Please install the DIALS pre-commit hooks before committing into the DIALS
echo repository. These hooks run simple static code analysis to catch common
echo coding mistakes early and ensure a common code style.
echo
echo The command you need to run is:
echo "  libtbx.precommit install"
echo

if [ -z "$DIALS_WITHOUT_PRECOMMITS" ]; then
  echo If you want to continue without installing pre-commits then you can override
  echo this check by setting the environment variable DIALS_WITHOUT_PRECOMMITS
fi
echo You can find more information about contributing to DIALS at:
echo https://github.com/dials/dials/blob/master/CONTRIBUTING.md
echo
if [ -z "$DIALS_WITHOUT_PRECOMMITS" ]; then
  exit 1
fi
""")
            mode = os.fstat(fh.fileno()).st_mode
            mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
            os.fchmod(fh.fileno(), stat.S_IMODE(mode))
    except Exception as e:
        print("Could not generate pre-commit stub:", e)
Example #44
0
def do_upload(dburi,
              uploaddir,
              inputfp=sys.stdin,
              output=sys.stdout,
              environ=os.environ):
    diag(1, "Command line arguments:", sys.argv)
    diag(2, "Environment:", os.environ)

    try:  # Windows needs stdio set for binary mode.
        # pylint: disable=F0401
        import msvcrt
        # pylint: disable=E1101
        msvcrt.setmode(0, os.O_BINARY)  # stdin  = 0
        msvcrt.setmode(1, os.O_BINARY)  # stdout = 1
    except ImportError:
        pass

    diag(1, "Reading POST data")
    form = cgi.FieldStorage(fp=inputfp, environ=environ)
    diag(1, "Finished reading POST data")
    diag(2, "form = ", form)
    analyze(3, "form", form)

    charset = "utf-8"
    header = "Content-Type: text/plain; charset=" + charset + "\r\n\r\n"

    # A nested FieldStorage instance holds the file
    message = None
    if "WUid" not in form:
        message = 'No "WUid" key found in POST data'
    if "clientid" not in form:
        message = 'No "clientid" key found in POST data'
    elif not os.path.isdir(uploaddir):
        message = 'Script error: %s is not a directory' % uploaddir
    else:
        wuid = form['WUid']
        clientid = form['clientid']
        if 'errorcode' in form:
            errorcode = int(form['errorcode'].value)
            diag(1, "errorcode = ", errorcode)
        else:
            errorcode = None
        if 'failedcommand' in form:
            failedcommand = int(form['failedcommand'].value)
            diag(1, "failedcommand = ", failedcommand)
        else:
            failedcommand = None

        # Test if wuid and clientid were set:
        if not wuid.value:
            message = 'No workunit was specified'
        elif not clientid.value:
            message = 'No client id was specified'

        diag(1, "wuid = ", wuid.value)
        diag(1, "clientid = ", clientid.value)

    if not message:
        filetuples = []
        if 'results' in form:
            fileitems = form['results']
            if isinstance(fileitems, cgi.FieldStorage):
                fileitems = [fileitems]  # Make it iterable
        else:
            fileitems = []
            diag(1, 'No "results" form found')
        analyze(3, "fileitems", fileitems)

        message = ""
        for fileitem in fileitems:
            if not fileitem.file:
                continue
            analyze(3, "f", fileitem)
            diag(1, "Processing file ", fileitem.filename)
            # strip leading path from file name to avoid directory traversal
            # attacks
            basename = os.path.basename(fileitem.filename)
            # Split extension from file name. We need to preserve the
            # file extension so that, e.g., gzipped files can be identified
            # as such
            (basename, suffix) = os.path.splitext(basename)
            # Make a file name which does not exist yet and create the file
            (filedesc, filename) = mkstemp(prefix=basename + '.',
                                           suffix=suffix,
                                           dir=uploaddir)
            diag(1, "output filename = ", filename)
            filestuple = [fileitem.filename, filename]

            # mkstemp() creates files with mode 0o600 (before umask), and does
            # not allow overriding this with a parameter. We change the mode
            # to 0o666 & ~umask.

            if os.name != "nt":
                # The os.umask() function gets the old umask *and* sets a new
                # one, so we have to call it twice to avoid changing it :(
                umask = os.umask(0o022)
                os.umask(umask)
                filemode = 0o666 & ~umask
                diag(1, "Setting %s to mode %o" % (filename, filemode))
                os.fchmod(filedesc, filemode)

            filetype = fileitem.headers.get("filetype", None)
            if filetype is not None:
                filestuple.append(filetype)
                diag(1, "filetype = ", filetype)
                command = fileitem.headers.get("command", None)
                if command is not None:
                    filestuple.append(command)
                    diag(1, "command = ", command)
            if False:
                filestuple[1] = os.path.basename(filestuple[1])
            filetuples.append(filestuple)

            # fd is a file descriptor, make a file object from it
            diag(1, "Getting file object for temp file")
            file = os.fdopen(filedesc, "wb")
            diag(1, "Writing data to temp file")
            try:
                copyfileobj(fileitem.file, file)
            except OSError as e:
                if e.errno == errno.ENOSPC:
                    sys.stderr.write("Disk full, deleting %s\n" % filename)
                    file.close()
                    os.unlink(filename)
                raise
            nr_bytes = file.tell()
            diag(1, "Wrote %d bytes" % nr_bytes)
            diag(1, "Closing file")
            file.close()

            # Example output:
            # upload.py: The file "testrun.polyselect.0-5000" for workunit
            # testrun_polyselect_0-5000 was uploaded successfully by client
            # localhost and stored as /localdisk/kruppaal/work/testrun.upload/
            # testrun.polyselect.0-5000.kcudj7, received 84720 bytes.
            message += 'The file "%s" for workunit %s was uploaded ' \
            'successfully by client %s and stored as %s, received %d bytes.\n' \
            % (basename, wuid.value, clientid.value, filename, nr_bytes)
            if errorcode:
                message += 'Error code = %d.\n' % errorcode
        diag(1, "Getting WuAccess object")
        wuar = wudb.WuAccess(dburi)
        diag(1, "Got WuAccess object. Calling .result()")
        try:
            wuar.result(wuid.value, clientid.value, filetuples, errorcode,
                        failedcommand)
        except wudb.StatusUpdateError:
            message = 'Workunit ' + wuid.value + ' was not currently assigned'
        else:
            message = message + 'Workunit ' + wuid.value + ' completed.\n'
        diag(1, "Finished .result()")

    diag(1, sys.argv[0] + ': ', message.rstrip("\n"))
    if output == sys.stdout:
        output.write(header + message)
    else:
        output.write((header + message).encode(charset))
Example #45
0
                return

            url = ('%s:%d/api/agent/download/%s?filename=%s' %
                   (self._state.host, self._state.port, self._selected_mid,
                    urllib2.quote(src)))
            try:
                h = UrlOpen(self._state, url)
            except urllib2.HTTPError as e:
                msg = json.loads(e.read()).get('error', 'unknown error')
                raise RuntimeError('pull: %s' % msg)
            except KeyboardInterrupt:
                return

            pbar = ProgressBar(src_base)
            with open(dst, 'w') as f:
                os.fchmod(f.fileno(), perm)
                total_size = int(h.headers.get('Content-Length'))
                downloaded_size = 0

                while True:
                    data = h.read(_BUFSIZ)
                    if len(data) == 0:
                        break
                    downloaded_size += len(data)
                    pbar.SetProgress(
                        float(downloaded_size) * 100 / total_size,
                        downloaded_size)
                    f.write(data)
            pbar.End()

        # Use find to get a listing of all files under a root directory. The 'stat'
Example #46
0
        "fallbacks": ["fi"],
        "hide_untranslated": False
    },
}

if "SECRET_KEY" not in locals():
    secret_file = os.path.join(BASE_DIR, ".django_secret")
    try:
        with open(secret_file) as f:
            SECRET_KEY = f.read().strip()
    except IOError:
        import random

        system_random = random.SystemRandom()
        try:
            SECRET_KEY = "".join([
                system_random.choice(
                    "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)")
                for i in range(64)
            ])
            with open(secret_file, "w") as f:
                import os

                os.fchmod(f.fileno(), 0o0600)
                f.write(SECRET_KEY)
                f.close()
        except IOError:
            raise Exception(
                "Please create a %s file with random characters to generate your secret key!"
                % secret_file)
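
Note the ordering in the snippet above: os.fchmod() tightens the mode to 0o600
before SECRET_KEY is written, so the generated key never sits on disk with the
default, more permissive mode.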
Example #47
0
        print("Free GPU:")
        print("---------")
        for i in free:
            print("GPU {}".format(i))
    else:
        print("No GPU available.")


# run scheduler
if __name__ == '__main__':

    args = get_args()
    init = not (os.path.isfile(GPU_INFO_FILE))
    if init and not args.init:
        print('The scheduler needs to be initialized with the --init arg first.')
        sys.exit(1)

    with open(GPU_INFO_FILE, 'w+' if init else 'r+') as f:
        if init:
            os.fchmod(f.fileno(), 0o777)
            init_gpu_info_file(f, args.init)

        if args.release:
            set_free_gpu(f, args.release)

        if args.status:
            display_status(f)

        if args.task:
            run_task(f, args)
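
Presumably the 0o777 mode at first creation is deliberate: the GPU info file is
shared state, and later scheduler runs by other users must be able to reopen
and rewrite it.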
Example #48
0
    def write_p11kit_certs(self, filename, ca_certs):
        # pylint: disable=ipa-forbidden-import
        from ipalib import x509  # FixMe: break import cycle
        from ipalib.errors import CertificateError
        # pylint: enable=ipa-forbidden-import

        path = Path(filename)
        try:
            f = open(path, 'w')
        except IOError:
            logger.error("Failed to open %s", path)
            raise

        with f:
            f.write("# This file was created by IPA. Do not edit.\n" "\n")

            try:
                os.fchmod(f.fileno(), 0o644)
            except IOError:
                logger.error("Failed to set mode of %s", path)
                raise

            has_eku = set()
            for cert, nickname, trusted, _ext_key_usage in ca_certs:
                try:
                    subject = cert.subject_bytes
                    issuer = cert.issuer_bytes
                    serial_number = cert.serial_number_bytes
                    public_key_info = cert.public_key_info_bytes
                except (PyAsn1Error, ValueError, CertificateError):
                    logger.error("Failed to decode certificate \"%s\"",
                                 nickname)
                    raise

                label = urllib.parse.quote(nickname)
                subject = urllib.parse.quote(subject)
                issuer = urllib.parse.quote(issuer)
                serial_number = urllib.parse.quote(serial_number)
                public_key_info = urllib.parse.quote(public_key_info)

                obj = ("[p11-kit-object-v1]\n"
                       "class: certificate\n"
                       "certificate-type: x-509\n"
                       "certificate-category: authority\n"
                       "label: \"%(label)s\"\n"
                       "subject: \"%(subject)s\"\n"
                       "issuer: \"%(issuer)s\"\n"
                       "serial-number: \"%(serial_number)s\"\n"
                       "x-public-key-info: \"%(public_key_info)s\"\n" %
                       dict(label=label,
                            subject=subject,
                            issuer=issuer,
                            serial_number=serial_number,
                            public_key_info=public_key_info))
                if trusted is True:
                    obj += "trusted: true\n"
                elif trusted is False:
                    obj += "x-distrusted: true\n"
                obj += "{pem}\n\n".format(
                    pem=cert.public_bytes(x509.Encoding.PEM).decode('ascii'))

                f.write(obj)

                if (cert.extended_key_usage is not None
                        and public_key_info not in has_eku):
                    try:
                        ext_key_usage = cert.extended_key_usage_bytes
                    except PyAsn1Error:
                        logger.error(
                            "Failed to encode extended key usage for \"%s\"",
                            nickname)
                        raise
                    value = urllib.parse.quote(ext_key_usage)
                    obj = ("[p11-kit-object-v1]\n"
                           "class: x-certificate-extension\n"
                           "label: \"ExtendedKeyUsage for %(label)s\"\n"
                           "x-public-key-info: \"%(public_key_info)s\"\n"
                           "object-id: 2.5.29.37\n"
                           "value: \"%(value)s\"\n\n" %
                           dict(label=label,
                                public_key_info=public_key_info,
                                value=value))
                    f.write(obj)
                    has_eku.add(public_key_info)

        return True
Example #49
0
    def create_iface(self, ipv6=False, dhcpserver_opts=None):
        '''Create test interface with DHCP server behind it'''

        # run "router-side" networkd in own mount namespace to shield it from
        # "client-side" configuration and networkd
        (fd, script) = tempfile.mkstemp(prefix='networkd-router.sh')
        self.addCleanup(os.remove, script)
        with os.fdopen(fd, 'w+') as f:
            f.write('''\
#!/bin/sh
set -eu
mkdir -p /run/systemd/network
mkdir -p /run/systemd/netif
mount -t tmpfs none /run/systemd/network
mount -t tmpfs none /run/systemd/netif
[ ! -e /run/dbus ] || mount -t tmpfs none /run/dbus
# create router/client veth pair
cat <<EOF >/run/systemd/network/test.netdev
[NetDev]
Name={ifr}
Kind=veth

[Peer]
Name={ifc}
EOF

cat <<EOF >/run/systemd/network/test.network
[Match]
Name={ifr}

[Network]
Address=192.168.5.1/24
{addr6}
DHCPServer=yes

[DHCPServer]
PoolOffset=10
PoolSize=50
DNS=192.168.5.1
{dhopts}
EOF

# run networkd as in systemd-networkd.service
exec $(systemctl cat systemd-networkd.service | sed -n '/^ExecStart=/ {{ s/^.*=//; s/^[@+-]//; s/^!*//; p}}')
'''.format(ifr=self.if_router,
            ifc=self.iface,
            addr6=('Address=2600::1/64' if ipv6 else ''),
            dhopts=(dhcpserver_opts or '')))

            os.fchmod(fd, 0o755)

        subprocess.check_call([
            'systemd-run', '--unit=networkd-test-router.service', '-p',
            'InaccessibleDirectories=-/etc/systemd/network', '-p',
            'InaccessibleDirectories=-/run/systemd/network', '-p',
            'InaccessibleDirectories=-/run/systemd/netif',
            '--service-type=notify', script
        ])

        # wait until devices got created
        for _ in range(50):
            out = subprocess.check_output(
                ['ip', 'a', 'show', 'dev', self.if_router])
            if b'state UP' in out and b'scope global' in out:
                break
            time.sleep(0.1)
Example #50
0
    def run(self, connection, max_end_time):  # pylint: disable=too-many-locals
        """
        Check if a lava-test-shell has been requested, implement the overlay
        * create test runner directories beneath the temporary location
        * copy runners into test runner directories
        """
        tmp_dir = self.mkdtemp()
        namespace = self.parameters.get('namespace')
        if namespace:
            if namespace not in get_test_action_namespaces(
                    self.job.parameters):
                self.logger.info("[%s] skipped %s - no test action.",
                                 namespace, self.name)
                return connection
        self.set_namespace_data(action='test',
                                label='shared',
                                key='location',
                                value=tmp_dir)
        lava_test_results_dir = self.get_namespace_data(
            action='test', label='results', key='lava_test_results_dir')
        if not lava_test_results_dir:
            raise LAVABug("Unable to identify top level lava test directory")
        shell = self.get_namespace_data(action='test',
                                        label='shared',
                                        key='lava_test_sh_cmd')
        self.logger.debug("[%s] Preparing overlay tarball in %s", namespace,
                          tmp_dir)
        lava_path = os.path.abspath("%s/%s" % (tmp_dir, lava_test_results_dir))
        for runner_dir in ['bin', 'tests', 'results']:
            # avoid os.path.join: lava_test_results_dir starts with '/', so the location would be *dropped* by join.
            path = os.path.abspath("%s/%s" % (lava_path, runner_dir))
            if not os.path.exists(path):
                os.makedirs(path, 0o755)
                self.logger.debug("makedir: %s", path)
        for fname in self.scripts_to_copy:
            with open(fname, 'r') as fin:
                foutname = os.path.basename(fname)
                output_file = '%s/bin/%s' % (lava_path, foutname)
                if "distro" in fname:
                    distribution = os.path.basename(os.path.dirname(fname))
                    self.logger.debug("Updating %s (%s)", output_file,
                                      distribution)
                else:
                    self.logger.debug("Creating %s", output_file)
                with open(output_file, 'w') as fout:
                    fout.write("#!%s\n\n" % shell)
                    if foutname == 'lava-target-mac':
                        fout.write("TARGET_DEVICE_MAC='%s'\n" %
                                   self.target_mac)
                    if foutname == 'lava-target-ip':
                        fout.write("TARGET_DEVICE_IP='%s'\n" % self.target_ip)
                    if foutname == 'lava-probe-ip':
                        fout.write("PROBE_DEVICE_IP='%s'\n" % self.probe_ip)
                    if foutname == 'lava-probe-channel':
                        fout.write("PROBE_DEVICE_CHANNEL='%s'\n" %
                                   self.probe_channel)
                    if foutname == 'lava-target-storage':
                        fout.write('LAVA_STORAGE="\n')
                        for method in self.job.device.get(
                                'storage_info', [{}]):
                            for key, value in method.items():
                                if key == 'yaml_line':
                                    continue
                                self.logger.debug("storage methods:\t%s\t%s",
                                                  key, value)
                                fout.write(r"\t%s\t%s\n" % (key, value))
                        fout.write('"\n')
                    fout.write(fin.read())
                    os.fchmod(fout.fileno(), self.xmod)

        # Generate the file containing the secrets
        if 'secrets' in self.job.parameters:
            self.logger.debug("Creating %s/secrets", lava_path)
            with open(os.path.join(lava_path, 'secrets'), 'w') as fout:
                for key, value in self.job.parameters['secrets'].items():
                    if key == 'yaml_line':
                        continue
                    fout.write("%s=%s\n" % (key, value))

        connection = super().run(connection, max_end_time)
        return connection
Example #51
0
    def configure_dns_resolver(self,
                               nameservers,
                               searchdomains,
                               *,
                               resolve1_enabled=False,
                               fstore=None):
        """Configure global DNS resolver (e.g. /etc/resolv.conf)

        :param nameservers: list of IP addresses
        :param searchdomains: list of search domains
        :param resolve1_enabled: True if systemd-resolved is enabled
        :param fstore: optional file store for backup
        """
        assert nameservers and isinstance(nameservers, list)
        assert searchdomains and isinstance(searchdomains, list)

        super().configure_dns_resolver(nameservers=nameservers,
                                       searchdomains=searchdomains,
                                       resolve1_enabled=resolve1_enabled,
                                       fstore=fstore)

        # break circular import
        from ipaplatform.services import knownservices

        if fstore is not None and not fstore.has_file(paths.RESOLV_CONF):
            fstore.backup_file(paths.RESOLV_CONF)

        nm = knownservices['NetworkManager']
        nm_enabled = nm.is_enabled()
        if nm_enabled:
            logger.debug("Network Manager is enabled, write %s",
                         paths.NETWORK_MANAGER_IPA_CONF)
            # write DNS override and reload network manager to have it create
            # a new resolv.conf. The file is prefixed with ``zzz`` to
            # make it the last file. Global dns options do not stack and last
            # man standing wins.
            if resolve1_enabled:
                # push DNS configuration to systemd-resolved
                dnsprocessing = "systemd-resolved"
            else:
                # update /etc/resolv.conf
                dnsprocessing = "default"

            cfg = NM_IPA_CONF.format(dnsprocessing=dnsprocessing,
                                     servers=','.join(nameservers),
                                     searches=','.join(searchdomains))
            with open(paths.NETWORK_MANAGER_IPA_CONF, 'w') as f:
                os.fchmod(f.fileno(), 0o644)
                f.write(cfg)
            # reload NetworkManager
            nm.reload_or_restart()

        if not resolve1_enabled and not nm_enabled:
            # no NM running, no systemd-resolved detected
            # fall back to /etc/resolv.conf
            logger.debug(
                "Neither Network Manager nor systemd-resolved are enabled, "
                "write %s directly.", paths.RESOLV_CONF)
            cfg = [
                "# auto-generated by IPA installer",
                "search {}".format(' '.join(searchdomains)),
            ]
            for nameserver in nameservers:
                cfg.append("nameserver {}".format(nameserver))
            with open(paths.RESOLV_CONF, 'w') as f:
                f.write('\n'.join(cfg))
Example #52
0
#!/usr/bin/env python
import virtualenv
import inspect
import os
import stat
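
# NOTE: after_install() below has no imports of its own. Its source is
# extracted and appended to the bootstrap script that virtualenv generates,
# and it relies on that script's module scope (legacy virtualenv defines
# join = os.path.join there and imports subprocess itself). That reliance is
# an assumption of this snippet.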


def after_install(options, home_dir):
    subprocess.check_call([
        join(home_dir, 'bin', 'pip'), 'install', '-U', 'construct', 'twisted'
    ])


src = ''.join(inspect.getsourcelines(after_install)[0])

script = virtualenv.create_bootstrap_script(src)
with open('virtenv-bootstrap.py', 'w') as f:
    f.write(script)
    os.fchmod(
        f.fileno(), stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH
        | stat.S_IXOTH)
Example #53
0
 def make_executable(name):
     with open(os.path.join(temp_bin, name), 'w') as f:
         os.fchmod(f.fileno(), stat.S_IRWXU)
Example #54
0
def process_file(logger, file, stdout, skip, old_platform, new_platform):
    """Add a configuration for a new platform to the modulemd file.

    The file is overwritten if stdout is False. Otherwise, the file is left
    intact and the modified content is printed.

    The file should be a modulemd-packager document with a context for the old
    platform. If it isn't and a skip argument is False, an error will be
    reported. Otherwise the file will be skipped. Nevertheles if the file
    already contains a context for the new platform, the file will also be
    skipped.

    In case of an error, return (True, an error message).
    In case of success, return (False, a warning), where the warning is an
    optional notification the user could be interested in.
    """

    # Open the modulemd-packager file
    try:
        fd = open(file, encoding='UTF-8')
    except Exception as e:
        return (True,
                'Could not open the modulemd-packager file: {}'.format(e))
    # Read the file
    try:
        content = fd.read()
    except Exception as e:
        fd.close()
        return (True,
                '{}: Could not read the modulemd-packager file: {}'.format(
                    file, e))
    if not stdout:
        # Retrieve permissions of the file
        try:
            stat = os.fstat(fd.fileno())
        except Exception as e:
            fd.close()
            return (True,
                    '{}: Could not stat the modulemd-packager file: {}'.format(
                        file, e))
    # Close the file
    fd.close()

    # Edit the document in memory
    error, text = process_string(logger, content, skip, old_platform,
                                 new_platform)
    if error == -1:
        return (False, '{}: Skipped: {}'.format(file, text))
    elif error:
        return (True, '{}: {}'.format(file, text))

    # Print the edited document to a standard output
    if stdout:
        try:
            sys.stdout.write(text)
        except Exception as e:
            return (True,
                    '{}: Could not write to a standard output: {}'.format(
                        file, e))
        return (False, None)

    # Or store the edited document into a temporary file
    try:
        temp_fd, temp_name = tempfile.mkstemp(dir=Path(file).parent, text=True)
    except Exception as e:
        return (True,
                '{}: Could not create a temporary file: {}'.format(file, e))
    temp_file = os.fdopen(temp_fd, mode='w', encoding='UTF-8')
    try:
        temp_file.write(text)
    except Exception as e:
        temp_file.close()
        return (True, '{}: Could not write to a temporary file: {}'.format(
            temp_name, e))
    # Copy file permissions
    try:
        os.fchmod(temp_fd, stat.st_mode)
    except Exception as e:
        temp_file.close()
        return (True,
                '{}: Could not copy a file mode: {}'.format(temp_name, e))
    try:
        os.fchown(temp_fd, stat.st_uid, stat.st_gid)
    except Exception as e:
        temp_file.close()
        return (True,
                '{}: Could not copy a file ownership: {}'.format(temp_name, e))
    # Close the descriptor
    try:
        temp_file.close()
    except Exception as e:
        return (True, '{}: Could not close the file: {}'.format(temp_name, e))
    # And replace the file
    try:
        os.replace(temp_name, file)
    except Exception as e:
        return (True,
                '{}: Could not rename to {}: {}'.format(temp_name, file, e))

    # Successfully stored
    return (False, None)
Example #55
0
    def get_config(self):
        config_file = home + "/.librarian.cfg"
        config = ConfigParser.ConfigParser()
        config.read(config_file)
        if not config.sections():
            #print "Cannot read config file, exiting.\n"
            # Pop up a message
            import messages
            messages.pop_info(
                _('No config file found.\nA template file has been written to disk.\nPlease edit '
                  ) + config_file +
                _(' to contain the correct login details for your databases.\nNote that it is a hidden file'
                  ))

            f = open(config_file, "w")
            # Write a dummy config file if one doesn't exist
            #The python way, but it converts everything to LOWER case!
            parser = ConfigParser.SafeConfigParser()
            parser.add_section('database')
            parser.add_section('calibre')
            parser.add_section('qr_code')
            parser.add_section('amazon_aws')
            parser.set('database', 'USER', 'username')
            parser.set('database', 'PASSWD', 'password')
            parser.set('database', 'DB', 'db_name')
            parser.set('database', 'DBHOST', 'db_host')
            parser.set('database', '# DON\'T change the LITE_DB name', '')
            parser.set('database', 'LITE_DB', 'books.db')
            parser.set(
                'database',
                '# Select either sqlite or mysql, Disable a type with a leading #',
                '')
            parser.set('database', '#use', 'sqlite')
            parser.set('database', 'use', 'mysql')
            parser.set(
                'calibre',
                '# Optional: Define path to Calibre database, Users home dir will be automatically determined.',
                '')
            parser.set('calibre', 'CALIBRE_DB', 'calibre_db')
            parser.set('qr_code', 'QR_CODE', 'False')
            parser.set('amazon_aws', 'AWS_KEY', 'AWS_KEY')
            parser.set('amazon_aws', 'SECRET_KEY', 'SECRET_KEY')
            parser.write(f)
            # Set access mode to owner only
            os.fchmod(f.fileno(), stat.S_IREAD | stat.S_IWRITE)
            f.close()
            del f

        else:
            # Now read the file
            self.db_user = config.get('database', 'USER')
            self.db_pass = config.get('database', 'PASSWD')
            self.db_base = config.get('database', 'DB')
            self.db_host = config.get('database', 'DBHOST')
            self.lite_db = config.get('database', 'LITE_DB')
            self.use = config.get('database', 'USE')
            self.qr_code = config.get('qr_code', 'QR_CODE')
            self.az_key = config.get('amazon_aws', 'AWS_KEY')
            self.az_skey = config.get('amazon_aws', 'SECRET_KEY')
            try:
                self.calibre_db = config.get('calibre', 'CALIBRE_DB')
            except ConfigParser.Error:
                pass
Example #56
0
    def _cleanup(self):
        answers = []
        if self.environment[osetupcons.CoreEnv.GENERATE_STANDARD_ANSWERFILE]:
            answers.append(
                os.path.join(
                    osetupcons.FileLocations.OVIRT_SETUP_ANSWERS_DIR,
                    '%s-%s.conf' % (
                        datetime.datetime.now().strftime('%Y%m%d%H%M%S'),
                        self.environment[osetupcons.CoreEnv.ACTION],
                    ),
                ))
        if self.environment[osetupcons.CoreEnv.ANSWER_FILE] is not None:
            answers.append(self.environment[osetupcons.CoreEnv.ANSWER_FILE])

        for answer in answers:
            self.logger.info(
                _("Generating answer file '{name}'").format(name=answer, ))
            # Generate the answer file only if valid path is passed
            try:
                with open(self.resolveFile(answer), 'w') as f:
                    os.fchmod(f.fileno(), 0o600)
                    f.write(('# action=%s\n'
                             '[environment:default]\n') %
                            (self.environment[osetupcons.CoreEnv.ACTION], ))
                    consts = []
                    wlist = []
                    for constobj in self.environment[
                            osetupcons.CoreEnv.SETUP_ATTRS_MODULES]:
                        consts.extend(constobj.__dict__['__osetup_attrs__'])
                    for c in consts:
                        for k in c.__dict__.values():
                            if hasattr(k, '__osetup_attrs__'):
                                if (k.__osetup_attrs__['answerfile']
                                        and k.__osetup_attrs__[
                                            'answerfile_condition'](
                                                self.environment)):
                                    k = k.fget(None)
                                    if (k in self.environment
                                            and k not in wlist):
                                        v = self.environment[k]
                                        f.write('%s=%s:%s\n' % (
                                            k,
                                            common.typeName(v),
                                            '\n '.join(v)
                                            # We want the next lines to be
                                            # indented, so that
                                            # configparser will treat them
                                            # as a single multi-line value.
                                            # So we join with '\n '.
                                            if isinstance(v, list) else v,
                                        ))
                                        wlist.append(k)

            except IOError as e:
                self.logger.warning(
                    _('Cannot write to answer file: {answerfile} '
                      'Error: {error}').format(
                          answerfile=answer,
                          error=e,
                      ))
                self.logger.debug(
                    'Exception while writing to answer file: %s',
                    answer,
                    exc_info=True,
                )
Example #57
0
def make_unix_socket(style, nonblock, bind_path, connect_path, short=False):
    """Creates a Unix domain socket in the given 'style' (either
    socket.SOCK_DGRAM or socket.SOCK_STREAM) that is bound to 'bind_path' (if
    'bind_path' is not None) and connected to 'connect_path' (if 'connect_path'
    is not None).  If 'nonblock' is true, the socket is made non-blocking.

    Returns (error, socket): on success 'error' is 0 and 'socket' is a new
    socket object, on failure 'error' is a positive errno value and 'socket' is
    None."""

    try:
        sock = socket.socket(socket.AF_UNIX, style)
    except socket.error as e:
        return get_exception_errno(e), None

    try:
        if nonblock:
            set_nonblocking(sock)
        if bind_path is not None:
            # Delete bind_path but ignore ENOENT.
            try:
                os.unlink(bind_path)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    return e.errno, None

            ovs.fatal_signal.add_file_to_unlink(bind_path)
            sock.bind(bind_path)

            try:
                os.fchmod(sock.fileno(), 0o700)
            except OSError:
                pass
        if connect_path is not None:
            try:
                sock.connect(connect_path)
            except socket.error as e:
                if get_exception_errno(e) != errno.EINPROGRESS:
                    raise
        return 0, sock
    except socket.error as e:
        sock.close()
        if (bind_path is not None and os.path.exists(bind_path)):
            ovs.fatal_signal.unlink_file_now(bind_path)
        eno = ovs.socket_util.get_exception_errno(e)
        if (eno == "AF_UNIX path too long" and os.uname()[0] == "Linux"):
            short_connect_path = None
            short_bind_path = None
            connect_dirfd = None
            bind_dirfd = None
            # Try workaround using /proc/self/fd
            if connect_path is not None:
                dirname = os.path.dirname(connect_path)
                basename = os.path.basename(connect_path)
                try:
                    connect_dirfd = os.open(dirname,
                                            os.O_DIRECTORY | os.O_RDONLY)
                except OSError as err:
                    return get_exception_errno(err), None
                short_connect_path = "/proc/self/fd/%d/%s" % (connect_dirfd,
                                                              basename)

            if bind_path is not None:
                dirname = os.path.dirname(bind_path)
                basename = os.path.basename(bind_path)
                try:
                    bind_dirfd = os.open(dirname, os.O_DIRECTORY | os.O_RDONLY)
                except OSError as err:
                    return get_exception_errno(err), None
                short_bind_path = "/proc/self/fd/%d/%s" % (bind_dirfd,
                                                           basename)

            try:
                return make_unix_socket(style, nonblock, short_bind_path,
                                        short_connect_path)
            finally:
                if connect_dirfd is not None:
                    os.close(connect_dirfd)
                if bind_dirfd is not None:
                    os.close(bind_dirfd)
        elif (eno == "AF_UNIX path too long"):
            if short:
                return get_exception_errno(e), None
            short_bind_path = None
            try:
                short_bind_path = make_short_name(bind_path)
                short_connect_path = make_short_name(connect_path)
            except Exception:
                free_short_name(short_bind_path)
                return errno.ENAMETOOLONG, None
            try:
                return make_unix_socket(style,
                                        nonblock,
                                        short_bind_path,
                                        short_connect_path,
                                        short=True)
            finally:
                free_short_name(short_bind_path)
                free_short_name(short_connect_path)
        else:
            return get_exception_errno(e), None
Example #58
0
def main():
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("config_file",
                            type=argparse.FileType("r"),
                            help="path to a configuration file")
    arg_parser.add_argument("--debug",
                            default=False,
                            action="store_true",
                            help="enable debug logging")
    args = arg_parser.parse_args()

    if args.debug:
        level = logging.DEBUG
    else:
        level = logging.WARNING
    logging.basicConfig(level=level)

    parser = configparser.RawConfigParser()
    parser.read_file(args.config_file)
    fetcher_config = dict(parser.items("secret-fetcher"))

    cfg = config.parse_config(
        fetcher_config, {
            "vault": {
                "url":
                config.String,
                "role":
                config.String,
                "auth_type":
                config.Optional(
                    config.OneOf(**VaultClientFactory.auth_types()),
                    default=VaultClientFactory.auth_types()["aws"]),
                "mount_point":
                config.Optional(config.String, default="aws-ec2"),
            },
            "output": {
                "path":
                config.Optional(config.String,
                                default="/var/local/secrets.json"),
                "owner":
                config.Optional(config.UnixUser, default=0),
                "group":
                config.Optional(config.UnixGroup, default=0),
                "mode":
                config.Optional(config.Integer(base=8), default=0o400),
            },
            "secrets": config.Optional(config.TupleOf(config.String),
                                       default=[]),
        })

    # pylint: disable=maybe-no-member
    client_factory = VaultClientFactory(cfg.vault.url, cfg.vault.role,
                                        cfg.vault.auth_type,
                                        cfg.vault.mount_point)
    while True:
        client = client_factory.get_client()

        secrets = {}
        soonest_expiration = client.token_expiration
        for secret_name in cfg.secrets:
            secrets[secret_name], expiration = client.get_secret(secret_name)
            soonest_expiration = min(soonest_expiration, expiration)

        with open(cfg.output.path + ".tmp", "w") as f:
            os.fchown(f.fileno(), cfg.output.owner, cfg.output.group)
            os.fchmod(f.fileno(), cfg.output.mode)

            json.dump(
                {
                    "secrets": secrets,
                    "vault": {
                        "token": client.token,
                        "url": cfg.vault.url,
                    },

                    # this is here to allow an upgrade path. the fetcher should
                    # be upgraded first followed by the application workers.
                    "vault_token": client.token,
                },
                f,
                indent=2,
                sort_keys=True)

        # swap out the file contents atomically
        os.rename(cfg.output.path + ".tmp", cfg.output.path)

        time_til_expiration = soonest_expiration - datetime.datetime.utcnow()
        time_to_sleep = time_til_expiration - VAULT_TOKEN_PREFETCH_TIME
        time.sleep(max(int(time_to_sleep.total_seconds()), 1))
Example #59
0
initCFG('server')

secret_hash = hashlib.sha512(CFG.secret_key.encode('ascii')).hexdigest()

os.umask(0o66)
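# The 0o66 umask (i.e. 0o066) closes the window between open() and the
# fchmod() calls below: the files are never group/other-accessible, even
# briefly, before their final 0o640/0o400 modes are applied.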

with open("/etc/salt/master.d/susemanager_engine.conf", "w") as f:
    f.write(
        yaml.safe_dump(mgr_events_config,
                       default_flow_style=False,
                       allow_unicode=True))
    os.fchown(f.fileno(),
              pwd.getpwnam("salt").pw_uid,
              grp.getgrnam("salt").gr_gid)
    os.fchmod(f.fileno(), 0o640)

with open("/etc/salt/master.d/susemanager-users.txt", "w") as f:
    f.write("admin:" + secret_hash)
    os.fchown(f.fileno(),
              pwd.getpwnam("salt").pw_uid,
              grp.getgrnam("salt").gr_gid)
    os.fchmod(f.fileno(), 0o400)

if not os.path.isdir("/etc/salt/pki/api"):
    os.mkdir("/etc/salt/pki/api")
    os.chown("/etc/salt/pki/api",
             pwd.getpwnam("salt").pw_uid,
             grp.getgrnam("salt").gr_gid)
    os.chmod("/etc/salt/pki/api", 0o750)
Example #60
0
 def create_test_file(*path, mode=0o644, content="content\n"):
     path = os.path.join(*path)
     os.makedirs(os.path.dirname(path), exist_ok=True)
     with open(path, "w", encoding="utf-8") as f:
         f.write(content)
         os.fchmod(f.fileno(), mode)
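
A possible usage sketch for the fixture helper above (the directory name is
hypothetical):

create_test_file("scratch", "bin", "tool", mode=0o755, content="#!/bin/sh\n")
create_test_file("scratch", "etc", "app.conf", mode=0o600, content="secret=1\n")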