Ejemplo n.º 1
0
def check_output(*popenargs, **kwargs):
    r"""Run a command and return its standard output as a byte string.

    A non-zero exit status raises CalledProcessError; the exception carries
    the exit status in ``returncode`` and the captured output in ``output``.

    Arguments mirror the Popen constructor.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    Passing ``stdout`` is rejected because this function owns that pipe.
    Fold standard error into the result with stderr=STDOUT:

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    from subprocess import PIPE, CalledProcessError, Popen
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    proc = Popen(stdout=PIPE, *popenargs, **kwargs)
    captured, _ = proc.communicate()
    status = proc.poll()
    if not status:
        return captured
    # Prefer an explicit args= keyword; otherwise the first positional is the command.
    command = kwargs.get("args")
    if command is None:
        command = popenargs[0]
    failure = CalledProcessError(status, command)
    failure.output = captured
    raise failure
Ejemplo n.º 2
0
 def check_media_file(self, filename):
     """Run ``self.validate_cmd`` against *filename* and report validity.

     Prints '<filename> => OK' when the tool exits zero and emits no
     'Error' text.  Otherwise either prints the INVALID message and
     returns False (when ``self.skip_errors`` is set, also flagging
     ``self.failed``) or calls die() with the INVALID message.
     """
     valid_media_msg = '%s => OK' % filename
     invalid_media_msg = '%s => INVALID' % filename
     try:
         # cmd = self.validate_cmd.format(filename)
         cmd = self.validate_cmd
         log.debug('cmd: %s %s', cmd, filename)
         log.info('verifying {0}'.format(filename))
         # capturing stderr to stdout because ffprobe prints to stderr in all cases
         # Python 2.7+
         #subprocess.check_output(cmd.split() + [filename], stderr=subprocess.STDOUT)
         proc = subprocess.Popen(cmd.split() + [filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
         (stdout, _) = proc.communicate()
         returncode = proc.wait()
         # BUG FIX: communicate() returns bytes on Python 3, so the original
         # "'Error' in stdout" raised TypeError there.  A bytes literal works
         # on both Python 2 (where bytes is str) and Python 3.
         if returncode != 0 or (stdout is not None and b'Error' in stdout):
             error = CalledProcessError(returncode, cmd)
             error.output = stdout
             raise error
         print(valid_media_msg)
     except CalledProcessError as error:
         if self.verbose > 2:
             print(error.output)
         if self.skip_errors:
             print(invalid_media_msg)
             self.failed = True
             return False
         die(invalid_media_msg)
Ejemplo n.º 3
0
    def test_execute_error_quiet(self, check_call, check_output):
        """execute() must raise BumprError when the underlying command fails."""
        failure = CalledProcessError(1, 'cmd')
        failure.output = 'some output'
        check_output.side_effect = failure

        # print lives in a different module on Python 2 vs Python 3
        print_target = '{0}.print'.format('builtins' if IS_PY3 else '__builtin__')
        with pytest.raises(BumprError):
            with patch(print_target):
                execute('some failed command')
Ejemplo n.º 4
0
 def __init__(self, *, command: str, call_error: CalledProcessError) -> None:
     """Build this error from *command* and the CalledProcessError that caused it.

     Initialises the first base class with the command and exit code, then
     explicitly runs CalledProcessError.__init__ so this instance also
     carries returncode/cmd/output/stderr.  NOTE(review): the explicit
     second-base call suggests super() does not reach CalledProcessError in
     this class's MRO — enclosing class not visible here, confirm.
     """
     super().__init__(command=command, exit_code=call_error.returncode)
     # Copy every field of the original CalledProcessError onto this object
     # so generic subprocess error handlers can read them directly.
     CalledProcessError.__init__(
         self,
         returncode=call_error.returncode,
         cmd=call_error.cmd,
         output=call_error.output,
         stderr=call_error.stderr,
     )
Ejemplo n.º 5
0
    def test_fails_on_non_expected_exception(self):
        """A CalledProcessError that isn't the expected kind must propagate."""
        mock_client = _get_time_noop_mock_client()

        failure = CalledProcessError(-1, 'blah')
        failure.stderr = '"" is not a valid tag'
        controller_client = Mock()
        controller_client.get_models.side_effect = [failure]
        mock_client.get_controller_client.return_value = controller_client

        with self.assertRaises(CalledProcessError):
            amm.wait_until_model_disappears(
                mock_client, 'test_model', timeout=60)
Ejemplo n.º 6
0
def log_check_call(*args, **kwargs):
    """Run log_call() with output recording; raise CalledProcessError on failure.

    The recorded output is attached to the exception's ``output`` attribute.
    Returns 0 on success.
    """
    kwargs['record_output'] = True
    retcode, output = log_call(*args, **kwargs)
    if retcode == 0:
        return 0
    # Prefer an explicit args= keyword; fall back to the first positional.
    cmd = kwargs.get('args')
    if cmd is None:
        cmd = args[0]
    failure = CalledProcessError(retcode, cmd)
    failure.output = output
    raise failure
 def check_output(*args, **kwds):
     """Run a command and return its stdout bytes; raise CalledProcessError otherwise."""
     proc = Popen(stdout=PIPE, *args, **kwds)
     captured, _ = proc.communicate()
     code = proc.poll()
     if not code:
         return captured
     # args= keyword wins over the first positional argument as the command name.
     cmd = kwds.get("args")
     if cmd is None:
         cmd = args[0]
     failure = CalledProcessError(code, cmd)
     failure.output = captured
     raise failure
def check_output(*popenargs, **kwargs):
    """Run command with arguments and return its output as a byte string."""
    proc = Popen(stdout=PIPE, *popenargs, **kwargs)
    captured, _ = proc.communicate()
    status = proc.poll()
    if status:
        # args= keyword wins over the first positional argument.
        command = kwargs.get("args")
        if command is None:
            command = popenargs[0]
        failure = CalledProcessError(status, command)
        failure.output = captured
        raise failure
    return captured
 def mockreturn(*args, **kwargs):
     """Canned subprocess stand-in: stats text for 'task stats', error for 'task overdue'."""
     if args == (['task', 'stats'],):
         return b"""
         Category Data
         -------- ----
         Pending 0
         Waiting 0"""
     elif args == (['task', 'overdue'],):
         failure = CalledProcessError(1, 'task')
         failure.output = b'No matches.'
         raise failure
Ejemplo n.º 10
0
def call_for_stderr(command, *args, **kwargs):
    """Run *command* capturing stderr and return it decoded as UTF-8.

    On a non-zero exit, the decoded stderr text is attached to the raised
    CalledProcessError as ``output``.
    """
    kwargs["stderr"] = _subprocess.PIPE

    proc = start_process(command, *args, **kwargs)
    captured = proc.communicate()[1].decode("utf-8")
    exit_code = proc.poll()

    if exit_code == 0:
        return captured

    failure = CalledProcessError(exit_code, proc.command_string)
    failure.output = captured
    raise failure
Ejemplo n.º 11
0
    def check_output(arguments, stdin=None, stderr=None, shell=False):
        """Run *arguments* via call(), capturing stdout through a temporary file.

        Returns the captured text on success; a non-zero exit raises
        CalledProcessError with the captured text attached as ``output``.
        """
        fd, path = mkstemp()
        returncode = call(arguments, stdin=stdin, stdout=fd, stderr=stderr, shell=shell)
        close(fd)
        with open(path, 'r') as handle:
            cmd_output = handle.read()
        remove(path)

        if returncode == 0:
            return cmd_output
        failure = CalledProcessError(returncode, arguments[0])
        failure.output = cmd_output
        raise failure
def check_output(run_args, *args, **kwargs):
    """Run *run_args*, returning (stdout, stderr) as captured bytes.

    Both streams are piped.  On a non-zero exit status a CalledProcessError
    is raised with the captured streams attached as ``stdout``/``stderr``.

    Fix: the original tested ``retcode is not 0`` — an identity comparison
    against an int literal, which is implementation-dependent and a
    SyntaxWarning on Python >= 3.8; use an equality test instead.
    """
    kwargs['stdout'] = PIPE
    kwargs['stderr'] = PIPE

    process = Popen(run_args, *args, **kwargs)
    stdout, stderr = process.communicate()

    retcode = process.poll()
    if retcode != 0:
        exception = CalledProcessError(retcode, run_args[0])
        exception.stdout = stdout
        exception.stderr = stderr
        raise exception

    return stdout, stderr
Ejemplo n.º 13
0
	def __lt__(self,other):
		"""Pipe *other* to this command's stdin and return its captured output.

		Overloads ``<`` as shell-style input redirection: ``cmd < data`` runs
		the command with *data* on stdin.  Raises CalledProcessError (with
		``stdout``/``stderr`` attached) on a non-zero exit.
		NOTE(review): *other* must be a type communicate() accepts for stdin
		(str on Python 2, bytes unless text mode on Python 3) — confirm.
		"""
		cmd = self.__get_recursive_name()
		#print "	",cmd,"<",other
		popen = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
		# communicate() writes *other* to stdin and returns (stdout, stderr)
		m = popen.communicate(other)
		ret = popen.wait()
		if ret:
			e = CalledProcessError(ret,cmd)
			# attach both captured streams for the caller's post-mortem
			e.stdout,e.stderr = m
			raise e
		# Lightweight result holder exposing .stdout / .stderr attributes.
		class CommandOutput:
			def __init__(self,stdout,stderr):
				self.stdout = stdout
				self.stderr = stderr
		return CommandOutput(*m)
Ejemplo n.º 14
0
 def __run(self, *args, **kwargs):
     """Execute the given argument pieces via Popen and return stdout.

     None arguments are dropped.  Arguments are masked through
     self.__hidePassw before being logged, so secrets never reach the log.
     On a non-zero exit status the combined stdout+stderr is attached to a
     CalledProcessError and the project's Error('1001') is raised instead.
     """
     _args = [i for i in args if i is not None]
     # Mask sensitive values before logging the command line.
     argsLog = [self.__hidePassw(i) for i in args if i is not None ]
     logging.debug("CMD: " + " ". join(argsLog))
     # Caller may override stdout/stderr/close_fds; pipes are the default.
     process = Popen(
         _args, stdout=kwargs.pop('stdout', PIPE),
         stderr=kwargs.pop('stderr', PIPE),
         close_fds=kwargs.pop('close_fds', True), **kwargs)
     stdout, stderr = process.communicate()
     if process.returncode:
         exception = CalledProcessError(
             process.returncode, repr(args))
         # filter(None, ...) drops whichever stream is empty/None.
         # NOTE(review): ''.join assumes str streams — on Python 3 these
         # pipes yield bytes unless text mode is set; presumably Python 2
         # code, confirm before reuse.
         exception.output = ''.join(filter(None, [stdout, stderr]))
         raise Error('1001', err=exception.output)
     return stdout
Ejemplo n.º 15
0
 def func(*args):
     """Wrapper closure: run *command_line* with *args* appended, return (stdout, stderr)."""
     cmd = [command_line] + [str(arg) for arg in args]
     proc = subprocess.Popen(cmd, stderr=PIPE, stdout=PIPE)
     stdout, stderr = proc.communicate()
     proc.wait()
     if not proc.returncode:
         return stdout, stderr
     failure = CalledProcessError(
         returncode=proc.returncode,
         cmd=" ".join(cmd),
         output=stdout,
     )
     failure.stderr = stderr
     raise failure
Ejemplo n.º 16
0
def check_call_capturing(arguments, input = None, preexec_fn = None):
    """Spawn a process and return its (stdout, stderr) on success.

    A non-zero exit raises CalledProcessError with both captured streams
    attached as ``stdout`` and ``stderr``.
    """
    (stdout, stderr, code) = call_capturing(arguments, input, preexec_fn)

    if code != 0:
        from subprocess import CalledProcessError

        failure = CalledProcessError(code, arguments)
        failure.stdout = stdout
        failure.stderr = stderr
        raise failure

    return (stdout, stderr)
Ejemplo n.º 17
0
 def __run(self, *args, **kwargs):
     """Execute the given argument pieces and return stdout.

     None arguments are dropped; arguments are masked via self.__hidePassw
     before logging.  Failure is detected either by a non-zero exit status
     or by the literal text 'Error' appearing on stderr; in that case the
     combined output is logged and the project's Error('1001') is raised.
     """
     _args = [i for i in args if i is not None]
     # Mask sensitive values before logging the command line.
     argsLog = [self.__hidePassw(i) for i in args if i is not None]
     logging.debug("Shell _run CMD: " + " ".join(argsLog))
     process = Popen(_args, stdout=PIPE, stderr=PIPE, close_fds=True, **kwargs)
     stdout, stderr = process.communicate()
     retCode = process.returncode
     logging.debug("Shell _run retCode: %d", retCode)
     logging.debug("            stdout:'%s'", stdout)
     logging.debug("            stderr:'%s'", stderr)
     # NOTE(review): "Error" in stderr and "".join below assume str streams;
     # on Python 3 these pipes yield bytes unless text mode is set —
     # presumably Python 2 code, confirm before reuse.
     if retCode or ((len(stderr) > 0) and ("Error" in stderr)):
         exception = CalledProcessError(process.returncode, repr(args))
         exception.output = "".join(filter(None, [stdout, stderr]))
         logging.debug("exception.output:'%s'", exception.output)
         raise Error("1001", err=repr(exception.output))
     return stdout
Ejemplo n.º 18
0
    def test_ignores_model_detail_exceptions(self):
        """ignore errors for model details as this might happen many times."""
        mock_client = _get_time_noop_mock_client()
        model_data = {'models': [{'name': ''}]}

        failure = CalledProcessError(-1, 'blah')
        failure.stderr = 'cannot get model details'
        controller_client = Mock()
        # First poll raises, second returns data — the wait must ride through.
        controller_client.get_models.side_effect = [failure, model_data]
        mock_client.get_controller_client.return_value = controller_client

        with patch.object(amm, 'sleep') as mock_sleep:
            amm.wait_until_model_disappears(
                mock_client, 'test_model', timeout=60)
            mock_sleep.assert_called_once_with(1)
Ejemplo n.º 19
0
def call_for_output(command, *args, **kwargs):
    """Run *command* and return its captured stdout.

    On failure, the command string (formatted with *args*) and the captured
    output are attached to the raised CalledProcessError.
    """
    kwargs["stdout"] = _subprocess.PIPE

    proc = start_process(command, *args, **kwargs)
    output = proc.communicate()[0]
    exit_code = proc.poll()

    if exit_code in (None, 0):
        return output

    command_string = _command_string(command).format(*args)

    failure = CalledProcessError(exit_code, command_string)
    failure.output = output
    raise failure
Ejemplo n.º 20
0
def check_output(*args, **kwargs):
    """subprocess.check_output work-alike: return stdout bytes or raise.

    ``stdout`` may not be passed by the caller since this function owns
    that pipe.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    proc = subprocess.Popen(stdout=subprocess.PIPE, *args, **kwargs)
    captured, _ = proc.communicate()
    code = proc.poll()
    if not code:
        return captured
    # args= keyword wins over the first positional argument as the command.
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = args[0]
    failure = CalledProcessError(code, cmd)
    failure.output = captured
    raise failure
Ejemplo n.º 21
0
def log_check_call(*args, **kwargs):
    """Run a command, logging each stdout line as it arrives.

    stderr is folded into stdout; PYTHONUNBUFFERED is forced so child
    Python processes stream promptly.  The optional ``log`` kwarg (default
    logging.debug) receives each line.  Returns 0 on success; a non-zero
    exit raises CalledProcessError with the collected output attached.
    """
    log = kwargs.pop('log', logging.debug)
    kwargs['stdout'] = PIPE
    kwargs['stderr'] = STDOUT
    # Copy the environment so the caller's os.environ is not mutated.
    kwargs['env'] = env = kwargs.get('env', os.environ.copy())
    env['PYTHONUNBUFFERED'] = '1'
    p = Popen(*args, **kwargs)
    output = []
    # NOTE(review): the '' sentinel is a str; on Python 3 this byte-mode
    # pipe yields b'' at EOF, so the loop would never end — presumably
    # Python 2 code (or text-mode Popen elsewhere); confirm before reuse.
    for line in iter(p.stdout.readline, ''):
        log(line.rstrip())
        output.append(line)
    retcode = p.wait()
    if retcode != 0:
        cmd = kwargs.get('args') or args[0]
        e = CalledProcessError(retcode, cmd)
        e.output = ''.join(output)
        raise e
    return 0
Ejemplo n.º 22
0
def execute_php_export(command, articleid):
    """Run the PHP export *command* for *articleid* (Python 2 syntax).

    Treats ANY output on stdout or stderr — not just a non-zero exit — as
    failure, raising CalledProcessError with the combined output attached.
    Returns the exit code on success.
    """
    print "PHP Exporting article {0}:\n\t`$ {1}\n`".format(articleid, command)
    args = command.split()
    process = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    pout, perr = process.communicate()
    code = process.poll()
    # Any output at all counts as failure, not only a non-zero exit code.
    if code or pout or perr:
        output = pout + perr
        try:
            # NOTE(review): this raise is always intercepted by the except
            # below (CalledProcessError is an Exception), so the fallback
            # branch is what actually propagates — apparently a Python
            # 2.6/2.7 compat shim for the output= constructor arg; confirm.
            raise CalledProcessError(
                code, command, output=output
            )
        except Exception:
            # Older CalledProcessError lacks output=; attach it manually.
            error = CalledProcessError(code, command)
            error.output = output
            raise error
    print "PHP export of article {0} complete\n".format(articleid)
    return code
Ejemplo n.º 23
0
	def __call__(self,*args,**kwargs):
		"""Run the command named by this object with *args* appended.

		Extra kwargs are forwarded to Popen; stdout/stderr default to pipes
		unless the caller supplies them.  Raises CalledProcessError (with
		``stdout``/``stderr`` attached) on a non-zero exit; otherwise
		returns an object exposing .stdout and .stderr.
		"""
		cmd = self.__get_recursive_name() + list(args)
		#print "	",cmd
		# Copy so the caller's kwargs dict is not mutated by the defaults below.
		kwargs = dict(kwargs)
		if "stdout" not in kwargs: kwargs["stdout"] = subprocess.PIPE
		if "stderr" not in kwargs: kwargs["stderr"] = subprocess.PIPE
		popen = subprocess.Popen(cmd,**kwargs)
		m = popen.communicate()
		ret = popen.wait()
		if ret:
			e = CalledProcessError(ret,cmd)
			# attach both captured streams for the caller's post-mortem
			e.stdout,e.stderr = m
			raise e
		# Lightweight result holder exposing .stdout / .stderr attributes.
		class CommandOutput:
			def __init__(self,stdout,stderr):
				self.stdout = stdout
				self.stderr = stderr
		return CommandOutput(*m)
Ejemplo n.º 24
0
def _check_output(*popenargs, **kwargs):
    # Copyright (c) 2003-2005 by Peter Astrand <*****@*****.**>
    #
    # Licensed to PSF under a Contributor Agreement.
    # See http://www.python.org/2.4/license for licensing details.
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = Popen(stdout=PIPE, *popenargs, **kwargs)
    output, _ = process.communicate()
    retcode = process.poll()
    if retcode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        e = CalledProcessError(retcode, cmd)
        e.output = output
        raise e
    return output
Ejemplo n.º 25
0
    def check_output(*popenargs, **kwargs):
        """Pure-Python stand-in for Python 2.7's subprocess.check_output.

        Runs the command with stdout piped and returns the captured bytes;
        a non-zero exit raises CalledProcessError with ``output`` attached.
        """
        proc = Popen(stdout=PIPE, *popenargs, **kwargs)
        captured, _ = proc.communicate()
        status = proc.poll()
        if not status:
            return captured
        command = kwargs.get("args")
        if command is None:
            command = popenargs[0]
        failure = CalledProcessError(status, command)
        failure.output = captured
        raise failure
Ejemplo n.º 26
0
def capture_subprocess(cmd, encoding='UTF-8', **popen_kwargs):
    """Run a command, showing its usual outputs in real time,
    and return its stdout, stderr output as strings.

    No temporary files are used.

    Raises CalledProcessError on a non-zero exit; the (stdout, stderr)
    tuple is attached to the exception as the nonstandard ``result``
    attribute.  Pass encoding=None to receive raw bytes.
    NOTE(review): Pty/Pipe/Tee/STDOUT/STDERR are project helpers not
    visible here — the tee/fd semantics below are taken from the comments.
    """
    stdout = Pty()  # libc uses full buffering for stdout if it doesn't see a tty
    stderr = Pipe()

    # deadlocks occur if we have any write-end of a pipe open more than once
    # best practice: close any used write pipes just after spawn
    outputter = Popen(
        cmd,
        stdout=stdout.write,
        stderr=stderr.write,
        **popen_kwargs
    )
    stdout.readonly()  # deadlock otherwise
    stderr.readonly()  # deadlock otherwise

    # start one tee each on the original stdout and stderr
    # writing each to three places:
    #    1. the original destination
    #    2. a pipe just for that one stream
    stdout_tee = Tee(stdout.read, STDOUT)
    stderr_tee = Tee(stderr.read, STDERR)

    # clean up left-over processes and pipes:
    exit_code = outputter.wait()
    result = (stdout_tee.join(), stderr_tee.join())

    # Decode both streams unless the caller asked for raw bytes.
    if encoding is not None:
        result = tuple(
            bytestring.decode(encoding)
            for bytestring in result
        )

    if exit_code == 0:
        return result
    else:
        error = CalledProcessError(exit_code, cmd)
        # Nonstandard attribute: callers read .result, not .output.
        error.result = result
        raise error
Ejemplo n.º 27
0
    def __submit(self, call, env):
        """Run *call* under *env*, returning (stdout, stderr) text on success.

        stderr is redirected to stdout at the Popen level, so the stderr
        string assembled below is expected to stay empty.  On a non-zero
        exit, both strings are printed and a CalledProcessError carrying
        them as ``stdout``/``stderr`` is raised.
        """
        output_chunks = []
        process = Popen(call, env=env, stderr=STDOUT, stdout=PIPE)
        # communicate() blocks until the process exits and sets returncode,
        # so this loop normally runs exactly once.  NOTE(review): a second
        # iteration would call communicate() on a finished process —
        # presumably never happens in practice; confirm intent.
        while process.returncode is None:
            output_chunks.append(process.communicate())
            sleep(0.1)
        stdout = "".join([c[0].decode('utf-8') for c in output_chunks if c[0] is not None])
        stderr = "".join([c[1].decode('utf-8') for c in output_chunks if c[1] is not None])

        if process.returncode == 0:
            return stdout, stderr

        exc = CalledProcessError(process.returncode, call)
        exc.stdout = stdout
        exc.stderr = stderr

        print(stdout)
        print(stderr)

        raise exc
Ejemplo n.º 28
0
def check_output(command, cwd=None, shell=False, env=None,
                 stdin=__sentinel__, stderr=__sentinel__,
                 preexec_fn=None, use_texpath=True,
                 show_window=False):
    '''
    Run *command* through execute_command and return its captured output.

    By default stderr is folded into stdout (so any output on either
    stream is returned); pass stderr=subprocess.PIPE or another valid
    value to change that.

    Raises CalledProcessError — with ``output`` and ``stderr`` attached —
    when the command exits non-zero, and OSError if the executable is not
    found.  Mirrors subprocess.check_output(), which Python 2.6 lacks.
    '''
    returncode, stdout, stderr = execute_command(
        command,
        cwd=cwd,
        shell=shell,
        env=env,
        stdin=stdin,
        stderr=stderr,
        preexec_fn=preexec_fn,
        use_texpath=use_texpath,
        show_window=show_window
    )

    if not returncode:
        return stdout

    failure = CalledProcessError(returncode, command)
    failure.output = stdout
    failure.stderr = stderr
    raise failure
Ejemplo n.º 29
0
    def __run(self, *args, **kwargs):
        """Execute the given argument pieces and return (stdout, stderr).

        None arguments are dropped; arguments are masked via
        self.__hidePassw before logging.  Failure is a non-zero exit, or
        'Error' on stderr; the assembled message is raised as the
        project's Error('1001').
        """
        _args = [i for i in args if i is not None]
        # Mask sensitive values before logging the command line.
        argsLog = [self.__hidePassw(i) for i in args if i is not None ]
        logging.debug("Shell _run CMD: " + " ". join(argsLog))
        process = Popen(
            _args, stdout = PIPE, stderr = PIPE, close_fds = True, **kwargs)
        stdout, stderr = process.communicate()
        retCode = process.returncode
        logging.debug("Shell _run retCode: %d",  retCode)
        logging.debug("            stdout:'%s'",  stdout)
        logging.debug("            stderr:'%s'",  stderr)

        # NOTE(review): the final "(retCode == 4) and ..." disjunct is
        # redundant — retCode == 4 already makes the first operand truthy.
        # Also assumes str streams ('Error' in stderr); on Python 3 these
        # pipes yield bytes — presumably Python 2 code, confirm.
        if retCode or ((len(stderr) > 0) and ('Error' in stderr)) or ((retCode == 4) and ('<Exception>' in stdout)):
            exception = CalledProcessError(process.returncode, repr(args))
            err_msg = "retCode: "+str(retCode)+"\n'"+str(''.join(filter(None, [stderr])))
            # Exit code 4 carries the tool's exception text on stdout; append it.
            if (retCode == 4) and ('<Exception>' in stdout):
                err_msg += str(''.join(filter(None, [stdout])))
            exception.output = err_msg +"'"
            logging.debug("exception.output:'%s'",  err_msg)
            raise Error('1001', err=str(err_msg))
        return stdout, stderr
Ejemplo n.º 30
0
def out_and_err(command, input=None, shell=False, env=None):
    """Run a shell command and return its (stdout, stderr) pair.

    A non-zero exit raises CalledProcessError with stdout attached as
    ``output``.

    :arg command: A list of commandline args
    :arg input: Data to pipe to stdin. Omit for none.

    Remaining args have the same meaning as for Popen.
    """
    proc = Popen(command,
                 stdout=PIPE,
                 stdin=PIPE,
                 stderr=PIPE,
                 shell=shell,
                 env=env)
    out, err = proc.communicate(input=input)
    status = proc.poll()  # process has exited after communicate(); poll() just reads the code
    if not status:
        return out, err
    failure = CalledProcessError(status, command)
    failure.output = out
    raise failure
Ejemplo n.º 31
0
class ErrorFormattingTestCase(unit.TestCase):

    scenarios = (
        ('MissingStateCleanError', {
            'exception':
            errors.MissingStateCleanError,
            'kwargs': {
                'step': 'test-step'
            },
            'expected_message':
            ("Failed to clean: "
             "Missing state for 'test-step'. "
             "To clean the project, run `snapcraft clean`.")
        }),
        ('StepOutdatedError dependents', {
            'exception':
            errors.StepOutdatedError,
            'kwargs': {
                'step': 'test-step',
                'part': 'test-part',
                'dependents': ['test-dependent']
            },
            'expected_message':
            ("Failed to reuse files from previous build: "
             "The 'test-step' step of 'test-part' is out of date:\n"
             "The 'test-step' step for 'test-part' needs to be run again, "
             "but 'test-dependent' depends on it.\n"
             "To continue, clean that part's "
             "'test-step' step, run "
             "`snapcraft clean test-dependent -s test-step`.")
        }),
        ('StepOutdatedError dirty_properties', {
            'exception':
            errors.StepOutdatedError,
            'kwargs': {
                'step': 'test-step',
                'part': 'test-part',
                'dirty_properties': ['test-property1', 'test-property2']
            },
            'expected_message':
            ("Failed to reuse files from previous build: "
             "The 'test-step' step of 'test-part' is out of date:\n"
             "The 'test-property1' and 'test-property2' part properties "
             "appear to have changed.\n"
             "To continue, clean that part's "
             "'test-step' step, run "
             "`snapcraft clean test-part -s test-step`.")
        }),
        ('StepOutdatedError dirty_project_options', {
            'exception':
            errors.StepOutdatedError,
            'kwargs': {
                'step': 'test-step',
                'part': 'test-part',
                'dirty_project_options': ['test-option']
            },
            'expected_message':
            ("Failed to reuse files from previous build: "
             "The 'test-step' step of 'test-part' is out of date:\n"
             "The 'test-option' project option appears to have changed.\n"
             "To continue, clean that part's "
             "'test-step' step, run "
             "`snapcraft clean test-part -s test-step`.")
        }),
        ('SnapcraftEnvironmentError', {
            'exception': errors.SnapcraftEnvironmentError,
            'kwargs': {
                'message': 'test-message'
            },
            'expected_message': 'test-message'
        }),
        ('SnapcraftMissingLinkerInBaseError', {
            'exception':
            errors.SnapcraftMissingLinkerInBaseError,
            'kwargs': {
                'base': 'core18',
                'linker_path': '/snap/core18/current/lib64/ld-linux.so.2'
            },
            'expected_message':
            ("Cannot find the linker to use for the target base 'core18'.\n"
             "Please verify that the linker exists at the expected path "
             "'/snap/core18/current/lib64/ld-linux.so.2' and try again. If "
             "the linker does not exist contact the author of the base "
             "(run `snap info core18` to get information for this "
             "base).")
        }),
        ('ContainerError', {
            'exception': errors.ContainerError,
            'kwargs': {
                'message': 'test-message'
            },
            'expected_message': 'test-message'
        }),
        ('ContainerConnectionError', {
            'exception':
            errors.ContainerConnectionError,
            'kwargs': {
                'message': 'test-message'
            },
            'expected_message':
            ('test-message\n'
             'Refer to the documentation at '
             'https://linuxcontainers.org/lxd/getting-started-cli.')
        }),
        ('ContainerRunError string', {
            'exception':
            errors.ContainerRunError,
            'kwargs': {
                'command': 'test-command',
                'exit_code': '1'
            },
            'expected_message': ("The following command failed to run: "
                                 "'test-command' exited with 1\n")
        }),
        ('ContainerRunError list', {
            'exception':
            errors.ContainerRunError,
            'kwargs': {
                'command': ['test-command', 'test-argument'],
                'exit_code': '1'
            },
            'expected_message':
            ("The following command failed to run: "
             "'test-command test-argument' exited with 1\n")
        }),
        ('ContainerSnapcraftCmdError string', {
            'exception':
            errors.ContainerSnapcraftCmdError,
            'kwargs': {
                'command': 'test-command',
                'exit_code': '1'
            },
            'expected_message': ("Snapcraft command failed in the container: "
                                 "'test-command' exited with 1\n")
        }),
        ('ContainerSnapcraftCmdError list', {
            'exception':
            errors.ContainerSnapcraftCmdError,
            'kwargs': {
                'command': ['test-command', 'test-argument'],
                'exit_code': '1'
            },
            'expected_message':
            ("Snapcraft command failed in the container: "
             "'test-command test-argument' exited with 1\n")
        }),
        ('SnapdError', {
            'exception': errors.SnapdError,
            'kwargs': {
                'message': 'test-message'
            },
            'expected_message': 'test-message'
        }),
        ('PrimeFileConflictError', {
            'exception':
            errors.PrimeFileConflictError,
            'kwargs': {
                'fileset': {'test-file'}
            },
            'expected_message':
            ("Failed to filter files: "
             "The following files have been excluded by the `stage` "
             "keyword, but included by the `prime` keyword: "
             "{'test-file'}. "
             "Edit the `snapcraft.yaml` to make sure that the files "
             "included in `prime` are also included in `stage`.")
        }),
        ('InvalidAppCommandError', {
            'exception':
            errors.InvalidAppCommandError,
            'kwargs': {
                'command': 'test-command',
                'app': 'test-app'
            },
            'expected_message':
            ("Failed to generate snap metadata: "
             "The specified command 'test-command' defined in the app "
             "'test-app' does not exist or is not executable")
        }),
        ('InvalidContainerRemoteError', {
            'exception':
            errors.InvalidContainerRemoteError,
            'kwargs': {
                'remote': 'test-remote'
            },
            'expected_message':
            ("Failed to use LXD remote: "
             "'test-remote' is not a valid name.\n"
             "Use a LXD remote without colons, spaces and slashes in the "
             "name.\n")
        }),
        ('InvalidDesktopFileError', {
            'exception':
            errors.InvalidDesktopFileError,
            'kwargs': {
                'filename': 'test-file',
                'message': 'test-message'
            },
            'expected_message':
            ("Failed to generate desktop file: "
             "Invalid desktop file 'test-file': test-message.")
        }),
        ('SnapcraftPartMissingError', {
            'exception':
            errors.SnapcraftPartMissingError,
            'kwargs': {
                'part_name': 'test-part'
            },
            'expected_message':
            ("Failed to get part information: "
             "Cannot find the definition for part 'test-part'. "
             "If it is a remote part, run `snapcraft update` "
             "to refresh the remote parts cache. "
             "If it is a local part, make sure that it is defined in the "
             "`snapcraft.yaml`.")
        }),
        ('PartNotInCacheError', {
            'exception':
            errors.PartNotInCacheError,
            'kwargs': {
                'part_name': 'test-part'
            },
            'expected_message':
            ("Failed to get remote part information: "
             "Cannot find the part name 'test-part' in the cache. "
             "If it is an existing remote part, run `snapcraft update` "
             "and try again. If it has not been defined, consider going to "
             "https://wiki.ubuntu.com/snapcraft/parts to add it.")
        }),
        ('PluginError', {
            'exception': errors.PluginError,
            'kwargs': {
                'message': 'test-message'
            },
            'expected_message': 'Failed to load plugin: test-message'
        }),
        ('SnapcraftPartConflictError', {
            'exception':
            errors.SnapcraftPartConflictError,
            'kwargs': {
                'part_name': 'test-part',
                'other_part_name': 'test-other-part',
                'conflict_files': ('test-file1', 'test-file2')
            },
            'expected_message':
            ("Failed to stage: "
             "Parts 'test-other-part' and 'test-part' have the following "
             "files, but with different contents:\n"
             "    test-file1\n"
             "    test-file2\n"
             "\n"
             "Snapcraft offers some capabilities to solve this by use of "
             "the following keywords:\n"
             "    - `filesets`\n"
             "    - `stage`\n"
             "    - `snap`\n"
             "    - `organize`\n"
             "\n"
             "To learn more about these part keywords, run "
             "`snapcraft help plugins`.")
        }),
        ('MissingCommandError', {
            'exception':
            errors.MissingCommandError,
            'kwargs': {
                'required_commands': ['test-command1', 'test-command2']
            },
            'expected_message':
            ("Failed to run command: "
             "One or more packages are missing, please install:"
             " ['test-command1', 'test-command2']")
        }),
        ('InvalidWikiEntryError', {
            'exception': errors.InvalidWikiEntryError,
            'kwargs': {
                'error': 'test-error'
            },
            'expected_message': "Invalid wiki entry: 'test-error'"
        }),
        ('PluginOutdatedError', {
            'exception': errors.PluginOutdatedError,
            'kwargs': {
                'message': 'test-message'
            },
            'expected_message': 'This plugin is outdated: test-message'
        }),
        ('RequiredCommandFailure', {
            'exception': errors.RequiredCommandFailure,
            'kwargs': {
                'command': 'test-command'
            },
            'expected_message': "'test-command' failed."
        }),
        ('RequiredCommandNotFound', {
            'exception': errors.RequiredCommandNotFound,
            'kwargs': {
                'cmd_list': ['test-command', 'test-argument']
            },
            'expected_message': "'test-command' not found."
        }),
        ('RequiredPathDoesNotExist', {
            'exception': errors.RequiredPathDoesNotExist,
            'kwargs': {
                'path': 'test-path'
            },
            'expected_message': "Required path does not exist: 'test-path'"
        }),
        ('SnapcraftPathEntryError', {
            'exception':
            errors.SnapcraftPathEntryError,
            'kwargs': {
                'value': 'test-path',
                'key': 'test-key',
                'app': 'test-app'
            },
            'expected_message':
            ("Failed to generate snap metadata: "
             "The path 'test-path' set for 'test-key' in 'test-app' does "
             "not exist. Make sure that the files are in the `prime` "
             "directory.")
        }),
        ('InvalidPullPropertiesError', {
            'exception':
            errors.InvalidPullPropertiesError,
            'kwargs': {
                'plugin_name': 'test-plugin',
                'properties': ['test-property1', 'test-property2']
            },
            'expected_message':
            ("Failed to load plugin: "
             "Invalid pull properties specified by 'test-plugin' plugin: "
             "['test-property1', 'test-property2']")
        }),
        ('InvalidBuildPropertiesError', {
            'exception':
            errors.InvalidBuildPropertiesError,
            'kwargs': {
                'plugin_name': 'test-plugin',
                'properties': ['test-property1', 'test-property2']
            },
            'expected_message':
            ("Failed to load plugin: "
             "Invalid build properties specified by 'test-plugin' plugin: "
             "['test-property1', 'test-property2']")
        }),
        ('StagePackageDownloadError', {
            'exception':
            errors.StagePackageDownloadError,
            'kwargs': {
                'part_name': 'test-part',
                'message': 'test-message'
            },
            'expected_message': ("Failed to fetch stage packages: "
                                 "Error downloading packages for part "
                                 "'test-part': test-message.")
        }),
        ('InvalidContainerImageInfoError', {
            'exception':
            errors.InvalidContainerImageInfoError,
            'kwargs': {
                'image_info': 'test-image-info'
            },
            'expected_message':
            ('Failed to parse container image info: '
             'SNAPCRAFT_IMAGE_INFO is not a valid JSON string: '
             'test-image-info')
        }),
        # meta errors.
        ('AdoptedPartMissingError', {
            'exception':
            meta_errors.AdoptedPartMissingError,
            'kwargs': {
                'part': 'test-part'
            },
            'expected_message':
            ("Failed to generate snap metadata: "
             "'adopt-info' refers to a part named 'test-part', but it is "
             "not defined in the 'snapcraft.yaml' file.")
        }),
        ('AdoptedPartNotParsingInfo', {
            'exception':
            meta_errors.AdoptedPartNotParsingInfo,
            'kwargs': {
                'part': 'test-part'
            },
            'expected_message':
            ("Failed to generate snap metadata: "
             "'adopt-info' refers to part 'test-part', but that part is "
             "lacking the 'parse-info' property.")
        }),
        ('MissingSnapcraftYamlKeysError', {
            'exception':
            meta_errors.MissingSnapcraftYamlKeysError,
            'kwargs': {
                'keys': ['test-key1', 'test-key2']
            },
            'expected_message':
            ("Failed to generate snap metadata: "
             "Missing required key(s) in snapcraft.yaml: "
             "'test-key1' and 'test-key2'. Either specify the missing "
             "key(s), or use 'adopt-info' to get them from a part.")
        }),
        ('MissingMetadataFileError', {
            'exception':
            errors.MissingMetadataFileError,
            'kwargs': {
                'part_name': 'test-part',
                'path': 'test/path'
            },
            'expected_message':
            ("Failed to generate snap metadata: "
             "Part 'test-part' has a 'parse-info' referring to metadata "
             "file 'test/path', which does not exist.")
        }),
        ('UnhandledMetadataFileTypeError', {
            'exception':
            errors.UnhandledMetadataFileTypeError,
            'kwargs': {
                'path': 'test/path'
            },
            'expected_message':
            ("Failed to extract metadata from 'test/path': "
             "This type of file is not supported for supplying "
             "metadata.")
        }),
        ('InvalidExtractorValueError', {
            'exception':
            errors.InvalidExtractorValueError,
            'kwargs': {
                'path': 'test/path',
                'extractor_name': 'extractor'
            },
            'expected_message':
            ("Failed to extract metadata from 'test/path': "
             "Extractor 'extractor' didn't return ExtractedMetadata as "
             "expected.")
        }),
        ('PatcherNewerPatchelfError', {
            'exception':
            errors.PatcherNewerPatchelfError,
            'kwargs': {
                'elf_file':
                'test/path',
                'patchelf_version':
                'patchelf 0.9',
                'process_exception':
                CalledProcessError(cmd=['patchelf'], returncode=-1)
            },
            'expected_message':
            ("'test/path' cannot be patched to function properly in a "
             'classic confined snap: patchelf failed with exit code -1.\n'
             "'patchelf 0.9' may be too old. A newer version of patchelf "
             'may be required.\n'
             'Try adding the `after: [patchelf]` and a `patchelf` part '
             'that would filter out files from prime `prime: [-*]` or '
             '`build-snaps: [patchelf/latest/edge]` to the failing part '
             'in your `snapcraft.yaml` to use a newer patchelf.')
        }),
        ('PatcherGenericError', {
            'exception':
            errors.PatcherGenericError,
            'kwargs': {
                'elf_file':
                'test/path',
                'process_exception':
                CalledProcessError(cmd=['patchelf'], returncode=-1)
            },
            'expected_message':
            ("'test/path' cannot be patched to function properly in a "
             'classic confined snap: patchelf failed with exit code -1')
        }),
        ('StagePackageMissingError', {
            'exception':
            errors.StagePackageMissingError,
            'kwargs': {
                'package': 'libc6'
            },
            'expected_message':
            ("'libc6' is required inside the snap for this "
             "part to work properly.\nAdd it as a `stage-packages` "
             "entry for this part.")
        }),
        ('RemotePartsUpdateConnectionError', {
            'exception':
            errors.RemotePartsUpdateConnectionError,
            'kwargs': {
                'requests_exception':
                requests.exceptions.ConnectionError("I'm a naughty error")
            },
            'expected_message':
            ('Failed to update cache of remote parts: A Connection error '
             'occurred.\nPlease try again.')
        }),
        ('SnapcraftPluginCommandError string command', {
            'exception':
            errors.SnapcraftPluginCommandError,
            'kwargs': {
                'command': 'make install',
                'exit_code': -1,
                'part_name': 'make_test',
            },
            'expected_message':
            ("Failed to run 'make install' for 'make_test': "
             "Exited with code -1.\n"
             "Verify that the part is using the correct parameters and try "
             "again.")
        }),
        ('SnapcraftPluginCommandError list command', {
            'exception':
            errors.SnapcraftPluginCommandError,
            'kwargs': {
                'command': ['make', 'install'],
                'exit_code': 2,
                'part_name': 'make_test',
            },
            'expected_message':
            ("Failed to run 'make install' for 'make_test': "
             "Exited with code 2.\n"
             "Verify that the part is using the correct parameters and try "
             "again.")
        }),
    )

    def test_error_formatting(self):
        """Each scenario's exception must stringify to its expected message."""
        error = self.exception(**self.kwargs)
        self.assertThat(str(error), Equals(self.expected_message))
Ejemplo n.º 32
0
 def run(self, bin, *args):
     """Unconditionally fail with an ``EnvCommandError`` wrapping a
     ``CalledProcessError(1, "python", output="")``.

     NOTE(review): looks like a test double simulating a failing ``python``
     invocation in an environment — confirm against callers. The *bin*
     parameter shadows the builtin and is ignored here.
     """
     raise EnvCommandError(CalledProcessError(1, "python", output=""))
Ejemplo n.º 33
0
def build_docs(version='dev'):
    """Build the HTML documentation into the ``html`` directory.

    Creates a fresh virtualenv next to this script, activates it in-process,
    upgrades pip/distribute when the installed versions are known-broken,
    installs pinned Sphinx/Breathe forks, generates Doxygen XML for
    ``format.h``, runs sphinx-build, and finally compiles the Less stylesheet
    (best-effort; missing ``lessc`` exits with status 1).

    :param version: version string embedded in the generated docs.
    :returns: the output directory name, ``'html'``.
    :raises CalledProcessError: if doxygen exits with a nonzero status.
    """
    # Create virtualenv.
    doc_dir = os.path.dirname(os.path.realpath(__file__))
    virtualenv_dir = 'virtualenv'
    check_call(['virtualenv', virtualenv_dir])
    import sysconfig
    # 'scripts' resolves to 'bin' on POSIX and 'Scripts' on Windows.
    scripts_dir = os.path.basename(sysconfig.get_path('scripts'))
    activate_this_file = os.path.join(virtualenv_dir, scripts_dir,
                                      'activate_this.py')
    # Activate the virtualenv inside this interpreter (virtualenv's
    # documented 'activate_this.py' mechanism) so the pip/sphinx commands
    # below resolve inside it.
    with open(activate_this_file) as f:
        exec(f.read(), dict(__file__=activate_this_file))
    # Upgrade pip because installation of sphinx with pip 1.1 available on Travis
    # is broken (see #207) and it doesn't support the show command.
    from pkg_resources import get_distribution, DistributionNotFound
    pip_version = get_distribution('pip').version
    if LooseVersion(pip_version) < LooseVersion('1.5.4'):
        print("Updating pip")
        check_call(['pip', 'install', '--upgrade', 'pip'])
    # Upgrade distribute because installation of sphinx with distribute 0.6.24
    # available on Travis is broken (see #207).
    try:
        distribute_version = get_distribution('distribute').version
        if LooseVersion(distribute_version) <= LooseVersion('0.6.24'):
            print("Updating distribute")
            check_call(['pip', 'install', '--upgrade', 'distribute'])
    except DistributionNotFound:
        pass
    # Install Sphinx and Breathe.
    pip_install('cppformat/sphinx',
                '12dde8afdb0a7bb5576e2656692c3478c69d8cc3',
                check_version='1.4a0.dev-20151013')
    pip_install('michaeljones/breathe',
                '1c9d7f80378a92cffa755084823a78bb38ee4acc')
    # Build docs.
    # The '-' argument makes doxygen read its configuration from stdin.
    cmd = ['doxygen', '-']
    p = Popen(cmd, stdin=PIPE)
    p.communicate(input=r'''
      PROJECT_NAME      = C++ Format
      GENERATE_LATEX    = NO
      GENERATE_MAN      = NO
      GENERATE_RTF      = NO
      CASE_SENSE_NAMES  = NO
      INPUT             = {0}/format.h
      QUIET             = YES
      JAVADOC_AUTOBRIEF = YES
      AUTOLINK_SUPPORT  = NO
      GENERATE_HTML     = NO
      GENERATE_XML      = YES
      XML_OUTPUT        = doxyxml
      ALIASES           = "rst=\verbatim embed:rst"
      ALIASES          += "endrst=\endverbatim"
      MACRO_EXPANSION   = YES
      PREDEFINED        = _WIN32=1 \
                          FMT_USE_VARIADIC_TEMPLATES=1 \
                          FMT_USE_RVALUE_REFERENCES=1 \
                          FMT_USE_USER_DEFINED_LITERALS=1 \
                          FMT_API=
      EXCLUDE_SYMBOLS   = fmt::internal::* StringValue write_str
    '''.format(os.path.join(os.path.dirname(doc_dir), 'fmt')).encode('UTF-8'))
    if p.returncode != 0:
        raise CalledProcessError(p.returncode, cmd)
    check_call([
        'sphinx-build',
        '-Dbreathe_projects.format=' + os.path.join(os.getcwd(), 'doxyxml'),
        '-Dversion=' + version, '-Drelease=' + version, '-Aversion=' + version,
        '-b', 'html', doc_dir, 'html'
    ])
    try:
        check_call([
            'lessc', '--clean-css',
            '--include-path=' + os.path.join(doc_dir, 'bootstrap'),
            os.path.join(doc_dir, 'fmt.less'), 'html/_static/fmt.css'
        ])
    except OSError as e:
        # Only ENOENT (lessc not installed) is tolerated; other OS errors
        # propagate.
        if e.errno != errno.ENOENT:
            raise
        print(
            'lessc not found; make sure that Less (http://lesscss.org/) is installed'
        )
        sys.exit(1)
    return 'html'
Ejemplo n.º 34
0
async def run_process(
    command: Union[str, Sequence[str]],
    *,
    input: Optional[bytes] = None,
    stdout: int = PIPE,
    stderr: int = PIPE,
    check: bool = True,
    cwd: Union[str, bytes, 'PathLike[str]', None] = None,
    env: Optional[Mapping[str, str]] = None,
    start_new_session: bool = False,
) -> 'CompletedProcess[bytes]':
    """
    Run an external command in a subprocess and wait until it completes.

    .. seealso:: :func:`subprocess.run`

    :param command: either a string to pass to the shell, or an iterable of strings containing the
        executable name or path and its arguments
    :param input: bytes passed to the standard input of the subprocess
    :param stdout: either :data:`subprocess.PIPE` or :data:`subprocess.DEVNULL`
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL` or
        :data:`subprocess.STDOUT`
    :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the process
        terminates with a return code other than 0
    :param cwd: If not ``None``, change the working directory to this before running the command
    :param env: if not ``None``, this mapping replaces the inherited environment variables from the
        parent process
    :param start_new_session: if ``true`` the setsid() system call will be made in the child
        process prior to the execution of the subprocess. (POSIX only)
    :return: an object representing the completed process
    :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process exits with a
        nonzero return code

    """
    async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:
        # Accumulate all chunks from one of the child's output streams into
        # stream_contents[index] (0 = stdout, 1 = stderr).
        buffer = BytesIO()
        async for chunk in stream:
            buffer.write(chunk)

        stream_contents[index] = buffer.getvalue()

    # NOTE(review): stdin is only piped when `input` is truthy, so an empty
    # payload (b'') routes stdin to DEVNULL instead of sending immediate EOF
    # on a pipe — confirm this is the intended behaviour.
    async with await open_process(
            command,
            stdin=PIPE if input else DEVNULL,
            stdout=stdout,
            stderr=stderr,
            cwd=cwd,
            env=env,
            start_new_session=start_new_session) as process:
        # Filled in by the drain_stream tasks; entries stay None for streams
        # that were not piped.
        stream_contents: List[Optional[bytes]] = [None, None]
        try:
            async with create_task_group() as tg:
                if process.stdout:
                    tg.start_soon(drain_stream, process.stdout, 0)
                if process.stderr:
                    tg.start_soon(drain_stream, process.stderr, 1)
                if process.stdin and input:
                    # Send the payload, then close stdin so the child sees EOF.
                    await process.stdin.send(input)
                    await process.stdin.aclose()

                await process.wait()
        except BaseException:
            # Also covers task cancellation: make sure the child process does
            # not outlive this call before re-raising.
            process.kill()
            raise

    output, errors = stream_contents
    if check and process.returncode != 0:
        raise CalledProcessError(cast(int, process.returncode), command,
                                 output, errors)

    return CompletedProcess(command, cast(int, process.returncode), output,
                            errors)
Ejemplo n.º 35
0
    def run(self,
            contactname,
            sambaopts=None,
            credopts=None,
            versionopts=None,
            H=None,
            editor=None):
        """Open the named contact's LDIF in an external editor and apply
        any edits to the directory.

        The contact may be specified by name, or by full DN when the
        argument starts with ``CN=``.  The object's LDIF is written to a
        temporary file, the editor is invoked on it, and the diff between
        the original and the edited message is applied via ``samdb.modify``.

        :param contactname: contact name, or its DN when starting with CN=.
        :param H: LDB URL of the database to connect to.
        :param editor: editor command; defaults to $EDITOR, then ``vi``.
        :raises CommandError: on lookup failure, ambiguous matches, an
            invalid DN, or a failed modify.
        :raises CalledProcessError: if the editor exits with nonzero status.
        """
        from . import common

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H,
                      session_info=system_session(),
                      credentials=creds,
                      lp=lp)
        base_dn = samdb.domain_dn()
        scope = ldb.SCOPE_SUBTREE

        filter = ("(&(objectClass=contact)(name=%s))" %
                  ldb.binary_encode(contactname))

        if contactname.upper().startswith("CN="):
            # contact is specified by DN: search just that object.
            filter = "(objectClass=contact)"
            scope = ldb.SCOPE_BASE
            try:
                base_dn = samdb.normalize_dn_in_domain(contactname)
            except Exception as e:
                raise CommandError('Invalid dn "%s": %s' % (contactname, e))

        try:
            res = samdb.search(base=base_dn, scope=scope, expression=filter)
            contact_dn = res[0].dn
        except IndexError:
            raise CommandError('Unable to find contact "%s"' % (contactname))

        if len(res) > 1:
            # Ambiguous name: list every match so the user can pick a DN.
            for msg in sorted(res, key=attrgetter('dn')):
                self.outf.write("found: %s\n" % msg.dn)
            raise CommandError("Multiple results for contact '%s'\n"
                               "Please specify the contact's full DN" %
                               contactname)

        # Exactly one result at this point.
        for msg in res:
            result_ldif = common.get_ldif_for_editor(samdb, msg)

            if editor is None:
                editor = os.environ.get('EDITOR')
                if editor is None:
                    editor = 'vi'

            with tempfile.NamedTemporaryFile(suffix=".tmp") as t_file:
                t_file.write(get_bytes(result_ldif))
                t_file.flush()
                # BUG FIX: let a non-zero editor exit propagate unchanged.
                # The previous code re-raised CalledProcessError("ERROR: ", e),
                # i.e. with a string as the returncode and the exception as
                # the command, producing a malformed exception object.
                check_call([editor, t_file.name])
                with open(t_file.name) as edited_file:
                    edited_message = edited_file.read()

        msgs_edited = samdb.parse_ldif(edited_message)
        msg_edited = next(msgs_edited)[1]

        res_msg_diff = samdb.msg_diff(msg, msg_edited)
        if len(res_msg_diff) == 0:
            self.outf.write("Nothing to do\n")
            return

        try:
            samdb.modify(res_msg_diff)
        except Exception as e:
            raise CommandError("Failed to modify contact '%s': " % contactname,
                               e)

        self.outf.write("Modified contact '%s' successfully\n" % contactname)
Ejemplo n.º 36
0
def cghub_submit(UUID,
                 NEW_UUID,
                 BAM_FILE,
                 ORIG_BAM_FILE,
                 MD5,
                 NORMAL_UUID,
                 NEW_NORMAL_UUID,
                 UPLOAD_KEY,
                 mode,
                 params,
                 test=0,
                 debug=False,
                 run_realignment_check=True):
    """Drive a resumable CGHub submission of a realigned BAM.

    Steps (each guarded by a sentinel file or a server-side state query so
    a rerun resumes rather than repeats): optional realignment check,
    metadata generation, metadata submission, manifest recreation if it is
    missing, and file upload via gtupload.  On success the working
    directory ``<UUID>.partial`` is renamed to ``<UUID>.FINISHED``.

    NOTE(review): behaviour depends on external helpers (``run_command``,
    the ``realigned_bam_check``/``create_pawg_metadata``/``cgsubmit_fixed``
    tools) and module globals (``REPO_SERVER``, ``DEBUG_*``) not visible
    here — the notes below assume ``run_command`` raises
    ``CalledProcessError`` on failure; confirm.

    :param test: 0 = production; >0 uses the test template; >1 stops after
        the state query.
    :param debug: when True, run the DEBUG_* variants of the helper tools.
    :raises Exception: if the upload already finished.
    :raises CalledProcessError: on unrecoverable command failures or an
        unexpected submission state.
    """
    download_timing = params["%s_download_timing" % mode]
    merged_metrics = params["%s_merged_metrics" % mode]
    #merged_metrics = params["%s.markdup.metrics" % UUID]
    #merged_timing = "%s_merge_timing.txt" % BAM_FILE
    #merged_timing = "PAWG.%s.bam_merge_timing.txt" % UUID
    merged_timing = params["%s_merged_timing" % mode]
    #the submission directory
    CWD = os.getcwd()
    SUB_DIR = "%s/%s.partial" % (CWD, UUID)
    FIN_DIR = "%s/%s.FINISHED" % (CWD, UUID)

    # The FINISHED directory doubles as the "already done" sentinel.
    if os.path.exists(FIN_DIR):
        sys.stderr.write("Upload already FINISHED\n")
        #sys.exit(0)
        raise Exception("Upload already FINISHED")

    if not os.path.exists(SUB_DIR):
        os.mkdir(SUB_DIR)

    # Expose the BAM under its submission name via a relative symlink.
    if not os.path.exists("%s/PCAWG.%s.bam" % (SUB_DIR, UUID)):
        os.symlink(os.path.relpath(BAM_FILE, SUB_DIR),
                   "%s/PCAWG.%s.bam" % (SUB_DIR, UUID))

    #put metric compareing $ORIG_FILE and $BAM_FILE and save stats to $SUB_DIR
    try:
        if debug:
            cmd = "%s %s/realigned_bam_check -o %s -n %s -p %s" % (
                DEBUG_PYTHON, DEBUG_SCRIPT_DIR, ORIG_BAM_FILE, BAM_FILE,
                SUB_DIR)
        else:
            cmd = "realigned_bam_check -o %s -n %s -p %s" % (ORIG_BAM_FILE,
                                                             BAM_FILE, SUB_DIR)
        if run_realignment_check:
            sys.stderr.write("running the realignment_check\n")
            (stdout, stderr) = run_command(cmd)
            #raise CalledProcessError(500,"bad test")
    except CalledProcessError as cpe:
        # A failed check is tolerated deliberately: it downgrades the
        # submission to the check study instead of aborting.
        sys.stderr.write("Realignment Check error\n")
        #now we upload to a the PCAWG_CHECK study instead of the full production PCAWG 2.0
        run_realignment_check = False
        #raise cpe

    NEW_UUID = NEW_UUID.rstrip()
    NORMAL_UUID = NORMAL_UUID.rstrip()
    NEW_NORMAL_UUID = NEW_NORMAL_UUID.rstrip()

    # MD5 is a path to a file whose first line is the checksum.
    with open(MD5, "r") as md5f:
        md5 = md5f.readline()
        md5 = md5.rstrip()


#create cghub validating metadata with ICGC specific metadata added to it
#if not os.path.exists("%s/%s" % (SUB_DIR,NEW_UUID)) or not os.path.exists("%s/%s" % (SUB_DIR,UUID)):
#if not os.path.exists( os.path.join(SUB_DIR,NEW_UUID"trans.map") ):
    # Skip metadata generation entirely if the MD_DONE sentinel exists.
    if not os.path.exists(os.path.join(SUB_DIR, "MD_DONE")):
        additional_test_options = ""
        if not run_realignment_check:
            additional_test_options = "-d analysis.pawg_check_template.xml"
        if test > 0:
            #use the test template, goes to a different study
            additional_test_options = "-d analysis.pawg_template.test.xml"
        try:
            if debug:
                cmd = "%s %s/create_pawg_metadata -u %s -f PCAWG.%s.bam -c %s -t %s -n %s -p %s %s" % (
                    DEBUG_PYTHON, DEBUG_SCRIPT_DIR, UUID, UUID, md5,
                    NEW_NORMAL_UUID, NEW_UUID, SUB_DIR,
                    additional_test_options)
            else:
                cmd = "create_pawg_metadata -u %s -f PCAWG.%s.bam -c %s -t %s -n %s -p %s %s" % (
                    UUID, UUID, md5, NEW_NORMAL_UUID, NEW_UUID, SUB_DIR,
                    additional_test_options)
            (stdout, stderr) = run_command(cmd)
        except CalledProcessError as cpe:
            sys.stderr.write("CGHub metadata creation error\n")
            raise cpe

        #add the QC metrics to the metadata
        try:
            if debug:
                #cmd = "%s %s/add_qc_results_to_metadata.pl %s/%s/analysis.xml %s" % (DEBUG_PERL,DEBUG_SCRIPT_DIR,SUB_DIR,NEW_UUID,QC_STATS_FILE)
                cmd = "%s %s/add_qc_results_to_metadata.pl %s/%s/analysis.xml %s %s %s %s %s" % (
                    DEBUG_PERL, DEBUG_SCRIPT_DIR, SUB_DIR, NEW_UUID,
                    params["%s:aligned_bam_dir" % mode],
                    params["%s:stats_dir" % mode], download_timing,
                    merged_metrics, merged_timing)
            else:
                cmd = "add_qc_results_to_metadata.pl %s/%s/analysis.xml %s %s %s %s %s" % (
                    SUB_DIR, NEW_UUID, params["%s:aligned_bam_dir" % mode],
                    params["%s:stats_dir" % mode], download_timing,
                    merged_metrics, merged_timing)
            (stdout, stderr) = run_command(cmd)
        except CalledProcessError as cpe:
            sys.stderr.write(
                "CGHub QC stats/ICGC fields addition to metadata error\n")
            raise cpe

        #write sentinal file
        with open(os.path.join(SUB_DIR, "MD_DONE"), "w") as outf:
            outf.write("metadata generated finished successfully\n")

    #check submission state
    try:
        # NOTE(review): the debug and non-debug commands below are identical
        # — possibly a leftover from an edit; confirm.
        if debug:
            cmd = "curl -sk %s/cghub/metadata/analysisDetail?analysis_id=%s | egrep -ie '<state>' | cut -d'>' -f 2 | cut -d\"<\" -f 1" % (
                REPO_SERVER, NEW_UUID)
        else:
            cmd = "curl -sk %s/cghub/metadata/analysisDetail?analysis_id=%s | egrep -ie '<state>' | cut -d'>' -f 2 | cut -d\"<\" -f 1" % (
                REPO_SERVER, NEW_UUID)
        (state, stderr) = run_command(cmd, SUB_DIR)
        state = state.rstrip()
    except CalledProcessError as cpe:
        sys.stderr.write("CGHub WSI query for state for %s failed\n" %
                         (NEW_UUID))
        raise cpe

    #if test level is 2 or above, quit before doing anything else
    if test > 1:
        return

    #not submitted yet
    if (state is None or state == "") and not os.path.exists(
            os.path.join(SUB_DIR, "SUBMIT_DONE")):
        #if not os.path.exists( os.path.join(SUB_DIR,"SUBMIT_DONE") ):
        try:
            #return True
            if debug:
                cmd = "%s %s/cgsubmit_fixed -s %s -c %s -u %s" % (
                    DEBUG_PYTHON, DEBUG_SCRIPT_DIR, REPO_SERVER,
                    DEBUG_UPLOAD_KEY, NEW_UUID)
            else:
                cmd = "cgsubmit_fixed -c %s -u %s" % (UPLOAD_KEY, NEW_UUID)
            (stdout, stderr) = run_command(cmd, SUB_DIR)
        except CalledProcessError as cpe:
            sys.stderr.write("CGHub metadata submission error\n")
            raise cpe

        #write sentinal value
        with open(os.path.join(SUB_DIR, "SUBMIT_DONE"), "w") as outf:
            outf.write("metadata submitted finished successfully\n")

    #submitted but manifest file needed for upload is probably gone, recreate by doing a valiadtion only submission (indempotent)
    elif not os.path.exists(os.path.join(SUB_DIR, NEW_UUID, "manifest.xml")):
        try:
            if debug:
                #return True
                cmd = "%s %s/cgsubmit_fixed -s %s --validate-only -u %s" % (
                    DEBUG_PYTHON, DEBUG_SCRIPT_DIR, REPO_SERVER, NEW_UUID)
            else:
                cmd = "cgsubmit_fixed --validate-only -u %s" % (NEW_UUID)
            (stdout, stderr) = run_command(cmd, SUB_DIR)
        except CalledProcessError as cpe:
            sys.stderr.write(
                "CGHub metadata submission manifest recreation error\n")
            raise cpe
        #must also delete any existing gto files
        if os.path.exists(os.path.join(
                SUB_DIR, NEW_UUID, "%s.gto" % NEW_UUID)) or os.path.exists(
                    os.path.join(SUB_DIR, NEW_UUID,
                                 "%s.gto.progress" % NEW_UUID)):
            try:
                run_command("rm %s" %
                            (os.path.join(SUB_DIR, NEW_UUID, "*.gto*")))
            except CalledProcessError as cpe:
                sys.stderr.write("CGHub gto deletion error\n")
                raise cpe

    #try to upload if in the right (or non-existent) state
    if state is None or state == "" or state == "submitted" or state == "uploading":
        try:
            if debug:
                #return True
                cmd = "%s/gtupload -c %s -u %s/manifest.xml -vv 2>%s/upload.stderr.log" % (
                    DEBUG_SCRIPT_DIR, DEBUG_UPLOAD_KEY, NEW_UUID, SUB_DIR)
            else:
                cmd = "gtupload -c %s -u %s/manifest.xml -vv 2>%s/upload.stderr.log" % (
                    UPLOAD_KEY, NEW_UUID, SUB_DIR)
            (stdout, stderr) = run_command(cmd, SUB_DIR)
        except CalledProcessError as cpe:
            sys.stderr.write(
                "CGHub file upload error, check error log %s/upload.stderr.log\n"
                % SUB_DIR)
            raise cpe
    elif state != "live":
        sys.stderr.write(
            "not in a submitting/uploading state, but also not live, CHECK THIS ONE\n"
        )
        # NOTE(review): CalledProcessError's second argument is nominally the
        # command; here it carries a message. Works, but reads oddly —
        # confirm callers only log it.
        raise CalledProcessError(1, "state not live: %s" % (state))

    #finally finish by renaming working dir
    #print "finishing with the rename"
    os.rename(SUB_DIR, FIN_DIR)
Ejemplo n.º 37
0
    def run(self, computername, credopts=None, sambaopts=None, versionopts=None,
            H=None, editor=None):
        """Open the computer account's LDIF in an external editor and apply
        any edits to the directory.

        :param computername: computer name; a trailing ``$`` is appended to
            form the sAMAccountName when not already present.
        :param H: LDB URL of the database to connect to.
        :param editor: editor command; defaults to $EDITOR, then ``vi``.
        :raises CommandError: on lookup failure, ambiguous matches, or a
            failed modify.
        :raises CalledProcessError: if the editor exits with nonzero status.
        """
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        samaccountname = computername
        if not computername.endswith('$'):
            samaccountname = "%s$" % computername

        filter = ("(&(sAMAccountType=%d)(sAMAccountName=%s))" %
                  (dsdb.ATYPE_WORKSTATION_TRUST,
                   ldb.binary_encode(samaccountname)))

        domaindn = samdb.domain_dn()

        try:
            res = samdb.search(base=domaindn,
                               expression=filter,
                               scope=ldb.SCOPE_SUBTREE)
            computer_dn = res[0].dn
        except IndexError:
            raise CommandError('Unable to find computer "%s"' % (computername))

        if len(res) != 1:
            raise CommandError('Invalid number of results: for "%s": %d' %
                               ((computername), len(res)))

        msg = res[0]
        result_ldif = common.get_ldif_for_editor(samdb, msg)

        if editor is None:
            editor = os.environ.get('EDITOR')
            if editor is None:
                editor = 'vi'

        with tempfile.NamedTemporaryFile(suffix=".tmp") as t_file:
            t_file.write(get_bytes(result_ldif))
            t_file.flush()
            # BUG FIX: let a non-zero editor exit propagate unchanged.  The
            # previous code re-raised CalledProcessError("ERROR: ", e), i.e.
            # with a string as the returncode and the exception as the
            # command, producing a malformed exception object.
            check_call([editor, t_file.name])
            with open(t_file.name) as edited_file:
                edited_message = edited_file.read()

        msgs_edited = samdb.parse_ldif(edited_message)
        msg_edited = next(msgs_edited)[1]

        res_msg_diff = samdb.msg_diff(msg, msg_edited)
        if len(res_msg_diff) == 0:
            self.outf.write("Nothing to do\n")
            return

        try:
            samdb.modify(res_msg_diff)
        except Exception as e:
            # BUG FIX: the old format string had a single %s but a
            # two-element tuple, so this line raised TypeError instead of
            # the intended CommandError.  Pass the original exception as the
            # second CommandError argument, matching the contact variant.
            raise CommandError("Failed to modify computer '%s': " %
                               computername, e)

        self.outf.write("Modified computer '%s' successfully\n" % computername)
Ejemplo n.º 38
0
def call_process(cmd):
    """Run *cmd*, wiring its output to the module-level stdout/stderr handles.

    Raises CalledProcessError when the command exits with a non-zero status.
    """
    child = Popen(cmd, stdout=stdout, stderr=stderr)
    child.communicate()
    rc = child.returncode
    if rc:
        raise CalledProcessError(rc, cmd)
Ejemplo n.º 39
0
    def shebang(self, line, cell):
        """Run a cell via a shell command

        The `%%script` line is like the #! line of script,
        specifying a program (bash, perl, ruby, etc.) with which to run.

        The rest of the cell is run by that program.

        Examples
        --------
        ::

            In [1]: %%script bash
               ...: for i in 1 2 3; do
               ...:   echo $i
               ...: done
            1
            2
            3
        """
        # Pump one of the child's output streams line by line.  When the
        # corresponding --out/--err argument was given, the latest line is
        # stored in the user namespace under that name; otherwise it is
        # echoed straight to `file_object` (our stdout/stderr).
        async def _handle_stream(stream, stream_arg, file_object):
            while True:
                line = (await stream.readline()).decode("utf8")
                if not line:
                    break
                if stream_arg:
                    self.shell.user_ns[stream_arg] = line
                else:
                    file_object.write(line)
                    file_object.flush()

        # Feed the cell body to the child's stdin, then drain stdout and
        # stderr concurrently until the process exits.
        async def _stream_communicate(process, cell):
            process.stdin.write(cell)
            process.stdin.close()
            stdout_task = asyncio.create_task(
                _handle_stream(process.stdout, args.out, sys.stdout))
            stderr_task = asyncio.create_task(
                _handle_stream(process.stderr, args.err, sys.stderr))
            await asyncio.wait([stdout_task, stderr_task])
            await process.wait()

        # On Windows only the proactor event loop supports subprocesses.
        if sys.platform.startswith("win"):
            asyncio.set_event_loop_policy(
                asyncio.WindowsProactorEventLoopPolicy())
        loop = asyncio.get_event_loop()
        # Non-POSIX splitting on Windows keeps backslashed paths intact.
        argv = arg_split(line, posix=not sys.platform.startswith("win"))
        args, cmd = self.shebang.parser.parse_known_args(argv)
        try:
            p = loop.run_until_complete(
                asyncio.create_subprocess_exec(
                    *cmd,
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE,
                    stdin=asyncio.subprocess.PIPE,
                ))
        except OSError as e:
            if e.errno == errno.ENOENT:
                # The interpreter/program named on the %%script line is
                # missing; report it rather than raising a traceback.
                print("Couldn't find program: %r" % cmd[0])
                return
            else:
                raise

        # The cell must end with a newline so shells execute the last line.
        if not cell.endswith('\n'):
            cell += '\n'
        cell = cell.encode('utf8', 'replace')
        if args.bg:
            # Background mode (--bg): hand the process to the job manager and
            # expose its pipes in the user namespace, closing the ones nobody
            # asked to capture.
            self.bg_processes.append(p)
            self._gc_bg_processes()
            to_close = []
            if args.out:
                self.shell.user_ns[args.out] = p.stdout
            else:
                to_close.append(p.stdout)
            if args.err:
                self.shell.user_ns[args.err] = p.stderr
            else:
                to_close.append(p.stderr)
            self.job_manager.new(self._run_script,
                                 p,
                                 cell,
                                 to_close,
                                 daemon=True)
            if args.proc:
                self.shell.user_ns[args.proc] = p
            return

        try:
            loop.run_until_complete(_stream_communicate(p, cell))
        except KeyboardInterrupt:
            # Escalate politely: SIGINT first, then terminate(), then kill(),
            # giving the child a short grace period at each step.
            try:
                p.send_signal(signal.SIGINT)
                time.sleep(0.1)
                if p.returncode is not None:
                    print("Process is interrupted.")
                    return
                p.terminate()
                time.sleep(0.1)
                if p.returncode is not None:
                    print("Process is terminated.")
                    return
                p.kill()
                print("Process is killed.")
            except OSError:
                pass
            except Exception as e:
                print("Error while terminating subprocess (pid=%i): %s" %
                      (p.pid, e))
            return
        if args.raise_error and p.returncode != 0:
            # If we get here and p.returncode is still None, we must have
            # killed it but not yet seen its return code. We don't wait for it,
            # in case it's stuck in uninterruptible sleep. -9 = SIGKILL
            rc = p.returncode or -9
            raise CalledProcessError(rc, cell)
Ejemplo n.º 40
0
 def check_returncode(self):
     """Raise CalledProcessError when the recorded exit status is non-zero."""
     rc = self.returncode
     if not rc:
         return
     raise CalledProcessError(rc, self.args, self.stdout, self.stderr)
Ejemplo n.º 41
0
    def run(self,
            command,
            cwd=None,
            echo=None,
            cache_key=None,
            check=False,
            env=None):
        """Execute *command* as a subprocess and return a ``Result``.

        :param command: the command to run (passed through ``convert_args``).
        :param cwd: working directory for the child process, if any.
        :param echo: force (True) / suppress (False) echoing of the command;
            ``None`` defers to the instance default ``self.echo``.
        :param cache_key: when given, memoise the outcome in ``self.shelf``
            keyed on this value plus the joined command line.
        :param check: when True, merge stderr into stdout and raise
            ``CalledProcessError`` on a non-zero exit status.
        :param env: environment mapping for the child; ``None`` inherits ours.
        :raises CalledProcessError: if *check* is set and the command fails.
        """
        args = self.convert_args(command)

        # Echo when explicitly requested, or when unset and the instance
        # default enables echoing.
        if echo or echo is None and self.echo:
            if cwd and self.echo_directories:
                logging.info('cd {}'.format(cwd))

            # TODO: Wont properly reproduce command if command is a string
            logging.info(' '.join(arg.replace(' ', '\\ ') for arg in args))

        if cache_key:
            # assert not env # TODO
            key = '{}{}'.format(cache_key, ' '.join(command))
            if key in self.shelf:
                # Cache hit: rebuild a Result from the stored triple without
                # spawning a process.
                return_code, stdout, stderr = self.shelf[key]
                result = Result()
                result.return_code = return_code
                result.stdout = stdout
                result.stderr = stderr
                return result

        stdout = subprocess.PIPE
        # With check=True, fold stderr into stdout so a failure carries the
        # full interleaved output.
        stderr = subprocess.PIPE if not check else subprocess.STDOUT

        if cwd:
            cwd = str(cwd)  # tolerate pathlib.Path-like values

        popen = subprocess.Popen(args,
                                 cwd=cwd,
                                 stdout=stdout,
                                 stderr=stderr,
                                 env=env)
        stdout, stderr = popen.communicate()

        # Decode captured bytes; empty/None streams are left as-is.
        if stdout:
            stdout = six.text_type(stdout, encoding='utf-8')
        if stderr:
            stderr = six.text_type(stderr, encoding='utf-8')

        return_code = popen.returncode

        if check and return_code != 0:
            if stdout:
                logging.debug(stdout)
            if stderr:
                # Bug fix: this previously logged `stdout` again instead of
                # the captured stderr.
                logging.debug(stderr)
            raise CalledProcessError(return_code, command, stdout)

        if cache_key:
            key = '{}{}'.format(cache_key, ' '.join(command))
            self.shelf[key] = return_code, stdout, stderr

        result = Result()
        result.return_code = return_code
        result.stdout = stdout
        result.stderr = stderr

        return result
Ejemplo n.º 42
0
    def _invoke_sort(self, input_paths, output_path):
        """Use the local sort command to sort one or more input files. Raise
        an exception if there is a problem.

        This is just a wrapper to handle limitations of Windows sort
        (see Issue #288).

        :type input_paths: list of str
        :param input_paths: paths of one or more input files
        :type output_path: str
        :param output_path: where to pipe sorted output into
        """
        if not input_paths:
            raise ValueError('Must specify at least one input path.')

        # ignore locale when sorting
        env = os.environ.copy()
        env['LC_ALL'] = 'C'

        # Make sure that the tmp dir environment variables are changed if
        # the default is changed.
        env['TMP'] = self._opts['local_tmp_dir']
        env['TMPDIR'] = self._opts['local_tmp_dir']
        env['TEMP'] = self._opts['local_tmp_dir']

        log.debug('Writing to %s' % output_path)

        # capture sort's stderr to a file so it can be replayed into the log
        # if both strategies below fail
        err_path = os.path.join(self._get_local_tmp_dir(), 'sort-stderr')

        # assume we're using UNIX sort unless we know otherwise
        if (not self._sort_is_windows_sort) or len(input_paths) == 1:
            with open(output_path, 'wb') as output:
                with open(err_path, 'wb') as err:
                    args = ['sort'] + list(input_paths)
                    log.debug('> %s' % cmd_line(args))
                    try:
                        check_call(args, stdout=output, stderr=err, env=env)
                        return
                    except CalledProcessError:
                        # likely Windows sort rejecting multiple input files;
                        # fall through to the stdin-piping strategy below
                        pass

        # Looks like we're using Windows sort
        self._sort_is_windows_sort = True

        log.debug('Piping files into sort for Windows compatibility')
        with open(output_path, 'wb') as output:
            with open(err_path, 'wb') as err:
                args = ['sort']
                log.debug('> %s' % cmd_line(args))
                proc = Popen(args,
                             stdin=PIPE,
                             stdout=output,
                             stderr=err,
                             env=env)

                # shovel bytes into the sort process
                for input_path in input_paths:
                    with open(input_path, 'rb') as input:
                        while True:
                            buf = input.read(_BUFFER_SIZE)
                            if not buf:
                                break
                            proc.stdin.write(buf)

                proc.stdin.close()
                proc.wait()

                if proc.returncode == 0:
                    return

        # looks like there was a problem. log it and raise an error
        with open(err_path) as err:
            for line in err:
                log.error('STDERR: %s' % line.rstrip('\r\n'))
        raise CalledProcessError(proc.returncode, args)
Ejemplo n.º 43
0
 def raise_error(*_):
     """Unconditionally fail, signalling that no django_admin mock is set up."""
     error = CalledProcessError(1, "django_admin.py", "No django_admin mock!")
     raise error
Ejemplo n.º 44
0
 def raise_error(*_):
     """Unconditionally fail, signalling that no git mock is set up."""
     error = CalledProcessError(1, "git", "No git mock!")
     raise error
Ejemplo n.º 45
0
 def test_that_no_lvm_volume_groups_are_found_if_lvm_commands_dont_exist(
         self, co_mock):
     """No volume groups should be generated when `vgs` cannot be run."""
     co_mock.side_effect = CalledProcessError(1, "vgs")
     generated = LvmVolumeGroup.generate()
     self.assertEqual(0, len(generated))
Ejemplo n.º 46
0
class TestTemperatureEntry(unittest.TestCase):
    """
    Based on `sensors`, `vcgencmd` and thermal files, this module verifies temperature computations.
    """
    def setUp(self):
        # We'll store there filenames of some temp files mocking those under
        #  `/sys/class/thermal/thermal_zone*/temp`
        # delete=False keeps the paths valid after close(); tearDown removes
        # the files explicitly.
        self._temp_files = []
        for temperature in [  # Fake temperatures
                b'50000', b'0', b'40000', b'50000'
        ]:
            file = tempfile.NamedTemporaryFile(delete=False)
            file.write(temperature)
            file.seek(0)
            self._temp_files.append(file)

    def tearDown(self):
        # Dispose of the fake thermal files created in setUp.
        for file in self._temp_files:
            file.close()
            os.remove(file.name)

    # side_effect order: first element answers the `sensors` call, second the
    # `vcgencmd` call.
    @patch('archey.entries.temperature.check_output',
           side_effect=[FileNotFoundError(), 'temp=42.8\'C\n'])
    @patch(
        'archey.entries.temperature.iglob',
        return_value=[]  # No temperature from file will be retrieved
    )
    @HelperMethods.patch_clean_configuration(
        configuration={
            'temperature': {
                'sensors_chipsets': [],
                'use_fahrenheit': False,
                'char_before_unit': ' '
            }
        })
    def test_vcgencmd_only_no_max(self, _, __):
        """
        Test for `vcgencmd` output only (no sensor files).
        Only one value is retrieved, so no maximum should be displayed (see #39).
        """
        temperature = Temperature()

        output_mock = MagicMock()
        temperature.output(output_mock)

        self.assertDictEqual(
            temperature.value, {
                'temperature': 42.8,
                'max_temperature': 42.8,
                'char_before_unit': ' ',
                'unit': 'C'
            })
        self.assertEqual(output_mock.append.call_args[0][1], '42.8 C')

    @patch('archey.entries.temperature.check_output',
           side_effect=[FileNotFoundError(), 'temp=40.0\'C\n'])
    @patch('archey.entries.temperature.iglob')
    @HelperMethods.patch_clean_configuration(
        configuration={
            'temperature': {
                'sensors_chipsets': [],
                'use_fahrenheit': False,
                'char_before_unit': ' '
            }
        })
    def test_vcgencmd_and_files(self, iglob_mock, _):
        """Tests `vcgencmd` output AND sensor files"""
        iglob_mock.return_value = iter(
            [file.name for file in self._temp_files])
        self.assertDictEqual(
            Temperature().value, {
                'temperature': 45.0,
                'max_temperature': 50.0,
                'char_before_unit': ' ',
                'unit': 'C'
            })

    @patch(
        'archey.entries.temperature.check_output',
        side_effect=[
            FileNotFoundError(),  # No temperature from `sensors` call
            FileNotFoundError()  # No temperature from `vcgencmd` call
        ])
    @patch('archey.entries.temperature.iglob')
    @HelperMethods.patch_clean_configuration(
        configuration={
            'temperature': {
                'sensors_chipsets': [],
                'use_fahrenheit': True,
                'char_before_unit': '@'
            }
        })
    def test_files_only_in_fahrenheit(self, iglob_mock, _):
        """Test sensor files only, Fahrenheit (naive) conversion and special degree character"""
        iglob_mock.return_value = iter(
            [file.name for file in self._temp_files])
        self.assertDictEqual(
            Temperature().value,
            {
                'temperature': 116.0,  # 46.7 degrees C in Fahrenheit.
                'max_temperature': 122.0,  # 50 degrees C in Fahrenheit
                'char_before_unit': '@',
                'unit': 'F'
            })

    @patch(
        'archey.entries.temperature.check_output',
        side_effect=[
            FileNotFoundError(),  # No temperature from `sensors` call.
            FileNotFoundError()  # No temperature from `vcgencmd` call.
        ])
    @patch(
        'archey.entries.temperature.iglob',
        return_value=[]  # No temperature from file will be retrieved.
    )
    @HelperMethods.patch_clean_configuration(
        configuration={'temperature': {
            'sensors_chipsets': []
        }})
    def test_no_output(self, _, __):
        """Test when no value could be retrieved (anyhow)"""
        self.assertIsNone(Temperature().value)

    @patch(
        'archey.entries.temperature.check_output',  # Mock the `sensors` call.
        side_effect=[
            """\
{
   "who-cares-about":{
      "temp1":{
         "temp1_input": 45.000,
         "temp1_crit": 128.000
      },
      "temp2":{
         "temp2_input": 0.000,
         "temp2_crit": 128.000
      },
      "temp3":{
         "temp3_input": 38.000,
         "temp3_crit": 128.000
      },
      "temp4":{
         "temp4_input": 39.000,
         "temp4_crit": 128.000
      },
      "temp5":{
         "temp5_input": 0.000,
         "temp5_crit": 128.000
      },
      "temp6":{
         "temp6_input": 114.000,
         "temp6_crit": 128.000
      }
   },
   "the-chipsets-names":{
      "what-are":{
         "temp1_input": 45.000,
         "temp1_max": 100.000,
         "temp1_crit": 100.000,
         "temp1_crit_alarm": 0.000
      },
      "those":{
         "temp2_input": 43.000,
         "temp2_max": 100.000,
         "temp2_crit": 100.000,
         "temp2_crit_alarm": 0.000
      },
      "identifiers":{
         "temp3_input": 44.000,
         "temp3_max": 100.000,
         "temp3_crit": 100.000,
         "temp3_crit_alarm": 0.000
      }
   },
   "crap-a-fan-chip":{
      "fan1":{
         "fan1_input": 3386.000
      }
   }
}
""",
            FileNotFoundError()  # No temperature from `vcgencmd` call.
        ])
    @HelperMethods.patch_clean_configuration(
        configuration={
            'temperature': {
                'sensors_chipsets': [],
                'use_fahrenheit': True,
                'char_before_unit': ' '
            }
        })
    def test_sensors_only_in_fahrenheit(self, _):
        """Test computations around `sensors` output and Fahrenheit (naive) conversion"""
        self.assertDictEqual(
            Temperature().value,
            {
                'temperature': 126.6,  # (52.6 C in F)
                'max_temperature': 237.2,  # (114.0 C in F)
                'char_before_unit': ' ',
                'unit': 'F'
            })

    @patch(
        'archey.entries.temperature.check_output',
        side_effect=[
            CalledProcessError(1, 'sensors'),  # `sensors` will hard fail.
            FileNotFoundError()  # No temperature from `vcgencmd` call
        ])
    @patch('archey.entries.temperature.iglob')
    @HelperMethods.patch_clean_configuration(
        configuration={
            'temperature': {
                'sensors_chipsets': [],
                'use_fahrenheit': False,
                'char_before_unit': 'o'
            }
        })
    def test_sensors_error_1(self, iglob_mock, _):
        """Test `sensors` (hard) failure handling and polling from files in Celsius"""
        iglob_mock.return_value = iter(
            [file.name for file in self._temp_files])

        temperature = Temperature()

        output_mock = MagicMock()
        temperature.output(output_mock)

        self.assertDictEqual(
            temperature.value, {
                'temperature': 46.7,
                'max_temperature': 50.0,
                'char_before_unit': 'o',
                'unit': 'C'
            })
        self.assertEqual(output_mock.append.call_args[0][1],
                         '46.7oC (Max. 50.0oC)')

    @patch(
        'archey.entries.temperature.check_output',
        side_effect=[  # JSON decoding from `sensors` will fail..
            """\
{
    "Is this JSON valid ?": [
        "You", "should", "look", "twice.",
    ]
}
""",
            FileNotFoundError()  # No temperature from `vcgencmd` call
        ])
    @patch('archey.entries.temperature.iglob')
    @HelperMethods.patch_clean_configuration(
        configuration={
            'temperature': {
                'sensors_chipsets': [],
                'use_fahrenheit': False,
                'char_before_unit': 'o'
            }
        })
    def test_sensors_error_2(self, iglob_mock, _):
        """Test `sensors` (hard) failure handling and polling from files in Celsius"""
        iglob_mock.return_value = iter(
            [file.name for file in self._temp_files])
        self.assertDictEqual(
            Temperature().value, {
                'temperature': 46.7,
                'max_temperature': 50.0,
                'char_before_unit': 'o',
                'unit': 'C'
            })

    @patch(
        'archey.entries.temperature.check_output',
        side_effect=[
            FileNotFoundError(),  # No temperature from `sensors` call.
            FileNotFoundError()  # No temperature from `vcgencmd` call.
        ])
    @patch(
        'archey.entries.temperature.iglob',
        return_value=[]  # No temperature from file will be retrieved.
    )
    @HelperMethods.patch_clean_configuration
    def test_celsius_to_fahrenheit_conversion(self, _, __):
        """Simple tests for the `_convert_to_fahrenheit` static method"""
        test_conversion_cases = [(-273.15, -459.67), (0.0, 32.0), (21.0, 69.8),
                                 (37.0, 98.6), (100.0, 212.0)]

        for celsius_value, expected_fahrenheit in test_conversion_cases:
            self.assertAlmostEqual(
                Temperature._convert_to_fahrenheit(celsius_value),  # pylint: disable=protected-access
                expected_fahrenheit)
Ejemplo n.º 47
0
 def test_that_no_lvm_logical_volums_are_found_if_lvm_commands_dont_exist(
         self, co_mock):
     """No logical volumes should be generated when `lvs` cannot be run."""
     co_mock.side_effect = CalledProcessError(1, "lvs")
     generated = LvmLogicalVolume.generate()
     self.assertEqual(0, len(generated))
Ejemplo n.º 48
0
    def run(self, executable, infiles, db_key, db_name, db_user, db_host):
        """
        Depricated functionality
        """
        # executable : path to KernelControl executable
        # infiles    : tuple of MS, instrument- and sky-model files
        # db_*       : database connection parameters
        # ----------------------------------------------------------------------
        self.logger.debug("executable = %s" % executable)
        self.logger.debug("infiles = %s" % str(infiles))
        self.logger.debug("db_key = %s" % db_key)
        self.logger.debug("db_name = %s" % db_name)
        self.logger.debug("db_user = %s" % db_user)
        self.logger.debug("db_host = %s" % db_host)

        (ms, parmdb_instrument, parmdb_sky) = infiles

        with log_time(self.logger):
            if os.path.exists(ms):
                self.logger.info("Processing %s" % (ms))
            else:
                self.logger.error("Dataset %s does not exist" % (ms))
                return 1

            #        Build a configuration parset specifying database parameters
            #                                                     for the kernel
            # ------------------------------------------------------------------
            self.logger.debug("Setting up BBSKernel parset")
            # Getting the filesystem must be done differently, using the
            # DataProduct keys in the parset provided by the scheduler.
            filesystem = "%s:%s" % (os.uname()[1], get_mountpoint(ms))
            fd, parset_file = mkstemp()
            kernel_parset = parameterset()
            for key, value in {
                    "ObservationPart.Filesystem": filesystem,
                    "ObservationPart.Path": ms,
                    "BBDB.Key": db_key,
                    "BBDB.Name": db_name,
                    "BBDB.User": db_user,
                    "BBDB.Host": db_host,
                    "ParmDB.Sky": parmdb_sky,
                    "ParmDB.Instrument": parmdb_instrument
            }.iteritems():
                kernel_parset.add(key, value)
            kernel_parset.writeFile(parset_file)
            os.close(fd)
            self.logger.debug("BBSKernel parset written to %s" % parset_file)

            #                                                     Run the kernel
            #               Catch & log output from the kernel logger and stdout
            # ------------------------------------------------------------------
            working_dir = mkdtemp(suffix=".%s" %
                                  (os.path.basename(__file__), ))
            try:
                self.logger.info("******** {0}".format(
                    open(parset_file).read()))
                cmd = [executable, parset_file, "0"]
                self.logger.debug("Executing BBS kernel")
                with CatchLog4CPlus(
                        working_dir,
                        self.logger.name + "." + os.path.basename(ms),
                        os.path.basename(executable),
                ):
                    bbs_kernel_process = Popen(cmd,
                                               stdout=PIPE,
                                               stderr=PIPE,
                                               cwd=working_dir)
                    sout, serr = bbs_kernel_process.communicate()
                log_process_output("BBS kernel", sout, serr, self.logger)
                if bbs_kernel_process.returncode != 0:
                    raise CalledProcessError(bbs_kernel_process.returncode,
                                             executable)
            except CalledProcessError, e:
                self.logger.error(str(e))
                return 1
            finally:
Ejemplo n.º 49
0
 def check_returncode(self):
     """Raise CalledProcessError if the exit code is non-zero."""
     code = self.returncode
     if code:
         raise CalledProcessError(code, self.args, self.stdout, self.stderr)
Ejemplo n.º 50
0
def check_call(*args, **kwargs):
    """Adds support for timeout."""
    returncode = call(*args, **kwargs)
    if not returncode:
        return 0
    raise CalledProcessError(returncode, kwargs.get('args') or args[0])
Ejemplo n.º 51
0
class TestErrorFormatting:
    scenarios = (
        (
            "IncompatibleBaseError",
            {
                "exception_class": errors.IncompatibleBaseError,
                "kwargs": {
                    "base": "core18",
                    "linker_version": "2.23",
                    "file_list": dict(a="2.26", b="2.27"),
                },
                "expected_message": (
                    "The linker version '2.23' used by the base 'core18' is "
                    "incompatible with files in this snap:\n"
                    "    a (2.26)\n"
                    "    b (2.27)"
                ),
            },
        ),
        (
            "PrimeFileConflictError",
            {
                "exception_class": errors.PrimeFileConflictError,
                "kwargs": {"fileset": {"test-file"}},
                "expected_message": (
                    "Failed to filter files: "
                    "The following files have been excluded by the `stage` "
                    "keyword, but included by the `prime` keyword: "
                    "{'test-file'}. "
                    "Edit the `snapcraft.yaml` to make sure that the files "
                    "included in `prime` are also included in `stage`."
                ),
            },
        ),
        (
            "SnapcraftAfterPartMissingError",
            {
                "exception_class": project_loader_errors.SnapcraftAfterPartMissingError,
                "kwargs": {"part_name": "test-part1", "after_part_name": "test-part2"},
                "expected_message": (
                    "Failed to get part information: "
                    "Cannot find the definition for part 'test-part2', required by "
                    "part 'test-part1'.\n"
                    "Remote parts are not supported with bases, so make sure that this "
                    "part is defined in the `snapcraft.yaml`."
                ),
            },
        ),
        (
            "PluginError",
            {
                "exception_class": errors.PluginError,
                "kwargs": {"message": "test-message"},
                "expected_message": "Failed to load plugin: test-message",
            },
        ),
        (
            "PluginBaseError",
            {
                "exception_class": errors.PluginBaseError,
                "kwargs": {"part_name": "go-part", "base": "arch"},
                "expected_message": "The plugin used by part 'go-part' does not support snaps using base 'arch'.",
            },
        ),
        (
            "SnapcraftPartConflictError",
            {
                "exception_class": errors.SnapcraftPartConflictError,
                "kwargs": {
                    "part_name": "test-part",
                    "other_part_name": "test-other-part",
                    "conflict_files": ("test-file1", "test-file2"),
                },
                "expected_message": (
                    "Failed to stage: "
                    "Parts 'test-other-part' and 'test-part' have the following "
                    "files, but with different contents:\n"
                    "    test-file1\n"
                    "    test-file2\n"
                    "\n"
                    "Snapcraft offers some capabilities to solve this by use of "
                    "the following keywords:\n"
                    "    - `filesets`\n"
                    "    - `stage`\n"
                    "    - `snap`\n"
                    "    - `organize`\n"
                    "\n"
                    "To learn more about these part keywords, run "
                    "`snapcraft help plugins`."
                ),
            },
        ),
        (
            "InvalidWikiEntryError",
            {
                "exception_class": errors.InvalidWikiEntryError,
                "kwargs": {"error": "test-error"},
                "expected_message": "Invalid wiki entry: 'test-error'",
            },
        ),
        (
            "PluginOutdatedError",
            {
                "exception_class": errors.PluginOutdatedError,
                "kwargs": {"message": "test-message"},
                "expected_message": "This plugin is outdated: test-message",
            },
        ),
        (
            "RequiredCommandFailure",
            {
                "exception_class": errors.RequiredCommandFailure,
                "kwargs": {"command": "test-command"},
                "expected_message": "'test-command' failed.",
            },
        ),
        (
            "RequiredCommandNotFound",
            {
                "exception_class": errors.RequiredCommandNotFound,
                "kwargs": {"cmd_list": ["test-command", "test-argument"]},
                "expected_message": "'test-command' not found.",
            },
        ),
        (
            "RequiredPathDoesNotExist",
            {
                "exception_class": errors.RequiredPathDoesNotExist,
                "kwargs": {"path": "test-path"},
                "expected_message": "Required path does not exist: 'test-path'",
            },
        ),
        (
            "SnapcraftPathEntryError",
            {
                "exception_class": errors.SnapcraftPathEntryError,
                "kwargs": {"value": "test-path", "key": "test-key", "app": "test-app"},
                "expected_message": (
                    "Failed to generate snap metadata: "
                    "The path 'test-path' set for 'test-key' in 'test-app' does "
                    "not exist. Make sure that the files are in the `prime` "
                    "directory."
                ),
            },
        ),
        (
            "InvalidPullPropertiesError",
            {
                "exception_class": errors.InvalidPullPropertiesError,
                "kwargs": {
                    "plugin_name": "test-plugin",
                    "properties": ["test-property1", "test-property2"],
                },
                "expected_message": (
                    "Failed to load plugin: "
                    "Invalid pull properties specified by 'test-plugin' plugin: "
                    "['test-property1', 'test-property2']"
                ),
            },
        ),
        (
            "InvalidBuildPropertiesError",
            {
                "exception_class": errors.InvalidBuildPropertiesError,
                "kwargs": {
                    "plugin_name": "test-plugin",
                    "properties": ["test-property1", "test-property2"],
                },
                "expected_message": (
                    "Failed to load plugin: "
                    "Invalid build properties specified by 'test-plugin' plugin: "
                    "['test-property1', 'test-property2']"
                ),
            },
        ),
        (
            "StagePackageDownloadError",
            {
                "exception_class": errors.StagePackageDownloadError,
                "kwargs": {"part_name": "test-part", "message": "test-message"},
                "expected_message": (
                    "Failed to fetch stage packages: "
                    "Error downloading packages for part "
                    "'test-part': test-message."
                ),
            },
        ),
        (
            "InvalidContainerImageInfoError",
            {
                "exception_class": errors.InvalidContainerImageInfoError,
                "kwargs": {"image_info": "test-image-info"},
                "expected_message": (
                    "Failed to parse container image info: "
                    "SNAPCRAFT_IMAGE_INFO is not a valid JSON string: "
                    "test-image-info"
                ),
            },
        ),
        (
            "MissingMetadataFileError",
            {
                "exception_class": errors.MissingMetadataFileError,
                "kwargs": {"part_name": "test-part", "path": "test/path"},
                "expected_message": (
                    "Failed to generate snap metadata: "
                    "Part 'test-part' has a 'parse-info' referring to metadata "
                    "file 'test/path', which does not exist."
                ),
            },
        ),
        (
            "UnhandledMetadataFileTypeError",
            {
                "exception_class": errors.UnhandledMetadataFileTypeError,
                "kwargs": {"path": "test/path"},
                "expected_message": (
                    "Failed to extract metadata from 'test/path': "
                    "This type of file is not supported for supplying "
                    "metadata."
                ),
            },
        ),
        (
            "InvalidExtractorValueError",
            {
                "exception_class": errors.InvalidExtractorValueError,
                "kwargs": {"path": "test/path", "extractor_name": "extractor"},
                "expected_message": (
                    "Failed to extract metadata from 'test/path': "
                    "Extractor 'extractor' didn't return ExtractedMetadata as "
                    "expected."
                ),
            },
        ),
        (
            "PatcherNewerPatchelfError",
            {
                "exception_class": errors.PatcherNewerPatchelfError,
                "kwargs": {
                    "elf_file": "test/path",
                    "patchelf_version": "patchelf 0.9",
                    "process_exception": CalledProcessError(
                        cmd=["patchelf"], returncode=-1
                    ),
                },
                "expected_message": (
                    "'test/path' cannot be patched to function properly in a "
                    "classic confined snap: patchelf failed with exit code -1.\n"
                    "'patchelf 0.9' may be too old. A newer version of patchelf "
                    "may be required.\n"
                    "Try adding the `after: [patchelf]` and a `patchelf` part "
                    "that would filter out files from prime `prime: [-*]` or "
                    "`build-snaps: [patchelf/latest/edge]` to the failing part "
                    "in your `snapcraft.yaml` to use a newer patchelf."
                ),
            },
        ),
        (
            "PatcherGenericError",
            {
                "exception_class": errors.PatcherGenericError,
                "kwargs": {
                    "elf_file": "test/path",
                    "process_exception": CalledProcessError(
                        cmd=["patchelf"], returncode=-1
                    ),
                },
                "expected_message": (
                    "'test/path' cannot be patched to function properly in a "
                    "classic confined snap: patchelf failed with exit code -1"
                ),
            },
        ),
        (
            "StagePackageMissingError",
            {
                "exception_class": errors.StagePackageMissingError,
                "kwargs": {"package": "libc6"},
                "expected_message": (
                    "'libc6' is required inside the snap for this "
                    "part to work properly.\nAdd it as a `stage-packages` "
                    "entry for this part."
                ),
            },
        ),
        (
            "SnapcraftCommandError",
            {
                "exception_class": errors.SnapcraftCommandError,
                "kwargs": {
                    "command": "pip install foo",
                    "call_error": CalledProcessError(
                        cmd=["/bin/sh"], returncode=1, output="failed"
                    ),
                },
                "expected_message": (
                    "Failed to run 'pip install foo': Exited with code 1."
                ),
            },
        ),
        (
            "SnapcraftPluginCommandError string command",
            {
                "exception_class": errors.SnapcraftPluginCommandError,
                "kwargs": {
                    "command": "make install",
                    "exit_code": -1,
                    "part_name": "make_test",
                },
                "expected_message": (
                    "Failed to run 'make install' for 'make_test': "
                    "Exited with code -1.\n"
                    "Verify that the part is using the correct parameters and try "
                    "again."
                ),
            },
        ),
        (
            "SnapcraftPluginCommandError list command",
            {
                "exception_class": errors.SnapcraftPluginCommandError,
                "kwargs": {
                    "command": ["make", "install"],
                    "exit_code": 2,
                    "part_name": "make_test",
                },
                "expected_message": (
                    "Failed to run 'make install' for 'make_test': "
                    "Exited with code 2.\n"
                    "Verify that the part is using the correct parameters and try "
                    "again."
                ),
            },
        ),
        (
            "CrossCompilationNotSupported",
            {
                "exception_class": errors.CrossCompilationNotSupported,
                "kwargs": {"part_name": "my-part"},
                "expected_message": (
                    "The plugin used by 'my-part' does not support "
                    "cross-compiling to a different target architecture."
                ),
            },
        ),
        (
            "CacheUpdateFailedError",
            {
                "exception_class": repo_errors.CacheUpdateFailedError,
                "kwargs": {"errors": ""},
                "expected_message": (
                    "Failed to update the package cache: "
                    "Some files could not be downloaded: "
                    "Check that the sources on your host are configured correctly."
                ),
            },
        ),
        (
            "CacheUpdateFailedError",
            {
                "exception_class": repo_errors.CacheUpdateFailedError,
                "kwargs": {"errors": "foo, bar"},
                "expected_message": (
                    "Failed to update the package cache: "
                    "Some files could not be downloaded:\n\nfoo\nbar\n\n"
                    "Check that the sources on your host are configured correctly."
                ),
            },
        ),
        (
            "StoreNetworkError generic error",
            {
                "exception_class": store_errors.StoreNetworkError,
                "kwargs": {
                    "exception": requests.exceptions.ConnectionError("bad error")
                },
                "expected_message": "There seems to be a network error: bad error",
            },
        ),
        (
            "StoreNetworkError max retry error",
            {
                "exception_class": store_errors.StoreNetworkError,
                "kwargs": {
                    "exception": requests.exceptions.ConnectionError(
                        urllib3.exceptions.MaxRetryError(
                            pool="test-pool", url="test-url"
                        )
                    )
                },
                "expected_message": (
                    "There seems to be a network error: maximum retries exceeded "
                    "trying to reach the store.\n"
                    "Check your network connection, and check the store status at "
                    "https://status.snapcraft.io/"
                ),
            },
        ),
        (
            "SnapcraftCopyFileNotFoundError",
            {
                "exception_class": errors.SnapcraftCopyFileNotFoundError,
                "kwargs": {"path": "test-path"},
                "expected_message": (
                    "Failed to copy 'test-path': no such file or directory.\n"
                    "Check the path and try again."
                ),
            },
        ),
        (
            "StoreServerError 500",
            {
                "exception_class": store_errors.StoreServerError,
                "kwargs": {"response": _fake_error_response(500)},
                "expected_message": (
                    "The Snap Store encountered an error while processing your "
                    "request: internal server error (code 500).\nThe operational "
                    "status of the Snap Store can be checked at "
                    "https://status.snapcraft.io/"
                ),
            },
        ),
        (
            "StoreServerError 501",
            {
                "exception_class": store_errors.StoreServerError,
                "kwargs": {"response": _fake_error_response(501)},
                "expected_message": (
                    "The Snap Store encountered an error while processing your "
                    "request: not implemented (code 501).\nThe operational "
                    "status of the Snap Store can be checked at "
                    "https://status.snapcraft.io/"
                ),
            },
        ),
        (
            "MountPointNotFoundError",
            {
                "exception_class": errors.MountPointNotFoundError,
                "kwargs": {"mount_point": "test-mount-point"},
                "expected_message": "Nothing is mounted at 'test-mount-point'",
            },
        ),
        (
            "RootNotMountedError",
            {
                "exception_class": errors.RootNotMountedError,
                "kwargs": {"root": "test-root"},
                "expected_message": "'test-root' is not mounted",
            },
        ),
        (
            "InvalidMountinfoFormat",
            {
                "exception_class": errors.InvalidMountinfoFormat,
                "kwargs": {"row": [1, 2, 3]},
                "expected_message": "Unable to parse mountinfo row: [1, 2, 3]",
            },
        ),
        (
            "InvalidStepError",
            {
                "exception_class": errors.InvalidStepError,
                "kwargs": {"step_name": "test-step-name"},
                "expected_message": "'test-step-name' is not a valid lifecycle step",
            },
        ),
        (
            "NoLatestStepError",
            {
                "exception_class": errors.NoLatestStepError,
                "kwargs": {"part_name": "test-part-name"},
                "expected_message": "The 'test-part-name' part hasn't run any steps",
            },
        ),
        (
            "NoNextStepError",
            {
                "exception_class": errors.NoNextStepError,
                "kwargs": {"part_name": "test-part-name"},
                "expected_message": (
                    "The 'test-part-name' part has run through its entire lifecycle"
                ),
            },
        ),
        (
            "StepHasNotRunError",
            {
                "exception_class": errors.StepHasNotRunError,
                "kwargs": {"part_name": "test-part-name", "step": steps.BUILD},
                "expected_message": (
                    "The 'test-part-name' part has not yet run the 'build' step"
                ),
            },
        ),
        (
            "ScriptletDuplicateFieldError",
            {
                "exception_class": errors.ScriptletDuplicateFieldError,
                "kwargs": {"field": "foo", "step": steps.PULL},
                "expected_message": (
                    "Unable to set foo: it was already set in the 'pull' step."
                ),
            },
        ),
        (
            "ToolMissingError",
            {
                "exception_class": errors.ToolMissingError,
                "kwargs": {"command_name": "runnable"},
                "expected_message": (
                    "A tool snapcraft depends on could not be found: 'runnable'.\n"
                    "Ensure the tool is installed and available, and try again."
                ),
            },
        ),
        (
            "NoSuchFileError",
            {
                "exception_class": inspection_errors.NoSuchFileError,
                "kwargs": {"path": "test-path"},
                "expected_message": (
                    "Failed to find part that provided path: 'test-path' does not "
                    "exist.\n"
                    "Check the file path and try again."
                ),
            },
        ),
        (
            "ProvidesInvalidFilePathError",
            {
                "exception_class": inspection_errors.ProvidesInvalidFilePathError,
                "kwargs": {"path": "test-path"},
                "expected_message": (
                    "Failed to find part that provides path: 'test-path' is not "
                    "in the staging or priming area.\n"
                    "Ensure the path is in the staging or priming area and try "
                    "again."
                ),
            },
        ),
        (
            "UntrackedFileError",
            {
                "exception_class": inspection_errors.UntrackedFileError,
                "kwargs": {"path": "test-path"},
                "expected_message": (
                    "No known parts provided 'test-path'. It may have been "
                    "provided by a scriptlet."
                ),
            },
        ),
        (
            "NoStepsRunError",
            {
                "exception_class": inspection_errors.NoStepsRunError,
                "kwargs": {},
                "expected_message": "Failed to get latest step: no steps have run",
            },
        ),
    )

    def test_error_formatting(self, exception_class, expected_message, kwargs):
        """Instantiate the scenario's exception and verify its message.

        Each scenario supplies the exception class, the keyword arguments to
        construct it with, and the exact human-readable message that
        ``str()`` of the instance must produce.
        """
        error = exception_class(**kwargs)
        assert str(error) == expected_message
Ejemplo n.º 52
0
def build_docs(version='dev', **kwargs):
    """Build the {fmt} documentation and return the HTML output directory.

    Pipeline: run Doxygen over the public headers to produce XML, run
    sphinx-build (consuming that XML via Breathe) to produce HTML, then
    compile the Less stylesheet to CSS with ``lessc``.

    Keyword Args:
      version: version string baked into the Sphinx output (default 'dev').
      doc_dir: directory holding the Sphinx sources (defaults to the
        directory containing this script).
      work_dir: scratch/output directory (defaults to the current directory).
      include_dir: directory with the fmt headers fed to Doxygen.

    Raises:
      CalledProcessError: if doxygen exits with a nonzero status.
    """
    doc_dir = kwargs.get('doc_dir',
                         os.path.dirname(os.path.realpath(__file__)))
    work_dir = kwargs.get('work_dir', '.')
    include_dir = kwargs.get(
        'include_dir', os.path.join(os.path.dirname(doc_dir), 'include',
                                    'fmt'))
    # Build docs.
    # 'doxygen -' reads its configuration from stdin; stdout and stderr are
    # captured (stderr merged into stdout) so noisy warnings can be filtered
    # out of the output below.
    cmd = ['doxygen', '-']
    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    doxyxml_dir = os.path.join(work_dir, 'doxyxml')
    out, _ = p.communicate(input=r'''
      PROJECT_NAME      = fmt
      GENERATE_LATEX    = NO
      GENERATE_MAN      = NO
      GENERATE_RTF      = NO
      CASE_SENSE_NAMES  = NO
      INPUT             = {0}/chrono.h {0}/color.h {0}/core.h {0}/compile.h \
                          {0}/format.h {0}/os.h {0}/ostream.h {0}/printf.h \
                          {0}/xchar.h
      QUIET             = YES
      JAVADOC_AUTOBRIEF = YES
      AUTOLINK_SUPPORT  = NO
      GENERATE_HTML     = NO
      GENERATE_XML      = YES
      XML_OUTPUT        = {1}
      ALIASES           = "rst=\verbatim embed:rst"
      ALIASES          += "endrst=\endverbatim"
      MACRO_EXPANSION   = YES
      PREDEFINED        = _WIN32=1 \
                          __linux__=1 \
                          FMT_ENABLE_IF(...)= \
                          FMT_USE_VARIADIC_TEMPLATES=1 \
                          FMT_USE_RVALUE_REFERENCES=1 \
                          FMT_USE_USER_DEFINED_LITERALS=1 \
                          FMT_USE_ALIAS_TEMPLATES=1 \
                          FMT_API= \
                          "FMT_BEGIN_NAMESPACE=namespace fmt {{" \
                          "FMT_END_NAMESPACE=}}" \
                          "FMT_STRING_ALIAS=1" \
                          "FMT_VARIADIC(...)=" \
                          "FMT_VARIADIC_W(...)=" \
                          "FMT_DOC=1"
      EXCLUDE_SYMBOLS   = fmt::formatter fmt::printf_formatter fmt::arg_join \
                          fmt::basic_format_arg::handle
    '''.format(include_dir, doxyxml_dir).encode('UTF-8'))
    out = out.decode('utf-8')
    # Doxygen warnings about deliberately-undocumented internal symbols are
    # expected; strip them before echoing the output.
    internal_symbols = [
        'fmt::detail::.*', 'basic_data<>', 'fmt::type_identity',
        'fmt::dynamic_formatter'
    ]
    noisy_warnings = [
      'warning: (Compound|Member .* of class) (' + '|'.join(internal_symbols) + \
        ') is not documented.',
      'warning: Internal inconsistency: .* does not belong to any container!'
    ]
    for w in noisy_warnings:
        out = re.sub('.*' + w + '\n', '', out)
    print(out)
    if p.returncode != 0:
        raise CalledProcessError(p.returncode, cmd)

    html_dir = os.path.join(work_dir, 'html')
    # NOTE(review): 'versions' is a module-level global (not visible here);
    # only the three most recent entries feed the version switcher — confirm.
    main_versions = reversed(versions[-3:])
    check_call([
        os.path.join(work_dir, 'virtualenv', 'bin', 'sphinx-build'),
        '-Dbreathe_projects.format=' + os.path.abspath(doxyxml_dir),
        '-Dversion=' + version, '-Drelease=' + version, '-Aversion=' + version,
        '-Aversions=' + ','.join(main_versions), '-b', 'html', doc_dir,
        html_dir
    ])
    try:
        check_call([
            'lessc', '--verbose', '--clean-css',
            '--include-path=' + os.path.join(doc_dir, 'bootstrap'),
            os.path.join(doc_dir, 'fmt.less'),
            os.path.join(html_dir, '_static', 'fmt.css')
        ])
    except OSError as e:
        # A missing lessc binary gets a friendly hint instead of a
        # traceback; any other OSError is re-raised.
        if e.errno != errno.ENOENT:
            raise
        print('lessc not found; make sure that Less (http://lesscss.org/) ' +
              'is installed')
        sys.exit(1)
    return html_dir
Ejemplo n.º 53
0
async def black_run(
    project_name: str,
    repo_path: Optional[Path],
    project_config: Dict[str, Any],
    results: Results,
    no_diff: bool = False,
) -> None:
    """Run Black and record failures.

    Runs ``black --check`` (plus ``--diff`` unless *no_diff*) over one
    project and tallies the outcome in ``results.stats``; failures are
    stored in ``results.failed_projects`` keyed by the repo directory name.
    Exit code 69 (EX_UNAVAILABLE) marks a project with no checkout.
    """
    if not repo_path:
        results.stats["failed"] += 1
        results.failed_projects[project_name] = CalledProcessError(
            69, [], f"{project_name} has no repo_path: {repo_path}".encode(),
            b"")
        return

    # A project literally named "STDIN" means: feed the file at repo_path
    # to black on stdin instead of pointing black at a directory.
    stdin_test = project_name.upper() == "STDIN"
    cmd = [str(which(BLACK_BINARY))]
    if project_config.get("cli_arguments"):
        cmd.extend(_flatten_cli_args(project_config["cli_arguments"]))
    cmd.append("--check")
    if not no_diff:
        cmd.append("--diff")

    # Work out if we should read in a python file or search from cwd
    stdin = None
    if stdin_test:
        cmd.append("-")
        stdin = repo_path.read_bytes()
    elif "base_path" in project_config:
        cmd.append(project_config["base_path"])
    else:
        cmd.append(".")

    timeout = project_config.get("timeout_seconds", TEN_MINUTES_SECONDS)
    with TemporaryDirectory() as tmp_path:
        # Prevent reading top-level user configs by manipulating environment variables
        env = {
            **os.environ,
            "XDG_CONFIG_HOME": tmp_path,  # Unix-like
            "USERPROFILE": tmp_path,  # Windows (changes `Path.home()` output)
        }

        cwd_path = repo_path.parent if stdin_test else repo_path
        try:
            LOG.debug(f"Running black for {project_name}: {' '.join(cmd)}")
            _stdout, _stderr = await _gen_check_output(cmd,
                                                       cwd=cwd_path,
                                                       env=env,
                                                       stdin=stdin,
                                                       timeout=timeout)
        except asyncio.TimeoutError:
            results.stats["failed"] += 1
            LOG.error(f"Running black for {repo_path} timed out ({cmd})")
            # Bug fix: without this return, a timeout fell through to the
            # "expected formatting changes" accounting below and was counted
            # a second time - or even recorded as a success.
            return
        except CalledProcessError as cpe:
            # TODO: Tune for smarter for higher signal
            # If any other return value than 1 we raise - can disable project in config
            if cpe.returncode == 1:
                # Exit code 1 from --check means "would reformat".
                if not project_config["expect_formatting_changes"]:
                    results.stats["failed"] += 1
                    results.failed_projects[repo_path.name] = cpe
                else:
                    results.stats["success"] += 1
                return
            elif cpe.returncode > 1:
                # Exit code > 1 means black itself errored out.
                results.stats["failed"] += 1
                results.failed_projects[repo_path.name] = cpe
                return

            LOG.error(f"Unknown error with {repo_path}")
            raise

    # If we get here and expect formatting changes something is up
    if project_config["expect_formatting_changes"]:
        results.stats["failed"] += 1
        results.failed_projects[repo_path.name] = CalledProcessError(
            0, cmd, b"Expected formatting changes but didn't get any!", b"")
        return

    results.stats["success"] += 1
Ejemplo n.º 54
0
# Python 2/3 compatibility shim: rebind `str` to the native text type.
# This deliberately shadows the builtin for the rest of the module.
str = type('')

import io
import errno
import warnings
from posix import statvfs_result
from subprocess import CalledProcessError

import pytest
from mock import patch

from gpiozero import *
from datetime import datetime, time

# Canned exceptions used as mock side effects by the tests below.
file_not_found = IOError(errno.ENOENT, 'File not found')
# NOTE(review): CalledProcessError's second positional argument is the
# *command*, not a message — here the message text occupies the cmd slot, so
# str(bad_ping) renders oddly; confirm this is intentional for the mocks.
bad_ping = CalledProcessError(1, 'returned non-zero exit status 1')


def test_timeofday_bad_init(mock_factory):
    """Constructing TimeOfDay with missing or invalid arguments must fail."""
    with pytest.raises(TypeError):
        TimeOfDay()  # no arguments at all
    with pytest.raises(ValueError):
        TimeOfDay(7, 12)  # bare ints rejected — presumably needs time objects
    with pytest.raises(TypeError):
        TimeOfDay(time(7))  # end time missing
    with pytest.raises(ValueError):
        TimeOfDay(time(7), time(7))  # start == end is invalid
    with pytest.raises(ValueError):
        # NOTE(review): exact duplicate of the previous case — one of the
        # two was perhaps meant to use different arguments; confirm.
        TimeOfDay(time(7), time(7))
    with pytest.raises(ValueError):
        TimeOfDay('7:00', '8:00')  # string arguments are rejected
Ejemplo n.º 55
0
 def test_user_error_raised_for_no_git_remote(self, check_output):
     # Simulate the `git` invocation failing (e.g. no remote configured);
     # get_component_name should surface that as NoGitRemoteError rather
     # than a raw CalledProcessError.
     check_output.side_effect = CalledProcessError(1, 'git')
     self.assertRaises(config.NoGitRemoteError, config.get_component_name,
                       None)
Ejemplo n.º 56
0
    def execute(self):
        """Log into a running provisioned host with an interactive shell.

        Resolves which host to use — the single running host, or the one
        named on the command line matched by name prefix — then spawns the
        provisioner's login command inside a pexpect pseudo-terminal sized
        to the current terminal.  Exits the process with status 1 when the
        host is unknown, ambiguous, or nothing is running.
        """
        if self.static:
            self.disabled('login')

        # Collect the list of running hosts.
        try:
            status = self.molecule._provisioner.status()
        except Exception as e:
            # NOTE(review): broad catch — any provisioner error is treated
            # as "no hosts running" and the exception itself is discarded.
            status = []

        # make sure vagrant knows about this host
        try:
            # Nowhere to log into if there is no running host.
            if len(status) == 0:
                raise InvalidHost("There is no running host.")

            # Check whether a host was specified.
            if self.molecule._args['<host>'] is None:

                # One running host is perfect. Log into it.
                if len(status) == 1:
                    hostname = status[0].name

                # But too many hosts is trouble as well.
                else:
                    raise InvalidHost(
                        "There are {} running hosts. You can only log into one at a time."
                        .format(len(status)))

            else:

                # If the host was specified, try to use it.
                hostname = self.molecule._args['<host>']
                match = [x.name for x in status if x.name.startswith(hostname)]
                if len(match) == 0:
                    # No prefix match: reuse CalledProcessError so the
                    # "unknown host" handler below produces the message.
                    raise CalledProcessError(1, None)
                elif len(match) != 1:
                    raise InvalidHost(
                        "There are {} hosts that match '{}'.  You can only log into one at a time.\n"
                        "Try {}molecule status{} to see available hosts.".
                        format(len(match), hostname, colorama.Fore.YELLOW,
                               colorama.Fore.RED))
                hostname = match[0]

            login_cmd = self.molecule._provisioner.login_cmd(hostname)
            login_args = self.molecule._provisioner.login_args(hostname)

        except CalledProcessError:
            # gets appended to python-vagrant's error message
            conf_format = [
                colorama.Fore.RED, self.molecule._args['<host>'],
                colorama.Fore.YELLOW, colorama.Fore.RESET
            ]
            conf_errmsg = '\n{0}Unknown host {1}. Try {2}molecule status{0} to see available hosts.{3}'
            utilities.logger.error(conf_errmsg.format(*conf_format))
            sys.exit(1)
        except InvalidHost as e:
            # NOTE(review): e.message is Python-2-only; under Python 3 this
            # would raise AttributeError — confirm the target runtime.
            conf_format = [colorama.Fore.RED, e.message, colorama.Fore.RESET]
            conf_errmsg = '{}{}{}'
            utilities.logger.error(conf_errmsg.format(*conf_format))
            sys.exit(1)

        # Size the pseudo-terminal to match the user's real terminal.
        lines, columns = os.popen('stty size', 'r').read().split()
        dimensions = (int(lines), int(columns))
        self.molecule._pt = pexpect.spawn('/usr/bin/env ' +
                                          login_cmd.format(*login_args),
                                          dimensions=dimensions)
        # Forward terminal resize events to the child process.
        signal.signal(signal.SIGWINCH, self.molecule._sigwinch_passthrough)
        self.molecule._pt.interact()
        return None, None
Ejemplo n.º 57
0
    def __exit__(self, *exc_info):
        """Finish the background command and resume the wrapped generator.

        First delegates to the inner context manager's ``__exit__`` (which
        may swallow or replace the in-flight exception), then either throws
        the resulting exception into ``self.gen`` or sends it a dict of
        ``stdout``/``stderr``/``returncode`` results.  The generator's final
        value ends up in ``self._output``; when something goes wrong a
        ``_NotSet`` "bomb" is stored instead so that accessing ``output``
        re-raises the failure.  Returns True to suppress the exception,
        False (or raises) otherwise, per the context-manager protocol.
        """
        inner_exit = self._bg_cmd.__exit__
        wload = self.wload
        logger = self.get_logger()

        try:
            suppress = inner_exit(*exc_info)
        except BaseException as e:
            # The inner __exit__ raised: that exception replaces the one we
            # were given.
            exc_info = (type(e), e, e.__traceback__)
        else:
            if suppress:
                # Inner __exit__ swallowed the exception: proceed as success.
                exc_info = (None, None, None)

        type_, value, traceback = exc_info

        returncode = self._bg_cmd.poll()

        if exc_info[0] is not None:
            # Failure path: propagate the exception into the generator so it
            # can clean up and/or produce a final value.
            try:
                self.gen.throw(*exc_info)
            except StopIteration as e:
                if e is value:
                    # Generator re-raised the very StopIteration we threw in:
                    # it produced no value of its own.
                    self._output = _NotSet(e)
                    return False
                else:
                    # Generator finished normally: its return value becomes
                    # the output and the original exception is suppressed.
                    self._output = e.value
                    return True
            except BaseException as e:
                # Place a "bomb" value: if the user tries to access
                # "self.output", the exception will be raised again
                self._output = _NotSet(e)
                # __exit__ is not expected to re-raise the exception it was
                # given, instead it returns a falsy value to indicate it should
                # not be swallowed
                if e is value:
                    return False
                else:
                    raise
            # This cannot happen: throw() has to raise the exception or swallow
            # it and then later raise a StopIteration because it is finished
            else:
                assert False
        else:
            # Success path: gather the captured stdout/stderr (if any).
            try:
                futures = self._futures
            except ValueError:
                # NOTE(review): _futures apparently raises ValueError when no
                # output was captured — presumably a property; confirm.
                results = dict(stdout=None, stderr=None)
            else:
                results = {
                    name: future.result()
                    for name, future in futures.items()
                }
                if wload._settings['log_std_streams']:
                    # Dump the stdout/stderr content to log files for easier
                    # debugging
                    for name, content in results.items():
                        path = ArtifactPath.join(wload.res_dir, f'{name}.log')
                        logger.debug(f'Saving {name} to {path}...')

                        with open(path, 'wb') as f:
                            f.write(content)

            # For convenience and to avoid depending too much on devlib's
            # BackgroundCommand in simple cases
            results['returncode'] = returncode

            if returncode:
                # Nonzero exit: surface it to the generator as a
                # CalledProcessError carrying the captured streams.
                action = lambda: self.gen.throw(
                    CalledProcessError(
                        returncode=returncode,
                        cmd=f'<Workload {self.name}>',
                        output=results['stdout'],
                        stderr=results['stderr'],
                    ))
            else:
                action = lambda: self.gen.send(results)

            try:
                action()
            except StopIteration as e:
                output = e.value
                excep = None
            except Exception as e:
                output = _NotSet(e)
                excep = e
            else:
                excep = None

            self._output = output
            if excep is not None:
                raise excep
Ejemplo n.º 58
0
 def test_that_no_mounted_fs_are_found_if_df_command_doesnt_exist(
         self, co_mock):
     # With the mocked check_output failing (as if `df` were missing),
     # generate() should degrade to an empty result instead of raising.
     co_mock.side_effect = CalledProcessError(1, "df")
     self.assertEqual(0, len(MountedFileSystem.generate()))
Ejemplo n.º 59
0
    def _run_job_in_hadoop(self):
        """Invoke Hadoop for each step of the job and interpret its output.

        Prefers running Hadoop under a PTY so stderr/stdout interleave as on
        a real terminal; falls back to Popen() when no PTY is available.
        Logs counters per step and raises StepFailedException (with the
        probable cause extracted from the logs, when one can be found) if a
        step exits nonzero.
        """
        for step_num, step in enumerate(self._get_steps()):
            self._warn_about_spark_archives(step)

            step_args = self._args_for_step(step_num)
            env = _fix_env(self._env_for_step(step_num))

            # log this *after* _args_for_step(), which can start a search
            # for the Hadoop streaming jar
            log.info('Running step %d of %d...' %
                     (step_num + 1, self._num_steps()))
            log.debug('> %s' % cmd_line(step_args))
            log.debug('  with environment: %r' % sorted(env.items()))

            log_interpretation = {}
            self._log_interpretations.append(log_interpretation)

            # try to use a PTY if it's available
            try:
                pid, master_fd = pty.fork()
            except (AttributeError, OSError):
                # no PTYs, just use Popen

                # user won't get much feedback for a while, so tell them
                # Hadoop is running
                log.debug('No PTY available, using Popen() to invoke Hadoop')

                step_proc = Popen(step_args, stdout=PIPE, stderr=PIPE, env=env)

                step_interpretation = _interpret_hadoop_jar_command_stderr(
                    step_proc.stderr,
                    record_callback=_log_record_from_hadoop)

                # there shouldn't be much output to STDOUT
                for line in step_proc.stdout:
                    _log_line_from_hadoop(to_unicode(line).strip('\r\n'))

                step_proc.stdout.close()
                step_proc.stderr.close()

                returncode = step_proc.wait()
            else:
                # we have PTYs
                if pid == 0:  # we are the child process
                    # Replace the child's image with the Hadoop command;
                    # execvpe does not return on success.
                    os.execvpe(step_args[0], step_args, env)
                else:
                    log.debug('Invoking Hadoop via PTY')

                    with os.fdopen(master_fd, 'rb') as master:
                        # reading from master gives us the subprocess's
                        # stderr and stdout (it's a fake terminal)
                        step_interpretation = (
                            _interpret_hadoop_jar_command_stderr(
                                master,
                                record_callback=_log_record_from_hadoop))
                        _, returncode = os.waitpid(pid, 0)

            # make sure output_dir is filled
            if 'output_dir' not in step_interpretation:
                step_interpretation['output_dir'] = (
                    self._step_output_uri(step_num))

            log_interpretation['step'] = step_interpretation

            self._log_counters(log_interpretation, step_num)

            step_type = step['type']

            if returncode:
                error = self._pick_error(log_interpretation, step_type)
                if error:
                    log.error('Probable cause of failure:\n\n%s\n' %
                              _format_error(error))

                # use CalledProcessError's well-known message format
                reason = str(CalledProcessError(returncode, step_args))
                raise StepFailedException(
                    reason=reason, step_num=step_num,
                    num_steps=self._num_steps())
Ejemplo n.º 60
0
def build_docs(sphinx_executable='sphinx-build', version='dev', **kwargs):
    """Build the HTML documentation and return the output directory.

    Pipeline: run Doxygen (config fed on stdin) to emit XML for the fmt
    headers, run Sphinx (via breathe) to render HTML from it, then compile
    the Less stylesheet with ``lessc``.

    Keyword options (all optional):
        doc_dir     -- directory holding the Sphinx sources
                       (default: this script's directory)
        work_dir    -- scratch/output root (default: '.')
        include_dir -- location of the fmt headers to document

    Raises CalledProcessError if Doxygen fails; exits with status 1 if
    ``lessc`` is not installed. Reads the module-global ``versions`` list.
    """
    doc_dir = kwargs.get('doc_dir',
                         os.path.dirname(os.path.realpath(__file__)))
    work_dir = kwargs.get('work_dir', '.')
    default_include = os.path.join(os.path.dirname(doc_dir), 'include', 'fmt')
    include_dir = kwargs.get('include_dir', default_include)

    # Step 1: Doxygen. Config is passed on stdin ('doxygen -'); note the
    # {{ / }} below are literal braces escaped for str.format.
    doxyxml_dir = os.path.join(work_dir, 'doxyxml')
    doxygen_cmd = ['doxygen', '-']
    doxygen = Popen(doxygen_cmd, stdin=PIPE)
    config = r'''
      PROJECT_NAME      = fmt
      GENERATE_LATEX    = NO
      GENERATE_MAN      = NO
      GENERATE_RTF      = NO
      CASE_SENSE_NAMES  = NO
      INPUT             = {0}/core.h {0}/compile.h {0}/format.h {0}/os.h \
                          {0}/ostream.h {0}/printf.h {0}/time.h
      QUIET             = YES
      JAVADOC_AUTOBRIEF = YES
      AUTOLINK_SUPPORT  = NO
      GENERATE_HTML     = NO
      GENERATE_XML      = YES
      XML_OUTPUT        = {1}
      ALIASES           = "rst=\verbatim embed:rst"
      ALIASES          += "endrst=\endverbatim"
      MACRO_EXPANSION   = YES
      PREDEFINED        = _WIN32=1 \
                          FMT_USE_VARIADIC_TEMPLATES=1 \
                          FMT_USE_RVALUE_REFERENCES=1 \
                          FMT_USE_USER_DEFINED_LITERALS=1 \
                          FMT_USE_ALIAS_TEMPLATES=1 \
                          FMT_API= \
                          "FMT_BEGIN_NAMESPACE=namespace fmt {{" \
                          "FMT_END_NAMESPACE=}}" \
                          "FMT_STRING_ALIAS=1" \
                          "FMT_ENABLE_IF(B)="
      EXCLUDE_SYMBOLS   = fmt::internal::* StringValue write_str
    '''.format(include_dir, doxyxml_dir)
    doxygen.communicate(input=config.encode('UTF-8'))
    if doxygen.returncode != 0:
        raise CalledProcessError(doxygen.returncode, doxygen_cmd)

    # Step 2: Sphinx, pointing breathe at the Doxygen XML output.
    html_dir = os.path.join(work_dir, 'html')
    # Only the three most recent entries, newest first, go in the selector.
    recent_versions = reversed(versions[-3:])
    check_call([
        sphinx_executable,
        '-Dbreathe_projects.format=' + os.path.abspath(doxyxml_dir),
        '-Dversion=' + version, '-Drelease=' + version, '-Aversion=' + version,
        '-Aversions=' + ','.join(recent_versions), '-b', 'html', doc_dir,
        html_dir
    ])

    # Step 3: compile fmt.less into the generated _static directory.
    try:
        check_call([
            'lessc', '--clean-css',
            '--include-path=' + os.path.join(doc_dir, 'bootstrap'),
            os.path.join(doc_dir, 'fmt.less'),
            os.path.join(html_dir, '_static', 'fmt.css')
        ])
    except OSError as e:
        # ENOENT means lessc itself is missing; anything else is unexpected.
        if e.errno != errno.ENOENT:
            raise
        print('lessc not found; make sure that Less (http://lesscss.org/) ' +
              'is installed')
        sys.exit(1)
    return html_dir