Code example #1
File: processutils.py  Project: bobcallaway/manila
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    LOG.debug('Running cmd (SSH): %s', cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug('Result was %s' % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)

    return (stdout, stderr)
Code example #2
File: impl_zmq.py  Project: aostapenko/manila
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """
    Wraps the sending of messages,
    dispatches to the matchmaker and sends
    message to all relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if len(queues) == 0:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope,
                             _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout,
                      envelope)
Code example #3
File: impl_zmq.py  Project: aostapenko/manila
    def create_consumer(self, topic, proxy, fanout=False):
        # Register with matchmaker.
        _get_matchmaker().register(topic, CONF.rpc_zmq_host)

        # Subscription scenarios
        if fanout:
            sock_type = zmq.SUB
            subscribe = ('', fanout)[type(fanout) == str]
            topic = 'fanout~' + topic.split('.', 1)[0]
        else:
            sock_type = zmq.PULL
            subscribe = None
            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

        if topic in self.topics:
            LOG.info(_("Skipping topic registration. Already registered."))
            return

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug(_("Consumer is a zmq.%s"),
                  ['PULL', 'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)
        self.topics.append(topic)
Code example #4
File: amqp.py  Project: nkrinner/manila
    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}

        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, 'received %s', message_data)
        self.msg_id_cache.check_duplicate_message(message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version')
        namespace = message_data.get('namespace')
        if not method:
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        self.pool.spawn_n(self._process_data, ctxt, version, method,
                          namespace, args)
Code example #5
File: amqp.py  Project: pombredanne/manila
def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times."""
    # TODO(pekowski): Remove all these comments in Havana.
    # For amqp_rpc_single_reply_queue = False,
    # Can't use 'with' for multicall, as it returns an iterator
    # that will continue to use the connection.  When it's done,
    # connection.close() will get called which will put it back into
    # the pool
    # For amqp_rpc_single_reply_queue = True,
    # The 'with' statement is mandatory for closing the connection
    LOG.debug(_("Making synchronous call on %s ..."), topic)
    msg_id = uuid.uuid4().hex
    msg.update({"_msg_id": msg_id})
    LOG.debug(_("MSG_ID is %s") % (msg_id))
    _add_unique_id(msg)
    pack_context(msg, context)

    # TODO(pekowski): Remove this flag and the code under the if clause
    #                 in Havana.
    if not conf.amqp_rpc_single_reply_queue:
        conn = ConnectionContext(conf, connection_pool)
        wait_msg = MulticallWaiter(conf, conn, timeout)
        conn.declare_direct_consumer(msg_id, wait_msg)
        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    else:
        with _reply_proxy_create_sem:
            if not connection_pool.reply_proxy:
                connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
        msg.update({"_reply_q": connection_pool.reply_proxy.get_reply_q()})
        wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
        with ConnectionContext(conf, connection_pool) as conn:
            conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    return wait_msg
Code example #6
File: amqp.py  Project: pombredanne/manila
    def _process_data(self, ctxt, version, method, namespace, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate.  If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                       **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            LOG.debug(_("Expected exception during message handling (%s)") % e._exc_info[1])
            ctxt.reply(None, e._exc_info, connection_pool=self.connection_pool, log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_("Exception during message handling"), exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
Code example #7
File: impl_zmq.py  Project: aostapenko/manila
    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
        self.sock = _get_ctxt().socket(zmq_type)
        self.addr = addr
        self.type = zmq_type
        self.subscriptions = []

        # Support failures on sending/receiving on wrong socket type.
        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
        self.can_sub = zmq_type in (zmq.SUB, )

        # Support list, str, & None for subscribe arg (cast to list)
        do_sub = {
            list: subscribe,
            str: [subscribe],
            type(None): []
        }[type(subscribe)]

        for f in do_sub:
            self.subscribe(f)

        str_data = {'addr': addr, 'type': self.socket_s(),
                    'subscribe': subscribe, 'bind': bind}

        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
        LOG.debug(_("-> bind: %(bind)s"), str_data)

        try:
            if bind:
                self.sock.bind(addr)
            else:
                self.sock.connect(addr)
        except Exception:
            raise RPCException(_("Could not open socket."))
Code example #8
File: policy.py  Project: aostapenko/manila
    def _check(self, match, target_dict, cred_dict):
        try:
            match_kind, match_value = match.split(':', 1)
        except Exception:
            LOG.exception(_("Failed to understand rule %(match)r") % locals())
            # If the rule is invalid, fail closed
            return False

        func = None
        try:
            old_func = getattr(self, '_check_%s' % match_kind)
        except AttributeError:
            func = self._checks.get(match_kind, self._checks.get(None, None))
        else:
            LOG.warning(_("Inheritance-based rules are deprecated; update "
                          "_check_%s") % match_kind)
            func = lambda brain, kind, value, target, cred: old_func(value,
                                                                     target,
                                                                     cred)

        if not func:
            LOG.error(_("No handler for matches of kind %s") % match_kind)
            # Fail closed
            return False

        return func(self, match_kind, match_value, target_dict, cred_dict)
Code example #9
File: impl_zmq.py  Project: aostapenko/manila
            def publisher(waiter):
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                while True:
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data)
                    LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
                              {'data': data})
Code example #10
File: impl_qpid.py  Project: aostapenko/manila
    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues"""
        if self.connection.opened():
            try:
                self.connection.close()
            except qpid_exceptions.ConnectionError:
                pass

        attempt = 0
        delay = 1
        while True:
            broker = self.brokers[attempt % len(self.brokers)]
            attempt += 1

            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid_exceptions.ConnectionError as e:
                msg_dict = dict(e=e, delay=delay)
                msg = _("Unable to connect to AMQP server: %(e)s. "
                        "Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                delay = min(2 * delay, 60)
            else:
                LOG.info(_('Connected to AMQP server on %s'), broker)
                break
Code example #11
    def __init__(self,
                 stdout=None,
                 stderr=None,
                 exit_code=None,
                 cmd=None,
                 description=None):
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description

        if description is None:
            description = _("Unexpected error while running command.")
        if exit_code is None:
            exit_code = '-'
        message = _('%(description)s\n'
                    'Command: %(cmd)s\n'
                    'Exit code: %(exit_code)s\n'
                    'Stdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % {
                        'description': description,
                        'cmd': cmd,
                        'exit_code': exit_code,
                        'stdout': stdout,
                        'stderr': stderr
                    }
        super(ProcessExecutionError, self).__init__(message)
Code example #12
File: impl_zmq.py  Project: aostapenko/manila
    def consume_in_thread(self):
        """Runs the ZmqProxy service"""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        if not os.path.isdir(ipc_dir):
            try:
                utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
                utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
                              ipc_dir, run_as_root=True)
                utils.execute('chmod', '750', ipc_dir, run_as_root=True)
            except utils.ProcessExecutionError:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Could not create IPC directory %s") %
                              (ipc_dir, ))

        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL,
                          out_bind=True)
        except zmq.ZMQError:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Code example #14
File: lockutils.py  Project: pombredanne/manila
 def inner(*args, **kwargs):
     try:
         with lock(name, lock_file_prefix, external, lock_path):
             LOG.debug(_('Got semaphore / lock "%(function)s"'), {"function": f.__name__})
             return f(*args, **kwargs)
     finally:
         LOG.debug(_('Semaphore / lock released "%(function)s"'), {"function": f.__name__})
Code example #15
File: impl_zmq.py  Project: aostapenko/manila
    def consume(self, sock):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
        if sock in self.mapping:
            LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
                'data': data})
            self.mapping[sock].send(data)
            return

        proxy = self.proxies[sock]

        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)
Code example #16
File: service.py  Project: aostapenko/manila
    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap
Code example #17
File: impl_zmq.py  Project: aostapenko/manila
    def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
                 zmq_type_out=None, in_bind=True, out_bind=True,
                 subscribe=None):

        LOG.info(_("Registering reactor"))

        if zmq_type_in not in (zmq.PULL, zmq.SUB):
            raise RPCException("Bad input socktype")

        # Items push in.
        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                        subscribe=subscribe)

        self.proxies[inq] = proxy
        self.sockets.append(inq)

        LOG.info(_("In reactor registered"))

        if not out_addr:
            return

        if zmq_type_out not in (zmq.PUSH, zmq.PUB):
            raise RPCException("Bad output socktype")

        # Items push out.
        outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)

        self.mapping[inq] = outq
        self.mapping[outq] = inq
        self.sockets.append(outq)

        LOG.info(_("Out reactor registered"))
Code example #18
File: service.py  Project: aostapenko/manila
    def wait(self):
        """Loop waiting on children to die and respawning as necessary"""

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(.01)
                continue

            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

        if self.sigcaught:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[self.sigcaught]
            LOG.info(_('Caught %s, stopping children'), signame)

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
Code example #19
File: impl_kombu.py  Project: aostapenko/manila
 def _connect(self, params):
     """Connect to rabbit.  Re-establish any queues that may have
     been declared before if we are reconnecting.  Exceptions should
     be handled by the caller.
     """
     if self.connection:
         LOG.info(_("Reconnecting to AMQP server on "
                  "%(hostname)s:%(port)d") % params)
         try:
             self.connection.release()
         except self.connection_errors:
             pass
         # Setting this in case the next statement fails, though
         # it shouldn't be doing any network operations, yet.
         self.connection = None
     self.connection = kombu.connection.BrokerConnection(**params)
     self.connection_errors = self.connection.connection_errors
     if self.memory_transport:
         # Kludge to speed up tests.
         self.connection.transport.polling_interval = 0.0
     self.consumer_num = itertools.count(1)
     self.connection.connect()
     self.channel = self.connection.channel()
     # work around 'memory' transport bug in 1.1.3
     if self.memory_transport:
         self.channel._new_queue('ae.undeliver')
     for consumer in self.consumers:
         consumer.reconnect(self.channel)
     LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
              params)
Code example #20
File: impl_qpid.py  Project: aostapenko/manila
 def _error_callback(exc):
     if isinstance(exc, qpid_exceptions.Empty):
         LOG.debug(_('Timed out waiting for RPC response: %s') %
                   str(exc))
         raise rpc_common.Timeout()
     else:
         LOG.exception(_('Failed to consume message from queue: %s') %
                       str(exc))
Code example #21
File: impl_kombu.py  Project: aostapenko/manila
 def _error_callback(exc):
     if isinstance(exc, socket.timeout):
         LOG.debug(_('Timed out waiting for RPC response: %s') %
                   str(exc))
         raise rpc_common.Timeout()
     else:
         LOG.exception(_('Failed to consume message from queue: %s') %
                       str(exc))
         info['do_consume'] = True
Code example #22
File: amqp.py  Project: nkrinner/manila
 def _process_data(self, message_data):
     msg_id = message_data.pop('_msg_id', None)
     waiter = self._call_waiters.get(msg_id)
     if not waiter:
         LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
                    ', message : %(data)s'), {'msg_id': msg_id,
                                              'data': message_data})
         LOG.warn(_('_call_waiters: %s') % self._call_waiters)
     else:
         waiter.put(message_data)
Code example #23
File: impl_kombu.py  Project: aostapenko/manila
    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            params = self.params_list[attempt % len(self.params_list)]
            attempt += 1
            try:
                self._connect(params)
                return
            except (IOError, self.connection_errors) as e:
                pass
            except Exception as e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info.update(params)

            if self.max_retries and attempt == self.max_retries:
                LOG.error(_('Unable to connect to AMQP server on '
                            '%(hostname)s:%(port)d after %(max_retries)d '
                            'tries: %(err_str)s') % log_info)
                # NOTE(comstud): Copied from original code.  There's
                # really no better recourse because if this was a queue we
                # need to consume on, we have no way to consume anymore.
                sys.exit(1)

            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
                        'unreachable: %(err_str)s. Trying again in '
                        '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)
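
The docstring above describes a linear backoff capped at interval_max. The snippet below is only an illustration of that sleep-time arithmetic with made-up settings (interval_start=2, interval_stepping=2, interval_max=10); it is not part of the Connection class.

# Illustration of the backoff arithmetic in reconnect(), with assumed values.
interval_start, interval_stepping, interval_max = 2, 2, 10

schedule = []
sleep_time = 0
for attempt in range(1, 8):
    if attempt == 1:
        sleep_time = interval_start or 1
    else:
        sleep_time += interval_stepping
    if interval_max:
        sleep_time = min(sleep_time, interval_max)
    schedule.append(sleep_time)

print(schedule)  # [2, 4, 6, 8, 10, 10, 10]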
Code example #24
File: cinder.py  Project: rpafford/manila
 def check_attach(self, context, volume, instance=None):
     if volume['status'] != "available":
         msg = _("status must be 'available'")
         raise exception.InvalidVolume(reason=msg)
     if volume['attach_status'] == "attached":
         msg = _("already attached")
         raise exception.InvalidVolume(reason=msg)
     if instance and not CONF.cinder_cross_az_attach:
         if instance['availability_zone'] != volume['availability_zone']:
             msg = _("Instance and volume not in same availability_zone")
             raise exception.InvalidVolume(reason=msg)
Code example #25
File: api.py  Project: aostapenko/manila
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type:   the literal type of event (ex. Instance Creation)
    :param priority:     patterned after the enumeration of Python logging
                         levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload:       A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
      a UUID representing the id for this notification

    timestamp
      the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s")
                          % dict(e=e, payload=payload))
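
As a usage note, a call to this function might look like the sketch below. The publisher id, event type, and payload are invented values, and the import path assumes the usual openstack.common layout (manila.openstack.common.notifier.api); adjust both to your deployment.

# Hypothetical usage sketch; names and values are illustrative only.
from manila.openstack.common.notifier import api as notifier_api

# 'context' is assumed to be an existing request context object.
notifier_api.notify(context,
                    publisher_id='share.host1',
                    event_type='share.create.start',
                    priority='INFO',
                    payload={'share_id': 'fake-share-id'})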
Code example #27
File: quotas.py  Project: mbr4v0v/manila
 def _validate_quota_limit(self, limit, minimum, maximum, force_update):
     # NOTE: -1 is a flag value for unlimited
     if limit < -1:
         msg = _("Quota limit must be -1 or greater.")
         raise webob.exc.HTTPBadRequest(explanation=msg)
     if ((limit < minimum and not force_update)
             and (maximum != -1 or (maximum == -1 and limit != -1))):
         msg = _("Quota limit must greater than %s.") % minimum
         raise webob.exc.HTTPBadRequest(explanation=msg)
     if maximum != -1 and limit > maximum:
         msg = _("Quota limit must less than %s.") % maximum
         raise webob.exc.HTTPBadRequest(explanation=msg)
Code example #28
File: quotas.py  Project: nkrinner/manila
 def _validate_quota_limit(self, limit, minimum, maximum, force_update):
     # NOTE: -1 is a flag value for unlimited
     if limit < -1:
         msg = _("Quota limit must be -1 or greater.")
         raise webob.exc.HTTPBadRequest(explanation=msg)
     if ((limit < minimum and not force_update) and
        (maximum != -1 or (maximum == -1 and limit != -1))):
         msg = _("Quota limit must greater than %s.") % minimum
         raise webob.exc.HTTPBadRequest(explanation=msg)
     if maximum != -1 and limit > maximum:
         msg = _("Quota limit must less than %s.") % maximum
         raise webob.exc.HTTPBadRequest(explanation=msg)
Code example #29
File: common.py  Project: nkrinner/manila
    def __init__(self, info=None, topic=None, method=None):
        """Initiates Timeout object.

        :param info: Extra info to convey to the user
        :param topic: The topic that the rpc call was sent to
        :param method: The name of the rpc method being called
        """
        self.info = info
        self.topic = topic
        self.method = method
        super(Timeout, self).__init__(
            None,
            info=info or _("<unknown>"),
            topic=topic or _("<unknown>"),
            method=method or _("<unknown>"))
Code example #30
File: utils.py  Project: kleopatra999/manila
def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except Exception:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because column has unsupported "
                "type by sqlite).")
        raise exception.ManilaException(msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s It should be instance "
                "of sqlalchemy.Column.")
        raise exception.ManilaException(msg % column_name)
    return column
Code example #31
File: log.py  Project: aostapenko/manila
 def deprecated(self, msg, *args, **kwargs):
     stdmsg = _("Deprecated: %s") % msg
     if CONF.fatal_deprecations:
         self.critical(stdmsg, *args, **kwargs)
         raise DeprecatedConfig(msg=stdmsg)
     else:
         self.warn(stdmsg, *args, **kwargs)
Code example #32
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join("'%s'" % s
                               for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = _("Unrecognized value '%(val)s', acceptable values are:"
                " %(acceptable)s") % {
                    'val': subject,
                    'acceptable': acceptable
                }
        raise ValueError(msg)
    else:
        return default
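
A few illustrative calls, assuming the module-level TRUE_STRINGS and FALSE_STRINGS sets contain the values listed in the docstring:

# Illustrative calls; accepted strings come from TRUE_STRINGS / FALSE_STRINGS.
bool_from_string('YES')                   # True  (case-insensitive match)
bool_from_string('off')                   # False
bool_from_string('maybe')                 # False (unrecognized -> default)
bool_from_string('maybe', default=True)   # True  (unrecognized -> default)
bool_from_string('maybe', strict=True)    # raises ValueError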
Code example #33
File: loopingcall.py  Project: aostapenko/manila
    def start(self, initial_delay=None, periodic_interval_max=None):
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug(_('Dynamic looping call sleeping for %.02f '
                                'seconds'), idle)
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
Code example #34
    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {
                                                    'filename': self.fname,
                                                    'exception': e,
                                                })
Code example #35
File: loopingcall.py  Project: aostapenko/manila
    def start(self, interval, initial_delay=None):
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
Code example #36
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into an float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.

    """
    try:
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)
    match = reg_ex.match(text)
    if match:
        magnitude = float(match.group(1))
        unit_prefix = match.group(2)
        if match.group(3) in ['b', 'bit']:
            magnitude /= 8
    else:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)
    if not unit_prefix:
        res = magnitude
    else:
        res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    if return_int:
        return int(math.ceil(res))
    return res
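
The calls below illustrate the IEC/SI distinction described in the docstring. The exact results depend on the UNIT_SYSTEM_INFO and UNIT_PREFIX_EXPONENT tables defined alongside this function, so treat the expected values as a sketch rather than a specification.

# Illustrative conversions (expected values assume the usual unit tables).
string_to_bytes('1KiB')                      # 1024.0  (IEC, byte unit)
string_to_bytes('1Kb')                       # 128.0   (IEC, bit unit: 1024 / 8)
string_to_bytes('1kB', unit_system='SI')     # 1000.0  (SI, byte unit)
string_to_bytes('1.5MiB', return_int=True)   # 1572864 (rounded up to an int)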
Code example #37
File: policy.py  Project: poornimakshirsagar/manila
    def set_rules(self, rules, overwrite=True, use_conf=False):
        """Create a new Rules object based on the provided dict of rules.

        :param rules: New rules to use. It should be an instance of dict.
        :param overwrite: Whether to overwrite current rules or update them
                          with the new rules.
        :param use_conf: Whether to reload rules from cache or config file.
        """

        if not isinstance(rules, dict):
            raise TypeError(
                _("Rules must be an instance of dict or Rules, "
                  "got %s instead") % type(rules))
        self.use_conf = use_conf
        if overwrite:
            self.rules = Rules(rules, self.default_rule)
        else:
            self.rules.update(rules)
Code example #38
File: api.py  Project: mbr4v0v/manila
 def create_port(self,
                 tenant_id,
                 network_id,
                 host_id=None,
                 subnet_id=None,
                 fixed_ip=None,
                 device_owner=None,
                 device_id=None,
                 mac_address=None,
                 security_group_ids=None,
                 dhcp_opts=None):
     try:
         port_req_body = {'port': {}}
         port_req_body['port']['network_id'] = network_id
         port_req_body['port']['admin_state_up'] = True
         port_req_body['port']['tenant_id'] = tenant_id
         if security_group_ids:
             port_req_body['port']['security_groups'] = security_group_ids
         if mac_address:
             port_req_body['port']['mac_address'] = mac_address
         if self._has_port_binding_extension() and host_id:
             port_req_body['port']['binding:host_id'] = host_id
         if dhcp_opts is not None:
             port_req_body['port']['extra_dhcp_opts'] = dhcp_opts
         if subnet_id:
             fixed_ip_dict = {'subnet_id': subnet_id}
             if fixed_ip:
                 fixed_ip_dict.update({'ip_address': fixed_ip})
             port_req_body['port']['fixed_ips'] = [fixed_ip_dict]
         if device_owner:
             port_req_body['port']['device_owner'] = device_owner
         if device_id:
             port_req_body['port']['device_id'] = device_id
         port = self.client.create_port(port_req_body).get('port', {})
         return port
     except neutron_client_exc.NeutronClientException as e:
         LOG.exception(
             _('Neutron error creating port on network %s') % network_id)
         if e.status_code == 409:
             raise exception.PortLimitExceeded()
         raise exception.NetworkException(code=e.status_code,
                                          message=e.message)
Code example #39
File: cinder.py  Project: mbr4v0v/manila
 def check_detach(self, context, volume):
     if volume['status'] == "available":
         msg = _("already detached")
         raise exception.InvalidVolume(reason=msg)
Code example #40
class deprecated(object):
    """A decorator to mark callables as deprecated.

    This decorator logs a deprecation message when the callable it decorates is
    used. The message will include the release where the callable was
    deprecated, the release where it may be removed and possibly an optional
    replacement.

    Examples:

    1. Specifying the required deprecated release

    >>> @deprecated(as_of=deprecated.ICEHOUSE)
    ... def a(): pass

    2. Specifying a replacement:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
    ... def b(): pass

    3. Specifying the release where the functionality may be removed:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
    ... def c(): pass

    4. Specifying the deprecated functionality will not be removed:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
    ... def d(): pass

    5. Specifying a replacement, deprecated functionality will not be removed:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
    ... def e(): pass

    """

    # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
    # expected we maintain a gap between Bexar and Folsom in this list.
    BEXAR = 'B'
    FOLSOM = 'F'
    GRIZZLY = 'G'
    HAVANA = 'H'
    ICEHOUSE = 'I'
    JUNO = 'J'
    KILO = 'K'

    _RELEASES = {
        # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
        # expected we maintain a gap between Bexar and Folsom in this list.
        'B': 'Bexar',
        'F': 'Folsom',
        'G': 'Grizzly',
        'H': 'Havana',
        'I': 'Icehouse',
        'J': 'Juno',
        'K': 'Kilo',
    }

    _deprecated_msg_with_alternative = _(
        '%(what)s is deprecated as of %(as_of)s in favor of '
        '%(in_favor_of)s and may be removed in %(remove_in)s.')

    _deprecated_msg_no_alternative = _(
        '%(what)s is deprecated as of %(as_of)s and may be '
        'removed in %(remove_in)s. It will not be superseded.')

    _deprecated_msg_with_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')

    _deprecated_msg_with_no_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s. It will not be superseded.')

    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
        """Initialize decorator

        :param as_of: the release deprecating the callable. Constants
            are defined in this class for convenience.
        :param in_favor_of: the replacement for the callable (optional)
        :param remove_in: an integer specifying how many releases to wait
            before removing (default: 2)
        :param what: name of the thing being deprecated (default: the
            callable's name)

        """
        self.as_of = as_of
        self.in_favor_of = in_favor_of
        self.remove_in = remove_in
        self.what = what

    def __call__(self, func):
        if not self.what:
            self.what = func.__name__ + '()'

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            msg, details = self._build_message()
            LOG.deprecated(msg, details)
            return func(*args, **kwargs)

        return wrapped

    def _get_safe_to_remove_release(self, release):
        # TODO(dstanek): this method will have to be reimplemented once
        #    we get to the X release because once we get to the Y
        #    release, what is Y+2?
        new_release = chr(ord(release) + self.remove_in)
        if new_release in self._RELEASES:
            return self._RELEASES[new_release]
        else:
            return new_release

    def _build_message(self):
        details = dict(what=self.what,
                       as_of=self._RELEASES[self.as_of],
                       remove_in=self._get_safe_to_remove_release(self.as_of))

        if self.in_favor_of:
            details['in_favor_of'] = self.in_favor_of
            if self.remove_in > 0:
                msg = self._deprecated_msg_with_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_alternative_no_removal
        else:
            if self.remove_in > 0:
                msg = self._deprecated_msg_no_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_no_alternative_no_removal
        return msg, details
Code example #41
File: quotas.py  Project: mbr4v0v/manila
    def update(self, req, id, body):
        context = req.environ['manila.context']
        authorize_update(context)
        project_id = id

        bad_keys = []

        # By default, we can force-update the quota if the extension
        # is not loaded
        force_update = True
        extended_loaded = False
        if self.ext_mgr.is_loaded('os-extended-quotas'):
            # The force option has been enabled, so the default value of
            # force_update needs to be changed to False
            extended_loaded = True
            force_update = False

        user_id = None
        if self.ext_mgr.is_loaded('os-user-quotas'):
            # Update user quotas only if the extension is loaded
            params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
            user_id = params.get('user_id', [None])[0]

        try:
            settable_quotas = QUOTAS.get_settable_quotas(context,
                                                         project_id,
                                                         user_id=user_id)
        except exception.NotAuthorized:
            raise webob.exc.HTTPForbidden()

        for key, value in body['quota_set'].items():
            if (key not in QUOTAS and key not in NON_QUOTA_KEYS):
                bad_keys.append(key)
                continue
            if key == 'force' and extended_loaded:
                # Only check the force option when the extension has
                # been loaded
                force_update = strutils.bool_from_string(value)
            elif key not in NON_QUOTA_KEYS and value:
                try:
                    value = int(value)
                except (ValueError, TypeError):
                    msg = _("Quota '%(value)s' for %(key)s should be "
                            "integer.") % {
                                'value': value,
                                'key': key
                            }
                    LOG.warn(msg)
                    raise webob.exc.HTTPBadRequest(explanation=msg)

        LOG.debug("force update quotas: %s" % force_update)

        if len(bad_keys) > 0:
            msg = _("Bad key(s) %s in quota_set") % ",".join(bad_keys)
            raise webob.exc.HTTPBadRequest(explanation=msg)

        try:
            quotas = self._get_quotas(context,
                                      id,
                                      user_id=user_id,
                                      usages=True)
        except exception.NotAuthorized:
            raise webob.exc.HTTPForbidden()

        for key, value in body['quota_set'].items():
            if key in NON_QUOTA_KEYS or (not value and value != 0):
                continue
            # Validate whether the amount already used and reserved exceeds
            # the new quota; this check is ignored if the admin wants to
            # force the update
            try:
                value = int(value)
            except (ValueError, TypeError):
                msg = _("Quota '%(value)s' for %(key)s should be "
                        "integer.") % {
                            'value': value,
                            'key': key
                        }
                LOG.warn(msg)
                raise webob.exc.HTTPBadRequest(explanation=msg)

            if force_update is not True and value >= 0:
                quota_value = quotas.get(key)
                if quota_value and quota_value['limit'] >= 0:
                    quota_used = (quota_value['in_use'] +
                                  quota_value['reserved'])
                    LOG.debug(
                        "Quota %(key)s used: %(quota_used)s, "
                        "value: %(value)s.", {
                            'key': key,
                            'quota_used': quota_used,
                            'value': value
                        })
                    if quota_used > value:
                        msg = (_("Quota value %(value)s for %(key)s are "
                                 "greater than already used and reserved "
                                 "%(quota_used)s") % {
                                     'value': value,
                                     'key': key,
                                     'quota_used': quota_used
                                 })
                        raise webob.exc.HTTPBadRequest(explanation=msg)

            minimum = settable_quotas[key]['minimum']
            maximum = settable_quotas[key]['maximum']
            self._validate_quota_limit(value, minimum, maximum, force_update)
            try:
                db.quota_create(context,
                                project_id,
                                key,
                                value,
                                user_id=user_id)
            except exception.QuotaExists:
                db.quota_update(context,
                                project_id,
                                key,
                                value,
                                user_id=user_id)
            except exception.AdminRequired:
                raise webob.exc.HTTPForbidden()
        return {'quota_set': self._get_quotas(context, id, user_id=user_id)}
Code example #42
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd:             Passed to subprocess.Popen.
    :type cmd:              string
    :param process_input:   Send to opened process.
    :type process_input:    string
    :param env_variables:   Environment variables and their values that
                            will be set for the process.
    :type env_variables:    dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code:  boolean, int, or [int]
    :param delay_on_retry:  True | False. Defaults to True. If set to True,
                            wait a short amount of time before retrying.
    :type delay_on_retry:   boolean
    :param attempts:        How many times to retry cmd.
    :type attempts:         int
    :param run_as_root:     True | False. Defaults to False. If set to True,
                            the command is prefixed by the command specified
                            in the root_helper kwarg.
    :type run_as_root:      boolean
    :param root_helper:     command to prefix to commands called with
                            run_as_root=True
    :type root_helper:      string
    :param shell:           whether or not there should be a shell used to
                            execute this command. Defaults to false.
    :type shell:            boolean
    :param loglevel:        log level for execute commands.
    :type loglevel:         int.  (Should be logging.DEBUG or logging.INFO)
    :returns:               (stdout, stderr) from process execution
    :raises:                :class:`UnknownArgumentError` on
                            receiving unknown arguments
    :raises:                :class:`ProcessExecutionError`
    """

    process_input = kwargs.pop('process_input', None)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', logging.DEBUG)

    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        cmd = shlex.split(root_helper) + list(cmd)

    cmd = map(str, cmd)

    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, 'Running cmd (subprocess): %s',
                    strutils.mask_password(' '.join(cmd)))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell,
                                   env=env_variables)
            result = None
            for _i in six.moves.range(20):
                # NOTE(russellb) 20 is an arbitrary number of retries to
                # prevent any chance of looping forever here.
                try:
                    if process_input is not None:
                        result = obj.communicate(process_input)
                    else:
                        result = obj.communicate()
                except OSError as e:
                    if e.errno in (errno.EAGAIN, errno.EINTR):
                        continue
                    raise
                break
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            LOG.log(loglevel, 'Result was %s' % _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=stdout,
                                            stderr=stderr,
                                            cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.log(loglevel, '%r failed. Retrying.', cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
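
A minimal usage sketch follows; the command and keyword arguments are illustrative, and it assumes this helper is imported from the processutils module shown above.

# Hypothetical usage; command and options are illustrative only.
stdout, stderr = execute('df', '-h',
                         check_exit_code=[0, 1],  # also accept exit code 1
                         attempts=3,              # retry the command up to 3 times
                         delay_on_retry=True)     # sleep briefly between retries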
Code example #43
File: policy.py  Project: poornimakshirsagar/manila
import re

from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest

from manila.openstack.common import fileutils
from manila.openstack.common.gettextutils import _, _LE
from manila.openstack.common import jsonutils
from manila.openstack.common import log as logging

policy_opts = [
    cfg.StrOpt('policy_file',
               default='policy.json',
               help=_('The JSON file that defines policies.')),
    cfg.StrOpt('policy_default_rule',
               default='default',
               help=_('Default rule. Enforced when a requested rule is not '
                      'found.')),
]

CONF = cfg.CONF
CONF.register_opts(policy_opts)

LOG = logging.getLogger(__name__)

_checks = {}


class PolicyNotAuthorized(Exception):
Code example #44
File: policy.py  Project: poornimakshirsagar/manila
 def __init__(self, rule):
     msg = _("Policy doesn't allow %s to be performed.") % rule
     super(PolicyNotAuthorized, self).__init__(msg)
Code example #45
File: cinder.py  Project: mbr4v0v/manila
 def check_attached(self, context, volume):
     """Raise exception if volume in use."""
     if volume['status'] != "in-use":
         msg = _("status must be 'in-use'")
         raise exception.InvalidVolume(reason=msg)