Ejemplo n.º 1
0
 def validate_and_parse_keystone_token(self, cms_token):
     """Validate a Keystone CMS token and extract the user's identity.

     Runs ``openssl cms -verify`` against the configured signing
     certificate and CA file, then parses the verified payload as JSON.
     Partially taken from Keystone's common/cms.py module.

     :param cms_token: CMS-signed token blob, fed to openssl via stdin.
     :return: dict with 'username' (str) and 'groups' (list of role name
         strings taken from the token's roles).
     :raises ValueError: if openssl exits with a non-zero status.
     """
     signing_cert_file_name = get_config().get('keystone', 'signing_cert_file_name')
     ca_file_name = get_config().get('keystone', 'ca_file_name')
     openssl_cmd = get_config().get('keystone', 'openssl_cmd')
     process = subprocess.Popen([openssl_cmd, "cms", "-verify",
                                 "-certfile", signing_cert_file_name,
                                 "-CAfile", ca_file_name,
                                 "-inform", "PEM",
                                 "-nosmimecap", "-nodetach",
                                 "-nocerts", "-noattr"],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
     output, err = process.communicate(cms_token)
     retcode = process.poll()
     if retcode:
         # The original bare ``raise`` here would itself fail with a
         # TypeError because there is no active exception to re-raise;
         # raise an informative error carrying openssl's stderr instead.
         raise ValueError('Keystone token validation failed '
                          '(openssl exited %s): %s' % (retcode, err))
     token_info = json.loads(output)
     res = {'username': str(token_info['access']['user']['username']),
            'groups': [str(role['name'])
                       for role in token_info['access']['user']['roles']]}
     return res
Ejemplo n.º 2
0
    def __init__(self, avatar=None):
        """Initialize the resource, remembering the logged-in avatar."""
        # Twisted's Resource is an old-style class, so super() cannot be
        # used; call the base initializer directly instead.
        resource.Resource.__init__(self)
        self.avatar = avatar

        config = get_config()
        self.use_security_proxy = config.getboolean('auth', 'security_proxy_rest')
        self.use_keystone_tokens = config.getboolean('auth', 'use_keystone', False)
Ejemplo n.º 3
0
def allocate_virtual_compute_from_hangar(model, event):
    """Auto-allocate or deploy a virtual compute when one appears.

    Event handler: silently does nothing unless the model lives inside a
    virtualization container and has not been deployed yet.
    """
    if not IVirtualizationContainer.providedBy(model.__parent__):
        return

    if IDeployed.providedBy(model):
        return

    # Allocation can be switched off globally via [vms]auto_allocate.
    auto_allocate = get_config().getboolean('vms', 'auto_allocate', True)

    if not auto_allocate:
        return

    # The grandparent decides the action: a hangar means a fresh
    # allocation, a compute means the VM should be deployed in place.
    if IHangar.providedBy(model.__parent__.__parent__):
        action = AllocateAction
        msg = 'Allocated compute %s'
    elif ICompute.providedBy(model.__parent__.__parent__):
        action = DeployAction
        msg = 'Deployed compute %s'
    else:
        return

    try:
        path = canonical_path(model)
        owner = model.__owner__
        ul = UserLogger(subject=model, owner=owner)
        log.msg('Attempting %s for %s (%s, %s)' % (action.__name__, model, path, owner),
                system='create-event')
        # Run the action after a 2 second delay, then record success in
        # the user's event log.
        d = task.deferLater(reactor, 2.0, virtual_compute_action, action, path, event)
        d.addCallback(lambda r: ul.log(msg % path))
        # Optionally refresh the owner's usage statistics right away.
        if not get_config().getboolean('stats', 'only_report_on_sync', True):
            d.addCallback(lambda r: defer.maybeDeferred(getUtility(IUserStatisticsProvider).update, owner))
        d.addErrback(log.err)
    except Exception:
        log.err(system='create-event')
Ejemplo n.º 4
0
    def execute(self, context, *args, **kw):
        """Run the DHCP IP deallocation hook script for a KVM compute.

        Non-KVM backends are skipped. Presumably runs under
        defer.inlineCallbacks (uses yield) — confirm with the decorator
        outside this view.
        """
        @db.ro_transact
        def check_backend(context):
            # Only KVM computes need the DHCP deallocation hook.
            return context.__parent__.backend != 'kvm'

        if (yield check_backend(context)):
            return

        # Placeholder so the except clauses can always log the command.
        cmd = ['undefined']
        try:
            vm_parameters = args[1]

            secret = get_config().getstring('deploy', 'dhcp_key', 'secret')
            server = get_config().getstring('deploy', 'dhcp_server', 'localhost')
            server_port = get_config().getstring('deploy', 'dhcp_server_port', '7911')
            hook_script = get_config().getstring('deploy', 'hook_script_deallocate',
                                                 'scripts/deallocate_dhcp_ip.sh')

            mac_addr = getattr(context, 'mac_address')

            cmd = [hook_script, secret, server, server_port, mac_addr,
                   str(vm_parameters['ip_address']),
                   vm_parameters['uuid']]

            yield subprocess.async_check_output(cmd)
        except error.ProcessTerminated:
            # Hook script exited abnormally: log the exact command, re-raise.
            log.msg('Executing allocate_dhcp_ip.sh hook script failed: %s' % (cmd))
            log.err(system='deploy-hook-kvm')
            raise
        except Exception:
            log.err(system='undeploy')
            raise
Ejemplo n.º 5
0
    def get_credit(self, uid):
        """Query WHMCS for a client's credit and credit balance limit.

        :param uid: WHMCS client id.
        :return: tuple ``(credit, credit_balance)`` of floats, delivered
            via ``defer.returnValue``.
        :raises InvalidConfiguration: when [whmcs] settings are unusable.
        :raises WHMCSAPIError: when the API reports an error or an HTTP
            status >= 400 is received.
        """
        log.info('Requesting credit update for %s', uid)
        try:
            agent = Agent(reactor)
            whmcs_api_uri = get_config().getstring('whmcs', 'api_uri', '')
            whmcs_user = get_config().getstring('whmcs', 'user', '')
            whmcs_password = get_config().getstring('whmcs', 'password', '')

            if not isinstance(whmcs_api_uri, str) or not whmcs_api_uri.startswith('http'):
                raise InvalidConfiguration('Invalid configuration: [whmcs]api_uri is %r, '
                                           'must be a valid URI' % whmcs_api_uri)

            if not isinstance(whmcs_user, str):
                # Fixed: stray double quote in the original message.
                raise InvalidConfiguration('Invalid configuration: [whmcs]user is %r, '
                                           'must be a non-empty string' % whmcs_user)

            if not isinstance(whmcs_password, str):
                # Fixed: message previously referred to [whmcs]user even
                # though this branch validates the password setting.
                raise InvalidConfiguration('Invalid configuration: [whmcs]password is %r, '
                                           'must be a non-empty string' % whmcs_password)

            # The WHMCS API expects the MD5 digest of the password.
            pwmd5 = hashlib.md5()
            pwmd5.update(whmcs_password)
            reqbody = WHMCSRequestBody({'username': whmcs_user,
                                        'password': pwmd5.hexdigest(),
                                        'clientid': uid,
                                        'action': 'getclientsdetails',
                                        'responsetype': 'json'})

            headers = Headers({'User-Agent': ['OMS-KNOT 2.0'],
                               'Content-Type': ['application/x-www-form-urlencoded']})

            response = yield agent.request('POST', whmcs_api_uri, headers, reqbody)

            # Collect up to 10 KiB of the response body.
            finished = defer.Deferred()
            rbody = ResponseProtocol(finished, 1024 * 10)
            response.deliverBody(rbody)
            data = yield finished
        except Exception as e:
            log.error(e, exc_info=sys.exc_info())
            raise

        if response.code < 400:
            data = json.loads(data)
            if data.get('result') == 'error':
                raise WHMCSAPIError('%s' % (data.get('message')))
            # The balance limit lives in a configurable custom field.
            credit_balance_field = 'customfields%s' % get_config().getstring('whmcs', 'balance_limit', 2)
            credit_balance_value = (data.get(credit_balance_field) if credit_balance_field in data
                                    and data.get(credit_balance_field) != "" else 0)
            log.debug('Received \'%s\' as a credit_balance_value. Field checked: \' %s\'. Full response: %s' %
                            (credit_balance_value, credit_balance_field, data))
            try:
                credit_balance = float(credit_balance_value)
            except (TypeError, ValueError):
                # Fixed: non-numeric strings raise ValueError, not
                # TypeError; both now fall back to a zero balance.
                log.error('Credit balance value error: %s cannot be converted to floating point value',
                          credit_balance_value)
                credit_balance = .0
            defer.returnValue((float(data.get('credit')), credit_balance))

        raise WHMCSAPIError('%s: %s' % (response.code, data))
Ejemplo n.º 6
0
def checkers():
    """Return the lazily-built, cached list of credential checkers."""
    global _checkers
    if _checkers is None:
        config = get_config()
        candidates = [
            PamAuthChecker() if config.getboolean('auth', 'use_pam', False) else None,
            FilePasswordDB(config.get('auth', 'passwd_file'), hash=ssha_hash),
            (InMemoryPublicKeyCheckerDontUse()
             if config.getboolean('auth', 'use_inmemory_pkcheck', False) else None),
        ]
        # Drop the checkers that were disabled by configuration.
        _checkers = filter(None, candidates)
    return _checkers
Ejemplo n.º 7
0
def update_passwd(user, password=None, force_askpass=False, group=None, force=False):
    """Update an existing user's password and/or group in the passwd file.

    :param user: user name to update; must already exist in the file.
    :param password: new password, or None to prompt (or keep the old one).
    :param force_askpass: prompt for a password even when a group is given.
    :param group: replacement group string, if any.
    :param force: allow updating users listed in [auth]restricted_users.
    :raises UserManagementError: if the user is restricted or not found.
    """
    passwd_file = get_config().get('auth', 'passwd_file')
    restricted_users = get_config().getstring('auth', 'restricted_users', '').split(',')

    if user in map(string.strip, restricted_users) and not force:
        raise UserManagementError('User %s is restricted! Update permission denied!' % user)

    with open(passwd_file) as f:
        lines = f.readlines()

    found = False
    for line in lines:
        if line.startswith(user + ':'):
            found = True

    if not found:
        raise UserManagementError("User %s doesn't exist" % user)

    def parse_line(line):
        # passwd line format: user:password:groups[:uid]
        # Hoisted out of the rewrite loop: the helper is loop-invariant and
        # was previously redefined on every iteration.
        _user, pw, groups = line.split(':', 2)

        if ':' in groups:
            groups, uid = groups.split(':', 1)
        else:
            uid = None

        return _user, pw, groups, uid

    # Rewrite the whole file, replacing only the matching user's entry.
    with open(passwd_file, 'w') as f:
        for line in lines:
            line = line.rstrip('\n')

            if line.startswith(user + ':'):
                try:
                    # Prompt interactively unless an explicit password was
                    # given or only the group is being changed.
                    newpw = hash_pw(ask_password() if password is None
                                    and (force_askpass or not group) else password)
                except UserManagementError:
                    newpw = password

                _user, oldpw, groups, uid = parse_line(line)

                if group:
                    groups = group

                # No new password available: keep the existing hash.
                if newpw is None:
                    newpw = oldpw

                f.write('%s:%s:%s:%s\n' % (_user, newpw, groups, uid))
            else:
                _user, old_pw, groups, uid = parse_line(line)
                f.write('%s:%s:%s:%s\n' % (_user, old_pw, groups, uid))
Ejemplo n.º 8
0
def add_user(user, password, group=None, uid=None, force=False):
    """Append a new user entry to the passwd file.

    Raises UserManagementError when the user is restricted (unless *force*)
    or already present in the file.
    """
    restricted = get_config().getstring('auth', 'restricted_users', '').split(',')

    if not force and user in [name.strip() for name in restricted]:
        raise UserManagementError('User "%s" is restricted! Adding permission denied!' % user)

    passwd_file = get_config().getstring('auth', 'passwd_file')
    prefix = user + ':'
    with open(passwd_file) as passwd:
        for entry in passwd:
            if entry.startswith(prefix):
                raise UserManagementError("User %s already exists" % user)

    with open(passwd_file, 'a') as passwd:
        passwd.write('%s:%s:%s:%s\n' % (user, hash_pw(password), group or 'users', uid))
Ejemplo n.º 9
0
def config_defaults():
    """Configure Python logging from the [logging] config section.

    Routes output to a watched log file — or to stderr when the configured
    filename is the literal string 'stdout' — and quiets noisy subsystems.
    """
    logging.captureWarnings(True)

    log_filename = get_config().get('logging', 'file')
    log_level = get_config().getstring('logging', 'level', 'INFO')

    # Recurring, uninteresting messages filtered out of the twisted logger.
    default_ignored_messages = ['.*[email protected]',
                                '.*POST .*/webterm',
                                '.*GET /favicon.ico',
                                '.*POST /+stream', '.*OPTIONS /',
                                '.*GET /plugins/onc/root/',
                                'got channel session request',
                                'channel open',
                                'remote close',
                                'sending close 0',
                                'disabling diffie-hellman-group-exchange because we cannot find moduli file']

    # 'stdout' is a magic filename meaning: log to the console instead.
    if log_filename == 'stdout':
        root_handlers = ['stderr']
    else:
        root_handlers = ['default']

    logging.config.dictConfig({
        'formatters': {
            'default': {'format': '%(asctime)s %(thread)x %(name)s %(levelname)s %(message)s'},
            'twisted': {'format': '%(asctime)s %(thread)x %(name)s %(levelname)s %(system)s %(message)s'}},
        'handlers': {'default': {'class': 'logging.handlers.WatchedFileHandler',
                                 'filename': log_filename,
                                 'formatter': 'default'},
                     'twisted': {'class': 'logging.handlers.WatchedFileHandler',
                                 'filename': log_filename,
                                 'formatter': 'twisted'},
                     'stderr': {'class': 'logging.StreamHandler', 'formatter': 'default'}},
        'filters': {
            'twisted-system': {'()': 'opennode.oms.log.TwistedSystemFilter',
                               'banlist': ['SSHServerTransport', 'SSHService']},
            'excluded-messages': {'()': 'opennode.oms.log.MessageRegexFilter',
                                  'banlist': default_ignored_messages}},
        'root': {'handlers': root_handlers, 'level': log_level},
        'loggers': {'twisted': {'level': 'INFO', 'handlers': ['twisted'], 'propagate': False,
                                'filters': ['twisted-system', 'excluded-messages']},
                    'txn': {'level': 'WARNING'},
                    'ZEO.zrpc': {'level': 'WARNING'},
                    'ZEO.ClientStorage': {'level': 'WARNING'},
                    'salt': {'level': 'WARNING'}},
        'version': 1,
        'disable_existing_loggers': False
    })
Ejemplo n.º 10
0
 def adjust_cpulimit():
     """Set cpulimit to a configured percentage * cores.

     NOTE(review): this function reads ``self`` but declares no ``self``
     parameter — presumably it is a closure nested inside a method that
     captures ``self`` from the enclosing scope; confirm against caller.
     """
     # Default to a single core when the compute does not report one.
     cores = getattr(self.context, 'num_cores', 1)
     cpu_limit_factor = get_config().getfloat('vms', 'cpu_limit', 80)
     # Fraction of total capacity, e.g. 2 cores at 80% -> 1.6.
     cpu_limit = cores * cpu_limit_factor / 100.0
     log.msg("Updating cpulimit to %s" % cpu_limit, system='deploy')
     self.context.cpu_limit = cpu_limit
Ejemplo n.º 11
0
    def execute(self, factory, config, permissions, **kw):
        """Register Zope security checkers for the attributes of *factory*.

        Returns False when there are no permissions to apply, True
        otherwise.
        """
        if not permissions:
            return False

        strict = get_config().getboolean('auth', 'enforce_attribute_rights_definition')
        perms = {} if strict else AuditingPermissionDictionary()

        # mandatory, otherwise zope's default Checker impl will be used
        # which doesn't play well with Twisted.
        defineChecker(factory, Checker(perms, perms))

        # TODO: supply the 'inherit' option to the class somehow
        # to facilitate optional inheritance of all permissions
        for level in permissions:
            for name, permission in level.items():
                if isinstance(permission, tuple):
                    # A (read, write) pair additionally guards the setter.
                    read_perm, write_perm = permission
                    config.action(discriminator=('protectNameSet', factory, name),
                                  callable=grokcore.security.util.protect_setattr,
                                  args=(factory, name, write_perm))
                else:
                    read_perm = permission

                config.action(discriminator=('protectName', factory, name),
                              callable=grokcore.security.util.protect_getattr,
                              args=(factory, name, read_perm))

        return True
Ejemplo n.º 12
0
    def content(self):
        """Expose every configuration section as an EtcConfigSection."""
        config = get_config()
        sections = {}
        for section in config.sections():
            sections[section] = EtcConfigSection(section, dict(config.items(section)))
        return sections
Ejemplo n.º 13
0
    def execute(self, args):
        """Print the tail of the OMS log, or the user's event log with -u.

        ``args.n`` limits the number of lines (default 10). Presumably
        runs under inlineCallbacks (uses yield) — confirm with the
        decorator outside this view.
        """
        from opennode.oms.config import get_config
        logfilename = get_config().get('logging', 'file')

        # When logging goes to stdout there is no file to tail from.
        if logfilename == 'stdout':
            log.msg('System is configured to log to stdout. Cannot cat to omsh terminal',
                    system='catlog')
            return

        nr_of_lines = int(args.n) if args.n is not None else 10

        if not args.u:
            # Shell out to tail(1); errortoo also captures stderr output.
            outputCb = utils.getProcessOutput("tail",
                                              args=('-n %s' % nr_of_lines, logfilename),
                                              errortoo=True)

            outputCb.addCallback(lambda output: self.write(output))
            yield outputCb
            return

        @db.ro_transact
        def get_user_log():
            eventlog = db.get_root()['oms_root']['eventlog']
            if self.user.id not in eventlog.listnames():
                return

            usereventlog = eventlog[self.user.id]

            # Emit the user's events, most recent first.
            for event in sorted(usereventlog.listcontent(), key=lambda event: event.timestamp, reverse=True):
                self.write('%s %s %s\n' % (event.timestamp, event.levelname, event.message))

        yield get_user_log()
Ejemplo n.º 14
0
def start_daemons(event):
    """Launch the Proc daemons; failures are logged, never propagated."""
    try:
        Proc().start_daemons()
    except Exception as exc:
        log.msg("Got exception while starting daemons", system='proc')
        # Tracebacks are noisy; only emit them when debugging is enabled.
        print_tracebacks = get_config().getboolean('debug', 'print_exceptions')
        if print_tracebacks:
            log.err(exc, system='proc')
Ejemplo n.º 15
0
    def apply(self, principal):
        """Check and refresh the credit of a billable principal.

        Only members of [auth]billable_group are checked; everyone else is
        skipped with an informational log entry. Presumably runs under
        inlineCallbacks (uses yield) — confirm with the decorator outside
        this view.
        """
        billable_group = get_config().getstring('auth', 'billable_group',
                                                'users')
        if billable_group in map(str, principal.groups):
            profile, uid, need_update = yield self._get_profile_and_need_update(
                principal)
            log.debug('Need update %s uid=%s : %s', profile, uid, need_update)

            if need_update:
                try:
                    check_call = getUtility(ICreditCheckCall)
                    credit, balance_limit = yield defer.maybeDeferred(
                        check_call.get_credit, uid)
                    profile = yield self._update_credit(
                        principal, credit, balance_limit)
                except Exception as e:
                    # Credit refresh failures are logged but do not stop
                    # the credit check against the cached profile below.
                    log.error('Error updating credit: %s',
                              e,
                              exc_info=sys.exc_info())

            @db.ro_transact()
            def check_credit(profile):
                log.debug('Checking if user %s has credit (%s): %s', profile,
                          profile.credit, profile.has_credit())
                assert profile.has_credit(
                ), 'User %s does not have enough credit' % principal.id

            yield check_credit(profile)
        else:
            log.info(
                'User "%s" is not a member of a billable group "%s": %s. Not updating credit',
                principal.id, billable_group, map(str, principal.groups))
Ejemplo n.º 16
0
def init(test=False):
    """Initialize the global ZODB handle.

    Idempotent for normal use: an already-initialized database is reused.
    With ``test=True`` a fresh in-memory database is always created.
    """
    global _db, _testing

    if _db and not test:
        return

    log.info("Initializing zodb")
    handle(BeforeDatabaseInitalizedEvent())

    if not test:
        # [db]storage_type selects between a ZEO client, a local
        # FileStorage, or a purely in-memory database.
        storage_type = get_config().get('db', 'storage_type')

        if storage_type == 'zeo':
            from ZODB import DB
            storage = ClientStorage('%s/socket' % get_db_dir())
            _db = DB(storage)
        elif storage_type == 'embedded':
            from ZODB import DB
            storage = FileStorage('%s/data.fs' % get_db_dir())
            _db = DB(storage)
        elif storage_type == 'memory':
            from ZODB.tests.util import DB
            _db = DB()
        else:
            raise Exception("Unknown storage type '%s'" % storage_type)
    else:
        from ZODB.tests.util import DB
        _db = DB()
        _testing = True

    init_schema()
Ejemplo n.º 17
0
    def run(self, *args, **kwargs):
        """Execute a remote salt action and return the parsed result.

        Builds a command line from the configured remote command, the
        action, the hostname and the positional arguments, then parses
        the JSON output. Presumably runs under inlineCallbacks (uses
        yield) — confirm with the decorator outside this view.
        """
        self.args = args
        log.msg('Running action against "%s": %s args: %s timeout: %s' % (self.hostname, self.action,
                                                                          self.args, self.timeout),
                system='salt-simple', logLevel=logging.DEBUG)
        cmd = get_config().getstring('salt', 'remote_command', 'salt')

        # Callers may pass a deferred that fires when the call is aborted.
        killhook = kwargs.get('__killhook')
        if killhook:
            killhook.addCallback(lambda r: log.msg('"%s" to "%s" aborted' % (self.action, self.hostname)))

        timeout = ('--timeout=%s' % self.timeout) if self.timeout is not None else None

        # Flatten args into CLI tokens: dicts become key=value pairs,
        # everything else is stringified and quoted.
        args = (list(reduce(lambda a, b: a + b,
                            map(lambda a: (dict_to_kwargs(a) if type(a) is dict else ['"%s"' % str(a)]),
                                args)))
                if args else [])

        # filter(None, ...) drops the timeout token when it is unset.
        output = yield subprocess.async_check_output(
            filter(None, (cmd.split(' ') +
                          ['--no-color', '--out=json', timeout, self.hostname, self.action] + args)),
            killhook=killhook)

        log.msg('Action "%s" to "%s" finished.' % (self.action, self.hostname),
                system='salt-simple', logLevel=logging.DEBUG)
        data = json.loads(output) if output else {}
        rdata = self._handle_errors(data)
        defer.returnValue(rdata)
Ejemplo n.º 18
0
def start_daemons(event):
    """Start the background Proc daemons.

    Errors never escape this handler: the failure is logged, and the full
    traceback is emitted only when [debug]print_exceptions is enabled.
    """
    try:
        Proc().start_daemons()
    except Exception as failure:
        log.msg("Got exception while starting daemons", system='proc')
        debug_enabled = get_config().getboolean('debug', 'print_exceptions')
        if debug_enabled:
            log.err(failure, system='proc')
Ejemplo n.º 19
0
 def handle_error(self, e, action, c, compute, status_name):
     """Log a failed sync action, drop its pending request, flag the compute."""
     e.trap(Exception)
     log.msg("Got exception on %s of '%s'" % (action, c), system='sync')
     debug_enabled = get_config().getboolean('debug', 'print_exceptions')
     if debug_enabled:
         log.err(e, system='sync')
     self.delete_outstanding_request(compute)
     set_compute_status(compute.__name__, status_name, True)
Ejemplo n.º 20
0
def ensure_zeo_is_running(event):
    """We start zeo after the application has performed the basic initialization
    because we cannot import opennode.oms.zodb.db until all grokkers are run in the
    correct order.

    """

    # Only relevant for the ZEO storage backend.
    if get_config().get('db', 'storage_type') != 'zeo':
        return

    log.msg("Ensuring ZEO is running", system='db')

    # prevent zeo starting during unit tests etc
    global _daemon_started
    if not _daemon_started:
        return

    from opennode.oms.zodb.db import get_db_dir

    db_dir = get_db_dir()

    from zc.lockfile import LockFile, LockError
    try:
        # Acquiring (and immediately releasing) the lock file proves no
        # ZEO server currently holds it, so one is started; a LockError
        # means a server is already running.
        with closing(LockFile(os.path.join(db_dir, 'data.fs.lock'))):
            log.msg("Starting ZEO server", system='db')
        run_zeo(db_dir)
    except LockError:
        log.msg("ZEO is already running", system='db')
Ejemplo n.º 21
0
    def auto_tags(self):
        """Derive automatic tags (state, arch, virt type, net env) for a compute."""
        res = [u'state:' + self.context.state] if self.context.state else []
        if self.context.architecture:
            for i in self.context.architecture:
                res.append(u'arch:' + i)

        # Imported locally, presumably to avoid a circular import — confirm.
        from opennode.knot.model.virtualizationcontainer import IVirtualizationContainer
        p = sudo(self.context)
        if (IVirtualCompute.providedBy(p) and
                IVirtualizationContainer.providedBy(p.__parent__)):
            res.append(u'virt_type:' + p.__parent__.backend)
            res.append(u'virt:yes')
        else:
            res.append(u'virt:no')

        # Tag the compute with every [netenv-tags] entry whose CIDR list
        # matches its IPv4 address (netmask suffix stripped before match).
        config = get_config()
        if config.has_section('netenv-tags'):
            for tag, nets in config.items('netenv-tags'):
                try:
                    if (self.context.ipv4_address is not None and
                        len(netaddr.all_matching_cidrs(self.context.ipv4_address.split('/')[0],
                                                       nets.split(','))) > 0):
                        res.append(u'env:' + tag)
                except ValueError:
                    # graceful ignoring of incorrect ips
                    pass
        return res
Ejemplo n.º 22
0
    def execute(self, factory, config, permissions, **kw):
        """Define Zope security checkers for the attributes of *factory*.

        Returns False when no permissions are given, True otherwise.
        """
        if not permissions:
            return False

        # Strict mode uses an empty permission map; otherwise a dictionary
        # that audits permission lookups is installed.
        if get_config().getboolean('auth',
                                   'enforce_attribute_rights_definition'):
            perms = {}
        else:
            perms = AuditingPermissionDictionary()

        # mandatory, otherwise zope's default Checker impl will be used
        # which doesn't play well with Twisted.
        defineChecker(factory, Checker(perms, perms))

        # TODO: supply the 'inherit' option to the class somehow
        # to facilitate optional inheritance of all permissions
        for class_inheritance_level in permissions:
            for name, permission in class_inheritance_level.items():
                # A (read, write) tuple additionally protects the setter.
                if isinstance(permission, tuple):
                    read_perm, write_perm = permission
                    config.action(
                        discriminator=('protectNameSet', factory, name),
                        callable=grokcore.security.util.protect_setattr,
                        args=(factory, name, write_perm))
                else:
                    read_perm = permission

                config.action(discriminator=('protectName', factory, name),
                              callable=grokcore.security.util.protect_getattr,
                              args=(factory, name, read_perm))

        return True
Ejemplo n.º 23
0
    def gather_user_vm_stats(self):
        """Refresh VM statistics for users whose stats are stale.

        Presumably runs under inlineCallbacks (uses yield) — confirm with
        the decorator outside this view.
        """
        # NOTE(review): getstring with a default of 60 — presumably yields
        # a value timedelta(seconds=...) accepts; confirm getstring's
        # return type.
        credit_check_cooldown = get_config().getstring('auth', 'billing_timeout', 60)

        @db.ro_transact
        def get_users_with_vms_to_update():
            home = db.get_root()['oms_root']['home']
            update_list = []
            for profile in home.listcontent():
                # Parse the stored timestamp (split on non-digits) and add
                # the cooldown; a missing timestamp is always stale.
                timeout = ((datetime(*map(int, re.split('[^\d]', profile.vm_stats_timestamp)[:-1])) +
                            timedelta(seconds=credit_check_cooldown))
                           if profile.vm_stats_timestamp
                           else datetime.min)

                if timeout < datetime.now():
                    update_list.append(profile.name)

            return update_list

        update_list = yield get_users_with_vms_to_update()

        for name in update_list:
            try:
                yield defer.maybeDeferred(getUtility(IUserStatisticsProvider).update, name)
            except Exception:
                log.msg('Non-fatal error during user stats syncing', system='sync')
                log.err(system='sync')
Ejemplo n.º 24
0
 def handle_error(self, e, action, c, compute, status_name):
     """Handle a failed sync action: log it, clear the request, set status."""
     e.trap(Exception)
     message = "Got exception on %s of '%s'" % (action, c)
     log.msg(message, system='sync')
     if get_config().getboolean('debug', 'print_exceptions'):
         # Full traceback only in debug mode.
         log.err(e, system='sync')
     self.delete_outstanding_request(compute)
     set_compute_status(compute.__name__, status_name, True)
Ejemplo n.º 25
0
def ensure_zeo_is_running(event):
    """We start zeo after the application has performed the basic initialization
    because we cannot import opennode.oms.zodb.db until all grokkers are run in the
    correct order.

    """

    # Only relevant when the ZEO storage backend is configured.
    if get_config().get('db', 'storage_type') != 'zeo':
        return

    log.msg("Ensuring ZEO is running", system='db')

    # prevent zeo starting during unit tests etc
    global _daemon_started
    if not _daemon_started:
        return

    from opennode.oms.zodb.db import get_db_dir

    db_dir = get_db_dir()

    from zc.lockfile import LockFile, LockError
    try:
        # If the lock can be taken (and released), no ZEO server holds it,
        # so one is started; LockError means a server is already running.
        with closing(LockFile(os.path.join(db_dir, 'data.fs.lock'))):
            log.msg("Starting ZEO server", system='db')
        run_zeo(db_dir)
    except LockError:
        log.msg("ZEO is already running", system='db')
Ejemplo n.º 26
0
    def gather_phy(self):
        """Collect host metrics from the compute and append them to streams.

        Presumably runs under inlineCallbacks (uses yield) — confirm with
        the decorator outside this view.
        """
        name = yield db.get(self.context, 'hostname')
        try:
            data = yield IGetHostMetrics(self.context).run(__killhook=self._killhook)

            log.msg('%s: host metrics received: %s' % (name, len(data)), system='metrics',
                    logLevel=logging.DEBUG)
            # Millisecond timestamp shared by every data point of this batch.
            timestamp = int(time.time() * 1000)

            # db transact is needed only to traverse the zodb.
            @db.ro_transact
            def get_streams():
                streams = []
                host_metrics = self.context['metrics']
                if host_metrics:
                    for k in data:
                        if host_metrics[k]:
                            streams.append((IStream(host_metrics[k]), (timestamp, data[k])))
                return streams

            for stream, data_point in (yield get_streams()):
                stream.add(data_point)
        except OperationRemoteError as e:
            log.msg('%s: remote error: %s' % (name, e), system='metrics', logLevel=logging.WARNING)
        except Exception:
            log.msg("%s: error gathering host metrics" % name, system='metrics', logLevel=logging.ERROR)
            # Full traceback only when [debug]print_exceptions is on.
            if get_config().getboolean('debug', 'print_exceptions'):
                log.err(system='metrics')
Ejemplo n.º 27
0
    def gather_phy(self):
        """Fetch physical host metrics and record them on metric streams.

        Presumably runs under inlineCallbacks (uses yield) — confirm with
        the decorator outside this view.
        """
        name = yield db.get(self.context, 'hostname')
        try:
            data = yield IGetHostMetrics(self.context).run(__killhook=self._killhook)

            log.msg('%s: host metrics received: %s' % (name, len(data)), system='metrics',
                    logLevel=logging.DEBUG)
            # One millisecond timestamp shared by the whole batch.
            timestamp = int(time.time() * 1000)

            # db transact is needed only to traverse the zodb.
            @db.ro_transact
            def get_streams():
                streams = []
                host_metrics = self.context['metrics']
                if host_metrics:
                    for k in data:
                        if host_metrics[k]:
                            streams.append((IStream(host_metrics[k]), (timestamp, data[k])))
                return streams

            for stream, data_point in (yield get_streams()):
                stream.add(data_point)
        except OperationRemoteError as e:
            log.msg('%s: remote error: %s' % (name, e), system='metrics', logLevel=logging.WARNING)
        except Exception:
            log.msg("%s: error gathering host metrics" % name, system='metrics', logLevel=logging.ERROR)
            # Traceback is emitted only when debugging is enabled.
            if get_config().getboolean('debug', 'print_exceptions'):
                log.err(system='metrics')
Ejemplo n.º 28
0
    def gather_user_vm_stats(self):
        """Update VM statistics for every user whose stats are stale.

        Presumably runs under inlineCallbacks (uses yield) — confirm with
        the decorator outside this view.
        """
        # NOTE(review): getstring with a default of 60 — presumably yields
        # a value timedelta(seconds=...) accepts; confirm getstring's
        # return type.
        credit_check_cooldown = get_config().getstring('auth',
                                                       'billing_timeout', 60)

        @db.ro_transact
        def get_users_with_vms_to_update():
            home = db.get_root()['oms_root']['home']
            update_list = []
            for profile in home.listcontent():
                # Parse the stored timestamp (split on non-digits) and add
                # the cooldown; a missing timestamp is always stale.
                timeout = ((datetime(
                    *map(int,
                         re.split('[^\d]', profile.vm_stats_timestamp)[:-1])) +
                            timedelta(seconds=credit_check_cooldown))
                           if profile.vm_stats_timestamp else datetime.min)

                if timeout < datetime.now():
                    update_list.append(profile.name)

            return update_list

        update_list = yield get_users_with_vms_to_update()

        for name in update_list:
            try:
                yield defer.maybeDeferred(
                    getUtility(IUserStatisticsProvider).update, name)
            except Exception:
                log.msg('Non-fatal error during user stats syncing',
                        system='sync')
                log.err(system='sync')
Ejemplo n.º 29
0
    def auto_tags(self):
        """Compute automatic tags (state, arch, virt type, net env) for a compute."""
        res = [u'state:' + self.context.state] if self.context.state else []
        if self.context.architecture:
            for i in self.context.architecture:
                res.append(u'arch:' + i)

        # Imported locally, presumably to avoid a circular import — confirm.
        from opennode.knot.model.virtualizationcontainer import IVirtualizationContainer
        p = sudo(self.context)
        if (IVirtualCompute.providedBy(p) and
                IVirtualizationContainer.providedBy(p.__parent__)):
            res.append(u'virt_type:' + p.__parent__.backend)
            res.append(u'virt:yes')
        else:
            res.append(u'virt:no')

        # Add an env tag for every [netenv-tags] entry whose CIDR list
        # matches the compute's IPv4 address (netmask suffix stripped).
        config = get_config()
        if config.has_section('netenv-tags'):
            for tag, nets in config.items('netenv-tags'):
                try:
                    if (self.context.ipv4_address is not None and
                        len(netaddr.all_matching_cidrs(self.context.ipv4_address.split('/')[0],
                                                       nets.split(','))) > 0):
                        res.append(u'env:' + tag)
                except ValueError:
                    # graceful ignoring of incorrect ips
                    pass
        return res
Ejemplo n.º 30
0
def init(test=False):
    """Initialize the global ZODB handle (idempotent unless *test* is set).

    In test mode an in-memory test DB is always created; otherwise the
    storage backend is chosen via the 'db'/'storage_type' config option.
    """
    global _db, _testing

    # already initialized and not forcing a fresh test DB: nothing to do
    if _db and not test:
        return

    log.info("Initializing zodb")
    handle(BeforeDatabaseInitalizedEvent())

    if test:
        from ZODB.tests.util import DB
        _db = DB()
        _testing = True
    else:
        storage_type = get_config().get('db', 'storage_type')

        if storage_type == 'zeo':
            from ZODB import DB
            _db = DB(ClientStorage('%s/socket' % get_db_dir()))
        elif storage_type == 'embedded':
            from ZODB import DB
            _db = DB(FileStorage('%s/data.fs' % get_db_dir()))
        elif storage_type == 'memory':
            from ZODB.tests.util import DB
            _db = DB()
        else:
            raise Exception("Unknown storage type '%s'" % storage_type)

    init_schema()
Ejemplo n.º 31
0
 def handle_remote_error(self, ore, c, compute, status_name):
     """Log an OperationRemoteError and mark *compute* with *status_name*."""
     ore.trap(OperationRemoteError)
     # only dump the remote traceback when one exists and debugging is on
     show_tb = (ore.value.remote_tb and
                get_config().getboolean('debug', 'print_exceptions'))
     if show_tb:
         log.err(ore, system='sync')
     else:
         log.msg(str(ore.value), system='sync', logLevel=ERROR)
     self.delete_outstanding_request(compute)
     set_compute_status(compute.__name__, status_name, True)
Ejemplo n.º 32
0
    def pack(self):
        storage_type = get_config().get('db', 'storage_type')

        if storage_type == 'zeo':
            print "[db_pack] zeo pack not implemented yet, please setup cron to run bin/zeopack -u db/socket"
        elif storage_type == 'embedded':
            d = db.get_db()
            d.pack(time.time())
Ejemplo n.º 33
0
def add_user(user, password, group=None, uid=None, force=False):
    """Append a new record to the passwd file.

    Raises UserManagementError when the user is restricted (unless *force*
    is set) or already present in the passwd file.
    """
    config = get_config()
    restricted = [name.strip()
                  for name in config.getstring('auth', 'restricted_users',
                                               '').split(',')]

    if not force and user in restricted:
        raise UserManagementError(
            'User "%s" is restricted! Adding permission denied!' % user)

    passwd_file = config.getstring('auth', 'passwd_file')
    prefix = user + ':'
    with open(passwd_file) as f:
        for line in f:
            if line.startswith(prefix):
                raise UserManagementError("User %s already exists" % user)

    with open(passwd_file, 'a') as f:
        f.write('%s:%s:%s:%s\n' %
                (user, hash_pw(password), group or 'users', uid))
 def __init__(self):
     """Configure the memory profiler daemon from the config file."""
     super(MemoryProfilerDaemonProcess, self).__init__()
     cfg = get_config()
     self.enabled = cfg.getboolean('daemon', 'memory-profiler', False)
     # pull the integer tuning knobs from the 'debug' section
     for attr, option, default in (('interval', 'memory_profiler_interval', 60),
                                   ('track', 'memory_profiler_track_changes', 0),
                                   ('verbose', 'memory_profiler_verbose', 0)):
         setattr(self, attr, cfg.getint('debug', option, default))
     self.summary_tracker = tracker.SummaryTracker()
Ejemplo n.º 35
0
    def execute(self, context, *args, **kw):
        """Allocate a DHCP lease for a KVM compute via an external hook script.

        No-op for non-KVM backends.  Ensures the compute has a MAC address
        (generating and persisting one when missing), then invokes the
        configured allocation hook with the DHCP server parameters.
        Re-raises any hook failure after logging it.
        """
        @db.ro_transact
        def check_backend(context):
            # this hook only applies to KVM virtualization containers
            return context.__parent__.backend != 'kvm'

        if (yield check_backend(context)):
            return

        # keep a printable placeholder so the error path can always log 'cmd'
        cmd = ['undefined']
        try:
            # assumes args[1] is the VM parameter dict -- TODO confirm caller contract
            vm_parameters = args[1]

            secret = get_config().getstring('deploy', 'dhcp_key', 'secret')
            server = get_config().getstring('deploy', 'dhcp_server',
                                            'localhost')
            server_port = get_config().getstring('deploy', 'dhcp_server_port',
                                                 '7911')
            hook_script = get_config().getstring(
                'deploy', 'hook_script_allocate',
                'scripts/allocate_dhcp_ip.sh')

            @db.transact
            def ensure_compute_mac_address(context):
                # generate + persist a MAC if absent, and mirror it into the
                # VM deployment parameters used by the hook below
                mac_address = getattr(context, 'mac_address', None)
                if not mac_address:
                    mac_address = mac_addr_kvm_generator()
                    context.mac_address = unicode(mac_address)
                    vm_parameters.update({'mac_address': mac_address})
                return mac_address

            mac_address = yield ensure_compute_mac_address(context)

            cmd = [
                hook_script, secret, server, server_port, mac_address,
                str(vm_parameters['ip_address']), vm_parameters['uuid']
            ]

            yield subprocess.async_check_output(cmd)
        except error.ProcessTerminated:
            log.msg('Executing allocate_dhcp_ip.sh hook script failed: %s' %
                    (cmd))
            log.err(system='deploy-hook-kvm')
            raise
        except Exception:
            log.err(system='deploy-hook-kvm')
            raise
Ejemplo n.º 36
0
 def _getKeyNames(self, ktype):
     """Return the list of salt key names of kind *ktype*, as reported by
     the configured remote key command ('salt'/'remote_key_command')."""
     remote_salt_key_cmd = get_config().getstring('salt',
                                                  'remote_key_command',
                                                  None)
     output = subprocess.check_output(
         remote_salt_key_cmd.split(' ') + ['--no-color', '--out=raw'])
     # NOTE(review): eval() on external command output is unsafe if that
     # output can ever be influenced by untrusted input; consider
     # ast.literal_eval instead -- confirm the command is trusted.
     data = eval(output) if output else {}
     return data.get(ktype)
Ejemplo n.º 37
0
 def __init__(self):
     """Initialize profiler state from the 'daemon'/'debug' config sections."""
     super(MemoryProfilerDaemonProcess, self).__init__()
     config = get_config()
     self.summary_tracker = tracker.SummaryTracker()
     self.enabled = config.getboolean('daemon', 'memory-profiler', False)
     self.verbose = config.getint('debug', 'memory_profiler_verbose', 0)
     self.track = config.getint('debug', 'memory_profiler_track_changes', 0)
     self.interval = config.getint('debug', 'memory_profiler_interval', 60)
Ejemplo n.º 38
0
    def pack(self):
        storage_type = get_config().get('db', 'storage_type')

        if storage_type == 'zeo':
            print "[db_pack] zeo pack not implemented yet, please setup cron to run bin/zeopack -u db/socket"
        elif storage_type == 'embedded':
            d = db.get_db()
            d.pack(time.time())
Ejemplo n.º 39
0
    def _render(self, request):
        """Render an HTTP response: set CORS headers, dispatch to
        handle_request(), serialize the result as JSON and handle errors.

        HttpStatus exceptions map to their own status code/headers; any
        other exception yields a 500 with the traceback written to the
        response body.  Views may return NOT_DONE_YET to stream output and
        finish the request themselves.
        """
        # reflect the Origin back for credentialed CORS requests,
        # otherwise allow any origin
        origin = request.getHeader('Origin')
        if origin:
            request.setHeader('Access-Control-Allow-Origin', origin)
            request.setHeader('Access-Control-Allow-Credentials', 'true')
        else:
            request.setHeader('Access-Control-Allow-Origin', '*')
        request.setHeader('Access-Control-Allow-Methods', 'GET, PUT, POST, DELETE, OPTIONS, HEAD')
        request.setHeader('Access-Control-Allow-Headers',
                          'Origin, Content-Type, Cache-Control, X-Requested-With, Authorization')

        ret = None
        try:
            ret = yield self.handle_request(request)
            # allow views to take full control of output streaming
            if ret is not NOT_DONE_YET and ret is not EmptyResponse:
                request.setHeader('Content-Type', 'application/json')
                json_data = json.dumps(ret, indent=2, cls=JsonSetEncoder)
                request.setHeader('Content-Length', intToBytes(len(json_data)))
                request.write(json_data)
        except HttpStatus as exc:
            request.setResponseCode(exc.status_code, exc.status_description)
            for name, value in exc.headers.items():
                request.responseHeaders.addRawHeader(name, value)
            # emit a header with location of the trusted keystone instance
            if get_config().getboolean('auth', 'use_keystone', False):
                keystone_uri = get_config().getstring('keystone', 'keystone_uri')
                request.setHeader('WWW-Authenticate', 'Keystone uri=%s' % keystone_uri)
            if exc.body:
                request.write(json.dumps(exc.body))
            else:
                request.write("%s %s\n" % (exc.status_code, exc.status_description))
            if exc.message:
                request.write("%s\n" % exc.message)
        except Exception:
            # unexpected failure: report 500 and dump the traceback to the client
            request.setHeader('Content-Type', 'text/plain')
            request.setResponseCode(500, "Server Error")
            error_message = "%s %s\n\n" % (500, "Server Error")
            request.setHeader('Content-Length', intToBytes(len(error_message)))
            request.write(error_message)
            log.err(system='httprest')
            failure.Failure().printTraceback(request)
        finally:
            # NOT_DONE_YET means the view finishes the request itself
            if ret is not NOT_DONE_YET:
                request.finish()
Ejemplo n.º 40
0
 def handle_remote_error(self, ore, c, compute, status_name):
     """Report a remote operation failure and flag the compute's status."""
     ore.trap(OperationRemoteError)
     print_tb = (ore.value.remote_tb and
                 get_config().getboolean('debug', 'print_exceptions'))
     if print_tb:
         log.err(ore, system='sync')
     else:
         log.msg(str(ore.value), system='sync', logLevel=ERROR)
     self.delete_outstanding_request(compute)
     set_compute_status(compute.__name__, status_name, True)
Ejemplo n.º 41
0
    def gather_vms(self):
        """Fetch guest metrics for every VM on this compute and append the
        data points to the corresponding per-VM metric streams.

        Runs only when the compute is active and actually hosts VMs;
        collection errors abort the round after being logged.
        """

        @db.ro_transact
        def get_vms_if_not_empty():
            # return the vms container only when it holds at least one VM
            vms = follow_symlinks(self.context['vms']) or []

            for vm in vms:
                if IVirtualCompute.providedBy(vm):
                    return vms

            log.msg('%s: no VMs' % (self.context.hostname), system='metrics', logLevel=logging.DEBUG)

        vms = yield get_vms_if_not_empty()

        # get the metrics for all running VMS
        if not vms or self.context.state != u'active':
            return

        name = yield db.get(self.context, 'hostname')

        try:
            log.msg('%s: gather VM metrics' % (name), system='metrics', logLevel=logging.DEBUG)
            submitter = IVirtualizationContainerSubmitter(vms)
            metrics = yield submitter.submit(IGetGuestMetrics, __killhook=self._killhook)
        except OperationRemoteError as e:
            # remote side reported a failure: log (with remote tb if present)
            log.msg('%s: remote error: %s' % (name, e), system='metrics', logLevel=logging.DEBUG)
            if e.remote_tb:
                log.msg(e.remote_tb, system='metrics', logLevel=logging.DEBUG)
            return
        except Exception:
            log.msg("%s: error gathering VM metrics" % name, system='metrics', logLevel=logging.ERROR)
            if get_config().getboolean('debug', 'print_exceptions'):
                log.err(system='metrics')
            return

        if not metrics:
            log.msg('%s: no VM metrics received!' % name, system='metrics', logLevel=logging.WARNING)
            return

        log.msg('%s: VM metrics received: %s' % (name, len(metrics)), system='metrics')
        timestamp = int(time.time() * 1000)

        # db transact is needed only to traverse the zodb.
        @db.ro_transact
        def get_streams():
            streams = []
            for uuid, data in metrics.items():
                if vms[uuid] and vms[uuid]['metrics']:
                    vm_metrics = vms[uuid]['metrics']
                    for k in data:
                        if vm_metrics[k]:
                            streams.append((IStream(vm_metrics[k]), (timestamp, data[k])))
            return streams

        # streams could defer the data appending but we don't care
        for stream, data_point in (yield get_streams()):
            stream.add(data_point)
Ejemplo n.º 42
0
    def update(self, username):
        """Recompute and persist usage statistics for *username*.

        Skips users that are missing from authentication or not in the
        billable group.  Returns the stats dict, or None when skipped.
        """
        auth = getUtility(IAuthentication)
        p = auth.getPrincipal(username)

        if p is None:
            log.warning(
                'User not found in authentication: %s. Possibly a stale profile record.',
                username)
            return

        if username is None:
            username = p.id

        billable_group = get_config().getstring('auth', 'billable_group',
                                                'users')

        if billable_group not in p.groups:
            log.debug('User %s is not part of billable group: %s', username,
                      billable_group)
            return

        # presumably a principal object may be passed in place of a name;
        # normalize to its id -- TODO confirm caller contract
        if type(username) not in (str, unicode):
            username = username.id

        user_computes = self.get_computes(username)

        user_stats = {
            'num_cores_total': 0,
            'diskspace_total': 0,
            'memory_total': 0,
            'vm_count': len(user_computes)
        }

        for compute in user_computes:
            try:
                # only account for cores and RAM of the running VMs
                if compute.state == u'active':
                    user_stats['num_cores_total'] += compute.num_cores
                    user_stats['memory_total'] += compute.memory or 0
                user_stats['diskspace_total'] += compute.diskspace.get(
                    u'total') or 0
            except Exception:
                # a broken compute record must not abort the whole update
                log.error('Error collecting stats from %s',
                          compute,
                          exc_info=sys.exc_info())

        user_stats['timestamp'] = datetime.now()
        user_stats['credit'] = self.get_credit(username)

        self.save_vm_stats(username, user_stats)

        # fan the update out to every registered statistics logger
        loggers = getAllUtilitiesRegisteredFor(IUserStatisticsLogger)
        for logger in loggers:
            logger.log(username, user_stats)

        log.debug('Statistics update logged for %s', username)
        return user_stats
Ejemplo n.º 43
0
def _select_checker(value, interaction):
    """Build the zope.security Checker used to guard *value*."""
    checker = getCheckerForInstancesOf(type(value))
    if not checker:
        # no checker registered for this type: the configured policy
        # decides how permissive the fallback is
        if get_config().getboolean('auth', 'enforce_attribute_rights_definition'):
            perms = {}
        elif get_config().getboolean('auth',
                                     'audit_all_missing_attribute_rights_definitions'):
            perms = AuditingPermissionDictionary()
        else:
            perms = strong_defaultdict(lambda: CheckerPublic)
        return Checker(perms, perms, interaction=interaction)

    # handle checkers for "primitive" types like str
    if type(checker) is object:
        return checker

    return Checker(checker.get_permissions, checker.set_permissions,
                   interaction=interaction)
Ejemplo n.º 44
0
    def gather_vms(self):
        """Fetch guest metrics for every VM on this compute and append the
        data points to the corresponding per-VM metric streams.

        Runs only when the compute is active and actually hosts VMs;
        collection errors abort the round after being logged.
        """

        @db.ro_transact
        def get_vms_if_not_empty():
            # return the vms container only when it holds at least one VM
            vms = follow_symlinks(self.context['vms']) or []

            for vm in vms:
                if IVirtualCompute.providedBy(vm):
                    return vms

            log.msg('%s: no VMs' % (self.context.hostname), system='metrics', logLevel=logging.DEBUG)

        vms = yield get_vms_if_not_empty()

        # get the metrics for all running VMS
        if not vms or self.context.state != u'active':
            return

        name = yield db.get(self.context, 'hostname')

        try:
            log.msg('%s: gather VM metrics' % (name), system='metrics', logLevel=logging.DEBUG)
            submitter = IVirtualizationContainerSubmitter(vms)
            metrics = yield submitter.submit(IGetGuestMetrics, __killhook=self._killhook)
        except OperationRemoteError as e:
            log.msg('%s: remote error: %s' % (name, e), system='metrics', logLevel=logging.DEBUG)
            if e.remote_tb:
                log.msg(e.remote_tb, system='metrics', logLevel=logging.DEBUG)
            return
        except Exception:
            log.msg("%s: error gathering VM metrics" % name, system='metrics', logLevel=logging.ERROR)
            if get_config().getboolean('debug', 'print_exceptions'):
                log.err(system='metrics')
            # BUG FIX: without this return, execution fell through to the
            # 'if not metrics' check below with 'metrics' unbound (NameError).
            return

        if not metrics:
            log.msg('%s: no VM metrics received!' % name, system='metrics', logLevel=logging.WARNING)
            return

        log.msg('%s: VM metrics received: %s' % (name, len(metrics)), system='metrics')
        timestamp = int(time.time() * 1000)

        # db transact is needed only to traverse the zodb.
        @db.ro_transact
        def get_streams():
            streams = []
            for uuid, data in metrics.items():
                if vms[uuid] and vms[uuid]['metrics']:
                    vm_metrics = vms[uuid]['metrics']
                    for k in data:
                        if vm_metrics[k]:
                            streams.append((IStream(vm_metrics[k]), (timestamp, data[k])))
            return streams

        # streams could defer the data appending but we don't care
        for stream, data_point in (yield get_streams()):
            stream.add(data_point)
Ejemplo n.º 45
0
def handle_virtual_compute_config_change_request(compute, event):
    """Propagate whitelisted config changes of an openvz compute to the backend.

    Converts units (MB -> GB), keeps cpu_limit in sync with num_cores, and
    rolls the model back to its original values if the backend update fails.
    """
    c = sudo(compute)
    compute_p = yield db.get(c, '__parent__')
    compute_type = yield db.get(compute_p, 'backend')
    # At the moment we only handle openvz backend updates (OMS-568)
    if compute_type != 'openvz':
        return

    # only these attributes are propagated to the container backend
    update_param_whitelist = ['diskspace',
                              'memory',
                              'num_cores',
                              'swap_size']

    # diskspace is stored as a dict; the backend expects its 'total' value
    param_modifier = {'diskspace': lambda d: d['total']}

    # presumably model stores MB while the backend expects GB -- TODO confirm
    unit_corrections_coeff = {'memory': 1 / 1024.0,
                              'swap_size': 1 / 1024.0,
                              'diskspace': 1 / 1024.0}

    params_to_update = filter(lambda (k, v): k in update_param_whitelist, event.modified.iteritems())

    if len(params_to_update) == 0:
        return

    # correct unit coefficients (usually MB -> GB)
    params_to_update = map(lambda (k, v): (k, param_modifier.get(k, lambda x: x)(v)), params_to_update)
    params_to_update = map(lambda (k, v): (k, unit_corrections_coeff.get(k) * v
                                            if k in unit_corrections_coeff else v), params_to_update)

    @db.transact
    def update_vm_limits(cpu_limit):
        logger.debug("Setting cpu_limit to %s, previous value %s" % (cpu_limit / 100.0, c.cpu_limit))
        c.cpu_limit = cpu_limit / 100.0

    cores_setting = filter(lambda(k, v): k == 'num_cores', params_to_update)
    if len(cores_setting) == 1:
        # adjust cpu_limit to follow the number of cores as well
        cpu_limit = int(cores_setting[0][1] * get_config().getfloat('vms', 'cpu_limit', 80))
        log.msg("Updating cpulimit to %s" % cpu_limit, system='vm-configuration-update')

        params_to_update.append(('cpu_limit', cpu_limit))
        yield update_vm_limits(cpu_limit)

    submitter = IVirtualizationContainerSubmitter((yield db.get(compute, '__parent__')))
    try:
        yield submitter.submit(IUpdateVM, (yield db.get(compute, '__name__')), dict(params_to_update))
    except Exception as e:
        # revert the model so it keeps reflecting the actual backend state
        @db.transact
        def reset_to_original_values():
            for mk, mv in event.modified.iteritems():
                setattr(compute, mk, event.original[mk])
        yield reset_to_original_values()
        raise e  # must re-throw, because sys.exc_info seems to get erased with the yield
    else:
        owner = (yield db.get(compute, '__owner__'))
        UserLogger(subject=compute, owner=owner).log('Compute "%s" configuration changed' % compute)
Ejemplo n.º 46
0
    def run(self):
        """Daemon loop: sync on every tick unless paused.

        Exceptions never break the loop; they are logged when
        'debug'/'print_exceptions' is enabled.
        """
        while True:
            try:
                paused = self.paused
                if not paused:
                    yield self.sync()
            except Exception:
                if get_config().getboolean('debug', 'print_exceptions'):
                    log.err(system='sync')
            yield async_sleep(self.interval)
Ejemplo n.º 47
0
    def run(self):
        """Daemon loop: run a ping check on every tick unless paused.

        Exceptions never break the loop; they are logged when
        'debug'/'print_exceptions' is enabled.
        """
        while True:
            try:
                paused = self.paused
                if not paused:
                    yield self.ping_check()
            except Exception:
                if get_config().getboolean('debug', 'print_exceptions'):
                    log.err(system='ping-check')
            yield async_sleep(self.interval)
Ejemplo n.º 48
0
def _generate_classes():
    """Create a Salt adapter class for every supported Action interface and
    publish it in this module's namespace."""
    for interface, action in ACTIONS.items():
        cls_name = 'Salt%s' % interface.__name__[1:]
        adapter = type(cls_name, (SaltBase,), {'action': action})
        classImplements(adapter, interface)
        executor = get_config().getstring('salt', 'executor_class', 'simple')
        adapter.__executor__ = OVERRIDE_EXECUTORS.get(
            interface, SaltBase.executor_classes[executor])
        adapter.timeout = TIMEOUTS.get(interface)
        globals()[cls_name] = adapter
Ejemplo n.º 49
0
def delete_user(user):
    """Remove *user*'s record from the passwd file (no-op if absent)."""
    passwd_file = get_config().get('auth', 'passwd_file')
    with open(passwd_file) as f:
        entries = f.readlines()

    prefix = user + ':'
    with open(passwd_file, 'w') as f:
        f.writelines(line for line in entries if not line.startswith(prefix))
Ejemplo n.º 50
0
def delete_user(user):
    """Rewrite the passwd file, dropping the line that belongs to *user*."""
    passwd_file = get_config().get('auth', 'passwd_file')
    with open(passwd_file) as f:
        remaining = [line for line in f if not line.startswith(user + ':')]

    with open(passwd_file, 'w') as f:
        f.writelines(remaining)
Ejemplo n.º 51
0
 def execute(self, cmd, args):
     """Apply this action (self._action) to the salt key of this compute's
     hostname.

     Uses the configured remote key command when present; otherwise drives
     the local salt master's Key API directly.  Errors are written to the
     command output rather than raised.
     """
     hostname = yield db.get(self.context, 'hostname')
     remote_salt_key_cmd = get_config().getstring('salt', 'remote_key_command', None)
     if remote_salt_key_cmd:
         try:
             output = subprocess.check_output([remote_salt_key_cmd, self._remote_option, hostname,
                                               '--no-color', '--out=raw'])
             log.msg('Salt output: %s' % output, system='action-accept')
         except subprocess.CalledProcessError as e:
             cmd.write("%s\n" % format_error(e))
     else:
         try:
             # local master: import lazily so salt is only required here
             import salt.config
             from salt.key import Key
             c_path = get_config().getstring('salt', 'master_config_path', '/etc/salt/master')
             opts = salt.config.client_config(c_path)
             yield getattr(Key(opts), self._action)(hostname)
         except Exception as e:
             cmd.write("%s\n" % format_error(e))
Ejemplo n.º 52
0
 def initialize(self):
     print "[OncPlugin] initializing plugin"
     try:
         symlink_target = get_config().get('onc', 'symlink_target')
         relative_path = os.path.join(*(['../..']))
         symlink_source = pkg_resources.resource_filename(__name__, relative_path)
         if os.path.islink(symlink_target):
             os.unlink(symlink_target)
         os.symlink(symlink_source, symlink_target)
     except ConfigKeyError:
         pass
Ejemplo n.º 53
0
def _generate_classes():
    """Dynamically build and register one Salt adapter per supported Action."""
    for iface, action in ACTIONS.items():
        cls_name = 'Salt%s' % iface.__name__[1:]
        new_cls = type(cls_name, (SaltBase,), dict(action=action))
        classImplements(new_cls, iface)
        executor_key = get_config().getstring('salt', 'executor_class', 'simple')
        new_cls.__executor__ = OVERRIDE_EXECUTORS.get(
            iface, SaltBase.executor_classes[executor_key])
        new_cls.timeout = TIMEOUTS.get(iface)
        globals()[cls_name] = new_cls
def setup_acl(event):
    """Preload the ACL file configured under 'auth'/'acl_file'.

    In test mode an empty ACL is loaded instead.
    """
    if event.test:
        acl.preload_acl_file('')
        return

    acl_file = get_config().getstring('auth', 'acl_file', 'oms_acl')
    if os.path.exists(acl_file):
        yield acl.preload_acl_file(file(acl_file), filename=acl_file)
    else:
        log.warning("ACL file doesn't exist")
Ejemplo n.º 55
0
def get_db_dir():
    """Return the database directory.

    Tries the development helper script first; falls back to the configured
    'db'/'path' option when the script is unavailable or reports the
    default 'db' directory.
    """
    db_dir = 'db'
    try:
        # useful during development
        db_dir = subprocess.check_output('scripts/current_db_dir.sh').strip()
    except Exception:
        # best-effort: the helper script may be absent outside a dev checkout.
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate.
        pass

    if db_dir == 'db':
        db_dir = get_config().get('db', 'path')

    return db_dir
Ejemplo n.º 56
0
    def events(self, after, limit=None):
        """Return data points newer than *after*, optionally capped at *limit*.

        XXX: if nobody fills the data (func issues) then we return fake data.
        """
        if not self.data and get_config().getboolean('metrics', 'fake_metrics', False):
            return self._fake_events(after, limit)

        selected = []
        for idx, (ts, value) in enumerate(self.data):
            stale = ts <= after
            capped = bool(limit) and idx >= limit
            if stale or capped:
                break
            selected.append((ts, value))
        return selected
def setup_roles(event):
    """Load role/permission definitions, preferring an on-disk file over
    the copy shipped inside the egg."""
    perm_file = get_config().get('auth', 'permissions_file')

    if not os.path.exists(perm_file):
        # fall back to the content bundled in the egg
        perm_file = os.path.join('../../../', 'oms_permissions')
        opener = lambda f: pkg_resources.resource_stream(__name__, f)
    else:
        # watch the on-disk file so edits are picked up at runtime
        setup_conf_reload_watch(perm_file, reload_roles)
        opener = file

    reload_roles(opener(perm_file))
Ejemplo n.º 58
0
def _select_checker(value, interaction):
    """Pick the zope.security checker appropriate for *value*."""
    checker = getCheckerForInstancesOf(type(value))

    if checker:
        # "primitive" types (e.g. str) register a bare object as checker
        if type(checker) is object:
            return checker
        return Checker(checker.get_permissions, checker.set_permissions,
                       interaction=interaction)

    # no registered checker: fall back per the configured policy
    if get_config().getboolean('auth', 'enforce_attribute_rights_definition'):
        perms = {}
    elif get_config().getboolean(
            'auth', 'audit_all_missing_attribute_rights_definitions'):
        perms = AuditingPermissionDictionary()
    else:
        perms = strong_defaultdict(lambda: CheckerPublic)

    return Checker(perms, perms, interaction=interaction)
Ejemplo n.º 59
0
 def _get_profile_and_need_update(self, principal):
     """Return (profile, uid, needs_update) for *principal*'s home profile.

     needs_update is True when the credit-check cooldown has elapsed since
     the profile's last credit timestamp.
     """
     cooldown = get_config().getstring('auth', 'billing_timeout', 60)
     try:
         profile = traverse1('/home/%s' % principal.id)
         next_update = (datetime.strptime(profile.credit_timestamp,
                                          '%Y-%m-%dT%H:%M:%S.%f')
                        + timedelta(seconds=cooldown))
         log.debug('Next update for "%s": %s', principal.id, next_update)
         return (profile, profile.uid, next_update < datetime.now())
     except Exception as e:
         log.error('%s', e)
         raise
Ejemplo n.º 60
0
def run():
    """Command-line entry point for managing OMS users and passwords.

    Supports adding (-a), deleting (-d) and checking (-c) a user, or
    updating the password/groups of an existing one (default).
    """
    parser = argparse.ArgumentParser(description='Manage OMS passwords')
    parser.add_argument('user', help="user name")
    parser.add_argument('-g',
                        help="group(s): comma separated list of "
                        "groups the user belongs to",
                        required=False,
                        default=None)
    parser.add_argument('-s',
                        action='store_true',
                        help="force password "
                        "prompt even if setting group(s) via -g",
                        required=False,
                        default=None)

    group = parser.add_mutually_exclusive_group()
    group.add_argument('-a', action='store_true', help="add user")
    group.add_argument('-d', action='store_true', help="delete user")
    group.add_argument('-c',
                       action='store_true',
                       help="check password, useful "
                       "to troubleshoot login issues")

    args = parser.parse_args()

    ensure_base_dir()
    passwd_file = get_config().get('auth', 'passwd_file')

    # make sure the passwd file exists before any operation touches it
    if not os.path.exists(passwd_file):
        with open(passwd_file, 'w'):
            pass

    try:
        if args.d:
            delete_user(args.user)
        elif args.a:
            add_user(args.user, ask_password(), group=args.g)
        elif args.c:
            password_checker = FilePasswordDB(passwd_file, hash=ssha_hash)
            # NOTE(review): the next line appears corrupted (likely mangled by
            # a secret-scrubbing tool: '******' replaced part of the code); it
            # does not parse as written -- recover the original from VCS.
            credentials = UsernamePassword(args.user, getpass("Password: "******"Wrong credentials")

            print "ok"
        else:
            update_passwd(args.user, args.s, force=True)
    except UserManagementError as e:
        print e
        sys.exit(1)