def delete_pool(self, pool_name, return_to=None, **unused):

        template_args = {
            'return_to': util.make_url_internal(return_to),
            'was_deleted': False,
            'pool_name': pool_name,
            'slave_table': []
        }

        # check that desired stack exists
        try:
            pool_id = Pool.build_id(pool_name, namespace=None, owner=None)
            pool = Pool.get(pool_id)
            template_args['pool'] = pool

            # get server labels from GUID
            slave_table = []
            slaves = Slave.all(count_per_req=10000)
            for slave in pool.slaves_usage_bytes:
                for so in slaves:
                    if so.name == slave:
                        slave_table.append(so.label)
                        break

            template_args['slave_table'] = slave_table

            # handle save action
            if cherrypy.request.method == 'POST':
                
                pool.delete()
                template_args['was_deleted'] = True

        except Exception as e:
            logger.exception(e)
            template_args['controller_exception'] = e
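
The nested loop above resolves each slave GUID in pool.slaves_usage_bytes to its display label by rescanning the full slave list for every GUID. A minimal sketch of the same lookup, assuming only the Slave.name and Slave.label attributes already used above, builds the map once and then reads it directly:

# hypothetical refactor of the label lookup; attribute names taken from the code above
slaves = Slave.all(count_per_req=10000)
label_by_guid = dict((s.name, s.label) for s in slaves)
slave_table = [label_by_guid.get(guid, guid) for guid in pool.slaves_usage_bytes]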
Example no. 2
    def delete_pool(self, pool_name, return_to=None, **unused):

        template_args = {
            'return_to': util.make_url_internal(return_to),
            'was_deleted': False,
            'pool_name': pool_name,
            'slave_table': []
        }

        # check that desired stack exists
        try:
            pool_id = Pool.build_id(pool_name, namespace=None, owner=None)
            pool = Pool.get(pool_id)
            template_args['pool'] = pool

            # get server labels from GUID
            slave_table = []
            slaves = Slave.all()
            for slave in pool.slaves_usage_bytes:
                for so in slaves:
                    if so.name == slave:
                        slave_table.append(so.label)
                        break

            template_args['slave_table'] = slave_table

            # handle save action
            if cherrypy.request.method == 'POST':

                pool.delete()
                template_args['was_deleted'] = True

        except Exception as e:
            logger.exception(e)
            template_args['controller_exception'] = e
    def list_messages(self, category=None, return_to=None, **kw):
        '''
        Handles the message listing page
        '''

        messages = Message.all()
        try:
            self_config = SelfConfig.get()
        except splunk.AuthorizationFailed:
            return self.redirect_to_url(['manager'])

        if category:
            messages = messages.filter(category=category)

        # get list of available indexers that could be assigned
        slaves = Slave.all(count_per_req=10000)
        slave_label_map = {}
        for slave in slaves:
            slave_label_map[slave.name] = slave.label

        soft_messages = []
        hard_messages = []
        all_messages = []
        for message in messages:
                       
            newMessage = {
                'text': message.description,
                'create_time': message.create_time,
                'severity': message.severity.lower(),
                'pool_name': message.pool_name,
                'slave_name': slave_label_map.get(message.slave_name, message.slave_name),
                'stack_name': message.stack_name,
                'category': message.category
            }

            all_messages.append(newMessage)

            if message.category == 'license_window':
                hard_messages.append(newMessage)
            else: 
                soft_messages.append(newMessage)

        soft_messages.sort(key=operator.itemgetter('create_time'), reverse=True)
        hard_messages.sort(key=operator.itemgetter('create_time'), reverse=True)
        all_messages.sort(key=operator.itemgetter('create_time'), reverse=True)

        template_args = {
            'category': category,
            'soft_messages': soft_messages,
            'hard_messages': hard_messages,
            'all_messages': all_messages,
            'return_to': util.make_url_internal(return_to) or self.make_url(['manager','system','licensing']),
            'server_name': self_config.slave_label
        }
        return self.render_template('/licensing/messages/list.html', template_args)
    def list_indexers(self, return_to=None, **kw):
        '''
        Handles the indexer listing page
        '''

        slaves = Slave.all(count_per_req=10000)
        try:
            self_config = SelfConfig.get()
        except splunk.AuthorizationFailed:
            return self.redirect_to_url(['manager'])

        template_args = {
            'slaves': slaves or [],
            'return_to': util.make_url_internal(return_to) or self.make_url(['manager','system','licensing']),
            'server_name': self_config.slave_label
        }
        return self.render_template('/licensing/slaves/list.html', template_args)
Example no. 5
    def list_indexers(self, return_to=None, **kw):
        '''
        Handles the indexer listing page
        '''

        slaves = Slave.all()
        try:
            self_config = SelfConfig.get()
        except splunk.AuthorizationFailed:
            return self.redirect_to_url(['manager'])

        template_args = {
            'slaves': slaves or [],
            'return_to': util.make_url_internal(return_to)
                or self.make_url(['manager', 'system', 'licensing']),
            'server_name': self_config.slave_label
        }
        return self.render_template('/licensing/slaves/list.html',
                                    template_args)
    def show_summary(self, **unused):
        '''
        Renders the new license summary page
        '''

        #
        # retrieve data
        #

        # get local slave info; unauthorized access is simply
        # booted to manager TOC
        try:
            self_config = SelfConfig.get()
        except splunk.AuthorizationFailed:
            return self.redirect_to_url(['manager'])


        local_master_uri = None 
        if self_config.master_uri.lower() not in ['', 'self']:
            return self.show_slave_summary()

        # get all slaves, and find the local one in the mix
        slave_label_map = {}
        slaves = Slave.all(count_per_req=10000)
        local_slave = None
        for slave in slaves:
            slave_label_map[slave.name] = slave.label
            if slave.name == self_config.slave_name:
                local_slave = slave
        if not local_slave:
            raise Exception('Could not retrieve slave information for slave name: %s' % self_config.slave_name)
        slave_count = len(slaves)
    
        # get active group
        active_group = Group.all().filter(is_active=True)
        if len(active_group) == 0:
            logger.warn('no active license groups found; redirecting user to "add license" page')
            self.redirect_to_url(['manager','system','licensing','licenses','new'], _qs={'return_to': cherrypy.request.relative_uri})

        active_group = active_group[0]

        # get associated stacks
        stack_query = Stack.all()
        stacks = []
        for stack in stack_query:
            if stack.name in active_group.stack_names:
                stacks.append(stack)

        # get associated pools
        pool_query = Pool.all()
        pools = []
        catchall_pool_names = []
        for pool in pool_query:
            if pool.stack_name in [s.name for s in stacks]:
                pools.append(pool)
                if pool.slaves == CATCHALL_SLAVE_LIST:
                    catchall_pool_names.append(pool.name)

        licenses = License.all()
        messages = Message.all()


        #
        # generate output info
        #

        stack_table = []
        for stack in stacks:
            #if not stack.quota_bytes:
            #    remaining_perc = None
            #else:
            #    remaining_perc = stack.remaining_bytes / stack.quota_bytes
            stack_table.append({
                'name': stack.name,
                'quota_bytes': stack.quota_bytes,
                'label': stack.label
                #'remaining_bytes': stack.remaining_bytes,
                #'remaining_perc': remaining_perc
            })

        # compile a summary list of messages by category
        hard_messages = {}
        soft_messages = {}
        for message in messages:
            if message.category == 'license_window':
                message_type = hard_messages
            else: 
                message_type = soft_messages   

            message_type.setdefault(message.category, {
                'severity': message.severity.lower(),
                'count': 0, 
                'latest_time': datetime.datetime.fromtimestamp(0, splunk.util.localTZ), 
                'slaves': set()}
            )
            message_type[message.category]['count'] += 1
            message_type[message.category]['latest_time'] = max(
                message.create_time,
                message_type[message.category]['latest_time']
            )
            message_type[message.category]['slaves'].add(slave_label_map.get(message.slave_name, message.slave_name))




        # loop over the per-slave data embedded in each pool descriptor
        pool_table = []
        slave_table = []
        local_used_bytes = 0.0
        for pool in pools:

            effective_global_quota = pool.quota_bytes['byte_value']
            if pool.quota_bytes['value_mode'] == 'MAX':
                for stack in stacks:
                    if pool.stack_name == stack.name:
                        effective_global_quota = stack.quota_bytes
                        break
                
            pool_table.append({
                'name': pool.name,
                'stack_name': pool.stack_name,
                'used_bytes': pool.used_bytes,
                'quota_bytes': effective_global_quota,
                'quota_mode': pool.quota_bytes['value_mode']
            })

            for slave in sorted(pool.slaves_usage_bytes):
                tmp_slave_bytes = float(pool.slaves_usage_bytes[slave])
                
                # accum the usage for the local slave
                if slave == self_config.slave_name:
                    local_used_bytes += tmp_slave_bytes
                
                if not effective_global_quota:
                    used_perc = None
                else:
                    used_perc = tmp_slave_bytes / effective_global_quota

                slave_table.append({
                    'pool_name': pool.name,
                    'name': slave_label_map.get(slave, slave),
                    'used_bytes': tmp_slave_bytes,
                    'used_perc': used_perc
                })

        license_table = []
        for license in licenses:
            license_table.append({
                'name': license.name,
                'label': license.label,
                'type': license.type,
                'stack_name': license.stack_name,
                'quota_bytes': license.quota_bytes,
                'expiration_time': license.expiration_time,
                'status': license.status.upper(),
                'can_remove': license.metadata.can_remove
            })
        license_table.sort(key=operator.itemgetter('expiration_time'))


        # the UI will only support managing pools within the enterprise stack
        if active_group.name in POOLABLE_GROUPS:
            can_edit_pools = True
        else:
            can_edit_pools = False
        

        # assemble into mako dict
        template_args = {
            'local_slave_name': local_slave.label,
            'local_used_bytes': local_used_bytes,
            'local_warning_count': local_slave.warning_count,
            'local_master_uri': local_master_uri,
            'active_group_name': active_group.name,
            'default_stack_name': DEFAULT_STACK_NAME,
            'slave_count': slave_count,
            'pool_table': pool_table,
            'stack_table': stack_table,
            'slave_table': slave_table,
            'license_table': license_table,
            'hard_messages': hard_messages,
            'soft_messages': soft_messages,
            'can_edit_pools': can_edit_pools,
            'catchall_pool_names': catchall_pool_names,
            'can_be_remote_master': self_config.features.get('CanBeRemoteMaster') == 'ENABLED',
            'showLicenseUsage': (cherrypy.config['product_type'] != 'hunk')
        }

        return self.render_template('/licensing/overview.html', template_args)
            if pool.slaves == CATCHALL_SLAVE_LIST and pool.name != pool_object.name:
                can_be_catch_all = False
                break

        # determine indexer selection mode
        if pool_object.slaves == CATCHALL_SLAVE_LIST and can_be_catch_all:
            slave_mode = 'catchall'
        else:
            slave_mode = 'explicit'

        # get list of pools that maintain true exclusivity on slaves as
        # opposed to listing them due to auto-inclusion via catch-all mechanism
        restrictive_pools = set([p.name for p in pools if p.slaves != CATCHALL_SLAVE_LIST])

        # get list of available indexers that could be assigned
        slaves = Slave.all(count_per_req=10000)
        slave_list = []
        slave_label_map = {}
        for slave in slaves:
            slave_label_map[slave.name] = slave.label
            slave_is_eligible = True

            # slaves that are explicitly assigned to pool within the same
            # stack cannot be assigned again; slaves that are registered
            # with a catch-all pool are still eligible to be explicitly set
            if (stack_name in slave.stack_names) \
                    and (set(slave.pool_names) & restrictive_pools) \
                    and pool_object.name not in slave.pool_names:
                slave_is_eligible = False

            slave_list.append([slave.name, slave.label, slave_is_eligible])
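
The fragment above decides how a pool's indexer selection is presented: a pool is shown in 'catchall' mode only when it uses the catch-all slave list and no other pool in scope already claims it. A compact sketch of that rule, assuming the same CATCHALL_SLAVE_LIST constant and pool attributes as above:

def selection_mode(pool_object, pools):
    # only one pool may own the catch-all slave list; any other claimant
    # forces this pool back to explicit indexer selection
    other_catchalls = [p for p in pools
                       if p.slaves == CATCHALL_SLAVE_LIST and p.name != pool_object.name]
    if pool_object.slaves == CATCHALL_SLAVE_LIST and not other_catchalls:
        return 'catchall'
    return 'explicit'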
Example no. 8
            if pool.slaves == CATCHALL_SLAVE_LIST and pool.name != pool_object.name:
                can_be_catch_all = False
                break

        # determine indexer selection mode
        if pool_object.slaves == CATCHALL_SLAVE_LIST and can_be_catch_all:
            slave_mode = 'catchall'
        else:
            slave_mode = 'explicit'

        # get list of pools that maintain true exclusivity on slaves as
        # opposed to listing them due to auto-inclusion via catch-all mechanism
        restrictive_pools = set([p.name for p in pools if p.slaves != CATCHALL_SLAVE_LIST])

        # get list of available indexers that could be assigned
        slaves = Slave.all()
        slave_list = []
        slave_label_map = {}
        for slave in slaves:
            slave_label_map[slave.name] = slave.label
            slave_is_eligible = True

            # slaves that are explicitly assigned to pool within the same
            # stack cannot be assigned again; slaves that are registered
            # with a catch-all pool are still eligible to be explicitly set
            if (stack_name in slave.stack_names) \
                    and (set(slave.pool_names) & restrictive_pools) \
                    and pool_object.name not in slave.pool_names:
                slave_is_eligible = False

            slave_list.append([slave.name, slave.label, slave_is_eligible])
Example no. 9
    def list_messages(self, category=None, return_to=None, **kw):
        '''
        Handles the message listing page
        '''

        messages = Message.all()
        try:
            self_config = SelfConfig.get()
        except splunk.AuthorizationFailed:
            return self.redirect_to_url(['manager'])

        if category:
            messages = messages.filter(category=category)

        # get list of available indexers that could be assigned
        slaves = Slave.all()
        slave_label_map = {}
        for slave in slaves:
            slave_label_map[slave.name] = slave.label

        soft_messages = []
        hard_messages = []
        all_messages = []
        for message in messages:

            newMessage = {
                'text': message.description,
                'create_time': message.create_time,
                'severity': message.severity.lower(),
                'pool_name': message.pool_name,
                'slave_name': slave_label_map.get(message.slave_name, message.slave_name),
                'stack_name': message.stack_name,
                'category': message.category
            }

            all_messages.append(newMessage)

            if message.category == 'license_window':
                hard_messages.append(newMessage)
            else:
                soft_messages.append(newMessage)

        soft_messages.sort(key=operator.itemgetter('create_time'),
                           reverse=True)
        hard_messages.sort(key=operator.itemgetter('create_time'),
                           reverse=True)
        all_messages.sort(key=operator.itemgetter('create_time'), reverse=True)

        template_args = {
            'category': category,
            'soft_messages': soft_messages,
            'hard_messages': hard_messages,
            'all_messages': all_messages,
            'return_to': util.make_url_internal(return_to)
                or self.make_url(['manager', 'system', 'licensing']),
            'server_name': self_config.slave_label
        }
        return self.render_template('/licensing/messages/list.html',
                                    template_args)
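
Both versions of list_messages split the feed into hard and soft buckets: 'license_window' is the only hard (enforcement) category, everything else is soft, and each bucket is sorted newest-first. A small sketch of that bucketing, assuming only the message fields already read above:

def bucket_messages(messages):
    # 'license_window' messages signal enforcement; all other categories are advisory
    hard = [m for m in messages if m.category == 'license_window']
    soft = [m for m in messages if m.category != 'license_window']
    for bucket in (hard, soft):
        bucket.sort(key=lambda m: m.create_time, reverse=True)
    return hard, soft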
Example no. 10
    def show_summary(self, **unused):
        '''
        Renders the new license summary page
        '''

        #
        # retrieve data
        #

        # get local slave info; unauthorized access is simply
        # booted to manager TOC
        try:
            self_config = SelfConfig.get()
        except splunk.AuthorizationFailed:
            return self.redirect_to_url(['manager'])

        local_master_uri = None
        if self_config.master_uri.lower() not in ['', 'self']:
            return self.show_slave_summary()

        # get all slaves, and find the local one in the mix
        slave_label_map = {}
        slaves = Slave.all()
        local_slave = None
        for slave in slaves:
            slave_label_map[slave.name] = slave.label
            if slave.name == self_config.slave_name:
                local_slave = slave
        if not local_slave:
            raise Exception('Could not retrieve slave information for slave name: %s' % self_config.slave_name)
        slave_count = len(slaves)

        # get active group
        active_group = Group.all().filter(is_active=True)
        if len(active_group) == 0:
            logger.warn(
                'no active license groups found; redirecting user to "add license" page'
            )
            self.redirect_to_url(
                ['manager', 'system', 'licensing', 'licenses', 'new'],
                _qs={'return_to': cherrypy.request.relative_uri})

        active_group = active_group[0]

        # get associated stacks
        stack_query = Stack.all()
        stacks = []
        for stack in stack_query:
            if stack.name in active_group.stack_names:
                stacks.append(stack)

        # get associated pools
        pool_query = Pool.all()
        pools = []
        catchall_pool_names = []
        for pool in pool_query:
            if pool.stack_name in [s.name for s in stacks]:
                pools.append(pool)
                if pool.slaves == CATCHALL_SLAVE_LIST:
                    catchall_pool_names.append(pool.name)

        licenses = License.all()
        messages = Message.all()

        #
        # generate output info
        #

        stack_table = []
        for stack in stacks:
            #if not stack.quota_bytes:
            #    remaining_perc = None
            #else:
            #    remaining_perc = stack.remaining_bytes / stack.quota_bytes
            stack_table.append({
                'name': stack.name,
                'quota_bytes': stack.quota_bytes,
                'label': stack.label
                #'remaining_bytes': stack.remaining_bytes,
                #'remaining_perc': remaining_perc
            })

        # compile a summary list of messages by category
        hard_messages = {}
        soft_messages = {}
        for message in messages:
            if message.category == 'license_window':
                message_type = hard_messages
            else:
                message_type = soft_messages

            message_type.setdefault(message.category, {
                'severity': message.severity.lower(),
                'count': 0,
                'latest_time': datetime.datetime.fromtimestamp(0, splunk.util.localTZ),
                'slaves': set()
            })
            message_type[message.category]['count'] += 1
            message_type[message.category]['latest_time'] = max(
                message.create_time,
                message_type[message.category]['latest_time'])
            message_type[message.category]['slaves'].add(
                slave_label_map.get(message.slave_name, message.slave_name))

        # loop over the per-slave data embedded in each pool descriptor
        pool_table = []
        slave_table = []
        local_used_bytes = 0.0
        for pool in pools:

            effective_global_quota = pool.quota_bytes['byte_value']
            if pool.quota_bytes['value_mode'] == 'MAX':
                for stack in stacks:
                    if pool.stack_name == stack.name:
                        effective_global_quota = stack.quota_bytes
                        break

            pool_table.append({
                'name': pool.name,
                'stack_name': pool.stack_name,
                'used_bytes': pool.used_bytes,
                'quota_bytes': effective_global_quota,
                'quota_mode': pool.quota_bytes['value_mode']
            })

            for slave in sorted(pool.slaves_usage_bytes):
                tmp_slave_bytes = float(pool.slaves_usage_bytes[slave])

                # accum the usage for the local slave
                if slave == self_config.slave_name:
                    local_used_bytes += tmp_slave_bytes

                if not effective_global_quota:
                    used_perc = None
                else:
                    used_perc = tmp_slave_bytes / effective_global_quota

                slave_table.append({
                    'pool_name': pool.name,
                    'name': slave_label_map.get(slave, slave),
                    'used_bytes': tmp_slave_bytes,
                    'used_perc': used_perc
                })

        license_table = []
        for license in licenses:
            license_table.append({
                'name': license.name,
                'label': license.label,
                'type': license.type,
                'stack_name': license.stack_name,
                'quota_bytes': license.quota_bytes,
                'expiration_time': license.expiration_time,
                'status': license.status.upper(),
                'can_remove': license.metadata.can_remove
            })
        license_table.sort(key=operator.itemgetter('expiration_time'))

        # the UI will only support managing pools within the enterprise stack
        if active_group.name in POOLABLE_GROUPS:
            can_edit_pools = True
        else:
            can_edit_pools = False

        # assemble into mako dict
        template_args = {
            'local_slave_name': local_slave.label,
            'local_used_bytes': local_used_bytes,
            'local_warning_count': local_slave.warning_count,
            'local_master_uri': local_master_uri,
            'active_group_name': active_group.name,
            'default_stack_name': DEFAULT_STACK_NAME,
            'slave_count': slave_count,
            'pool_table': pool_table,
            'stack_table': stack_table,
            'slave_table': slave_table,
            'license_table': license_table,
            'hard_messages': hard_messages,
            'soft_messages': soft_messages,
            'can_edit_pools': can_edit_pools,
            'catchall_pool_names': catchall_pool_names,
            'can_be_remote_master': self_config.features.get('CanBeRemoteMaster') == 'ENABLED'
        }

        return self.render_template('/licensing/overview.html', template_args)
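
The per-slave usage rows in show_summary depend on an effective quota: a pool whose quota_bytes is in 'MAX' mode inherits the quota of its owning stack, and the usage fraction is only computed when that quota is non-zero. A minimal sketch of the arithmetic, reusing the field names from the loops above:

def effective_quota(pool, stacks):
    # a pool in 'MAX' mode has no quota of its own; it borrows its stack's quota
    if pool.quota_bytes['value_mode'] == 'MAX':
        for stack in stacks:
            if stack.name == pool.stack_name:
                return stack.quota_bytes
    return pool.quota_bytes['byte_value']

def usage_fraction(used_bytes, quota_bytes):
    # mirror used_perc above: None when there is no effective quota to divide by
    return float(used_bytes) / quota_bytes if quota_bytes else None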
Example no. 11
                can_be_catch_all = False
                break

        # determine indexer selection mode
        if pool_object.slaves == CATCHALL_SLAVE_LIST and can_be_catch_all:
            slave_mode = 'catchall'
        else:
            slave_mode = 'explicit'

        # get list of pools that maintain true exclusivity on slaves as
        # opposed to listing them due to auto-inclusion via catch-all mechanism
        restrictive_pools = set(
            [p.name for p in pools if p.slaves != CATCHALL_SLAVE_LIST])

        # get list of available indexers that could be assigned
        slaves = Slave.all()
        slave_list = []
        slave_label_map = {}
        for slave in slaves:
            slave_label_map[slave.name] = slave.label
            slave_is_eligible = True

            # slaves that are explicitly assigned to pool within the same
            # stack cannot be assigned again; slaves that are registered
            # with a catch-all pool are still eligible to be explicitly set
            if (stack_name in slave.stack_names) \
                    and (set(slave.pool_names) & restrictive_pools) \
                    and pool_object.name not in slave.pool_names:
                slave_is_eligible = False

            slave_list.append([slave.name, slave.label, slave_is_eligible])
Example no. 12
                can_be_catch_all = False
                break

        # determine indexer selection mode
        if pool_object.slaves == CATCHALL_SLAVE_LIST and can_be_catch_all:
            slave_mode = 'catchall'
        else:
            slave_mode = 'explicit'

        # get list of pools that maintain true exclusivity on slaves as
        # opposed to listing them due to auto-inclusion via catch-all mechanism
        restrictive_pools = set(
            [p.name for p in pools if p.slaves != CATCHALL_SLAVE_LIST])

        # get list of available indexers that could be assigned
        slaves = Slave.all(count_per_req=10000)
        slave_list = []
        slave_label_map = {}
        for slave in slaves:
            slave_label_map[slave.name] = slave.label
            slave_is_eligible = True

            # slaves that are explicitly assigned to pool within the same
            # stack cannot be assigned again; slaves that are registered
            # with a catch-all pool are still eligible to be explicitly set
            if (stack_name in slave.stack_names) \
                    and (set(slave.pool_names) & restrictive_pools) \
                    and pool_object.name not in slave.pool_names:
                slave_is_eligible = False

            slave_list.append([slave.name, slave.label, slave_is_eligible])
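
The eligibility check above encodes a single rule: an indexer already explicitly assigned to another pool in the same stack cannot be assigned again, while membership that comes only through a catch-all pool does not disqualify it. A minimal sketch of that predicate, using the same names as the loop above:

def slave_is_eligible(slave, stack_name, restrictive_pools, current_pool_name):
    # a slave is blocked only when it is pinned to some other restrictive
    # (non-catch-all) pool within the stack being edited
    already_pinned = (stack_name in slave.stack_names
                      and bool(set(slave.pool_names) & restrictive_pools)
                      and current_pool_name not in slave.pool_names)
    return not already_pinned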