Example #1
class HydraCollectionStanza(SOLNAppObjModel):
    '''
    Provides object mapping for the example hydra collection stanzas.
    The conf file is used to determine what jobs are to be done on what hosts; in the prototype this amounts to printing a message.
    Field Meanings:
        target - The target resources on which to apply the job, typically remote hosts; this is a comma delimited list of targets
        username - The username to use on all targets for auth purposes
        realm - The realm, if using realm-based credential storage
        task - The types of the jobs to be executed, each matching the capability of a worker that can execute it; this is a comma delimited list of tasks
        message - The message to print out
        big_job_interval - The collection interval for big jobs
        medium_job_interval - The collection interval for medium jobs
        small_job_interval - The collection interval for small jobs
    '''

    resource = 'configs/conf-hydra_collection'

    use_model_as_spec = True

    target = CSVField()
    username = Field()
    realm = Field()
    task = CSVField()
    message = Field()
    big_job_interval = IntField()
    medium_job_interval = IntField()
    small_job_interval = IntField()
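
For context, a minimal usage sketch (hedged: it assumes the standard SplunkAppObjModel manager accessor all(), and the helper name is hypothetical). It iterates the configured stanzas and prints the message each job would emit per target, which is all the prototype does:

def print_prototype_jobs():
    # target is a CSVField, so it deserializes to a list of host strings
    for stanza in HydraCollectionStanza.all():
        for target in stanza.target or []:
            print("[%s] %s -> %s" % (stanza.name, target, stanza.message))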
Example #2
class CookedTCPInput(SocketInput):

    resource = 'data/inputs/tcp/cooked'
    compressed = BoolField()
    enable_s2s_heartbeat = BoolField()
    input_shutdown_timeout = IntField()
    # TODO: cast to RouteField()
    route = Field()
    s2s_heartbeat_timeout = IntField()
Example #3
class AlertOverlay(SplunkAppObjModel):
    
    resource              = 'unix/alert_overlay'
    description           = Field()
    business_impact       = Field()
    remediation           = Field()
    escalation            = Field()
    threshold_max         = IntField()
    threshold_min         = IntField()
    threshold_type        = Field()
    threshold_unit        = Field()
class ClusterSlaveBucket(SplunkAppObjModel):
    '''
    Represents a slave's cluster bucket state
    '''

    resource = 'cluster/slave/buckets'

    checksum = Field(is_mutable=False)
    earliest_time = IntField(is_mutable=False)
    generations = DictField(is_mutable=False)
    index = Field(is_mutable=False)
    latest_time = IntField(is_mutable=False)
    search_state = Field(is_mutable=False)
    status = Field(is_mutable=False)
class ClusterSlaveInfo(SplunkAppObjModel):
    '''
    Represents a slave node's state
    TODO
    '''
    resource = 'cluster/slave/info'

    active_bundle = DictField(is_mutable=False)
    base_generation_id = IntField(is_mutable=False)
    is_registered = BoolField(is_mutable=False)
    last_heartbeat_attempt = IntField(is_mutable=False)
    latest_bundle = DictField(is_mutable=False)
    maintenance_mode = IntField(is_mutable=False)
    restart_state = Field(is_mutable=False)
    site = Field(is_mutable=False)
    status = Field(is_mutable=False)
class Cluster(HDFSAppObjModel):

    resource = 'clusters'
    hadoop_home = Field()
    java_home = Field()
    uri = Field()
    namenode_http_port = IntField()
    authentication_mode = Field()
    authorization_mode = BoolField()
    kerberos_principal = Field()
    kerberos_service_principal = Field()
    auth_to_local = Field()
    ha = BoolField()
    hdfs_site = Field()

    def isSecure(self):
        return self.kerberos_principal is not None and self.kerberos_principal != ''

    def isHaEnabled(self):
        return self.ha == 1

    def getURI(self):
        if self.uri is None or self.uri.strip() == '':
            return 'hdfs://' + self.name
        return self.uri

    def isLocallyMounted(self):
        return self.getURI().startswith('file://')
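
For intuition, an illustrative sketch of the getURI()/isLocallyMounted() fallback above (cluster name and URIs are hypothetical):

# Assuming the entity name is 'prodcluster':
#   uri = ''                   -> getURI() returns 'hdfs://prodcluster'
#   uri = 'hdfs://nn1:8020'    -> getURI() returns it unchanged
#   uri = 'file:///mnt/hdfs'   -> isLocallyMounted() returns True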
class FiredAlertSummary(SplunkAppObjModel):
    '''
    Represents a Splunk fired/triggered alert summary
    '''

    resource = 'alerts/fired_alerts'

    triggered_alert_count = IntField()
class ClusterMasterGeneration(SplunkAppObjModel):
    '''
    Represents a master's generation info
    '''

    resource = '/cluster/master/generation'

    generation_id = IntField(is_mutable=False)
    generation_peers = DictField(is_mutable=False)
    last_complete_generation_id = IntField(is_mutable=False)
    multisite_error = Field(is_mutable=False)
    pending_generation_id = IntField(is_mutable=False)
    pending_last_attempt = IntField(is_mutable=False)
    pending_last_reason = Field(is_mutable=False)
    replication_factor_met = BoolField(is_mutable=False)
    search_factor_met = BoolField(is_mutable=False)
    was_forced = BoolField(is_mutable=False)
Example #9
class MonitorInput(Input):

    resource = 'data/inputs/monitor'
    always_open_file = BoolField()
    blacklist = Field()
    crc_salt = Field(api_name='crcSalt')
    file_count = IntField(api_name='filecount', is_mutable=False)
    follow_symlink = BoolField(api_name='followSymlink')
    follow_tail = BoolField()
    host_regex = Field()
    host_segment = Field()
    # TODO : cast to TimeField()
    ignore_older_than = Field()
    move_policy = Field()
    recursive = BoolField()
    time_before_close = IntField()
    whitelist = Field()
class ClusterMasterPeer(SplunkAppObjModel):
    '''
    Represents a master's cluster peer state
    '''

    resource = 'cluster/master/peers'

    active_bundle_id = Field(is_mutable=False)
    apply_bundle_status = DictField(is_mutable=False)
    base_generation_id = IntField(is_mutable=False)
    bucket_count = IntField(is_mutable=False)
    bucket_count_by_index = DictField(is_mutable=False)
    delayed_buckets_to_discard = ListField(is_mutable=False)
    fixup_set = ListField(is_mutable=False)
    host_port_pair = Field(is_mutable=False)
    is_searchable = BoolField(is_mutable=False)
    label = Field(is_mutable=False)
    last_heartbeat = EpochField(is_mutable=False)
    latest_bundle_id = Field(is_mutable=False)
    pending_job_count = IntField(is_mutable=False)
    primary_count = IntField(is_mutable=False)
    primary_count_remote = IntField(is_mutable=False)
    replication_count = IntField(is_mutable=False)
    replication_port = IntField(is_mutable=False)
    replication_use_ssl = BoolField(is_mutable=False)
    search_state_counter = DictField(is_mutable=False)
    site = Field(is_mutable=False)
    status = Field(is_mutable=False)
    status_counter = DictField(is_mutable=False)
class ClusterMasterBucket(SplunkAppObjModel):
    '''
    Represents a master's cluster bucket state
    '''

    resource = 'cluster/master/buckets'

    bucket_size = IntField(is_mutable=False)
    constrain_to_origin_site = BoolField(is_mutable=False)
    force_roll = BoolField(is_mutable=False)
    frozen = BoolField(is_mutable=False)
    index = Field(is_mutable=False)
    origin_site = Field(is_mutable=False)
    peers = DictField(is_mutable=False)
    primaries_by_site = DictField(is_mutable=False)
    rep_count_by_site = DictField(is_mutable=False)
    search_count_by_site = DictField(is_mutable=False)
    service_after_time = IntField(is_mutable=False)
    standalone = BoolField(is_mutable=False)
Example #12
class DataModelSummarization(SplunkAppObjModel):
    resource = 'admin/summarization'

    name = Field()
    access_count = IntField(api_name='summary.access_count')
    access_time = Field(api_name='summary.access_time')
    buckets = IntField(api_name='summary.buckets')
    buckets_size = IntField(api_name='summary.buckets_size')
    complete = Field(api_name='summary.complete')
    digest = Field(api_name="eai:digest")
    earliest = Field(api_name='summary.earliest_time')
    is_inprogress = Field(api_name='summary.is_inprogress')
    last_error = Field(api_name='summary.last_error')
    last_sid = Field(api_name='summary.last_sid')
    latest = Field(api_name='summary.latest_time')
    mod_time = Field(api_name='summary.mod_time')
    retention = IntField(api_name='summary.time_range')
    size = IntField(api_name='summary.size')
    summary_id = Field(api_name='summary.id')
Example #13
class WinEventLogInput(Input):

    resource = 'data/inputs/win-event-log-collections'
    checkpoint_interval = IntField(api_name='checkpointInterval')
    current_only = BoolField()
    evt_dc_name = Field()
    evt_dns_name = Field()
    evt_resolve_ad_obj = BoolField()
    logs = ListField()
    start_from = Field()
class FiredAlert(SplunkAppObjModel):
    '''
    Represents a Splunk fired/triggered alert
    '''

    resource = 'alerts/fired_alerts/-'

    actions          = ListField()
    alert_type       = Field()
    savedsearch_name = Field()
    sid              = Field()
    severity         = IntField()
    trigger_time     = EpochField()
    # these are rendered time strings in the current user's timezone
    trigger_time_rendered = Field()
    expiration_time_rendered  = Field()
    digest_mode      = BoolField()
    triggered_alerts = IntField()

    def get_savedsearch(self):
        from splunk.models.saved_search import SavedSearch
        return SavedSearch.get(self.entity.getLink('savedsearch'))       

    def get_job(self):
        job_id = self.entity.getLink('job')
        #TODO: return a search job object
        return None

    @classmethod
    def get_alerts(cls, alerts_id):
        '''
        Returns a SplunkQuerySet that can be used to access the alerts fired by the given id.
        The SplunkQuerySet can be modified to include a search, custom ordering, etc.

        example alerts_id:
           absolute: https://localhost:8089/servicesNS/nobody/search/alerts/fired_alerts/AlertTest1
           relative: /servicesNS/nobody/search/alerts/fired_alerts/AlertTest1
        '''

        k      = SplunkQuerySet(FiredAlert.manager(), 30)
        k._uri = alerts_id
        return k 
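
A hedged usage sketch (the alerts_id is the illustrative one from the docstring; iterating a SplunkQuerySet pages through the REST results in this framework):

alerts = FiredAlert.get_alerts('/servicesNS/nobody/search/alerts/fired_alerts/AlertTest1')
for alert in alerts:
    print(alert.savedsearch_name, alert.trigger_time_rendered, alert.severity)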
class License(SplunkAppObjModel):
    '''
    Represents a single license object
    '''

    resource = 'licenser/licenses'

    creation_time = EpochField()
    expiration_time = EpochField()
    features = ListField()
    hash = Field(api_name='license_hash')
    label = Field()
    max_violations = IntField()
    payload = Field()
    quota_bytes = FloatField(api_name='quota')
    sourcetypes = ListField()
    stack_name = Field(api_name='stack_id')
    status = Field()
    type = Field()
    window_period = IntField()
Example #16
class Slave(SplunkAppObjModel):
    '''
    Represents a Splunk license slave server
    '''

    resource = 'licenser/slaves'

    added_usage_parsing_warnings = BoolField()
    pool_names = ListField(api_name='pool_ids', is_mutable=False)
    stack_names = ListField(api_name='stack_ids', is_mutable=False)
    warning_count = IntField()
    label = Field()
class SelfConfig(SplunkAppObjModel):
    '''
    Represents a Splunk license tracker (master) server
    '''

    resource = 'licenser/localslave'
    resource_default = 'licenser/localslave/license'

    connection_timeout = IntField(is_mutable=False)
    features = DictField(is_mutable=False)
    last_master_contact_attempt_time = EpochField(is_mutable=False)
    last_master_contact_success_time = EpochField(is_mutable=False)
    last_trackerdb_service_time = EpochField(is_mutable=False)
    license_keys = ListField(is_mutable=False)
    master_guid = Field(is_mutable=False)
    master_uri = Field()
    receive_timeout = IntField(is_mutable=False)
    send_timeout = IntField(is_mutable=False)
    slave_name = Field(api_name='slave_id', is_mutable=False)
    slave_label = Field(is_mutable=False)
    squash_threshold = IntField(is_mutable=False)
class Pool(SplunkAppObjModel):
    '''
    Represents a license pool container
    '''

    resource = 'licenser/pools'

    description = Field()
    is_catch_all = BoolField()
    penalty = IntField()
    quota_bytes = IntByteField(api_name='quota')
    slaves = ListField()
    slaves_usage_bytes = DictField(is_mutable=False)
    stack_name = Field(api_name='stack_id', is_mutable=False)
    used_bytes = FloatField()
class ClusterSearchheadGeneration(SplunkAppObjModel):
    '''
    Represents a searchhead node's state
    '''

    resource = 'cluster/searchhead/generation'

    generation_error = Field(is_mutable=False)
    generation_id = Field(is_mutable=False)
    generation_peers = DictField(is_mutable=False)
    is_searchable = BoolField(is_mutable=False)
    multisite_error = IntField(is_mutable=False)
    replication_factor_met = BoolField(is_mutable=False)
    search_factor_met = BoolField(is_mutable=False)
    status = BoolField(is_mutable=False)
    was_forced = BoolField(is_mutable=False)
Example #20
class Input(SplunkRESTModel):

    resource = 'data/inputs'
    disabled = BoolField(is_mutable=False)
    host = Field()
    index = Field()
    queue = Field()
    rcvbuf = IntField(api_name='_rcvbuf', is_mutable=False)
    source = Field()
    sourcetype = Field()

    def _reload(self):
        # POST to the input's sibling _reload endpoint,
        # e.g. .../data/inputs/monitor/<name> -> .../data/inputs/monitor/_reload
        path = '/'.join([self.id.rsplit('/', 1)[0], '_reload'])
        response, content = rest.simpleRequest(path, method='POST')
        return response.status == 200
class ClusterMasterIndex(SplunkAppObjModel):
    '''
    Represents a master's cluster indexes
    '''
    resource = 'cluster/master/indexes'

    buckets_with_excess_copies = IntField(is_mutable=False)
    buckets_with_excess_searchable_copies = IntField(is_mutable=False)
    index_size = IntField(is_mutable=False)
    is_searchable = BoolField(is_mutable=False)
    num_buckets = IntField(is_mutable=False)
    replicated_copies_tracker = ListField(is_mutable=False)
    searchable_copies_tracker = ListField(is_mutable=False)
    sort_order = IntField(is_mutable=False)
    total_excess_bucket_copies = IntField(is_mutable=False)
    total_excess_searchable_copies = IntField(is_mutable=False)
class ClusterMasterInfo(SplunkAppObjModel):
    '''
    Represents a master node's state
    '''

    resource = 'cluster/master/info'

    active_bundle = DictField(is_mutable=False)
    apply_bundle_status = DictField(is_mutable=False)
    indexing_ready_flag = BoolField(is_mutable=False)
    initialized_flag = BoolField(is_mutable=False)
    label = Field(is_mutable=False)
    latest_bundle = DictField(is_mutable=False)
    maintenance_mode = BoolField(is_mutable=False)
    multisite = BoolField(is_mutable=False)
    rolling_restart_flag = BoolField(is_mutable=False)
    service_ready_flag = BoolField(is_mutable=False)
    start_time = IntField(is_mutable=False)
Example #23
class AlertField(StructuredField):
    '''
    Represents the saved search alerting configuration
    '''
    class SuppressAlertField(StructuredField):
        '''
        Represents the suppression configuration for saved search alerting
        configuration
        '''
        enabled = BoolField('alert.suppress')
        period = Field()
        fieldlist = Field('alert.suppress.fields')

    type = Field('alert_type')
    comparator = Field('alert_comparator')
    threshold = Field('alert_threshold')
    condition = Field('alert_condition')
    suppress = SuppressAlertField()
    digest_mode = BoolField()
    expires = Field()
    severity = Field()
    fired_count = IntField('triggered_alert_count')
    track = BoolField()
Example #24
class Index(SplunkAppObjModel):
    '''
    Represents a Splunk index
    '''

    resource                  = 'data/indexes'

    assureUTF8                = BoolField()
    blockSignatureDatabase    = Field()
    blockSignSize             = IntField()
    bucketRebuildMemoryHint   = Field()
    coldPath                  = Field()
    coldPath_expanded         = Field()
    coldToFrozenDir           = Field()
    coldToFrozenScript        = Field()
    compressRawdata           = BoolField()
    currentDBSizeMB           = IntField()
    defaultDatabase           = Field()
    disabled                  = BoolField()
    enableOnlineBucketRepair  = BoolField()
    enableRealtimeSearch      = BoolField()
    frozenTimePeriodInSecs    = IntField()
    homePath                  = Field()
    homePath_expanded         = Field()
    indexThreads              = Field()
    isInternal                = BoolField()
    isReady                   = BoolField()
    isVirtual                 = BoolField()
    lastInitSequenceNumber    = IntField()
    lastInitTime              = EpochField()
    maxBloomBackfillBucketAge = Field()
    maxBucketSizeCacheEntries = IntField()
    maxConcurrentOptimizes    = IntField()
    maxDataSize               = Field()
    maxHotBuckets             = IntField()
    maxHotIdleSecs            = IntField()
    maxHotSpanSecs            = IntField()
    maxMemMB                  = IntField()
    maxMetaEntries            = IntField()
    maxRunningProcessGroups   = IntField()
    maxRunningProcessGroupsLowPriority = IntField()
    maxTime                   = Field()
    maxTimeUnreplicatedNoAcks = IntField()
    maxTimeUnreplicatedWithAcks = IntField()
    maxTotalDataSizeMB        = IntField()
    maxWarmDBCount            = IntField()
    memPoolMB                 = Field()
    minRawFileSyncSecs        = Field()
    minStreamGroupQueueSize   = IntField()
    minTime                   = Field()
    partialServiceMetaPeriod  = IntField()
    processTrackerServiceInterval = IntField()
    quarantineFutureSecs      = IntField()
    quarantinePastSecs        = IntField()
    rawChunkSizeBytes         = IntByteField()
    repFactor                 = IntField()
    rotatePeriodInSecs        = IntField()
    serviceMetaPeriod         = IntField()
    serviceOnlyAsNeeded       = BoolField()
    serviceSubtaskTimingPeriod = IntField()
    suppressBannerList        = BoolField()
    sync                      = BoolField()
    syncMeta                  = BoolField()
    thawedPath                = Field()
    thawedPath_expanded       = Field()
    throttleCheckPeriod       = IntField()
    totalEventCount           = IntField()
Example #25
class F5TaskModel(SOLNAppObjModel):
    resource = 'configs/conf-f5_bigip_tasks'

    use_model_as_spec = True

    name = Field()
    description = Field(default_value="")
    servers = Field(default_value="")
    templates = Field(default_value="")
    index = Field(default_value="default")
    sourcetype = Field(default_value="f5_bigip")
    interval = IntField(default_value=1000)
    disabled = BoolField(default_value=1)

    def __str__(self):
        ret = super(F5TaskModel, self).__str__()

        ret += ", description: " + str(self.description) + \
               ", servers:" + str(self.servers) + \
               ", templates:" + str(self.templates) + \
               ", index:" + str(self.index) + \
               ", sourcetype:" + str(self.sourcetype) + \
               ", interval:" + str(self.interval) + \
               ", disabled:" + str(self.disabled)
        return ret

    def from_data(self, task_data):
        self.name = task_data.name
        self.description = task_data.description
        self.namespace = task_data.appName
        self.servers = task_data.servers
        self.templates = task_data.templates
        self.index = task_data.index
        self.sourcetype = task_data.sourcetype
        self.interval = task_data.interval
        self.disabled = task_data.disabled

        return self

    # for output purposes
    def to_dict(self):
        ret = dict()
        ret["name"] = self.name
        ret["description"] = self.description
        ret["appName"] = self.namespace
        ret["servers"] = self.servers
        ret["templates"] = self.templates
        ret["index"] = self.index
        ret["sourcetype"] = self.sourcetype
        ret["interval"] = self.interval
        ret["disabled"] = self.disabled

        ret["id"] = self.namespace + ":" + self.name
        try:
            ret["_removable"] = self.metadata.can_remove
        except Exception:
            ret["_removable"] = True

        for key in ret:
            if ret[key] is None:
                ret[key] = ''
        return ret

    def get_metadata(self):
        return {
            'index': self.index,
            'sourcetype': self.sourcetype,
            'global_interval': self.interval,
        }

    def get_server_keys(self):
        return [key.strip() for key in str(self.servers).split('|')
                ] if self.servers else []

    def get_template_keys(self):
        return [key.strip() for key in str(self.templates).split('|')
                ] if self.templates else []

    def get_hash(self):
        return hash((self.name, self.index, self.sourcetype, self.interval))
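
A small round-trip sketch (hedged: _TaskData is a hypothetical stand-in for whatever from_data() normally receives, and the (namespace, owner, name) constructor is assumed from the base model):

class _TaskData(object):
    # hypothetical source object with the attributes from_data() reads
    name, description, appName = "poll_ltm", "LTM pollers", "Splunk_TA_f5"
    servers, templates = "ltm1|ltm2", "tmpl_base"
    index, sourcetype, interval, disabled = "default", "f5_bigip", 1000, 0

task = F5TaskModel("Splunk_TA_f5", "nobody", "poll_ltm").from_data(_TaskData())
print(task.to_dict()["id"])          # -> "Splunk_TA_f5:poll_ltm"
print(task.get_server_keys())        # -> ['ltm1', 'ltm2']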
Example #26
class Backfill(SplunkAppObjModel):

    resource = 'backfill/dm_backfill'
    dedup = BoolField(default_value=False)
    description = Field()
    earliest = FloatField()
    index = Field(default_value='summary')
    latest = FloatField()
    maxjobs = IntField(default_value=1)
    namespace = Field(default_value='search')
    owner = Field()
    reverse = BoolField(default_value=False)
    saved_search = Field()
    seed = FloatField()
    status = IntField(default_value=0)
    totaljobs = IntField()

    def validate(self):
        if self.earliest > self.latest:
            self.errors.append(
                ['earliest', 'earliest time is after latest time'])
        if self.earliest < 0:
            self.errors.append(['earliest', 'invalid earliest time specified'])
        if self.latest < 0:
            self.errors.append(['latest', 'invalid latest time specified'])
        if int(self.maxjobs) > 4 or int(self.maxjobs) < 1:
            self.errors.append(['maxjobs', 'maxjobs must be in range 1-4'])
        if self.status and int(self.status) not in range(0, 3):
            self.errors.append(['status', 'status must be in range 0-2'])
        if self.description and len(self.description) > 1024:
            self.errors.append([
                'description',
                'description cannot be longer than 1024 characters'
            ])
        if len(self.errors) > 0:
            return False
        return True

    def human2epoch(self, earliest=None, latest=None):
        """ converts the browser time provided into epoch time """
        def passive_convert(time, pattern):
            """ private - try to parse the times without raising """

            new_time = None

            try:
                new_time = strptime(time, pattern)
            except ValueError:
                pass

            return new_time

        if not earliest and not latest:
            return False

        temp = {'earliest': earliest, 'latest': latest}

        for x in temp:
            if temp[x]:
                new_ts = None
                # Formats handled, in order:
                # 1: Tues Jan 21 2011 12:34:56 GMT-0200 (Foobar Standard Time)
                # 2: Tues Jan 21 2011 12:34:56 GMT+0200 (Foobar Standard Time)
                # 3: Tues Jan 21 2011 12:34:56 -0200 (FST)
                # 4: Tues Jan 21 12:34:56 GMT 2011
                combinations = [
                    [temp[x].split('(')[0].split('-')[0],
                     '%a %b %d %Y %H:%M:%S %Z'],
                    [temp[x].split('(')[0].split('+')[0],
                     '%a %b %d %Y %H:%M:%S %Z'],
                    [' '.join([' '.join(temp[x].split(' ')[:-2]),
                               temp[x].split(' ')[-1][1:-1]]),
                     '%a %b %d %Y %H:%M:%S %Z'],
                    [temp[x], '%a %b %d %H:%M:%S %Z %Y'],
                ]

                for pair in combinations:
                    new_ts = passive_convert(pair[0], pair[1])
                    if new_ts:
                        break

                if not new_ts:
                    return False

                if x in ['earliest']:
                    self.earliest = float(mktime(new_ts))
                else:
                    self.latest = float(mktime(new_ts))
        return True
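
An illustrative call (hedged: the timestamps are made-up browser strings in format 1 above, and the (namespace, owner, name) constructor is assumed from the base model):

b = Backfill('search', 'admin', 'nightly_backfill')
if b.human2epoch(earliest='Tue Jan 18 2011 12:34:56 GMT-0200 (FST)',
                 latest='Tue Jan 25 2011 12:34:56 GMT-0200 (FST)'):
    print(b.earliest, b.latest)      # both set in place as epoch floats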
Example #27
class ScriptedInput(Input):

    resource = 'data/inputs/script'
    interval = IntField()
    pass_auth = Field(api_name='passAuth')
class ClusterConfig(SplunkAppObjModel):
    '''
    Represents the current node
    '''
    resource = 'cluster/config'

    cxn_timeout = IntField()
    disabled = BoolField()
    forwarderdata_rcv_port = IntField()
    forwarderdata_use_ssl = BoolField()
    heartbeat_period = IntField()
    heartbeat_timeout = IntField()
    master_uri = Field()
    max_peer_build_load = IntField()
    max_peer_rep_load = IntField()
    mode = Field()
    multisite = BoolField()
    percent_peers_to_restart = IntField()
    ping_flag = BoolField()
    quiet_period = IntField()
    rcv_timeout = IntField()
    register_forwarder_address = Field()
    register_replication_address = Field()
    register_search_address = Field()
    rep_cxn_timeout = IntField()
    rep_max_rcv_timeout = IntField()
    rep_max_send_timeout = IntField()
    rep_rcv_timeout = IntField()
    rep_send_timeout = IntField()
    replication_factor = IntField()
    replication_port = IntField()
    replication_use_ssl = BoolField()
    restart_timeout = IntField()
    search_factor = IntField()
    search_files_retry_timeout = IntField()
    secret = Field()
    send_timeout = IntField()
    site = Field()
class HDFSExport(SavedSearch):

    resource = 'hdfs_export'
    search = Field()
    uri = Field()
    base_path = Field()
    starttime = IntField()
    next_scheduled_time = Field()
    cron_schedule = Field()
    parallel_searches = Field()
    partition_fields = Field()
    status = Field()
    compress_level = Field()
    firstevent = EpochField('status.earliest')
    lastevent = EpochField('status.latest')
    jobexporterrors = Field('status.jobs.errors')
    jobprogess = FloatField('status.jobs.progress')
    jobruntime = FloatField('status.jobs.runtime')
    jobstart = EpochField('status.jobs.starttime')
    jobend = EpochField('status.jobs.endtime')
    jobearliest = EpochField('status.jobs.earliest')
    load = Field('status.load')
    export_sids = Field('status.jobs.sids')  # comma delimited list
    scheduled_sid = Field('status.jobs.psid')
    maxspan = Field()
    minspan = Field()
    roll_size = Field()
    format = Field()
    fields = Field()

    def get_export_factor(self):
        if self.load is None or len(self.load) == 0:
            return 0
        loads = self.load.split(',')
        return float(loads[0])

    def get_percent_complete(self):
        '''
            returns the ratio of the indexed event time span (first to last
            event) to the span from the first event until now.
        '''
        try:
            indexTimeSpan = time.mktime(
                self.lastevent.timetuple()) - time.mktime(
                    self.firstevent.timetuple())
            exportedIndexTimeSpan = time.time() - time.mktime(
                self.firstevent.timetuple())
            return indexTimeSpan / exportedIndexTimeSpan
        except Exception:
            return 0

    def getErrors(self):
        if self.jobexporterrors is None or len(self.jobexporterrors) == 0:
            return []

        err = self.jobexporterrors.split(',')
        return err

    def isPaused(self):
        return not self.schedule.is_scheduled

    def execute_action(self, action_name):
        if not self.action_links:
            return False
        url = None
        url_base = None
        for item in self.action_links:
            if action_name == item[0]:
                url = item[1]
                break
            elif item[0] == 'list':
                url_base = item[1]

        # fallback: build the action url ourselves when the endpoint does not provide it
        if url is None and url_base is not None:
            url = url_base + '/' + action_name

        if url is None:
            return False

        response, content = rest.simpleRequest(url, method='POST')
        return response.status == 200

    def pause(self):
        return self.execute_action('pause')

    def resume(self):
        return self.execute_action('resume')

    def force(self):
        #TODO: return the actual search id of the spawned search job
        return self.execute_action('force')

    def hasPartitionField(self, field):
        fields = self.partition_fields.split(',')
        return field in fields

    @classmethod
    def parse_except_messages(cls, e):
        return HDFSAppObjModel.parse_except_messages(e)
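
A brief lifecycle sketch for the action helpers above (hedged: the export name is made up, and get()/build_id() are assumed to be the framework's usual accessors):

export = HDFSExport.get(HDFSExport.build_id('nightly_export', 'HadoopConnect', 'nobody'))
if export.isPaused():
    export.resume()                  # POSTs to the 'resume' action link via execute_action()
for err in export.getErrors():
    print(err)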
Example #30
class TAVMwareCollectionStanza(SOLNAppObjModel):
    '''
    Provides object mapping for the TA-vmware collection stanzas.
    The conf file is used to determine what jobs are to be done on what hosts.
    '''

    resource = 'configs/conf-ta_vmware_collection'

    use_model_as_spec = True

    #The target resource on which to apply the job, i.e. the vCenter uri or unmanaged host uri
    target = CSVField()
    #The username to use on all targets for auth purposes
    username = Field()
    #The types of the jobs to be executed, each matching the capability of a worker that can execute it; this is a comma delimited list of tasks
    #VMware tasks include hostvmperf, otherperf, inv, task, event
    task = CSVField()
    #The realm associated with the credential; though unused in VMware, it must be present for hydra
    realm = Field()
    #The tasks considered atomic; a config token for such a task cannot generate a new job while a previously generated job from the same config token is in progress
    atomic_tasks = CSVField()
    #We use wildcard fields for priority; since they are rare, we do not want them to show up in metadata unnecessarily (see the sketch after this class)
    wildcard_fields = {
        "priority":
        WildcardField(re.compile(".+_priority$"), IntField()),
        "confirmation_expiration":
        WildcardField(re.compile(".+_confirmation_expiration$"), IntField())
    }

    #These are the destination indexes for the different data types
    perf_index = Field()
    inv_index = Field()
    taskevent_index = Field()
    #The following are internal fields used by the app to determine state for GUI configuration
    credential_validation = BoolField()
    target_type = Field()
    #The following are filters for performance data
    managed_host_whitelist = Field()
    managed_host_blacklist = Field()
    host_metric_whitelist = CSVField()
    host_metric_blacklist = CSVField()
    host_instance_whitelist = CSVField()
    host_instance_blacklist = CSVField()
    vm_metric_whitelist = CSVField()
    vm_metric_blacklist = CSVField()
    vm_instance_whitelist = CSVField()
    vm_instance_blacklist = CSVField()
    rp_metric_whitelist = CSVField()
    rp_metric_blacklist = CSVField()
    rp_instance_whitelist = CSVField()
    rp_instance_blacklist = CSVField()
    cluster_metric_whitelist = CSVField()
    cluster_metric_blacklist = CSVField()
    cluster_instance_whitelist = CSVField()
    cluster_instance_blacklist = CSVField()
    perf_entity_blacklist = CSVField()
    # perf format type; its value should be 'csv' or 'normal'
    perf_format_type = Field()
    # For HostSystem inventory only config.hyperThread is collected by default; this field
    # holds comma delimited additional attributes that need to be collected
    hostsystem_inv_config = CSVField()
    #The following are the collection intervals for particular tasks
    hostvmperf_interval = IntField()
    otherperf_interval = IntField()
    inv_interval = IntField()
    task_interval = IntField()
    event_interval = IntField()
    hierarchyinv_interval = IntField()
    hostinv_interval = IntField()
    vminv_interval = IntField()
    clusterinv_interval = IntField()
    datastoreinv_interval = IntField()
    rpinv_interval = IntField()
    # maxObjectUpdates count value for the waitForUpdates API call, which decides the max number of objects in the SOAP response
    inv_maxObjUpdates = IntField()
    #The following are the expiration periods for particular tasks
    hostvmperf_expiration = IntField()
    otherperf_expiration = IntField()
    inv_expiration = IntField()
    task_expiration = IntField()
    event_expiration = IntField()
    hierarchyinv_expiration = IntField()
    hostinv_expiration = IntField()
    vminv_expiration = IntField()
    clusterinv_expiration = IntField()
    datastoreinv_expiration = IntField()
    rpinv_expiration = IntField()
    # The following fields enable datagen and set the powered-off VM count among auto generated VMs.
    # Set autoeventgen = true if random moids and names should be generated for hosts and vms to reach the vCenter limit (1000 hosts, 10000 vms).
    # These fields are used for internal purposes only.
    autoeventgen = BoolField()
    autoeventgen_poweroff_vmcount = IntField()
    deployment_type = Field()
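
To illustrate the wildcard_fields mechanism above (hedged: the stanza keys are hypothetical; the regexes are the ones declared on the model): any conf key matching a wildcard pattern is captured as an IntField without being declared explicitly.

# Hypothetical keys in a ta_vmware_collection stanza:
#   hostvmperf_priority = 5             -> matched by ".+_priority$", read as int 5
#   inv_confirmation_expiration = 600   -> matched by ".+_confirmation_expiration$", read as int 600
#   hostvmperf_interval = 300           -> an explicitly declared IntField, not a wildcard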