def connect(self, username, password, namespace=None):
    """Open a new low-level binding context against this instance's
    scheme/host/port, authenticating with the supplied credentials.

    :param username: Splunk account name.
    :param password: Splunk account password.
    :param namespace: Optional app/owner namespace for the session.
    :return: the context produced by ``binding.connect``.
    """
    target = self.context
    session_args = {
        'scheme': target.scheme,
        'host': target.host,
        'port': target.port,
        'username': username,
        'password': password,
        'namespace': namespace,
    }
    return binding.connect(**session_args)
def main(argv):
    """Smoke test: every core Service endpoint must answer HTTP 200.

    Asserts fire in order, so the first failing endpoint stops the run
    before any later request is issued (same as the original).
    """
    opts = parse(argv, {}, ".splunkrc")
    context = connect(**opts.kwargs)
    service = Service(context)
    # No-argument endpoints, probed in the original order.
    for make_request in (service.apps, service.indexes,
                         service.info, service.settings):
        assert make_request().status == 200
    # Finally, a trivial search must also succeed.
    assert service.search("search 404").status == 200
def main(argv): """ main entry """ usage = 'usage: %prog --help for options' opts = utils.parse(argv, RULES, ".splunkrc", usage=usage) context = binding.connect(**opts.kwargs) operation = None # splunk.binding.debug = True # for verbose information (helpful for debugging) # Extract from command line and build into variable args kwargs = {} for key in RULES.keys(): if opts.kwargs.has_key(key): if key == "operation": operation = opts.kwargs[key] else: kwargs[key] = urllib.quote(opts.kwargs[key]) # no operation? if name present, default to list, otherwise list-all if not operation: if kwargs.has_key('name'): operation = 'list' else: operation = 'list-all' # pre-sanitize if (operation != "list" and operation != "create" and operation != "delete" and operation != "list-all"): print "operation %s not one of list-all, list, create, delete" % operation sys.exit(0) if not kwargs.has_key('name') and operation != "list-all": print "operation requires a name" sys.exit(0) # remove arg 'name' from passing through to operation builder, except on create if operation != "create" and operation != "list-all": name = kwargs['name'] kwargs.pop('name') # perform operation on saved search created with args from cli if operation == "list-all": result = context.get("saved/searches", **kwargs) elif operation == "list": result = context.get("saved/searches/%s" % name, **kwargs) elif operation == "create": result = context.post("saved/searches", **kwargs) else: result = context.delete("saved/searches/%s" % name, **kwargs) print "HTTP STATUS: %d" % result.status xml_data = result.body.read() sys.stdout.write(xml_data)
def test(self):
    """Fetch a few core Atom endpoints through both the default handler
    and the urllib2-based handler, verifying each body parses as Atom."""
    global opts
    endpoints = ["/services", "authentication/users", "search/jobs"]
    # Same handler order as before: default first, then urllib2.
    for http_handler in (binding.handler(), urllib2_handler):
        context = binding.connect(handler=http_handler, **opts.kwargs)
        for endpoint in endpoints:
            payload = context.get(endpoint).body.read()
            self.assertTrue(isatom(payload))
# License for the specific language governing permissions and limitations # under the License. """Retrieves a list of installed apps from Splunk using the binding module.""" from xml.etree import ElementTree import splunk.binding as binding HOST = "localhost" PORT = 8089 USERNAME = "******" PASSWORD = "******" context = binding.connect( host=HOST, port=PORT, username=USERNAME, password=PASSWORD) response = context.get('apps/local') if response.status != 200: raise Exception, "%d (%s)" % (response.status, response.reason) body = response.body.read() data = ElementTree.XML(body) apps = data.findall("{http://www.w3.org/2005/Atom}entry/{http://www.w3.org/2005/Atom}title") for app in apps: print app.text
def application(environ, start_response):
    """ The splunk proxy processor (WSGI entry point).

    Routes /Catalog requests to OData catalog helpers, search queries to
    splunkd's job endpoints, and passes everything else straight through,
    returning splunkd's (possibly fixed-up) Atom/XML body.
    """
    debug_connect(environ)

    # extract some basic HTTP/WSGI info
    endpoint = environ["PATH_INFO"]
    query = environ["QUERY_STRING"]

    # perform idempotent login/connect -- get login creds from ~/.splunkrc
    opts = utils.parse(sys.argv[1:], {}, ".splunkrc")
    connection = connect(**opts.kwargs)

    # get lower level context
    context = binding.connect(
        host=connection.host,
        username=connection.username,
        password=connection.password)

    ##
    ## here we can/should/must look up the endpoint and decide what operation
    ## needs to be done -- for now we simply "get" for basic urls, and
    ## look for a special "search" in the query (if present) and build a job
    ## out of it. We also look for special Odata (excel) Catalog lookups
    ##
    ## in particular, we want to look for:
    ##
    ## /services/search/jobs
    ##
    if endpoint.lower() == "/catalog" or endpoint.lower() == "/catalog/":
        # here we fabricate a catalog endpoint
        trace("OData catalog get")
        (data, body) = get_splunk_catalog(context)
    elif endpoint.lower().find("/catalog/") == 0:
        # we have a full-on catalog query/search
        # remove the pre-pended catalog
        endpoint = endpoint.replace("/Catalog", "")
        title = endpoint
        # quote the special characters, and post the search
        endpoint = urllib.quote(endpoint)
        trace("OData catalog dispatch: %s" % endpoint)
        data = post_catalog_search(context, endpoint)
        # fixup query results
        body = str(data.body.read())
        trace("Splunk generated Atom/XML before fixup is:")
        trace(body)
        body = fix_xml(body, title)
    elif query:
        trace("raw query: base: %s, query: %s" % (endpoint, query))
        ##
        ## sanitize query, and issue
        ##
        ## this is a little awkward, browsers and BI apps seem to sanitize the
        ## query string(s) which doesn't get accepted by splunkd. So we unquote
        ## the original and rebuild it the way we would like to see it.
        ##
        query = urllib.unquote(query)
        endpoint = urllib.quote(endpoint)
        if endpoint == "/services/search/jobs":
            data = post_query(context, endpoint, query=query)
            # fixup query results
            body = fix_xml(data.body.read())
        else:
            data = context.get(endpoint, search=query)
            body = data.body.read()
    else:
        # catch all for passthrough
        trace("request passthrough endpoint: %s" % endpoint)
        data = context.get(endpoint)
        body = data.body.read()

    # extract the status and headers from the splunk operation
    status = str(data["status"]) + " " + data["reason"]
    headers = data["headers"]

    trace("Returning Atom/XML:\n")
    trace(body + "\n")

    ##
    ## clean hop-by-hop from headers (described in section 13.5.1 of RFC2616),
    ## and adjust the header length if modified by fix_xml()
    ##
    ## BUGFIX: the previous loop removed items from `headers` while
    ## iterating over it, which skips the element following each removal
    ## and could leave hop-by-hop headers behind; build a filtered list
    ## instead so every connection/content-length header is dropped.
    headers = [header for header in headers
               if header[0] != "connection" and header[0] != "content-length"]
    headers.insert(0, ("content-length", str(len(body))))

    # start the response (retransmit the status and headers)
    start_response(status, headers)
    return [body]
def setUp(self):
    """Open a fresh low-level binding context from the shared CLI options."""
    global opts
    connect_args = opts.kwargs
    self.context = binding.connect(**connect_args)
def main(): """ main entry """ # perform idmpotent login/connect -- get login creds from ~/.splunkrc # if not specified in the command line arguments. options = parse(sys.argv[1:], CLIRULES, ".splunkrc") if options.kwargs['omode'] not in OUTPUT_MODES: print "output mode must be one of %s, found %s" % (OUTPUT_MODES, options.kwargs['omode']) sys.exit(1) # minor sanity check on start/end time try: int(options.kwargs['start']) int(options.kwargs['end']) except ValueError: print "ERROR: start and end times most be expressed as an integer." print " An integer that represents seconds from 1970." sys.exit(1) connection = connect(**options.kwargs) # get lower level context. context = binding.connect( host=connection.host, username=connection.username, password=connection.password) # open restart file. rfd = None try: rfd = open(RESTART_FILE, "r") except IOError: pass # check request and environment for sanity. if options.kwargs['restart'] is not False and rfd is None: print "Failed to open restart file %s for reading" % RESTART_FILE sys.exit(1) elif options.kwargs['restart'] is False and rfd is not None: print "Warning: restart file %s exists." % RESTART_FILE print " manually remove this file to continue complete export" print " or use --restart=1 to continue export" sys.exit(1) else: pass # close restart file. if rfd is not None: rfd.close() # normalize buckets to contain no more than "limit" events per bucket # however, there may be a situation where there will be more events in # our smallest bucket (one second) -- but there is not much we are going # to do about it. bucket_list = normalize_export_buckets(options, context) # # if we have a restart in progress, we should spend some time to validate # the export by examining the last bit of the exported file versus the # restart log we have so far. 
# if options.kwargs['restart'] is not False: (bucket_list, sane) = validate_export(options, bucket_list) if sane is False: print "Failed to validate export, consistency check failed" sys.exit(1) # open export for writing, unless we are restarting the export, # In which case we append to the export. mode = "w" if options.kwargs['restart'] is not False: mode = "a" try: options.kwargs['fd'] = open(options.kwargs['output'], mode) except IOError: print "Failed to open output file %s w/ mode %s" % \ (options.kwargs['output'], mode) sys.exit(1) # chunk through each bucket, and on success, remove the restart file. if export(options, context, bucket_list) is True: os.remove(RESTART_FILE)