def __init__(self, context):
    """Initialize the Consul-backed controller and attach a local client."""
    super(ConsulController, self).__init__(context)
    consul_addr = 'localhost:8500'
    # Patroni discovers the Consul agent through this environment variable.
    os.environ['PATRONI_CONSUL_HOST'] = consul_addr
    self._client = consul.Consul()
import requests
from bs4 import BeautifulSoup
import pymysql
from datetime import datetime, timedelta
import threading
import time
from kafka import KafkaProducer
from json import dumps
from json import loads
import consul

# Fetch runtime configuration from the Consul KV store (comment translated:
# "fetch consul kv").
c = consul.Consul(host='54.152.246.15', port=8500)
index = None
index, data = c.kv.get('db_config', index=index)
# data['Value'] is raw bytes containing JSON; loads() decodes it to a dict.
db_config = loads(data['Value'])
print(db_config)
# Stock code table (comment translated: "stock number").
index, data = c.kv.get('stock_code2', index=index)
stock_code = loads(data['Value'])
print(stock_code)
# Stock id of the instrument this worker scrapes (translated: "stock number").
stock_id = 2
# URL template and headers for scraping real-time quotes from Naver Finance.
url = 'https://finance.naver.com/item/sise_time.nhn?code={}&thistime={}&page={}'
header = {
    'User-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36"
    # NOTE(review): the dict literal continues beyond this chunk of the file.
# # h.add_metric(["labels1", "labels2"], [["1", 1], ["2", 1]], 2) # h.add_metric(["labels1"], [["1", 1], ["2", 1]], 2) # yield h # i = InfoMetricFamily("InfoMetricFamily", # 'InfoMetricFamily text', # labels=['InfoMetricFamily']) # i.add_metric(["InfoMetricFamily example"], # {"InfoMetricFamily example": '123'}) # yield i while True: try: c = consul.Consul(host=os.environ.get('CONSUL_HOST') if os.environ.get('CONSUL_HOST') else '127.0.0.1', port=23345, scheme='http') hostname = socket.gethostname() ip = socket.gethostbyname(hostname) c.agent.service.register(name=hostname + '_agent', address=ip, port=23310, tags=['sapspa_agent', 'sapspa'], enable_tag_override=True) c.agent.service.register(name=hostname + '_host', address=ip, port=23311, tags=['host', 'sapspa'], enable_tag_override=True) break
def test_agent_checks(self, consul_port):
    """Exercise agent check registration, TTL state transitions, and deregistration."""
    c = consul.Consul(port=consul_port)

    def verify_and_dereg_check(check_id):
        # Agent must hold exactly this one check; none remain after deregister.
        assert set(c.agent.checks().keys()) == set([check_id])
        assert c.agent.check.deregister(check_id) is True
        assert set(c.agent.checks().keys()) == set([])

    def verify_check_status(check_id, status, notes=None):
        # Assert the check's current Status (and Output, when notes given).
        checks = c.agent.checks()
        assert checks[check_id]['Status'] == status
        if notes:
            assert checks[check_id]['Output'] == notes

    # test setting notes on a check
    c.agent.check.register('check', ttl='1s', notes='foo')
    assert c.agent.checks()['check']['Notes'] == 'foo'
    c.agent.check.deregister('check')
    assert set(c.agent.checks().keys()) == set([])

    assert c.agent.check.register(
        'script_check', script='/bin/true', interval=10) is True
    verify_and_dereg_check('script_check')

    assert c.agent.check.register(
        'check name', check_id='check_id', script='/bin/true', interval=10) is True
    verify_and_dereg_check('check_id')

    assert c.agent.check.register('ttl_check', ttl='100ms') is True

    assert c.agent.check.ttl_warn('ttl_check') is True
    verify_check_status('ttl_check', 'warning')
    assert c.agent.check.ttl_warn('ttl_check', notes='its not quite right') is True
    verify_check_status('ttl_check', 'warning', 'its not quite right')

    assert c.agent.check.ttl_fail('ttl_check') is True
    verify_check_status('ttl_check', 'critical')
    assert c.agent.check.ttl_fail('ttl_check', notes='something went boink!') is True
    verify_check_status('ttl_check', 'critical', notes='something went boink!')

    assert c.agent.check.ttl_pass('ttl_check') is True
    verify_check_status('ttl_check', 'passing')
    assert c.agent.check.ttl_pass('ttl_check', notes='all hunky dory!') is True
    verify_check_status('ttl_check', 'passing', notes='all hunky dory!')

    # wait for ttl to expire
    time.sleep(120 / 1000.0)
    verify_check_status('ttl_check', 'critical')
    verify_and_dereg_check('ttl_check')

    # script/interval and ttl are mutually exclusive registration options.
    pytest.raises(AssertionError, c.agent.check.register,
                  'check_id', script='/bin/true', ttl=50)
    pytest.raises(AssertionError,
                  c.agent.check.register, 'check_id', interval=10, ttl=50)
def test_catalog(self, consul_port):
    """End-to-end exercise of the /catalog endpoints against a test agent."""
    c = consul.Consul(port=consul_port)

    # grab the node our server created, so we can ignore it
    _, nodes = c.catalog.nodes()
    assert len(nodes) == 1
    current = nodes[0]

    # test catalog.datacenters
    assert c.catalog.datacenters() == ['dc1']

    # test catalog.register (unknown datacenter must raise)
    pytest.raises(consul.ConsulException, c.catalog.register, 'foo',
                  '10.1.10.11', dc='dc2')
    assert c.catalog.register('n1', '10.1.10.11',
                              service={'service': 's1'},
                              check={'name': 'c1'}) is True
    assert c.catalog.register(
        'n1', '10.1.10.11', service={'service': 's2'}) is True
    assert c.catalog.register(
        'n2', '10.1.10.12',
        service={'service': 's1', 'tags': ['master']}) is True

    # test catalog.nodes
    pytest.raises(consul.ConsulException, c.catalog.nodes, dc='dc2')
    _, nodes = c.catalog.nodes()
    nodes.remove(current)
    assert [x['Node'] for x in nodes] == ['n1', 'n2']

    # test catalog.services
    pytest.raises(consul.ConsulException, c.catalog.services, dc='dc2')
    _, services = c.catalog.services()
    assert services == {'s1': [u'master'], 's2': [], 'consul': []}

    # test catalog.node
    pytest.raises(consul.ConsulException, c.catalog.node, 'n1', dc='dc2')
    _, node = c.catalog.node('n1')
    assert set(node['Services'].keys()) == set(['s1', 's2'])
    _, node = c.catalog.node('n3')
    assert node is None

    # test catalog.service
    pytest.raises(consul.ConsulException, c.catalog.service, 's1', dc='dc2')
    _, nodes = c.catalog.service('s1')
    assert set([x['Node'] for x in nodes]) == set(['n1', 'n2'])
    _, nodes = c.catalog.service('s1', tag='master')
    assert set([x['Node'] for x in nodes]) == set(['n2'])

    # test catalog.deregister
    pytest.raises(consul.ConsulException, c.catalog.deregister, 'n2', dc='dc2')
    assert c.catalog.deregister('n1', check_id='c1') is True
    assert c.catalog.deregister('n2', service_id='s1') is True
    # check the nodes weren't removed
    _, nodes = c.catalog.nodes()
    nodes.remove(current)
    assert [x['Node'] for x in nodes] == ['n1', 'n2']
    # check n2's s1 service was removed though
    _, nodes = c.catalog.service('s1')
    assert set([x['Node'] for x in nodes]) == set(['n1'])

    # cleanup
    assert c.catalog.deregister('n1') is True
    assert c.catalog.deregister('n2') is True
    _, nodes = c.catalog.nodes()
    nodes.remove(current)
    assert [x['Node'] for x in nodes] == []
def test_health_service(self, acl_consul): c = consul.Consul(port=acl_consul.port, token=acl_consul.token) # check there are no nodes for the service 'foo' index, nodes = c.health.service('foo') assert nodes == [] # register two nodes, one with a long ttl, the other shorter c.agent.service.register('foo', service_id='foo:1', check=Check.ttl('10s'), tags=['tag:foo:1']) c.agent.service.register('foo', service_id='foo:2', check=Check.ttl('100ms')) time.sleep(40 / 1000.0) # check the nodes show for the /health/service endpoint index, nodes = c.health.service('foo') assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2'] # but that they aren't passing their health check index, nodes = c.health.service('foo', passing=True) assert nodes == [] # ping the two node's health check c.agent.check.ttl_pass('service:foo:1') c.agent.check.ttl_pass('service:foo:2') time.sleep(40 / 1000.0) # both nodes are now available index, nodes = c.health.service('foo', passing=True) assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2'] # wait until the short ttl node fails time.sleep(120 / 1000.0) # only one node available index, nodes = c.health.service('foo', passing=True) assert [node['Service']['ID'] for node in nodes] == ['foo:1'] # ping the failed node's health check c.agent.check.ttl_pass('service:foo:2') time.sleep(40 / 1000.0) # check both nodes are available index, nodes = c.health.service('foo', passing=True) assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2'] # check that tag works index, nodes = c.health.service('foo', tag='tag:foo:1') assert [node['Service']['ID'] for node in nodes] == ['foo:1'] # deregister the nodes c.agent.service.deregister('foo:1') c.agent.service.deregister('foo:2') time.sleep(40 / 1000.0) index, nodes = c.health.service('foo') assert nodes == []
def test_health_node(self, acl_consul):
    """The local node must appear among its own health-check entries."""
    client = consul.Consul(port=acl_consul.port, token=acl_consul.token)
    # Ask the agent for its own node name, then look it up via /health/node.
    node_name = client.agent.self()['Config']['NodeName']
    index, checks = client.health.node(node_name)
    assert any(entry["Node"] == node_name for entry in checks)
def locked_action(key, f, c=None, session_ttl=None):
    """Run ``f(client, session_id)`` while holding a distributed Consul lock.

    The lock lives at ``key + '/.lock'`` and is guarded by a Consul session
    (optionally with ``session_ttl``).  Retries forever on connection or
    Consul errors.  Returns whatever ``f`` returns.  Relies on the
    module-level ``lockFlagValue`` flag constant (same convention as the
    Consul Go lock API).
    """
    lockKey = key + '/.lock'
    # Consistent reads so we never observe a stale lock state.
    c = c or consul.Consul(consistency='consistent')
    pid = os.getpid()
    while True:
        session = None
        try:
            now = datetime.datetime.now()
            session = c.session.create(name="consul-scripting-helper[" + str(pid) + "] locked_action " + key + " (" + str(now) + ")", ttl=session_ttl)
            # Acquire lock
            acquired = False
            index = None
            while not acquired:
                # Look for an existing lock, blocking until not taken
                logging.debug("issuing 'get'")
                # We have to wait less than the session TTL, otherwise our session will time out
                wait_seconds = session_ttl * 0.8 if session_ttl is not None else None
                index, data = c.kv.get(lockKey, index=index, wait=str(wait_seconds) + 's')
                logging.debug("'get' returned: {0}".format((index, data)))
                if session_ttl is not None:
                    logging.debug("renewing session: " + session)
                    c.session.renew(session)
                if data is not None:
                    if data['Flags'] != lockFlagValue:
                        # same as https://github.com/hashicorp/consul/blob/v0.8.3/api/lock.go#L197
                        raise Exception(
                            'Existing key does not match lock use (lockKey: {0})'
                            .format(lockKey))
                    if 'Session' in data:
                        # if somebody else has the lock we can't acquire it; same as https://github.com/hashicorp/consul/blob/v0.8.3/api/lock.go#L204
                        logging.debug(
                            "lock is already held by session '{0}', retrying".
                            format(data['Session']))
                        continue
                now = datetime.datetime.now()
                acquired = c.kv.put(lockKey, socket.gethostname() + ' (' + str(now) + ')', acquire=session, flags=lockFlagValue)
            # Now we have the lock.
            logging.debug("got lock")
            # We need to track the index at which we acquired it, so that we can pass
            # that to `delete` below.
            acquired_index, _ = c.kv.get(lockKey)
            logging.debug("lock acquired index is {0}, running command".format(
                acquired_index))
            try:
                # Perform action.
                res = f(c, session)
                logging.debug("lock command finished")
                return res
            except:
                logging.exception('Exception in locked action')
                raise
            finally:
                # Delete lock key (which will automatically release the lock)
                # We need to call `delete` with `cas=acquired_index`, so that we can
                # only delete if nobody else has acquired it since (otherwise we would
                # be deleting their lock under their feet).
                # We _should_ own the lock, but it may not be so if an operator or health
                # check forcefully took it away from us. In that case don't want
                # the program to continue as an assumption is violated, so we raise.
                # TODO Potentially we want to try-loop around this as well in case consul goes down.
                did_delete = c.kv.delete(lockKey, cas=acquired_index)
                if did_delete:
                    logging.debug("deleted lock")
                else:
                    raise Exception(
                        'lock deletion failed; perhaps an operator or health check took the lock way from us (lockKey: {0})'
                        .format(lockKey))
                c.session.destroy(session)
                session = None
        except ConnectionError as e:
            logging.warning(
                "got connection error when trying to connect to consul, retrying (exception was: {0})"
                .format(e))
        except consul.ConsulException as e:
            logging.warning(
                "got consul error in locked_action, retrying (exception was: {0})"
                .format(e))
        # TODO Potentially don't retry here and above if f() has already finished,
        # so that it doesn't get run twice
        except:
            raise
        finally:
            if session is not None:
                # This can only happen when an exception blew us up because otherwise
                # we'd have set `session = None` above.
                logging.error(
                    'Exception while holding consul session, destroying session'
                )
                c.session.destroy(session)
                session = None
        time.sleep(0.1)
def get_client(self):
    # type: () -> consul.Consul
    """Build a Consul client from ``self.client_options``, cache and return it."""
    import consul
    client = consul.Consul(**self.client_options)
    self._client = client
    return client
##create db connect connection = pymysql.connect(host="127.0.0.1",port=3306,user='******', password='******', db='consul', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) ##init length of table with connection.cursor() as cursor: sql = 'select count(*) from {}'.format(tbname) tblength =cursor.execute(sql) connection.commit() ##create consul object c = consul.Consul() while(1): #judge if have new service result = c.agent.services() servicemap = {} for i in result.values(): servicemap[i.get("Service")] = i.get("Address") print(27) if (len(servicemap)) > tblength: with connection.cursor() as cursor:
#!/usr/bin/env python
"""Dump libnetwork endpoint records stored in the Consul KV tree."""
import consul
import json

client = consul.Consul(host="172.28.128.5", port=8500)
# Recursive read of every endpoint record under the libnetwork prefix.
(idx, endpoints) = client.kv.get("network/docker/network/v1.0/endpoint/", recurse=True)
print(endpoints)

# Keep only entries that actually carry a payload.
payloads = [entry['Value'] for entry in endpoints if entry['Value'] is not None]
for raw in payloads:
    record = json.loads(raw.decode("utf-8"))
    print("Endpoint Name: %s" % record["name"])
    print("Interface: %s" % record["ep_iface"])
    # print("IP address: %s" % record["ep_iface"]["addr"])
    # print("MAC address: %s" % record["ep_iface"]["mac"])
    print("Locator: %s\n" % record["locator"])
def __init__(self, prefix=''):
    """Remember the key prefix and bind a client for the local Consul agent."""
    self.prefix = prefix
    self._client = consul.Consul()
from config import Config
import boto3
import requests
import consul

app = Flask(__name__)

# Setup the Flask-JWT-Extended extension
app.config['JWT_SECRET_KEY'] = Config.key
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = Config.access
app.config['JWT_REFRESH_TOKEN_EXPIRES'] = Config.refresh
jwt = JWTManager(app)

# Consul agent address — presumably the docker bridge network; verify in deployment.
client = consul.Consul(host='172.18.0.6', port=8500)

"""
These attributes are also available

file.filename               # The actual name of the file
file.content_type
file.content_length
file.mimetype
"""

def uploadToS3(local_path, bucket, s3_path):
    # Upload a local file to S3 using credentials taken from Config.
    s3 = boto3.resource(
        's3',
        aws_access_key_id=Config.AWS_ACCESS_KEY,
        # NOTE(review): the argument list continues beyond this chunk.
def setUp(self):
    """Skip the test unless the local Consul KV store is completely empty."""
    self.client = consul.Consul()
    _index, existing = self.client.kv.get('', recurse=True)
    if existing:
        self.skipTest('Consul contains unknown values!')
def test_kv_wait(self, acl_consul):
    """A blocking kv.get that times out returns the unchanged modify index."""
    client = consul.Consul(port=acl_consul.port, token=acl_consul.token)
    assert client.kv.put('foo', 'bar') is True
    first_index, data = client.kv.get('foo')
    # Nothing writes 'foo' meanwhile, so the short wait elapses with no change.
    second_index, data = client.kv.get('foo', index=first_index, wait='20ms')
    assert first_index == second_index
def __init__(self, host=None, port=None, token=None):
    """Store connection settings and build an authenticated Consul client.

    Fix: the ACL ``token`` was stored on ``self`` but never handed to
    ``consul.Consul``, leaving the client unauthenticated.  It is now
    forwarded; ``consul.Consul`` defaults ``token`` to ``None``, so callers
    that never pass one see identical behavior.
    """
    self.host = host
    self.port = port
    self.token = token
    self.consul = consul.Consul(host=host, port=port, token=token)
    # Tracks whether this instance has registered a service yet.
    self.registered = False
def test_agent_self(self, acl_consul):
    """agent.self() exposes exactly the expected top-level sections."""
    client = consul.Consul(port=acl_consul.port, token=acl_consul.token)
    expected_sections = {'Member', 'Stats', 'Config', 'Coord', 'DebugConfig', 'Meta'}
    assert set(client.agent.self().keys()) == expected_sections
import docker
import json
import consul
import logging
import requests

logging.basicConfig(
    filename='/var/log/docker-events/docker-events-consul.log',
    level=logging.INFO,
    format=' [%(levelname)s] %(asctime)s (%(threadName)-10s) %(message)s')

# Configuration
consul_host = '172.17.0.2'
client = docker.from_env()
# NOTE(review): this rebinds the name `consul` from the imported module to a
# client instance, shadowing the module for the rest of the file.
consul = consul.Consul(host=consul_host)

# Entities
class ContainerMetadata:
    # Value object describing a running container as published to Consul.
    def __init__(self, id, short_id, name, image_name, version, ip):
        self.id = id
        self.short_id = short_id
        self.name = name
        self.image_name = image_name
        self.version = version
        self.ip = ip

    def toJSON(self):
        # Serialize all instance attributes as a JSON object.
        return json.dumps(self, default=lambda o: o.__dict__,
        # NOTE(review): the argument list continues beyond this chunk.
def test_health_state(self, acl_consul): c = consul.Consul(port=acl_consul.port, token=acl_consul.token) # The empty string is for the Serf Health Status check, which has an # empty ServiceID index, nodes = c.health.state('any') assert [node['ServiceID'] for node in nodes] == [''] # register two nodes, one with a long ttl, the other shorter c.agent.service.register('foo', service_id='foo:1', check=Check.ttl('10s')) c.agent.service.register('foo', service_id='foo:2', check=Check.ttl('100ms')) time.sleep(40 / 1000.0) # check the nodes show for the /health/state/any endpoint index, nodes = c.health.state('any') assert set([node['ServiceID'] for node in nodes]) == {'', 'foo:1', 'foo:2'} # but that they aren't passing their health check # continuation line over-indented for visual indent index, nodes = c.health.state('passing') assert [node['ServiceID'] for node in nodes] != 'foo' # ping the two node's health check c.agent.check.ttl_pass('service:foo:1') c.agent.check.ttl_pass('service:foo:2') time.sleep(40 / 1000.0) # both nodes are now available index, nodes = c.health.state('passing') assert set([node['ServiceID'] for node in nodes]) == {'', 'foo:1', 'foo:2'} # wait until the short ttl node fails time.sleep(2200 / 1000.0) # only one node available index, nodes = c.health.state('passing') assert set([node['ServiceID'] for node in nodes]) == {'', 'foo:1'} # ping the failed node's health check c.agent.check.ttl_pass('service:foo:2') time.sleep(40 / 1000.0) # check both nodes are available index, nodes = c.health.state('passing') assert set([node['ServiceID'] for node in nodes]) == {'', 'foo:1', 'foo:2'} # deregister the nodes c.agent.service.deregister('foo:1') c.agent.service.deregister('foo:2') time.sleep(40 / 1000.0) index, nodes = c.health.state('any') assert [node['ServiceID'] for node in nodes] == ['']
def __init__(self, *args, **kwargs):
    """Create the backing Consul client, then seed initial entries via update().

    Accepts the same signature as ``dict()``: an optional mapping/iterable
    plus keyword pairs, all forwarded to ``update``.
    """
    self.__prefix = ''
    self.__consul = consul.Consul()
    self.update(*args, **kwargs)
def test_connect(self, acl_consul):
    """Exercise the Connect CA certificate and intention endpoints."""
    c = consul.Consul(port=acl_consul.port, token=acl_consul.token)

    # The bootstrap CA root is present and the built-in provider is active.
    one_root_name = c.connect.certificates.list()['Roots'][0]['Name']
    assert one_root_name == 'Consul CA Root Cert'
    assert c.connect.certificates.current()['Provider'] == 'consul'

    # Replace the CA configuration with a fixed test key/cert pair.
    config = {
        "LeafCertTTL": "72h",
        "PrivateKey": """-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAvhOI639ZxhUSCtUycUDNSgrlcJoMebPPEYzhP6R0tGgTA/iK
mZfQkH5KiI6+DSbPOm+4XAKl0uhRc/IzO93G9cey9WECRR/4MvErWx26AWVIikKi
HigielCZkDBM8UwdVGuCJEteePr0MCHZufBb+n/dJCIQoW8zAxrf17j6psGUIQ4q
vMHe1NoeUTRDyQRvifTfPnal+Du01WWmo4xSSNwmz0P2BMK7dKUqSWO/+tjvqsRM
PJ3iK4RdKrFNxYtEvXs1oxmnuAA5N5+N3dp40YINwNS6U8HDyenRdoDz9+L00rKr
4LqfxT/7fhsUWTp66NSCJ5FB+HvVNOVMYJCdcQIDAQABAoIBAFKbJsxCIn/ltFuk
4DE8GnvtVF2ldXqG3J0h/tqr3FRzTqBRKdsqkWfamwXSE2Kf1TvuRc8nD24jrRKY
1g9UiR1T1sCxkkz8Yd+x1EfbGXV10RrVbqNwidtin/p8I1RS4gRIXNztLHejiPBK
JdebUD4Dx9XmQBgIvcDBszqLl9NUWRP7WIAlJIDGWB97dOMNUIkfVoCMB2sRtB9S
NhCAOhCWU2ogiAK/VltLSKuMFS4O+JA7L0tVIRVTNzTV0qgUhwEN+jzXOxr+o3ME
h+OS3HvKadnogjm763+z1lSkltmV2aBSLkQ/rdrGYdowOROukSWBGvBLAVADKQNe
KAiLRukCgYEA9C9Zntb9nqaB6vUUbw3UBg4l1ZOvP7TT7okPi2gALcQGvXJszyM7
6iYVZzc5WDMlJDF2rK2NmSc0r0PxDtod5Yx6+NZNJxEoIqSlQ9EuZb6StAXKWwUp
j+MddQQYrWJ/XRNoyVw/3AeXJGJ/bbyOEdGQdAD1fR/d8E4x60LBcLsCgYEAx0X0
yhBkO9BEYflQBaPapqcmkUyPflwPCHWpKYkKXe2BhC3ecvHtRbvFRxTIjdWmimzG
qWe6TQ6y50yTxu3OtB1zr/wLlO1Lnh5GpO/CdFZyHPMx9G+ZACa6VKQRRnAr/89u
21K4w4n+XZ5+FiM6oZuNCPFvaDSPpdFVmkG2zcMCgYEAs7VFV8kRznjslGbaKwEa
UXqIDa8rAs+qSZtFfWuE9imWbEaSHb/okGKGzfeulGLPqttcY6k21Mgt1e9CsZOE
/TNuLURKc74Es7pIhqikZOzkOukoC+S4udSEEWJ6ZuBeq7llHQxvUxei/HwhHDnw
tNxuW3F5OiUByaFCy8nNY5ECgYB+bp9CeSXvYVORyeoAuhZneFQbCjVc2bHcDzMW
HFHMiH2NTMD/swl1Hg6r677XZr/0wdFF+H8WlwXhhQ+ZvpZQZwNYnBUJF4nLt6/g
xBJfV92tCoweZ5PuqaO9Qh+HznHMK2NNmm2J+wq9gWrERvPTR2szSmQB/J9LeMbU
SW9fmwKBgQDcZK4R3w+WdUGnmbAPlz2x28Ehfzt39C/9QYC6UIKaZhSgdC5VAT22
ggrb4edVeP4mYaHoEJWzhyPiH4ya0FPJyqD0TVnSPR5tLSGW8Co3M8ctUuTZGWYf
Hc0PLtVZSiTaTGz8MXYhOIdqthRl/MkkGeW/ixMHRzafJ0fxaBVegQ==
-----END RSA PRIVATE KEY-----""",
        "RootCert": """-----BEGIN CERTIFICATE-----
MIIDXTCCAkWgAwIBAgIJAKCHvfmguKnYMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQwHhcNMTkxMDA5MDQyMDAyWhcNMjAxMDA4MDQyMDAyWjBF
MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50
ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEApW4SAb1x/+M4djJV1J8RJUb6j3UI3ODE7dJc+byLSG8eUA/aPVKtyQeD
V4qQsTEJgcK+uv/bca0AfSDpNdKHENTEl7FezfgG5gPHFSu/L/uayclDxDygqIXi
dUQDg675vEYmnqXVudZsvj0zoxiyOt+GfTpAemVzNtOfnfo03rZsnhkmSgcPChCc
ib5Bd+0zanq70OeGG6bBm0wRTfizBhDxsKlTS/f8TKhw/7EtN4kPpVLQ/5RACECg
Qhh4lYXcAjapPsUZvPXwpdCW8uLoon4+QoIx8celiMTjoKj0489zEjtAQd0fqzva
oD048hWF6W5YEMyHzbOkHXhpx2nVMQIDAQABo1AwTjAdBgNVHQ4EFgQUtRUXLy5w
dLPpjjg+wFiqVkdY0FQwHwYDVR0jBBgwFoAUtRUXLy5wdLPpjjg+wFiqVkdY0FQw
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAArs06sJ5VPDdDOiwyOWv
q7PRH4advgGI9ZoN/oRB1N2Aeh2RJHFLXBIAYD+3fftSLl1Xm+5HzIevM/djBkC2
cDL6oqvxD8yiPNFJYvqFa+ffrNzmo++x719rnf01rEoh7VXGA1TA8HMMsrocdZv+
xj8sx/WrPu5w2+C+o8f/eAXjFn0ZOz1/iUhziwEJG1BbCV6VEF4rooNdbFzgPLPe
Jro/qF2xNGo/D60lfuDHzbJmTC8BvXoYF8YQMrW3ggJPa0TdXX22zElk/G1T00Ls
mVAOExDtOAOnv2drQ07st0ZAjrLKX5eHYImwYit1tdYXHZdWPsgp0plA15cMw/3y
JA==
-----END CERTIFICATE-----
""",
        "RotationPeriod": "2160h"
    }
    assert c.connect.certificates.put(provider='consul', config=config)

    # Create a web -> db "allow" intention and read it back in every way.
    intention = c.connect.intentions.create(source_name='web',
                                            source_type='consul',
                                            destination_name='db',
                                            action='allow')
    intentions = c.connect.intentions.list()
    assert intentions[0]['ID'] == intention['ID']
    intention = c.connect.intentions.get(intention['ID'])
    # NOTE(review): tautology — this compares the intention's ID to itself;
    # it was presumably meant to compare against the originally created ID.
    assert intention['ID'] == intention['ID']
    assert c.connect.intentions.put(intentions[0]['ID'],
                                    source_name='web',
                                    source_type='consul',
                                    destination_name='db',
                                    action='allow')
    assert c.connect.intentions.check(source='web', destination='db') == {
        'Allowed': True
    }
    match_list = c.connect.intentions.list_match(by='source', name='web')
    assert match_list['web'][0]['SourceName'] == 'web'
    assert match_list['web'][0]['DestinationName'] == 'db'
    assert c.connect.intentions.delete(intentions[0]['ID'])
    assert c.connect.intentions.list() == []
"""Minimal python-consul example: write a key, then long-poll it for changes."""
import consul

client = consul.Consul(host='127.0.0.1', port=8500)

# in another process
client.kv.put('foo', 'bar')

# poll a key for updates: passing the last index back in makes kv.get a
# blocking query that returns when the key changes.
index = None
while True:
    index, data = client.kv.get('foo', index=index)
    print(data['Value'])
def test_agent_self(self, consul_port):
    """agent.self() exposes only the Member and Config sections here."""
    client = consul.Consul(port=consul_port)
    assert set(client.agent.self().keys()) == {'Member', 'Config'}
# NOTE(review): `db`, `option_a/b/c`, `app`, and `request` are defined
# earlier in the file (outside this chunk).
if db:
    db_exists = True
    # Accept "host" or "host:port"; Consul's default port is 8500.
    if ':' in db:
        (address, port) = db.split(':')
    else:
        address = db
        port = 8500
    db = address + ':' + str(port)
    # Give the Consul container a moment to come up before connecting.
    time.sleep(5)
    # Connect to Consul
    c = consul.Consul(host=address, port=port)
    # Test to see Consul values need to be zeroed
    if c.kv.get('hits')[1] == None:
        c.kv.put('hits', '0')
        c.kv.put(option_a, '0')
        c.kv.put(option_b, '0')
        c.kv.put(option_c, '0')


@app.route('/')
def index():
    # Remember whether this visitor voted already via a cookie.
    vote_cookie = request.cookies.get('vote')
    if db is False:
        # NOTE(review): branch body continues beyond this chunk of the file.
def test_acl_implicit_token_use(self, acl_consul):
    """A client-level token is used implicitly and can be overridden per call."""
    # configure client to use the master token by default
    c = consul.Consul(port=acl_consul.port, token=acl_consul.token)
    master_token = acl_consul.token

    acls = c.acl.list()
    assert set([x['ID'] for x in acls]) == \
        set(['anonymous', master_token])

    assert c.acl.info('foo') is None
    compare = [c.acl.info(master_token), c.acl.info('anonymous')]
    compare.sort(key=operator.itemgetter('ID'))
    assert acls == compare

    # Read-only on everything, no access at all under private/.
    rules = """
        key "" {
            policy = "read"
        }
        key "private/" {
            policy = "deny"
        }
    """
    token = c.acl.create(rules=rules)
    assert c.acl.info(token)['Rules'] == rules

    token2 = c.acl.clone(token)
    assert c.acl.info(token2)['Rules'] == rules

    assert c.acl.update(token2, name='Foo') == token2
    assert c.acl.info(token2)['Name'] == 'Foo'

    assert c.acl.destroy(token2) is True
    assert c.acl.info(token2) is None

    c.kv.put('foo', 'bar')
    c.kv.put('private/foo', 'bar')

    c_limited = consul.Consul(port=acl_consul.port, token=token)
    assert c_limited.kv.get('foo')[1]['Value'] == six.b('bar')
    pytest.raises(consul.ACLPermissionDenied, c_limited.kv.put,
                  'foo', 'bar2')
    pytest.raises(consul.ACLPermissionDenied, c_limited.kv.delete, 'foo')

    assert c.kv.get('private/foo')[1]['Value'] == six.b('bar')
    assert c_limited.kv.get('private/foo')[1] is None
    pytest.raises(consul.ACLPermissionDenied, c_limited.kv.put,
                  'private/foo', 'bar2')
    pytest.raises(consul.ACLPermissionDenied, c_limited.kv.delete,
                  'private/foo')

    # check we can override the client's default token
    assert c.kv.get('private/foo', token=token)[1] is None
    pytest.raises(consul.ACLPermissionDenied, c.kv.put,
                  'private/foo', 'bar2', token=token)
    pytest.raises(consul.ACLPermissionDenied, c.kv.delete,
                  'private/foo', token=token)

    # clean up
    c.acl.destroy(token)
    acls = c.acl.list()
    assert set([x['ID'] for x in acls]) == \
        set(['anonymous', master_token])
#!/usr/bin/env python
"""Register a sample redis service on node 'nifi-1' in the Consul catalog."""
import consul

# Bind the client under a distinct name so the module is not shadowed.
client = consul.Consul(host='172.17.0.2')

# Add a node to the catalog
client.catalog.register(
    'nifi-1',
    address='172.17.0.1',
    service={
        "Service": "redis",
        "ID": "redis1",
        "Tags": ["master", "v1"],
        "Port": 8000
    })
def get_consul_api(module, token=None):
    """Build a Consul client from an Ansible module's parameters.

    Fix: the explicit ``token`` argument was accepted but silently ignored —
    the client always used ``module.params['token']``.  It now takes
    precedence when provided; callers passing ``token=None`` (or nothing)
    see the old behavior.
    """
    if token is None:
        token = module.params.get('token')
    return consul.Consul(host=module.params.get('host'),
                         port=module.params.get('port'),
                         scheme=module.params.get('scheme'),
                         verify=module.params.get('validate_certs'),
                         token=token)
def test_agent_checks(self, acl_consul):
    """Register checks of every supported type and drive TTL state transitions."""
    c = consul.Consul(port=acl_consul.port, token=acl_consul.token)

    def verify_and_dereg_check(check_id):
        # Agent must hold exactly this one check; none remain after deregister.
        assert set(c.agent.checks().keys()) == {check_id}
        assert c.agent.check.deregister(check_id) is True
        assert set(c.agent.checks().keys()) == set([])

    def verify_check_status(check_id, status, notes=None):
        # Assert the check's current Status (and Output, when notes given).
        checks = c.agent.checks()
        assert checks[check_id]['Status'] == status
        if notes:
            assert checks[check_id]['Output'] == notes

    # test setting notes on a check
    c.agent.check.register('check1', Check.ttl('1s'), notes='foo')
    c.agent.check.register('check2', script='/usr/bin/true', interval=1,
                           notes='foo2')
    c.agent.check.register('check3', ttl=1, notes='foo3')
    c.agent.check.register('check4', http='http://localhost:8500', interval=1,
                           notes='foo4')
    c.agent.check.register('check5', http='http://localhost:8500', timeout=1,
                           interval=1, notes='foo5')
    # c.agent.check.register('check5', Check.ttl('1s'), notes='foo5')
    assert c.agent.checks()['check1']['Notes'] == 'foo'
    c.agent.check.deregister('check1')
    c.agent.check.deregister('check2')
    c.agent.check.deregister('check3')
    c.agent.check.deregister('check4')
    c.agent.check.deregister('check5')
    assert set(c.agent.checks().keys()) == set([])

    assert c.agent.check.register('script_check',
                                  Check.script('/bin/true', 10)) is True
    verify_and_dereg_check('script_check')

    assert c.agent.check.register('check name',
                                  Check.script('/bin/true', 10),
                                  check_id='check_id') is True
    verify_and_dereg_check('check_id')

    http_addr = "http://127.0.0.1:{0}".format(acl_consul.port)
    assert c.agent.check.register('http_check',
                                  Check.http(http_addr, '10ms')) is True
    time.sleep(1)
    verify_check_status('http_check', 'passing')
    verify_and_dereg_check('http_check')

    assert c.agent.check.register(
        'http_timeout_check',
        Check.http(http_addr, '100ms', timeout='2s')) is True
    verify_and_dereg_check('http_timeout_check')

    assert c.agent.check.register('ttl_check', Check.ttl('100ms')) is True

    assert c.agent.check.ttl_warn('ttl_check') is True
    verify_check_status('ttl_check', 'warning')
    assert c.agent.check.ttl_warn('ttl_check',
                                  notes='its not quite right') is True
    verify_check_status('ttl_check', 'warning', 'its not quite right')

    assert c.agent.check.ttl_fail('ttl_check') is True
    verify_check_status('ttl_check', 'critical')
    assert c.agent.check.ttl_fail('ttl_check',
                                  notes='something went boink!') is True
    verify_check_status('ttl_check', 'critical',
                        notes='something went boink!')

    assert c.agent.check.ttl_pass('ttl_check') is True
    verify_check_status('ttl_check', 'passing')
    assert c.agent.check.ttl_pass('ttl_check',
                                  notes='all hunky dory!') is True
    verify_check_status('ttl_check', 'passing', notes='all hunky dory!')

    # wait for ttl to expire
    time.sleep(120 / 1000.0)
    verify_check_status('ttl_check', 'critical')
    verify_and_dereg_check('ttl_check')
def collect(self):
    """Prometheus collector: yield SAP instance, user, dump, job, and
    workprocess metrics for every SID found on this host.

    NOTE(review): relies on module-level helpers (get_sid_list,
    get_instance_list_by_sid, get_instance_servername_list_by_sid, R3rfcconn)
    defined outside this chunk; their exact contracts are assumed here.
    """
    # get SID list from os dir
    sidList = get_sid_list()
    for sid in sidList:
        # CONSUL_HOST env var overrides the default local agent address.
        c = consul.Consul(host=os.environ.get('CONSUL_HOST') if os.environ.get('CONSUL_HOST') else '127.0.0.1', port=23345, scheme='http')
        kvid, kvv = c.kv.get(sid + '_login')
        if kvv:
            # get SID login info from consul
            kvvDict = json.loads(kvv['Value'])
            conn = None
            for instance in get_instance_list_by_sid(sid):
                # Open one RFC connection via the first DIALOG instance seen.
                if not conn and instance['type'] == 'DIALOG':
                    conn = R3rfcconn(r3ashost='127.0.0.1', r3sysnr=instance['sysnr'], r3client=kvvDict['r3client'], r3user=kvvDict['r3user'], r3pwd=kvvDict['r3pwd'])
                # 'Red' anywhere in sapcontrol output marks a failing process.
                instance_check_cmd = f'su - {sid.lower()}adm -c "sapcontrol -nr {instance["sysnr"]} -function GetProcessList"'
                instance_check_cmd_args = shlex.split(instance_check_cmd)
                sp = subprocess.run(instance_check_cmd_args, capture_output=True)
                output = sp.stdout.decode('utf-8')
                outputlines = output.splitlines()
                if 'Red' in output:
                    g_instancestatus = StateSetMetricFamily(
                        "InstanceStatus", 'Instance Status Check in SID',
                        labels=['SID', 'Instance'])
                    g_instancestatus.add_metric([sid, instance["profile"]], {'status': False})
                    yield g_instancestatus
                else:
                    g_instancestatus = StateSetMetricFamily(
                        "InstanceStatus", 'Instance Status Check in SID',
                        labels=['SID', 'Instance'])
                    g_instancestatus.add_metric([sid, instance["profile"]], {'status': True})
                    yield g_instancestatus
                    pass
                pass
            if conn:
                for p in get_instance_servername_list_by_sid(sid):
                    # master identification
                    servername = p['servername']
                    profile = p['profile']
                    kvid_master, kvv_master = c.kv.get(sid + '_master')
                    if kvv_master:
                        kvvDict_master = json.loads(kvv_master['Value'])
                        # Only the recorded master server emits SID-wide metrics.
                        if servername == kvvDict_master['servername']:
                            # during user count, by user type
                            USRLIST = conn.get_user_list()
                            g_usercount = GaugeMetricFamily(
                                "UserCount", 'System Overall User Count',
                                labels=['SID'])
                            g_usercount.add_metric([sid], len(USRLIST))
                            yield g_usercount
                            # during dump count
                            DUMPLIST = conn.get_dump_list()
                            g_dumpcount = GaugeMetricFamily(
                                "DumpCount", 'System Overall Dump Count',
                                labels=['SID'])
                            g_dumpcount.add_metric([sid], len(DUMPLIST))
                            yield g_dumpcount
                            # get bk job status
                            job_status = conn.get_bkjob_status_count()
                            g_jobstatus = GaugeMetricFamily(
                                "BKJobCount", 'Current Background Job Count Status',
                                labels=['SID', 'BKJobStatus'])
                            g_jobstatus.add_metric([sid, 'Finish'], job_status['finish'])
                            g_jobstatus.add_metric([sid, 'Running'], job_status['running'])
                            g_jobstatus.add_metric([sid, 'Cancel'], job_status['cancel'])
                            yield g_jobstatus
                    else:
                        # No master recorded yet: claim it for this server.
                        c.kv.put(sid + '_master', json.dumps({"servername": servername}))
                        # during user count, by user type
                        USRLIST = conn.get_user_list()
                        g_usercount = GaugeMetricFamily(
                            "UserCount", 'System Overall User Count',
                            labels=['SID'])
                        g_usercount.add_metric([sid], len(USRLIST))
                        yield g_usercount
                        # during dump count
                        DUMPLIST = conn.get_dump_list()
                        g_dumpcount = GaugeMetricFamily(
                            "DumpCount", 'System Overall Dump Count',
                            labels=['SID'])
                        g_dumpcount.add_metric([sid], len(DUMPLIST))
                        yield g_dumpcount
                        # get bk job status
                        job_status = conn.get_bkjob_status_count()
                        g_jobstatus = GaugeMetricFamily(
                            "BKJobCount", 'Current Background Job Count Status',
                            labels=['SID', 'BKJobStatus'])
                        g_jobstatus.add_metric([sid, 'Finish'], job_status['finish'])
                        g_jobstatus.add_metric([sid, 'Running'], job_status['running'])
                        g_jobstatus.add_metric([sid, 'Cancel'], job_status['cancel'])
                        yield g_jobstatus
                    # during workprocess count, by wp type
                    wplist = conn.get_server_wp_list(servername)
                    running_dia_count = 0
                    running_upd_count = 0
                    running_btc_count = 0
                    for wp in wplist:
                        # WP_ISTATUS != 2 counts as an occupied workprocess.
                        if wp['WP_ISTATUS'] != 2:
                            if wp['WP_TYP'] == 'DIA':
                                running_dia_count += 1
                                pass
                            if wp['WP_TYP'] == 'BTC':
                                running_btc_count += 1
                                pass
                            if wp['WP_TYP'] == 'UPD':
                                running_upd_count += 1
                                pass
                    g_wpcount = GaugeMetricFamily(
                        "WorkprocessCount",
                        'WorkprocessCount of One Instance in SID group by Type',
                        labels=['SID', 'Instance', 'WorkprocessType'])
                    g_wpcount.add_metric([sid, profile, 'DIA'], running_dia_count)
                    g_wpcount.add_metric([sid, profile, 'BTC'], running_btc_count)
                    g_wpcount.add_metric([sid, profile, 'UPD'], running_upd_count)
                    yield g_wpcount
                conn.close()
    # NOTE(review): stray opening triple-quote below — it appears to start a
    # commented-out region that continues beyond this chunk of the file.
    '''
#!/usr/bin/env python3 import consul import urllib import json import time import socket from functools import reduce import logging docker_container_names = [] consul_client = consul.Consul(host=socket.gethostbyname(socket.gethostname()), port=8500, scheme='http') # Get only passing service and add its ServiceID(docker container name) and Service Address to list def reducer(acc, val): try: if val['Checks'][1]['Status'] == 'passing': acc.append((val['Service']['ID'], val['Service']['Address'])) return acc except AttributeError: pass # Find saas service containers and add them to known docker_container_names saas_index, saas_status = consul_client.health.service(service='saas') saas_container_names = reduce(reducer, saas_status, []) try: docker_container_names.extend(saas_container_names) except TypeError: