def delete(conf: dict):
    '''Delete the docker infrastructure.

    Args:
        conf: The configuration data.
    '''
    delete_containers(conf)
    path = conf['base']
    if os.path.exists(path):
        info(f'removing directory: {path}')
        try:
            shutil.rmtree(path, ignore_errors=False, onerror=None)
        except FileNotFoundError:
            pass  # this is okay
        except PermissionError as exc:
            # Bad news! Try deleting it as sudo.
            warn(str(exc))  # this is not okay
            warn('will try to delete as sudo')
            cmd = f'sudo rm -rf {path}'
            try:
                subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
            except subprocess.CalledProcessError as exc:
                err(str(exc))  # failed even as sudo
    else:
        info(f'directory does not exist: {path}')
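# A minimal usage sketch for delete(). The conf values here are
# hypothetical; a real conf comes from the project configuration
# loader:
#
#   conf = {'base': '/opt/grape/demo', ...}
#   delete(conf)  # removes the containers, then the base directory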
def ximport(conf: dict, xconf: str):
    '''Import an external grafana server system.

    The imported system is stored in a local zip archive. This
    operation requires an import conf file.

    Args:
        conf: The configuration data.
        xconf: The external grafana configuration data.
    '''
    info('import')
    ofn = conf['file']
    if os.path.exists(ofn):
        err(f'archive file already exists: {ofn}')
    iconf = get_xconf(xconf)
    conf['import'] = iconf
    auth = (iconf['username'], iconf['password'])
    grr = read_all_services(iconf['url'], auth)
    sql = save_pg(conf)
    info(f'writing to {ofn}')
    if ofn.lower().endswith('.zip'):  # check the extension, not a substring
        with ZipFile(ofn, 'w') as zfp:
            zfp.writestr('conf.json', json.dumps(conf))
            zfp.writestr('gr.json', json.dumps(grr))
            zfp.writestr('pg.sql', sql)
        # One can unzip the individual files like this:
        #   $ unzip -p /mnt/example.zip conf.json > /mnt/conf.json
        #   $ unzip -p /mnt/example.zip gr.json > /mnt/gr.json
        #   $ unzip -p /mnt/example.zip pg.sql > /mnt/pg.sql
    else:
        err('only zip files are supported')
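# A hedged usage sketch for ximport(). The file names are
# hypothetical; xconf is the path to the import conf file:
#
#   conf = {'file': 'example.zip', ...}
#   ximport(conf, 'xconf.yaml')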
def load_folders(conf: dict, recs: list):
    '''Load the folders up to the grafana server.

    This sets (loads) the folders on the grafana server by sending the
    folder configuration data through the REST API.

    Args:
        conf: The configuration data.
        recs: The grafana setup data for folders.
    '''
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    auth = (conf['gr']['username'], conf['gr']['password'])
    gurl = conf['gr']['url']
    for rec in recs:
        name = rec['title']
        url = gurl + '/api/folders'
        info(f'uploading folder "{name}" - {url}')
        try:
            response = requests.post(url, json=rec, auth=auth, headers=headers)
        except requests.ConnectionError as exc:
            err(str(exc))
        info(f'response status: {response.status_code} from {url}')
        if response.status_code not in (200, 412, 500):
            err(f'upload failed with status {response.status_code} to {url}')
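# The folder records have the shape returned by the grafana
# /api/folders endpoint. A minimal hypothetical example:
#
#   recs = [{'id': 1, 'uid': 'abc123', 'title': 'Demo Folder'}]
#   load_folders(conf, recs)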
def load_dashboards(conf: dict, recs: list, fmap: dict):
    '''Load the dashboards up to the grafana server.

    This sets (loads) the dashboards on the grafana server by sending
    the dashboard configuration data through the REST API.

    Args:
        conf: The configuration data.
        recs: The grafana setup data for dashboards.
        fmap: The folder/title id map.
    '''
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    auth = (conf['gr']['username'], conf['gr']['password'])
    gurl = conf['gr']['url']
    for rec in recs:
        name = rec['dashboard']['title']
        fmapid = rec['folderId']
        fid = fmap.get(fmapid, 0)  # fall back to the General folder (id 0)
        url = gurl + '/api/dashboards/db'
        info(f'uploading dashboard ({fid}) "{name}" - {url}')
        jrec = rec
        jrec['dashboard']['id'] = None  # force grafana to create the dashboard
        jrec['dashboard']['uid'] = None
        jrec['folderId'] = fid
        try:
            response = requests.post(url, json=jrec, auth=auth, headers=headers)
        except requests.ConnectionError as exc:
            err(str(exc))
        info(f'response status: {response.status_code} from {url}')
        if response.status_code not in (200, 400, 412):
            err(f'upload failed with status {response.status_code} to {url}')
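# A minimal hypothetical dashboard record; fmap comes from
# load_fmap() after the folders have been uploaded:
#
#   recs = [{'dashboard': {'id': 7, 'uid': 'xyz', 'title': 'CPU'},
#            'folderId': 1}]
#   load_dashboards(conf, recs, fmap={1: 3})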
def load_datasources(conf: dict, recs: list):
    '''Load the datasources up to the grafana server.

    This sets (loads) the datasources in the grafana server by sending
    the datasource configuration data through the REST API.

    Args:
        conf: The configuration data.
        recs: The grafana setup data for datasources.
    '''
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    pmap = {}
    if 'import' in conf:
        # Load the import mappings.
        for rec in conf['import']['databases']:
            rmap = {}
            for key, val in rec.items():
                if key == 'name':
                    continue
                rmap[key] = val
            name = rec['name']
            pmap[name] = rmap

    # Fill in the url for the default database.
    pgname = conf['pg']['name']
    if pgname not in pmap:
        pmap[pgname] = {
            'name': pgname,
            'url': getpgip(conf),
            'password': conf['pg']['password'],
        }

    # Update grafana.
    auth = (conf['gr']['username'], conf['gr']['password'])
    gurl = conf['gr']['url']
    for rec in recs:
        name = rec['name']
        if name in pmap:
            for key, val in pmap[name].items():
                rec[key] = val
        url = gurl + '/api/datasources'
        info(f'uploading datasource "{name}" - {url}')
        try:
            response = requests.post(url, json=rec, auth=auth, headers=headers)
        except requests.ConnectionError as exc:
            err(str(exc))
        info(f'response status: {response.status_code} from {url}')
        if response.status_code not in (200, 409):
            err(f'upload failed with status {response.status_code} to {url}')
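# A minimal hypothetical datasource record, shaped like the output of
# the grafana /api/datasources endpoint:
#
#   recs = [{'name': 'demo-pg', 'type': 'postgres',
#            'url': 'localhost:5432', 'access': 'proxy'}]
#   load_datasources(conf, recs)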
def load(conf: dict) -> dict:
    '''Load the zip state data.

    The state data that describes a project is stored in a zip file
    with known files. This function encapsulates reading them. The
    known files are: conf.json, gr.json and pg.sql.

    The conf.json file contains project configuration data. The
    gr.json file contains the grafana server datasources, folders and
    dashboards setup. The pg.sql file contains the database setup.

    The dictionary that is returned has three top level keys: 'conf',
    'gr' and 'pg'. One for each file read.

    Args:
        conf: The configuration data.

    Returns:
        result: The configuration data from each file.
    '''
    ofn = conf['file']
    if not os.path.exists(ofn):
        err(f'archive file does not exist: {ofn}')
    info(f'loading from {ofn}')
    with ZipFile(ofn, 'r') as zfp:
        zfn = 'conf.json'
        with zfp.open(zfn) as ifp:
            info(f'loading {zfn} from {ofn}')
            zconf = json.loads(ifp.read().decode('utf-8'))
        zfn = 'gr.json'
        with zfp.open(zfn) as ifp:
            info(f'loading {zfn} from {ofn}')
            zgr = json.loads(ifp.read().decode('utf-8'))
        zfn = 'pg.sql'
        with zfp.open(zfn) as ifp:
            info(f'loading {zfn} from {ofn}')
            sql = ifp.read().decode('utf-8')
    result = {
        'conf': zconf,
        'gr': zgr,
        'pg': sql,
    }
    return result
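# One can inspect the archive contents from the shell like this:
#   $ unzip -l example.zip
#   $ unzip -p example.zip conf.json | python -m json.tool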
def load(conf: dict, sql: str):
    '''Load database data.

    This is done using psql in the container by copying the sql to the
    mnt directory that is mounted to the container.

    Note that this could be used for much more than just loading
    because it executes arbitrary SQL, but loading is its primary
    purpose.

    Args:
        conf: The configuration data.
        sql: The SQL commands used to update the database.
    '''
    dbname = conf['pg']['dbname']
    name = conf['pg']['name']
    user = conf['pg']['username']
    mnt = conf['pg']['mnt']
    tfn = f'mnt{os.getpid()}.sql'
    tfpx = f'{mnt}/{tfn}'   # external (host) path
    tfpi = f'/mnt/{tfn}'    # internal (container) path

    # Fix a minor nit: the postgres role always exists, so comment out
    # the CREATE ROLE statement to avoid a spurious error.
    sql = sql.replace('CREATE ROLE postgres;', '-- CREATE ROLE postgres;')

    # Now write the SQL.
    with open(tfpx, 'w', encoding='utf-8') as ofp:
        ofp.write(sql)
    if not os.path.exists(tfpx):
        err(f'file does not exist: {tfpx}')

    # Write to the database, retrying while the container comes up.
    cmd = f'docker exec {name} psql -d {dbname} -U {user} -f {tfpi}'
    tmax = 10
    tcnt = 0
    while tcnt <= tmax:
        try:
            info(cmd)
            out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
            break
        except subprocess.CalledProcessError as exc:
            tcnt += 1
            warn(f'try {tcnt} of {tmax}\n' + exc.output.decode('utf-8'))
            if tcnt == tmax:
                err(str(exc))
            time.sleep(5)
    debug(out.decode('utf-8'))
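# The equivalent manual operation, assuming a container named
# "demo-pg" and a generated file name (both hypothetical):
#   $ docker exec demo-pg psql -d demo -U postgres -f /mnt/mnt1234.sql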
def load_fmap(conf: dict, recs: list) -> dict:
    '''Map the grafana folders from the old ids to the new ones.

    This must be done after the new folders have been uploaded.

    Args:
        conf: The configuration data.
        recs: The grafana setup data for folders.

    Returns:
        fmap: The folder map with the correct ids.
    '''
    fmapn = {}
    for rec in recs:  # old folders
        fid = rec['id']
        title = rec['title']
        fmapn[title] = {'old': fid, 'new': -1}

    # Get the new folders.
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    auth = (conf['gr']['username'], conf['gr']['password'])
    url = conf['gr']['url'] + '/api/folders?limit=100'
    info(f'downloading folders from {url}')
    try:
        response = requests.get(url, auth=auth, headers=headers)
    except requests.ConnectionError as exc:
        err(str(exc))
    if response.status_code != 200:
        err(f'download failed with status {response.status_code} to {url}')
    folders = response.json()  # these are the new folders

    # Now map them.
    for rec in folders:
        fid = rec['id']
        title = rec['title']
        fmapn[title]['new'] = fid

    # Map the old folder ids to the new folder ids.
    fmap = {}
    for val in fmapn.values():
        fmap[val['old']] = val['new']
    return fmap
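# A hedged sketch of the mapping. If the old folders had ids
# {1: 'A', 2: 'B'} and the server assigned new ids {'A': 5, 'B': 6},
# the result is {1: 5, 2: 6}, suitable for load_dashboards().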
def create_containers(conf: dict, wait: float):
    '''Create the docker containers.

    Args:
        conf: The configuration data.
        wait: The container create wait time.
    '''
    create_start(conf, 'pg')  # postgresql
    create_start(conf, 'gr')  # grafana
    client = docker.from_env()
    num = 0
    for key in ['gr', 'pg']:
        kconf = conf[key]
        cname = kconf['cname']
        containers = client.containers.list(filters={'name': cname})
        if containers:
            info(f'container already exists: "{cname}"')
            continue

        # Create the volume mounted subdirectories with the proper
        # permissions.
        kwargs = kconf['client.containers.run']
        for key1 in kwargs['volumes']:
            try:
                os.makedirs(key1)
                os.chmod(key1, 0o775)
            except FileExistsError as exc:
                info(str(exc))  # this is perfectly fine

        ports = kconf['ports']
        info(f'creating container "{cname}": {ports}')
        try:
            cobj = client.containers.run(**kwargs)
        except docker.errors.DockerException as exc:
            # The container object is not available here because run()
            # failed, so there are no container logs to report.
            err(f'container failed to run: "{cname}" - {exc}')
        num += 1
    if wait:
        create_container_init(conf, wait)
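# A minimal usage sketch; wait is the maximum initialization wait in
# seconds (the value is hypothetical):
#
#   create_containers(conf, wait=60.0)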
def check_port(port: int) -> Container:  # pylint: disable=inconsistent-return-statements
    '''Check to see if a port is valid.

    A port is valid if it shows up as an external port for a grafana
    docker container. If a port is not valid, the program exits.

    Args:
        port: The external port to look for.

    Returns:
        object: The container object if found.
    '''
    client = docker.from_env()
    containers = client.containers.list(filters={'label': 'grape.type'})
    for container in sorted(containers, key=lambda x: x.name.lower()):
        ports = get_external_ports(container)
        if ports and int(ports[0]) == port:
            ctype = container.labels['grape.type']
            if ctype == 'gr':
                return container
    err(f'no grape grafana containers found that expose port: {port},\n\t'
        'try running "pipenv run grape status -v"')
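# Hypothetical usage: find the grafana container that exposes an
# external port (the port number is illustrative):
#
#   container = check_port(4600)
#   print(container.name)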
def read_service(burl: str, auth: tuple, service: str) -> dict:
    '''Read data for a single grafana service.

    Args:
        burl: The base URL for the service.
        auth: The auth tuple.
        service: The grafana REST service.

    Returns:
        response: The JSON from the URL request.
    '''
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    url = f'{burl}/{service}'
    info(f'reading {url}')
    try:
        response = requests.get(url, auth=auth, headers=headers)
    except requests.ConnectionError as exc:
        err(str(exc))
    if response.status_code != 200:
        err(f'request to {url} failed with status {response.status_code}\n'
            f'{json.dumps(response.json(), indent=2)}')
    result = response.json()
    return result
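# Hypothetical usage: read the datasources from a local grafana
# server (credentials and URL are illustrative):
#
#   auth = ('admin', 'admin')
#   recs = read_service('http://localhost:3000', auth, 'api/datasources')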
def getpgip(conf: dict) -> str:
    '''Get the correct internal IP address for the pg container.

    Special handling to "fix" the url by reading the data from the
    docker container to get the correct internal IP address.

    Args:
        conf: The configuration data.

    Returns:
        url: The corrected host:port address for the pg container.
    '''
    name = conf['gr']['cname']
    client = docker.from_env()
    containers = client.containers.list(filters={'name': name})
    if len(containers) == 0:
        err(f'docker container does not exist: "{name}"')
    elif len(containers) > 1:
        err(f'too many docker containers (>1) named "{name}"')
    rec = containers[0].attrs
    hip = rec['NetworkSettings']['Networks']['bridge']['Gateway']
    port = conf['pg']['xport']
    url = f'{hip}:{port}'
    return url
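# The returned value is a host:port string such as '172.17.0.1:4502'
# (hypothetical): the docker bridge gateway address plus the external
# postgres port, which grafana can reach from inside its container.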
def create_container_init(conf: dict, waitval: float):  # pylint: disable=too-many-locals
    '''Initialize the containers.

    Wait for the containers to initialize by looking for messages in
    the logs. This search allows initialization to complete faster
    than just doing a simple wait.

    Args:
        conf: The configuration data.
        waitval: The container create wait time in seconds.
    '''
    # This is a heuristic that does a short wait to give docker
    # sufficient time to start to define the new containers before we
    # start to query them.
    #
    # In particular, this significantly reduces the chance that the
    # docker.errors.NotFound exception will be raised.
    #
    # One second is probably overkill.
    time.sleep(1)
    client = docker.from_env()

    # The values below are heuristics based on empirical observation
    # of the logs. They may have to change based on versions of
    # docker. The 'values' entries must be in lowercase because they
    # are used for pattern matching in the docker logs.
    recs = [
        {
            'key': 'gr',
            'values': ['created default admin', 'http server listen'],
        },
        {
            'key': 'pg',
            'values': ['database system is ready to accept connections'],
        },
    ]

    # Define the sleep interval.
    # Try to report status about every 2 seconds based on elapsed time.
    sleep = 0.1  # time to sleep between log polls
    smodval = max(2, int(2. / sleep))  # report approximately every 2s

    # Wait for the containers to initialize.
    for rec in recs:
        key = rec['key']
        values = rec['values']
        name = conf[key]['name']
        info(f'checking container initialization status of "{name}" '
             f'with max wait: {waitval}')

        # Load the container.
        # Note that the containers.get() and logs() operations are
        # glommed together under the same timeout because the user
        # only cares about the total time.
        try:
            cobj = client.containers.get(name)
        except docker.errors.DockerException as exc:
            # The container object is not available here because get()
            # failed, so there are no container logs to report.
            err(f'container failed to initialize: "{name}" - {exc}')

        # Read the container logs until a pattern matches or the
        # maximum wait time is exceeded.
        start = time.time()
        logs = ''
        i = 0
        while True:
            try:
                logs = cobj.logs(tail=20).decode('utf-8')
                done = False
                for value in values:
                    if value in logs.lower():
                        elapsed = time.time() - start
                        info(f'container initialized: "{name}" '
                             f'after {elapsed:0.1f} seconds')
                        done = True  # initialization succeeded, based on a log pattern match
                        break
                if done:
                    break
            except docker.errors.DockerException as exc:
                logs = cobj.logs().decode('utf-8')  # provide the full log
                err(f'container failed to initialize: "{name}" - {exc}\n{logs}')
            elapsed = time.time() - start
            if elapsed <= waitval:
                i += 1
                if (i % smodval) == 0:
                    info('  container not initialized yet, will try again: '
                         f'{name} ({elapsed:0.1f}s)')
                time.sleep(sleep)
            else:
                # Worst case is that we simply wait the maximum time.
                logs = cobj.logs().decode('utf-8')  # provide the full log
                err(f'container failed to initialize: "{name}"\nData: {logs}')
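# Hypothetical usage, typically called from create_containers():
#
#   create_container_init(conf, waitval=60.0)
#
# The per-container log patterns above are heuristics; newer grafana
# or postgres images may log different initialization messages.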