Example No. 1
def load_folders(conf: dict, recs: list):
    '''Load the folders up to the grafana server.

    This sets (loads) the folders on the grafana server by sending the
    folder configuration data through the REST API.

    Args:
        conf: The configuration data.
        recs: The grafana setup data for folders.
    '''
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    auth = (conf['gr']['username'], conf['gr']['password'])
    gurl = conf['gr']['url']
    for rec in recs:
        name = rec['title']
        url = gurl + '/api/folders'
        info(f'uploading folder "{name}" - {url}')
        try:
            response = requests.post(url,
                                     json=rec,
                                     auth=auth,
                                     headers=headers)
        except requests.ConnectionError as exc:
            err(str(exc))  # err() is assumed to exit, so response is never unbound below
        info(f'response status: {response.status_code} from {url}')
        if response.status_code not in (200, 412, 500):  # 412: folder already exists
            err(f'upload failed with status {response.status_code} to {url}')
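A minimal usage sketch (the conf and recs shapes are inferred from the field accesses above; all values are illustrative):

conf = {'gr': {'username': 'admin', 'password': 'admin',
               'url': 'http://127.0.0.1:3000'}}
recs = [{'title': 'Demo Folder', 'id': 7, 'uid': 'abc123'}]
load_folders(conf, recs)  # POSTs each record to <gr url>/api/folders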
Example No. 2
def load_dashboards(conf: dict, recs: list, fmap: dict):
    '''Load the dashboards up to the grafana server.

    This sets (loads) the dashboards on the grafana server by sending
    the dashboard configuration data through the REST API.

    Args:
        conf: The configuration data.
        recs: The grafana setup data for dashboards.
        fmap: The folder/title id map.
    '''
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    auth = (conf['gr']['username'], conf['gr']['password'])
    gurl = conf['gr']['url']
    for rec in recs:
        name = rec['dashboard']['title']
        fmapid = rec['folderId']
        fid = fmap.get(fmapid, 0)
        url = gurl + '/api/dashboards/db'
        info(f'uploading dashboard ({fid}) "{name}" - {url}')
        jrec = rec  # an alias, not a copy: rec is modified in place
        jrec['dashboard']['id'] = None  # create the dashboard
        jrec['dashboard']['uid'] = None
        jrec['folderId'] = fid
        try:
            response = requests.post(url,
                                     json=jrec,
                                     auth=auth,
                                     headers=headers)
        except requests.ConnectionError as exc:
            err(str(exc))
        info(f'response status: {response.status_code} from {url}')
        if response.status_code not in (200, 400, 412):
            err(f'upload failed with status {response.status_code} to {url}')
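A minimal usage sketch (shapes inferred from the field accesses above; all values are illustrative):

conf = {'gr': {'username': 'admin', 'password': 'admin',
               'url': 'http://127.0.0.1:3000'}}
fmap = {7: 2}  # old folder id -> new folder id, from load_fmap()
recs = [{'dashboard': {'id': 42, 'uid': 'xyz', 'title': 'Demo'},
         'folderId': 7}]
load_dashboards(conf, recs, fmap)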
Example No. 3
def ximport(conf: dict, xconf: str):
    '''
    Import an external grafana server system.

    The imported system state is stored in a local archive file.

    This operation requires an import conf file.

    Args:
        conf: The configuration data.
        xconf: The external grafana configuration data.
    '''
    info('import')
    ofn = conf['file']
    if os.path.exists(ofn):
        err(f'archive file already exists: {ofn}')

    iconf = get_xconf(xconf)
    conf['import'] = iconf
    auth = (iconf['username'], iconf['password'])
    grr = read_all_services(iconf['url'], auth)
    sql = save_pg(conf)
    info(f'writing to {ofn}')
    if ofn.lower().endswith('.zip'):
        # Write the zip archive.
        with ZipFile(ofn, 'w') as zfp:
            zfp.writestr('conf.json', json.dumps(conf))
            zfp.writestr('gr.json', json.dumps(grr))
            zfp.writestr('pg.sql', sql)
        # One can unzip the individual files like this:
        #   $ unzip -p /mnt/example.zip conf.json > /mnt/conf.json
        #   $ unzip -p /mnt/example.zip gr.json > /mnt/gr.json
        #   $ unzip -p /mnt/example.zip pg.sql > /mnt/pg.sql
    else:
        err('only zip files are supported')
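The get_xconf() helper is not shown here. A minimal sketch, assuming the external configuration is a YAML file with the url, username, password and optional databases keys that the callers above access:

import yaml  # assumption: the xconf file is YAML

def get_xconf(path: str) -> dict:
    '''Read the external grafana configuration file.'''
    with open(path, encoding='utf-8') as ifp:
        return yaml.safe_load(ifp)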
Example No. 4
def delete(conf: dict):
    '''Delete the docker infrastructure.

    Args:
        conf: The configuration data.
    '''
    delete_containers(conf)
    path = conf['base']
    if os.path.exists(path):
        info(f'removing directory: {path}')
        try:
            shutil.rmtree(path)
        except FileNotFoundError:
            pass  # this is okay
        except PermissionError as exc:
            # Permission denied: fall back to deleting with sudo.
            warn(str(exc))
            warn('will try to delete as sudo')
            cmd = f'sudo rm -rf {path}'
            try:
                subprocess.check_output(cmd,
                                        stderr=subprocess.STDOUT,
                                        shell=True)
            except subprocess.CalledProcessError as exc:
                err(str(exc))  # failed even as sudo
    else:
        info(f'directory does not exist: {path}')
Example No. 5
def main():
    '''Load command main.

    This is the command line entry point for the load command.
    '''
    opts = getopts()
    initv(opts.verbose)
    info(f'load {opts.fname} into {opts.base}')
    conf = get_conf(opts.base, opts.fname, opts.grxport, opts.pgxport)
    load(conf, opts.wait)
    info('done')
Example No. 6
def main():
    '''Create command main.

    This is the command line entry point for the create command.
    '''
    opts = getopts()
    initv(opts.verbose)
    info(f'creating {opts.base} based containers')
    conf = get_conf(opts.base, '', opts.grxport, opts.pgxport)
    create(conf, opts.wait)
    info('done')
Example No. 7
def main():
    '''Import command main.

    This is the command line entry point for the import command.
    '''
    opts = getopts()
    initv(opts.verbose)
    info(f'import from {opts.xconf}')
    conf = get_conf(opts.base, opts.fname, opts.grxport, opts.pgxport)
    ximport(conf, opts.xconf)
    info('done')
Example No. 8
def load_datasources(conf: dict, recs: list):
    '''Load the datasources up to the grafana server.

    This sets (loads) the datasources in the grafana server by sending
    the datasource configuration data through the REST API.

    Args:
        conf: The configuration data.
        recs: The grafana setup data for datasources.
    '''
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    pmap = {}
    if 'import' in conf:
        # Load the import mappings.
        for rec in conf['import']['databases']:
            rmap = {key: val for key, val in rec.items() if key != 'name'}
            pmap[rec['name']] = rmap

    # Fill in the url for the default database.
    pgname = conf['pg']['name']
    if pgname not in pmap:
        pmap[pgname] = {
            'name': pgname,
            'url':  getpgip(conf),
            'password': conf['pg']['password']
        }

    # Update grafana.
    auth = (conf['gr']['username'], conf['gr']['password'])
    gurl = conf['gr']['url']
    for rec in recs:
        name = rec['name']
        if name in pmap:
            for key, val in pmap[name].items():
                rec[key] = val
        url = gurl + '/api/datasources'
        info(f'uploading datasource "{name}" - {url}')
        try:
            response = requests.post(url,
                                     json=rec,
                                     auth=auth,
                                     headers=headers)
        except requests.ConnectionError as exc:
            err(str(exc))
        info(f'response status: {response.status_code} from {url}')
        if response.status_code not in (200, 409):  # 409: datasource already exists
            err(f'upload failed with status {response.status_code} to {url}')
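A sketch of the import mapping that drives the field overrides (all values are illustrative assumptions):

conf['import'] = {'databases': [
    {'name': 'demo-db',       # matched against each datasource name
     'url': '10.0.0.5:5432',  # every field except 'name' overwrites
     'password': 'secret'}]}  # the matching datasource record
# Resulting pmap: {'demo-db': {'url': '10.0.0.5:5432', 'password': 'secret'}}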
Example No. 9
def create_start(conf: dict, key: str):
    '''Create the start script.

    Args:
        conf: The configuration data.
        key: The service key: 'pg' or 'gr'.
    '''
    # Create the start script.
    kconf = conf[key]
    base = kconf['base']
    name = kconf['name']
    fname = os.path.join(os.getcwd(), base, key, 'start.sh')
    if os.path.exists(fname):
        return

    # Create the docker command.
    cmd = 'docker run'
    kwargs = kconf['client.containers.run']
    if kwargs['detach']:
        cmd += ' -d'
    if kwargs['remove']:
        cmd += ' --rm'
    if name:
        cmd += f' --name {name} -h {name}'
    if kwargs['environment']:
        for env in kwargs['environment']:
            cmd += f' -e "{env}"'
    if kwargs['ports']:
        for key1, val1 in kwargs['ports'].items():
            cport = key1
            hport = val1
            cmd += f' -p {hport}:{cport}'
    if kwargs['volumes']:
        for key1, val1 in kwargs['volumes'].items():
            cmd += f' -v {key1}:' + val1['bind']
    cmd += ' ' + kwargs['image']

    # Create the script.
    info(f'start script: {fname}')
    dname = os.path.dirname(fname)
    if not os.path.exists(dname):
        os.makedirs(dname)
    with open(fname, 'w', encoding='utf-8') as ofp:
        ofp.write(f'''\
#!/usr/bin/env bash
# Start the {name} container.
cd {os.getcwd()}
{cmd}
echo "started - it may take up to 30 seconds to initialize"
''')
    os.chmod(fname, 0o775)
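A sketch of the kconf['client.containers.run'] dictionary this function consumes (all values are illustrative assumptions):

kwargs = {
    'image': 'grafana/grafana',
    'detach': True,
    'remove': True,
    'environment': ['GF_SECURITY_ADMIN_PASSWORD=admin'],
    'ports': {'3000/tcp': 4600},  # container port -> host port
    'volumes': {'/home/user/gr/mnt': {'bind': '/mnt'}},
}
# With name == 'gr' the generated script would run roughly:
#   docker run -d --rm --name gr -h gr \
#     -e "GF_SECURITY_ADMIN_PASSWORD=admin" \
#     -p 4600:3000/tcp -v /home/user/gr/mnt:/mnt grafana/grafana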
Example No. 10
def load(conf: dict, sql: str):
    '''Load database data.

    This is done using psql in the container by copying
    the sql to the mnt directory that is mounted to the
    container.

    Note that this could be used for much more than just
    loading because it executes arbitrary SQL but loading
    is its primary purpose.

    Args:
        conf: The configuration data.
        sql: The SQL commands used to update the database.
    '''
    dbname = conf['pg']['dbname']
    name = conf['pg']['name']
    user = conf['pg']['username']
    mnt = conf['pg']['mnt']
    tfn = f'mnt{os.getpid()}.sql'
    tfpx = f'{mnt}/{tfn}'  # external (host) path
    tfpi = f'/mnt/{tfn}'  # internal (container) path

    # Minor fixup: the postgres role always exists already, so comment
    # out the CREATE ROLE statement to avoid a spurious error.
    sql = sql.replace('CREATE ROLE postgres;', '-- CREATE ROLE postgres;')

    # Now write the SQL.
    with open(tfpx, 'w', encoding='utf-8') as ofp:
        ofp.write(sql)
    if not os.path.exists(tfpx):
        err(f'file does not exist: {tfpx}')

    # Write to the database.
    cmd = f'docker exec {name} psql -d {dbname} -U {user} -f {tfpi}'
    tmax = 10
    tcnt = 0
    while tcnt <= tmax:
        try:
            info(cmd)
            out = subprocess.check_output(cmd,
                                          stderr=subprocess.STDOUT,
                                          shell=True)
            break
        except subprocess.CalledProcessError as exc:
            tcnt += 1
            warn(f'try {tcnt} of {tmax}\n' + exc.output.decode('utf-8'))
            if tcnt == tmax:
                err(str(exc))
            time.sleep(5)
    debug(out.decode('utf-8'))
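A usage sketch (the conf values are illustrative assumptions):

conf = {'pg': {'name': 'pg', 'dbname': 'postgres',
               'username': 'postgres', 'mnt': 'pg/mnt'}}
load(conf, 'SELECT 1;')
# retries: docker exec pg psql -d postgres -U postgres -f /mnt/mnt<pid>.sql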
Example No. 11
def main():
    '''Status command main.

    This is the command line entry point for the status command.

    It lists the status of all grape containers running on
    the current system.
    '''
    opts = getopts()
    initv(opts.verbose)
    info('status')
    client = docker.from_env()
    containers = client.containers.list(filters={'label': 'grape.type'})

    # Collect report rows for each column.
    cols = {
        'created': Column('Created'),
        'id': Column('Id'),
        'elapsed': Column('Elapsed'),
        'image': Column('Image'),
        'name': Column('Name'),
        'ports': Column('Port'),
        'started': Column('Started'),
        'status': Column('Status'),
        'type': Column('Type'),
        'version': Column('Version')
    }
    populate_columns(containers, cols)

    # Report the status for all of the containers.
    colnames = [
        'name', 'type', 'version', 'status', 'started', 'elapsed', 'id',
        'image', 'created', 'ports'
    ]
    ofp = sys.stdout
    if opts.verbose:
        if ofp == sys.stdout:
            ofp.write('\x1b[34m')
        for key in colnames:
            ofp.write(cols[key].hdr())
        if ofp == sys.stdout:
            ofp.write('\x1b[0m')
        ofp.write('\n')

    for i in range(cols['name'].size()):
        for key in colnames:
            ofp.write(cols[key].get(i))
        ofp.write('\n')

    info('done')
Example No. 12
def load_fmap(conf: dict, recs: list) -> dict:
    '''Map the grafana folders from the old ids to the new ones.

    This must be done after the new folders have been uploaded.

    Args:
        conf: The configuration data.
        recs: The grafana setup data for folders.

    Returns:
        map: The folder map with the correct ids.
    '''
    fmapn = {}
    for rec in recs:  # old folders
        fid = rec['id']
        title = rec['title']
        fmapn[title] = {'old': fid, 'new': -1}

    # Get the new folders.
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    auth = (conf['gr']['username'], conf['gr']['password'])
    url = conf['gr']['url'] + '/api/folders?limit=100'
    info(f'downloading folders from {url}')
    try:
        response = requests.get(url,
                                auth=auth,
                                headers=headers)
    except requests.ConnectionError as exc:
        err(str(exc))
    if response.status_code != 200:
        err(f'download failed with status {response.status_code} to {url}')
    folders = response.json()  # these are the new folders

    # Now map them.
    for rec in folders:
        fid = rec['id']
        title = rec['title']
        fmapn[title]['new'] = fid

    # Map the old folder ids to the new folder ids.
    fmap = {val['old']: val['new'] for val in fmapn.values()}

    return fmap
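A worked example of the id remapping (illustrative values):

# Old folders (recs):          [{'id': 7, 'title': 'Demo'}]
# New folders (from the API):  [{'id': 2, 'title': 'Demo'}]
# fmapn after both passes:     {'Demo': {'old': 7, 'new': 2}}
# Returned fmap:               {7: 2}
# load_dashboards() then rewrites folderId 7 to 2.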
Example No. 13
def read_all_services(burl: str, auth: tuple) -> dict:
    '''Read the complete grafana state from an external server and
    save it.

    The services are the datasources, folders and dashboards.

    Args:
        burl: The base URL.
        auth: The auth tuple.

    Returns:
        state: The datasources, folders and dashboards.
    '''
    info('reading grafana')

    # Read the datasources.
    datasources = read_service(burl, auth, 'api/datasources')

    # Read the folders.
    folders = read_service(burl, auth, 'api/folders?limit=100')
    info(f'read {len(folders)} folders')
    # Collect the unique folder ids, preserving order.
    fids: List[int] = list(dict.fromkeys(rec['id'] for rec in folders))
    if not fids:
        # The General folder always exists.
        fids = [0]

    # Read the dashboards.
    dashboards = []
    for fid in fids:
        recs = read_service(burl, auth, f'api/search?folderIds={fid}')
        for rec in recs:
            uid = rec['uid']
            dash = read_service(burl, auth, f'api/dashboards/uid/{uid}')
            dash['folderId'] = fid
            dashboards.append(dash)

    result = {
        'datasources': datasources,
        'folders': folders,
        'dashboards': dashboards,
    }
    info(f'{len(result["datasources"])} datasources')
    info(f'{len(result["folders"])} folders')
    info(f'{len(result["dashboards"])} dashboards')
    return result
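A usage sketch (the URL and credentials are illustrative):

state = read_all_services('http://127.0.0.1:3000', ('admin', 'admin'))
# state['datasources'], state['folders'] and state['dashboards'] hold
# the JSON records; each dashboard also carries the 'folderId' key
# added above.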
Example No. 14
def delete_containers(conf: dict):
    '''Delete the docker containers.

    Args:
        conf: The configuration data.
    '''
    client = docker.from_env()
    for key in ['gr', 'pg']:
        cname = conf[key]['cname']
        containers = client.containers.list(filters={'name': cname})
        if containers:
            for container in containers:
                info(f'deleting container by name: "{cname}"')
                container.stop()  # with remove=True the container is also deleted on stop
                time.sleep(3)
        else:
            info(f'container does not exist: "{cname}"')
Example No. 15
def main():
    '''Tree command main.

    This is the command line entry point for the tree command.

    It presents a tree view of one or more grafana servers.
    '''
    opts = getopts()
    initv(opts.verbose)
    info('tree')
    container = check_port(opts.grxport)
    burl = f'http://127.0.0.1:{opts.grxport}'
    name = container.name + ':' + str(opts.grxport)
    root = collect(burl, DEFAULT_AUTH, name)
    if opts.fname:
        with open(opts.fname, 'w', encoding='utf-8') as ofp:
            print_tree(opts, ofp, root)
    else:
        print_tree(opts, sys.stdout, root)
    info('done')
Example No. 16
def load(conf: dict) -> dict:
    '''Load the zip state data.

    The state data that describes a project is stored in
    a zip file with known files. This function encapsulates
    reading them.

    The known files are: conf.json, gr.json and pg.sql.

    The conf.json file contains project configuration data.

    The gr.json file contains the grafana server datasources,
    folders and dashboards setup.

    The pg.sql file contains the database setup.

    The dictionary that is returned has three top level
    keys: 'conf', 'gr' and 'pg', one for each file read.

    Args:
        conf: The configuration data.

    Returns:
        conf: The configuration data from each file.
    '''
    ofn = conf['file']
    if not os.path.exists(ofn):
        err(f'archive file does not exist: {ofn}')

    info(f'loading from {ofn}')
    with ZipFile(ofn, 'r') as zfp:
        zfn = 'conf.json'
        with zfp.open(zfn) as ifp:
            info(f'loading {zfn} from {ofn}')
            zconf = json.loads(ifp.read().decode('utf-8'))

        zfn = 'gr.json'
        with zfp.open(zfn) as ifp:
            info(f'loading {zfn} from {ofn}')
            zgr = json.loads(ifp.read().decode('utf-8'))

        zfn = 'pg.sql'
        with zfp.open(zfn) as ifp:
            info(f'loading {zfn} from {ofn}')
            sql = ifp.read().decode('utf-8')
    result = {
        'conf': zconf,
        'gr': zgr,
        'pg': sql,
    }
    return result
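A usage sketch (the archive name is illustrative):

conf = {'file': 'example.zip'}
state = load(conf)
zconf = state['conf']  # project configuration (dict)
zgr = state['gr']      # grafana datasources, folders, dashboards (dict)
sql = state['pg']      # database restore SQL (str)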
Example No. 17
def save(conf: dict) -> str:
    '''Save the database by reading the contents.

    This is the same as a backup command and the only reasonable way
    to do it is by using the pg_dumpall command.

    Args:
        conf: The configuration data.

    Returns:
        sql: The SQL to restore the database.
    '''
    info('reading the database')
    name = conf['pg']['name']
    user = conf['pg']['username']
    cmd = f'docker exec {name} pg_dumpall -U {user}'
    try:
        info(cmd)
        out = subprocess.check_output(cmd, shell=True)
    except subprocess.CalledProcessError as exc:
        warn(str(exc))
        out = b'-- no pg docker container'
    sql = out.decode('utf-8')
    info(f'read {len(sql)} bytes of sql for the database')
    return sql
Example No. 18
def read_service(burl: str, auth: tuple, service: str) -> dict:
    '''Read data for a single grafana service.

    Args:
        burl: The base URL for the service.
        auth: The auth tuple.
        service: The grafana REST service.

    Returns:
        response: The JSON from the URL request.
    '''
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    url = f'{burl}/{service}'
    info(f'reading {url}')
    try:
        response = requests.get(url, auth=auth, headers=headers)
    except requests.ConnectionError as exc:
        err(str(exc))
    if response.status_code != 200:
        err(f'request to {url} failed with status {response.status_code}\n'
            f'{json.dumps(response.json(), indent=2)}')
    result = response.json()
    return result
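A usage sketch (the URL and credentials are illustrative):

datasources = read_service('http://127.0.0.1:3000',
                           ('admin', 'admin'),
                           'api/datasources')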
Example No. 19
def create_containers(conf: dict, wait: float):
    '''Create the docker containers.

    Args:
        conf: The configuration data.
        wait: The container create wait time.
    '''
    create_start(conf, 'pg')  # postgresql
    create_start(conf, 'gr')  # grafana
    client = docker.from_env()
    num = 0
    for key in ['gr', 'pg']:
        kconf = conf[key]
        cname = kconf['cname']
        containers = client.containers.list(filters={'name': cname})
        if containers:
            info(f'container already exists: "{cname}"')
            continue

        # Create the volume mounted subdirectories with the proper
        # permissions.
        kwargs = kconf['client.containers.run']
        for key1 in kwargs['volumes']:
            try:
                os.makedirs(key1)
                os.chmod(key1, 0o775)
            except FileExistsError as exc:
                info(str(exc))  # this is perfectly fine

        ports = kconf['ports']
        info(f'creating container "{cname}": {ports}')
        try:
            client.containers.run(**kwargs)
        except docker.errors.DockerException as exc:
            # cobj was never bound if run() raised, so no logs are available here
            err(f'container failed to run: "{cname}" - {exc}')
        num += 1

    if wait:
        create_container_init(conf, wait)
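The kwargs dictionary is passed straight to the docker SDK. For the status command's label filter (Example No. 11) to find these containers, the run kwargs presumably also carry labels; a hedged sketch of an equivalent direct call (all values are illustrative assumptions):

import docker

client = docker.from_env()
client.containers.run(
    image='grafana/grafana',
    name='gr',
    hostname='gr',
    detach=True,
    remove=True,
    environment=['GF_SECURITY_ADMIN_PASSWORD=admin'],
    ports={'3000/tcp': 4600},
    volumes={'/home/user/gr/mnt': {'bind': '/mnt', 'mode': 'rw'}},
    labels={'grape.type': 'gr'},  # assumption: enables the status filter
)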
Example No. 20
def xexport(conf: dict, xconf: str):
    '''
    Export to an external grafana server system.

    This is meant to be the inverse operation to
    the importing.

    This operation requires an import conf file
    as well as a load zip file.

    Args:
        conf: The configuration data.
        xconf: The external grafana configuration data.
    '''
    info('export')

    # Collect the data in the import yaml file.
    # Create a password map so that the passwords
    # for the databases can be restored.
    iconf = get_xconf(xconf)
    pmap: Dict[str, Any] = {}
    if 'databases' in iconf:
        for database in iconf['databases']:
            for key in database:
                # Allow the user to specify an arbitrary key like
                # 'name' or 'database'.
                if key == 'password':
                    continue
                name = database[key]
                password = database['password']
                if key not in pmap:
                    pmap[key] = {}
                pmap[key][name] = password

    # Collect the data in the save zip file.
    result = zp_load(conf)
    zgr = result['gr']

    # Fix the passwords in zgr.
    # Everything else should be fine.
    if pmap:
        for rec in zgr['datasources']:
            if not rec['password']:
                # The password is not defined.
                # If it is defined, it is not changed because the user
                # changed it manually by editing the zip contents.
                for key, prec in pmap.items():
                    if key not in rec:
                        continue
                    name = rec[key]
                    if name not in prec:
                        continue
                    password = prec[name]
                    rec['password'] = password

    # Fix the conf to write to the external source.
    conf['gr']['username'] = iconf['username']
    conf['gr']['password'] = iconf['password']
    conf['gr']['url'] = iconf['url']

    # Remove the local datasource.
    name = conf['pg']['name']
    if any(rec.get('name') == name for rec in zgr['datasources']):
        info(f'removing the local database datasource: {name}')
        zgr['datasources'] = [rec for rec in zgr['datasources']
                              if rec.get('name') != name]

    # Write the grafana configuration out.
    gr_load_all(conf, zgr)
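A worked example of the password map (illustrative values):

# With iconf['databases'] == [{'name': 'demo-db', 'password': 'secret'}]
# the loop builds pmap == {'name': {'demo-db': 'secret'}}, so a
# datasource record whose rec['name'] is 'demo-db' and whose password
# is empty gets 'secret' restored before the upload.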
Example No. 21
def create_container_init(conf: dict, waitval: float):  # pylint: disable=too-many-locals
    '''Initialize the containers.

    Wait for the containers to initialize by looking
    for messages in the logs.

    This search allows initialization to complete
    faster than just doing a simple wait.

    Args:
        conf: The configuration data.
        waitval: The container create wait time in seconds.
    '''
    # This is a heuristic that does a short wait to give docker
    # sufficient time to start to define the new containers before we
    # start to query them.
    #
    # In particular, this significantly reduces the chance
    # that the docker.errors.NotFound exception will be
    # raised.
    #
    # One second is probably overkill.
    time.sleep(1)
    client = docker.from_env()

    # The values below are heuristic based on empirical observation of
    # the logs. They may have to change based on versions of docker.
    # Values entries must be in lowercase, they are used for pattern
    # matching in the docker logs.
    recs = [
        {
            'key': 'gr',
            'values': ['created default admin', 'http server listen']
        },
        {
            'key': 'pg',
            'values': ['database system is ready to accept connections']
        },
    ]

    # Define the sleep interval.
    # Try to report status about every 2 seconds or so based on elapsed time.
    sleep = 0.1  # time to sleep
    smodval = max(2, int(2. / sleep))  # report approximately every 2s

    # Wait for the containers to initialize.
    for rec in recs:
        key = rec['key']
        values = rec['values']
        name = conf[key]['name']
        info(
            f'checking container initialization status of "{name}" with max wait: {waitval}'
        )

        # Load the containers.
        # Note that the containers.get() and the logs() operations
        # are glommed together under the same timeout because the user
        # only cares about the total time.
        try:
            cobj = client.containers.get(name)
        except docker.errors.DockerException as exc:
            # cobj was never bound if get() raised, so no logs are available here
            err(f'container failed to initialize: "{name}" - {exc}')

        # Read the container logs.
        start = time.time()
        logs = ''
        i = 0
        while True:
            try:
                logs = cobj.logs(tail=20).decode('utf-8')
                done = False
                for value in values:
                    if value in logs.lower():
                        elapsed = time.time() - start
                        info(
                            f'container initialized: "{name}" after {elapsed:0.1f} seconds'
                        )
                        done = True  # initialization was successful, based on log pattern match
                        break
                if done:
                    break
            except docker.errors.DockerException as exc:
                logs = cobj.logs().decode('utf-8')  # provide the full log
                err(f'container failed to initialize: "{name}" - {exc}\n{logs}')

            elapsed = time.time() - start
            if elapsed <= waitval:
                i += 1
                if (i % smodval) == 0:
                    info('   container not initialized yet, will try again: '
                         f'{name} ({elapsed:0.1f}s)')
                time.sleep(sleep)
            else:
                # Worst case is that we simply wait the maximum time.
                logs = cobj.logs().decode('utf-8')  # provide the full log
                err(f'container failed to initialize: "{name}"\nData: {logs}')