Example #1
0
def instances_list(p_isJSON,
                   p_isVERBOSE,
                   p_region,
                   p_instance,
                   p_instance_type=None,
                   p_cloud=None,
                   p_resource=None):
    """List cloud instances matching the region/instance/resource filters.

    Emits a JSON envelope when p_isJSON is set; otherwise prints a
    formatted table unless p_isVERBOSE suppresses it.  Any failure is
    routed through util.exit_message().  Returns 0.
    """
    try:
        from cloud import CloudConnection

        # Filter criteria handed straight through to the cloud client.
        filters = {
            'region': p_region,
            'instance': p_instance,
            'resource': p_resource,
        }

        conn = CloudConnection(cloud=p_cloud)
        client = conn.get_client(instance_type=p_instance_type)
        client.isJson = p_isJSON
        client.verbose = p_isVERBOSE

        rows = client.get_list(filters)

        if p_isJSON:
            print(json.dumps([{'data': rows, 'state': 'completed'}]))
        elif not p_isVERBOSE:
            keys = client.header_keys
            headers = client.header_titles
            print("")
            print(api.format_data_to_table(rows, keys, headers))
    except Exception as e:
        util.exit_message(str(e), 1, p_isJSON)
    return (0)
Example #2
0
def list_apt_packages(p_repo, p_isJSON):
    """List the components published in an APT repo's Packages metadata.

    Scans the cached Packages file line by line, collecting the
    "Package:", "Version:" and "Filename:" stanza fields; a component is
    reported only when its version string carries the ".pgdg<cli_ver>"
    suffix.  Output is JSON (p_isJSON) or a formatted table.

    Fix: `version` is now initialized before the loop so a "Filename:"
    line occurring before any "Version:" line can no longer raise
    UnboundLocalError.
    """
    cli_ver = get_cli_ver(p_repo)
    raw_list = util.read_file_string(get_apt_repo_file_name())
    repoList = []
    repoDict = {}
    version = ""  # guards the "Filename:" branch below
    for line in raw_list.splitlines():
        data = line.split()
        if len(data) != 2:
            continue
        if data[0] == "Package:":
            repoDict['component'] = data[1]
        if data[0] == "Version:":
            version = data[1]

        if data[0] == "Filename:":
            repoDict['filename'] = data[1]
            # strip the ".pgdg<ver>" suffix; entries without it are skipped
            p1 = version.find(".pgdg" + cli_ver)
            if p1 > 0:
                repoDict['version'] = version[0:p1]
                repoList.append(repoDict)
            repoDict = {}

    keys = ['component', 'version']
    headers = ['Component', 'Version']

    if p_isJSON:
        print(json.dumps(repoList, sort_keys=True, indent=2))
    else:
        print(api.format_data_to_table(repoList, keys, headers))

    return (0)
Example #3
0
def list(p_isJSON):
    """Show which PGDG repos apply to this OS and whether each is installed."""
    repo_dict = get_json_file(PGDG_REPO_LIST, p_isJSON)
    os = util.get_os()

    lList = []
    for rl in repo_dict:
        if rl['os'] != os:
            continue
        status = "Installed" if is_installed(rl['repo']) else ""
        lList.append({'repo': rl['repo'], 'status': status})

    # Nothing matched this platform -> hard exit with a message.
    if not lList:
        msg = "No repo's available for os = " + os
        util.exit_message(msg, 1, p_isJSON)

    keys = ['repo', 'status']
    headers = ['Repo', 'Status']

    if p_isJSON:
        print(json.dumps(lList, sort_keys=True, indent=2))
    else:
        print(api.format_data_to_table(lList, keys, headers))

    return 0
Example #4
0
def list_defaults(p_isJSON):
    """Print every row of the `defaults` table, as JSON or as a table."""
    try:
        c = con.cursor()
        sql = "SELECT user_group, user_name, section, d_key, d_value \n" + \
              "  FROM defaults \n" + \
              "ORDER BY 1, 2, 3, 4"
        c.execute(sql)
        defaults = []
        for data in c.fetchall():
            # Column order mirrors the SELECT list above.
            defaults.append({
                'group': str(data[0]),
                'user': str(data[1]),
                'section': str(data[2]),
                'key': str(data[3]),
                'value': str(data[4]),
            })

    except Exception as e:
        fatal_sql_error(e, sql, "meta.list_defaults()")

    if p_isJSON:
        print(json.dumps(defaults, sort_keys=True, indent=2))
    else:
        print("")
        print(api.format_data_to_table(
            defaults,
            ["group", "user", "section", "key", "value"],
            ["Group", "User", "Section", "Key", "Value"]))

    return (0)
Example #5
0
def cloud_metalist(p_isJSON,
                   p_isVERBOSE,
                   p_type,
                   p_region,
                   p_version=None,
                   cloud="aws",
                   instance_type="db",
                   group=""):
    """Print cloud metadata (instance classes, RDS versions, resource
    groups, instance types, storage accounts, or subnet groups) for the
    requested p_type, as JSON or as a formatted table.  Returns 0.
    """
    from cloud import CloudConnection

    conn = CloudConnection(cloud=cloud)
    client = conn.get_client(instance_type=instance_type, region=p_region)

    if p_type == "instance-class":
        col_names = ["DBInstanceClass", "v_cpu", "memory", "iops",
                     "network_performance"]
        col_titles = ["DBInstanceClass", "vCPU", "Memory", "IOPS Optimized",
                      "Network Performance"]
        # Seed the class lookup with the aws-rds metadata catalog.
        rds_meta = meta_list("", "", "aws-rds", "", True)
        rows = client.instance_class(p_version,
                                     db_instance_meta_list=rds_meta)
    elif p_type == "rds-versions":
        col_names = ["DBEngineVersionDescription", "EngineVersion"]
        col_titles = ["DBEngineVersionDescription", "EngineVersion"]
        rows = client.rds_versions()
    elif p_type == "res-group":
        col_names = ["name", "region"]
        col_titles = ["ResourceGroup", "Region"]
        rows = client.get_resource_group()
    elif p_type == "instance-type":
        col_names = ["name", "number_of_cores", "resource_disk_size_in_mb",
                     "memory_in_mb", "max_data_disk_count",
                     "os_disk_size_in_mb"]
        col_titles = ["Name", "number_of_cores", "resource_disk_size_in_mb",
                      "memory_in_mb", "max_data_disk_count",
                      "os_disk_size_in_mb"]
        rows = client.get_instance_types(p_region)
    elif p_type == "storage-accounts":
        col_names = ["name", "location"]
        col_titles = ["Name", "location"]
        rows = client.storage_accounts(group)
    else:
        # Fallback: VPC subnet groups.
        col_names = ["vpc", "subnet_group"]
        col_titles = ["VPC", "Subnet Group"]
        rows = client.subnet_groups(res_group=group)

    if p_isJSON:
        print(json.dumps(rows, indent=2))
    else:
        print(api.format_data_to_table(rows, col_names, col_titles))
    return (0)
Example #6
0
def list(p_isJSON):
    """Display the lab registry, as JSON or as a formatted table."""
    labs = meta.get_lab_list()

    if p_isJSON:
        print(json.dumps(labs, sort_keys=True, indent=2))
        return 0

    print(api.format_data_to_table(
        labs,
        ['lab', 'disp_name', 'enabled', 'credit_to'],
        ['Lab', 'Name', 'Enabled?', 'Credits']))

    return 0
Example #7
0
def meta_list(p_isJSON, p_isVERBOSE, p_meta, p_instance, return_dict=False):
    """Fetch <REPO>/<p_meta>.txt (tab separated) and list its rows.

    Row 1 holds column names, row 2 column titles, remaining rows data.
    When p_instance is non-empty only rows whose first column equals it
    are kept.  Returns the parsed rows when return_dict is True,
    otherwise prints JSON or a table and returns 0.
    """
    repo = util.get_value("GLOBAL", "REPO")
    url = repo + "/" + p_meta + ".txt"
    try:
        response = urllib2.urlopen(url, timeout=15)
        meta_string = response.read()
    except Exception as e:
        util.exit_message("Cannot retrieve METALIST '" + url + "'", 1,
                          p_isJSON)

    rows = []
    for line_no, line in enumerate(meta_string.splitlines(), start=1):
        word = [w.strip() for w in line.split("\t")]

        ## header rows
        if line_no == 1:
            # row 1 contains column names
            num_cols = len(word)
            col_names = word
            continue
        if line_no == 2:
            # row 2 contains column titles
            col_titles = word
            continue

        ## detail rows: skip malformed rows and non-matching instances
        if len(word) != num_cols:
            continue
        if p_instance and word[0] != p_instance:
            continue
        rows.append(dict(zip(col_names, word)))

    if return_dict:
        return rows
    if p_isJSON:
        print(json.dumps(rows, sort_keys=True, indent=2))
    else:
        print("")
        print(api.format_data_to_table(rows, col_names, col_titles))

    return (0)
Example #8
0
def pglist(p_isJSON, p_isVERBOSE, p_region="", p_email=""):
    """List pgDevOps server registrations whose owner email matches p_email.

    Reads the pgDevOps schema (server/user/servergroup); any failure is
    treated as "pgDevOps not installed" and exits with a message.
    """
    try:
        c = con.cursor()
        sql = "SELECT u.email, g.name as server_group, s.name as server_name, \n" + \
              "       s.host, s.port, s.maintenance_db as db, s.username as db_user, \n" + \
              "       s.id as sid, g.id as gid, s.password as pwd \n" + \
              "  FROM server s, user u, servergroup g \n" + \
              " WHERE s.user_id = u.id AND s.servergroup_id = g.id \n" + \
              "   AND u.email LIKE ? \n" + \
              "ORDER BY 1, 2, 3"
        c.execute(sql, [p_email])

        # Column order mirrors the SELECT list above; row[9] (pwd) is
        # only probed for presence, never exposed.
        fields = ['email', 'server_group', 'server_name', 'host', 'port',
                  'db', 'db_user', 'sid', 'gid']
        svrs = []
        for row in c.fetchall():
            svr = {f: str(row[i]) for i, f in enumerate(fields)}
            svr['has_pwd'] = bool(row[9])
            svrs.append(svr)
    except Exception as error:
        msg = "pgDevOps must be installed & initialized."
        util.exit_message(msg, 1, p_isJSON)

    keys = [
        'email', 'server_group', 'server_name', 'host', 'port', 'db', 'db_user'
    ]
    headers = [
        'Email Address', 'Server Group', 'Server Name', 'Host', 'Port', 'DB',
        'DB User'
    ]

    if p_isJSON:
        print(json.dumps(svrs, sort_keys=True, indent=2))
    else:
        print("")
        print(api.format_data_to_table(svrs, keys, headers))

    return (0)
Example #9
0
def list_packages(p_repo, p_SHOWDUPS, p_isJSON, p_isEXTRA):
    """List the packages available in a registered yum/apt REPO.

    APT-based platforms are delegated to list_apt_packages().
    Otherwise a `yum` command is composed, piped through awk to apply
    the repo's package filter (unless p_isEXTRA) and to drop
    debug/docs packages, and the surviving lines are rendered as JSON
    or a table.

    Fix: the awk filter previously used the string literal '\.', an
    unrecognized Python escape that only works because CPython passes
    it through (and warns on modern versions); it now spells the
    backslash explicitly ('\\.') so the emitted awk regex is identical
    and warning-free.
    """
    if not is_repo(p_repo, p_isJSON):
        util.exit_message(p_repo + " is not a valid REPO.", 1, p_isJSON)

    [repo_type, name, url, package, key,
     pkg_filter] = get_repo(p_repo, p_isJSON)

    if not is_installed(p_repo):
        util.exit_message(p_repo + " is not registered.", 1, p_isJSON)

    options = ""
    if p_SHOWDUPS:
        options = "--showduplicates"

    # Debian/Ubuntu style repos take a completely different path.
    if util.get_os() in APT_LIST:
        return list_apt_packages(p_repo, p_isJSON)

    os = util.get_os()
    if os == "el6":
        cmd = "yum list all | grep " + p_repo
    else:
        cmd = "yum repo-pkgs " + p_repo + " list " + options
    cmd = cmd + " | awk '"

    ## filter package list unless asked to show --extra or --test
    kount = 0
    if not p_isEXTRA:
        for p in pkg_filter:
            kount = kount + 1
            # escape dots so awk matches them literally ("\." in the regex)
            ps = "/" + p.replace('.', '\\.') + "/"
            if kount > 1:
                cmd = cmd + " || " + ps
            else:
                cmd = cmd + ps

    cmd = "sudo " + cmd + " { print }' | awk '!/debug/ && !/docs/ { print }'"
    outp = util.getoutput(cmd)
    my_logger.info("\n$ " + cmd + "\n\n" + str(outp))

    repoList = []
    for line in outp.splitlines():
        data = line.split()
        if len(data) != 3:
            continue

        repoDict = {}

        # "name.arch" -> keep only the name part.
        # NOTE(review): assumes data[0] contains a '.'; if not, find()
        # returns -1 and the slice drops the last character -- confirm
        # against real yum output.
        p1 = data[0].find('.')
        pkg_nm = data[0][0:p1]
        p2 = data[1].find('.rhel')
        if p2 > 0:
            pkg_ver = data[1][0:p2]
        else:
            pkg_ver = data[1]
        status = ""
        if data[2].startswith("@"):
            # yum marks installed packages with a leading "@"
            status = "Installed"

        repoDict['component'] = pkg_nm
        repoDict['version'] = pkg_ver
        repoDict['status'] = status
        if pkg_nm > "":
            repoList.append(repoDict)

    keys = ['component', 'version', 'status']
    headers = ['Component', 'Version', 'Status']

    if p_isJSON:
        print(json.dumps(repoList, sort_keys=True, indent=2))
    else:
        print(api.format_data_to_table(repoList, keys, headers))

    return (0)
Example #10
0
def get_list(p_isOLD,
             p_isExtensions,
             p_isJSON,
             p_isTEST,
             p_showLATEST,
             p_comp=None,
             p_relnotes=None,
             p_return=False):
    """Build and display the component/extension catalog.

    Assembles up to three UNION'ed queries -- installed components,
    available (not yet installed) components, and extensions whose
    parent component is installed -- then massages each row into a dict
    and either returns the list (p_return), prints JSON (p_isJSON) or
    renders a table.

    NOTE: on the print paths this function ends with sys.exit(0); it
    only returns normally via p_return.

    :param p_isOLD:        also include non-current versions
    :param p_isExtensions: restrict the listing to extensions of p_comp
    :param p_isJSON:       emit JSON instead of a table
    :param p_isTEST:       not referenced in this body -- TODO confirm
    :param p_showLATEST:   only keep components released in the last 30 days
    :param p_comp:         parent component filter ("all" = no filter)
    :param p_relnotes:     attach HTML release notes (JSON mode only)
    :param p_return:       return the list of dicts instead of printing
    """
    # r_sup_plat = util.like_pf("r.sup_plat")
    r_sup_plat = "1 = 1"

    if p_isOLD:
        exclude_comp = ""
    else:
        # hide versions of components that are already installed
        exclude_comp = " AND v.component NOT IN (SELECT component FROM components)"

    parent_comp_condition = ""
    installed_category_conditions = " AND p.category > 0 "
    available_category_conditions = " AND p.is_extension = 0"
    ext_component = ""

    if p_isExtensions:
        installed_category_conditions = " AND p.is_extension = 1"
        available_category_conditions = " AND p.is_extension = 1"
        if p_comp != "all":
            # NOTE(review): p_comp is interpolated directly into SQL;
            # assumed to come from trusted CLI input -- confirm.
            ext_component = " AND parent = '" + p_comp + "' "

    # Columns (same positions in all three queries):
    #  0 category, 1 category_desc, 2 short_cat_desc, 3 component,
    #  4 version, 5 port, 6 status, 7 stage, 8 is_current, 9 datadir,
    # 10 is_extension, 11 parent, 12 release_date, 13 install_dt,
    # 14 disp_name, 15 current release_date
    installed = \
      "SELECT p.category, g.description as category_desc, g.short_desc as short_cat_desc, \n" + \
      "       c.component, c.version, c.port, c.status, r.stage, \n" + \
      "       coalesce((select is_current from versions where c.component = component AND c.version = version),0), \n" + \
      "       c.datadir, p.is_extension, \n" + \
      "       coalesce((select parent from versions where c.component = component and c.version = version),'') as parent, \n" + \
      "       coalesce((select release_date from versions where c.component = component and c.version = version),'20200101'), \n" + \
      "       c.install_dt, r.disp_name, \n" + \
      "       coalesce((select release_date from versions where c.component = component and is_current = 1),'20200101') \n" + \
      "  FROM components c, releases r, projects p, categories g \n" + \
      " WHERE c.component = r.component AND r.project = p.project \n" + \
      "   AND p.category = g.category \n"  + \
      "   AND " + r_sup_plat + installed_category_conditions + ext_component

    available = \
      "SELECT c.category, c.description, c.short_desc as short_cat_desc, v.component, v.version, 0, 'NotInstalled', \n" + \
      "       r.stage, v.is_current, '', p.is_extension, v.parent as parent, v.release_date, '', \n" + \
      "       r.disp_name, \n" + \
      "       coalesce((select release_date from versions where v.component = component and is_current = 1),'20200101') \n" + \
      "  FROM versions v, releases r, projects p, categories c \n" + \
      " WHERE v.component = r.component AND r.project = p.project \n" + \
      "   AND p.category = c.category \n" + \
      "   AND " + util.like_pf("v.platform") + " \n" + \
      "   AND " + r_sup_plat + exclude_comp + available_category_conditions + ext_component

    extensions = \
      "SELECT c.category, c.description, c.short_desc as short_cat_desc, v.component, v.version, 0, 'NotInstalled', \n" + \
      "       r.stage, v.is_current, '', p.is_extension, v.parent as parent, v.release_date, '', \n" + \
      "       r.disp_name,  \n" + \
      "       coalesce((select release_date from versions where v.component = component and is_current = 1),'20200101') \n" + \
      "  FROM versions v, releases r, projects p, categories c \n" + \
      " WHERE v.component = r.component AND r.project = p.project \n" + \
      "   AND p.is_extension = 1 AND p.category = c.category \n" + \
      "   AND " + util.like_pf("v.platform") + " \n" + \
      "   AND v.parent in (select component from components) AND " + r_sup_plat + exclude_comp

    if p_isExtensions:
        sql = installed + "\n UNION \n" + available + "\n ORDER BY 1, 3, 4, 6"
    else:
        sql = installed + "\n UNION \n" + available + "\n UNION \n" + extensions + "\n ORDER BY 1, 3, 4, 6"

    try:
        c = con.cursor()
        c.execute(sql)
        data = c.fetchall()

        headers = [
            'Category', 'Component', 'Version', 'ReleaseDt', 'Stage', 'Status',
            'Updates'
        ]
        keys = [
            'short_cat_desc', 'component', 'version', 'release_date', 'stage',
            'status', 'current_version'
        ]

        jsonList = []
        kount = 0
        previous_version = None
        previous_comp = None
        for row in data:
            compDict = {}
            kount = kount + 1

            category = str(row[0])
            category_desc = str(row[1])
            short_cat_desc = str(row[2])
            comp = str(row[3])
            version = str(row[4])
            port = str(row[5])

            # skip duplicate component/version pairs produced by the UNION
            if previous_comp and previous_version:
                if previous_comp == comp and previous_version == version:
                    continue

            previous_version = version
            previous_comp = comp

            # normalize status for display
            if str(row[6]) == "Enabled":
                status = "Installed"
            else:
                status = str(row[6])
            if status == "NotInstalled" and p_isJSON == False:
                status = ""

            stage = str(row[7])
            if stage in ("soon", "bring-own", "included"):
                continue

            # non-current, not-installed versions are hidden unless --old
            is_current = str(row[8])
            if is_current == "0" and status in ("", "NotInstalled"):
                if not p_isOLD:
                    continue

            # compare the installed/listed version with the current one
            current_version = get_current_version(comp)
            is_update_available = 0
            cv = Version.coerce(current_version)
            iv = Version.coerce(version)
            if cv > iv:
                is_update_available = 1

            if is_update_available == 0:
                updates = 0
                current_version = ""
            else:
                updates = 1

            # ports 0/1 are placeholders, not real listener ports
            if (port == "0") or (port == "1"):
                port = ""

            datadir = row[9]
            if row[9] is None:
                datadir = ""
            else:
                datadir = str(row[9]).strip()

            is_extension = row[10]

            parent = row[11]

            disp_name = row[14]

            release_desc = ''
            release_date = '1970-01-01'
            curr_rel_date = '1970-01-01'

            # release dates are stored as YYYYMMDD; reformat to ISO
            curr_rel_dt = str(row[15])
            rel_dt = str(row[12])
            if len(rel_dt) == 8:
                release_date = rel_dt[0:4] + "-" + rel_dt[4:6] + "-" + rel_dt[
                    6:8]
                curr_rel_date = curr_rel_dt[0:4] + "-" + curr_rel_dt[
                    4:6] + "-" + curr_rel_dt[6:8]

            compDict['is_new'] = 0

            # flag components released within the last 30 days
            try:
                rd = datetime.datetime.strptime(release_date, '%Y-%m-%d')
                today_date = datetime.datetime.today()
                date_diff = (today_date - rd).days

                if date_diff <= 30:
                    compDict['is_new'] = 1
                if p_showLATEST and date_diff > 30:
                    continue
            except Exception as e:
                pass

            # an installed postgres without a datadir has not been initdb'ed
            if util.is_postgres(comp):
                if port > "" and status == "Installed" and datadir == "":
                    status = "NotInitialized"
                    port = ""

            ins_date = str(row[13])
            install_date = ""
            compDict['is_updated'] = 0
            if ins_date:
                install_date = ins_date[0:4] + "-" + ins_date[
                    5:7] + "-" + ins_date[8:10]

                try:
                    insDate = datetime.datetime.strptime(
                        install_date, '%Y-%m-%d')
                    today_date = datetime.datetime.today()
                    date_diff = (today_date - insDate).days
                    if date_diff <= 30:
                        compDict['is_updated'] = 1
                except Exception as e:
                    pass

            # optionally render release notes (markdown -> HTML)
            # NOTE(review): unicode() exists only on Python 2; this
            # branch would NameError on Python 3 -- confirm runtime.
            if p_relnotes and p_isJSON:
                rel_version = version
                if current_version != "":
                    rel_version = current_version
                rel_notes = str(util.get_relnotes(comp, rel_version))
                markdown_text = unicode(rel_notes,
                                        sys.getdefaultencoding(),
                                        errors='ignore').strip()
                html_text = mistune.markdown(markdown_text)
                compDict['rel_notes'] = html_text

            compDict['category'] = category
            compDict['category_desc'] = category_desc
            compDict['short_cat_desc'] = short_cat_desc
            compDict['component'] = comp
            compDict['version'] = version
            compDict['is_extension'] = is_extension
            compDict['disp_name'] = disp_name
            compDict['release_desc'] = release_desc
            compDict['port'] = port
            compDict['release_date'] = release_date
            compDict['install_date'] = install_date
            compDict['curr_release_date'] = curr_rel_date
            compDict['status'] = status
            compDict['stage'] = stage
            compDict['updates'] = updates
            compDict['is_current'] = is_current
            compDict['current_version'] = current_version
            compDict['parent'] = parent
            jsonList.append(compDict)

        if p_return:
            return jsonList

        if p_isJSON:
            print(json.dumps(jsonList, sort_keys=True, indent=2))
        else:
            if len(jsonList) >= 1:
                if p_showLATEST:
                    print("New components released in the last 30 days.")
                print(api.format_data_to_table(jsonList, keys, headers))

    except Exception as e:
        fatal_error(e, sql, "meta.get_list()")
    sys.exit(0)
Example #11
0
def rdslist(p_isJSON,
            p_isVERBOSE,
            p_region="",
            p_instance="",
            p_email="",
            p_engines=["postgres"]):
    """List AWS RDS database instances across regions.

    Iterates every available RDS region (or just p_region when given),
    calls describe_db_instances(), keeps instances whose engine is in
    p_engines, optionally narrows to a single p_instance, and collects
    a flat dict per instance.  Output is a JSON envelope (p_isJSON), a
    verbose dump (p_isVERBOSE), or a summary table.  Returns 0.

    NOTE(review): p_engines is a mutable default argument; it is only
    read here, so no cross-call contamination is visible -- but confirm
    no caller mutates it.
    """
    # Treat None the same as "no filter".
    if p_region is None:
        p_region = ""
    if p_instance is None:
        p_instance = ""

    try:
        # Validate the requested region against what boto3 supports.
        rds_regions = []
        available_rds_regions = boto3.session.Session().get_available_regions(
            "rds")
        if p_region > "":
            if p_region in available_rds_regions:
                rds_regions = [p_region]
            else:
                msg = str(p_region) + " is not a valid region for rds."
                util.exit_message(msg, 1, p_isJSON)
        else:
            rds_regions = available_rds_regions

        # get all of the postgres db instances
        pg_list = []
        for region in rds_regions:
            msg = "Searching " + region + "..."
            util.message(msg, "info", p_isJSON)
            rds = boto3.client('rds', region_name=region)

            dbs = rds.describe_db_instances()
            for db in dbs['DBInstances']:
                if db['Engine'] in p_engines:
                    # ec2 client is needed later to resolve the VPC name
                    ec2 = boto3.client('ec2', region_name=region)
                    pg_dict = {}
                    pg_dict['engine'] = get_field(db, 'Engine')
                    pg_dict['region'] = region

                    pg_dict['instance'] = get_field(db, 'DBInstanceIdentifier')
                    # honor the single-instance filter
                    if p_instance > "":
                        if p_instance != pg_dict['instance']:
                            continue
                    pg_dict['arn'] = get_field(db, 'DBInstanceArn')

                    pg_dict['master_user'] = get_field(db, 'MasterUsername')
                    pg_dict['status'] = get_field(db, 'DBInstanceStatus')
                    pg_dict['address'] = get_field(db, 'Endpoint', 'Address')
                    pg_dict['port'] = get_field(db, 'Endpoint', 'Port')
                    pg_dict['dbname'] = get_field(db, 'DBName')

                    pg_dict['db_class'] = get_field(db, 'DBInstanceClass')
                    pg_dict['engine_version'] = get_field(db, 'EngineVersion')
                    pg_dict['auto_minor_upgrade'] = get_field(
                        db, 'AutoMinorVersionUpgrade')

                    # timestamps may be absent on instances mid-creation
                    try:
                        pg_dict['create_time'] = convert_tz(
                            get_field(db, 'InstanceCreateTime'))
                    except Exception as e:
                        pg_dict['create_time'] = ""

                    pg_dict['iops'] = get_field(db, 'Iops')

                    pg_dict['storage_allocated'] = get_field(
                        db, 'AllocatedStorage')
                    # expand the storage-type code into a friendly label
                    storage_type = get_field(db, 'StorageType')
                    if storage_type == 'standard':
                        pg_dict['storage_type'] = storage_type + ": Magnetic"
                    elif storage_type == 'gp2':
                        pg_dict[
                            'storage_type'] = storage_type + ": General Purpose (SSD)"
                    elif storage_type == 'io2':
                        pg_dict[
                            'storage_type'] = storage_type + ": Provisioned IOPS (SSD)"
                    else:
                        pg_dict['storage_type'] = storage_type
                    pg_dict['storage_encrypted'] = get_field(
                        db, 'StorageEncrypted')

                    pg_dict['maint_window'] = get_field(
                        db, 'PreferredMaintenanceWindow')
                    pg_dict['backup_window'] = get_field(
                        db, 'PreferredBackupWindow')
                    pg_dict['backup_retention'] = get_field(
                        db, 'BackupRetentionPeriod')

                    try:
                        pg_dict['latest_restorable'] = convert_tz(
                            get_field(db, 'LatestRestorableTime'))
                    except Exception as e:
                        pg_dict['latest_restorable'] = ""

                    pg_dict['az_is_multi'] = get_field(db, 'MultiAZ')
                    pg_dict['az_primary'] = get_field(db, 'AvailabilityZone')
                    pg_dict['az_secondary'] = get_field(
                        db, 'SecondaryAvailabilityZone')
                    pg_dict['publicly_accessible'] = get_field(
                        db, 'PubliclyAccessible')

                    pg_dict['monitoring_interval'] = get_field(
                        db, 'MonitoringInterval')
                    pg_dict['monitoring_resource_arn'] = get_field(
                        db, 'EnhancedMonitoringResourceArn')
                    pg_dict['monitoring_role_arn'] = get_field(
                        db, 'MonitoringRoleArn')

                    pg_dict['subnet_group'] = get_field(
                        db, 'DBSubnetGroup', 'DBSubnetGroupName')

                    # best effort: resolve the VPC id to its "Name" tag
                    pg_dict['vpc'] = ""
                    try:
                        vpc_id = get_field(db, 'DBSubnetGroup', 'VpcId')
                        pg_dict['vpc_id'] = vpc_id
                        pg_dict['vpc'] = vpc_id
                        my_vpcs = ec2.describe_vpcs(VpcIds=[
                            vpc_id,
                        ])
                        for my_vpc in my_vpcs['Vpcs']:
                            for tag in my_vpc['Tags']:
                                if tag['Key'] == "Name":
                                    vpc_name = tag['Value']
                                    pg_dict['vpc'] = vpc_name
                                    break
                    except Exception as e:
                        pass

                    # best effort: flag instances already known to pglist
                    try:
                        pg_dict['is_in_pglist'] = is_in_pglist(
                            p_email, pg_dict['region'], pg_dict['instance'],
                            pg_dict['address'], pg_dict['port'],
                            pg_dict['dbname'], pg_dict['master_user'])
                    except Exception as e:
                        pass
                    pg_list.append(pg_dict)
    except KeyboardInterrupt as e:
        util.exit_message("Keyboard Interrupt", 1, p_isJSON)
    except Exception as e:
        # report where the failure happened, then bail out
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        msg = "Unable to run rds.describe_db_instances().  \n" + str(e)
        util.exit_message(msg, 1, p_isJSON)

    if p_isJSON:
        json_dict = {}
        json_dict['data'] = pg_list
        json_dict['state'] = 'completed'
        print(json.dumps([json_dict]))
        return (0)

    if p_isVERBOSE:
        print_verbose(pg_list)
        return (0)

    keys = [
        'region', 'instance', 'status', 'dbname', 'db_class', 'vpc',
        'az_is_multi'
    ]
    headers = [
        'Region', 'Instance', 'Status', 'DBName', 'Class', 'VPC', 'MultiAZ'
    ]
    print("")
    print(api.format_data_to_table(pg_list, keys, headers))

    return (0)