Example #1
def init_k8s_client():
    config.load_kube_config()
    api = core_v1_api.CoreV1Api()
    return api
def main():
    
    args = get_args()

    # open the config file and load the test scenario
    with open("../%s" % args.configfile, "r") as f:
        data = yaml.load(f, Loader=yaml.FullLoader)

    config.load_kube_config()
    api = core_v1_api.CoreV1Api()

    # connect to the cluster and read the iperf3 test containers
    pods = api.list_pod_for_all_namespaces(label_selector="app=iperf3", watch=False)
    
    # check whether each pod is READY

    # update scenario data with real container name
    servers=[]
    targets=[]
    clients=[]
    stypes=[]
    ctypes=[]
    snamespaces=[]
    cnamespaces=[]
    mode=data['mode']
    proto=data['proto']

    for i in data['flows']:
        stypes.append(i['server']['type'])
        ctypes.append(i['client']['type'])
        targets.append(i['target'])
        if i['server']['type'] == 'kubernetes':
            for j in pods.items:
                if j.spec.hostname == i['server']['name']:
                    servers.append(j.metadata.name)
                    snamespaces.append(j.metadata.namespace)
        else:
            servers.append(i['server']['name'])
            snamespaces.append("")

        if i['client']['type'] == 'kubernetes':
            for j in pods.items:
                if j.spec.hostname == i['client']['name']:
                    clients.append(j.metadata.name)
                    cnamespaces.append(j.metadata.namespace)
        else:
            clients.append(i['client']['name'])
            cnamespaces.append("")
    f = open("/tmp/advlabtools-scenario-temp", "w")
    test_str = ""
    servers_str = ""
    for s in servers: servers_str += '"' + s + '" '
    test_str += 'servers=(' + servers_str + ')\n'
    targets_str = ""
    for s in targets: targets_str += '"' + s + '" '
    test_str += 'targets=(' + targets_str + ')\n'
    clients_str = ""
    for s in clients: clients_str += '"' + s + '" '
    test_str += "clients=(" + clients_str + ")\n"
    stypes_str = ""
    for s in stypes: stypes_str += '"' + s + '" '
    test_str += "stypes=(" + stypes_str + ")\n"
    ctypes_str = ""
    for s in ctypes: ctypes_str += '"' + s + '" '
    test_str += "ctypes=(" + ctypes_str + ")\n"
    snamespaces_str = ""
    for s in snamespaces: snamespaces_str += '"' + s + '" '
    test_str += "snamespaces=(" + snamespaces_str + ")\n"
    cnamespaces_str = ""
    for s in cnamespaces: cnamespaces_str += '"' + s + '" '
    test_str += "cnamespaces=(" + cnamespaces_str + ")\n"
    test_str += 'mode="' + mode + '"\n' + 'proto="' + proto + '"'
#    print test_str
    f.write(test_str)
    f.close()


    cmd = "../scripts/k8s_iperf3_peer.bash "
    cmd_options = get_option(args)

    print('Testing, wait {} seconds... '.format(args.time), end=" ")
    dirname = str(subprocess.check_output( cmd+cmd_options, shell=True, universal_newlines=True )).replace('\n','')
    print('done')

    json_files = glob.glob(dirname + "/*cl*.json")
    print("[Test Result]")
    print("| Number of peers: " + str(len(json_files)))

    csv_files = glob.glob(dirname + "/*esxtop*.csv")
    print("[Test Result]")
    print("| Number of hosts: " + str(len(csv_files)))
    print("")

    x={}
    y={}
    t=np.zeros(int(args.time))
    i=0

    init_plt()
    
    for file in json_files:
        print(file)
        with open(file, 'r') as f:
            perf_dict = json.load(f)

        if args.udp:
            print("| Avg Bandwidth(Gbps): " + str(perf_dict["end"]["sum"]["bits_per_second"] / 1000000000), end=" ")
            print("| Jitter(ms): " + str(perf_dict["end"]["sum"]["jitter_ms"]), end=" ")
            print("| Lost Packets: " + str(perf_dict["end"]["sum"]["lost_packets"]), end=" ")
            print("| Lost %: " + str(perf_dict["end"]["sum"]["lost_percent"]))
            print("| Sender CPU%: " + str(perf_dict["end"]["cpu_utilization_percent"]["host_total"]), end=" ")
            print("| Receiver CPU%: " + str(perf_dict["end"]["cpu_utilization_percent"]["remote_total"]))
        else:
            print("| Avg Bandwidth(Gbps): " + str(perf_dict["end"]["sum_received"]["bits_per_second"] / 1000000000), end=" ")
            print("| Retransmits: " + str(perf_dict["end"]["sum_sent"]["retransmits"])) 
            print("| Sender CPU%: " + str(perf_dict["end"]["cpu_utilization_percent"]["host_total"]), end=" ")
            print("| Receiver CPU%: " + str(perf_dict["end"]["cpu_utilization_percent"]["remote_total"]))

        x[i] = np.array(range(len(perf_dict["intervals"])))
        y[i] = np.array([])
        
        for p in perf_dict["intervals"]:
            y[i] = np.append(y[i], p["sum"]["bits_per_second"])
        
        plt.plot(x[i], y[i], color[i],marker="o",markersize=3)
        t=t+y[i]

        i=i+1
        
    plt.plot(x[0], t, "k", marker="X", markersize=5, linewidth=2)
    plt.show()

    # process esxtop CSV files
    for file in csv_files:
        init_plt_esxtop()

        print(file)
        print("| Red - CPU Total Util | Blue - CPU Total Core Util | Green - CPU Total Proc Time |")
        with open(file) as cf:
            reader = csv.DictReader(cf, delimiter=",")
            perf_dict = []
            for row in reader:
                perf_dict.append(row)

        # the X axis is scaled by 5 because esxtop samples once every 5 seconds
        x = (np.array(range(len(perf_dict))) + 1) * 5

        y_util = np.array([])
        y_core = np.array([])
        y_proc = np.array([])

        for p in perf_dict:
            rd = rdict(p)
            y_util = np.append(y_util, float(rd[key_util][0]))
            y_core = np.append(y_core, float(rd[key_core][0]))
            y_proc = np.append(y_proc, float(rd[key_proc][0]))

        plt.plot(x, y_util, "r", marker="o",markersize=3)
        plt.plot(x, y_core, "b", marker="o",markersize=3)
        plt.plot(x, y_proc, "g", marker="o",markersize=3)
        plt.show()
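The helpers init_plt() and init_plt_esxtop() are not shown in this snippet. A minimal sketch of what they might do, assuming they only prepare a fresh matplotlib figure (the axis labels are assumptions):

import matplotlib.pyplot as plt

def init_plt():
    # Hypothetical: new figure for the per-flow iperf3 bandwidth plot.
    plt.figure()
    plt.xlabel("interval (s)")
    plt.ylabel("bandwidth (bits/s)")
    plt.grid(True)

def init_plt_esxtop():
    # Hypothetical: new figure for the esxtop CPU utilization plot.
    plt.figure()
    plt.xlabel("time (s)")
    plt.ylabel("CPU %")
    plt.grid(True)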
Example #3
def status(args):
    try:
        dp = deployment.get_deployment(args.namespace, args.name)
        replicasets = deployment.get_replicasets(dp)
    except Exception as e:
        stderr.write('cannot load deployment {0}: {1}\n'.format(
            args.name, kubeutil.get_error(e)))
        exit(1)

    try:
        generation = dp['metadata']['annotations'][
            'deployment.kubernetes.io/revision']
    except KeyError:
        generation = '?'

    stdout.write("deployment {0}/{1}:\n".format(
        dp['metadata']['namespace'],
        dp['metadata']['name'],
    ))
    stdout.write(
        "  current generation is {0}, {2} replicas configured, {1} active replica sets\n"
        .format(
            generation,
            len(replicasets),
            dp['spec']['replicas'],
        ))
    stdout.write(
        "\n  active replicasets (status codes: * current, ! error):\n")

    for rs in replicasets:
        pods = deployment.get_rs_pods(rs)
        error = ' '

        try:
            revision = rs['metadata']['annotations'][
                'deployment.kubernetes.io/revision']
        except KeyError:
            revision = '?'

        if str(revision) == str(generation):
            active = '*'
        else:
            active = ' '

        try:
            nready = rs['status']['readyReplicas']
        except KeyError:
            error = '!'
            nready = 0

        errors = []
        try:
            for condition in rs['status']['conditions']:
                if condition['type'] == 'ReplicaFailure' and condition[
                        'status'] == 'True':
                    errors.append(condition['message'])
                    error = '!'
        except KeyError:
            pass

        stdout.write(
            "    {4}{5}generation {1} is replicaset {0}, {2} replicas configured, {3} ready\n"
            .format(rs['metadata']['name'], revision, rs['spec']['replicas'],
                    nready, active, error))

        for container in rs['spec']['template']['spec']['containers']:
            stdout.write("        container {0}: image {1}\n".format(
                container['name'],
                container['image'],
            ))

        for error in errors:
            stdout.write("        {0}\n".format(error))

        for pod in pods:
            try:
                phase = pod['status']['phase']
            except KeyError:
                phase = '?'

            stdout.write("        pod {0}: {1}\n".format(
                pod['metadata']['name'],
                phase,
            ))

            if 'status' in pod and 'containerStatuses' in pod['status']:
                for cs in pod['status']['containerStatuses']:
                    if 'waiting' in cs['state']:
                        try:
                            message = cs['state']['waiting']['message']
                        except KeyError:
                            message = '(no reason)'

                        stdout.write("          {0}: {1}\n".format(
                            cs['state']['waiting']['reason'],
                            message,
                        ))

    resources = None
    try:
        resources = json.loads(dp['metadata']['annotations']
                               ['kdtool.torchbox.com/attached-resources'])
    except KeyError:
        exit(0)
    except ValueError as e:
        stderr.write(
            "warning: could not decode kdtool.torchbox.com/attached-resources annotation: {0}\n"
            .format(str(e)))
        exit(0)

    if len(resources) == 0:
        exit(0)

    stdout.write("\nattached resources:\n")

    client = kubeutil.get_client()
    v1 = core_v1_api.CoreV1Api(client)
    extv1beta1 = extensions_v1beta1_api.ExtensionsV1beta1Api(client)

    services = [
        resource['name'] for resource in resources
        if resource['kind'] == 'service'
    ]
    for svc_name in services:
        service = v1.read_namespaced_service(svc_name, args.namespace)
        stdout.write("  service {0}: selector is ({1})\n".format(
            service.metadata.name,
            ", ".join([k + "=" + v for k, v in service.spec.selector.items()]),
        ))
        for port in service.spec.ports:
            stdout.write("    port {0}: {1}/{2} -> {3}\n".format(
                port.name, port.port, port.protocol, port.target_port))

    ingresses = [
        resource['name'] for resource in resources
        if resource['kind'] == 'ingress'
    ]

    for ing_name in ingresses:
        ingress = extv1beta1.read_namespaced_ingress(ing_name, args.namespace)
        stdout.write("  ingress {0}:\n".format(ingress.metadata.name))
        for rule in ingress.spec.rules:
            stdout.write("    http[s]://{0} -> {1}/{2}:{3}\n".format(
                rule.host,
                ingress.metadata.namespace,
                rule.http.paths[0].backend.service_name,
                rule.http.paths[0].backend.service_port,
            ))

    volumes = [
        resource['name'] for resource in resources
        if resource['kind'] == 'volume'
    ]

    for vol_name in volumes:
        volume = v1.read_namespaced_persistent_volume_claim(
            vol_name, args.namespace)
        if volume.status:
            stdout.write(
                "  volume {0}: mode is {1}, size {2}, phase {3}\n".format(
                    volume.metadata.name,
                    ",".join(volume.status.access_modes),
                    volume.status.capacity['storage'],
                    volume.status.phase,
                ))
        else:
            stdout.write("  volume {0} is unknown (not provisioned)\n".format(
                volume.metadata.name, ))

    databases = [
        resource['name'] for resource in resources
        if resource['kind'] == 'database'
    ]

    for db_name in databases:
        resource_path = ('/apis/torchbox.com/v1/namespaces/' +
                         dp['metadata']['namespace'] + '/databases/' + db_name)

        header_params = {}
        header_params['Accept'] = client.select_header_accept(
            ['application/json'])
        header_params['Content-Type'] = client.select_header_content_type(
            ['*/*'])
        header_params.update(kubeutil.config.api_key)

        (resp, code, header) = client.call_api(resource_path,
                                               'GET', {}, {},
                                               header_params,
                                               None, [],
                                               _preload_content=False)

        database = json.loads(resp.data.decode('utf-8'))
        if 'status' in database:
            stdout.write(
                "  database {0}: type {1}, phase {2} (on server {3})\n".format(
                    database['metadata']['name'],
                    database['spec']['type'],
                    database['status']['phase'],
                    database['status']['server'],
                ))
        else:
            stdout.write(
                "  database {0}: type {1}, unknown (not provisioned)\n".format(
                    database['metadata']['name'],
                    database['spec']['type'],
                ))
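kubeutil.get_client() and the deployment helpers belong to the surrounding kdtool project and are not included here. A minimal sketch of what get_client() is assumed to return, namely an ApiClient built from the local kubeconfig:

from kubernetes import config
from kubernetes.client import api_client

def get_client():
    # Hypothetical stand-in for kubeutil.get_client().
    config.load_kube_config()
    return api_client.ApiClient()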
Example #4
def create_users(cluster_object):
    core_api = core_v1_api.CoreV1Api()
    name = cluster_object['metadata']['name']
    namespace = cluster_object['metadata']['namespace']
    try:
        replicas = cluster_object['spec']['mongodb']['replicas']
    except KeyError:
        replicas = 3

    admin_credentials = read_secret('{}-admin-credentials'.format(name),
                                    namespace)
    admin_username = b64decode(
        admin_credentials.data['username']).decode('utf-8')
    admin_password = b64decode(
        admin_credentials.data['password']).decode('utf-8')

    monitoring_credentials = read_secret(
        '{}-monitoring-credentials'.format(name), namespace)
    monitoring_username = b64decode(
        monitoring_credentials.data['username']).decode('utf-8')
    monitoring_password = b64decode(
        monitoring_credentials.data['password']).decode('utf-8')

    mongo_command = '''
        admin = db.getSiblingDB("admin")
        admin.createUser(
          {{
            user: "******",
            pwd: "{}",
            roles: [ {{ role: "root", db: "admin" }} ]
          }}
        )
        admin.auth(
          "{}",
          "{}"
        )
        admin.createUser(
          {{
            user: "******",
            pwd: "{}",
            roles: [ {{ role: "clusterMonitor", db: "admin" }} ]
          }}
        )
    '''.format(admin_username, admin_password, admin_username, admin_password,
               monitoring_username, monitoring_password)

    for i in range(replicas):
        pod_name = '{}-{}'.format(name, i)
        exec_cmd = [
            'mongo', 'localhost:27017/admin', '--ssl', '--sslCAFile',
            '/etc/ssl/mongod/ca.pem', '--sslPEMKeyFile',
            '/etc/ssl/mongod/mongod.pem', '--eval', '{}'.format(mongo_command)
        ]
        exec_resp = stream(core_api.connect_get_namespaced_pod_exec,
                           pod_name,
                           namespace,
                           command=exec_cmd,
                           container='mongod',
                           stderr=True,
                           stdin=False,
                           stdout=True,
                           tty=False)

        if 'Successfully added user: {' in exec_resp:
            logging.info('created users for {} in ns/{}'.format(
                name, namespace))
            return True
        elif "Error: couldn't add user: not master :" in exec_resp:
            # most of the time member-0 is elected master
            # if it is not we get this error and need to
            # loop through members until we find the master
            continue
        else:
            logging.error('error creating users for {} in ns/{}\n{}'.format(
                name, namespace, exec_resp))
        return False
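read_secret() is not part of the snippet. Assuming it simply wraps CoreV1Api.read_namespaced_secret(), a sketch would be:

from kubernetes.client.apis import core_v1_api

def read_secret(name, namespace):
    # Hypothetical helper: returns a V1Secret whose .data values are
    # base64-encoded strings, as decoded above.
    return core_v1_api.CoreV1Api().read_namespaced_secret(name, namespace)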
Example #5
def _get_pod(name, namespace):
    pod = core_v1_api.CoreV1Api().read_namespaced_pod(name=name,
                                                      namespace=namespace)

    return pod
# Presumed enclosing class for these ANSI color constants (the class line is
# missing from the original snippet):
class Color:
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'

def get_numbers_from_filename(filename):
    return re.search(r'\d+', filename).group(0)

config.load_kube_config()
api_instance = client.CoreV1Api()
api = core_v1_api.CoreV1Api()

app = Flask(__name__)
Swagger(app)

# Functions
def validate_ns(ns):
    pretty = 'true'
    namespace_lst = []
    for i in api_instance.list_namespace(pretty=pretty).items:
        namespace_lst.append(i.metadata.name)
    if ns not in namespace_lst:
        return "invalid namespace " + ns + ", valid namespaces are: " + ", ".join(namespace_lst), 500
    
@app.route("/")
Example #7
def main():

    common.connect()
    api = core_v1_api.CoreV1Api()

    name = args.pod
    namespace = os.environ.get('RD_NODE_DEFAULT_NAMESPACE')

    log.debug("--------------------------")
    log.debug("Pod Name:  %s" % name)
    log.debug("Namespace: %s " % namespace)
    log.debug("--------------------------")

    resp = None
    try:
        resp = api.read_namespaced_pod(name=name, namespace=namespace)
    except ApiException as e:
        if e.status != 404:
            print("Unknown error: %s" % e)
            exit(1)

    if not resp:
        print("Pod %s does not exits." % name)
        exit(1)

    source_file = os.environ.get('RD_FILE_COPY_FILE')
    destination_file = os.environ.get('RD_FILE_COPY_DESTINATION')
    shell = os.environ.get('RD_CONFIG_SHELL')

    log.debug("Copying file from %s to %s" % (source_file, destination_file))

    # Calling exec interactively.
    exec_command = [shell]
    resp = stream(api.connect_get_namespaced_pod_exec,
                  name,
                  namespace,
                  command=exec_command,
                  stderr=True,
                  stdin=True,
                  stdout=True,
                  tty=False,
                  _preload_content=False)

    commands = []
    with open(source_file, "r") as source:
        commands.append("cat <<'EOF' >" + destination_file + "\n")
        commands.append(source.read())
        commands.append("EOF\n")

    while resp.is_open():
        resp.update(timeout=1)
        if resp.peek_stdout():
            print("STDOUT: %s" % resp.read_stdout())
        if resp.peek_stderr():
            print("STDERR: %s" % resp.read_stderr())

        if commands:
            c = commands.pop(0)
            resp.write_stdin(c)
        else:
            break

    resp.close()
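common.connect() is a project helper that is not included in the snippet; it presumably just loads the Kubernetes client configuration, for example:

from kubernetes import config

def connect():
    # Hypothetical stand-in for common.connect(): prefer in-cluster
    # configuration and fall back to the local kubeconfig.
    try:
        config.load_incluster_config()
    except config.ConfigException:
        config.load_kube_config()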
Example #8
 def __init__(self):
     config.load_kube_config(config_file='/root/zcc/hello/config')
     c = Configuration()
     c.assert_hostname = False
     Configuration.set_default(c)
     self.api = core_v1_api.CoreV1Api()
    def __init__(self, datacenter, **kwargs):
        """
        Initializes Kube context
        :param datacenter: datacenter name
        :param kwargs: Kube details
        """
        self.logger = logging.get_logger(self.__class__.__name__)
        self.datacenter = datacenter

        self.image = kwargs['image']
        self.name = kwargs['name']

        try:
            config.load_kube_config()
            self.apiclient = api_client.ApiClient()
            self.api = core_v1_api.CoreV1Api(self.apiclient)
            self.k8s_beta = client.ExtensionsV1beta1Api()
        except Exception as err:
            self.logger.error(
                "Error loading Kube config, Exception: {} ".format(err))
            sys.exit(1)

        if 'type' in kwargs and kwargs['type'] == "backend":
            self.logger.info("Deploying backend {}".format(self.name))
            self.version = kwargs['version']
            user = kwargs["user"]
        else:
            if 'task' in kwargs and kwargs['task'] == "rollback":
                self.version = kwargs["version"]
            else:
                img = Image(deploy=True, **kwargs)
                self.version = img.current_version()
            user = kwargs["cluster_config"]["APP_DATACENTER"]

        attributes = {
            "datacenter": self.datacenter,
            "version": self.version,
            "user": user,
            "env": {},
            "cpu": "250m",
            "limitscpu": "500m",
            "lbport": kwargs['port']
            if 'type' in kwargs and kwargs['type'] == "backend" else 80,
            "replicas": kwargs['count']
            if int(kwargs['count']) > 1 else self._getreplica(),
            "name": self.name,
            "registry": kwargs["app_docker_registry"],
            "deploy_app": deployment_app,
            "service_app": service_app,
            "patch_app": patch_app
        }
        attributes.update(kwargs)
        self.attributes = attributes
        self.replicas = int(self.attributes["replicas"])
        self.deploy_app = render(attributes['deploy_app'], attributes)
        self.service_app = render(attributes['service_app'], attributes)
        self.patch_app = render(attributes['patch_app'], attributes)
        self.logger.info(
            "Image processing name:{image}, version:{version}".format(
                image=self.image, version=self.version))
        self.logger.warn(
            "Image processing name:{image}, version:{version}".format(
                image=self.image, version=self.version))
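render() and the deployment_app/service_app/patch_app templates are defined elsewhere in this project. Assuming render() is a plain Jinja2 template render, a sketch would be:

from jinja2 import Template

def render(template_str, attributes):
    # Hypothetical: substitute the attribute dict into the manifest template.
    return Template(template_str).render(**attributes)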
Example #10
def main():

    common.connect()

    api = core_v1_api.CoreV1Api()

    namespace = os.environ.get('RD_CONFIG_NAMESPACE')
    name = os.environ.get('RD_CONFIG_NAME')
    container = os.environ.get('RD_CONFIG_CONTAINER_NAME')

    log.debug("--------------------------")
    log.debug("Pod Name:  %s" % name)
    log.debug("Namespace: %s " % namespace)
    log.debug("Container: %s " % container)
    log.debug("--------------------------")

    data = {}

    data["api_version"] = os.environ.get('RD_CONFIG_API_VERSION')
    data["name"] = os.environ.get('RD_CONFIG_NAME')
    data["container_name"] = os.environ.get('RD_CONFIG_CONTAINER_NAME')
    data["image"] = os.environ.get('RD_CONFIG_IMAGE')
    data["ports"] = os.environ.get('RD_CONFIG_PORTS')
    data["replicas"] = os.environ.get('RD_CONFIG_REPLICAS')
    data["namespace"] = os.environ.get('RD_CONFIG_NAMESPACE')
    data["labels"] = os.environ.get('RD_CONFIG_LABELS')
    if os.environ.get('RD_CONFIG_ENVIRONMENTS'):
        data["environments"] = os.environ.get('RD_CONFIG_ENVIRONMENTS')

    if os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS'):
        evs = os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS')
        data["environments_secrets"] = evs

    if os.environ.get('RD_CONFIG_LIVENESS_PROBE'):
        data["liveness_probe"] = os.environ.get('RD_CONFIG_LIVENESS_PROBE')

    if os.environ.get('RD_CONFIG_READINESS_PROBE'):
        data["readiness_probe"] = os.environ.get('RD_CONFIG_READINESS_PROBE')

    if os.environ.get('RD_CONFIG_VOLUME_MOUNTS'):
        data["volume_mounts"] = os.environ.get('RD_CONFIG_VOLUME_MOUNTS')

    if os.environ.get('RD_CONFIG_VOLUMES'):
        data["volumes"] = os.environ.get('RD_CONFIG_VOLUMES')

    if os.environ.get('RD_CONFIG_CONTAINER_COMMAND'):
        cc = os.environ.get('RD_CONFIG_CONTAINER_COMMAND')
        data["container_command"] = cc

    if os.environ.get('RD_CONFIG_CONTAINER_ARGS'):
        data["container_args"] = os.environ.get('RD_CONFIG_CONTAINER_ARGS')

    if os.environ.get('RD_CONFIG_RESOURCES_REQUESTS'):
        rr = os.environ.get('RD_CONFIG_RESOURCES_REQUESTS')
        data["resources_requests"] = rr

    if os.environ.get('RD_CONFIG_WAITREADY'):
        data["waitready"] = os.environ.get('RD_CONFIG_WAITREADY')

    pod = create_pod(data)
    resp = None
    try:
        resp = api.create_namespaced_pod(namespace=namespace,
                                         body=pod,
                                         pretty="True")

        print("Pod Created successfully")

    except ApiException as e:
        log.error("Exception creating pod: %s\n" % e)
        exit(1)

    if not resp:
        print("Pod %s does not exits." % name)
        exit(1)
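create_pod() builds the pod body from the collected data but is not shown here. A minimal hypothetical version covering only the required fields might look like:

def create_pod(data):
    # Hypothetical sketch: the real helper also handles ports, env vars,
    # probes, volumes, resources and so on.
    return {
        'apiVersion': data.get('api_version', 'v1'),
        'kind': 'Pod',
        'metadata': {'name': data['name'], 'namespace': data['namespace']},
        'spec': {
            'containers': [{
                'name': data['container_name'],
                'image': data['image'],
            }]
        }
    }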
Example #11
def main():

    common.connect()
    api = core_v1_api.CoreV1Api()

    name = os.environ.get('RD_NODE_DEFAULT_NAME')
    namespace = os.environ.get('RD_NODE_DEFAULT_NAMESPACE')
    container = os.environ.get('RD_NODE_DEFAULT_CONTAINER_NAME')

    log.debug("--------------------------")
    log.debug("Pod Name:  %s" % name)
    log.debug("Namespace: %s " % namespace)
    log.debug("Container: %s " % container)
    log.debug("--------------------------")

    resp = None
    try:
        resp = api.read_namespaced_pod(name=name, namespace=namespace)
    except ApiException as e:
        if e.status != 404:
            print("Unknown error: %s" % e)
            exit(1)

    if not resp:
        print("Pod %s does not exits." % name)
        exit(1)

    source_file = os.environ.get('RD_FILE_COPY_FILE')
    destination_file = os.environ.get('RD_FILE_COPY_DESTINATION')

    # force-print the destination to avoid an error with the node-executor
    print(destination_file)

    log.debug("Copying file from %s to %s" % (source_file, destination_file))

    destination_path = os.path.dirname(destination_file)
    destination_file_name = os.path.basename(destination_file)

    # Copying file client -> pod
    exec_command = ['tar', 'xvf', '-', '-C', '/']
    resp = stream(api.connect_get_namespaced_pod_exec,
                  name,
                  namespace,
                  command=exec_command,
                  container=container,
                  stderr=True,
                  stdin=True,
                  stdout=True,
                  tty=False,
                  _preload_content=False)

    with TemporaryFile() as tar_buffer:
        with tarfile.open(fileobj=tar_buffer, mode='w') as tar:
            tar.add(name=source_file,
                    arcname=destination_path + "/" + destination_file_name)

        tar_buffer.seek(0)
        commands = []
        commands.append(tar_buffer.read())

        while resp.is_open():
            resp.update(timeout=1)
            if resp.peek_stdout():
                print("STDOUT: %s" % resp.read_stdout())
            if resp.peek_stderr():
                print("STDERR: %s" % resp.read_stderr())
            if commands:
                c = commands.pop(0)
                # print("Running command... %s\n" % c)
                resp.write_stdin(c)
            else:
                break
        resp.close()
Example #12
def handler(event, context):
  if 'local_config' in os.environ and os.environ['local_config'].lower() == 'true':
    config.load_kube_config()

  else:
    kube_config = {
      "name": os.environ["kube_name"],
      "server": os.environ["kube_server"],
      "certificate-authority-data": os.environ["kube_cert_auth"].rstrip(),
      "user": os.environ["kube_user"],
      "pass": os.environ["kube_pass"]
    }

    kube_doc = """
apiVersion: v1
kind: Config
preferences: {{}}
clusters:
- cluster:
    certificate-authority-data: {certificate-authority-data}
    server: {server}
  name: {name}
contexts:
- context:
    cluster: {name}
    user: {name}
  name: {name}
current-context: {name}
users:
- name: {name}
  user:
    username: {user}
    password: {pass}
      """.format(**kube_config)

    with open('/tmp/kube_config', 'w') as kube_config_file:
      kube_config_file.write(kube_doc)
      kube_config_file.close()

    config.load_kube_config(config_file="/tmp/kube_config")

  print('Loaded kube config')

  configuration.assert_hostname = False
  api = core_v1_api.CoreV1Api()

  ##########################################
  # Reclaim weave IPs.

  # Get master
  node_selector = 'kubernetes.io/role=master'
  master_name = None
  try:
    nodes = api.list_node(label_selector=node_selector, watch=False)
    for n in nodes.items:
      master_name = n.metadata.name
  except ApiException as e:
    if e.status != 404:
        print("Unknown error: %s" % e)
        exit(1)

  if master_name is None:
    print('No master node found in the cluster!')
    exit(1)

  else:
    print('Found master node: %s' % master_name)

  # Get the weave pod running on the master
  pod_name = None
  pod_ns = 'kube-system'
  pod_container = 'weave'
  pod_fselector = 'spec.nodeName=%s' % master_name
  pod_lselector = 'name=weave-net'
  try:
    pods = api.list_namespaced_pod(namespace=pod_ns, field_selector=pod_fselector, label_selector=pod_lselector, watch=False)
    for p in pods.items:
      pod_name = p.metadata.name
  except ApiException as e:
    if e.status != 404:
        print("Unknown error: %s" % e)
        exit(1)

  if pod_name is None:
    print("Cannot find pod on master %s" % master_name)
    exit(1)

  else:
    print('Found pod matching %s: %s' % (pod_lselector, pod_name))


  # calling exec and wait for response.
  ipam_status_cmd = [
    '/home/weave/weave',
    '--local',
    'status',
    'ipam'
  ]

  mac_addrs = None
  try:
    ipam = api.connect_get_namespaced_pod_exec(pod_name, pod_ns, command=ipam_status_cmd, container=pod_container,
                                               stderr=True, stdin=False, stdout=True, tty=False)

    if ipam is not None:
      ipam_lines = ipam.split('\n')
      mac_addrs = [re.sub(r'\(.*', '', l) for l in ipam_lines if re.search('unreachable', l)]

  except ApiException as e:
    if e.status != 404:
        print("Unknown error: %s" % e)
        exit(1)

  if mac_addrs:
    for mac in mac_addrs:
      rmpeer_cmd = [
        '/home/weave/weave',
        '--local',
        'rmpeer',
        mac
      ]

      try:
        print('Reclaiming from mac addr: %s' % mac)
        ipam = api.connect_get_namespaced_pod_exec(pod_name, pod_ns, command=rmpeer_cmd, container=pod_container,
                                                   stderr=True, stdin=False, stdout=True, tty=False)

        if ipam is not None:
          ipam_lines = ipam.split('\n')
          mac_addrs = [re.sub(r'\(.*', '', l) for l in ipam_lines]

      except ApiException as e:
        if e.status != 404:
          print("Unknown error while remove mac addr %s: %s" % (mac, e))
          break
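Note that this example calls connect_get_namespaced_pod_exec() directly; with recent versions of the Python client, exec calls generally have to go through the stream() helper so the WebSocket upgrade is performed, e.g.:

from kubernetes.stream import stream

ipam = stream(api.connect_get_namespaced_pod_exec, pod_name, pod_ns,
              command=ipam_status_cmd, container=pod_container,
              stderr=True, stdin=False, stdout=True, tty=False)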
Example #13
def index(n):
    for i in range(n):
        name = randomword(10)
        core_v1 = core_v1_api.CoreV1Api()

        #service
        service_manifest = {
            "apiVersion": "v1",
            "kind": "Service",
            "metadata": {
                "name": name + "-service"
            },
            "spec": {
                "selector": {
                    "app": name
                },
                "ports": [{
                    "protocol": "TCP",
                    "port": 8003
                }],
                "type": "NodePort"
            }
        }

        service = core_v1.create_namespaced_service(namespace="default",
                                                    body=service_manifest)
        port = service.spec.ports[0].node_port
        ip = get_node_cluster(os.environ['KUBECONFIG'])
        print("name=", name)
        print("address=", ip + ':' + str(port))
        address = ip + ':' + str(port)
        #pod
        pod_manifest = {
            "apiVersion": "v1",
            "kind": "Pod",
            "metadata": {
                "name": name + "-storage",
                "labels": {
                    "app": name
                }
            },
            "spec": {
                "containers": [{
                    "name":
                    name,
                    "image":
                    "ignacioschmid/pubsub:storage_test",
                    "ports": [{
                        "containerPort": 8003
                    }],
                    "env": [{
                        "name": "SENTINEL_HOST",
                        "value": "http://192.168.25.68"
                    }, {
                        "name": "SENTINEL_PORT",
                        "value": "8080"
                    }, {
                        "name": "SERVER_ADDRESS",
                        "value": ip
                    }, {
                        "name": "SERVER_PORT",
                        "value": str(port)
                    }, {
                        "name": "ID",
                        "value": name
                    }]
                }]
            }
        }

        pod = core_v1.create_namespaced_pod(body=pod_manifest,
                                            namespace="default")
    register_storage(address, name)
    return address
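randomword(), get_node_cluster() and register_storage() are project helpers not shown here. randomword() is presumably just a random lowercase suffix generator, e.g.:

import random
import string

def randomword(length):
    # Hypothetical: lowercase-only so the result is a valid Kubernetes name.
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))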
Example #14
def execute_module(module, k8s_ansible_mixin):

    # Load kubernetes.client.Configuration
    api = core_v1_api.CoreV1Api(k8s_ansible_mixin.client.client)

    # hack because passing the container as None breaks things
    optional_kwargs = {}
    if module.params.get("container"):
        optional_kwargs["container"] = module.params["container"]
    else:
        # default to the first container available on pod
        resp = None
        try:
            resp = api.read_namespaced_pod(
                name=module.params["pod"],
                namespace=module.params["namespace"])
        except ApiException:
            pass

        if resp and len(resp.spec.containers) >= 1:
            optional_kwargs["container"] = resp.spec.containers[0].name

    try:
        resp = stream(api.connect_get_namespaced_pod_exec,
                      module.params["pod"],
                      module.params["namespace"],
                      command=shlex.split(module.params["command"]),
                      stdout=True,
                      stderr=True,
                      stdin=False,
                      tty=False,
                      _preload_content=False,
                      **optional_kwargs)
    except Exception as e:
        module.fail_json(msg="Failed to execute on pod %s"
                         " due to : %s" %
                         (module.params.get("pod"), to_native(e)))
    stdout, stderr, rc = [], [], 0
    while resp.is_open():
        resp.update(timeout=1)
        if resp.peek_stdout():
            stdout.append(resp.read_stdout())
        if resp.peek_stderr():
            stderr.append(resp.read_stderr())
    err = resp.read_channel(3)
    err = yaml.safe_load(err)
    if err["status"] == "Success":
        rc = 0
    else:
        rc = int(err["details"]["causes"][0]["message"])

    module.deprecate(
        "The 'return_code' return key is deprecated. Please use 'rc' instead.",
        version="4.0.0",
        collection_name="kubernetes.core",
    )
    module.exit_json(
        # Some command might change environment, but ultimately failing at end
        changed=True,
        stdout="".join(stdout),
        stderr="".join(stderr),
        rc=rc,
        return_code=rc,
    )
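The channel number 3 read above is the exec error channel; the kubernetes client also exposes it as a named constant, so the same read can be written as:

from kubernetes.stream.ws_client import ERROR_CHANNEL  # equals 3

err = resp.read_channel(ERROR_CHANNEL)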
Example #15
def undeploy(args):
    try:
        dp = deployment.get_deployment(args.namespace, args.name)
    except Exception as e:
        stderr.write('cannot load deployment {0}: {1}\n'.format(
            args.name, kubeutil.get_error(e)))
        exit(1)

    resources = None
    try:
        resources = json.loads(dp['metadata']['annotations']
                               ['kdtool.torchbox.com/attached-resources'])
    except KeyError:
        pass
    except ValueError as e:
        stderr.write(
            "error: could not decode kdtool.torchbox.com/attached-resources annotation: {0}\n"
            .format(str(e)))
        exit(1)

    stdout.write("\nthis deployment will be removed:\n")
    stdout.write("- {0}/{1}\n".format(
        dp['metadata']['namespace'],
        dp['metadata']['name'],
    ))

    if resources:
        if args.all:
            stdout.write(
                "\nthe following attached resources will also be deleted:\n")
            for res in resources:
                extra = ''
                if res['kind'] == 'database':
                    extra = ' (database will be dropped)'
                elif res['kind'] == 'volume':
                    extra = ' (contents will be deleted)'

                stdout.write("- {0}: {1}{2}\n".format(res['kind'], res['name'],
                                                      extra))
        else:
            stdout.write(
                "\nthe following attached resources will NOT be deleted (use --all):\n"
            )
            for res in resources:
                stdout.write("- {0}: {1}\n".format(
                    res['kind'],
                    res['name'],
                ))

    stdout.write('\n')

    if not args.force:
        pr = input('continue [y/N]? ')
        if pr.lower() not in ['yes', 'y']:
            stdout.write("okay, aborting\n")
            exit(0)

    client = kubeutil.get_client()
    extv1beta1 = extensions_v1beta1_api.ExtensionsV1beta1Api(client)
    v1 = core_v1_api.CoreV1Api(client)

    stdout.write('deleting deployment <{}/{}>: '.format(
        args.namespace, args.name))
    extv1beta1.delete_namespaced_deployment(args.name, args.namespace, body={})
    stdout.write('ok\n')

    if not args.all:
        exit(0)

    for res in (resources or []):
        stdout.write('deleting {} <{}>: '.format(res['kind'], res['name']))
        if res['kind'] == 'volume':
            v1.delete_namespaced_persistent_volume_claim(res['name'],
                                                         args.namespace,
                                                         body={})
        elif res['kind'] == 'secret':
            v1.delete_namespaced_secret(res['name'], args.namespace, body={})
        elif res['kind'] == 'database':
            resource_path = ('/apis/torchbox.com/v1/namespaces/' +
                             dp['metadata']['namespace'] + '/databases/' +
                             res['name'])

            header_params = {}
            header_params['Accept'] = client.select_header_accept(
                ['application/json'])
            header_params['Content-Type'] = client.select_header_content_type(
                ['*/*'])
            header_params.update(kubeutil.config.api_key)

            (resp, code, header) = client.call_api(resource_path,
                                                   'DELETE', {}, {},
                                                   header_params,
                                                   None, [],
                                                   _preload_content=False)
        elif res['kind'] == 'service':
            v1.delete_namespaced_service(res['name'], args.namespace)
        elif res['kind'] == 'ingress':
            extv1beta1.delete_namespaced_ingress(res['name'],
                                                 args.namespace,
                                                 body={})

        stdout.write('ok\n')
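The raw call_api() request against the torchbox.com database objects can also be expressed through CustomObjectsApi in newer client releases (a sketch; older releases additionally require a body argument):

from kubernetes.client import CustomObjectsApi

custom = CustomObjectsApi(client)
custom.delete_namespaced_custom_object(
    'torchbox.com', 'v1', dp['metadata']['namespace'], 'databases', res['name'])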
Example #16
import kopf
import yaml
import kubernetes

from kubernetes import client
from factories import server, proxy
from kubernetes.client.apis import core_v1_api
from kubernetes.client.rest import ApiException

corev1 = core_v1_api.CoreV1Api()

@kopf.on.create("arc.com", "v1alpha1", "servers")
def create_server(meta, spec, logger, namespace, **kwargs):
    to_own = []
    name = meta.get('name')
    body = server.create_server_body(name)

    if not pod_exists(name, namespace):
        obj = corev1.create_namespaced_pod(namespace=namespace, body=body)
        logger.info(f"Successfully created server {name}")
        to_own.append(obj)
    else:
        logger.info(f"Pod {name} already exists")

    service = server.create_server_nodeport(name)
    if spec.get('open', False) and not service_exists(name, namespace):
        obj = corev1.create_namespaced_service(namespace=namespace, body=service)
        logger.info(f"Successfully created service {name}")
        to_own.append(obj)
    else:
        logger.info(f"Service {name} already exists")
Example #17
def delete_pod(data):
    # Delete pod
    api = core_v1_api.CoreV1Api()
    common.delete_pod(api, data)

    print("Pod deleted successfully")
Example #18
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--timeout-monitors',
                        type=int,
                        default=0,
                        help='Will remove monitor output which did '
                        'not update in last `timeout` seconds. '
                        'Will not work on last monitor on screen.')
    parser.add_argument('--verbose', action='store_true', default=False)
    parser.add_argument('--hex', action='store_true', default=False)

    # taken from github.com/cilium/cilium/cmd/monitor.go
    type_choices = ['drop', 'debug', 'capture', 'trace', 'l7']
    parser.add_argument('--type',
                        action='append',
                        default=[],
                        choices=type_choices)

    parser.add_argument('--node',
                        action='append',
                        default=[],
                        help='Specify which nodes monitor will be run on. '
                        'Can match either by cilium pod names or k8s node '
                        'names. Can specify multiple.')

    parser.add_argument('--selector',
                        action='append',
                        default=[],
                        help='k8s equality label selectors for pods which '
                        'monitor should listen to. each selector will '
                        'retrieve its own set of pods. '
                        'Format is "label-name=label-value" '
                        'Can specify multiple.')
    parser.add_argument('--pod',
                        action='append',
                        default=[],
                        help='pod names in form of "namespace:pod-name", '
                        'if there is no namespace, default is assumed. '
                        'Can specify multiple.')
    parser.add_argument('--endpoint',
                        action='append',
                        type=int,
                        default=[],
                        help='Cilium endpoint ids. Can specify multiple.')

    parser.add_argument('--to-selector',
                        action='append',
                        default=[],
                        help='k8s equality label selectors for pods which '
                        'monitor should listen to. each selector will '
                        'retrieve its own set of pods. '
                        'Matches events that go to selected pods. '
                        'Format is "label-name=label-value" '
                        'Can specify multiple.')
    parser.add_argument('--to-pod',
                        action='append',
                        default=[],
                        help='pod names in form of "namespace:pod-name", '
                        'if there is no namespace, default is assumed. '
                        'Matches events that go to specified pods. '
                        'Can specify multiple.')
    parser.add_argument('--to-endpoint',
                        action='append',
                        type=int,
                        default=[],
                        help='Cilium endpoint ids. '
                        'Matches events that go to specified endpoints. '
                        'Can specify multiple.')

    parser.add_argument('--from-selector',
                        action='append',
                        default=[],
                        help='k8s equality label selectors for pods which '
                        'monitor should listen to. each selector will '
                        'retrieve its own set of pods. '
                        'Matches events that come from selected pods. '
                        'Format is "label-name=label-value" '
                        'Can specify multiple.')
    parser.add_argument('--from-pod',
                        action='append',
                        default=[],
                        help='pod names in form of "namespace:pod-name", '
                        'if there is no namespace, default is assumed. '
                        'Matches events that come from specified pods. '
                        'Can specify multiple.')
    parser.add_argument('--from-endpoint',
                        action='append',
                        type=int,
                        default=[],
                        help='Cilium endpoint ids. '
                        'Matches events that come from specified endpoints. '
                        'Can specify multiple.')

    parser.add_argument('--send-command',
                        type=str,
                        default="",
                        help='Execute command as-provided in argument on '
                        'all specified nodes and show output.')

    parser.add_argument('--cilium-namespace',
                        type=str,
                        default="kube-system",
                        help='Specify namespace in which Cilium pods reside')

    parser.add_argument('--clear-monitors',
                        action='store_true',
                        default=False,
                        help='Kill all `cilium monitor` on Cilium nodes. '
                        'Helpful for debugging')

    parser.add_argument('--rich',
                        action='store_true',
                        default=False,
                        help='Opens rich ui version')

    parser.add_argument('-n',
                        '--namespace',
                        type=str,
                        default='default',
                        help='Namespace to look for selected endpoints in')

    args = parser.parse_args()

    try:
        config.load_kube_config()
    except FileNotFoundError:
        config.load_incluster_config()

    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    api = core_v1_api.CoreV1Api()
    runner = MonitorRunner(args.cilium_namespace, api, args.namespace)

    monitor_args = MonitorArgs(args.verbose, args.hex, args.selector, args.pod,
                               args.endpoint, args.to_selector, args.to_pod,
                               args.to_endpoint, args.from_selector,
                               args.from_pod, args.from_endpoint, args.type,
                               args.namespace)

    try:
        if args.clear_monitors:
            cmd = "pkill -f \"cilium monitor\""
        else:
            cmd = args.send_command

        runner.run(monitor_args, args.node, cmd)
        if args.rich:
            ui(runner, args.timeout_monitors)
        elif not args.clear_monitors:
            batch(runner, args.timeout_monitors)
    except KeyboardInterrupt as e:
        pass
    except NoEndpointException as e:
        print("Cilium endpoints matching pod names/label selectors not found.")
    finally:
        runner.finish()
Example #19
def main():

    common.connect()

    api = core_v1_api.CoreV1Api()
    namespace = os.environ.get('RD_CONFIG_NAMESPACE')
    name = os.environ.get('RD_CONFIG_NAME')

    log.debug("--------------------------")
    log.debug("Pod Name:  %s" % name)
    log.debug("Namespace: %s " % namespace)
    log.debug("--------------------------")

    delete_on_fail = False
    if os.environ.get('RD_CONFIG_DELETEONFAIL') == 'true':
        delete_on_fail = True

    resp = None
    try:
        resp = api.read_namespaced_pod(name=name,
                                       namespace=namespace)
    except ApiException as e:
        if e.status != 404:
            log.error("Unknown error: %s" % e)
            exit(1)

    if not resp:
        log.error("Pod %s does not exits." % name)
        exit(1)

    core_v1 = client.CoreV1Api()
    response = core_v1.read_namespaced_pod_status(
        name=name,
        namespace=namespace,
        pretty="True"
    )

    if response.spec.containers:
        container = response.spec.containers[0].name
    else:
        log.error("Container not found")
        exit(1)

    script = os.environ.get('RD_CONFIG_SCRIPT')
    invocation = "/bin/bash"
    if 'RD_CONFIG_INVOCATION' in os.environ:
        invocation = os.environ.get('RD_CONFIG_INVOCATION')

    destination_path = "/tmp"

    if 'RD_NODE_FILE_COPY_DESTINATION_DIR' in os.environ:
        destination_path = os.environ.get('RD_NODE_FILE_COPY_DESTINATION_DIR')

    temp = tempfile.NamedTemporaryFile(mode="w+")  # text mode so the script string can be written directly
    destination_file_name = os.path.basename(temp.name)
    full_path = destination_path + "/" + destination_file_name

    try:
        temp.write(script)
        temp.seek(0)

        log.debug("coping script from %s to %s" % (temp.name,full_path))

        common.copy_file(name=name,
                         container=container,
                         source_file=temp.name,
                         destination_path= destination_path,
                         destination_file_name=destination_file_name
                         )

    finally:
        temp.close()

    permissions_command = ["chmod", "+x", full_path]

    log.debug("setting permissions %s" % permissions_command)
    resp = common.run_command(name=name,
                              namespace=namespace,
                              container=container,
                              command=permissions_command
                              )

    if resp.peek_stdout():
        print(resp.read_stdout())

    if resp.peek_stderr():
        print(resp.read_stderr())
        sys.exit(1)

    # calling exec and wait for response.
    exec_command = invocation.split(" ")
    exec_command.append(full_path)

    if 'RD_CONFIG_ARGUMENTS' in os.environ:
        arguments = os.environ.get('RD_CONFIG_ARGUMENTS')
        exec_command.append(arguments)

    log.debug("running script %s" % exec_command)

    resp, error = common.run_interactive_command(name=name,
                                          namespace=namespace,
                                          container=container,
                                          command=exec_command
                                          )
    if error:
        log.error("error running script")

        if delete_on_fail:
            log.info("removing POD on fail")
            data = {}
            data["name"] = name
            data["namespace"] = namespace
            common.delete_pod(api, data)

            log.info("POD deleted")
        sys.exit(1)

    rm_command = ["rm", full_path]

    log.debug("removing file %s" % rm_command)
    resp = common.run_command(name=name,
                              namespace=namespace,
                              container=container,
                              command=rm_command
                              )

    if resp.peek_stdout():
        log.debug(resp.read_stdout())

    if resp.peek_stderr():
        log.debug(resp.read_stderr())
        sys.exit(1)
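common.run_command() and common.run_interactive_command() are not shown; run_command() presumably wraps the exec stream call and hands the open response back so the caller can peek stdout/stderr, roughly:

from kubernetes.client.apis import core_v1_api
from kubernetes.stream import stream

def run_command(name, namespace, container, command):
    # Hypothetical stand-in for common.run_command().
    api = core_v1_api.CoreV1Api()
    resp = stream(api.connect_get_namespaced_pod_exec,
                  name, namespace,
                  command=command,
                  container=container,
                  stderr=True, stdin=False, stdout=True, tty=False,
                  _preload_content=False)
    # Let the command finish before returning the response to the caller.
    while resp.is_open():
        resp.update(timeout=1)
    return resp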
    def test_pod_apis(self):
        client = api_client.ApiClient(configuration=self.config)
        api = core_v1_api.CoreV1Api(client)

        name = 'busybox-test-' + short_uuid()
        pod_manifest = {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'name': name
            },
            'spec': {
                'containers': [{
                    'image': 'busybox',
                    'name': 'sleep',
                    "args": [
                        "/bin/sh",
                        "-c",
                        "while true;do date;sleep 5; done"
                    ]
                }]
            }
        }

        resp = api.create_namespaced_pod(body=pod_manifest,
                                         namespace='default')
        self.assertEqual(name, resp.metadata.name)
        self.assertTrue(resp.status.phase)

        while True:
            resp = api.read_namespaced_pod(name=name,
                                           namespace='default')
            self.assertEqual(name, resp.metadata.name)
            self.assertTrue(resp.status.phase)
            if resp.status.phase != 'Pending':
                break
            time.sleep(1)

        exec_command = ['/bin/sh',
                        '-c',
                        'for i in $(seq 1 3); do date; done']
        resp = stream(api.connect_get_namespaced_pod_exec, name, 'default',
                                                   command=exec_command,
                                                   stderr=False, stdin=False,
                                                   stdout=True, tty=False)
        print('EXEC response : %s' % resp)
        self.assertEqual(3, len(resp.splitlines()))

        exec_command = 'uptime'
        resp = stream(api.connect_post_namespaced_pod_exec, name, 'default',
                                                    command=exec_command,
                                                    stderr=False, stdin=False,
                                                    stdout=True, tty=False)
        print('EXEC response : %s' % resp)
        self.assertEqual(1, len(resp.splitlines()))

        resp = stream(api.connect_post_namespaced_pod_exec, name, 'default',
                                                    command='/bin/sh',
                                                    stderr=True, stdin=True,
                                                    stdout=True, tty=False,
                                                    _preload_content=False)
        resp.write_stdin("echo test string 1\n")
        line = resp.readline_stdout(timeout=5)
        self.assertFalse(resp.peek_stderr())
        self.assertEqual("test string 1", line)
        resp.write_stdin("echo test string 2 >&2\n")
        line = resp.readline_stderr(timeout=5)
        self.assertFalse(resp.peek_stdout())
        self.assertEqual("test string 2", line)
        resp.write_stdin("exit\n")
        resp.update(timeout=5)
        line = resp.read_channel(ERROR_CHANNEL)
        status = json.loads(line)
        self.assertEqual(status['status'], 'Success')
        resp.update(timeout=5)
        self.assertFalse(resp.is_open())

        number_of_pods = len(api.list_pod_for_all_namespaces().items)
        self.assertTrue(number_of_pods > 0)

        resp = api.delete_namespaced_pod(name=name, body={},
                                         namespace='default')
Example #21
    def __init__(self, datacenter, **kwargs):
        self.attributes = {'user': '******', 'registry_version': 'v2'}
        self.logger = logging.get_logger(self.__class__.__name__)
        self.attributes.update(kwargs)
        self.attributes['user'] = self.attributes['cluster_config'][
            'APP_DATACENTER']
        self.app_main = self.attributes['cluster_config']['APP_MAIN']
        self.datacenter = self.attributes['cluster_config']['APP_DATACENTER']
        self.from_datacenter = self.attributes["from_datacenter"]
        self.registry = kwargs["app_docker_registry"]
        self.resource_group_prefix = self.attributes['cluster_config'][
            'RESOURCE_GROUP_PREFIX']
        self.resource_group = "{}-{}".format(self.resource_group_prefix,
                                             self.datacenter)

        try:
            if ('AZURE_CLIENT_ID' in os.environ
                    and 'AZURE_CLIENT_SECRET' in os.environ
                    and 'AZURE_TENANT_ID' in os.environ
                    and 'AZURE_SUBSCRIPTION_ID' in os.environ):
                self.__dict__.update({
                    'subscription_id':
                    os.environ['AZURE_SUBSCRIPTION_ID'],
                    'client_id':
                    os.environ['AZURE_CLIENT_ID'],
                    'client_secret':
                    os.environ['AZURE_CLIENT_SECRET'],
                    'tenant_id':
                    os.environ['AZURE_TENANT_ID']
                })
            else:
                current_token_filename = os.path.join(
                    os.path.expanduser("~"), ".joara",
                    "{}.current_token_filename".format(self.resource_group))
                read_from_cache = os.path.isfile(current_token_filename)

                if read_from_cache:
                    with open(current_token_filename, "rb") as token_file:
                        azure_credential = pickle.load(token_file)
                    self.client_id = azure_credential['AZURE_CLIENT_ID']
                    self.client_secret = azure_credential[
                        'AZURE_CLIENT_SECRET']
                    self.tenant_id = azure_credential['AZURE_TENANT_ID']
                    self.subscription_id = azure_credential[
                        'AZURE_SUBSCRIPTION_ID']

                # If neither the environment variables nor a cached token file
                # supplied credentials, the attribute access below raises and
                # is reported through the except clause.
                os.environ['AZURE_CLIENT_ID'] = self.client_id
                os.environ['AZURE_CLIENT_SECRET'] = self.client_secret
                os.environ['AZURE_TENANT_ID'] = self.tenant_id
                os.environ['AZURE_SUBSCRIPTION_ID'] = self.subscription_id
        except Exception as e:
            logs = ("### Please provide your Azure credentials in clusters.ini "
                    "or as environment variables ###, {}".format(e))
            self.logger.exception(logs)
            raise RuntimeError(logs)

        self.credentials = ServicePrincipalCredentials(
            client_id=self.client_id,
            secret=self.client_secret,
            tenant=self.tenant_id)

        storage_client = StorageManagementClient(
            self.credentials, os.environ['AZURE_SUBSCRIPTION_ID'])
        resource_group = "{}-{}".format(self.resource_group_prefix,
                                        self.datacenter)
        storage_name = "{}{}".format(self.resource_group_prefix,
                                     self.datacenter)
        storage_keys = storage_client.storage_accounts.list_keys(
            resource_group, storage_name)
        storage_keys = {v.key_name: v.value for v in storage_keys.keys}

        if storage_keys:
            os.environ['AZURE_STORAGE_KEY'] = storage_keys['key1']
            os.environ['AZURE_STORAGE_ACCOUNT'] = storage_name
            run("az storage container create -n {}".format("imagesversion"))

        run("az login -u {} -p {} --tenant {} --service-principal".format(
            os.environ['AZURE_CLIENT_ID'], os.environ['AZURE_CLIENT_SECRET'],
            os.environ['AZURE_TENANT_ID']))

        run("az acr login --name {}acr{}".format(self.resource_group_prefix,
                                                 self.from_datacenter))

        try:
            config.load_kube_config()
            self.apiclient = api_client.ApiClient()
            self.api = core_v1_api.CoreV1Api(self.apiclient)
            self.k8s_beta = client.ExtensionsV1beta1Api()
        except Exception as err:
            self.logger.error(
                "Error loading kube config, Exception: {}".format(err))
            sys.exit(1)

        self.version_manager = VersionManager(**self.__dict__)
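
A minimal sketch of the credential and storage-key flow used above, assuming the same legacy azure.common.credentials and azure-mgmt-storage APIs; the resource group and storage account names ('myrg', 'mystorageacct') are placeholders.

import os

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.storage import StorageManagementClient

# Authenticate with a service principal taken from the environment.
credentials = ServicePrincipalCredentials(
    client_id=os.environ['AZURE_CLIENT_ID'],
    secret=os.environ['AZURE_CLIENT_SECRET'],
    tenant=os.environ['AZURE_TENANT_ID'])

storage_client = StorageManagementClient(
    credentials, os.environ['AZURE_SUBSCRIPTION_ID'])

# list_keys returns every key for the account; key1 is exported so that
# later az CLI calls can reuse it, mirroring the constructor above.
keys = storage_client.storage_accounts.list_keys('myrg', 'mystorageacct')
keys = {k.key_name: k.value for k in keys.keys}
os.environ['AZURE_STORAGE_ACCOUNT'] = 'mystorageacct'
os.environ['AZURE_STORAGE_KEY'] = keys['key1']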
Example #22
0
def main():
    parser = argparse.ArgumentParser()
    # store_true avoids the argparse type=bool pitfall, where any non-empty
    # string (including "False") would parse as True
    parser.add_argument('--verbose', action='store_true', default=False)
    parser.add_argument('--node',
                        action='append',
                        default=[],
                        help='Cilium pod names. Can specify multiple.')

    parser.add_argument('--selector',
                        action='append',
                        default=[],
                        help='k8s equality label selectors for pods which '
                        'the monitor should listen to. Each selector '
                        'retrieves its own set of pods. '
                        'Format is "label-name=label-value". '
                        'Can specify multiple.')
    parser.add_argument('--pod',
                        action='append',
                        default=[],
                        help='pod names in the form "namespace:pod-name"; '
                        'if no namespace is given, "default" is assumed. '
                        'Can specify multiple.')
    parser.add_argument('--endpoint',
                        action='append',
                        type=int,
                        default=[],
                        help='Cilium endpoint ids. Can specify multiple.')

    parser.add_argument('--to-selector',
                        action='append',
                        default=[],
                        help='k8s equality label selectors for pods which '
                        'the monitor should listen to. Each selector '
                        'retrieves its own set of pods. '
                        'Matches events that go to the selected pods. '
                        'Format is "label-name=label-value". '
                        'Can specify multiple.')
    parser.add_argument('--to-pod',
                        action='append',
                        default=[],
                        help='pod names in the form "namespace:pod-name"; '
                        'if no namespace is given, "default" is assumed. '
                        'Matches events that go to the specified pods. '
                        'Can specify multiple.')
    parser.add_argument('--to-endpoint',
                        action='append',
                        type=int,
                        default=[],
                        help='Cilium endpoint ids. '
                        'Matches events that go to specified endpoints. '
                        'Can specify multiple.')

    parser.add_argument('--from-selector',
                        action='append',
                        default=[],
                        help='k8s equality label selectors for pods which '
                        'the monitor should listen to. Each selector '
                        'retrieves its own set of pods. '
                        'Matches events that come from the selected pods. '
                        'Format is "label-name=label-value". '
                        'Can specify multiple.')
    parser.add_argument('--from-pod',
                        action='append',
                        default=[],
                        help='pod names in the form "namespace:pod-name"; '
                        'if no namespace is given, "default" is assumed. '
                        'Matches events that come from the specified pods. '
                        'Can specify multiple.')
    parser.add_argument('--from-endpoint',
                        action='append',
                        type=int,
                        default=[],
                        help='Cilium endpoint ids. '
                        'Matches events that come from specified endpoints. '
                        'Can specify multiple.')

    args = parser.parse_args()

    try:
        config.load_kube_config()
    except FileNotFoundError:
        config.load_incluster_config()

    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    api = core_v1_api.CoreV1Api()
    runner = MonitorRunner('kube-system', api)

    monitor_args = MonitorArgs(args.verbose, args.selector, args.pod,
                               args.endpoint, args.to_selector, args.to_pod,
                               args.to_endpoint, args.from_selector,
                               args.from_pod, args.from_endpoint)

    try:
        runner.run(monitor_args, args.node)
        ui(runner)
    except KeyboardInterrupt:
        pass
    finally:
        runner.finish()
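
A minimal sketch of how a --selector value resolves to concrete pod names with the kubernetes client; the 'k8s-app=cilium' selector is only a placeholder, and MonitorRunner/MonitorArgs belong to the surrounding project and are not redefined here.

from kubernetes import client, config

config.load_kube_config()
api = client.CoreV1Api()

# What --selector k8s-app=cilium would hand to the monitor: list every pod
# matching the equality selector, across all namespaces.
selector = 'k8s-app=cilium'
pods = api.list_pod_for_all_namespaces(label_selector=selector, watch=False)
for pod in pods.items:
    print(pod.metadata.namespace, pod.metadata.name)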