def cmd_debug(pod_name):
    """Take a Pod offline for debugging by stripping its labels.

    Clearing the labels (including the `guard=pyk` one) means the owning
    RC's selector no longer matches the Pod: the RC spins up a replacement
    and the original Pod can be inspected, e.g. via `kubectl exec`.
    Usage: `debug pod`, for example, `debug webserver-42abc`.

    :param pod_name: name of the Pod to take offline, e.g. `webserver-42abc`.
    """
    if not pod_name:
        print("Sorry, I need a Pod name in order to do my work. Do a `kploy stats` first to glean the Pod name you want to debug, e.g. `webserver-42abc`.")
        print("With the Pod name you can then run `kploy debug webserver-42abc` to take the Pod offline and subsequently for example use `kubectl exec` to enter the Pod.")
        sys.exit(1)
    here = os.path.realpath(".")
    kployfile = os.path.join(here, DEPLOYMENT_DESCRIPTOR)
    print("Trying to take Pod %s offline for debugging ..." % (pod_name))
    try:
        kploy, _ = util.load_yaml(filename=kployfile)
        logging.debug(kploy)
        pyk_client = kploycommon._connect(api_server=kploy["apiserver"], debug=DEBUG)
        pod_path = "".join(["/api/v1/namespaces/", kploy["namespace"], "/pods/", pod_name])
        pod = pyk_client.describe_resource(pod_path)
        resource = pod.json()
        # Wipe ALL labels, not only `guard`: the RC selector must stop
        # matching this Pod for it to go offline.
        resource["metadata"]["labels"] = {}
        logging.debug("Removed all labels from Pod, now labeled with: %s" % (resource["metadata"]["labels"]))
        pyk_client.execute_operation(method='PUT', ops_path=pod_path, payload=util.serialize_tojson(resource))
        # Now make sure that the newly created replacement Pod is again owned
        # by kploy. HACK: derive the RC name from the Pod name generator
        # pattern `<rc-name>-<suffix>`; need a better way to find a Pod's RC.
        dash_idx = pod_name.rfind("-")
        if dash_idx < 0:
            # Without a dash, rfind() returns -1 and the old slice silently
            # produced a bogus RC name (pod name minus last char) — fail loudly.
            print("Can't derive the RC name from Pod name `%s` (no `-` found)." % (pod_name))
            sys.exit(1)
        rc_name = pod_name[0:dash_idx]
        logging.debug("Generating RC name from Pod: %s" % (rc_name))
        rc_path = "".join(["/api/v1/namespaces/", kploy["namespace"], "/replicationcontrollers/", rc_name])
        rc = pyk_client.describe_resource(rc_path)
        kploycommon._own_pods_of_rc(pyk_client, rc, kploy["namespace"], rc_path, VERBOSE)
    except (Exception) as e:
        print("Something went wrong when taking the Pod offline:\n%s" % (e))
        sys.exit(1)
    print(80 * "=")
    print("\nOK, the Pod %s is offline. Now you can, for example, use `kubectl exec` now to debug it." % (pod_name))
def cmd_scale(scale_def):
    """Scale an RC up or down by setting its number of replicas.

    Usage: `scale rc=replica_count`, for example, `scale webserver-rc=10`.

    :param scale_def: scale definition of the form `<rc-name>=<replicas>`.
    """
    if not scale_def:
        print("Sorry, I need a scale definition in order to do my work. Do a `kploy list` first to glean the RC name you want to scale, e.g. `webserver-rc`.")
        print("With the RC name you can then run `kploy scale webserver-rc=5` to scale the respective RC to 5 replicas.")
        sys.exit(1)
    here = os.path.realpath(".")
    kployfile = os.path.join(here, DEPLOYMENT_DESCRIPTOR)
    try:
        # partition() splits on the FIRST `=` only, so a missing separator
        # can be detected explicitly instead of via an IndexError.
        rc_name, sep, count_str = scale_def.partition("=")
        if not sep:
            raise ValueError("missing `=` separator")
        replica_count = int(count_str)
        if replica_count < 0:
            # Reject negative counts up front rather than PUTting an invalid
            # spec to the API server.
            raise ValueError("replica count must be non-negative")
    except (Exception) as e:
        print("Can't parse scale definition `%s` due to: %s" % (scale_def, e))
        print("The scale definition should look as follows: `rc=replica_count`, for example, `scale webserver-rc=10`.")
        sys.exit(1)
    print("Trying to scale RC %s to %d replicas" % (rc_name, replica_count))
    try:
        kploy, _ = util.load_yaml(filename=kployfile)
        logging.debug(kploy)
        pyk_client = kploycommon._connect(api_server=kploy["apiserver"], debug=DEBUG)
        rc_path = "".join(["/api/v1/namespaces/", kploy["namespace"], "/replicationcontrollers/", rc_name])
        rc = pyk_client.describe_resource(rc_path)
        resource = rc.json()
        old_replica_count = resource["spec"]["replicas"]
        if VERBOSE:
            logging.info("Scaling RC from %d to %d replicas" % (old_replica_count, replica_count))
        logging.debug("RC about to be scaled: %s" % (resource))
        resource["spec"]["replicas"] = replica_count
        pyk_client.execute_operation(method='PUT', ops_path=rc_path, payload=util.serialize_tojson(resource))
        # On scale up, make sure that the newly created Pods are owned by kploy.
        if replica_count > old_replica_count:
            logging.debug("Scaling up, trying to own new Pods")
            rc = pyk_client.describe_resource(rc_path)
            kploycommon._own_pods_of_rc(pyk_client, rc, kploy["namespace"], rc_path, VERBOSE)
    except (Exception) as e:
        print("Something went wrong when scaling RC:\n%s" % (e))
        sys.exit(1)
    print(80 * "=")
    print("OK, I've scaled RC %s to %d replicas. You can do a `kploy stats` now to verify it." % (rc_name, replica_count))
def cmd_debug(pod_name):
    """Take a Pod offline for debugging by removing the `guard=pyk` label.

    Usage: `debug pod`, for example, `debug webserver-42abc`.

    :param pod_name: name of the Pod to take offline.
    """
    if not pod_name:
        print("Sorry, I need a Pod name in order to do my work. Do a `kploy stats` first to glean the Pod name you want to debug, e.g. `webserver-42abc`.")
        print("With the Pod name you can then run `kploy debug webserver-42abc` to take the Pod offline and subsequently for example use `kubectl exec` to enter the Pod.")
        sys.exit(1)
    descriptor_path = os.path.join(os.path.realpath("."), DEPLOYMENT_DESCRIPTOR)
    print("Trying to take Pod %s offline for debugging ..." % (pod_name))
    try:
        deployment, _ = util.load_yaml(filename=descriptor_path)
        logging.debug(deployment)
        client = kploycommon._connect(api_server=deployment["apiserver"], debug=DEBUG)
        namespace = deployment["namespace"]
        pod_endpoint = "/api/v1/namespaces/%s/pods/%s" % (namespace, pod_name)
        pod_resource = client.describe_resource(pod_endpoint).json()
        # Clear the labels so the guard label is gone and the Pod drops out
        # of its RC's selector.
        pod_resource["metadata"]["labels"] = {}
        logging.debug("Removed guard label from Pod, now labeled with: %s" % (pod_resource["metadata"]["labels"]))
        client.execute_operation(method='PUT', ops_path=pod_endpoint, payload=util.serialize_tojson(pod_resource))
        # Re-own the replacement Pod the RC creates.
        # NOTE: deriving the RC name from the Pod name is a hack — it assumes
        # a certain generator pattern; need a better way to find a Pod's RC.
        owning_rc = pod_name[:pod_name.rfind("-")]
        logging.debug("Generating RC name from Pod: %s" % (owning_rc))
        rc_endpoint = "/api/v1/namespaces/%s/replicationcontrollers/%s" % (namespace, owning_rc)
        rc_resource = client.describe_resource(rc_endpoint)
        kploycommon._own_pods_of_rc(client, rc_resource, namespace, rc_endpoint, VERBOSE)
    except (Exception) as e:
        print("Something went wrong when taking the Pod offline:\n%s" % (e))
        sys.exit(1)
    print("=" * 80)
    print("\nOK, the Pod %s is offline. Now you can, for example, use `kubectl exec` now to debug it." % (pod_name))
def cmd_scale(scale_def):
    """Scale an RC up or down by setting the number of replicas.

    Usage: `scale rc=replica_count`, for example, `scale webserver-rc=10`.

    :param scale_def: scale definition of the form `<rc-name>=<replicas>`.
    """
    if not scale_def:
        print("Sorry, I need a scale definition in order to do my work. Do a `kploy list` first to glean the RC name you want to scale, e.g. `webserver-rc`.")
        print("With the RC name you can then run `kploy scale webserver-rc=5` to scale the respective RC to 5 replicas.")
        sys.exit(1)
    descriptor_path = os.path.join(os.path.realpath("."), DEPLOYMENT_DESCRIPTOR)
    try:
        parts = scale_def.split("=")
        target_rc = parts[0]
        target_replicas = int(parts[1])
    except (Exception) as e:
        print("Can't parse scale definition `%s` due to: %s" % (scale_def, e))
        print("The scale definition should look as follows: `rc=replica_count`, for example, `scale webserver-rc=10`.")
        sys.exit(1)
    print("Trying to scale RC %s to %d replicas" % (target_rc, target_replicas))
    try:
        deployment, _ = util.load_yaml(filename=descriptor_path)
        logging.debug(deployment)
        client = kploycommon._connect(api_server=deployment["apiserver"], debug=DEBUG)
        namespace = deployment["namespace"]
        rc_endpoint = "/api/v1/namespaces/%s/replicationcontrollers/%s" % (namespace, target_rc)
        rc_resource = client.describe_resource(rc_endpoint).json()
        current_replicas = rc_resource["spec"]["replicas"]
        if VERBOSE:
            logging.info("Scaling RC from %d to %d replicas" % (current_replicas, target_replicas))
        logging.debug("RC about to be scaled: %s" % (rc_resource))
        rc_resource["spec"]["replicas"] = target_replicas
        client.execute_operation(method='PUT', ops_path=rc_endpoint, payload=util.serialize_tojson(rc_resource))
        # When scaling up, the RC creates new Pods — make sure kploy owns them.
        if target_replicas > current_replicas:
            logging.debug("Scaling up, trying to own new Pods")
            refreshed_rc = client.describe_resource(rc_endpoint)
            kploycommon._own_pods_of_rc(client, refreshed_rc, namespace, rc_endpoint, VERBOSE)
    except (Exception) as e:
        print("Something went wrong when scaling RC:\n%s" % (e))
        sys.exit(1)
    print("=" * 80)
    print("OK, I've scaled RC %s to %d replicas. You can do a `kploy stats` now to verify it." % (target_rc, target_replicas))