Example #1
    def _process_deployment_info(self):
        has_error = False
        errmsg = ""

        deployment_info = self._management_data_layer_client.get(self._deployment_info_key)
        num_trials = 0
        sleep_time = 1.0
        while num_trials < 5 and (deployment_info is None or deployment_info == ""):
            time.sleep(sleep_time)
            deployment_info = self._management_data_layer_client.get(self._deployment_info_key)
            num_trials = num_trials + 1
            sleep_time = sleep_time * 2

        if num_trials == 5:
            has_error = True
            errmsg = "Could not retrieve deployment info: " + self._deployment_info_key

        if not has_error:
            # if we're running on kubernetes, the endpoint will correspond to the assigned url
            # if we're running on bare-metal, the endpoint will correspond to the hostip + docker-mapped port
            self._external_endpoint = self._management_data_layer_client.getMapEntry(self._workflowid + "_workflow_endpoint_map", self._endpoint_key)
            num_trials = 0
            sleep_time = 1.0
            while num_trials < 5 and (self._external_endpoint is None or self._external_endpoint == ""):
                time.sleep(sleep_time)
                self._external_endpoint = self._management_data_layer_client.getMapEntry(self._workflowid + "_workflow_endpoint_map", self._endpoint_key)
                num_trials = num_trials + 1
                sleep_time = sleep_time * 2

            if num_trials == 5:
                has_error = True
                errmsg = "Could not retrieve endpoint: " + self._endpoint_key

        # in Kubernetes, endpoint is the externally visible URL
        # in bare-metal, endpoint is the current host's address

        # for session support, in FunctionWorker, we need current host address (bare-metal)
        # or current node address (kubernetes)

        # for parallel state support, in FunctionWorker, either would be fine

        # As such, let the FunctionWorker know both and let it decide what to do
        if 'KUBERNETES_SERVICE_HOST' in os.environ:
            # get current node's internal address
            self._internal_endpoint = "http://" + socket.gethostbyname(socket.gethostname()) + ":" + str(os.getenv("PORT", "8080"))
        else:
            # bare-metal mode: the current host's address and external address are the same
            self._internal_endpoint = self._external_endpoint

        if not has_error:
            self._logger.info("External endpoint: %s", self._external_endpoint)
            self._logger.info("Internal endpoint: %s", self._internal_endpoint)
            self._deployment = Deployment(deployment_info,
                self._hostname, self._userid, self._sandboxid, self._workflowid,
                self._workflowname, self._queue, self._datalayer,
                self._logger, self._external_endpoint, self._internal_endpoint)
            self._deployment.set_child_process("fb", self._fluentbit_process, self._command_args_map_fluentbit)
            has_error, errmsg = self._deployment.process_deployment_info()

        return has_error, errmsg
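
Note: both polling loops above follow the same retry-with-exponential-backoff shape (sleeps of 1s, 2s, 4s, 8s, 16s before giving up). As a minimal sketch, the pattern could be factored into a helper; the names get_with_backoff and fetch are illustrative and not part of the original code:

import time

def get_with_backoff(fetch, max_trials=5, initial_sleep=1.0):
    """Retry fetch() with exponential backoff until it returns a
    non-empty value or max_trials retries are exhausted."""
    value = fetch()
    sleep_time = initial_sleep
    for _ in range(max_trials):
        if value:  # non-empty string or object: done
            return value
        time.sleep(sleep_time)
        value = fetch()
        sleep_time *= 2
    return value  # may still be None or "" if every retry failed

With such a helper, each loop collapses to a single call, e.g. get_with_backoff(lambda: client.get(key)). Checking the returned value directly also avoids a corner case in the loops above: a fetch that succeeds on the fifth retry still leaves num_trials == 5 and is reported as an error.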
Example #2
File: deploy.py Project: rski/fuel
def deploy_cloud(self):
    dep = Deployment(self.dea, YAML_CONF_DIR, self.env_id,
                     self.node_roles_dict, self.no_health_check,
                     self.deploy_timeout)
    if not self.no_deploy_environment:
        dep.deploy()
    else:
        log('Configuration is done. Deployment is not launched.')
Example #4
def main() -> None:
    args = parse_arguments()
    nr = InitNornir("config.yaml")
    update_host_vars(nr.inventory)
    update_description(nr.inventory)
    deployment = Deployment(args.topologies, nr.inventory)
    # drop into the ipdb interactive debugger to inspect the deployment
    import ipdb
    ipdb.set_trace()
Example #5
def simple_deployment_fixture(simple_box):
    """
    This fixture will return a simple Deployment
    with width 10, height 20, x_pos 5, y_pos 15, colour black
    and name "simple"
    """
    deployment = Deployment("simple", simple_box)
    return deployment
Example #6
def execute(action, action_info, environment, consul_api):
    if isinstance(action, InstallAction):
        deployment_config = {
            'cause': 'Deployment',
            'deployment_id': action.deployment_id,
            'environment': environment,
            'last_deployment_id': action_info['last_deployment_id'],
            'platform': platform.system().lower(),
            'service': action.service
        }
        deployment = Deployment(config=deployment_config, consul_api=consul_api, aws_config=config['aws'])
        return deployment.run()
    elif isinstance(action, IgnoreAction):
        logging.info('Found Ignore action, not installing \'{0}\''.format(action.service.id))
        return {'id': action.deployment_id, 'is_success': True}
    elif isinstance(action, UninstallAction):
        logging.info('Uninstall action not yet supported!')
        return {'id': action.deployment_id, 'is_success': True}
Example #7
def main(argv):
    filename = argv[1]
    graph = Graph()

    # read input file of teacher and student relations and create graph network
    with open(filename) as f:
        for line in f:
            line = line.strip().split(',')
            user1 = User(line[0])
            user2 = User(line[1])
            graph.addConnection(user1, user2, True)

    version = '2.0'

    newversion = Deployment(graph, version)

    print("----Initial state:----")
    printAllUsers(graph)

    # Ask admin to select the node to start the deployment process
    node = input('Select node to deploy: ')
    user = graph.getUser(node)
    threshold = None
    strict = None

    isthreshold = input('Do you want to limit the infection [y/n]: ')
    if isthreshold == 'y':
        threshold = int(input('Enter the limit: '))
        isstrict = input('Is this limit strict [y/n]: ')
        if isstrict == 'y':
            strict = 'strict'

    newversion.deploy(user, threshold, strict)

    print("------Final State-------")
    printAllUsers(graph)

    plotGraph(graph)
Example #8
def createDeployment(studyId, deploymentData):
    #check that deploymentData is valid
    if not ("name" in deploymentData and type(deploymentData["name"])==str and len(deploymentData["name"])>0):
        raise errors.BadRequestError("Deployment must have attribute 'name' (type=str and length>0)")
    if not ("goalSampleSize" in deploymentData and type(deploymentData["goalSampleSize"]) in [int, Decimal] and deploymentData["goalSampleSize"]>0):
        raise errors.BadRequestError("Deployment must have attribute 'goalSampleSize' (type=int and value>0)")
    if not ("facility" in deploymentData and type(deploymentData["facility"]) in [str, dict] and len(deploymentData["facility"])>0):
        raise errors.BadRequestError("Deployment must have attribute 'facility' (type=str or dict and length>0)")
    #construct the deployment
    d = Deployment()
    d.name = deploymentData["name"]
    if "description" in deploymentData:
        d.description = deploymentData["description"]
    d.goalSampleSize = int(deploymentData["goalSampleSize"])
    d.facility = facility_db_access.loadFacility(deploymentData["facility"])
    #create the deployment (by updating its parent study)
    return study_db_access.createDeploymentForStudy(studyId, d)
Example #9
def get_deployments(data):
    """
    Look through the data and pull out a list of deployment objects
    from it

    Args:
      data: the parsed yaml data (nested lists and dicts)

    Returns:
      A list of Deployment objects
    """

    # look for elements of the list that have kind==Deployment
    deployments = []

    for section in data:
        if section.get("kind", "") == "Deployment":
            metadata = section.get("metadata", None)
            if metadata:
                name = metadata.get("name", None)
                if name:
                    deployments.append(Deployment(name))

    return deployments
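
A sketch of how get_deployments might be driven, assuming the input comes from a multi-document Kubernetes manifest parsed with PyYAML (the manifest text below is illustrative):

import yaml

manifest = """
kind: Deployment
metadata:
  name: web
---
kind: Service
metadata:
  name: web-svc
"""

# yaml.safe_load_all yields one dict per YAML document
data = list(yaml.safe_load_all(manifest))
for d in get_deployments(data):
    print(d.name)  # -> web, assuming Deployment exposes the name it was given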
Example #10
#! /usr/bin/env python

import os.path

from code import InteractiveConsole
from deployment import Deployment

D = Deployment()

console = InteractiveConsole(locals={"D": D, "ec2": D.ec2})
pyrc = os.path.expanduser(os.path.join("~", ".pythonrc.py"))
if os.path.exists(pyrc):
    console.runsource(open(pyrc).read())
console.interact()

Example #11
class SandboxAgent:
    def __init__(self, hostname, queue, datalayer, sandboxid, userid,
                 workflowid, elasticsearch, workflowname, endpoint_key):

        self._start = time.time()

        self._python_version = sys.version_info

        self._hostname = hostname
        self._queue = queue
        self._datalayer = datalayer
        self._elasticsearch = elasticsearch
        self._userid = userid
        self._sandboxid = sandboxid
        self._workflowid = workflowid
        self._workflowname = workflowname
        # _XXX_: we'll use the endpoint_key to look up our endpoint
        self._endpoint_key = endpoint_key
        self._deployment_info_key = "deployment_info_workflow_" + self._workflowid

        self._logger = logging_helpers.setup_logger(self._sandboxid,
                                                    LOG_FILENAME)
        self._fluentbit_process, self._command_args_map_fluentbit = logging_helpers.setup_fluentbit_and_elasticsearch_index(
            self._logger, FLUENTBIT_FOLDER, self._elasticsearch,
            ELASTICSEARCH_INDEX_WF, ELASTICSEARCH_INDEX_FE)

        self._logger.info("hostname (and container name): %s", self._hostname)
        self._logger.info("elasticsearch nodes: %s", self._elasticsearch)
        self._logger.info("queueservice: %s", self._queue)
        self._logger.info("datalayer: %s", self._datalayer)
        self._logger.info("user id: %s", self._userid)
        self._logger.info("sandbox id: %s", self._sandboxid)
        self._logger.info("workflow id: %s", self._workflowid)
        self._logger.info("workflow name: %s", self._workflowname)
        self._logger.info("endpoint_key: %s", self._endpoint_key)

        self._instructions_topic = "instructions_" + self._sandboxid

        self._management_data_layer_client = DataLayerClient(
            locality=1,
            sid="Management",
            wid="Management",
            is_wf_private=True,
            connect=self._datalayer)
        self._logger.info("Management data layer client connected after %s s",
                          str(time.time() - self._start))

        # to be declared later
        self._local_queue_client = None
        self._deployment = None
        self._queue_service_process = None
        self._frontend_process = None
        # visible to the outside world: either kubernetes assigned URL or bare-metal host address + exposed port
        self._external_endpoint = None
        # visible internally: kubernetes node address or same as bare-metal external endpoint
        self._internal_endpoint = None

        self._is_running = False
        self._shutting_down = False

    def _handle_instruction(self, instruction):
        error = None

        action = instruction["action"]
        if "parameters" in instruction:
            parameters = instruction["parameters"]

        if action == "stop-function-worker":
            self._deployment.stop_function_worker(parameters["functionTopic"])
        elif action == "shutdown":
            self.shutdown()
        else:
            error = "Unsupported 'action' in instruction: " + action

        return error

    def _get_and_handle_message(self):
        error = None

        lqm = self._local_queue_client.getMessage(self._instructions_topic,
                                                  POLL_TIMEOUT)
        if lqm is not None:
            lqcm = LocalQueueClientMessage(lqm)
            key = lqcm.get_key()
            value = lqcm.get_value()
            self._logger.info(key + " " + value)
            try:
                instruction = json.loads(value)
                error = self._handle_instruction(instruction)
            except Exception as exc:
                error = "Couldn't decode instruction: " + str(exc)
                self._logger.error(error)

            if error is None:
                self._logger.info(
                    "Handled instruction successfully at t+ %s s",
                    str(time.time() - self._start))

    def _process_deployment_info(self):
        has_error = False
        errmsg = ""

        deployment_info = self._management_data_layer_client.get(
            self._deployment_info_key)
        num_trials = 0
        sleep_time = 1.0
        while num_trials < 5 and (deployment_info is None
                                  or deployment_info == ""):
            time.sleep(sleep_time)
            deployment_info = self._management_data_layer_client.get(
                self._deployment_info_key)
            num_trials = num_trials + 1
            sleep_time = sleep_time * 2

        if num_trials == 5:
            has_error = True
            errmsg = "Could not retrieve deployment info: " + self._deployment_info_key

        if not has_error:
            # if we're running on kubernetes, the endpoint will correspond to the assigned url
            # if we're running on bare-metal, the endpoint will correspond to the hostip + docker-mapped port
            self._external_endpoint = self._management_data_layer_client.getMapEntry(
                self._workflowid + "_workflow_endpoint_map", endpoint_key)
            num_trials = 0
            sleep_time = 1.0
            while num_trials < 5 and (self._external_endpoint is None
                                      or self._external_endpoint == ""):
                time.sleep(sleep_time)
                self._external_endpoint = self._management_data_layer_client.getMapEntry(
                    self._workflowid + "_workflow_endpoint_map", endpoint_key)
                num_trials = num_trials + 1
                sleep_time = sleep_time * 2

            if num_trials == 5:
                has_error = True
                errmsg = "Could not retrieve endpoint: " + self._endpoint_key

        # in Kubernetes, endpoint is the externally visible URL
        # in bare-metal, endpoint is the current host's address

        # for session support, in FunctionWorker, we need current host address (bare-metal)
        # or current node address (kubernetes)

        # for parallel state support, in FunctionWorker, either would be fine

        # As such, let the FunctionWorker know both and let it decide what to do
        if 'KUBERNETES_SERVICE_HOST' in os.environ:
            # get current node's internal address
            self._internal_endpoint = "http://" + socket.gethostbyname(
                socket.gethostname()) + ":" + str(os.getenv("PORT", "8080"))
        else:
            # bare-metal mode: the current host's address and external address are the same
            self._internal_endpoint = self._external_endpoint

        if not has_error:
            self._logger.info("External endpoint: %s", self._external_endpoint)
            self._logger.info("Internal endpoint: %s", self._internal_endpoint)
            self._deployment = Deployment(deployment_info,
                self._hostname, self._userid, self._sandboxid, self._workflowid,
                self._workflowname, self._queue, self._datalayer,
                self._logger, self._external_endpoint, self._internal_endpoint)
            self._deployment.set_child_process(
                "fb", self._fluentbit_process,
                self._command_args_map_fluentbit)
            has_error, errmsg = self._deployment.process_deployment_info()

        return has_error, errmsg

    # SIGTERM kills Thrift before we can handle stuff
    def sigterm(self, signum, frame):
        self.shutdown()
        # raise interrupt to kill main sequence when shutdown was not received through the queue
        raise InterruptedError

    def sigchld(self, signum, _):
        if not self._shutting_down:
            should_shutdown, pid = self._deployment.check_child_process()

            if should_shutdown:
                self._update_deployment_status(
                    True, "A sandbox process stopped unexpectedly.")
                self.shutdown(reason="Process with pid: " + str(pid) +
                              " stopped unexpectedly.")

    def shutdown(self, reason=None):
        self._shutting_down = True
        if reason is not None:
            self._logger.error("Shutting down sandboxagent due to reason: " +
                               reason)
        else:
            self._logger.info("Gracefully shutting down sandboxagent")

        self._logger.info("Shutting down the frontend...")
        if self._frontend_process is not None:
            self._frontend_process.terminate()

        self._logger.info("Shutting down the function worker(s)...")
        self._deployment.shutdown()

        # shut down the local queue client, so that we can also shut down the queue service
        self._local_queue_client.removeTopic(self._instructions_topic)
        self._local_queue_client.shutdown()

        self._logger.info("Shutting down the queue service...")
        if self._queue_service_process is not None:
            process_utils.terminate_and_wait_child(self._queue_service_process,
                                                   "queue service", 5,
                                                   self._logger)

        # we can't do this here, because there may be other sandboxes running the same workflow
        #self._management_data_layer_client.put("workflow_status_" + self._workflowid, "undeployed")
        self._management_data_layer_client.shutdown()

        self._logger.info("Shutting down fluent-bit...")
        time.sleep(2)  # flush interval of fluent-bit
        process_utils.terminate_and_wait_child(self._fluentbit_process,
                                               "fluent-bit", 5, self._logger)
        self._is_running = False

        try:
            self._frontend_process.wait(30)
        except subprocess.TimeoutExpired:
            self._frontend_process.kill()
            self._frontend_process.communicate()
        self._logger.info("Shutdown complete")

    def _stop_deployment(self, reason, errmsg):
        self._logger.error(
            "Stopping deployment due to error in launching %s...", reason)
        self._logger.error(errmsg)
        self._update_deployment_status(True, errmsg)
        self._management_data_layer_client.shutdown()
        os._exit(1)

    def _update_deployment_status(self, has_error, errmsg):
        sbstatus = {}
        sbstatus["errmsg"] = errmsg
        if has_error:
            sbstatus["status"] = "failed"
        else:
            sbstatus["status"] = "deployed"
        # set our own status in the map
        self._management_data_layer_client.putMapEntry(
            self._workflowid + "_sandbox_status_map", self._endpoint_key,
            json.dumps(sbstatus))

    def run(self):
        has_error = False
        errmsg = ""

        ts_qs_launch = time.time()
        # 1. launch the QueueService here
        self._logger.info("Launching QueueService...")
        cmdqs = "java -jar /opt/mfn/queueservice.jar"
        command_args_map_qs = {}
        command_args_map_qs["command"] = cmdqs
        command_args_map_qs["wait_until"] = "Starting local queue..."
        error, self._queue_service_process = process_utils.run_command(
            cmdqs, self._logger, wait_until="Starting local queue...")
        if error is not None:
            has_error = True
            errmsg = "Could not start the sandbox queue service: " + str(error)

        if has_error:
            self._stop_deployment("queue service", errmsg)

        ts_fw_launch = time.time()
        # 2. process the deployment info and start function workers
        self._logger.info(
            "Going to parse the deployment info and get the endpoint...")
        has_error, errmsg = self._process_deployment_info()

        if has_error:
            self._stop_deployment("workflow", errmsg)

        ts_fe_launch = time.time()
        # 3. launch the frontend
        self._logger.info("Launching frontend...")

        cmdweb = "/opt/mfn/frontend"
        fenv = dict(os.environ)
        workflow = self._deployment.get_workflow()
        fenv["MFN_ENTRYTOPIC"] = workflow.getWorkflowEntryTopic()
        fenv["MFN_RESULTTOPIC"] = workflow.getWorkflowExitTopic()
        fenv["MFN_QUEUE"] = self._queue
        # MFN_DATALAYER already set

        command_args_map_fe = {}
        command_args_map_fe["command"] = cmdweb
        command_args_map_fe["custom_env"] = fenv
        command_args_map_fe[
            "wait_until"] = "Frontend is ready to handle requests"
        error, self._frontend_process = process_utils.run_command(
            cmdweb,
            self._logger,
            custom_env=fenv,
            wait_until="Frontend is ready to handle requests")
        if error is not None:
            has_error = True
            errmsg = "Could not start the frontend: " + str(error)

        if has_error:
            self._stop_deployment("frontend", errmsg)

        self._logger.info("frontend started")

        t_fe = (time.time() - ts_fe_launch) * 1000.0
        t_fw = (ts_fe_launch - ts_fw_launch) * 1000.0
        t_qs = (ts_fw_launch - ts_qs_launch) * 1000.0

        self._logger.info(
            "QS launch time: %s (ms), FWs download + launch time: %s (ms), FE launch time: %s (ms)",
            str(t_qs), str(t_fw), str(t_fe))

        self._deployment.set_child_process("qs", self._queue_service_process,
                                           command_args_map_qs)
        self._deployment.set_child_process("fe", self._frontend_process,
                                           command_args_map_fe)

        # 4. start listening for additional instructions if any
        self._local_queue_client = LocalQueueClient(connect=self._queue)
        self._local_queue_client.addTopic(self._instructions_topic)

        self._is_running = True

        signal.signal(signal.SIGTERM, self.sigterm)

        children_pids = self._deployment.get_all_children_pids()
        children_pids.sort()
        self._logger.info("All children pids: " + str(children_pids))

        signal.signal(signal.SIGCHLD, self.sigchld)

        # update our own sandbox status
        self._update_deployment_status(False, errmsg)

        #self._management_data_layer_client.put("workflow_status_" + self._workflowid, "deployed")
        #self._management_data_layer_client.delete("workflow_status_error_" + self._workflowid)

        self._logger.info("Successfully deployed.")

        while self._is_running:
            try:
                self._get_and_handle_message()
            except Exception as exc:
                self._logger.error("%s", str(exc))
                time.sleep(2)
Example #12
def CreateDeployment(self, info):
    deployment = Deployment(info)
    self.etcd.deploymentList.append(deployment)
    print("Deployment " + deployment.deploymentLabel + " created")
Example #13
from os import path

from kubernetes import client, config, watch

from deployment import Deployment
from pod import Pod

config.load_kube_config()

v1 = client.CoreV1Api()
print("Listing pods with their IPs:")
ret = v1.list_pod_for_all_namespaces(watch=False)
for i in ret.items:
    print("%s\t%s\t%s" %
          (i.status.pod_ip, i.metadata.namespace, i.metadata.name))

if __name__ == '__main__':
    yaml_file = "yamls/deployments/nginx-deployment.yaml"
    yaml_file_path = path.join(path.dirname(__file__), yaml_file)
    deployment = Deployment("nginx-deployment", "default", yaml_file_path)
    deployment.patch()
    # deployment.patch()
    # deployment.delete()
    pods = Pod()
    pods.get_pods()
Example #14
def deploy_cloud(self):
    dep = Deployment(self.dea, YAML_CONF_DIR, self.env_id,
                     self.node_roles_dict, self.no_health_check)
    dep.deploy()
Example #15
def deploy(args):
    D = Deployment()
    D.ensure_static_resources()

    with D.security():
        D.rolling_base()
        D.ensure_dynamic_instances(args.revision)

    # The idea behind the D.security() context manager is to have an easy way to
    # expose port 22 on AWS instances, run ansible playbooks on them, and then
    # promptly close off port 22 access.
    #
    # However, D.security() will only consider instances that were around at the
    # time the context was created.  So, if ensure_dynamic_instances ends up
    # deciding that it needs to (re)create some instances, then the new instances
    # will not have their ports exposed.
    #
    # The straightforward solution, which is what is implemented here, is to
    # just exit the security context and enter a new one, at which point we'd be
    # sure to have all the ports we need exposed.
    with D.security():
        D.rolling_deploy()

    D.send_bot("deploy complete")
Example #16
def test_create_deployment(name, simple_box):
    """
    Test that we can create a deployment with the right name
    """
    deployment = Deployment(name, simple_box)
    assert deployment.name == name
Example #17
def stage(args):
    D = Deployment()
    rev, staged = D.check_rev(args.revision)
    if staged:
        D.send_bot("revision {} already staged".format(rev))
    else:
        D.ensure_static_resources()
        with D.security():
            D.rolling_base()
            D.ensure_dynamic_instances(args.revision)

        # See above comment explaining why we use two separate block contexts
        with D.security():
            D.rolling_stage(args.revision)

        D.send_bot("revision {} successfully staged".format(rev))
Example #18
def deploy_cloud(self):
    dep = Deployment(self.dea, YAML_CONF_DIR, self.env_id,
                     self.node_id_roles_dict)
    dep.deploy()
Example #19
def loadDeployment(deploymentData):
    """Returns a Deployment object for the given data"""
    #check that the deploymentData is valid
    if not ("deploymentId" in deploymentData and type(deploymentData["deploymentId"])==str and len(deploymentData["deploymentId"])>0):
        raise errors.BadRequestError("Deployment must have attribute 'deploymentId' (type=str and length>0)")
    if not ("name" in deploymentData and type(deploymentData["name"])==str and len(deploymentData["name"])>0):
        raise errors.BadRequestError("Deployment must have attribute 'name' (type=str and length>0)")
    if not ("status" in deploymentData and type(deploymentData["status"])==str and len(deploymentData["status"])>0):
        raise errors.BadRequestError("Deployment must have attribute 'status' (type=str and length>0)")
    if not ("dateCreated" in deploymentData and type(deploymentData["dateCreated"])==str and len(deploymentData["dateCreated"])>0):
        raise errors.BadRequestError("Deployment must have attribute 'dateCreated' (type=str and length>0)")
    if not ("dateModified" in deploymentData and type(deploymentData["dateModified"])==str and len(deploymentData["dateModified"])>0):
        raise errors.BadRequestError("Deployment must have attribute 'dateCreated' (type=str and length>0)")
    if not ("archived" in deploymentData and type(deploymentData["archived"])==bool):
        raise errors.BadRequestError("Deployment must have attribute 'archived' (type=bool)")
    if not ("goalSampleSize" in deploymentData and type(deploymentData["goalSampleSize"]) in [int, Decimal] and deploymentData["goalSampleSize"]>0):
        raise errors.BadRequestError("Deployment must have attribute 'goalSampleSize' (type=int and value>0)")
    if not ("currentSampleSize" in deploymentData and type(deploymentData["currentSampleSize"]) in [int, Decimal] and deploymentData["currentSampleSize"]>=0):
        raise errors.BadRequestError("Deployment must have attribute 'currentSampleSize' (type=int and value>=0)")
    if not ("facility" in deploymentData and type(deploymentData["facility"]) in [str, dict] and len(deploymentData["facility"])>0):
        raise errors.BadRequestError("Deployment must have attribute 'facility' (type=str or dict and length>0)")
    #construct the deployment
    d = Deployment()
    d.deploymentId = deploymentData["deploymentId"]
    d.name = deploymentData["name"]
    if "description" in deploymentData:
        d.description = deploymentData["description"]
    d.status = deploymentData["status"]
    d.dateCreated = deploymentData["dateCreated"]
    d.dateModified = deploymentData["dateModified"]
    d.archived = deploymentData["archived"]
    d.goalSampleSize = int(deploymentData["goalSampleSize"])
    d.currentSampleSize = int(deploymentData["currentSampleSize"])
    d.facility = facility_db_access.loadFacility(deploymentData["facility"])
    return d
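
Both createDeployment and loadDeployment repeat the same check-and-raise shape once per field. A hedged refactoring sketch: the helper name require is not from the original code, and errors.BadRequestError is the exception class the originals already use:

from decimal import Decimal

def require(data, key, types, check=lambda v: True, hint=""):
    """Raise BadRequestError unless data[key] exists, has one of the
    expected types, and passes the extra check."""
    if key not in data or not isinstance(data[key], types) or not check(data[key]):
        raise errors.BadRequestError(
            "Deployment must have attribute '%s' (%s)" % (key, hint))

# a few of loadDeployment's checks, expressed through the helper
def validate_deployment_data(deploymentData):
    require(deploymentData, "deploymentId", str, lambda v: len(v) > 0,
            "type=str and length>0")
    require(deploymentData, "archived", bool, hint="type=bool")
    require(deploymentData, "goalSampleSize", (int, Decimal),
            lambda v: v > 0, "type=int and value>0")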
Example #20
# note: the argparse parser and the --start, --deployment_id, and --config_file
# arguments referenced below are presumably defined earlier in the original file (not shown)
parser.add_argument('--end', '-e', type=int, default=16, help='end step')
parser.add_argument('--clean', '-c', action='store_true', help='cleanup instances')
args = parser.parse_args()


def validate_deployment_id(deployment_id):
    # Since hydra depends on dashes and gce api doesn't allow other non-alphanumeric characters
    # in instance names, don't allow them in deployment_ids
    if re.match(r"^[a-z0-9]+$", deployment_id):
        return deployment_id
    else:
        exception_msg = "--deployment_id | -i <%s> cannot contain non-alphanumeric characters" % (deployment_id)
        raise ValueError(exception_msg)


if __name__ == "__main__":
    deployment_id = validate_deployment_id(args.deployment_id)
    config_file = args.config_file

    config = Config(deployment_id)
    config.parse_config_file(config_file)  # Will populate config section list of config.
    dep = Deployment(config)

    if args.clean:
        print("==> Removing deployment nodes for deployment-id: %s" % deployment_id)
        dep.cleanup()
    else:
        for step in range(args.start, args.end + 1):
            print ("======================= STEP %d =======================" % step)
            dep.deploy(step)
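
To see what the ^[a-z0-9]+$ pattern permits (Example #21 below relaxes it to ^[A-Za-z0-9]+$, allowing uppercase), a small usage check, assuming validate_deployment_id from the snippet above is in scope:

print(validate_deployment_id("prod01"))   # returned unchanged: lowercase alphanumeric

for bad in ("my-dep", "Prod01", ""):      # dash, uppercase, and empty all rejected
    try:
        validate_deployment_id(bad)
    except ValueError as exc:
        print(exc)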
Example #21
def validate_deployment_id(deployment_id):
    # Since hydra depends on dashes and gce api doesn't allow other non-alphanumeric characters
    # in instance names, don't allow them in deployment_ids
    if re.match(r"^[A-Za-z0-9]+$", deployment_id):
        return deployment_id
    else:
        exception_msg = "--deployment_id | -i <%s> cannot contain non-alphanumeric characters" % (
            deployment_id)
        raise ValueError(exception_msg)


if __name__ == "__main__":
    deployment_id = validate_deployment_id(args.deployment_id)
    config_file = args.config_file

    config = Config(deployment_id)
    config.parse_config_file(
        config_file)  # Will populate config section list of config.
    dep = Deployment(config)

    if args.clean:
        print("==> Removing deployment nodes for deployment-id: %s" %
              deployment_id)
        dep.cleanup()
    else:
        for step in range(args.start, args.end + 1):
            print("======================= STEP %d =======================" %
                  step)
            dep.deploy(step)