Example #1
def create_celery_configuration(ctx, runner, agent_config, resource_loader):
    create_celery_includes_file(ctx, runner, agent_config)
    loader = jinja2.FunctionLoader(resource_loader)
    env = jinja2.Environment(loader=loader)
    config_template_path = get_agent_resource_local_path(
        ctx, agent_config, 'celery_config_path')
    config_template = env.get_template(config_template_path)
    config_template_values = {
        'includes_file_path':
        agent_config['includes_file'],
        'celery_base_dir':
        agent_config['celery_base_dir'],
        'worker_modifier':
        agent_config['name'],
        'management_ip':
        utils.get_manager_ip(),
        'broker_ip':
        utils.get_manager_ip(),
        'agent_ip':
        get_agent_ip(ctx, agent_config),
        'celery_user':
        agent_config['user'],
        'celery_group':
        agent_config['user'],
        'worker_autoscale':
        '{0},{1}'.format(agent_config['max_workers'],
                         agent_config['min_workers'])
    }

    ctx.logger.debug(
        'Populating celery config jinja2 template with the following '
        'values: {0}'.format(config_template_values))

    config = config_template.render(config_template_values)
    init_template_path = get_agent_resource_local_path(ctx, agent_config,
                                                       'celery_init_path')
    init_template = env.get_template(init_template_path)
    init_template_values = {
        'celery_base_dir': agent_config['celery_base_dir'],
        'worker_modifier': agent_config['name']
    }

    ctx.logger.debug(
        'Populating celery init.d jinja2 template with the following '
        'values: {0}'.format(init_template_values))

    init = init_template.render(init_template_values)

    ctx.logger.debug(
        'Creating celery config and init files [cloudify_agent={0}]'.format(
            agent_config))

    runner.put(agent_config['config_file'], config, use_sudo=True)
    runner.put(agent_config['init_file'], init, use_sudo=True)
def cfy_agent_attributes(cloudify_agent):

    if 'process_management' not in cloudify_agent:

        # user did not specify process management configuration, choose the
        # default one according to os type.
        if cloudify_agent['windows']:
            cloudify_agent['process_management'] = {
                'name': 'nssm'
            }
        else:
            cloudify_agent['process_management'] = {
                'name': 'init.d'
            }

    if 'name' not in cloudify_agent:
        if cloudify_agent['local']:
            workflows_worker = cloudify_agent.get('workflows_worker', False)
            suffix = '_workflows' if workflows_worker else ''
            name = '{0}{1}'.format(ctx.deployment.id, suffix)
        else:
            name = ctx.instance.id
        cloudify_agent['name'] = name

    if 'queue' not in cloudify_agent:

        # by default, the queue of the agent is the same as the name
        cloudify_agent['queue'] = cloudify_agent['name']

    if 'manager_ip' not in cloudify_agent:

        # by default, the manager ip will be set by an environment variable
        cloudify_agent['manager_ip'] = get_manager_ip()
Example #3
def download_blueprint_file(blueprint_file, ctx):
  ip = get_manager_ip()
  # HACK:
  port = 53229
  blueprint_id = ctx.blueprint_id
  url = 'http://%s:%d/%s/%s' % (ip, port, blueprint_id, blueprint_file)
  return download(url, ctx.logger)
    def broker_config(self, fallback_to_manager_ip=True):
        """
        Returns dictionary containing broker configuration.

        :param fallback_to_manager_ip: If True and there is no broker_ip in
        the context, the manager ip will be used. Note that manager ip
        detection is only possible within the agent.
        """
        attributes = {}
        bootstrap_agent = self.cloudify_agent
        broker_user, broker_pass = utils.internal.get_broker_credentials(
            bootstrap_agent
        )
        if bootstrap_agent.broker_ip:
            attributes['broker_ip'] = bootstrap_agent.broker_ip
        elif fallback_to_manager_ip:
            attributes['broker_ip'] = utils.get_manager_ip()
        attributes['broker_user'] = broker_user
        attributes['broker_pass'] = broker_pass
        attributes['broker_ssl_enabled'] = bootstrap_agent.broker_ssl_enabled
        attributes['broker_ssl_cert'] = bootstrap_agent.broker_ssl_cert
        if bootstrap_agent.broker_ssl_enabled:
            broker_port = constants.BROKER_PORT_SSL
        else:
            broker_port = constants.BROKER_PORT_NO_SSL
        attributes['broker_port'] = broker_port
        return attributes
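
The dictionary returned by broker_config() can be fed straight into a pika connection. A minimal sketch, assuming an instance named handler of the surrounding class and the old-style pika API used throughout these examples; SSL wiring is omitted:

import pika

broker = handler.broker_config()  # 'handler' is an assumed instance of the class above
credentials = pika.credentials.PlainCredentials(
    username=broker['broker_user'],
    password=broker['broker_pass'])
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host=broker['broker_ip'],
                              port=broker['broker_port'],
                              credentials=credentials))
# broker_ssl_enabled / broker_ssl_cert handling is left out for brevity.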
Example #5
def _cfy_agent_attributes_no_defaults(cloudify_agent):

    if not cloudify_agent.get('process_management'):
        cloudify_agent['process_management'] = {}

    if not cloudify_agent['process_management'].get('name'):
        # user did not specify process management configuration, choose the
        # default one according to os type.
        if cloudify_agent['windows']:
            name = 'nssm'
        else:
            name = 'init.d'
        cloudify_agent['process_management']['name'] = name

    if not cloudify_agent.get('name'):
        if cloudify_agent['local']:
            workflows_worker = cloudify_agent.get('workflows_worker', False)
            suffix = '_workflows' if workflows_worker else ''
            name = '{0}{1}'.format(ctx.deployment.id, suffix)
        else:
            name = ctx.instance.id
        cloudify_agent['name'] = name

    if not cloudify_agent.get('queue'):
        # by default, the queue of the agent is the same as the name
        cloudify_agent['queue'] = cloudify_agent['name']

    if not cloudify_agent.get('manager_ip'):
        # by default, the manager ip will be set by an environment variable
        cloudify_agent['manager_ip'] = get_manager_ip()
    def broker_config(self, fallback_to_manager_ip=True):
        """
        Returns dictionary containing broker configuration.

        :param fallback_to_manager_ip: If True and there is no broker_ip in
        the context, the manager ip will be used. Note that manager ip
        detection is only possible within the agent.
        """
        attributes = {}
        bootstrap_agent = self.cloudify_agent
        broker_user, broker_pass = utils.internal.get_broker_credentials(
            bootstrap_agent)
        if bootstrap_agent.broker_ip:
            attributes['broker_ip'] = bootstrap_agent.broker_ip
        elif fallback_to_manager_ip:
            attributes['broker_ip'] = utils.get_manager_ip()
        attributes['broker_user'] = broker_user
        attributes['broker_pass'] = broker_pass
        attributes['broker_ssl_enabled'] = bootstrap_agent.broker_ssl_enabled
        attributes['broker_ssl_cert'] = bootstrap_agent.broker_ssl_cert
        if bootstrap_agent.broker_ssl_enabled:
            broker_port = constants.BROKER_PORT_SSL
        else:
            broker_port = constants.BROKER_PORT_NO_SSL
        attributes['broker_port'] = broker_port
        return attributes
Example #7
def _publish_configuration_event(state, deployment_config_dir_path):
    manager_queue = 'manager-riemann'
    exchange_name = 'cloudify-monitoring'
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=get_manager_ip())
    )
    try:
        channel = connection.channel()
        channel.exchange_declare(exchange=exchange_name,
                                 type='topic',
                                 durable=False,
                                 auto_delete=True,
                                 internal=False)
        channel.queue_declare(
            queue=manager_queue,
            auto_delete=True,
            durable=False,
            exclusive=False)
        channel.queue_bind(exchange=exchange_name,
                           queue=manager_queue,
                           routing_key=manager_queue)
        channel.basic_publish(
            exchange=exchange_name,
            routing_key=manager_queue,
            body=json.dumps({
                'service': 'cloudify.configuration',
                'state': state,
                'config_path': deployment_config_dir_path,
                'deployment_id': ctx.deployment.id,
                'time': int(time.time())
            }))
    finally:
        connection.close()
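
For context, a minimal sketch of a consumer for the events published above. It mirrors the publisher's exchange and queue declarations and uses the same old-style pika API; the callback body is purely illustrative:

import json
import pika

from cloudify.utils import get_manager_ip

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host=get_manager_ip()))
channel = connection.channel()
channel.exchange_declare(exchange='cloudify-monitoring',
                         type='topic',
                         durable=False,
                         auto_delete=True,
                         internal=False)
channel.queue_declare(queue='manager-riemann',
                      auto_delete=True,
                      durable=False,
                      exclusive=False)
channel.queue_bind(exchange='cloudify-monitoring',
                   queue='manager-riemann',
                   routing_key='manager-riemann')


def on_configuration_event(ch, method, properties, body):
    # body is the JSON document built by _publish_configuration_event
    event = json.loads(body)
    print('{0} -> {1}'.format(event['deployment_id'], event['state']))


channel.basic_consume(on_configuration_event,
                      queue='manager-riemann',
                      no_ack=True)
channel.start_consuming()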
    def __init__(self,
                 amqp_user='******',
                 amqp_pass='******',
                 amqp_host=None,
                 ssl_enabled=False,
                 ssl_cert_path=''):
        if amqp_host is None:
            amqp_host = get_manager_ip()

        self.events_queue = None
        self.logs_queue = None

        credentials = pika.credentials.PlainCredentials(
            username=amqp_user,
            password=amqp_pass,
        )

        amqp_port, ssl_options = internal.get_broker_ssl_and_port(
            ssl_enabled=ssl_enabled,
            cert_path=ssl_cert_path,
        )

        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                host=amqp_host,
                port=amqp_port,
                credentials=credentials,
                ssl=ssl_enabled,
                ssl_options=ssl_options,
            ))
        settings = {'auto_delete': True, 'durable': True, 'exclusive': False}
        self.logs_queue = self.connection.channel()
        self.logs_queue.queue_declare(queue=self.logs_queue_name, **settings)
        self.events_queue = self.connection.channel()
        self.events_queue.queue_declare(queue=self.logs_queue_name, **settings)
def install(ctx, runner=None, cloudify_agent=None, **kwargs):
    """
    Installs the cloudify agent service on the machine.
    The agent installation consists of the following:

        1. Download and extract necessary files.
        2. Configure the agent service to auto start on vm launch.
        3. Configure the agent service to restart on failure.


    :param ctx: Invocation context - injected by the @operation
    :param runner: Injected by the @init_worker_installer
    :param cloudify_agent: Injected by the @init_worker_installer
    """

    if cloudify_agent.get('delete_amqp_queues'):
        _delete_amqp_queues(cloudify_agent['name'])

    ctx.logger.info('Installing agent {0}'.format(cloudify_agent['name']))

    agent_exec_path = 'C:\{0}'.format(AGENT_EXEC_FILE_NAME)

    runner.download(get_agent_package_url(), agent_exec_path)
    ctx.logger.debug('Extracting agent to C:\\ ...')

    runner.run('{0} -o{1} -y'.format(agent_exec_path, RUNTIME_AGENT_PATH),
               quiet=True)

    params = ('--broker=amqp://guest:guest@{0}:5672// '
              '-Ofair '
              '--events '
              '--app=cloudify '
              '-Q {1} '
              '--hostname={1} '
              '--logfile={2}\celery.log '
              '--pidfile={2}\celery.pid '
              '--autoscale={3},{4} '
              '--include={5} '
              '--without-gossip '
              '--without-mingle '.format(
                  utils.get_manager_ip(), cloudify_agent['name'],
                  RUNTIME_AGENT_PATH,
                  cloudify_agent[win_constants.MIN_WORKERS_KEY],
                  cloudify_agent[win_constants.MAX_WORKERS_KEY],
                  AGENT_INCLUDES))
    runner.run(
        '{0}\\nssm\\nssm.exe install {1} {0}\Scripts\celeryd.exe {2}'.format(
            RUNTIME_AGENT_PATH, AGENT_SERVICE_NAME, params))
    env = create_env_string(cloudify_agent)
    runner.run('{0}\\nssm\\nssm.exe set {1} AppEnvironmentExtra {2}'.format(
        RUNTIME_AGENT_PATH, AGENT_SERVICE_NAME, env))
    runner.run('sc config {0} start= auto'.format(AGENT_SERVICE_NAME))
    runner.run('sc failure {0} reset= {1} actions= restart/{2}'.format(
        AGENT_SERVICE_NAME,
        cloudify_agent['service'][win_const.SERVICE_FAILURE_RESET_TIMEOUT_KEY],
        cloudify_agent['service'][
            win_const.SERVICE_FAILURE_RESTART_DELAY_KEY]))

    ctx.logger.info('Creating parameters file from {0}'.format(params))
    runner.put(params, '{0}\AppParameters'.format(RUNTIME_AGENT_PATH))
Example #10
def install(ctx, runner=None, cloudify_agent=None, **kwargs):

    """
    Installs the cloudify agent service on the machine.
    The agent installation consists of the following:

        1. Download and extract necessary files.
        2. Configure the agent service to auto start on vm launch.
        3. Configure the agent service to restart on failure.


    :param ctx: Invocation context - injected by the @operation
    :param runner: Injected by the @init_worker_installer
    :param cloudify_agent: Injected by the @init_worker_installer
    """

    ctx.logger.info('Installing agent {0}'.format(cloudify_agent['name']))

    agent_exec_path = 'C:\{0}'.format(AGENT_EXEC_FILE_NAME)

    runner.download(get_agent_package_url(), agent_exec_path)
    ctx.logger.debug('Extracting agent to C:\\ ...')

    runner.run('{0} -o{1} -y'.format(agent_exec_path, RUNTIME_AGENT_PATH),
               quiet=True)

    params = ('--broker=amqp://guest:guest@{0}:5672// '
              '--events '
              '--app=cloudify '
              '-Q {1} '
              '-n celery.{1} '
              '--logfile={2}\celery.log '
              '--pidfile={2}\celery.pid '
              '--autoscale={3},{4} '
              '--include={5} '
              .format(utils.get_manager_ip(),
                      cloudify_agent['name'],
                      RUNTIME_AGENT_PATH,
                      cloudify_agent[win_constants.MIN_WORKERS_KEY],
                      cloudify_agent[win_constants.MAX_WORKERS_KEY],
                      AGENT_INCLUDES))
    runner.run('{0}\\nssm\\nssm.exe install {1} {0}\Scripts\celeryd.exe {2}'
               .format(RUNTIME_AGENT_PATH, AGENT_SERVICE_NAME, params))
    env = create_env_string(cloudify_agent)
    runner.run('{0}\\nssm\\nssm.exe set {1} AppEnvironmentExtra {2}'
               .format(RUNTIME_AGENT_PATH, AGENT_SERVICE_NAME, env))
    runner.run('sc config {0} start= auto'.format(AGENT_SERVICE_NAME))
    runner.run(
        'sc failure {0} reset= {1} actions= restart/{2}'.format(
            AGENT_SERVICE_NAME,
            cloudify_agent['service'][
                win_const.SERVICE_FAILURE_RESET_TIMEOUT_KEY
            ],
            cloudify_agent['service'][
                win_const.SERVICE_FAILURE_RESTART_DELAY_KEY
            ]))

    ctx.logger.info('Creating parameters file from {0}'.format(params))
    runner.put(params, '{0}\AppParameters'.format(RUNTIME_AGENT_PATH))
Example #11
def create_celery_configuration(ctx, runner, agent_config, resource_loader):
    create_celery_includes_file(ctx, runner, agent_config)
    loader = jinja2.FunctionLoader(resource_loader)
    env = jinja2.Environment(loader=loader)
    config_template_path = get_agent_resource_local_path(
        ctx, agent_config, 'celery_config_path')
    config_template = env.get_template(config_template_path)
    config_template_values = {
        'includes_file_path': agent_config['includes_file'],
        'celery_base_dir': agent_config['celery_base_dir'],
        'worker_modifier': agent_config['name'],
        'management_ip': utils.get_manager_ip(),
        'broker_ip': '127.0.0.1' if is_on_management_worker(ctx)
        else utils.get_manager_ip(),
        'agent_ip': get_agent_ip(ctx, agent_config),
        'celery_user': agent_config['user'],
        'celery_group': agent_config['user'],
        'worker_autoscale': '{0},{1}'.format(agent_config['max_workers'],
                                             agent_config['min_workers'])
    }

    ctx.logger.debug(
        'Populating celery config jinja2 template with the following '
        'values: {0}'.format(config_template_values))

    config = config_template.render(config_template_values)
    init_template_path = get_agent_resource_local_path(
        ctx, agent_config, 'celery_init_path')
    init_template = env.get_template(init_template_path)
    init_template_values = {
        'celery_base_dir': agent_config['celery_base_dir'],
        'worker_modifier': agent_config['name']
    }

    ctx.logger.debug(
        'Populating celery init.d jinja2 template with the following '
        'values: {0}'.format(init_template_values))

    init = init_template.render(init_template_values)

    ctx.logger.debug(
        'Creating celery config and init files [cloudify_agent={0}]'.format(
            agent_config))

    runner.put(agent_config['config_file'], config, use_sudo=True)
    runner.put(agent_config['init_file'], init, use_sudo=True)
def process_subs(s):

    with open("/tmp/subs", "a+") as f:
        f.write("processing " + s)

    pat = '@{([^}]+)}|%{([^}]+)}'
    client = None
    m = re.search(pat, s)

    with open("/tmp/subs", "a+") as f:
        f.write(" m " + str(m) + "\n")

    if (not m):
        #no patterns found
        ctx.logger.info('no pattern found:{}'.format(s))
        return s
    while (m):

        # Match @ syntax.  Gets runtime properties
        if (m.group(1)):
            with open("/tmp/subs", "a+") as f:
                f.write(" m.group(1)=" + str(m.group(1)) + "\n")
            fields = m.group(1).split(',')
            if m and len(fields) > 1:
                # do substitution
                if (not client):
                    client = manager.get_rest_client()
                instances = client.node_instances.list(
                    deployment_id=ctx.deployment.id, node_name=fields[0])
                if (instances and len(instances)):
                    #just use first instance if more than one
                    val = instances[0].runtime_properties
                    for field in fields[1:]:
                        field = field.strip()
                        val = val[field]  #handle nested maps

                    s = s[:m.start()] + str(val) + s[m.end(1) + 1:]
                    m = re.search(pat, s)
                else:
                    raise Exception("no instances found for node: {}".format(
                        fields[0]))
            else:
                raise Exception("invalid pattern: " + s)

        # Match % syntax.  Gets context property.
        # also handles special token "management_ip"
        elif (m.group(2)):
            with open("/tmp/subs", "a+") as f:
                f.write("m.group(2)=" + str(m.group(2)) + "\n")
            if (m.group(2) == "management_ip"):
                s = s[:m.start()] + str(
                    utils.get_manager_ip()) + s[m.end(2) + 1:]
            else:
                s = s[:m.start()] + str(
                    eval("ctx." + m.group(2))) + s[m.end(2) + 1:]
            m = re.search(pat, s)

    return s
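
To make the two substitution syntaxes concrete, a small self-contained sketch of the pattern used above; the node name and property below are made-up examples:

import re

pat = '@{([^}]+)}|%{([^}]+)}'
sample = 'http://%{management_ip}:8080/@{db_server,port}'
for m in re.finditer(pat, sample):
    if m.group(1):
        # @{node,prop,...} is resolved through the REST client's runtime properties
        print('runtime-property reference: {0}'.format(m.group(1)))
    else:
        # %{ctx_prop} is resolved from ctx, with 'management_ip' handled specially
        print('context reference: {0}'.format(m.group(2)))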
Example #13
 def on_manager_created(self):
     cfy = utils.get_cfy()
     kwargs = {}
     rest_port = os.environ.get(constants.CLOUDIFY_REST_PORT)
     if rest_port:
         kwargs = {'rest_port': rest_port}
     cfy.use(utils.get_manager_ip(),
             manager_user='******',
             manager_key=docl.ssh_key_path(),
             **kwargs)
     self.start_events_printer()
def process_subs(s):

  with open("/tmp/subs","a+") as f:
    f.write("processing "+s)

  pat='@{([^}]+)}|%{([^}]+)}'
  client=None
  m=re.search(pat,s)

  with open("/tmp/subs","a+") as f:
    f.write(" m "+str(m)+"\n")

  if(not m):
    #no patterns found
    ctx.logger.info('no pattern found:{}'.format(s))
    return s;
  while(m):

    # Match @ syntax.  Gets runtime properties
    if(m.group(1)):
      with open("/tmp/subs","a+") as f:
        f.write(" m.group(1)="+str(m.group(1))+"\n")
      fields=m.group(1).split(',')
      if m and len(fields)>1:
        # do substitution
        if(not client):
          client=manager.get_rest_client()
        instances=client.node_instances.list(deployment_id=ctx.deployment.id,node_name=fields[0])
        if(instances and len(instances)):
          #just use first instance if more than one
          val=instances[0].runtime_properties
          for field in fields[1:]:
            field=field.strip()
            val=val[field]    #handle nested maps
  
          s=s[:m.start()]+str(val)+s[m.end(1)+1:]
          m=re.search(pat,s)
        else:
          raise Exception("no instances found for node: {}".format(fields[0]))
      else:
        raise Exception("invalid pattern: "+s)

    # Match % syntax.  Gets context property.
    # also handles special token "management_ip"
    elif(m.group(2)):
      with open("/tmp/subs","a+") as f:
        f.write("m.group(2)="+str(m.group(2))+"\n")
      if(m.group(2)=="management_ip"):
        s=s[:m.start()]+str(utils.get_manager_ip())+s[m.end(2)+1:]
      else:
        s=s[:m.start()]+str(eval("ctx."+m.group(2)))+s[m.end(2)+1:]
      m=re.search(pat,s)
      
  return s
Example #15
def create_celery_configuration(ctx, runner, worker_config, resource_loader):
    create_celery_includes_file(ctx, runner, worker_config)
    loader = jinja2.FunctionLoader(resource_loader)
    env = jinja2.Environment(loader=loader)
    config_template = env.get_template(CELERY_CONFIG_PATH)
    config_template_values = {
        'includes_file_path': worker_config['includes_file'],
        'celery_base_dir': worker_config['celery_base_dir'],
        'worker_modifier': worker_config['name'],
        'management_ip': utils.get_manager_ip(),
        'broker_ip': '127.0.0.1' if is_deployment_worker(ctx)
        else utils.get_manager_ip(),
        'agent_ip': get_agent_ip(ctx, worker_config),
        'celery_user': worker_config['user'],
        'celery_group': worker_config['user']
    }

    ctx.logger.debug(
        'Populating celery config jinja2 template with the following '
        'values: {0}'.format(config_template_values))

    config = config_template.render(config_template_values)
    init_template = env.get_template(CELERY_INIT_PATH)
    init_template_values = {
        'celery_base_dir': worker_config['celery_base_dir'],
        'worker_modifier': worker_config['name']
    }

    ctx.logger.debug(
        'Populating celery init.d jinja2 template with the following '
        'values: {0}'.format(init_template_values))

    init = init_template.render(init_template_values)

    ctx.logger.debug(
        'Creating celery config and init files [worker_config={0}]'.format(
            worker_config))

    runner.put(worker_config['config_file'], config, use_sudo=True)
    runner.put(worker_config['init_file'], init, use_sudo=True)
 def __init__(self):
     self.events_queue = None
     self.logs_queue = None
     self.connection = pika.BlockingConnection(
         pika.ConnectionParameters(host=get_manager_ip()))
     settings = {
         'auto_delete': True,
         'durable': True,
         'exclusive': False
     }
     self.logs_queue = self.connection.channel()
     self.logs_queue.queue_declare(queue=self.logs_queue_name, **settings)
     self.events_queue = self.connection.channel()
     self.events_queue.queue_declare(queue=self.logs_queue_name, **settings)
Example #17
  def __init__(self,ctx):
    self._esurl=''
    ESPORT=9200
    mgr_ip='127.0.0.1'
    try:
      mgr_ip=utils.get_manager_ip()
    except:
      pass

    self._esurl="http://{}:{}/".format(mgr_ip,ESPORT)

    r=requests.get(self._esurl)
    if r.status_code != 200:
      raise "elasticsearch not found at {}:{}".format(mgr_ip,ESPORT)
Example #18
    def publish_log(self, log):
        if self.amqp_client is None:
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(host=get_manager_ip()))
            channel = connection.channel()
            channel.queue_declare(queue='cloudify-logs',
                                  auto_delete=True,
                                  durable=True,
                                  exclusive=False)
            self.amqp_client = channel

        self.amqp_client.basic_publish(exchange='',
                                       routing_key='cloudify-logs',
                                       body=json.dumps(log))
def create_celery_configuration(ctx, runner, agent_config, resource_loader):
    create_celery_includes_file(ctx, runner, agent_config)
    loader = jinja2.FunctionLoader(resource_loader)
    env = jinja2.Environment(loader=loader)
    config_template_path = get_agent_resource_local_path(ctx, agent_config, "celery_config_path")
    config_template = env.get_template(config_template_path)
    config_template_values = {
        "includes_file_path": agent_config["includes_file"],
        "celery_base_dir": agent_config["celery_base_dir"],
        "worker_modifier": agent_config["name"],
        "management_ip": utils.get_manager_ip(),
        "broker_ip": utils.get_manager_ip(),
        "agent_ip": get_agent_ip(ctx, agent_config),
        "celery_user": agent_config["user"],
        "celery_group": agent_config["user"],
        "worker_autoscale": "{0},{1}".format(agent_config["max_workers"], agent_config["min_workers"]),
    }

    ctx.logger.debug(
        "Populating celery config jinja2 template with the following " "values: {0}".format(config_template_values)
    )

    config = config_template.render(config_template_values)
    init_template_path = get_agent_resource_local_path(ctx, agent_config, "celery_init_path")
    init_template = env.get_template(init_template_path)
    init_template_values = {"celery_base_dir": agent_config["celery_base_dir"], "worker_modifier": agent_config["name"]}

    ctx.logger.debug(
        "Populating celery init.d jinja2 template with the following " "values: {0}".format(init_template_values)
    )

    init = init_template.render(init_template_values)

    ctx.logger.debug("Creating celery config and init files [cloudify_agent={0}]".format(agent_config))

    runner.put(agent_config["config_file"], config, use_sudo=True)
    runner.put(agent_config["init_file"], init, use_sudo=True)
Example #20
def _publish_configuration_event(state, deployment_config_dir_path):
    manager_queue = 'manager-riemann'
    exchange_name = 'cloudify-monitoring'

    broker_port, ssl_options = utils.internal.get_broker_ssl_and_port(
        ssl_enabled=broker_config.broker_ssl_enabled,
        cert_path=broker_config.broker_cert_path,
    )

    credentials = pika.credentials.PlainCredentials(
        username=broker_config.broker_username,
        password=broker_config.broker_password,
    )

    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=get_manager_ip(),
                                  port=broker_port,
                                  credentials=credentials,
                                  ssl=broker_config.broker_ssl_enabled,
                                  ssl_options=ssl_options)
    )

    try:
        channel = connection.channel()
        channel.exchange_declare(exchange=exchange_name,
                                 type='topic',
                                 durable=False,
                                 auto_delete=True,
                                 internal=False)
        channel.queue_declare(
            queue=manager_queue,
            auto_delete=True,
            durable=False,
            exclusive=False)
        channel.queue_bind(exchange=exchange_name,
                           queue=manager_queue,
                           routing_key=manager_queue)
        channel.basic_publish(
            exchange=exchange_name,
            routing_key=manager_queue,
            body=json.dumps({
                'service': 'cloudify.configuration',
                'state': state,
                'config_path': deployment_config_dir_path,
                'deployment_id': ctx.deployment.id,
                'time': int(time.time())
            }))
    finally:
        connection.close()
Example #21
def setup_environment(ctx):
  '''Add some useful environment variables to the environment'''
  env = os.environ.copy()
  # See in context.py
  # https://github.com/CloudifySource/cosmo-celery-common/blob/develop/cloudify/context.py
  env['CLOUDIFY_NODE_ID'] = ctx.node_id.encode('utf-8')
  env['CLOUDIFY_BLUEPRINT_ID'] = ctx.blueprint_id.encode('utf-8')
  env['CLOUDIFY_DEPLOYMENT_ID'] = ctx.deployment_id.encode('utf-8')
  env['CLOUDIFY_MANAGER_IP'] = get_manager_ip().encode('utf-8')
  env['CLOUDIFY_EXECUTION_ID'] = ctx.execution_id.encode('utf-8')
  for k, v in ctx.properties.iteritems():
    env['CLOUDIFY_PROPERTY_%s' % k] = str(v).encode('utf-8')
  # for k, v in ctx.runtime_properties.iteritems():
  #   env['CLOUDIFY_RUNTIME_PROPERTY_%s' % k] = v.encode('utf-8')
  return env
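
A script launched with this environment can then read the injected variables directly; a minimal sketch (the script itself is hypothetical, the variable names come from the example above):

import os

manager_ip = os.environ['CLOUDIFY_MANAGER_IP']
node_id = os.environ['CLOUDIFY_NODE_ID']
deployment_id = os.environ['CLOUDIFY_DEPLOYMENT_ID']
print('node {0} of deployment {1}, manager at {2}'.format(
    node_id, deployment_id, manager_ip))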
Example #22
def _wait_for_services(container_ip=None):
    if container_ip is None:
        container_ip = utils.get_manager_ip()
    logger.info('Waiting for RabbitMQ')
    _retry(func=utils.create_pika_connection,
           exceptions=AMQPConnectionError,
           cleanup=lambda conn: conn.close())
    logger.info('Waiting for REST service and Storage')
    rest_client = utils.create_rest_client()
    _retry(func=rest_client.blueprints.list,
           exceptions=(requests.exceptions.ConnectionError,
                       cloudify_rest_client.exceptions.CloudifyClientError))
    logger.info('Waiting for postgres')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    _retry(func=lambda: sock.connect((container_ip, 5432)),
           cleanup=lambda _: sock.close(),
           exceptions=IOError)
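
_retry is not shown in this example; a minimal sketch of what such a helper might look like (an assumption, not the original implementation):

import time


def _retry(func, exceptions=Exception, cleanup=None, attempts=60, interval=1):
    """Call func until it stops raising `exceptions`, then run cleanup on the result."""
    for attempt in range(attempts):
        try:
            result = func()
            if cleanup is not None:
                cleanup(result)
            return result
        except exceptions:
            if attempt == attempts - 1:
                raise
            time.sleep(interval)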
Example #23
def _wait_for_services(container_ip=None):
    if container_ip is None:
        container_ip = utils.get_manager_ip()
    logger.info('Waiting for RabbitMQ')
    _retry(func=utils.create_pika_connection,
           exceptions=AMQPConnectionError,
           cleanup=lambda conn: conn.close())
    logger.info('Waiting for REST service and Storage')
    rest_client = utils.create_rest_client()
    _retry(func=rest_client.blueprints.list,
           exceptions=(requests.exceptions.ConnectionError,
                       cloudify_rest_client.exceptions.CloudifyClientError))
    logger.info('Waiting for postgres')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    _retry(func=lambda: sock.connect((container_ip, 5432)),
           cleanup=lambda _: sock.close(),
           exceptions=IOError)
def check_liveness(nodes_to_monitor,depl_id):
    c = CloudifyClient(host=utils.get_manager_ip(),
                       port=utils.get_manager_rest_service_port(),
                       protocol='https',
                       cert=utils.get_local_rest_certificate(),
                       token= utils.get_rest_token(),
                       tenant= utils.get_tenant_name())

    c_influx = InfluxDBClient(host='localhost', port=8086, database='cloudify')
    log ('nodes_to_monitor: {0}'.format(nodes_to_monitor))

    # compare influx data (monitoring) to cloudify desired state

    for node_name in nodes_to_monitor:
        instances=c.node_instances.list(depl_id,node_name)
        for instance in instances:
            q_string='SELECT MEAN(value) FROM /' + depl_id + '\.' + node_name + '\.' + instance.id + '\.cpu_total_system/ GROUP BY time(10s) '\
                   'WHERE  time > now() - 40s'
            log ('query string is {0}'.format(q_string))
            try:
               result=c_influx.query(q_string)
               log ('result is {0}'.format(result))
               if not result:
                  executions=c.executions.list(depl_id)
                  has_pending_execution = False
                  if executions and len(executions)>0:
                      for execution in executions:
                      # log("Execution {0} : {1}".format(execution.id, execution.status))
                          if execution.status not in execution.END_STATES:
                              has_pending_execution = True
                  if not has_pending_execution:
                      log ('Setting state to error for instance {0} and its children'.format(instance.id))
                      update_nodes_tree_state(c, depl_id, instance, 'error')
                      params = {'node_instance_id': instance.id}
                      log ('Calling Auto-healing workflow for container instance {0}'.format(instance.id))
                      c.executions.start(depl_id, 'a4c_heal', params)
                  else:
                      log ('pending executions on the deployment...waiting for the end before calling heal workflow...')
            except InfluxDBClientError as ee:
                log ('DBClienterror {0}'.format(str(ee)), level='ERROR')
                log ('instance id is {0}'.format(instance), level='ERROR')
            except Exception as e:
                log (str(e), level='ERROR')
def create_env_string(cloudify_agent):
    env = {
        constants.CELERY_WORK_DIR_PATH_KEY:
        RUNTIME_AGENT_PATH,
        constants.LOCAL_IP_KEY:
        cloudify_agent['host'],
        constants.MANAGER_IP_KEY:
        utils.get_manager_ip(),
        constants.MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL_KEY:
        utils.get_manager_file_server_blueprints_root_url(),
        constants.MANAGER_FILE_SERVER_URL_KEY:
        utils.get_manager_file_server_url(),
        constants.MANAGER_REST_PORT_KEY:
        utils.get_manager_rest_service_port()
    }
    env_string = ''
    for key, value in env.iteritems():
        env_string = '{0} {1}={2}' \
            .format(env_string, key, value)
    return env_string.strip()
Example #26
def create_env_string(cloudify_agent):
    env = {
        constants.CELERY_WORK_DIR_PATH_KEY:
        RUNTIME_AGENT_PATH,
        constants.LOCAL_IP_KEY:
        cloudify_agent['host'],
        constants.MANAGER_IP_KEY:
        utils.get_manager_ip(),
        constants.MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL_KEY:
        utils.get_manager_file_server_blueprints_root_url(),
        constants.MANAGER_FILE_SERVER_URL_KEY:
        utils.get_manager_file_server_url(),
        constants.MANAGER_REST_PORT_KEY:
        utils.get_manager_rest_service_port()
    }
    env_string = ''
    for key, value in env.iteritems():
        env_string = '{0} {1}={2}' \
            .format(env_string, key, value)
    return env_string.strip()
    def __init__(self,
                 amqp_user='******',
                 amqp_pass='******',
                 amqp_host=None,
                 ssl_enabled=False,
                 ssl_cert_path=''):
        if amqp_host is None:
            amqp_host = get_manager_ip()

        self.events_queue = None
        self.logs_queue = None
        self._is_closed = False

        credentials = pika.credentials.PlainCredentials(
            username=amqp_user,
            password=amqp_pass,
        )

        amqp_port, ssl_options = internal.get_broker_ssl_and_port(
            ssl_enabled=ssl_enabled,
            cert_path=ssl_cert_path,
        )

        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                host=amqp_host,
                port=amqp_port,
                credentials=credentials,
                ssl=ssl_enabled,
                ssl_options=ssl_options,
            )
        )
        settings = {
            'auto_delete': True,
            'durable': True,
            'exclusive': False
        }
        self.logs_queue = self.connection.channel()
        self.logs_queue.queue_declare(queue=self.LOGS_QUEUE_NAME, **settings)
        self.events_queue = self.connection.channel()
        self.events_queue.queue_declare(queue=self.LOGS_QUEUE_NAME, **settings)
def config_handlers(ctx, handlers, config_path, handlers_path):
    """
    Create handler configuration files.
    Copy over a handler if a path to its file was provided.
    Return the list of active handlers.
    """
    if handlers is None:
        handlers = copy_objects.deepcopy(DEFAULT_HANDLERS)

        # If we do not have a real manager cloudify_agent is expected to be an
        # empty dict. This will be handled by get_broker_credentials.
        cloudify_agent = ctx.bootstrap_context.cloudify_agent

        broker_user, broker_pass = utils.internal.get_broker_credentials(
            cloudify_agent
        )

        config_changes = {
            'server': get_manager_ip(),
            'user': broker_user,
            'password': broker_pass,
        }

        handlers['cloudify_handler.cloudify.CloudifyHandler'][
            'config'].update(config_changes)

    elif not handlers:
        raise exceptions.NonRecoverableError('Empty handlers dict')

    for name, prop in handlers.items():
        if 'path' in prop.keys():
            handler_file = os.path.join(handlers_path,
                                        '{0}.py'.format(name.split('.')[-2]))
            ctx.download_resource(prop['path'], handler_file)

        path = os.path.join(config_path, '{0}.conf'.format(
            name.split('.')[-1]))
        write_config(path, prop.get('config', {}))

    return handlers.keys()
def config_handlers(ctx, handlers, config_path, handlers_path):
    """
    Create handler configuration files.
    Copy over a handler if a path to its file was provided.
    Return the list of active handlers.
    """
    if handlers is None:
        handlers = copy_objects.deepcopy(DEFAULT_HANDLERS)

        # If we do not have a real manager cloudify_agent is expected to be an
        # empty dict. This will be handled by get_broker_credentials.
        cloudify_agent = ctx.bootstrap_context.cloudify_agent

        broker_user, broker_pass = utils.internal.get_broker_credentials(
            cloudify_agent)

        config_changes = {
            'server': get_manager_ip(),
            'user': broker_user,
            'password': broker_pass,
        }

        handlers['cloudify_handler.cloudify.CloudifyHandler']['config'].update(
            config_changes)

    elif not handlers:
        raise exceptions.NonRecoverableError('Empty handlers dict')

    for name, prop in handlers.items():
        if 'path' in prop.keys():
            handler_file = os.path.join(handlers_path,
                                        '{0}.py'.format(name.split('.')[-2]))
            ctx.download_resource(prop['path'], handler_file)

        path = os.path.join(config_path,
                            '{0}.conf'.format(name.split('.')[-1]))
        write_config(path, prop.get('config', {}))

    return handlers.keys()
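
For reference, a hedged sketch of the handlers mapping this function expects; the CloudifyHandler entry and its config keys come from the code above, while the custom handler name, path and option are made up:

handlers = {
    'cloudify_handler.cloudify.CloudifyHandler': {
        'config': {
            'server': '10.0.0.5',      # get_manager_ip() in the default case
            'user': 'broker_user',
            'password': 'broker_pass',
        },
    },
    'custom_pkg.custom_module.CustomHandler': {
        'path': 'handlers/custom_module.py',   # copied into handlers_path
        'config': {'enabled': True},
    },
}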
Example #30
def setup_environment(ctx):
    """
    Add some useful environment variables to the environment
    """

    env = os.environ.copy()
    # See in context.py
    # https://github.com/CloudifySource
    # /cosmo-celery-common/blob/develop/cloudify/context.py
    env['CLOUDIFY_NODE_ID'] = ctx.node_id.encode('utf-8')
    env['CLOUDIFY_BLUEPRINT_ID'] = ctx.blueprint_id.encode('utf-8')
    env['CLOUDIFY_DEPLOYMENT_ID'] = ctx.deployment_id.encode('utf-8')
    env['CLOUDIFY_MANAGER_IP'] = get_manager_ip().encode('utf-8')
    env['CLOUDIFY_EXECUTION_ID'] = ctx.execution_id.encode('utf-8')

    logging_script_path = os.path.join(dirname(resources.__file__),
                                       "logging.sh")

    file_server_script_path = os.path.join(dirname(resources.__file__),
                                           "file_server.sh")

    env['CLOUDIFY_LOGGING'] = logging_script_path
    env['CLOUDIFY_FILE_SERVER'] = file_server_script_path

    url = '{0}/{1}'.format(
        utils.get_manager_file_server_blueprints_root_url(),
        ctx.blueprint_id)

    env['CLOUDIFY_FILE_SERVER_BLUEPRINT_ROOT'] = url.encode('utf-8')

    # assuming properties are flat.
    # inject each property as an environment variable.
    for key, value in flatten(ctx.properties).iteritems():
        env[key] = value.encode('utf-8') \
            if isinstance(value, unicode) \
            or isinstance(value, str) else repr(value)

    return env
Example #31
def install(ctx, plugins, **kwargs):

    '''

    Installs plugins as celery tasks according to the provided plugin details.

    The plugins parameter is expected to be a list where each element is in
    one of the following formats:

        1. { name: "...", url: "..." }
        The plugin url should be a URL pointing to either a zip or tar.gz file.

        2. { name: "...", folder: "..." }
        The plugin folder should be a folder name
        inside the blueprint 'plugins' directory containing the plugin.

    :param ctx: Invocation context - injected by the @operation
    :param plugins: An iterable of plugins to install.
    :return:
    '''

    for plugin in plugins:
        ctx.logger.info("Installing plugin {0}".format(plugin['name']))

        if "folder" in plugin:

            # convert the folder into a url inside the file server
            management_ip = get_manager_ip()
            if management_ip:
                plugin["url"] = 'http://{0}:53229/blueprints/{1}/plugins/{2}.zip'\
                                .format(
                    management_ip,
                    ctx.blueprint_id,
                    plugin['folder'])

        ctx.logger.info("Installing plugin from {0}".format(plugin['url']))
        install_celery_plugin(plugin['url'])
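
A hedged usage sketch of the plugins argument described in the docstring; the names, URL and folder below are illustrative, not taken from a real blueprint:

plugins = [
    {'name': 'some-plugin',
     'url': 'http://example.com/plugins/some-plugin.zip'},
    {'name': 'blueprint-plugin',
     'folder': 'my_plugin'},   # resolved against the blueprint's plugins directory
]
# install(ctx, plugins) resolves each entry to a URL and hands it to
# install_celery_plugin().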
Example #32
def setup_environment(ctx):
    """
    Add some useful environment variables to the environment
    """

    env = os.environ.copy()
    # See in context.py
    # https://github.com/CloudifySource
    # /cosmo-celery-common/blob/develop/cloudify/context.py
    env['CLOUDIFY_NODE_ID'] = ctx.node_id.encode('utf-8')
    env['CLOUDIFY_BLUEPRINT_ID'] = ctx.blueprint_id.encode('utf-8')
    env['CLOUDIFY_DEPLOYMENT_ID'] = ctx.deployment_id.encode('utf-8')
    env['CLOUDIFY_MANAGER_IP'] = get_manager_ip().encode('utf-8')
    env['CLOUDIFY_EXECUTION_ID'] = ctx.execution_id.encode('utf-8')

    logging_script_path = os.path.join(dirname(resources.__file__),
                                       "logging.sh")

    file_server_script_path = os.path.join(dirname(resources.__file__),
                                           "file_server.sh")

    env['CLOUDIFY_LOGGING'] = logging_script_path
    env['CLOUDIFY_FILE_SERVER'] = file_server_script_path

    url = '{0}/{1}'.format(utils.get_manager_file_server_blueprints_root_url(),
                           ctx.blueprint_id)

    env['CLOUDIFY_FILE_SERVER_BLUEPRINT_ROOT'] = url.encode('utf-8')

    # assuming properties are flat.
    # inject each property as an environment variable.
    for key, value in flatten(ctx.properties).iteritems():
        env[key] = value.encode('utf-8') \
            if isinstance(value, unicode) \
            or isinstance(value, str) else repr(value)

    return env
Example #33
def install(ctx, plugins, **kwargs):
    '''

    Installs plugins as celery tasks according to the provided plugin details.

    The plugins parameter is expected to be a list where each element is in
    one of the following formats:

        1. { name: "...", url: "..." }
        The plugin url should be a URL pointing to either a zip or tar.gz file.

        2. { name: "...", folder: "..." }
        The plugin folder should be a folder name
        inside the blueprint 'plugins' directory containing the plugin.

    :param ctx: Invocation context - injected by the @operation
    :param plugins: An iterable of plugins to install.
    :return:
    '''

    for plugin in plugins:
        ctx.logger.info("Installing plugin {0}".format(plugin['name']))

        if "folder" in plugin:

            # convert the folder into a url inside the file server
            management_ip = get_manager_ip()
            if management_ip:
                plugin["url"] = 'http://{0}:53229/blueprints/{1}/plugins/{2}.zip'\
                                .format(
                    management_ip,
                    ctx.blueprint_id,
                    plugin['folder'])

        ctx.logger.info("Installing plugin from {0}".format(plugin['url']))
        install_celery_plugin(plugin['url'])
 def __init__(self,
              amqp_user='******',
              amqp_pass='******',
              amqp_host=None,
              ssl_enabled=False,
              ssl_cert_path=''):
     self.connection = None
     self.channel = None
     self._is_closed = False
     if amqp_host is None:
         amqp_host = utils.get_manager_ip()
     credentials = pika.credentials.PlainCredentials(
         username=amqp_user,
         password=amqp_pass)
     amqp_port, ssl_options = utils.internal.get_broker_ssl_and_port(
         ssl_enabled=ssl_enabled,
         cert_path=ssl_cert_path)
     self._connection_parameters = pika.ConnectionParameters(
             host=amqp_host,
             port=amqp_port,
             credentials=credentials,
             ssl=ssl_enabled,
             ssl_options=ssl_options)
     self._connect()
def config_handlers(ctx, handlers, config_path, handlers_path):
    """
    Create handler configuration files.
    Copy over a handler if a path to its file was provided.
    Return the list of active handlers.
    """
    if handlers is None:
        handlers = copy_objects.deepcopy(DEFAULT_HANDLERS)
        handlers['cloudify_handler.cloudify.CloudifyHandler']['config'][
            'server'] = get_manager_ip()
    elif not handlers:
        raise exceptions.NonRecoverableError('Empty handlers dict')

    for name, prop in handlers.items():
        if 'path' in prop.keys():
            handler_file = os.path.join(handlers_path,
                                        '{0}.py'.format(name.split('.')[-2]))
            ctx.download_resource(prop['path'], handler_file)

        path = os.path.join(config_path, '{0}.conf'.format(
            name.split('.')[-1]))
        write_config(path, prop.get('config', {}))

    return handlers.keys()
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils

client = CloudifyClient(utils.get_manager_ip(), utils.get_manager_rest_service_port())


def convert_env_value_to_string(envDict):
    for key, value in envDict.items():
        envDict[str(key)] = str(envDict.pop(key))


def get_host(entity):
    if entity.instance.relationships:
        for relationship in entity.instance.relationships:
            if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
                return relationship.target
    return None

Example #37
def get_agent_ip(ctx, agent_config):
    if is_on_management_worker(ctx):
        return utils.get_manager_ip()
    return agent_config['host']
Example #38
 def get_manager_ip():
     return utils.get_manager_ip()
Example #39
def get_agent_ip(ctx, worker_config):
    if is_deployment_worker(ctx):
        return utils.get_manager_ip()
    return worker_config['host']
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils

if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
  client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
  client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())

def convert_env_value_to_string(envDict):
    for key, value in envDict.items():
        envDict[str(key)] = str(envDict.pop(key))


def get_host(entity):
    if entity.instance.relationships:
        for relationship in entity.instance.relationships:
            if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
                return relationship.target
    return None
Example #41
def get_manager_ip():
    return utils.get_manager_ip()
Example #42
def get_agent_ip(ctx, agent_config):
    if is_on_management_worker(ctx):
        return utils.get_manager_ip()
    return agent_config['host']
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils

client = CloudifyClient(utils.get_manager_ip(),
                        utils.get_manager_rest_service_port())


def convert_env_value_to_string(envDict):
    for key, value in envDict.items():
        envDict[str(key)] = str(envDict.pop(key))


def get_host(entity):
    if entity.instance.relationships:
        for relationship in entity.instance.relationships:
            if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
                return relationship.target
    return None

from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils

client = CloudifyClient(utils.get_manager_ip(), utils.get_manager_rest_service_port())


def get_host(entity):
    if entity.instance.relationships:
        for relationship in entity.instance.relationships:
            if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
                return relationship.target
    return None


def has_attribute_mapping(entity, attribute_name):
    ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
    return ('_a4c_att_' + attribute_name) in entity.node.properties


def process_attribute_mapping(entity, attribute_name, data_retriever_function):
    return public_ip

def get_attribute_from_top_host(entity, attribute_name):
    host = get_host(entity)
    while host is not None:
        entity = host
        host = get_host(entity)
    return get_attribute(entity, attribute_name)

from cloudify import utils
from cloudify_rest_client import CloudifyClient
from cloudify.state import ctx_parameters as inputs

import os

client = CloudifyClient(host=utils.get_manager_ip(),
                        port=utils.get_manager_rest_service_port(),
                        protocol='https',
                        cert=utils.get_local_rest_certificate(),
                        token= utils.get_rest_token(),
                        tenant= utils.get_tenant_name())

def convert_env_value_to_string(envDict):
    for key, value in envDict.items():
        envDict[str(key)] = str(envDict.pop(key))


def parse_output(output):
    # by convention, the last output is the result of the operation
    last_output = None
    outputs = {}