def generate_configs(args, job_name, host_id, instance_id):
  hosts = args.kafka_config.jobs[job_name].hosts
  task_id = deploy_utils.get_task_id(hosts, host_id, instance_id)

  kafka_cfg_dict = args.kafka_config.configuration.generated_files["kafka.cfg"]
  kafka_cfg_dict["broker.id"] = task_id
  kafka_cfg = deploy_utils.generate_properties_file(args, kafka_cfg_dict)

  config_files = {
    "kafka.cfg": kafka_cfg,
  }
  config_files.update(args.kafka_config.configuration.raw_files)

  return config_files
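The only generated value in this example is broker.id, which is simply the instance's cluster-wide task id. A hedged sketch of how deploy_utils.get_task_id could derive that id from (host_id, instance_id); the real implementation may differ:

# A sketch only: assumes `hosts` maps host ids to objects carrying an
# `instance_num` attribute, as the loops in Example #2 below suggest.
# Not the real deploy_utils.get_task_id.
def get_task_id(hosts, host_id, instance_id):
  # Number tasks host by host: count every instance on the hosts that
  # sort before this one, then add the local instance offset.
  task_id = 0
  for hid in sorted(hosts.keys()):
    if hid == host_id:
      return task_id + instance_id
    task_id += hosts[hid].instance_num
  raise ValueError("unknown host_id: %r" % host_id)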
Example #2
  def update_active_tasks(self):
    # Add all active tasks
    self.metric_sources = []
    for service_name, service in self.collector_config.services.iteritems():
      # Save to database.
      # The active field has the default value True.
      service_record, created = Service.objects.get_or_create(
          name=service_name,
          defaults={"metric_url":service.metric_url})
      if not created:
        # Mark it as active if it exists.
        service_record.active = True
        service_record.save()

      for cluster_name, cluster in service.clusters.iteritems():
        cluster_record, created = Cluster.objects.get_or_create(
            service=service_record, name=cluster_name)
        if not created:
          cluster_record.active = True
          cluster_record.save()

        for job_name in service.jobs:
          job_record, created = Job.objects.get_or_create(
              cluster=cluster_record, name=job_name)
          if not created:
            job_record.active = True
            job_record.save()

          job = cluster.jobs[job_name]
          # We assume http port is always base_port + 1
          port = job.base_port + 1
          # support multiple instances
          hosts = job.hosts
          for host_id, host in hosts.iteritems():
            host_name = job.hostnames[host_id]
            for instance_id in range(host.instance_num):
              task_id = deploy_utils.get_task_id(hosts, host_id, instance_id)
              instance_port = deploy_utils.get_base_port(port, instance_id)
              task_record, created = Task.objects.get_or_create(
                job=job_record, task_id=task_id,
                defaults={"host":host_name, "port":instance_port})
              if (not created or task_record.host != host_name
                  or task_record.port != instance_port):
                task_record.active = True
                task_record.host = host_name
                task_record.port = instance_port
                task_record.save()
              self.metric_sources.append(
                MetricSource(self.collector_config, task_record))
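The port arithmetic above relies on two conventions: a job's http port is base_port + 1, and each additional instance on the same host is shifted by a fixed per-instance stride. A hedged sketch of that derivation; the stride of 10 is an assumption, and the real deploy_utils.get_base_port may differ:

# Assumption for illustration: each instance on a host owns a block of
# 10 consecutive ports.
PORTS_PER_INSTANCE = 10

def get_base_port(base_port, instance_id):
  # Shift the whole port block by one stride per extra instance.
  return base_port + instance_id * PORTS_PER_INSTANCE

# Mirroring update_active_tasks: with an assumed base_port of 21800,
# instance 2 reports metrics on get_base_port(21800 + 1, 2) == 21821.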
Example #3
def generate_bootstrap_script(args, host, job_name, host_id, instance_id):
  supervisor_client = deploy_utils.get_supervisor_client(host,
    "zookeeper", args.zookeeper_config.cluster.name, job_name, instance_id=instance_id)
  data_dir = supervisor_client.get_available_data_dirs()[0]
  myid_file = "%s/%s" % (data_dir, MYID_FILE)

  hosts = args.zookeeper_config.jobs["zookeeper"].hosts
  task_id = deploy_utils.get_task_id(hosts, host_id, instance_id)

  script_dict = {
    'myid_file': myid_file,
    'host_id': task_id,
  }
  return deploy_utils.create_run_script(
      '%s/bootstrap_zk.sh.tmpl' % deploy_utils.get_template_dir(),
      script_dict)
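The bootstrap script exists only to seed ZooKeeper's myid file with the task id. A minimal sketch of the template-rendering step, assuming a string.Template-style ${...} file; the real deploy_utils.create_run_script and bootstrap_zk.sh.tmpl ship with the deploy tool and may differ:

from string import Template

# A sketch only, not the real deploy_utils.create_run_script: read the
# template file and substitute script_dict into its ${...} placeholders.
def create_run_script(template_path, script_dict):
  with open(template_path) as template_file:
    return Template(template_file.read()).safe_substitute(script_dict)

# A hypothetical template would then need just one effective line:
#   echo ${host_id} > ${myid_file}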
Example #5
def generate_configs(args, job_name, host_id, instance_id):
  kafka_cfg_dict = args.kafka_config.configuration.generated_files["kafka.cfg"]
  hosts = args.kafka_config.jobs[job_name].hosts
  kafka_cfg_dict["broker.id"] = deploy_utils.get_task_id(hosts, host_id, instance_id)
  kafka_cfg = deploy_utils.generate_properties_file(args, kafka_cfg_dict)

  kafka_scribe_cfg_dict = args.kafka_config.configuration.generated_files[
      "kafka-scribe.cfg"]
  kafka_job = args.kafka_config.jobs["kafka"]
  kafka_scribe_cfg_dict["metadata.broker.list"] = ",".join(
      service_config.get_job_host_port_list(kafka_job))
  kafka_scribe_cfg = deploy_utils.generate_properties_file(
      args, kafka_scribe_cfg_dict)

  config_files = {
    "kafka.cfg": kafka_cfg,
    "kafka-scribe.cfg": kafka_scribe_cfg,
  }
  config_files.update(args.kafka_config.configuration.raw_files)

  return config_files
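metadata.broker.list needs one host:port entry per broker task across the whole job. A hedged sketch of what service_config.get_job_host_port_list plausibly computes, reusing the per-instance port stride assumed earlier; the real helper may differ:

# A sketch only: assumes `job` carries hosts, hostnames and base_port as in
# the examples above, plus the assumed stride of 10 ports per instance.
def get_job_host_port_list(job):
  host_port_list = []
  for host_id, host in job.hosts.iteritems():
    for instance_id in range(host.instance_num):
      port = job.base_port + instance_id * 10  # stride is an assumption
      host_port_list.append("%s:%d" % (job.hostnames[host_id], port))
  return host_port_list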