Example #1
    def __init__(self, name, ports, env_name=None):
        self.name = name
        self.ports = ports
        self.env_name = env_name

        volume_name = get_volume_name(name)
        # Declare a 512 MB persistent local volume, then mount it at the container's data path.
        volumes = [
            MarathonContainerVolume(container_path=volume_name,
                                    host_path=None,
                                    mode='RW',
                                    persistent={'size': 512}),
            MarathonContainerVolume(container_path='/var/lib/zookeeper',
                                    host_path=volume_name,
                                    mode='RW',
                                    persistent=None)
        ]

        constraints = [
            MarathonConstraint(field='hostname', operator='LIKE', value='')
        ]
        residency = Residency(task_lost_behavior='WAIT_FOREVER')

        health_checks = [
            MarathonHealthCheck(grace_period_seconds=300,
                                interval_seconds=20,
                                max_consecutive_failures=3,
                                protocol='TCP',
                                timeout_seconds=20,
                                ignore_http1xx=False,
                                port=ports[0])
        ]

        cmd = 'export ZOO_SERVERS="{}" && /docker-entrypoint.sh zkServer.sh start-foreground'

        labels = {
            '_tonomi_application': 'zookeeper',
            '_client_conn_port': str(ports[0]),
            '_follower_conn_port': str(ports[1]),
            '_server_conn_port': str(ports[2])
        }

        if self.env_name:
            labels['_tonomi_environment'] = self.env_name

        env = {'ZOO_MY_ID': '', 'ZOO_PORT': str(ports[0])}

        super().__init__(name,
                         image='zookeeper',
                         volumes=volumes,
                         network='HOST',
                         labels=labels,
                         cmd=cmd,
                         constraints=constraints,
                         residency=residency,
                         env=env,
                         health_checks=health_checks,
                         cpus=0.5,
                         mem=400,
                         instances=1,
                         disk=400)
Example #2
def migrate_tasks(marathon_client, tasks, hosts, force=False):
  """
  Migrate tasks away from the hosts that are about to go into maintenance.
  """
  for app_id in tasks:
    print(">>> Migrating the following tasks")
    print(app_id)

    # Redeploy every application running on a maintenance host by adding
    # UNLIKE constraints that keep its tasks off those hosts.
    constraints_to_add = [MarathonConstraint('hostname', 'UNLIKE', host) for host in hosts]

    # Generate the app to redeploy
    app_to_redeploy = marathon_client.get_app(app_id)
    task_host = tasks[app_id].host
    for constraint in constraints_to_add:
      if constraint.value == task_host:
        app_to_redeploy.constraints.append(constraint)

    # Clean the app of state that should not be resubmitted
    app_to_redeploy.tasks = []
    if app_to_redeploy.container:
      app_to_redeploy.fetch = []

    # Redeploy
    marathon_client.update_app(app_id, app_to_redeploy, force=force)

  print(">>> Migrated all the tasks")
Example #3
def main(args):
    migration_hosts = args.hosts.replace('"', '').replace('\'', '').split(',')
    marathon_client = MarathonClient(args.url)

    # Build a dict of running Marathon applications: app ID -> constraints
    all_apps = utils.dict_by_key_and_value(lambda x: x.id,
                                           lambda y: y.constraints,
                                           marathon_client.list_apps())
    print(">>> All Running Applications: ")
    print(
        json.dumps(sorted(all_apps.keys()),
                   indent=4,
                   separators=(',', ': ')))

    # Constraints to remove (a list, not a lazy map, so it can be iterated
    # here and then passed on to unmigrate_tasks without being exhausted)
    sentinels = [MarathonConstraint('hostname', 'UNLIKE', host)
                 for host in migration_hosts]

    # Find all apps with a leftover constraint
    filtered_apps = {}
    for sentinel in sentinels:
        for app_id in all_apps:
            if sentinel in all_apps[app_id]:
                print(">>> Adding app to filtered list: %s" % app_id)
                filtered_apps[app_id] = all_apps[app_id]

    # Remove the leftover constraints and redeploy the affected apps
    unmigrate_tasks(marathon_client, filtered_apps, sentinels, args.force)
Example #4
  def __init__(self, name, port, zoo_host, zoo_port, env_name=None):
    self.name = name
    self.port = port
    self.zoo_host = zoo_host
    self.zoo_port = zoo_port
    self.env_name = env_name

    volume_name = get_volume_name(name)
    volumes = [
      MarathonContainerVolume(container_path=volume_name, mode='RW', persistent={'size': 512}),
      MarathonContainerVolume(container_path='/kafka', host_path=volume_name, mode='RW')
    ]

    port_mappings = [
      MarathonContainerPortMapping(container_port=port, host_port=port, service_port=port, protocol='tcp')
    ]

    constraints = [MarathonConstraint(field='hostname', operator='UNIQUE')]
    residency = Residency(task_lost_behavior='WAIT_FOREVER')

    labels = {
      '_tonomi_application': 'kafka',
      '_cluster_port': str(port)
    }

    if self.env_name:
      labels['_tonomi_environment'] = self.env_name

    cmd = 'export KAFKA_ADVERTISED_HOST_NAME=$HOST && start-kafka.sh'

    env = {
      'KAFKA_ADVERTISED_PORT': str(port),
      'KAFKA_ZOOKEEPER_CONNECT': '{}:{}'.format(self.zoo_host, self.zoo_port),
      'KAFKA_PORT': str(port)
    }

    health_checks = [get_health_check(port=port)]

    super().__init__(name, image='wurstmeister/kafka:0.10.1.0', network='BRIDGE', labels=labels, cmd=cmd,
                     env=env, health_checks=health_checks, cpus=0.5, mem=512, instances=3,
                     disk=256, volumes=volumes, port_mappings=port_mappings, residency=residency,
                     constraints=constraints)
Example #5
def app_constraints(self, field, operator, value=None):
    """
    Constraints control where apps run. They allow optimizing either for fault tolerance (by spreading a
    task out across multiple nodes) or for locality (by running all of an application's tasks on the same
    node). A constraint has three parts:
    @args:
    field: the hostname of the agent node, or any attribute of the agent node.
    operator: e.g. UNIQUE tells Marathon to enforce uniqueness of the attribute across all of an app's tasks.
              This allows you, for example, to run only one app task on each host.
              CLUSTER allows you to run all of your app's tasks on agent nodes that share a certain
              attribute. Think of apps with special hardware needs.
              GROUP_BY can be used to distribute tasks evenly across racks or datacenters for high
              availability.
              LIKE accepts a regular expression as its parameter, and allows you to run your tasks only on
              the agent nodes whose field values match the regular expression.
              UNLIKE accepts a regular expression as its parameter, and allows you to run your tasks on
              agent nodes whose field values do NOT match the regular expression.
    value: the attribute value (or regular expression for LIKE/UNLIKE) the operator matches against;
           optional for operators such as UNIQUE.
    :return: a MarathonConstraint built from the given field, operator, and value.
    """
    return MarathonConstraint(field=field, operator=operator, value=value)
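
A short sketch of how each documented operator translates into a constraint; `scheduler` is a hypothetical instance of the class this method belongs to, and the field names and patterns are illustrative:

# UNIQUE: spread tasks across hosts for fault tolerance (no value needed).
unique = scheduler.app_constraints('hostname', 'UNIQUE')

# CLUSTER: keep tasks on agents sharing an attribute, e.g. SSD machines.
cluster = scheduler.app_constraints('disk_type', 'CLUSTER', 'ssd')

# GROUP_BY: distribute tasks evenly across racks for high availability.
group_by = scheduler.app_constraints('rack_id', 'GROUP_BY')

# LIKE / UNLIKE: run only on (or avoid) agents whose field matches a regex.
like = scheduler.app_constraints('hostname', 'LIKE', 'agent-[1-3]')
unlike = scheduler.app_constraints('hostname', 'UNLIKE', 'legacy-.*')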
Example #6
  def __init__(self, name, port, is_master=True, env_name=None):
    self.name = name
    self.env_name = env_name

    volume_name = get_volume_name(name)
    volumes = [
      MarathonContainerVolume(container_path=volume_name, mode='RW', persistent={'size': 512}),
      MarathonContainerVolume(container_path='/var/lib/redis', host_path=volume_name, mode='RW')
    ]

    constraints = [MarathonConstraint(field='hostname', operator='UNIQUE')]
    residency = Residency(task_lost_behavior='WAIT_FOREVER')

    health_checks = [get_health_check(port=port)]

    service_port = 0 if not is_master else port
    port_mappings = [
      MarathonContainerPortMapping(container_port=port, host_port=port,
                                   service_port=service_port, protocol='tcp')
    ]

    cmd = 'docker-entrypoint.sh redis-server --port $REDIS_PORT '
    if not is_master:
      cmd += '--slaveof {} $REDIS_PORT'

    labels = {
      '_tonomi_application': 'redis',
      '_cluster_port': str(port)
    }

    if self.env_name:
      labels['_tonomi_environment'] = self.env_name

    env = {
      'REDIS_PORT': str(port)
    }

    super().__init__(name, image='redis:3.2', volumes=volumes, network='BRIDGE', labels=labels, cmd=cmd,
                     constraints=constraints, residency=residency, env=env, health_checks=health_checks,
                     cpus=0.5, mem=300, instances=1, disk=512, port_mappings=port_mappings)
Example #7
  def __init__(self, name, ports, env_name=None):
    self.name = name
    self.ports = ports
    self.env_name = env_name

    volume_name = get_volume_name(name)
    volumes = [
      MarathonContainerVolume(container_path='{}-data'.format(volume_name), mode='RW', persistent={'size': 512}),
      MarathonContainerVolume(container_path='/data', host_path='{}-data'.format(volume_name), mode='RW'),
      MarathonContainerVolume(container_path='{}-datalog'.format(volume_name), mode='RW', persistent={'size': 512}),
      MarathonContainerVolume(container_path='/datalog', host_path='{}-datalog'.format(volume_name), mode='RW')
    ]

    constraints = [MarathonConstraint(field='hostname', operator='LIKE', value='')]
    residency = Residency(task_lost_behavior='WAIT_FOREVER')

    health_checks = [get_health_check(port=ports[0])]

    cmd = 'export ZOO_SERVERS="{}" && /docker-entrypoint.sh zkServer.sh start-foreground'

    labels = {
      '_tonomi_application': 'zookeeper',
      '_client_conn_port': str(ports[0]),
      '_follower_conn_port': str(ports[1]),
      '_server_conn_port': str(ports[2])
    }

    if self.env_name:
      labels['_tonomi_environment'] = self.env_name

    env = {
      'ZOO_MY_ID': '',
      'ZOO_PORT': str(ports[0])
    }

    super().__init__(name, image='zookeeper:3.4.9', volumes=volumes, network='HOST', labels=labels,
                     cmd=cmd, constraints=constraints, residency=residency, env=env,
                     health_checks=health_checks, cpus=0.5, mem=400, instances=1, disk=400)
Example #8
def write(self, user, request_app, original_app):
    request_app.constraints.append(MarathonConstraint.from_json("key:LIKE:value".split(":")))
    return request_app
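
For reference, `from_json` here receives the already-split three-element list, so (assuming marathon-python's constraint model, where constraints serialize as JSON arrays) the call above should be equivalent to constructing the constraint directly:

parts = "key:LIKE:value".split(":")   # ['key', 'LIKE', 'value']
constraint = MarathonConstraint.from_json(parts)
# ...equivalent to:
constraint = MarathonConstraint(field='key', operator='LIKE', value='value')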
Example #9
    def __init__(self, name, ports, is_seed=True, env_name=None):

        self.env_name = env_name

        volume_name = get_volume_name(name)
        volumes = [
            MarathonContainerVolume(container_path=volume_name,
                                    host_path=None,
                                    mode='RW',
                                    persistent={'size': 512}),
            MarathonContainerVolume(container_path='/var/lib/cassandra',
                                    host_path=volume_name,
                                    mode='RW',
                                    persistent=None)
        ]

        constraints = [MarathonConstraint(field='hostname', operator='UNIQUE')]
        residency = Residency(task_lost_behavior='WAIT_FOREVER')
        health_checks = [
            MarathonHealthCheck(grace_period_seconds=300,
                                interval_seconds=20,
                                max_consecutive_failures=3,
                                protocol='TCP',
                                timeout_seconds=20,
                                ignore_http1xx=False,
                                port=ports[9042])
        ]

        # `ports` maps each default Cassandra port to its externally assigned port;
        # the pairs below feed the sed substitutions in the container command.
        ports_map = {
            'p11': 9042,
            'p12': ports[9042],
            'p21': 9160,
            'p22': ports[9160],
            'p31': 7199,
            'p32': ports[7199],
            'p41': 7000,
            'p42': ports[7000],
            'p51': 7001,
            'p52': ports[7001]
        }

        cmd = "chown -R cassandra /var/lib/cassandra && sed -i 's/{p11}/{p12}/' /etc/cassandra/default.conf/cqlshrc.sample && sed -i 's/{p31}/{p32}/' /etc/cassandra/default.conf/cassandra-env.sh && sed -i 's/{p41}/{p42}/;s/{p51}/{p52}/;s/{p11}/{p12}/;s/{p21}/{p22}/;s/{p31}/{p32}/' /etc/cassandra/default.conf/cassandra.yaml".format(
            **ports_map)
        if is_seed:
            cmd += ' && cd ${MESOS_SANDBOX}/cassandra-schema && ./apply_schema.sh & start'
        else:
            cmd += ' && start'

        labels = {
            '_tonomi_application': 'cassandra',
            '_jmx_port': str(ports[7199]),
            '_internode_communication_port': str(ports[7000]),
            '_tls_internode_communication_port': str(ports[7001]),
            '_thrift_client_port': str(ports[9160]),
            '_cql_native_port': str(ports[9042])
        }

        if self.env_name:
            labels['_tonomi_environment'] = self.env_name

        env = {'SEEDS': '', 'CASSANDRA_PORT': str(ports[9042])}

        uris = [
            'https://s3-us-west-1.amazonaws.com/streaming-artifacts/mk-cassandra-schema.tar.gz'
        ]

        super().__init__(name,
                         image='poklet/cassandra',
                         volumes=volumes,
                         network='HOST',
                         labels=labels,
                         cmd=cmd,
                         constraints=constraints,
                         residency=residency,
                         env=env,
                         health_checks=health_checks,
                         uris=uris,
                         cpus=0.5,
                         mem=400,
                         instances=1,
                         disk=512)
Example #10
  def __init__(self, name, ports, is_seed=True, env_name=None):

    self.env_name = env_name

    volume_name = get_volume_name(name)

    volumes = []

    if is_seed:
      volumes = [
        MarathonContainerVolume(container_path=volume_name, mode='RW', persistent={'size': 512}),
        MarathonContainerVolume(container_path='/var/lib/cassandra', host_path=volume_name, mode='RW')
      ]

    constraints = [MarathonConstraint(field='hostname', operator='UNIQUE')]

    residency = None
    if is_seed:
      residency = Residency(task_lost_behavior='WAIT_FOREVER')

    health_checks = [get_health_check(port=ports[9042])]

    ports_map = {
      'p11': 9042,
      'p12': ports[9042],
      'p21': 9160,
      'p22': ports[9160],
      'p31': 7199,
      'p32': ports[7199],
      'p41': 7000,
      'p42': ports[7000],
      'p51': 7001,
      'p52': ports[7001]
    }

    cmd = "export CASSANDRA_BROADCAST_ADDRESS=$HOST && sed -i 's/{p41}/{p42}/;s/{p51}/{p52}/;s/{p11}/{p12}/;s/{p21}/{p22}/;s/{p31}/{p32}/' /etc/cassandra/cassandra.yaml && sed -i 's/{p31}/{p32}/' /etc/cassandra/cassandra-env.sh".format(**ports_map)
    if is_seed:
      cmd += " && cd ${MESOS_SANDBOX}/cassandra-schema && sed -i '2i sleep 20' apply_schema.sh && ./apply_schema.sh & /docker-entrypoint.sh cassandra -f"
    else:
      cmd += " && /docker-entrypoint.sh cassandra -f"

    labels = {
      '_tonomi_application': 'cassandra',
      '_jmx_port': str(ports[7199]),
      '_internode_communication_port': str(ports[7000]),
      '_tls_internode_communication_port': str(ports[7001]),
      '_thrift_client_port': str(ports[9160]),
      '_cql_native_port': str(ports[9042])
    }

    if self.env_name:
      labels['_tonomi_environment'] = self.env_name

    env = {
      'CASSANDRA_SEEDS': '',
      'CASSANDRA_PORT': str(ports[9042])
    }

    uris = ['https://s3-us-west-1.amazonaws.com/streaming-artifacts/mk-cassandra-schema.tar.gz']

    super().__init__(name, image='cassandra:3.7', volumes=volumes, network='HOST', labels=labels, cmd=cmd,
                     constraints=constraints, residency=residency, env=env, health_checks=health_checks,
                     uris=uris, cpus=0.5, mem=2512, instances=1, disk=512)