Example 1
def start(job):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    service = job.service
    container = Container.from_ays(service, job.context['token'], logger=service.logger)
    container.start()

    if container.is_running():
        service.model.data.status = "running"
    else:
        raise j.exceptions.RuntimeError("container didn't start")

    has_zt_nic = False
    for nic in service.model.data.nics:
        if nic.type == 'zerotier':
            has_zt_nic = True
            zerotier_nic_config(service, job.logger, container, nic)

    if has_zt_nic and not service.model.data.identity:
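        # store the container's zerotier secret identity if it has not been saved yet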
        service.model.data.identity = container.client.zerotier.info()['secretIdentity']

    service.saveAll()
Example 2
def install(job):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.configuration import get_jwt_token
    import time

    service = job.service

    job.context['token'] = get_jwt_token(service.aysrepo)
    container_service = service.aysrepo.serviceGet(role='container', instance=service.model.data.container)
    container = Container.from_ays(container_service, job.context['token'], logger=service.logger)

    id = container.id
    client = container.node.client
    r = client.container.backup(id, service.model.data.url)
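    # backup() returns a response handle; its result is collected further down with r.get()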

    service.model.data.type = 'container'

    meta = {
        'name': container.name,
        'node': container.node.addr,
        'nics': container.nics,
        'hostname': container.hostname,
        'flist': container.flist,
        'ports': container.ports,
        'host_network': container.host_network,
        'storage': container.storage,
        'init_processes': container.init_processes,
        'privileged': container.privileged,
    }

    service.model.data.timestamp = int(time.time())
    service.model.data.meta = j.data.serializer.json.dumps(meta)
    service.model.data.snapshot = r.get()
    service.saveAll()
Example 3
def config_cloud_init(job, nics=None):
    import yaml
    import json
    from zeroos.orchestrator.sal.gateway.cloudinit import CloudInit
    from zeroos.orchestrator.sal.Container import Container

    container = Container.from_ays(job.service.parent, job.context['token'])
    nics = nics or []
    config = {}

    for nic in nics:
        if not nic.get("dhcpserver"):
            continue

        for host in nic["dhcpserver"].get("hosts", []):
            if host.get("cloudinit"):
                if host["cloudinit"]["userdata"] and host["cloudinit"]["metadata"]:
                    userdata = yaml.safe_load(host["cloudinit"]["userdata"])
                    metadata = yaml.safe_load(host["cloudinit"]["metadata"])
                    config[host['macaddress'].lower()] = json.dumps({
                        "meta-data": metadata,
                        "user-data": userdata,
                    })

    cloudinit = CloudInit(container, config)
    if config != {}:
        cloudinit.apply_config()
    return cloudinit
Example 4
def processChange(job):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.sal.influxdb.influxdb import InfluxDB
    from zeroos.orchestrator.configuration import get_jwt_token_from_job

    service = job.service
    args = job.model.args
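    # only handle data-schema changes once the install action is no longer pending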
    if args.pop('changeCategory') != 'dataschema' or service.model.actionsState['install'] in ['new', 'scheduled']:
        return

    container_service = get_container(service)

    container = Container.from_ays(container_service, get_jwt_token_from_job(job))
    influx = InfluxDB(
        container, service.parent.model.data.redisAddr, service.model.data.port)

    if args.get('port'):
        if container.is_running() and influx.is_running()[0]:
            influx.stop()
            service.model.data.status = 'halted'
            influx.port = args['port']
            influx.start()
            service.model.data.status = 'running'
        service.model.data.port = args['port']

    if args.get('databases'):
        if container.is_running() and influx.is_running()[0]:
            create_dbs = set(args['databases']) - set(service.model.data.databases)
            drop_dbs = set(service.model.data.databases) - set(args['databases'])
            influx.create_databases(create_dbs)
            influx.drop_databases(drop_dbs)
        service.model.data.databases = args['databases']

    service.saveAll()
Example 5
def processChange(job):
    from zeroos.orchestrator.sal.grafana.grafana import Grafana
    from zeroos.orchestrator.sal.Container import Container

    service = job.service
    args = job.model.args

    if args.pop('changeCategory') != 'dataschema' or \
            service.model.actionsState['install'] in ['new', 'scheduled']:
        return
    container = get_container(service)
    container_ays = Container.from_ays(container, job.context['token'])
    grafana = Grafana(container_ays, service.parent.model.data.redisAddr,
                      job.service.model.data.port)

    if args.get('port'):
        if container_ays.is_running() and grafana.is_running()[0]:
            grafana.stop()
            service.model.data.status = 'halted'
            grafana.port = args['port']
            grafana.start()
            service.model.data.status = 'running'
        service.model.data.port = args['port']

    # @TODO: Handle influxdb list change

    service.saveAll()
Example 6
def processChange(job):
    from zeroos.orchestrator.sal.grafana.grafana import Grafana
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    service = job.service
    args = job.model.args

    if args.pop('changeCategory') != 'dataschema' or \
            service.model.actionsState['install'] in ['new', 'scheduled']:
        return
    container = get_container(service)
    container_ays = Container.from_ays(container,
                                       job.context['token'],
                                       logger=service.logger)
    grafana = Grafana(container_ays, service.parent.model.data.redisAddr,
                      job.service.model.data.port, job.service.model.data.url)

    if 'url' in args:
        service.model.data.url = args.get('url', service.model.data.url)
        if container_ays.is_running() and grafana.is_running()[0]:
            grafana.stop()
            grafana.url = service.model.data.url
            grafana.start()

    service.saveAll()
Example 7
def _init_zerodisk_services(job,
                            nbd_container_service,
                            tlog_container_service=None,
                            tlog_container_sal=None):
    service = job.service
    # Create nbdserver service
    nbdserver_service = create_service(service, nbd_container_service)
    job.logger.info("creates nbd server for vm {}".format(service.name))
    service.consume(nbdserver_service)

    if tlog_container_service:
        # Create tlogserver service
        if not tlog_container_sal:
            from zeroos.orchestrator.sal.Container import Container
            tlog_container_sal = Container.from_ays(tlog_container_service,
                                                    job.context['token'],
                                                    logger=service.logger)
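        # reserve two free ports (starting from 11211): ports[0] becomes the tlog bind address, ports[1] its wait-listen address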
        ports, tcp = get_baseports(job, tlog_container_service.parent,
                                   tlog_container_sal.node, 11211, 2)
        bind = "%s:%s" % (tlog_container_sal.node.storageAddr, ports[0])
        waitListenBind = "%s:%s" % (tlog_container_sal.node.storageAddr,
                                    ports[1])
        tlogserver_service = create_service(service,
                                            tlog_container_service,
                                            role='tlogserver',
                                            bind=bind,
                                            waitListenBind=waitListenBind)
        tlogserver_service.consume(tcp[0])
        tlogserver_service.consume(tcp[1])
        job.logger.info("creates tlog server for vm {}".format(service.name))
        service.consume(tlogserver_service)
        nbdserver_service.consume(tlogserver_service)
Example 8
def apply_rules(job, gwdata=None):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.sal.gateway.firewall import Firewall, Network
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    gwdata = {} if gwdata is None else gwdata
    container = Container.from_ays(job.service.parent,
                                   job.context['token'],
                                   logger=job.service.logger)
    portforwards = gwdata.get('portforwards', [])
    # let's assume the public IP is the IP of the NIC that has a gateway configured

    publicnetwork = None
    privatenetworks = []
    for nic in gwdata["nics"]:
        if nic.get("config"):
            if nic["config"].get("gateway", None):
                publicnetwork = Network(nic["name"], nic["config"]["cidr"])
            else:
                privatenetworks.append(
                    Network(nic["name"], nic["config"]["cidr"]))
    if publicnetwork and privatenetworks:
        firewall = Firewall(container, publicnetwork, privatenetworks,
                            portforwards)
        firewall.apply_rules()
Example 9
def apply_config(job, gwdata=None):
    import ipaddress
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.sal.gateway.dhcp import DHCP
    from zeroos.orchestrator.configuration import get_jwt_token
    service = job.service
    job.context['token'] = get_jwt_token(job.service.aysrepo)

    container = Container.from_ays(job.service.parent, job.context['token'], logger=job.service.logger)

    gwdata = {} if gwdata is None else gwdata
    nics = gwdata.get('nics', [])
    dhcpservers = []

    for nic in nics:
        dhcpserver = nic.get('dhcpserver')
        if not dhcpserver:
            continue

        cidr = ipaddress.IPv4Interface(nic['config']['cidr'])
        dhcpserver['subnet'] = str(cidr.network.network_address)
        dhcpserver['gateway'] = str(cidr.ip)
        dhcpserver['interface'] = nic['name']
        dhcpservers.append(dhcpserver)

    dhcp = DHCP(container, gwdata['domain'], dhcpservers)
    dhcp.stop()
    service.model.data.status = 'halted'
    service.saveAll()
    dhcp.apply_config()
    service.model.data.status = 'running'
    service.saveAll()
Example 10
def _start_nbd(job, nbdname=None):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    # get all paths to the vdisks served by the nbdservers
    medias = []
    if not nbdname:
        nbdservers = job.service.producers.get('nbdserver', None)
    else:
        nbdservers = [
            job.service.aysrepo.serviceGet(role='nbdserver', instance=nbdname)
        ]

    if not nbdservers:
        raise j.exceptions.RuntimeError(
            "Failed to start nbds, no nbds created to start")
    nbdserver = nbdservers[0]
    # build full path of the nbdserver unix socket on the host filesystem
    container = Container.from_ays(nbdserver.parent,
                                   job.context['token'],
                                   logger=job.service.logger)
    if not container.is_running():
        # start container
        nbdserver.parent.executeAction('start', context=job.context)

    # make sure the nbdserver is started
    nbdserver.executeAction('start', context=job.context)
    for vdisk in job.service.model.data.vdisks:
        url = _nbd_url(job, container, nbdserver, vdisk)
        medias.append({'url': url})
    return medias
Example 11
def monitor(job):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.configuration import get_jwt_token

    service = job.service

    if service.model.actionsState['install'] == 'ok':
        container = Container.from_ays(job.service, get_jwt_token(job.service.aysrepo))
        running = container.is_running()
        if not running and service.model.data.status == 'running':
            try:
                job.logger.warning("container {} not running, trying to restart".format(service.name))
                service.model.dbobj.state = 'error'
                container.start()

                if container.is_running():
                    service.model.dbobj.state = 'ok'
            except Exception:
                job.logger.error("can't restart container {}, it is still not running".format(service.name))
                service.model.dbobj.state = 'error'
        elif running and service.model.data.status == 'halted':
            try:
                job.logger.warning("container {} running, trying to stop".format(service.name))
                service.model.dbobj.state = 'error'
                container.stop()
                running, _ = container.is_running()
                if not running:
                    service.model.dbobj.state = 'ok'
            except Exception:
                job.logger.error("can't stop container {}, it is still running".format(service.name))
                service.model.dbobj.state = 'error'
Example 12
def create_from_template_container(job, parent):
    """
    if not it creates it.
    return the container service
    """
    from zeroos.orchestrator.configuration import get_configuration
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    container_name = 'vdisk_{}_{}'.format(job.service.name, parent.name)
    node = Node.from_ays(parent, job.context['token'])
    config = get_configuration(job.service.aysrepo)
    flist = config.get(
        '0-disk-flist',
        'https://hub.gig.tech/gig-official-apps/0-disk-master.flist')

    print("Creating container for flist: %s" % flist)

    container = Container(name=container_name,
                          flist=flist,
                          host_network=True,
                          node=node)
    container.start()
    return container
Example 13
def monitor(job):
    from zeroos.orchestrator.configuration import get_jwt_token
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.sal.Container import Container

    service = job.service
    if service.model.actionsState['install'] != 'ok' or service.parent.model.data.status != 'running':
        return

    token = get_jwt_token(job.service.aysrepo)
    node = Node.from_ays(service.parent, token, timeout=5)
    if not node.is_configured():
        return

    container = Container.from_ays(job.service, token, logger=service.logger)
    running = container.is_running()

    if not running and service.model.data.status == 'running' and container.node.is_configured(service.parent.name):
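        # check that the node's openvswitch container ('<node>_ovs') is running before attempting a restart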
        ovs_name = '{}_ovs'.format(container.node.name)
        if ovs_name != service.name:
            ovs_service = service.aysrepo.serviceGet(role='container', instance=ovs_name)
            ovs_container = Container.from_ays(ovs_service, token)
            if not ovs_container.is_running():
                job.logger.warning(
                    "Can't attempt to restart container {}, container {} is not running".format(
                        service.name, ovs_name))
                return
        try:
            job.logger.warning("container {} not running, trying to restart".format(service.name))
            service.model.dbobj.state = 'error'
            container.start()

            if container.is_running():
                service.model.dbobj.state = 'ok'
        except Exception:
            job.logger.error("can't restart container {}, it is still not running".format(service.name))
            service.model.dbobj.state = 'error'
    elif running and service.model.data.status == 'halted':
        try:
            job.logger.warning("container {} running, trying to stop".format(service.name))
            service.model.dbobj.state = 'error'
            container.stop()
            running, _ = container.is_running()
            if not running:
                service.model.dbobj.state = 'ok'
        except Exception:
            job.logger.error("can't stop container {}, it is still running".format(service.name))
            service.model.dbobj.state = 'error'
Example 14
def updateDisks(job, client, args):
    if args.get('disks') is None:
        return
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)
    service = job.service
    uuid = None
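    # the domain uuid is only looked up while the vm is running; it is used below to hot-attach/detach disks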
    if service.model.data.status == 'running':
        domain = get_domain(job)
        if domain:
            uuid = domain['uuid']
    # Get new and old disks
    new_disks = _diff(args['disks'], service.model.data.disks)
    old_disks = _diff(service.model.data.disks, args['disks'])

    # Do nothing if no disk change
    if new_disks == [] and old_disks == []:
        return

    # Set model to new data
    service.model.data.disks = args['disks']
    vdisk_container_service = create_zerodisk_container_service(
        job, service.parent, "nbd")
    container = Container.from_ays(vdisk_container_service,
                                   job.context['token'],
                                   logger=service.logger)

    # Detaching and cleaning old disks
    if old_disks != []:
        nbdserver = service.producers.get('nbdserver', [])[0]
        for old_disk in old_disks:
            url = _nbd_url(job, container, nbdserver, old_disk['vdiskid'])
            if uuid:
                client.client.kvm.detach_disk(uuid, {'url': url})
            nbdserver.executeAction('install', context=job.context)

    # Attaching new disks
    if new_disks != []:
        _init_zerodisk_services(job, vdisk_container_service)
        for disk in new_disks:
            diskservice = service.aysrepo.serviceGet('vdisk', disk['vdiskid'])
            service.consume(diskservice)
        service.saveAll()
        _start_nbd(job)
        nbdserver = service.producers.get('nbdserver', [])[0]
        for disk in new_disks:
            media = {
                'url': _nbd_url(job, container, nbdserver, disk['vdiskid'])
            }
            if disk['maxIOps']:
                media['iotune'] = {
                    'totaliopssec': disk['maxIOps'],
                    'totaliopssecset': True
                }
            if uuid:
                client.client.kvm.attach_disk(uuid, media)
    service.saveAll()
Example 15
def updateDisks(job, client, args):
    from zeroos.orchestrator.sal.Container import Container
    service = job.service
    uuid = None
    if service.model.data.status == 'running':
        uuid = get_domain(job)['uuid']

    # means we want to migrate the vm from one node to another
    if 'node' in args and args['node'] != service.model.data.node:
        j.tools.async.wrappers.sync(
            service.executeAction('migrate',
                                  context=job.context,
                                  args={'node': args['node']}))

    # Get new and old disks
    new_disks = _diff(args.get('disks', []), service.model.data.disks)
    old_disks = _diff(service.model.data.disks, args.get('disks', []))

    # Do nothing if no disk change
    if new_disks == [] and old_disks == []:
        return

    # Set model to new data
    service.model.data.disks = args.get('disks', [])
    vdisk_container = create_zerodisk_container(job, service.parent)
    container = Container.from_ays(vdisk_container, job.context['token'])

    # Detaching and cleaning old disks
    if old_disks != []:
        nbdserver = service.producers.get('nbdserver', [])[0]
        for old_disk in old_disks:
            url = _nbd_url(job, container, nbdserver, old_disk['vdiskid'])
            if uuid:
                client.client.kvm.detach_disk(uuid, {'url': url})
            j.tools.async.wrappers.sync(
                nbdserver.executeAction('install', context=job.context))

    # Attaching new disks
    if new_disks != []:
        _init_zerodisk_services(job, vdisk_container)
        for disk in new_disks:
            diskservice = service.aysrepo.serviceGet('vdisk', disk['vdiskid'])
            service.consume(diskservice)
        service.saveAll()
        _start_nbd(job)
        nbdserver = service.producers.get('nbdserver', [])[0]
        for disk in new_disks:
            media = {
                'url': _nbd_url(job, container, nbdserver, disk['vdiskid'])
            }
            if disk['maxIOps']:
                media['iotune'] = {
                    'totaliopssec': disk['maxIOps'],
                    'totaliopssecset': True
                }
            if uuid:
                client.client.kvm.attach_disk(uuid, media)
    service.saveAll()
Example 16
def stop(job):
    from zeroos.orchestrator.sal.Container import Container

    container = Container.from_ays(job.service, job.context['token'])
    container.stop()

    if not container.is_running():
        job.service.model.data.status = "halted"
    else:
        raise j.exceptions.RuntimeError("container didn't stop")
Example 17
def stop(job):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    container = Container.from_ays(job.service, job.context['token'], logger=job.service.logger)
    container.stop()

    if not container.is_running():
        job.service.model.data.status = "halted"
    else:
        raise j.exceptions.RuntimeError("container didn't stop")
Example 18
def start(job):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.sal.influxdb.influxdb import InfluxDB

    service = job.service
    container = get_container(service)
    j.tools.async.wrappers.sync(container.executeAction('start', context=job.context))
    container_ays = Container.from_ays(container, job.context['token'])
    influx = InfluxDB(
        container_ays, service.parent.model.data.redisAddr, service.model.data.port)
    influx.start()
    service.model.data.status = 'running'
    influx.create_databases(service.model.data.databases)
    service.saveAll()
Example 19
def stop(job):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.sal.influxdb.influxdb import InfluxDB

    service = job.service
    container = get_container(service)
    container_ays = Container.from_ays(container, job.context['token'])

    if container_ays.is_running():
        influx = InfluxDB(
            container_ays, service.parent.model.data.redisAddr, service.model.data.port)
        influx.stop()
        j.tools.async.wrappers.sync(container.executeAction('stop', context=job.context))
    service.model.data.status = 'halted'
    service.saveAll()
Example 20
def start(job):
    from zeroos.orchestrator.sal.grafana.grafana import Grafana
    from zeroos.orchestrator.sal.Container import Container

    service = job.service
    container = get_container(service)
    j.tools.async.wrappers.sync(
        container.executeAction('start', context=job.context))
    container_ays = Container.from_ays(container, job.context['token'])
    grafana = Grafana(container_ays, service.parent.model.data.redisAddr,
                      job.service.model.data.port)
    grafana.start()
    service.model.data.status = 'running'
    configure_datasources(job, grafana)
    service.saveAll()
Example 21
def start(job):
    import time
    from zerotier import client
    from zeroos.orchestrator.sal.Container import Container

    service = job.service
    container = Container.from_ays(service, job.context['token'])
    container.start()

    if container.is_running():
        service.model.data.status = "running"
    else:
        raise j.exceptions.RuntimeError("container didn't start")

    def get_member():
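        # poll the zerotier controller for up to 60 seconds until this node appears as a member; 'zerotier' and 'nic' come from the loop below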
        start = time.time()
        while start + 60 > time.time():
            resp = zerotier.network.getMember(service.model.data.zerotiernodeid, nic.id)
            if resp.content:
                return resp.json()
            time.sleep(0.5)
        raise j.exceptions.RuntimeError('Could not find member on zerotier network')

    def wait_for_interface():
        start = time.time()
        while start + 60 > time.time():
            for link in container.client.ip.link.list():
                if link['type'] == 'tun':
                    return
            time.sleep(0.5)
        raise j.exceptions.RuntimeError("Could not find zerotier network interface")

    for nic in service.model.data.nics:
        if nic.type == 'zerotier':
            wait_for_interface()
            service.model.data.zerotiernodeid = container.client.zerotier.info()['address']
            if nic.token:
                zerotier = client.Client()
                zerotier.set_auth_header('bearer {}'.format(nic.token))
                member = get_member()
                if not member['config']['authorized']:
                    # authorize the new member
                    job.logger.info("authorize new member {} to network {}".format(member['nodeId'], nic.id))
                    member['config']['authorized'] = True
                    zerotier.network.updateMember(member, member['nodeId'], nic.id)

    service.saveAll()
Example 22
def apply_rules(job, httpproxies=None):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.sal.gateway.http import HTTPServer

    container = Container.from_ays(job.service.parent, job.context['token'])

    httpproxies = [] if httpproxies is None else httpproxies

    # add a proxy entry specifically for cloud-init
    httpproxies.append({
        'host': '169.254.169.254',
        'destinations': ['http://127.0.0.1:8080'],
        'types': ['http']
    })

    http = HTTPServer(container, httpproxies)
    http.apply_rules()
Example 23
def start_tlog(job):
    from zeroos.orchestrator.sal.Container import Container

    tlogservers = job.service.producers.get('tlogserver', None)
    if not tlogservers:
        raise j.exceptions.RuntimeError(
            "Failed to start tlogs, no tlogs created to start")
    tlogserver = tlogservers[0]
    # build full path of the tlogserver unix socket on the host filesystem
    container = Container.from_ays(tlogserver.parent,
                                   password=job.context['token'])
    # make sure container is up
    if not container.is_running():
        j.tools.async.wrappers.sync(
            tlogserver.parent.executeAction('start', context=job.context))

    # make sure the tlogserver is started
    j.tools.async.wrappers.sync(
        tlogserver.executeAction('start', context=job.context))
Example 24
def start(job):
    from zeroos.orchestrator.sal.grafana.grafana import Grafana
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    service = job.service
    service.model.data.status = 'running'
    container = get_container(service)
    container.executeAction('start', context=job.context)
    container_ays = Container.from_ays(container,
                                       job.context['token'],
                                       logger=service.logger)
    grafana = Grafana(container_ays, service.parent.model.data.redisAddr,
                      job.service.model.data.port, job.service.model.data.url)
    grafana.start()
    add_datasources(grafana, service.producers.get('influxdb'))
    service.saveAll()
Example 25
def start(job):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.sal.influxdb.influxdb import InfluxDB
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    service = job.service
    service.model.data.status = 'running'
    container = get_container(service)
    container.executeAction('start', context=job.context)
    container_ays = Container.from_ays(container,
                                       job.context['token'],
                                       logger=service.logger)
    influx = InfluxDB(container_ays, service.parent.model.data.redisAddr,
                      service.model.data.port, service.model.data.rpcport)
    influx.start()

    influx.create_databases(service.model.data.databases)
    service.saveAll()
Example 26
def start(job):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    service = job.service
    container = service.producers.get('container')[0]
    container.executeAction('start', context=job.context)

    containerobj = Container.from_ays(container,
                                      job.context['token'],
                                      logger=service.logger)
    # setup resolv.conf
    containerobj.upload_content('/etc/resolv.conf', 'nameserver 127.0.0.1\n')

    # setup zerotier bridges
    setup_zerotierbridges(job)

    # setup cloud-init magical ip
    ip = containerobj.client.ip
    loaddresses = ip.addr.list('lo')
    magicip = '169.254.169.254/32'
    if magicip not in loaddresses:
        ip.addr.add('lo', magicip)

    restore_certificates(job, containerobj)
    # start services
    http = container.consumers.get('http')
    dhcp = container.consumers.get('dhcp')[0]
    cloudinit = container.consumers.get('cloudinit')[0]
    firewall = container.consumers.get('firewall')[0]

    container.executeAction('start', context=job.context)
    dhcp.executeAction('start', context=job.context)
    for i in http:
        i.executeAction('start', context=job.context)
    firewall.executeAction('start', context=job.context)
    cloudinit.executeAction('start', context=job.context)
    save_certificates(job, containerobj)
    service.model.data.status = "running"
Example 27
def start_tlog(job):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    tlogservers = job.service.producers.get('tlogserver', None)
    if not tlogservers:
        raise j.exceptions.RuntimeError(
            "Failed to start tlogs, no tlogs created to start")
    tlogserver = tlogservers[0]
    # build full path of the tlogserver unix socket on the host filesystem
    container = Container.from_ays(tlogserver.parent,
                                   password=job.context['token'],
                                   logger=job.service.logger)
    # make sure container is up
    if not container.is_running():
        tlogserver.parent.executeAction('start', context=job.context)

    # make sure the tlogserver is started
    tlogserver.executeAction('start', context=job.context)
Example 28
def stop(job):
    from zeroos.orchestrator.sal.grafana.grafana import Grafana
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    service = job.service
    container = get_container(service)
    container_ays = Container.from_ays(container,
                                       job.context['token'],
                                       logger=service.logger)
    if container_ays.is_running():
        grafana = Grafana(container_ays, service.parent.model.data.redisAddr,
                          job.service.model.data.port,
                          job.service.model.data.url)
        grafana.stop()
        container.executeAction('stop', context=job.context)

    service.model.data.status = 'halted'
    service.saveAll()
Example 29
def migrate(job, dest):
    from zeroos.orchestrator.sal.Container import Container

    service = job.service
    node = service.aysrepo.serviceGet(role='node', instance=dest)
    containers = []
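    # keep the container SAL objects so the old containers can be stopped once the migration is done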
    for container in service.producers.get('container'):
        containers.append(
            Container.from_ays(container,
                               job.context['token'],
                               logger=job.service.logger))

        container.model.changeParent(node)
        container.saveAll()
        container.executeAction('install', context=job.context)

    service.model.changeParent(node)
    service.saveAll()
    service.executeAction('start', context=job.context)

    for container_sal in containers:
        container_sal.stop()
Example 30
def start(job):
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.sal.stats_collector.stats_collector import StatsCollector
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)

    service = job.service
    service.model.data.status = 'running'
    container = get_container(service)
    container.executeAction('start', context=job.context)
    container_ays = Container.from_ays(container,
                                       job.context['token'],
                                       logger=service.logger)
    stats_collector = StatsCollector(container_ays, service.model.data.ip,
                                     service.model.data.port,
                                     service.model.data.db,
                                     service.model.data.retention,
                                     job.context['token'])
    stats_collector.start()

    service.saveAll()