Example #1
def create_one(version):
    jenkins_image = SourceImage(
        name='jenkins',
        parent=Image(namespace='yandex', repository='trusty'),
        scripts=[
            'apt-get update && apt-get install -y --no-install-recommends wget git curl zip openjdk-7-jdk'
            ' maven ant ruby rbenv make apt-transport-https',
            'apt-key adv --keyserver hkp://keyserver.ubuntu.com:80'
            ' --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9',
            'wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | apt-key add -',
            'echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list && '
            'echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list && '
            'apt-get update',
            'apt-get install -y jenkins={} lxc-docker'.format(version),
            'mkdir -p /var/jenkins_home && chown -R jenkins /var/jenkins_home',
        ],
        env={
            'JENKINS_HOME': '/var/jenkins_home',
        },
        ports={
            'http': 8080,
            'agent': 50000,
        },
        command='bash /scripts/jenkins.sh',
        files={
            '/scripts/jenkins.sh': resource_string('jenkins.sh'),
        },
        volumes={'data': '/var/jenkins_home'},
    )

    jenkins = Container(
        name='jenkins',
        image=jenkins_image,
        doors={
            'http': Door(schema='http', port=jenkins_image.ports['http']),
            'agent': Door(schema='jnlp', port=jenkins_image.ports['agent']),
        },
        volumes={
            'logs': LogVolume(
                dest='/var/log/jenkins',
                files={'jenkins.log': LogFile()},
            ),
            'config': ConfigVolume(
                dest='/etc/jenkins',
                files={
                    'init.groovy': TemplateFile(resource_string('init.groovy')),
                },
            ),
            'data': DataVolume(dest='/var/jenkins_home'),
        },
    )

    return jenkins
Example #2
def create_one(version):
    jenkins_image = SourceImage(
        name='jenkins',
        parent=Image(namespace='yandex', repository='trusty'),
        scripts=[
            'apt-get update && apt-get install -y --no-install-recommends wget git curl zip openjdk-7-jdk'
            ' maven ant ruby rbenv make apt-transport-https',
            'apt-key adv --keyserver hkp://keyserver.ubuntu.com:80'
            ' --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9',
            'wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | apt-key add -',
            'echo deb http://pkg.jenkins-ci.org/debian binary/ > /etc/apt/sources.list.d/jenkins.list && '
            'echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list && '
            'apt-get update',
            'apt-get install -y jenkins={} lxc-docker'.format(version),
            'mkdir -p /var/jenkins_home && chown -R jenkins /var/jenkins_home',
        ],
        env={
            'JENKINS_HOME': '/var/jenkins_home',
        },
        ports={
            'http': 8080,
            'agent': 50000,
        },
        command='bash /scripts/jenkins.sh',
        files={
            '/scripts/jenkins.sh': resource_string('jenkins.sh'),
        },
        volumes={'data': '/var/jenkins_home'},
    )

    jenkins = Container(
        name='jenkins',
        image=jenkins_image,
        doors={
            'http': Door(schema='http', port=jenkins_image.ports['http']),
            'agent': Door(schema='jnlp', port=jenkins_image.ports['agent']),
        },
        volumes={
            'logs': LogVolume(
                dest='/var/log/jenkins',
                files={
                    'jenkins.log': LogFile()
                },
            ),
            'config': ConfigVolume(
                dest='/etc/jenkins',
                files={
                    'init.groovy': TemplateFile(resource_string('init.groovy')),
                },
            ),
            'data': DataVolume(dest='/var/jenkins_home'),
        },
    )

    return jenkins
Example #3
def make_nginx_site_config(nginx=nginx, upstreams=upstreams):
    template = resource_string('elk.site')
    config = mako.template.Template(template).render(
        upstreams=upstreams,
        certificate_path=os.path.join(nginx.volumes['ssl'].dest, 'server.pem'),
    )
    return TextFile(config)
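This helper captures the surrounding nginx and upstreams objects through default arguments, so it can be called with no arguments later. Elsewhere in these examples (Examples #7, #9 and #12) such zero-argument callables are assigned into a ConfigVolume file slot so the file is rendered lazily. A minimal sketch of that pattern; the 'elk.site' slot name is illustrative, not taken from the code above:

# assumes the nginx container has a 'config' ConfigVolume; the file is rendered when the volume is materialized
nginx.volumes['config'].files['elk.site'] = make_nginx_site_config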
Example #4
def get_zookeeper_image():
    return SourceImage(
        name='zookeeper',
        parent=Image(namespace='yandex', repository='trusty'),
        env={'DEBIAN_FRONTEND': 'noninteractive'},
        scripts=[
            'apt-get -q update && apt-get -qy install openjdk-7-jre-headless && apt-get clean',
            'curl -s http://apache.mirrors.hoobly.com/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz'
            ' | tar --strip-components=1 -xz',
        ],
        files={'/root/run.sh': resource_string('run.sh')},
        volumes={
            'logs': '/var/log/zookeeper',
            'data': '/var/lib/zookeeper',
            'config': '/opt/zookeeper/conf',
        },
        ports={
            'client': 2181,
            'peer': 2888,
            'election': 3888,
            'jmx': 4888,
        },
        command=['/root/run.sh'],
        entrypoint=['sh'],
        workdir='/opt/zookeeper',
    )
Example #5
def attach_elasticsearch_to_kibana(kibana, httpdoor, httpsdoor):
    """Adds Elasticsearch ports to Kibana config.
    The ports should already be exposed for this to work.
    """
    kibana.links['elasticsearch.http'] = httpdoor
    kibana.links['elasticsearch.https'] = httpsdoor

    config_template = resource_string('config.js')
    config = mako.template.Template(config_template).render(
        http_port=httpdoor.port,
        https_port=httpsdoor.port,
    )
    kibana.volumes['config'].files['config.js'] = TextFile(config)
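A usage sketch, assuming a kibana container built with a 'config' volume holding config.js, and an Elasticsearch container (called es here, an assumption not constructed above) that exposes both http and https doors:

# kibana and es are assumed to exist; only the call pattern is shown
attach_elasticsearch_to_kibana(kibana, es.doors['http'], es.doors['https'])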
Example #6
def create_jmxtrans_image():
    return SourceImage(
        name='jmxtrans',
        parent=Image(namespace='yandex', repository='trusty'),
        scripts=[
            'apt-get -q update && apt-get install -qy openjdk-7-jdk maven && apt-get clean',
            'git clone https://github.com/Naishy/jmxtrans.git',
            'cd jmxtrans && mvn install',
        ],
        files={
            '/root/run.sh': resource_string('jmxtrans.run.sh'),
        },
        volumes={
            'config': '/etc/jmxtrans',
            'logs': '/var/log/jmxtrans',
        },
        entrypoint=['sh'],
        command=['/root/run.sh'],
    )
Example #7
def make(version='1:2.4.1-1+trusty'):
    frontend_image = SourceImage(
        name='zabbix-frontend',
        parent=make_zabbix_image(),
        scripts=[
            'apt-get update && '
            'apt-get install -y zabbix-frontend-php={version} apache2 php5-pgsql'
            .format(version=version),
            'chmod go+rx /etc/zabbix',
        ],
        files={
            '/scripts/frontend.sh': resource_stream('frontend.sh'),
        },
        command=['/scripts/frontend.sh'],
    )

    postgres_image = SourceImage(
        name='postgres',
        parent=Image(repository='postgres', namespace=None, registry=None),
    )

    postgres = Container(
        name='postgres',
        image=postgres_image,
        doors={'postgres': Door(port=5432, schema='postgres')},
        volumes={
            'logs': LogVolume(dest='/var/log/postgresql'),
            'data': DataVolume(dest='/var/lib/postgresql/data'),
            # 'config': ConfigVolume(dest='/etc/postgresql'),
        },
    )

    @cached
    def make_server_image():
        return SourceImage(
            name='zabbix-server',
            parent=make_zabbix_image(),
            ports={'zabbix-trapper': 10051},
            scripts=[
                'apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y '
                + 'zabbix-server-pgsql={version} '.format(version=version) +
                'strace snmp-mibs-downloader fping nmap',
                'ln -fs /usr/bin/fping /usr/sbin/',
            ],
            files={
                '/scripts/zabbix.sh': resource_stream('zabbix.sh'),
                '/usr/lib/zabbix/alertscripts/golem-alert-handler.sh': resource_stream('golem-alert-handler.sh'),
            },
            volumes={
                'logs': '/var/log/zabbix',
                'config': '/etc/zabbix',
            },
            command=['/scripts/zabbix.sh'],
        )

    backend = Container(
        name='zabbix-backend',
        image=make_server_image(),
        doors={'zabbix-trapper': Door(schema='zabbix-trapper')},
        privileged=True,  # needed for strace to work
        volumes={
            'logs': LogVolume(
                dest='/var/log/zabbix',
                files={
                    'zabbix_server.log': LogFile(),
                    'snmptt.log': LogFile(),
                    'golem-alert.log': LogFile(),
                },
            ),
            'config': ConfigVolume(
                dest='/etc/zabbix',
                files={'zabbix_server.conf': None},
            ),
        },
    )
    # This variable is needed for golem-alert-handler.sh script
    backend.env['GOLEM_ALERT_LOG'] = backend.volumes['logs'].files['golem-alert.log'].fulldest

    def make_zabbix_server_conf(backend=backend, postgres=postgres):
        logfiles = backend.volumes['logs'].files
        config = {
            'LogFile': logfiles['zabbix_server.log'].fulldest,
            'LogFileSize': 0,
            'PidFile': '/var/run/zabbix_server.pid',
            'DBHost': postgres.ship.fqdn,
            'DBName': 'zabbix',
            'DBUser': '******',
            'DBPassword': '',
            'DBPort': postgres.doors['postgres'].port,
            'StartPollers': 5,
            'StartIPMIPollers': 0,
            'StartTrappers': 1,
            'JavaGateway': '127.0.0.1',
            'StartJavaPollers': 0,
            'StartVMwareCollectors': 0,
            'VMwareFrequency': 10,
            'VMwareCacheSize': '256K',
            'SNMPTrapperFile': logfiles['snmptt.log'].fulldest,
            'SenderFrequency': 10,
            'CacheUpdateFrequency': 10,
            'StartDBSyncers': 4,
            'HistoryCacheSize': '2G',
            'TrendCacheSize': '2G',
            'HistoryTextCacheSize': '2G',
            'ValueCacheSize': '2G',
            'Timeout': 30,
            'UnreachablePeriod': 10,
            'UnavailableDelay': 10,
            'UnreachableDelay': 10,
            'AlertScriptsPath': '/usr/lib/zabbix/alertscripts',
            'ExternalScripts': '/usr/lib/zabbix/externalscripts',
            'ProxyConfigFrequency': 10,
            'AllowRoot': 1,
        }

        return TextFile('\n'.join([
            '{}={}'.format(key, value) for key, value in sorted(config.items())
        ]))

    backend.volumes['config'].files['zabbix_server.conf'] = make_zabbix_server_conf

    frontend = Container(
        name='zabbix-frontend',
        image=frontend_image,
        privileged=True,  # needed to chmod /etc/zabbix
        doors={
            'http': Door(
                schema='http',
                urls={'default': Url('zabbix')},
            ),
        },
        volumes={
            'logs': LogVolume(
                dest='/var/log/apache2',
                files={
                    'access.log': LogFile(),
                    'error.log': LogFile(),
                },
            ),
            'config-apache': ConfigVolume(
                dest='/etc/zabbix',
                files={
                    'apache.conf': TextFile(resource_string('apache.conf')),
                    'web/zabbix.conf.php': TemplateFile(
                        resource_string('zabbix.conf.php'),
                        postgres=postgres,
                        backend=backend,
                    ),
                },
            ),
        },
    )

    def make_db_task(name, script):
        return Task(
            name=name,
            image=make_server_image(),
            volumes={
                'scripts': ConfigVolume(dest='/scripts',
                                        files={'run.sh': script}),
            },
            command=['/scripts/run.sh'],
        )

    def make_reinit_script():
        return TextFile(
            dedent('''#!/bin/bash
            cmd='psql -U postgres -h {door.host} -p {door.port}'
            echo 'drop database zabbix; create database zabbix;' | $cmd
            cd /usr/share/zabbix-server-pgsql
            cat schema.sql images.sql data.sql | $cmd zabbix
            '''.format(door=postgres.doors['postgres'])))

    reinit = make_db_task('reinit', make_reinit_script)

    def make_dump_script():
        return TextFile(
            'pg_dump -U postgres -h {door.host} -p {door.port} -c -C '
            'zabbix'.format(door=postgres.doors['postgres']))

    dump = make_db_task('dump', make_dump_script)

    def make_restore_script():
        return TextFile(
            'psql -U postgres -h {door.host} -p {door.port}'.format(
                door=postgres.doors['postgres']))

    restore = make_db_task('restore', make_restore_script)

    return [postgres, frontend, backend], [reinit, dump, restore]
Example #8
def create(ships, zookeepers, name, ports=None, marvel_hosts=[]):
    ports = ports or {}
    containers = []
    image = SourceImage(
        name='elasticsearch',
        parent=Image(namespace='yandex', repository='trusty'),
        scripts=[
            'curl http://packages.elasticsearch.org/GPG-KEY-elasticsearch | apt-key add -',
            'echo "deb http://packages.elasticsearch.org/elasticsearch/1.3/debian stable main"'
            ' > /etc/apt/sources.list.d/elasticsearch.list',
            'apt-get update',
            'apt-get install -y --no-install-recommends maven elasticsearch=1.3.2 openjdk-7-jdk',
            'git clone https://github.com/grmblfrz/elasticsearch-zookeeper.git /tmp/elasticsearch-zookeeper',
            'cd /tmp/elasticsearch-zookeeper && git checkout v1.3.1 && '
            'mvn package -Dmaven.test.skip=true -Dzookeeper.version=3.4.6',
            '/usr/share/elasticsearch/bin/plugin -v '
            '  -u file:///tmp/elasticsearch-zookeeper/target/releases/elasticsearch-zookeeper-1.3.1.zip '
            '  -i elasticsearch-zookeeper-1.3.1',
            '/usr/share/elasticsearch/bin/plugin -v -i elasticsearch/marvel/latest',
            '/usr/share/elasticsearch/bin/plugin -v -i mobz/elasticsearch-head',
        ],
        ports={'http': 9200, 'peer': 9300, 'jmx': 9400},
        volumes={
            'logs': '/var/log/elasticsearch',
            'data': '/var/lib/elasticsearch',
            'config': '/etc/elasticsearch'
        },
        files={'/root/run.sh': resource_string('run.sh')},
        command='bash /root/run.sh',
    )
    config = ConfigVolume(
        dest=image.volumes['config'],
        files={
            'elasticsearch.yml': TemplateFile(
                resource_string('elasticsearch.yml'),
                name=name, zookeepers=zookeepers,
                containers=containers, marvel_hosts=marvel_hosts
            ),
            'mapping.json': TextFile(filename='mapping.json'),
            'logging.yml': TextFile(filename='logging.yml'),
        },
    )
    data = DataVolume(image.volumes['data'])
    logs = LogVolume(
        image.volumes['logs'],
        files={
            '{}.log'.format(name): RotatedLogFile('[%Y-%m-%d %H:%M:%S,%f]', 25)
        },
    )

    containers.extend([
        Container(
            name='elasticsearch',
            ship=ship,
            image=image,
            volumes={
                'data': data,
                'logs': logs,
                'config': config,
            },
            doors={
                'http': Door(
                    schema='http',
                    port=image.ports['http'],
                    externalport=ports.get('http'),
                    paths=[
                        '/',
                        '/_plugin/head/',
                        '/_plugin/marvel/',
                    ],
                ),
                'peer': Door(schema='elasticsearch-peer', port=image.ports['peer'],
                             externalport=ports.get('peer')),
                'jmx': Door(schema='rmi', port=image.ports['jmx'], externalport=ports.get('jmx')),
            },
            env={
                'JAVA_RMI_PORT': image.ports['jmx'],
                'JAVA_RMI_SERVER_HOSTNAME': ship.fqdn,
                'ES_HEAP_SIZE': ship.memory // 2,
                'ES_JAVA_OPTS': '-XX:NewRatio=5',
            },
            memory=ship.memory * 3 // 4,
        ) for ship in ships])
    return containers
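A call sketch, assuming the ships and zookeepers collections are prepared elsewhere (they are not built in this example); the cluster name and the external http port are illustrative values:

# ships and zookeepers are assumed to exist
containers = create(ships, zookeepers, name='logs', ports={'http': 9200})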
Example #9
def create_elasticsearch(clustername, version):
    """Returns Elasticsearch container.
    Tested versions: 1.3.1, 1.4.1
    """
    image = get_elasticsearch_image(version=version)
    data = DataVolume(image.volumes['data'])
    logs = LogVolume(
        image.volumes['logs'],
        files={
            '{}.log'.format(clustername): RotatedLogFile('[%Y-%m-%d %H:%M:%S,%f]', 25)
        },
    )

    config = ConfigVolume(
        dest=image.volumes['config'],
        files={
            'mapping.json': TextFile(resource_string('mapping.json')),
            'logging.yml': TextFile(resource_string('logging.yml')),
        },
    )

    container = Container(
        name='elasticsearch',
        image=image,
        volumes={
            'data': data,
            'logs': logs,
            'config': config,
        },
        doors={
            'http': Door(
                schema='http',
                port=image.ports['http'],
                urls={
                    'head': Url('_plugin/head/'),
                    'marvel': Url('_plugin/marvel/'),
                },
            ),
            'peer': Door(schema='elasticsearch-peer', port=image.ports['peer']),
            'jmx': Door(schema='rmi', port=image.ports['jmx']),
        },
        env={
            'ES_JAVA_OPTS': '-XX:NewRatio=5',
            'ES_CLASSPATH': config.files['logging.yml'].fulldest,
        },
    )

    def create_elasticsearch_config(container=container):
        marvel_agent = {}
        if 'marvel' in container.links:
            marvel_agent['exporter.es.hosts'] = [link.hostport for link in container.links['marvel']]
        else:
            marvel_agent['enabled'] = False

        ships = [door.container.ship for door in container.links['elasticsearch']] + [container.ship]
        config = {
            'cluster.name': clustername,
            'node': {
                'name': container.ship.name,
                'datacenter': container.ship.datacenter,
            },
            'transport.tcp.port': container.doors['peer'].internalport,
            'transport.publish_port': container.doors['peer'].port,
            'http.port': container.doors['http'].internalport,
            'network.publish_host': container.ship.fqdn,
            'discovery': None,
            'cluster.routing.allocation': {
                'awareness': {
                    'force.datacenter.values': sorted({ship.datacenter for ship in ships}),
                    'attributes': 'datacenter',
                },
                'cluster_concurrent_rebalance': 10,
                'disk.threshold_enabled': True,
                'node_initial_primaries_recoveries': 10,
                'node_concurrent_recoveries': 10,
            },
            'index': {
                'number_of_shards': 5,
                'number_of_replicas': 2,
                'mapper.default_mapping_location': container.volumes['config'].files['mapping.json'].fulldest,
                'query.default_field': 'msg',
                'store.type': 'mmapfs',
                'translog.flush_threshold_ops': 50000,
                'refresh_interval': '10s',
            },
            'indices': {
                'recovery.concurrent_streams': 20,
                'memory.index_buffer_size': '30%',
            },
            'marvel.agent': marvel_agent,
        }
        if 'zookeeper' in container.links:
            config['discovery'] = {'type': 'com.sonian.elasticsearch.zookeeper.discovery.ZooKeeperDiscoveryModule'}
            config['sonian.elasticsearch.zookeeper'] = {
                'settings.enabled': False,
                'client.host': ','.join([link.hostport for link in container.links['zookeeper']]),
                'discovery.state_publishing.enabled': True,
            }
            config['zookeeper.root'] = '/{}/elasticsearch'.format(clustername)
        else:
            config['discovery.zen'] = {
                'ping': {
                    'multicast.enabled': False,
                    'unicast.hosts': [door.hostport for door in container.links['elasticsearch']],
                },
                'minimum_master_nodes': (len(container.links['elasticsearch']) + 1) // 2 + 1,
            }

        return YamlFile(config)

    def create_env(container=container):
        arguments = [
            '-server',
            '-showversion',
        ]
        jmxport = container.doors['jmx'].internalport
        options = {
            '-Des.default.config': os.path.join(config.dest, 'elasticsearch.yml'),
            '-Des.default.path.home': '/usr/share/elasticsearch',
            '-Des.default.path.logs': logs.dest,
            '-Des.default.path.data': data.dest,
            '-Des.default.path.work': '/tmp/elasticsearch',
            '-Des.default.path.conf': config.dest,
            '-Dcom.sun.management.jmxremote.authenticate': False,
            '-Dcom.sun.management.jmxremote.ssl': False,
            '-Dcom.sun.management.jmxremote.local.only': False,
            '-Dcom.sun.management.jmxremote.port': jmxport,
            '-Dcom.sun.management.jmxremote.rmi.port': jmxport,
            '-Djava.rmi.server.hostname': container.ship.fqdn,
            '-Dvisualvm.display.name': container.fullname,
        }

        jvmflags = arguments + ['{}={}'.format(key, value) for key, value in options.items()]
        text = 'export JAVA_OPTS="{}"'.format(' '.join(sorted(jvmflags)))
        if container.memory > 0:
            text += '\nexport ES_HEAP_SIZE={}'.format(container.memory // 2)
        return TextFile(text)

    config.files['elasticsearch.yml'] = create_elasticsearch_config
    config.files['env.sh'] = create_env
    return container
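A minimal call sketch; '1.4.1' is one of the versions the docstring lists as tested, and the cluster name is illustrative:

es = create_elasticsearch(clustername='logs', version='1.4.1')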
Example #10
def make(version='1:2.4.1-1+trusty'):
    frontend_image = SourceImage(
        name='zabbix-frontend',
        parent=make_zabbix_image(),
        scripts=[
            'apt-get update && '
            'apt-get install -y zabbix-frontend-php={version} apache2 php5-pgsql'.format(version=version),
            'chmod go+rx /etc/zabbix',
        ],
        files={
            '/scripts/frontend.sh': resource_stream('frontend.sh'),
        },
        command=['/scripts/frontend.sh'],
    )

    postgres_image = SourceImage(
        name='postgres',
        parent=Image(repository='postgres', namespace=None, registry=None),
    )

    postgres = Container(
        name='postgres',
        image=postgres_image,
        doors={'postgres': Door(port=5432, schema='postgres')},
        volumes={
            'logs': LogVolume(dest='/var/log/postgresql'),
            'data': DataVolume(dest='/var/lib/postgresql/data'),
            # 'config': ConfigVolume(dest='/etc/postgresql'),
        },
    )

    @cached
    def make_server_image():
        return SourceImage(
            name='zabbix-server',
            parent=make_zabbix_image(),
            ports={'zabbix-trapper': 10051},
            scripts=[
                'apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y ' +
                'zabbix-server-pgsql={version} '.format(version=version) +
                'strace snmp-mibs-downloader fping nmap',
                'ln -fs /usr/bin/fping /usr/sbin/',
            ],
            files={
                '/scripts/zabbix.sh': resource_stream('zabbix.sh'),
                '/usr/lib/zabbix/alertscripts/golem-alert-handler.sh': resource_stream('golem-alert-handler.sh'),
            },
            volumes={
                'logs': '/var/log/zabbix',
                'config': '/etc/zabbix',
            },
            command=['/scripts/zabbix.sh'],
        )

    backend = Container(
        name='zabbix-backend',
        image=make_server_image(),
        doors={'zabbix-trapper': Door(schema='zabbix-trapper')},
        privileged=True,  # needed for strace to work
        volumes={
            'logs': LogVolume(
                dest='/var/log/zabbix',
                files={
                    'zabbix_server.log': LogFile(),
                    'snmptt.log': LogFile(),
                    'golem-alert.log': LogFile(),
                },
            ),
            'config': ConfigVolume(
                dest='/etc/zabbix',
                files={'zabbix_server.conf': None},
            ),
        },
    )
    # This variable is needed for golem-alert-handler.sh script
    backend.env['GOLEM_ALERT_LOG'] = backend.volumes['logs'].files['golem-alert.log'].fulldest

    def make_zabbix_server_conf(backend=backend, postgres=postgres):
        logfiles = backend.volumes['logs'].files
        config = {
            'LogFile': logfiles['zabbix_server.log'].fulldest,
            'LogFileSize': 0,
            'PidFile': '/var/run/zabbix_server.pid',
            'DBHost': postgres.ship.fqdn,
            'DBName': 'zabbix',
            'DBUser': '******',
            'DBPassword': '',
            'DBPort': postgres.doors['postgres'].port,
            'StartPollers': 5,
            'StartIPMIPollers': 0,
            'StartTrappers': 1,
            'JavaGateway': '127.0.0.1',
            'StartJavaPollers': 0,
            'StartVMwareCollectors': 0,
            'VMwareFrequency': 10,
            'VMwareCacheSize': '256K',
            'SNMPTrapperFile': logfiles['snmptt.log'].fulldest,
            'SenderFrequency': 10,
            'CacheUpdateFrequency': 10,
            'StartDBSyncers': 4,
            'HistoryCacheSize': '2G',
            'TrendCacheSize': '2G',
            'HistoryTextCacheSize': '2G',
            'ValueCacheSize': '2G',
            'Timeout': 30,
            'UnreachablePeriod': 10,
            'UnavailableDelay': 10,
            'UnreachableDelay': 10,
            'AlertScriptsPath': '/usr/lib/zabbix/alertscripts',
            'ExternalScripts': '/usr/lib/zabbix/externalscripts',
            'ProxyConfigFrequency': 10,
            'AllowRoot': 1,
        }

        return TextFile('\n'.join(['{}={}'.format(key, value) for key, value in sorted(config.items())]))

    backend.volumes['config'].files['zabbix_server.conf'] = make_zabbix_server_conf

    frontend = Container(
        name='zabbix-frontend',
        image=frontend_image,
        privileged=True,  # needed to chmod /etc/zabbix
        doors={
            'http': Door(
                schema='http',
                urls={'default': Url('zabbix')},
            ),
        },
        volumes={
            'logs': LogVolume(
                dest='/var/log/apache2',
                files={
                    'access.log': LogFile(),
                    'error.log': LogFile(),
                },
            ),
            'config-apache': ConfigVolume(
                dest='/etc/zabbix',
                files={
                    'apache.conf': TextFile(resource_string('apache.conf')),
                    'web/zabbix.conf.php': TemplateFile(
                        resource_string('zabbix.conf.php'),
                        postgres=postgres,
                        backend=backend,
                    ),
                },
            ),
        },
    )

    def make_db_task(name, script):
        return Task(
            name=name,
            image=make_server_image(),
            volumes={
                'scripts': ConfigVolume(
                    dest='/scripts',
                    files={'run.sh': script}
                ),
            },
            command=['/scripts/run.sh'],
        )

    def make_reinit_script():
        return TextFile(dedent('''#!/bin/bash
            cmd='psql -U postgres -h {door.host} -p {door.port}'
            echo 'drop database zabbix; create database zabbix;' | $cmd
            cd /usr/share/zabbix-server-pgsql
            cat schema.sql images.sql data.sql | $cmd zabbix
            '''.format(door=postgres.doors['postgres'])))
    reinit = make_db_task('reinit', make_reinit_script)

    def make_dump_script():
        return TextFile('pg_dump -U postgres -h {door.host} -p {door.port} -c -C '
                        'zabbix'.format(door=postgres.doors['postgres']))
    dump = make_db_task('dump', make_dump_script)

    def make_restore_script():
        return TextFile('psql -U postgres -h {door.host} -p {door.port}'.format(door=postgres.doors['postgres']))
    restore = make_db_task('restore', make_restore_script)

    return [postgres, frontend, backend], [reinit, dump, restore]
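The function returns the containers and the maintenance tasks as two separate lists, so a caller might unpack them like this (a sketch using the default package version):

containers, tasks = make()
postgres, frontend, backend = containers
reinit, dump, restore = tasks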
Example #11
def getbuilder(
        zookeepers,
        ssh_keys=(),
        smtp_host='smtp.example.com',
        smtp_port=25,
        golem_url_ro='http://ro.admin.yandex-team.ru',
        golem_url_rw='https://golem.yandex-team.ru',
        threads=10,
        restapi_port=7887,
        gitapi_port=2022,
        elasticsearch_urls=(),
        pownyversion='0.4',
        memory=1024**3,
        ):

    logging_config = yaml.load(resource_string('logging.yaml'))

    if elasticsearch_urls:
        elog_config = yaml.load(resource_string('logging.elog.yaml'))
        logging_config['handlers']['elog'] = elog_config
        elog_config['urls'] = elasticsearch_urls
        logging_config['root']['handlers'].append('elog')

    rules = DataVolume(
        dest='/var/lib/powny/rules',
        path='/var/lib/powny/rules',
    )

    rulesgit = DataVolume(
        dest='/var/lib/powny/rules.git',
        path='/var/lib/powny/rules.git',
    )

    restapilogs = LogVolume(
        dest='/var/log/powny',
        files={
            'uwsgi.log': LogFile('%Y%m%d-%H%M%S'),
        },
    )
    pownylogs = LogVolume(
        dest='/var/log/powny',
        files={
            'powny.log': LogFile(''),
        },
    )
    gitapilogs = LogVolume(
        dest='/var/log/powny',
        files={
            'gitapi.log': LogFile(''),
        },
    )

    configdest = '/etc/powny'
    uwsgi_ini_file = TemplateFile(TextFile('uwsgi.ini'))

    def stoppable(cmd):
        return 'trap exit TERM; {} & wait'.format(cmd)

    parent = Image(namespace='yandex', repository='trusty')

    pownyimage = SourceImage(
        name='powny',
        parent=parent,
        env={
            'PATH': '$PATH:/opt/pypy3/bin',
            'LANG': 'C.UTF-8',
        },
        scripts=[
            'curl http://buildbot.pypy.org/nightly/py3k/pypy-c-jit-latest-linux64.tar.bz2 2>/dev/null | tar -jxf -',
            'mv pypy* /opt/pypy3',
            'curl https://bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py 2>/dev/null | pypy',
            'easy_install pip==1.4.1',
            'pip install contextlog elog gns=={}'.format(pownyversion),
        ],
        volumes={
            'config': configdest,
            'rules': rules.dest,
            'logs': pownylogs.dest,
        },
        command=stoppable('gns $POWNY_MODULE -c /etc/powny/powny.yaml'),
    )
    apiimage = SourceImage(
        name='powny-cpython',
        parent=parent,
        env={'LANG': 'C.UTF-8'},
        scripts=[
            'apt-add-repository ppa:fkrull/deadsnakes -y',
            'apt-get update',
            'apt-get install python3-pip -yy',
            'pip3 install contextlog elog uwsgi gns=={}'.format(pownyversion),
        ],
        volumes={
            'config': configdest,
            'rules': rules.dest,
            'logs': restapilogs.dest,
        },
        command=stoppable('uwsgi --ini uwsgi.ini'),
    )

    gitimage = SourceImage(
        name='gitsplit',
        parent=parent,
        files={
            '/post-receive': resource_string('post-receive'),
            '/etc/ssh/sshd_config': resource_string('sshd_config'),
            '/root/run.sh': resource_string('run.sh'),
        },
        ports={'ssh': 22},
        volumes={
            'rules': rules.dest,
            'rules.git': rulesgit.dest,
            'logs': pownylogs.dest,
        },
        command='bash /root/run.sh',
        scripts=[
            'apt-get install -y openssh-server',
            'useradd --non-unique --uid 0 --system --shell /usr/bin/git-shell -d / git',
            'mkdir /run/sshd',
            'chmod 0755 /run/sshd',
        ],
    )

    keys = ConfigVolume(dest='/var/lib/keys', files={'authorized_keys': TextFile(text='\n'.join(ssh_keys))})

    def make_config(ship):
        config = {
            'core': {
                'zoo-nodes': ['{}:{}'.format(z.ship.fqdn, z.getport('client')) for z in zookeepers],
                'max-input-queue-size': 5000,
            },
            'logging': logging_config,
        }
        return config

    def add_service(config, name):
        config[name] = {
            'workers': threads,
            'die-after': None,
        }

    def add_rules(config):
        config['core']['import-alias'] = 'rules'
        config['core']['rules-dir'] = rules.dest

    def add_output(config, email_from):
        config['golem'] = {'url-ro': golem_url_ro, 'url-rw': golem_url_rw}
        config['output'] = {
            'email': {
                'from': email_from,
                'server': smtp_host,
                'port': smtp_port,
            },
        }

    def container(ship, name, config, doors=None, backdoor=None, volumes=None, memory=memory, image=pownyimage,
                  files=None, logs=pownylogs):
        doors = doors or {}
        volumes = volumes or {}
        files = files or {}

        if backdoor is not None:
            config['backdoor'] = {'enabled': True, 'port': backdoor}
            doors['backdoor'] = Door(schema='http', port=backdoor, externalport=backdoor)

        files = files.copy()
        files['powny.yaml'] = YamlFile(config)

        _volumes = {
            'config': ConfigVolume(dest=configdest, files=files),
            'logs': logs,
        }
        _volumes.update(volumes)

        return Container(
            name=name,
            ship=ship,
            image=image,
            memory=memory,
            volumes=_volumes,
            env={'POWNY_MODULE': name},
            doors=doors,
        )

    class Builder:
        @staticmethod
        def splitter(ship):
            config = make_config(ship)
            add_service(config, 'splitter')
            add_rules(config)
            return container(ship, 'splitter', config, volumes={'rules': rules}, backdoor=11002, doors={})

        @staticmethod
        def worker(ship):
            config = make_config(ship)
            add_service(config, 'worker')
            add_rules(config)
            add_output(config, 'powny@'+ship.fqdn)
            return container(ship, 'worker', config, volumes={'rules': rules}, backdoor=11001, doors={})

        @staticmethod
        def restapi(ship, name='api', port=restapi_port):
            config = make_config(ship)
            return container(ship, name, config, files={'uwsgi.ini': uwsgi_ini_file},
                             backdoor=None, doors={'http': port}, image=apiimage, logs=restapilogs)

        @staticmethod
        def collector(ship):
            config = make_config(ship)
            add_service(config, 'collector')
            return container(ship, 'collector', config, backdoor=11003, doors={})

        @staticmethod
        def gitapi(ship):
            return Container(
                name='gitapi',
                ship=ship,
                image=gitimage,
                memory=128*1024*1024,
                volumes={
                    'rules.git': rulesgit,
                    'rules': rules,
                    'keys': keys,
                    'logs': gitapilogs,
                },
                doors={'ssh': Door(schema='ssh', port=gitimage.ports['ssh'], externalport=gitapi_port)},
                env={
                    'rules_git_path': rulesgit.dest,
                    'rules_path': rules.dest,
                },
            )

        @staticmethod
        def reinit(ship):
            config = make_config(ship)
            return Task(container(ship, 'reinit', config))

        @classmethod
        @aslist
        def build(cls, ships):
            for ship in ships:
                yield cls.worker(ship)
                yield cls.splitter(ship)
                yield cls.collector(ship)
                yield cls.restapi(ship)
                yield cls.gitapi(ship)

    return Builder
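A sketch of using the returned builder class, assuming zookeepers and ships collections already exist (their construction is not shown here) and an illustrative SSH key:

# zookeepers and ships are assumed to be prepared elsewhere
PownyBuilder = getbuilder(zookeepers=zookeepers, ssh_keys=['ssh-rsa AAAA... user@host'])
containers = PownyBuilder.build(ships)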
Example #12
def create_zookeeper(
    memory=1024**3,
    snap_count=10000,
    global_outstanding_limit=1000,
    max_client_connections=0,
):
    image = get_zookeeper_image()
    data = DataVolume(dest=image.volumes['data'])
    logs = LogVolume(
        dest=image.volumes['logs'],
        files={
            'zookeeper.log': RotatedLogFile(format='%Y-%m-%d %H:%M:%S,%f', length=23),
        },
    )

    container = Container(
        name='zookeeper',
        image=image,
        volumes={
            'data': data,
            'logs': logs,
            'config': ConfigVolume(
                dest='/opt/zookeeper/conf',
                files={
                    'log4j.properties': TextFile(resource_string('log4j.properties')),
                    'zoo.cfg': None,
                    'myid': None,
                    'env.sh': None,
                }
            ),
        },
        doors={
            'election': Door(schema='zookeeper-election', port=image.ports['election']),
            'peer': Door(schema='zookeeper-peer', port=image.ports['peer']),
            'client': Door(schema='zookeeper', port=image.ports['client']),
            'jmx': Door(schema='rmi', port=image.ports['jmx'], sameports=True),
        },
        memory=memory,
    )

    def make_zoo_cfg(container=container):
        config = {
            'tickTime': 2000,
            'initLimit': 100,
            'syncLimit': 50,
            'dataDir': data.dest,
            'clientPort': image.ports['client'],
            'autopurge.purgeInterval': 1,
            'snapCount': snap_count,
            'globalOutstandingLimit': global_outstanding_limit,
            'maxClientCnxns': max_client_connections,
        }
        for peerid, (peer, election) in container.links.items():
            assert peer.container == election.container, "peer and election doors should be on the same container"
            if peer.container == container:
                # use 0.0.0.0 as workaround for https://issues.apache.org/jira/browse/ZOOKEEPER-1711
                host = '0.0.0.0'
                peerport = peer.internalport
                electionport = election.internalport
            else:
                host = peer.host
                peerport = peer.port
                electionport = election.port
            config['server.{}'.format(peerid)] = '{host}:{peerport}:{electionport}'.format(
                host=host,
                peerport=peerport,
                electionport=electionport,
            )
        return IniFile(config)

    def make_myid(container=container):
        return TextFile(str(container.zkid))

    def make_env(container=container):
        arguments = [
            '-server',
            '-showversion',
            '-Xmx{}'.format(memory*3//4),
        ]
        jmxport = container.doors['jmx'].internalport
        options = {
            '-Dcom.sun.management.jmxremote.authenticate': False,
            '-Dcom.sun.management.jmxremote.ssl': False,
            '-Dcom.sun.management.jmxremote.local.only': False,
            '-Dcom.sun.management.jmxremote.port': jmxport,
            '-Dcom.sun.management.jmxremote.rmi.port': jmxport,
            '-Djava.rmi.server.hostname': container.ship.fqdn,
            '-Dvisualvm.display.name': container.fullname,
        }

        jvmflags = arguments + ['{}={}'.format(key, value) for key, value in sorted(options.items())]
        return TextFile('export JVMFLAGS="{}"'.format(' '.join(sorted(jvmflags))))

    container.volumes['config'].files['zoo.cfg'] = make_zoo_cfg
    container.volumes['config'].files['myid'] = make_myid
    container.volumes['config'].files['env.sh'] = make_env
    container.zkid = 1

    return container
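The factory builds a single node and defaults zkid to 1; a sketch of assembling a three-node ensemble, assuming the peer/election door links between the nodes are wired up by surrounding tooling (that wiring is not part of this example):

ensemble = []
for zkid in (1, 2, 3):
    zk = create_zookeeper()
    zk.zkid = zkid  # each ensemble member needs a distinct id, as written into the myid file above
    ensemble.append(zk)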
Example #13
def make_logging_config(container):
    logging_config = yaml.load(resource_string('logging.yaml'))
    return logging_config