Code example #1
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Change log template in rsyslog configuration...')

    run("sed -i '/^{0}/d; a{0};{1}' {2}".format(PARAM, TEMPLATE, CONF))
    run('systemctl restart rsyslog')

    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    # for/else: the else branch runs only if the loop completes without
    # break, i.e. no pod named pod_name exists for the internal user.
    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('docker pull kuberdock/fluentd:1.4')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
Code example #2
    def test_post(self):
        data = dict(username='******',
                    first_name='',
                    last_name='',
                    middle_initials='',
                    password='******',
                    email='*****@*****.**',
                    active=True,
                    rolename='User',
                    package='Standard package')

        # add
        response = self.admin_open(method='POST', json=data)
        self.assert200(response)
        user = User.get(data['username']).to_dict(full=True)
        user['join_date'] = user['join_date'].replace(
            tzinfo=pytz.utc).isoformat()
        self.assertEqual(dict(user, actions=mock.ANY), response.json['data'])

        # check conversion of extended boolean fields
        data['username'] += '1'
        data['email'] = '1' + data['email']
        data['active'] = 'TrUe'
        response = self.admin_open(method='POST', json=data)
        self.assert200(response)  # active is valid
        self.assertEqual(response.json['data']['active'], True)

        data['username'] += '1'
        data['email'] = '1' + data['email']
        data['suspended'] = '1'
        response = self.admin_open(method='POST', json=data)
        self.assert200(response)  # suspended is valid
        self.assertEqual(response.json['data']['suspended'], True)
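A note on the "extended boolean" behaviour this test pins down: the API accepts
string spellings such as 'TrUe' and '1' and returns real booleans. A minimal,
self-contained sketch of such a conversion (illustrative only; the project's
actual validator is not shown in this listing and may accept other spellings):

def to_extended_bool(value):
    # Illustrative helper, not the project's code: accept the spellings the
    # test above demonstrates ('TrUe', '1') alongside real booleans.
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('true', '1')

assert to_extended_bool('TrUe') is True
assert to_extended_bool('1') is True
assert to_extended_bool(False) is False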
Code example #3
def _upgrade_node_202(upd, with_testing, env, *args, **kwargs):
    """Update log pod"""

    upd.print_log("Upgrading logs pod ...")
    ki = User.get_internal()
    pod_name = nodes.get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            PodCollection(ki).delete(pod['id'], force=True)
            break
    else:
        upd.print_log(u"Warning: logs pod '{}' not found".format(pod_name))

    run('docker pull kuberdock/elasticsearch:2.2')
    run('docker pull kuberdock/fluentd:1.8')
    log_pod = nodes.create_logs_pod(env.host_string, ki)

    # Also update the rsyslog config, because the log pod IP has changed.
    pod_ip = log_pod['podIP']
    put(StringIO('$LocalHostName {node_name}\n'
                 '$template LongTagForwardFormat,'
                 '"<%PRI%>%TIMESTAMP:::date-rfc3339% %HOSTNAME% '
                 '%syslogtag%%msg:::sp-if-no-1st-sp%%msg%"\n'
                 '*.* @{pod_ip}:5140;LongTagForwardFormat\n'.format(
                     node_name=env.host_string, pod_ip=pod_ip)),
        RSYSLOG_CONF,
        mode=0644)
    run('systemctl restart rsyslog')

    upd.print_log("Logs pod successfully upgraded")
Code example #4
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Add node hostname to rsyslog configuration...')

    run("sed -i '/^{0}/d; i{0} {1}' {2}".format(PARAM, env.host_string, CONF))
    run('systemctl restart rsyslog')

    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('rm -fr /var/lib/elasticsearch/kuberdock/nodes/*/indices/syslog-*')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
Code example #5
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Add PersistentDisk model.')
    upd.print_log('Upgrading db...')
    helpers.upgrade_db(revision='56f9182bf415')

    upd.print_log('Populate db...')
    drives = []
    if CEPH:
        drives.extend(CephStorage().get())
    if AWS:
        drives.extend(AmazonStorage().get())
    if not drives:
        return

    pods_by_drives = get_pods_by_drives()
    for drive in drives:
        owner = User.filter_by(username=drive['owner']).one()
        pod = pods_by_drives.get(drive['drive_name'])
        pd = PersistentDisk(id=drive['id'],
                            drive_name=drive['drive_name'],
                            name=drive['name'],
                            owner=owner,
                            size=drive['size'],
                            pod=pod)
        db.session.add(pd)
    db.session.commit()
Code example #6
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    run('yum --enablerepo=kube,kube-testing clean metadata')

    # 00091_update.py
    upd.print_log('Upgrading nodes with docker-cleaner.sh')
    run("""rm -f /var/lib/kuberdock/scripts/docker-cleaner.sh""")
    run("""crontab -l | grep -v "docker-cleaner.sh" | crontab - """)

    # 00092_update.py
    put('/var/opt/kuberdock/node_network_plugin.sh',
        '/usr/libexec/kubernetes/kubelet-plugins/net/exec/kuberdock/kuberdock')

    # 00093_update.py
    upd.print_log('Use custom log template with rsyslog...')
    run("sed -i '/^{0}/d' {1}".format(PARAM1, CONF))
    run("sed -i '/^{0}/d' {1}".format(PARAM2, CONF))
    run("sed -i '$ a{0} {1}' {2}".format(PARAM1, TEMPLATE, CONF))
    run("sed -i '$ a{0};{1}' {2}".format(PARAM2, TEMPLATE_NAME, CONF))
    run('systemctl restart rsyslog')

    # 00096_update.py
    upd.print_log('Disabling swap and backing up fstab to {0}...'.format(FSTAB_BACKUP))
    run('swapoff -a')
    run('mkdir -p /var/lib/kuberdock/backups')
    run('test -f {0} && echo "{0} already exists" || cp /etc/fstab {0}'.format(FSTAB_BACKUP))
    run("sed -r -i '/[[:space:]]+swap[[:space:]]+/d' /etc/fstab")

    # 00097_update.py
    upd.print_log('Update elasticsearch for logs...')
    upd.print_log(put('/var/opt/kuberdock/make_elastic_config.py',
                      '/var/lib/elasticsearch',
                      mode=0755))
    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('docker pull kuberdock/elasticsearch:2.2')
    run('docker pull kuberdock/fluentd:1.5')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
Code example #7
def _master_dns_policy():
    ki = User.get_internal()
    dns_pod = Pod.query.filter_by(name=KUBERDOCK_DNS_POD_NAME,
                                  owner=ki).first()
    if dns_pod is not None:
        dns_policy = get_dns_policy_config(ki.id, dns_pod.id)
        Etcd(ETCD_NETWORK_POLICY_SERVICE).put(KUBERDOCK_DNS_POD_NAME,
                                              value=dns_policy)
Code example #8
    def _validate_kube_type_in_user_package(self, exists, field, value):
        if exists and self.user:
            if self.user == KUBERDOCK_INTERNAL_USER and \
                    value == Kube.get_internal_service_kube_type():
                return
            package = User.get(self.user).package
            if value not in [k.kube_id for k in package.kubes]:
                self._error(field,
                            "Pod can't be created, because your package "
                            "\"{0}\" does not include kube type with id "
                            "\"{1}\"".format(package.name, value))
Code example #9
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Add roles {}, resources {} and their permissions...'.format(
        ROLES, RESOURCES))
    fixtures.add_permissions(roles=ROLES,
                             resources=RESOURCES,
                             permissions=PERMISSIONS)
    upd.print_log('Add {} user...'.format(USER))
    u = db.session.query(User).filter(User.username == USER).first()
    if not u:
        r = Role.filter_by(rolename='HostingPanel').first()
        u = User.create(username=USER, password=USER, role=r, active=True)
        u.save()
Code example #10
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    # 00076_update.py
    upd.print_log('Add kernel parameters to make pod isolation work...')

    run('sed -i "/net.bridge.bridge-nf-call-ip6\?tables/d" {0}'.format(CONF))

    run("echo net.bridge.bridge-nf-call-iptables = 1 >> {0}".format(CONF))
    run("echo net.bridge.bridge-nf-call-ip6tables = 1 >> {0}".format(CONF))

    run("sysctl -w net.bridge.bridge-nf-call-iptables=1")
    run("sysctl -w net.bridge.bridge-nf-call-ip6tables=1")

    # 00079_update.py
    upd.print_log('Copy Elasticsearch config maker...')
    upd.print_log(
        put('/var/opt/kuberdock/make_elastic_config.py',
            '/var/lib/elasticsearch',
            mode=0755))
    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)

    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)

    run('docker pull kuberdock/elasticsearch:1.5')

    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})

    # 00082_update.py
    upd.print_log('Upgrading nodes with docker-cleaner.sh')
    run("cat > /var/lib/kuberdock/scripts/docker-cleaner.sh << 'EOF' {0}".
        format(DOCKERCLEANER))
    run("""chmod +x /var/lib/kuberdock/scripts/docker-cleaner.sh""")
    run("""crontab -l | { cat; echo "0 */6 * * * /var/lib/kuberdock/scripts/docker-cleaner.sh"; } | crontab - """
        )
Code example #11
def downgrade(upd, with_testing, exception, *args, **kwargs):
    upd.print_log('Creating HostingPanel role and user...')
    add_roles([('HostingPanel', True)])
    role = Role.filter_by(rolename='HostingPanel').first()
    user = User.filter_by(username='******').first()
    if not user:
        db.session.add(
            User(username='******',
                 role=role,
                 password='******',
                 active=True))
    else:
        user.deleted = False
        user.role_id = role.id
    perms = dict(
        permissions_base, **{
            ('images', 'get'): True,
            ('images', 'isalive'): True,
            ('predefined_apps', 'get'): True,
        })
    _add_permissions([(resource, role.rolename, action, allow)
                      for (resource, action), allow in perms.iteritems()])
    db.session.commit()
Code example #12
def _update_00203_upgrade(upd):
    def _update_ingress_container(config):
        config_map_cmd = "--nginx-configmap={}/{}".format(
            constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE,
            constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAME)

        for c in config['containers']:
            if c['name'] == 'nginx-ingress':
                if config_map_cmd not in c['command']:
                    c['command'].append(config_map_cmd)
                    return True
        return False

    def _create_or_update_ingress_config():
        client = configmap.ConfigMapClient(KubeQuery())
        try:
            client.get(
                constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAME,
                namespace=constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE)

            client.patch(
                constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAME,
                data={'server-name-hash-bucket-size': '128'},
                namespace=constants.KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE)

        except configmap.ConfigMapNotFound:
            ingress.create_ingress_nginx_configmap()

    owner = User.get_internal()
    pod = Pod.query.filter_by(name=constants.KUBERDOCK_INGRESS_POD_NAME,
                              owner=owner).first()

    if pod is None:
        upd.print_log('Ingress POD hasn\'t been created yet. Skipping')
        return

    _create_or_update_ingress_config()

    config = pod.get_dbconfig()
    if not _update_ingress_container(config):
        upd.print_log('Ingress controller RC is up-to-date. Skipping')
        return

    collection = PodCollection()

    replace_pod_config(pod, config)
    collection.patch_running_pod(
        pod.id, {'spec': {
            'containers': config['containers']
        }}, restart=True)
Code example #13
def _update_00179_upgrade_node(env):
    nodename = env.host_string
    log_pod_name = get_kuberdock_logs_pod_name(nodename)
    internal_user = User.get_internal()
    pc = PodCollection(internal_user)
    dbpod = Pod.query.filter(Pod.owner_id == internal_user.id,
                             Pod.name == log_pod_name,
                             Pod.status != 'deleted').first()
    if not dbpod:
        raise Exception('Node {} has no logs pod. '
                        'Delete the node and try again'.format(nodename))
    pod = pc.get(dbpod.id, as_json=False)
    old_ip = '127.0.0.1'
    new_ip = pod['podIP']
    run('sed -i "s/@{old_ip}:/@{new_ip}:/g" {conf}'.format(old_ip=old_ip,
                                                           new_ip=new_ip,
                                                           conf=RSYSLOG_CONF))
    run('systemctl restart rsyslog')
Code example #14
def _upgrade_199(upd, with_testing, *args, **kwargs):
    ku = User.get_internal()
    pod = db.session.query(Pod).filter_by(name=KUBERDOCK_DNS_POD_NAME,
                                          owner=ku).first()
    nodes = Node.query.all()

    if not nodes:
        upd.print_log('No nodes found, exiting')
        return

    for node in nodes:
        k8s_node = node_utils._get_k8s_node_by_host(node.hostname)
        status, _ = node_utils.get_status(node, k8s_node)
        if status == NODE_STATUSES.running:
            if pod:
                pc = PodCollection()
                pc.delete(pod.id, force=True)
            create_dns_pod(node.hostname, ku)
            return

    raise helpers.UpgradeError("Can't find any running node to run dns pod")
Code example #15
def _recreate_ingress_pod_if_needed():
    kd_user = User.get_internal()
    ingress_pod = Pod.filter_by(name=KUBERDOCK_INGRESS_POD_NAME,
                                owner=kd_user).first()
    if ingress_pod or BaseDomain.query.first():
        PodCollection(kd_user).delete(ingress_pod.id, force=True)
        default_backend_pod = Pod.filter_by(name=KUBERDOCK_BACKEND_POD_NAME,
                                            owner=kd_user).first()
        if not default_backend_pod:
            raise Exception(
                'Nginx ingress controller pod exists, but default backend pod '
                'is not found. Something is wrong. Please contact support to '
                'help.')
        PodCollection(kd_user).delete(default_backend_pod.id, force=True)
        c = ConfigMapClient(KubeQuery())
        try:
            c.delete(name=KUBERDOCK_INGRESS_CONFIG_MAP_NAME,
                     namespace=KUBERDOCK_INGRESS_CONFIG_MAP_NAMESPACE)
        except ConfigMapNotFound:
            pass
        sleep(30)  # TODO: workaround; remove when AC-5470 is fixed
        ingress.prepare_ip_sharing()
Code example #16
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Update dns pod...')

    local(
        'etcd-ca --depot-path /root/.etcd-ca new-cert --ip "10.254.0.10" --passphrase "" etcd-dns'
    )
    local('etcd-ca --depot-path /root/.etcd-ca sign --passphrase "" etcd-dns')
    local(
        'etcd-ca --depot-path /root/.etcd-ca export etcd-dns --insecure --passphrase "" | tar -xf -'
    )
    local('mv etcd-dns.crt /etc/pki/etcd/etcd-dns.crt')
    local('mv etcd-dns.key.insecure /etc/pki/etcd/etcd-dns.key')

    user = User.filter_by(username=KUBERDOCK_INTERNAL_USER).one()

    dns_pod = Pod.filter_by(name='kuberdock-dns', owner=user).first()

    if dns_pod:
        PodCollection(user).delete(dns_pod.id, force=True)

    dns_config = get_dns_pod_config()
    check_internal_pod_data(dns_config, user)
    dns_pod = PodCollection(user).add(dns_config, skip_check=True)
    PodCollection(user).update(dns_pod['id'], {'command': 'start'})
Code example #17
def upgrade(upd, with_testing, *args, **kwargs):
    upd.print_log('Fix logging pods path...')
    with open('/etc/kubernetes/configfile_for_nodes') as node_configfile:
        node_config = yaml.load(node_configfile.read())
    for user in node_config['users']:
        token = user['user']['token']
        if user['name'] == 'kubelet':
            break
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    for pod in PodCollection(ki).get(as_json=False):
        if pod_name_pattern.match(pod['name']):
            PodCollection(ki).delete(pod['id'], force=True)
            logs_config = get_kuberdock_logs_config(
                pod['node'],
                pod['name'],
                pod['kube_type'],
                pod['containers'][0]['kubes'],
                pod['containers'][1]['kubes'],
                MASTER_IP,
                token,
            )
            check_new_pod_data(logs_config, user=ki)
            logs_pod = PodCollection(ki).add(logs_config)
            PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
Code example #18
def get_internal_pods_state():
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_statuses = {}
    for pod in PodCollection(ki).get(as_json=False):
        pod_statuses[pod['name']] = pod['status'] == POD_STATUSES.running
    return pod_statuses
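For context, get_internal_pods_state() returns a mapping of internal pod name
to a boolean running flag. A small illustrative usage (not from the project):

# Illustrative usage only: list internal pods that are not running.
states = get_internal_pods_state()
stopped = sorted(name for name, running in states.items() if not running)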
Code example #19
def _node_policy_agent(hostname):
    ki = User.get_internal()
    token = get_node_token()
    create_policy_pod(hostname, ki, token)
Code example #20
    def test_execute_es_query(self, es_mock):
        """Test elasticsearch_utils.execute_es_query function."""

        # Add two log pod configs and two appropriate nodes
        internal_user = User.get_internal()
        pod_id1 = str(uuid4())
        service1 = 'srv1'
        namespace1 = 'ns1'
        pod_id2 = str(uuid4())
        service2 = 'srv2'
        namespace2 = 'ns2'

        host1 = 'h1'
        host2 = 'h2'

        kube_id = Kube.get_default_kube_type()

        pod1 = Pod(id=pod_id1,
                   name=get_kuberdock_logs_pod_name(host1),
                   owner_id=internal_user.id,
                   kube_id=kube_id,
                   config=json.dumps({
                       "service": service1,
                       "namespace": namespace1
                   }),
                   status='RUNNING')
        pod2 = Pod(id=pod_id2,
                   name=get_kuberdock_logs_pod_name(host2),
                   owner_id=internal_user.id,
                   kube_id=kube_id,
                   config=json.dumps({
                       "service": service2,
                       "namespace": namespace2
                   }),
                   status='RUNNING')
        db.session.add_all([pod1, pod2])
        db.session.commit()

        node1 = Node(ip='123.123.123',
                     hostname=host1,
                     kube_id=kube_id,
                     state='completed',
                     upgrade_status='applied')
        node2 = Node(ip='123.123.124',
                     hostname=host2,
                     kube_id=kube_id,
                     state='completed',
                     upgrade_status='applied')
        db.session.add_all([node1, node2])
        db.session.commit()

        size = 123
        index = '1234qwerty'
        query = None
        sort = None
        search_result = {'hits': {'total': 333, 'hits': [1, 2, 3]}}
        search_mock = es_mock.return_value.search
        search_mock.return_value = search_result
        res = elasticsearch_utils.execute_es_query(index, query, size, sort)
        self.assertEqual(
            res, {
                'total': search_result['hits']['total'],
                'hits': search_result['hits']['hits'],
            })
        prefix1 = elasticsearch_utils.K8S_PROXY_PREFIX + \
            '/namespaces/' + namespace1 + '/services/' + service1 + ':9200/'
        prefix2 = elasticsearch_utils.K8S_PROXY_PREFIX + \
            '/namespaces/' + namespace2 + '/services/' + service2 + ':9200/'
        es_mock.assert_called_once_with([
            {
                'host': KUBE_API_HOST,
                'port': KUBE_API_PORT,
                'url_prefix': prefix1,
            },
            {
                'host': KUBE_API_HOST,
                'port': KUBE_API_PORT,
                'url_prefix': prefix2,
            },
        ])
        search_mock.assert_called_once_with(index=index, body={'size': size})

        query = {'a': 1}
        sort = {'b': 2}
        elasticsearch_utils.execute_es_query(index, query, size, sort)
        search_mock.assert_called_with(index=index,
                                       body={
                                           'size': size,
                                           'sort': sort,
                                           'query': query
                                       })
        search_mock.side_effect = RequestException('!!!')
        with self.assertRaises(elasticsearch_utils.LogsError):
            elasticsearch_utils.execute_es_query(index, query, size, sort)
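From the assertions above, elasticsearch_utils.execute_es_query builds the
search body from size, sort and query (omitting sort and query when they are
None) and reshapes the Elasticsearch response into a dict with 'total' and
'hits'. A minimal sketch of a wrapper with that contract (illustrative only;
the real implementation also resolves per-node log services through the
Kubernetes API proxy, as the es_mock call check shows):

def execute_es_query_sketch(es_client, index, query, size, sort=None):
    # Illustrative only: mirrors the request/response contract asserted by
    # the test above, given an already-constructed Elasticsearch client.
    body = {'size': size}
    if sort is not None:
        body['sort'] = sort
    if query is not None:
        body['query'] = query
    result = es_client.search(index=index, body=body)
    return {
        'total': result['hits']['total'],
        'hits': result['hits']['hits'],
    }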
Code example #21
    def setUp(self):
        from kubedock.nodes.models import Node
        from kubedock.users.models import User
        from kubedock.kapi.podcollection import POD_STATUSES

        self.node = Node(
            ip='12.13.14.15',
            hostname='test-node-1',
            kube=bill_models.Kube.get_default_kube(),
            state=NODE_STATUSES.running,
        )
        self.internal_user = User.get_internal()
        self.pod = self.fixtures.pod(name='logs pod',
                                     status=POD_STATUSES.running)

        # disable redis caching
        patcher = mock.patch.object(es_logs, 'check_logs_pod',
                                    es_logs._check_logs_pod)
        self.addCleanup(patcher.stop)
        patcher.start()

        patcher = mock.patch.object(es_logs, 'PodCollection')
        self.addCleanup(patcher.stop)
        self.PodCollectionMock = patcher.start()
        self.PodCollectionMock.return_value.get.return_value = [
            self.pod.to_dict()]

        patcher = mock.patch.object(es_logs, 'get_kuberdock_logs_pod_name')
        self.addCleanup(patcher.stop)
        self.get_kuberdock_logs_pod_name_mock = patcher.start()
        self.get_kuberdock_logs_pod_name_mock.return_value = self.pod.name

        pod_state = usage_models.PodState(
            pod_id=self.pod.id,
            start_time=datetime(2015, 2, 5),
            last_event_time=datetime.utcnow(),
            last_event='MODIFIED',
            hostname=self.node.hostname,
            kube_id=self.pod.kube_id,
        )

        db.session.add_all([
            self.node,
            pod_state,
            usage_models.ContainerState(
                pod_state=pod_state,
                container_name='elasticsearch',
                docker_id='om3xcnhonfao9nhc',
                start_time=datetime(2015, 2, 5),
                end_time=datetime.utcnow(),
                exit_code=2,
            ),
            usage_models.ContainerState(
                pod_state=pod_state,
                container_name='fluentd',
                docker_id='aoncrh47rhwdcevf',
                start_time=datetime(2015, 2, 5),
                end_time=datetime.utcnow(),
                exit_code=2,
            ),
            usage_models.ContainerState(
                pod_state=pod_state,
                container_name='elasticsearch',
                docker_id='p93urmqahdeef',
                start_time=datetime.utcnow() - timedelta(minutes=1),
            ),
            usage_models.ContainerState(
                pod_state=pod_state,
                container_name='fluentd',
                docker_id='1wlsj2enhdfo4838',
                start_time=datetime.utcnow() - timedelta(minutes=1),
            ),
        ])
        db.session.flush()