def get_nodes(ver="7", arch="x86_64", count=4):
    """Request nodes from the CICO infrastructure via the ``cico`` CLI.

    Requests *count* CentOS nodes of the given version and architecture.
    CICO returns JSON containing the hostnames of the provisioned nodes.
    ~/duffy.key is needed for authenticating the request.

    Args:
        ver (str): Version of CentOS to be installed on the nodes,
            e.g. "6" or "7". Defaults to "7".
        arch (str): Architecture of CentOS to be installed on the nodes.
            Defaults to "x86_64".
        count (int): Number of CentOS nodes to be requested. Defaults to 4.

    Returns:
        list: Hostnames received from CICO.

    Note:
        Appends DUFFY_SSID to the local file env.properties and prints
        the raw output received from CICO.
    """
    out = run_cmd(
        'export CICO_API_KEY=`cat ~/duffy.key` && '
        'cico node get --arch %s --release %s --count %s '
        '--format json' % (arch, ver, count))
    _print('Get nodes output: %s' % out)
    hosts = json.loads(out)

    # The CICO session id is carried in the 'comment' field of each host
    # entry; persist it so the nodes can be returned/failed later.
    # (The 'with' statement closes the file; no explicit close() needed.)
    with open('env.properties', 'a') as f:
        f.write('DUFFY_SSID=%s' % hosts[0]['comment'])

    return [host['hostname'] for host in hosts]
 def assertOsProjectBuildStatus(self, project, expected_builds,
                                expected_state, retries=20, delay=60):
     """Assert that builds in an OpenShift project reach the given state.

     Polls ``oc get builds`` for *project* until every name listed in
     *expected_builds* appears in a row containing *expected_state*,
     retrying up to *retries* times with *delay* seconds between attempts.

     Args:
         project (str): OpenShift project (namespace) to inspect.
         expected_builds (list): Build names that must reach the state.
         expected_state (str): State token to look for, e.g. 'Complete'.
         retries (int): Maximum number of polling attempts. Defaults to 20.
         delay (int): Seconds to sleep between attempts. Defaults to 60.
     """
     # NOTE(review): banner text says "running" even when asserting other
     # states such as 'Complete'.
     print "=" * 30
     print "Test if openshift builds are running"
     print "=" * 30
     openshift_host = self.hosts['openshift']['host']
     oc_config = (
         '/var/lib/origin/openshift.local.config/master/admin.kubeconfig')
     cmd = (
         "sudo oc --config {config} project {project} > /dev/null && "
         "sudo oc --config {config} get builds".format(
             config=oc_config, project=project)
     )
     retry_count = 0
     success = False
     while retry_count < retries and success is False:
         if retry_count > 0:
             time.sleep(delay)
             _print("Retries: %d/%d" % (retry_count, retries))
         try:
             output = self.run_cmd(cmd, host=openshift_host)
             _print(output)
             # First line of `oc get builds` output is the column header;
             # collect build names whose row contains the expected state.
             lines = output.splitlines()
             pods = set([line.split()[0] for line in lines[1:]
                         if line and expected_state in line.split()])
             success = not set(
                 # FIXME: we're ignoring delivery build right now as it will
                 # need the atomic scan host for that.
                 # ['build-1', 'delivery-1', 'test-1'])
                 expected_builds
             ).difference(pods)
         except Exception:
             # A failed command (e.g. host unreachable) counts as "not yet";
             # keep polling until retries are exhausted.
             success = False
         retry_count += 1
     self.assertTrue(success)
def print_nodes():
    """Print node information recorded in the local env.properties file.

    The first three lines of the file are treated as a header and skipped;
    everything after them is echoed via ``_print``.
    """
    with open('env.properties') as prop_file:
        contents = prop_file.read()
    # Drop the three header lines and print the remainder as one message.
    _print('\n'.join(contents.splitlines()[3:]))
def get_nodes(ver="7", arch="x86_64", count=4):
    """Request CentOS nodes from Duffy over its HTTP API.

    Args:
        ver (str): CentOS version to install on the nodes. Defaults to "7".
        arch (str): CPU architecture of the nodes. Defaults to "x86_64".
        count (int): Number of nodes to request. Defaults to 4.

    Returns:
        list: Hostnames of the provisioned nodes (the 'hosts' field of
        the JSON response).

    Note:
        Appends the Duffy session id (DUFFY_SSID) to the local file
        env.properties and prints the raw API response.
    """
    get_nodes_url = "%s/Node/get?key=%s&ver=%s&arch=%s&count=%s" % (
        url_base, api, ver, arch, count)

    resp = urllib.urlopen(get_nodes_url).read()
    data = json.loads(resp)
    # 'with' closes the file on exit; the explicit close() was redundant.
    with open('env.properties', 'a') as f:
        f.write('DUFFY_SSID=%s' % data['ssid'])
    _print(resp)
    return data['hosts']
def fail_nodes():
    """Mark the current Duffy session's nodes as failed (i.e. keep them).

    Reads DUFFY_SSID back from the local env.properties file and calls the
    Duffy ``/Node/fail`` endpoint so the nodes are retained for inspection
    instead of being returned to the pool. Prints the raw API response.
    """
    with open('env.properties') as f:
        s = f.read()

    ssid = None
    for line in s.splitlines():
        # env.properties may contain blank lines; skip anything that is
        # not a key=value pair instead of crashing on unpack.
        if '=' not in line:
            continue
        # Split on the first '=' only, in case the value contains '='.
        key, value = line.split('=', 1)
        if key == 'DUFFY_SSID':
            ssid = value
            break

    fail_nodes_url = "{url_base}/Node/fail?key={key}&ssid={ssid}".format(
        url_base=url_base, key=api, ssid=ssid)
    resp = urllib.urlopen(fail_nodes_url).read()
    _print(resp)
    def provision(self, force=False, extra_args=""):
        """Provision CCCP nodes.

        By default provisioning runs only the first time; subsequent calls
        are skipped by the underlying helper unless *force* is set.

        Args:
            force (bool): Provision forcefully.
            extra_args (str): Extra cmd line args for running ansible playbook
        """
        ctrl = copy.copy(self.hosts['controller'])
        ctrl['hosts'] = None
        _print('Provisioning...')
        # The unqualified name resolves to the module-level provision()
        # helper here, not to this method.
        did_provision, output = provision(
            ctrl, force=force, extra_args=extra_args)
        if did_provision:
            # Show only the tail of the potentially very long output.
            _print(output[-1000:])
# Example #7
# 0
 def test_00_openshift_builds_are_complete(self):
     self.provision()
     print "=" * 30
     print "Test if openshift builds are running"
     print "=" * 30
     cmd = (
         "oc login https://{openshift}:8443 "
         "--insecure-skip-tls-verify=true "
         "-u test-admin -p test > /dev/null && "
         "oc project bamachrn-python-release > /dev/null && "
         "oc get pods"
     ).format(openshift=self.hosts[self.node]["host"])
     self.run_cmd(cmd)
     retries = 0
     success = False
     while retries < 10 and success is False:
         if retries > 0:
             time.sleep(60)
         _print("Retries: %d/100" % retries)
         try:
             output = self.run_cmd(cmd)
             _print(output)
             lines = output.splitlines()
             pods = set([line.split()[0] for line in lines[1:] if line and line.split()[2] == "Completed"])
             success = not set(
                 # FIXME: we're ignoring delivery build right now as it will
                 # need the atomic scan host for that.
                 # ['build-1-build', 'delivery-1-build', 'test-1-build'])
                 ["build-1-build", "test-1-build", "delivery-1-build"]
             ).difference(pods)
         except Exception:
             success = False
         retries += 1
     self.assertTrue(success)
     _print("Openshift builds completed successfully.")
# Example #8
# 0
 def test_01_openshift_builds_persist_after_provision(self):
     """Force a re-provision and verify earlier OpenShift builds survive."""
     self.provision(force=True, extra_args="--skip-tags=ci-build-test-project")
     banner = "=" * 30
     _print(banner)
     _print("Test if openshift builds persist after reprovision")
     _print(banner)
     oc_cmd = (
         "oc login https://%s:8443 --insecure-skip-tls-verify=true "
         "-u test-admin -p test > /dev/null && "
         "oc project bamachrn-python-release > /dev/null && "
         "oc get pods"
     ) % (self.hosts["openshift"]["host"])
     out = self.run_cmd(oc_cmd)
     _print(out)
     # First line of `oc get pods` is the column header; keep pod names.
     seen_pods = set(
         entry.split()[0] for entry in out.splitlines()[1:])
     # Builds missing after the re-provision; must be empty.
     missing = set(
         ["build-1-build", "test-1-build", "delivery-1-build"]
     ).difference(seen_pods)
     self.assertFalse(missing)
    def test_00_openshift_builds_are_complete(self):
        self.provision()
        self.cleanup_openshift()
        self.cleanup_beanstalkd()
        print self.run_cmd(
            'sudo java -jar /opt/jenkins-cli.jar '
            '-s http://localhost:8080 enable-job bamachrn-python-release',
            host=self.hosts['jenkins_master']['host'])
        print self.run_cmd(
            'sudo java -jar /opt/jenkins-cli.jar '
            '-s http://localhost:8080 '
            'build bamachrn-python-release -f -v',
            host=self.hosts['jenkins_master']['host'])
        print self.run_cmd(
            'sudo java -jar /opt/jenkins-cli.jar '
            '-s http://localhost:8080 disable-job bamachrn-python-release',
            host=self.hosts['jenkins_master']['host'])

        self.assertOsProjectBuildStatus(
            '53b1a8ddd3df5d4fd94756e8c20ae160e565a4b339bfb47165285955',
            ['build-1', 'test-1', 'delivery-1'],
            'Complete'
        )
        _print("Openshift builds completed successfully.")
def _if_debug():
    """Reserve the provisioned nodes for debugging when DEBUG is set.

    If a whitelisted GitHub user added the "dotests-debug" comment to the
    PR, the nodes are kept for DEBUG_SECONDS for debugging; once the time
    lapses the nodes are returned to the CICO infrastructure.
    """
    if not DEBUG:
        return
    _print('Reserving nodes for debugging...')
    _print('=' * 10 + 'Node Info' + '=' * 10)
    print_nodes()
    try:
        _print('Sleeping for %s seconds for debugging...'
               % str(DEBUG_SECONDS))
        sleep(DEBUG_SECONDS)
    except Exception as e:
        # Best effort: an interrupted sleep must not abort the teardown.
        _print(e)
    with open('env.properties', 'a') as props:
        props.write('\nBUILD_FAILED=true\n')
def print_nodes():
    """Print node information recorded in the local env.properties file."""
    with open('env.properties') as f:
        s = f.read()

    # The first three lines of the file are a header; skip them.
    _print('\n'.join(s.splitlines()[3:]))
def run():
    """Provision five CI nodes, set up the service and run the test suite.

    Requests nodes from CICO, records them in env.properties, prepares the
    controller node, provisions the service via ansible and finally runs
    the nosetests suite with host data exported through environment
    variables (CCCP_CI_PROVISIONED / CCCP_CI_HOSTS).
    """
    # Start from a clean slate in case a previous run left these set.
    os.environ.pop('CCCP_CI_PROVISIONED', None)
    os.environ.pop('CCCP_CI_HOSTS', None)

    nodes = get_nodes(count=5)

    # Fixed node-to-role assignment; the fifth node is the controller.
    jenkins_master_host = nodes[0]
    jenkins_slave_host = nodes[1]
    openshift_host = nodes[2]
    scanner_host = nodes[3]
    controller = nodes.pop()

    nodes_env = (
        "\nJENKINS_MASTER_HOST=%s\n"
        "JENKINS_SLAVE_HOST=%s\n"
        "OPENSHIFT_HOST=%s\n"
        "CONTROLLER=%s\n"
        "SCANNER_HOST=%s\n"
    ) % (jenkins_master_host, jenkins_slave_host,
         openshift_host, controller, scanner_host)

    # Record the assignment so later steps (and humans) can look it up.
    with open('env.properties', 'a') as f:
        f.write(nodes_env)

    hosts_data = {
        'openshift': {
            'host': openshift_host,
            'remote_user': '******'
        },
        'jenkins_master': {
            'host': jenkins_master_host,
            'remote_user': '******'
        },
        'jenkins_slave': {
            'host': jenkins_slave_host,
            'remote_user': '******'
        },
        'controller': {
            'host': controller,
            'user': '******',
            'workdir': '/root/container-pipeline-service',
            # relative to this workdir
            'inventory_path': 'hosts'
        }
    }

    _print(hosts_data)

    generate_ansible_inventory(jenkins_master_host,
                               jenkins_slave_host,
                               openshift_host,
                               scanner_host)

    # Flush firewall rules so the service components can talk freely.
    run_cmd('iptables -F', host=openshift_host)
    run_cmd('iptables -F', host=jenkins_slave_host)

    setup_controller(controller)

    provision(hosts_data['controller'])

    # Export state for the test processes spawned below.
    os.environ['CCCP_CI_PROVISIONED'] = "true"

    os.environ['CCCP_CI_HOSTS'] = json.dumps(hosts_data)

    run_cmd('~/venv/bin/nosetests ci/tests', stream=True)

    os.environ.pop('CCCP_CI_PROVISIONED', None)
    os.environ.pop('CCCP_CI_HOSTS', None)
                               openshift_host,
                               scanner_host)

    run_cmd('iptables -F', host=openshift_host)
    run_cmd('iptables -F', host=jenkins_slave_host)

    setup_controller(controller)

    provision(hosts_data['controller'])

    os.environ['CCCP_CI_PROVISIONED'] = "true"

    os.environ['CCCP_CI_HOSTS'] = json.dumps(hosts_data)

    run_cmd('~/venv/bin/nosetests ci/tests', stream=True)

    os.environ.pop('CCCP_CI_PROVISIONED', None)
    os.environ.pop('CCCP_CI_HOSTS', None)


if __name__ == '__main__':
    try:
        run()
    except Exception as e:
        # On failure keep the nodes alive (mark them failed in Duffy) so
        # they can be inspected, then show what was provisioned.
        _print('Build failed: %s' % e)
        _print('Reserving nodes for debugging...')
        fail_nodes()
        _print('=' * 10 + 'Node Info' + '=' * 10)
        print_nodes()
        sys.exit(1)
    def test_03_serialized_builds(self):
        """Verify parent and child pipeline builds are serialized.

        Triggers the kubernetes-master job, then manually triggers its
        child (kubernetes-apiserver) and asserts, via build states and a
        lock file under /srv/pipeline-logs, that the child only runs after
        the parent's delivery has finished.
        """
        self.cleanup_beanstalkd()
        self.cleanup_openshift()
        # Enable, run synchronously (-f -v) and re-disable the parent job.
        _print(self.run_cmd(
            'sudo java -jar /opt/jenkins-cli.jar -s '
            'http://localhost:8080 enable-job '
            'centos-kubernetes-master-latest',
            host=self.hosts['jenkins_master']['host']))
        _print(self.run_cmd(
            'sudo java -jar /opt/jenkins-cli.jar -s http://localhost:8080 '
            'build centos-kubernetes-master-latest -f -v',
            host=self.hosts['jenkins_master']['host']
        ))
        _print(self.run_cmd(
            'sudo java -jar /opt/jenkins-cli.jar -s '
            'http://localhost:8080 disable-job '
            'centos-kubernetes-master-latest',
            host=self.hosts['jenkins_master']['host']))

        time.sleep(5)

        # We are not testing jenkins' feature to trigger child builds,
        # so, we are triggering the child build manually, so that
        # we can avoid race condition between the builds
        _print(self.run_cmd(
            'sudo java -jar /opt/jenkins-cli.jar -s '
            'http://localhost:8080 enable-job '
            'centos-kubernetes-apiserver-latest',
            host=self.hosts['jenkins_master']['host']))
        _print(self.run_cmd(
            'sudo java -jar /opt/jenkins-cli.jar -s http://localhost:8080 '
            'build centos-kubernetes-apiserver-latest -f -v',
            host=self.hosts['jenkins_master']['host']
        ))
        _print(self.run_cmd(
            'sudo java -jar /opt/jenkins-cli.jar -s '
            'http://localhost:8080 disable-job '
            'centos-kubernetes-apiserver-latest',
            host=self.hosts['jenkins_master']['host']))

        # OpenShift project names are the sha224 hash of the job name.
        k8s_master_os_project = hashlib.sha224(
            'centos-kubernetes-master-latest').hexdigest()
        k8s_apiserver_os_project = hashlib.sha224(
            'centos-kubernetes-apiserver-latest').hexdigest()

        # Wait for build worker to run build
        self.assertOsProjectBuildStatus(
            k8s_master_os_project, ['build-1'], 'Running', retries=40,
            delay=15)

        time.sleep(20)

        # Then assert that lock file for centos-kubernetes-master-latest exists
        self.assertTrue(self.run_cmd(
            'ls /srv/pipeline-logs/centos-kubernetes-master-latest'))
        self.assertOsProjectBuildStatus(
            k8s_master_os_project, ['build-1', 'test-1', 'delivery-1'],
            'Complete', retries=80, delay=15
        )

        # Assert that delivery of centos-kubernetes-master-latest triggered
        # build for centos-kubernetes-apiserver-latest
        self.assertOsProjectBuildStatus(
            k8s_apiserver_os_project, ['build-1'], 'Running', retries=40,
            delay=5)

        # Assert that lock file for centos-kubernetes-master-latest does not
        # exist anymore
        self.assertRaises(
            Exception,
            self.run_cmd,
            'ls /srv/pipeline-logs/centos-kubernetes-master-latest')
def clean_nodes():
    """Return the reserved nodes back to the CICO pool.

    Uses the DUFFY_SSID session id recorded at provisioning time and
    authenticates with ~/duffy.key.
    """
    _print("Cleaning nodes")
    done_cmd = (
        'export CICO_API_KEY=`cat ~/duffy.key` && '
        'cico node done %s' % DUFFY_SSID)
    _print(run_cmd(done_cmd))
                   % str(DEBUG_SECONDS))
            sleep(DEBUG_SECONDS)
        except Exception as e:
            _print(e)
        with open('env.properties', 'a') as f:
            f.write('\nBUILD_FAILED=true\n')


if __name__ == '__main__':
    try:
        # get nodes from CICO infra
        while CICO_GET_RETRY_COUNT > 0:
            try:
                nodes = get_nodes(count=5)
            except Exception as e:
                _print("Failed to get nodes from CICO. Error %s" % str(e))
                CICO_GET_RETRY_COUNT -= 1
                _print("Retrying get nodes from CICO, count=%d" %
                       CICO_GET_RETRY_COUNT)
                # sleep for one minute
                sleep(int(60))
            else:
                _print(str(nodes))
                break
        if CICO_GET_RETRY_COUNT == 0:
            _print('Build failed while receiving nodes from CICO:\n%s' % e)
            # _if_debug is not needed, since we dont have even nodes to debug
            sys.exit(1)
    except Exception as e:
        _print('Build failed while receiving nodes from CICO:\n%s' % e)
        # _if_debug is not needed, since we dont have even nodes to debug