Esempio n. 1
0
def OnosTest():
    """Run the ONOS FUNCvirNet test suites and push the results to the DB.

    Exits the process with -1 when the overall status is FAIL.
    """
    start_time = time.time()
    stop_time = start_time
    # Joid exposes the ONOS controller IP differently from other installers.
    if INSTALLER_TYPE == "joid":
        logger.debug("Installer is Joid")
        SetOnosIpForJoid()
    else:
        SetOnosIp()
    RunScript("FUNCvirNetNB")
    RunScript("FUNCvirNetNBL3")
    # Initialize before the try-block so the final check below cannot hit a
    # NameError when GetResult() raises before `status` is assigned.
    status = "FAIL"
    try:
        logger.debug("Push ONOS results into DB")
        # TODO check path result for the file
        result = GetResult()
        stop_time = time.time()

        # ONOS success criteria = all tests OK
        # i.e. FUNCvirNet & FUNCvirNetL3
        try:
            if (result['FUNCvirNet']['result'] == "Success"
                    and result['FUNCvirNetL3']['result'] == "Success"):
                status = "PASS"
        except (KeyError, TypeError):
            # Result dict is missing the expected entries.
            logger.error("Unable to set ONOS criteria")

        ft_utils.push_results_to_db("functest", "onos", start_time, stop_time,
                                    status, result)

    except Exception:
        # Best-effort: a DB outage must not crash the test run itself.
        logger.error("Error pushing results into Database")

    if status == "FAIL":
        EXIT_CODE = -1
        exit(EXIT_CODE)
Esempio n. 2
0
def main():
    """Run the ONOS FUNCvirNet test suites and push the results to the DB."""
    start_time = time.time()
    stop_time = start_time
    # DownloadCodes()
    # Joid exposes the ONOS controller IP differently from other installers.
    if args.installer == "joid":
        logger.debug("Installer is Joid")
        SetOnosIpForJoid()
    else:
        SetOnosIp()
    RunScript("FUNCvirNetNB")
    RunScript("FUNCvirNetNBL3")

    try:
        logger.debug("Push ONOS results into DB")
        # TODO check path result for the file
        result = GetResult()
        stop_time = time.time()

        # ONOS success criteria = all tests OK
        # i.e. FUNCvirNet & FUNCvirNetL3
        status = "failed"
        try:
            if (result['FUNCvirNet']['result'] == "Success"
                    and result['FUNCvirNetL3']['result'] == "Success"):
                status = "passed"
        except (KeyError, TypeError):
            # Result dict is missing the expected entries.
            logger.error("Unable to set ONOS criteria")

        functest_utils.push_results_to_db("functest", "onos", logger,
                                          start_time, stop_time, status,
                                          result)

    except Exception:
        # Best-effort: a DB outage must not abort the run. Narrowed from a
        # bare except so SystemExit/KeyboardInterrupt propagate.
        logger.error("Error pushing results into Database")
Esempio n. 3
0
def push_results(testname, start_time, end_time, criteria, details):
    """Publish the results of *testname* to the results database."""
    logger.info("Push testcase '%s' results into the DB...\n" % testname)
    ft_utils.push_results_to_db(
        "sdnvpn", testname, start_time, end_time, criteria, details)
Esempio n. 4
0
def step_failure(step_name, error_msg):
    """Record a failed step, push the partial results to the DB and exit."""
    logger.error(error_msg)
    set_result(step_name, 0, error_msg)
    # On failure the recorded start/stop times are only approximate.
    stop_time = time.time()
    # A failure during the signalling test still counts as an overall PASS.
    verdict = "PASS" if step_name == "sig_test" else "FAIL"
    ft_utils.push_results_to_db("functest", "vims", TESTCASE_START_TIME,
                                stop_time, verdict, RESULTS)
    exit(-1)
 def _test_push_results_to_db_missing_env(self, env_var):
     # Helper: verify that push_results_to_db logs a "Please set env var"
     # error when the mandatory environment variable `env_var` is absent.
     dic = self._get_env_dict(env_var)
     CONST.__setattr__('results_test_db_url', self.db_url)
     # clear=True drops every other env var so only `dic` is visible.
     with mock.patch.dict(os.environ,
                          dic,
                          clear=True), \
             mock.patch('functest.utils.functest_utils.logger.error') \
             as mock_logger_error:
         functest_utils.push_results_to_db(self.project, self.case_name,
                                           self.start_date, self.stop_date,
                                           self.result, self.details)
         mock_logger_error.assert_called_once_with("Please set env var: " +
                                                   str("\'" + env_var +
                                                       "\'"))
Esempio n. 6
0
def step_failure(step_name, error_msg):
    """Record a failed step, push the partial results to the DB and exit."""
    logger.error(error_msg)
    set_result(step_name, 0, error_msg)
    # On failure the recorded start/stop times are only approximate.
    stop_time = time.time()
    # A failure during the signalling test still counts as an overall PASS.
    verdict = "PASS" if step_name == "sig_test" else "FAIL"
    functest_utils.push_results_to_db("functest", "vHello", None,
                                      TESTCASE_START_TIME, stop_time,
                                      verdict, RESULTS)
    exit(-1)
Esempio n. 7
0
    def push_to_db(self):
        """Push the results of TestCase to the DB.

        Publishes the recorded results and reports whether the upload
        succeeded. It could be overriden if the common implementation is
        not suitable. The following attributes must be set before pushing
        the results to DB:
            * project_name,
            * case_name,
            * criteria,
            * start_time,
            * stop_time.

        Returns:
            TestCase.EX_OK if results were pushed to DB.
            TestCase.EX_PUSH_TO_DB_ERROR otherwise.
        """
        try:
            # Every mandatory attribute must be truthy before publishing;
            # a missing one raises AssertionError, handled below.
            for field in (self.project_name, self.case_name, self.criteria,
                          self.start_time, self.stop_time):
                assert field
            pushed = ft_utils.push_results_to_db(
                self.project_name, self.case_name, self.start_time,
                self.stop_time, self.criteria, self.details)
            if not pushed:
                self.logger.error("The results cannot be pushed to DB")
                return TestCase.EX_PUSH_TO_DB_ERROR
            self.logger.info("The results were successfully pushed to DB")
            return TestCase.EX_OK
        except Exception:  # pylint: disable=broad-except
            self.logger.exception("The results cannot be pushed to DB")
            return TestCase.EX_PUSH_TO_DB_ERROR
Esempio n. 8
0
def main():
    """Run the doctor test suite and publish its outcome to the DB."""
    cmd = 'cd %s/tests && ./run.sh' % DOCTOR_REPO
    start_time = time.time()

    ret = functest_utils.execute_command(cmd, logger, exit_on_error=False)

    stop_time = time.time()
    duration = round(stop_time - start_time, 1)
    # A zero return value from the runner is treated as success here.
    if ret == 0:
        logger.info("doctor OK")
        test_status = 'OK'
    else:
        logger.info("doctor FAILED")
        test_status = 'NOK'

    details = {'timestart': start_time,
               'duration': duration,
               'status': test_status}
    pod_name = functest_utils.get_pod_name(logger)
    scenario = functest_utils.get_scenario(logger)
    version = functest_utils.get_version(logger)
    build_tag = functest_utils.get_build_tag(logger)

    status = "PASS" if details['status'] == "OK" else "FAIL"

    logger.info("Pushing Doctor results: TEST_DB_URL=%(db)s pod_name=%(pod)s "
                "version=%(v)s scenario=%(s)s criteria=%(c)s details=%(d)s" % {
                    'db': TEST_DB_URL,
                    'pod': pod_name,
                    'v': version,
                    's': scenario,
                    'c': status,
                    'b': build_tag,
                    'd': details,
                })
    functest_utils.push_results_to_db(
        "doctor", "doctor-notification", logger, start_time, stop_time,
        status, details)
Esempio n. 9
0
 def test_wrong_shema(self):
     # push_results_to_db must return False for a DB URL that is not a
     # usable endpoint ('/dev/null' here).  (sic: "shema" in the name.)
     CONST.__setattr__('results_test_db_url', '/dev/null')
     self.assertFalse(
         functest_utils.push_results_to_db(self._project_name,
                                           self._case_name,
                                           self._start_time,
                                           self._stop_time, self._result,
                                           {}))
Esempio n. 10
0
def main():
    """Run the doctor test suite and publish its outcome to the DB."""
    cmd = 'cd %s/tests && ./run.sh' % DOCTOR_REPO
    start_time = time.time()

    ret = functest_utils.execute_command(cmd, logger, exit_on_error=False)

    stop_time = time.time()
    duration = round(stop_time - start_time, 1)
    # execute_command() returns the command's exit status: 0 means success.
    # Fixed: the original `if ret:` treated a NON-zero exit code as OK,
    # inverting the verdict (compare the sibling doctor/copper runners,
    # which all test `ret == 0`).
    if ret == 0:
        logger.info("doctor OK")
        test_status = 'OK'
    else:
        logger.info("doctor FAILED")
        test_status = 'NOK'

    details = {
        'timestart': start_time,
        'duration': duration,
        'status': test_status,
    }
    pod_name = functest_utils.get_pod_name(logger)
    scenario = functest_utils.get_scenario(logger)
    version = functest_utils.get_version(logger)
    build_tag = functest_utils.get_build_tag(logger)

    status = "failed"
    if details['status'] == "OK":
        status = "passed"

    logger.info("Pushing Doctor results: TEST_DB_URL=%(db)s pod_name=%(pod)s "
                "version=%(v)s scenario=%(s)s criteria=%(c)s details=%(d)s" % {
                    'db': TEST_DB_URL,
                    'pod': pod_name,
                    'v': version,
                    's': scenario,
                    'c': status,
                    'b': build_tag,
                    'd': details,
                })
    functest_utils.push_results_to_db("doctor",
                                      "doctor-notification",
                                      logger,
                                      start_time,
                                      stop_time,
                                      status,
                                      details)
Esempio n. 11
0
 def test_makedirs_exc(self, *args):
     # If creating the dump directory raises (presumably os.makedirs is
     # patched via decorator into args[0] -- TODO confirm against the
     # decorators), push_results_to_db must return False, not propagate.
     CONST.__setattr__('results_test_db_url', URL)
     self.assertFalse(
         functest_utils.push_results_to_db(self._project_name,
                                           self._case_name,
                                           self._start_time,
                                           self._stop_time, self._result,
                                           {}))
     # The patched callable must have been invoked with the dump dir.
     args[0].assert_called_once_with(DIR)
Esempio n. 12
0
def PushDB(status, info):
    """Push the ONOS SFC verdict plus error info to the results DB.

    NOTE(review): relies on a module-level ``start_time`` being set by the
    caller before this runs -- confirm it is always defined.
    """
    logger.info("Summary :")
    try:
        logger.debug("Push ONOS SFC results into DB")
        stop_time = time.time()

        # ONOS SFC success criteria = all tests OK
        duration = round(stop_time - start_time, 1)
        logger.info("Result is " + status)
        ft_utils.push_results_to_db("functest",
                                    "onos_sfc",
                                    start_time,
                                    stop_time,
                                    status,
                                    details={'duration': duration,
                                             'error': info})
    except Exception:
        # Best-effort: narrowed from a bare except so that
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        logger.error("Error pushing results into Database")
Esempio n. 13
0
def main():
    """Run the moon OpenStack and federation tests; return True on success."""
    start_time = time.time()

    result_os = test_moon_openstack()
    result_odl = test_federation()

    stop_time = time.time()
    duration = round(stop_time - start_time, 1)
    # OpenStack signals success with a 0 code, federation with a truthy flag.
    passed = result_os[0] == 0 and bool(result_odl[0])
    if passed:
        logger.info("OS MOON PASSED")
        test_status = 'PASS'
    else:
        logger.info("OS MOON ERROR")
        test_status = 'FAIL'
        logger.info("Errors from OpenStack tests:")
        logger.info(result_os[1])
        logger.info("Errors from Federation tests:")
        logger.info(result_odl[1])

    details = {'timestart': start_time,
               'duration': duration,
               'status': test_status,
               'results': {'openstack': result_os,
                           'opendaylight': result_odl}}

    functest_utils.logger_test_results("moon",
                                       "moon_authentication",
                                       test_status, details)
    if args.report:
        functest_utils.push_results_to_db("moon",
                                          "moon_authentication",
                                          start_time,
                                          stop_time,
                                          test_status,
                                          details)
        logger.info("Moon results pushed to DB")

    return passed
Esempio n. 14
0
def main():
    """Run the COPPER suite, log and optionally report results, then exit.

    Exits the process with 0 on success and -1 when the suite failed.
    """
    cmd = "%s/tests/run.sh %s/tests" % (COPPER_REPO_DIR, COPPER_REPO_DIR)

    start_time = time.time()

    log_file = RESULTS_DIR + "/copper.log"
    ret_val = functest_utils.execute_command(cmd,
                                             output_file=log_file)

    stop_time = time.time()
    duration = round(stop_time - start_time, 1)
    if ret_val == 0:
        logger.info("COPPER PASSED")
        test_status = 'PASS'
    else:
        logger.info("COPPER FAILED")
        test_status = 'FAIL'

    details = {
        'timestart': start_time,
        'duration': duration,
        'status': test_status,
    }
    functest_utils.logger_test_results("Copper",
                                       "copper-notification",
                                       details['status'], details)
    try:
        if args.report:
            functest_utils.push_results_to_db("copper",
                                              "copper-notification",
                                              start_time,
                                              stop_time,
                                              details['status'],
                                              details)
            logger.info("COPPER results pushed to DB")
    except Exception:
        # Best-effort reporting: narrowed from a bare except so that
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        logger.error("Error pushing results into Database '%s'"
                     % sys.exc_info()[0])

    if ret_val != 0:
        sys.exit(-1)

    sys.exit(0)
Esempio n. 15
0
def main():
    """Run the COPPER test suite and publish its outcome to the DB."""
    cmd = ('%s/tests/run.sh' % COPPER_REPO)
    start_time = time.time()

    ret = functest_utils.execute_command(cmd, logger, exit_on_error=False)

    stop_time = time.time()
    duration = round(stop_time - start_time, 1)
    # A zero return value from the runner is treated as success here.
    passed = (ret == 0)
    test_status = 'PASS' if passed else 'FAIL'
    logger.info("COPPER PASSED" if passed else "COPPER FAILED")

    details = {'timestart': start_time,
               'duration': duration,
               'status': test_status}
    pod_name = functest_utils.get_pod_name(logger)
    scenario = functest_utils.get_scenario(logger)
    version = functest_utils.get_version(logger)
    build_tag = functest_utils.get_build_tag(logger)

    logger.info("Pushing COPPER results: TEST_DB_URL=%(db)s pod_name=%(pod)s "
                "version=%(v)s scenario=%(s)s criteria=%(c)s details=%(d)s" % {
                    'db': TEST_DB_URL,
                    'pod': pod_name,
                    'v': version,
                    's': scenario,
                    'c': details['status'],
                    'b': build_tag,
                    'd': details,
                })
    functest_utils.push_results_to_db(
        "COPPER", "COPPER-notification", logger, start_time, stop_time,
        details['status'], details)
Esempio n. 16
0
def main():
    """Run the doctor test case, optionally report to the DB, then exit."""
    exit_code = -1

    # If an image name is explicitly configured for the doctor suite,
    # expose it to the tests through the environment.
    if 'doctor' in functest_yaml and 'image_name' in functest_yaml['doctor']:
        os.environ["IMAGE_NAME"] = functest_yaml['doctor']['image_name']

    cmd = 'cd %s/tests && ./run.sh' % DOCTOR_REPO_DIR
    log_file = RESULTS_DIR + "/doctor.log"

    start_time = time.time()

    ret = functest_utils.execute_command(cmd, info=True, output_file=log_file)

    stop_time = time.time()
    duration = round(stop_time - start_time, 1)
    # A zero return value from the runner is treated as success here.
    if ret == 0:
        logger.info("Doctor test case OK")
        test_status = 'OK'
        exit_code = 0
    else:
        logger.info("Doctor test case FAILED")
        test_status = 'NOK'

    details = {'timestart': start_time,
               'duration': duration,
               'status': test_status}
    status = "PASS" if details['status'] == "OK" else "FAIL"
    functest_utils.logger_test_results("Doctor", "doctor-notification", status,
                                       details)
    if args.report:
        functest_utils.push_results_to_db("doctor", "doctor-notification",
                                          start_time, stop_time, status,
                                          details)
        logger.info("Doctor results pushed to DB")

    exit(exit_code)
Esempio n. 17
0
def main():
    """Run the ONOS suites (plus SFC on the sfc scenario) and push results."""
    start_time = time.time()
    stop_time = start_time
    # DownloadCodes()
    # if args.installer == "joid":
    # Joid exposes the ONOS controller IP differently from other installers.
    if INSTALLER_TYPE == "joid":
        logger.debug("Installer is Joid")
        SetOnosIpForJoid()
    else:
        SetOnosIp()
    RunScript("FUNCvirNetNB")
    RunScript("FUNCvirNetNBL3")
    if DEPLOY_SCENARIO == "os-onos-sfc-ha":
        CreateImage()
        SetSfcConf()
        SfcTest()
    try:
        logger.debug("Push ONOS results into DB")
        # TODO check path result for the file
        result = GetResult()
        stop_time = time.time()

        # ONOS success criteria = all tests OK
        # i.e. FUNCvirNet & FUNCvirNetL3
        status = "FAIL"
        try:
            # Fixed: the assignment carried a misleading extra indent level.
            if (result['FUNCvirNet']['result'] == "Success" and
                    result['FUNCvirNetL3']['result'] == "Success"):
                status = "PASS"
        except (KeyError, TypeError):
            # Result dict is missing the expected entries.
            logger.error("Unable to set ONOS criteria")

        functest_utils.push_results_to_db("functest",
                                          "onos",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          result)

    except Exception:
        # Best-effort: a DB outage must not crash the test run itself.
        logger.error("Error pushing results into Database")
 def test_push_results_to_db_default(self):
     # Nominal case: complete environment plus a mocked DB URL and HTTP
     # POST -- push_results_to_db should report success.
     dic = self._get_env_dict(None)
     CONST.__setattr__('results_test_db_url', self.db_url)
     with mock.patch.dict(os.environ,
                          dic,
                          clear=True), \
             mock.patch('functest.utils.functest_utils.requests.post'):
         self.assertTrue(
             functest_utils.push_results_to_db(self.project, self.case_name,
                                               self.start_date,
                                               self.stop_date, self.result,
                                               self.details))
Esempio n. 19
0
 def test_push_results_to_db_default(self):
     # Nominal case: complete environment, get_db_url mocked to a valid
     # URL, HTTP POST mocked -- push_results_to_db should report success.
     dic = self._get_env_dict(None)
     with mock.patch('functest.utils.functest_utils.get_db_url',
                     return_value=self.db_url), \
             mock.patch.dict(os.environ,
                             dic,
                             clear=True), \
             mock.patch('functest.utils.functest_utils.requests.post'):
         self.assertTrue(
             functest_utils.push_results_to_db(self.project, self.case_name,
                                               self.start_date,
                                               self.stop_date,
                                               self.criteria, self.details))
Esempio n. 20
0
 def test_http_shema(self, *args):
     # An http:// DB URL must be accepted, and the serialized payload
     # POSTed once with a JSON content type. (sic: "shema" in the name.)
     CONST.__setattr__('results_test_db_url', 'http://127.0.0.1')
     self.assertTrue(
         functest_utils.push_results_to_db(self._project_name,
                                           self._case_name,
                                           self._start_time,
                                           self._stop_time, self._result,
                                           {}))
     # args[0]/args[1] are the decorator-injected mocks (innermost first).
     args[1].assert_called_once_with()
     args[0].assert_called_once_with(
         'http://127.0.0.1',
         data=self._get_json(),
         headers={'Content-Type': 'application/json'})
Esempio n. 21
0
def main():
    """Run the ONOS FUNCvirNet test suites and push the results to the DB."""
    start_time = time.time()
    stop_time = start_time
    # DownloadCodes()
    # Joid exposes the ONOS controller IP differently from other installers.
    if args.installer == "joid":
        logger.debug("Installer is Joid")
        SetOnosIpForJoid()
    else:
        SetOnosIp()
    RunScript("FUNCvirNetNB")
    RunScript("FUNCvirNetNBL3")

    try:
        logger.debug("Push ONOS results into DB")
        # TODO check path result for the file
        result = GetResult()
        stop_time = time.time()

        # ONOS success criteria = all tests OK
        # i.e. FUNCvirNet & FUNCvirNetL3
        status = "failed"
        try:
            # Fixed: the assignment carried a misleading extra indent level.
            if (result['FUNCvirNet']['result'] == "Success" and
                    result['FUNCvirNetL3']['result'] == "Success"):
                status = "passed"
        except (KeyError, TypeError):
            # Result dict is missing the expected entries.
            logger.error("Unable to set ONOS criteria")

        functest_utils.push_results_to_db("functest",
                                          "onos",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          result)

    except Exception:
        # Best-effort: a DB outage must not abort the run.
        logger.error("Error pushing results into Database")
 def test_push_results_to_db_request_post_exception(self):
     # When requests.post raises an arbitrary Exception, the helper must
     # return False and log an error instead of propagating it.
     dic = self._get_env_dict(None)
     CONST.__setattr__('results_test_db_url', self.db_url)
     with mock.patch.dict(os.environ,
                          dic,
                          clear=True), \
             mock.patch('functest.utils.functest_utils.logger.error') \
             as mock_logger_error, \
             mock.patch('functest.utils.functest_utils.requests.post',
                        side_effect=Exception):
         self.assertFalse(
             functest_utils.push_results_to_db(self.project, self.case_name,
                                               self.start_date,
                                               self.stop_date, self.result,
                                               self.details))
         self.assertTrue(mock_logger_error.called)
Esempio n. 23
0
 def push_to_db(self):
     """Publish the recorded results to the test DB.

     Requires project_name, case_name, criteria, start_time and
     stop_time to be set. Returns TestcaseBase.EX_OK on success and
     TestcaseBase.EX_PUSH_TO_DB_ERROR otherwise.
     """
     try:
         # Every mandatory attribute must be truthy before publishing;
         # a missing one raises AssertionError, handled below.
         for field in (self.project_name, self.case_name, self.criteria,
                       self.start_time, self.stop_time):
             assert field
         pushed = ft_utils.push_results_to_db(
             self.project_name, self.case_name, self.start_time,
             self.stop_time, self.criteria, self.details)
         if not pushed:
             self.logger.error("The results cannot be pushed to DB")
             return TestcaseBase.EX_PUSH_TO_DB_ERROR
         self.logger.info("The results were successfully pushed to DB")
         return TestcaseBase.EX_OK
     except Exception:
         self.logger.exception("The results cannot be pushed to DB")
         return TestcaseBase.EX_PUSH_TO_DB_ERROR
Esempio n. 24
0
 def test_push_results_to_db_incorrect_buildtag(self):
     # A BUILD_TAG that does not match the expected CI format must make
     # push_results_to_db fail with a dedicated error message.
     dic = self._get_env_dict(None)
     dic['BUILD_TAG'] = 'incorrect_build_tag'
     with mock.patch('functest.utils.functest_utils.get_db_url',
                     return_value=self.db_url), \
             mock.patch.dict(os.environ,
                             dic,
                             clear=True), \
             mock.patch('functest.utils.functest_utils.logger.error') \
             as mock_logger_error:
         self.assertFalse(
             functest_utils.push_results_to_db(self.project, self.case_name,
                                               self.start_date,
                                               self.stop_date,
                                               self.criteria, self.details))
         mock_logger_error.assert_called_once_with("Please fix BUILD_TAG"
                                                   " env var: incorrect_"
                                                   "build_tag")
Esempio n. 25
0
 def _test_dump(self, *args):
     # Helper: when dumping is active, the request must be appended to
     # FILE instead of (or in addition to) being sent, and the written
     # text must contain the HTTP verb and the JSON payload.
     CONST.__setattr__('results_test_db_url', URL)
     # Patch builtins-style 'open' inside the decorators module so no
     # real file is touched (create=True because the name may not exist).
     with mock.patch.object(decorators,
                            'open',
                            mock.mock_open(),
                            create=True) as mock_open:
         self.assertTrue(
             functest_utils.push_results_to_db(self._project_name,
                                               self._case_name,
                                               self._start_time,
                                               self._stop_time,
                                               self._result, {}))
     mock_open.assert_called_once_with(FILE, 'a')
     handle = mock_open()
     call_args, _ = handle.write.call_args
     self.assertIn('POST', call_args[0])
     self.assertIn(self._get_json(), call_args[0])
     args[0].assert_called_once_with()
 def test_push_results_to_db_request_post_failed(self):
     # When requests.post raises RequestException, the helper must return
     # False and log a "Pushing Result to DB ... failed" error.
     dic = self._get_env_dict(None)
     CONST.__setattr__('results_test_db_url', self.db_url)
     with mock.patch.dict(os.environ,
                          dic,
                          clear=True), \
             mock.patch('functest.utils.functest_utils.logger.error') \
             as mock_logger_error, \
             mock.patch('functest.utils.functest_utils.requests.post',
                        side_effect=requests.RequestException):
         self.assertFalse(
             functest_utils.push_results_to_db(self.project, self.case_name,
                                               self.start_date,
                                               self.stop_date, self.result,
                                               self.details))
         # The exact message embeds the URL, so match it with a regex.
         mock_logger_error.assert_called_once_with(
             test_utils.RegexMatch("Pushing "
                                   "Result to"
                                   " DB"
                                   "(\S+\s*) "
                                   "failed:"))
Esempio n. 27
0
def main():
    """Boot two VMs; VM2 pings VM1 via cloud-init userdata (vPing test).

    Success is detected by polling VM2's console log for 'vPing OK'.
    Optionally pushes results to the test DB, then exits the process
    with 0 (OK), -2 (userdata/metadata unsupported) or -1 (failure).
    """

    # Build one OpenStack API client per service from its credentials.
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    # Glance v1 client, authenticated with the Keystone session token.
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    EXIT_CODE = -1

    image_id = None
    flavor = None

    # Check if the given image exists
    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    if image_id != '':
        logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME)
        # Remember we did not create the image, so it is not ours to clean.
        global image_exists
        image_exists = True
    else:
        logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create a Glance image...")
            return(EXIT_CODE)
        logger.debug("Image '%s' with ID=%s created successfully."
                     % (GLANCE_IMAGE_NAME, image_id))

    # Private network + subnet + router the two VMs will share.
    network_dic = openstack_utils.create_network_full(logger,
                                                      neutron_client,
                                                      PRIVATE_NET_NAME,
                                                      PRIVATE_SUBNET_NAME,
                                                      ROUTER_NAME,
                                                      PRIVATE_SUBNET_CIDR)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        return(EXIT_CODE)
    network_id = network_dic["net_id"]

    create_security_group(neutron_client)

    # Check if the given flavor exists
    try:
        flavor = nova_client.flavors.find(name=FLAVOR)
        logger.info("Flavor found '%s'" % FLAVOR)
    except:
        logger.error("Flavor '%s' not found." % FLAVOR)
        logger.info("Available flavors are: ")
        # NOTE(review): 'nova_client.flavor' looks like a typo for
        # 'nova_client.flavors' -- this error path would likely raise
        # AttributeError before listing anything. Confirm and fix.
        pMsg(nova_client.flavor.list())
        exit(-1)

    # Deleting instances if they exist
    servers = nova_client.servers.list()
    for server in servers:
        if server.name == NAME_VM_1 or server.name == NAME_VM_2:
            logger.info("Instance %s found. Deleting..." % server.name)
            server.delete()

    # boot VM 1
    # basic boot
    # tune (e.g. flavor, images, network) to your specific
    # openstack configuration here
    # we consider start time at VM1 booting
    start_time = time.time()
    stop_time = start_time
    logger.info("vPing Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time).strftime(
            '%Y-%m-%d %H:%M:%S')))

    # create VM
    logger.info("Creating instance '%s'..." % NAME_VM_1)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (NAME_VM_1, flavor, image_id, network_id))
    vm1 = nova_client.servers.create(
        name=NAME_VM_1,
        flavor=flavor,
        image=image_id,
        config_drive=True,
        nics=[{"net-id": network_id}]
    )

    # wait until VM status is active
    if not waitVmActive(nova_client, vm1):

        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_1)

    # Retrieve IP of first VM
    test_ip = vm1.networks.get(PRIVATE_NET_NAME)[0]
    logger.debug("Instance '%s' got %s" % (NAME_VM_1, test_ip))

    # boot VM 2
    # we will boot then execute a ping script with cloud-init
    # the long chain corresponds to the ping procedure converted with base 64
    # tune (e.g. flavor, images, network) to your specific openstack
    #  configuration here
    # Shell script handed to cloud-init: loop pinging VM1 and print
    # 'vPing OK' to the console once a reply arrives.
    u = ("#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n "
         "RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n  echo 'vPing OK'\n "
         "break\n else\n  echo 'vPing KO'\n fi\n sleep 1\ndone\n" % test_ip)

    # create VM
    logger.info("Creating instance '%s'..." % NAME_VM_2)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s "
        "\n userdata= \n%s" % (
            NAME_VM_2, flavor, image_id, network_id, u))
    vm2 = nova_client.servers.create(
        name=NAME_VM_2,
        flavor=flavor,
        image=image_id,
        nics=[{"net-id": network_id}],
        config_drive=True,
        userdata=u
    )

    if not waitVmActive(nova_client, vm2):
        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_2)

    logger.info("Waiting for ping...")
    sec = 0
    metadata_tries = 0
    console_log = vm2.get_console_output()
    duration = 0
    stop_time = time.time()

    # Poll VM2's console once per second until success, metadata failure
    # or the PING_TIMEOUT expires.
    while True:
        time.sleep(1)
        console_log = vm2.get_console_output()
        # print "--"+console_log
        # report if the test is failed
        if "vPing OK" in console_log:
            logger.info("vPing detected!")

            # we consider start time at VM1 booting
            stop_time = time.time()
            duration = round(stop_time - start_time, 1)
            logger.info("vPing duration:'%s'" % duration)
            EXIT_CODE = 0
            break
        elif ("failed to read iid from metadata" in console_log or
              metadata_tries > 5):
            # Metadata service unusable: userdata never ran; abort with -2.
            EXIT_CODE = -2
            break
        elif sec == PING_TIMEOUT:
            logger.info("Timeout reached.")
            break
        elif sec % 10 == 0:
            if "request failed" in console_log:
                logger.debug("It seems userdata is not supported in "
                             "nova boot. Waiting a bit...")
                metadata_tries += 1
            else:
                logger.debug("Pinging %s. Waiting for response..." % test_ip)
        sec += 1

    test_status = "NOK"
    if EXIT_CODE == 0:
        logger.info("vPing OK")
        test_status = "OK"
    elif EXIT_CODE == -2:
        duration = 0
        logger.info("Userdata is not supported in nova boot. Aborting test...")
    else:
        duration = 0
        logger.error("vPing FAILED")

    if args.report:
        try:
            logger.debug("Pushing vPing userdata results into DB...")
            functest_utils.push_results_to_db("functest",
                                              "vping_userdata",
                                              logger,
                                              start_time,
                                              stop_time,
                                              test_status,
                                              details={'timestart': start_time,
                                                       'duration': duration,
                                                       'status': test_status})
        except:
            logger.error("Error pushing results into Database '%s'"
                         % sys.exc_info()[0])

    exit(EXIT_CODE)
Esempio n. 28
0
def run_task(test_name):
    """Launch Rally for a single task scenario and store its results.

    Runs ``rally task start`` on the opnfv scenario *test_name*, writes the
    HTML report and the JSON results under RESULTS_DIR and, when
    ``args.report`` is set, pushes the detailed results into the test DB.

    :param test_name: name of the rally test scenario
    :return: None (exits the process when task/scenario files are missing)
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()
    stop_time = start_time

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        # The task did not start; validate it so the failure reason is logged.
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line : {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        # BUGFIX: the original mixed a '%s' placeholder with str.format(),
        # which logged a literal '%s' instead of the directory name.
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    # parse the JSON result directly from the string we already hold
    # instead of re-reading the file we just wrote (same content)
    json_data = json.loads(json_results)

    status = "failed"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "passed"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push results in payload of testcase
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        functest_utils.push_results_to_db("functest",
                                          "Rally_details",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          json_data)
Esempio n. 29
0
def run_tempest(OPTION):
    """Launch Rally to run the Tempest suite and report the outcome.

    Runs ``rally verify start`` with *OPTION*, logs the verification
    summary, computes the success rate against the case criteria and,
    when ``args.report`` is set, pushes the results into the test DB.

    :param OPTION: tempest option string (e.g. smoke, ...)
    :return: 0 when the success criteria are met, -1 otherwise
    """
    logger.info("Starting Tempest test suite: '%s'." % OPTION)
    start_time = time.time()
    stop_time = start_time
    cmd_line = "rally verify start " + OPTION + " --system-wide"

    header = ("Tempest environment:\n"
              "  Installer: %s\n  Scenario: %s\n  Node: %s\n  Date: %s\n" %
              (ft_constants.CI_INSTALLER_TYPE, ft_constants.CI_SCENARIO,
               ft_constants.CI_NODE, time.strftime("%a %b %d %H:%M:%S %Z %Y")))

    f_stdout = open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+')
    f_stderr = open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+')
    f_env = open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+')
    f_env.write(header)

    p = subprocess.Popen(cmd_line,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=f_stderr,
                         bufsize=1)

    try:
        with p.stdout:
            for line in iter(p.stdout.readline, b''):
                # surface only the per-test result lines on the console;
                # raw string avoids the invalid '\}' escape of the original
                if re.search(r"\} tempest\.", line):
                    logger.info(line.replace('\n', ''))
                f_stdout.write(line)
        p.wait()
    finally:
        # close the log files even if streaming the output fails
        f_stdout.close()
        f_stderr.close()
        f_env.close()

    cmd_line = "rally verify show"
    output = ""
    p = subprocess.Popen(cmd_line,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    for line in p.stdout:
        if re.search(r"Tests\:", line):
            break
        output += line
    logger.info(output)

    cmd_line = "rally verify list"
    cmd = os.popen(cmd_line)
    output = (((cmd.read()).splitlines()[-2]).replace(" ", "")).split("|")
    # Format:
    # | UUID | Deployment UUID | smoke | tests | failures | Created at |
    # Duration | Status  |
    num_tests = output[4]
    num_failures = output[5]
    time_start = output[6]
    duration = output[7]
    # Compute duration (lets assume it does not take more than 60 min)
    dur_min = int(duration.split(':')[1])
    dur_sec_float = float(duration.split(':')[2])
    dur_sec_int = int(round(dur_sec_float, 0))
    dur_sec_int = dur_sec_int + 60 * dur_min
    stop_time = time.time()

    try:
        diff = (int(num_tests) - int(num_failures))
        success_rate = 100 * diff / int(num_tests)
    except (ValueError, ZeroDivisionError):
        # non-numeric or zero test count: treat the run as a full failure
        success_rate = 0

    if 'smoke' in args.mode:
        case_name = 'tempest_smoke_serial'
    elif 'feature' in args.mode:
        case_name = args.mode.replace("feature_", "")
    else:
        case_name = 'tempest_full_parallel'

    status = ft_utils.check_success_rate(case_name, success_rate)
    logger.info("Tempest %s success_rate is %s%%, is marked as %s" %
                (case_name, success_rate, status))

    # Push results in payload of testcase
    if args.report:
        # add the test in error in the details sections
        # should be possible to do it during the test
        logger.debug("Pushing tempest results into DB...")
        with open(TEMPEST_RESULTS_DIR + "/tempest.log", 'r') as myfile:
            output = myfile.read()
        error_logs = ""

        for match in re.findall('(.*?)[. ]*FAILED', output):
            error_logs += match

        # Generate json results for DB
        json_results = {
            "timestart": time_start,
            "duration": dur_sec_int,
            "tests": int(num_tests),
            "failures": int(num_failures),
            "errors": error_logs
        }
        logger.info("Results: " + str(json_results))
        # split Tempest smoke and full

        try:
            ft_utils.push_results_to_db("functest", case_name, start_time,
                                        stop_time, status, json_results)
        except Exception:
            logger.error("Error pushing results into Database '%s'" %
                         sys.exc_info()[0])

    if status == "PASS":
        return 0
    else:
        return -1
Esempio n. 30
0
def main():

    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    EXIT_CODE = -1

    image_id = None
    flavor = None

    # Check if the given image exists
    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    if image_id != '':
        logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME)
        global image_exists
        image_exists = True
    else:
        logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create a Glance image...")
            return(EXIT_CODE)
        logger.debug("Image '%s' with ID=%s created successfully."
                     % (GLANCE_IMAGE_NAME, image_id))

    network_dic = openstack_utils.create_network_full(logger,
                                                      neutron_client,
                                                      PRIVATE_NET_NAME,
                                                      PRIVATE_SUBNET_NAME,
                                                      ROUTER_NAME,
                                                      PRIVATE_SUBNET_CIDR)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        return(EXIT_CODE)

    network_id = network_dic["net_id"]

    sg_id = create_security_group(neutron_client)

    # Check if the given flavor exists
    try:
        flavor = nova_client.flavors.find(name=FLAVOR)
        logger.info("Using existing Flavor '%s'..." % FLAVOR)
    except:
        logger.error("Flavor '%s' not found." % FLAVOR)
        logger.info("Available flavors are: ")
        pMsg(nova_client.flavor.list())
        return(EXIT_CODE)

    # Deleting instances if they exist
    servers = nova_client.servers.list()
    for server in servers:
        if server.name == NAME_VM_1 or server.name == NAME_VM_2:
            logger.info("Instance %s found. Deleting..." % server.name)
            server.delete()

    # boot VM 1
    start_time = time.time()
    stop_time = start_time
    logger.info("vPing Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time).strftime(
            '%Y-%m-%d %H:%M:%S')))

    logger.info("Creating instance '%s'..." % NAME_VM_1)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (NAME_VM_1, flavor, image_id, network_id))
    vm1 = nova_client.servers.create(
        name=NAME_VM_1,
        flavor=flavor,
        image=image_id,
        nics=[{"net-id": network_id}]
    )

    # wait until VM status is active
    if not waitVmActive(nova_client, vm1):
        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_1)

    # Retrieve IP of first VM
    test_ip = vm1.networks.get(PRIVATE_NET_NAME)[0]
    logger.debug("Instance '%s' got private ip '%s'." % (NAME_VM_1, test_ip))

    logger.info("Adding '%s' to security group '%s'..."
                % (NAME_VM_1, SECGROUP_NAME))
    openstack_utils.add_secgroup_to_instance(nova_client, vm1.id, sg_id)

    # boot VM 2
    logger.info("Creating instance '%s'..." % NAME_VM_2)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (NAME_VM_2, flavor, image_id, network_id))
    vm2 = nova_client.servers.create(
        name=NAME_VM_2,
        flavor=flavor,
        image=image_id,
        nics=[{"net-id": network_id}]
    )

    if not waitVmActive(nova_client, vm2):
        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_2)

    logger.info("Adding '%s' to security group '%s'..." % (NAME_VM_2,
                                                           SECGROUP_NAME))
    openstack_utils.add_secgroup_to_instance(nova_client, vm2.id, sg_id)

    logger.info("Creating floating IP for VM '%s'..." % NAME_VM_2)
    floatip_dic = openstack_utils.create_floating_ip(neutron_client)
    floatip = floatip_dic['fip_addr']
    # floatip_id = floatip_dic['fip_id']

    if floatip is None:
        logger.error("Cannot create floating IP.")
        return (EXIT_CODE)
    logger.info("Floating IP created: '%s'" % floatip)

    logger.info("Associating floating ip: '%s' to VM '%s' "
                % (floatip, NAME_VM_2))
    if not openstack_utils.add_floating_ip(nova_client, vm2.id, floatip):
        logger.error("Cannot associate floating IP to VM.")
        return (EXIT_CODE)

    logger.info("Trying to establish SSH connection to %s..." % floatip)
    username = '******'
    password = '******'
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    timeout = 50
    nolease = False
    got_ip = False
    discover_count = 0
    cidr_first_octet = PRIVATE_SUBNET_CIDR.split('.')[0]
    while timeout > 0:
        try:
            ssh.connect(floatip, username=username,
                        password=password, timeout=2)
            logger.debug("SSH connection established to %s." % floatip)
            break
        except:
            logger.debug("Waiting for %s..." % floatip)
            time.sleep(6)
            timeout -= 1

        console_log = vm2.get_console_output()

        # print each "Sending discover" captured on the console log
        if (len(re.findall("Sending discover", console_log)) >
                discover_count and not got_ip):
            discover_count += 1
            logger.debug("Console-log '%s': Sending discover..."
                         % NAME_VM_2)

        # check if eth0 got an ip,the line looks like this:
        # "inet addr:192.168."....
        # if the dhcp agent fails to assing ip, this line will not appear
        if "inet addr:" + cidr_first_octet in console_log and not got_ip:
            got_ip = True
            logger.debug("The instance '%s' succeeded to get the IP "
                         "from the dhcp agent.")

        # if dhcp doesnt work,it shows "No lease, failing".The test will fail
        if "No lease, failing" in console_log and not nolease and not got_ip:
                nolease = True
                logger.debug("Console-log '%s': No lease, failing..."
                             % NAME_VM_2)
                logger.info("The instance failed to get an IP from the "
                            "DHCP agent. The test will probably timeout...")

    if timeout == 0:  # 300 sec timeout (5 min)
        logger.error("Cannot establish connection to IP '%s'. Aborting"
                     % floatip)
        return (EXIT_CODE)

    scp = SCPClient(ssh.get_transport())

    ping_script = REPO_PATH + "testcases/OpenStack/vPing/ping.sh"
    try:
        scp.put(ping_script, "~/")
    except:
        logger.error("Cannot SCP the file '%s' to VM '%s'"
                     % (ping_script, floatip))
        return (EXIT_CODE)

    cmd = 'chmod 755 ~/ping.sh'
    (stdin, stdout, stderr) = ssh.exec_command(cmd)
    for line in stdout.readlines():
        print line

    logger.info("Waiting for ping...")
    sec = 0
    stop_time = time.time()
    duration = 0

    cmd = '~/ping.sh ' + test_ip
    flag = False
    status = "FAIL"

    while True:
        time.sleep(1)
        (stdin, stdout, stderr) = ssh.exec_command(cmd)
        output = stdout.readlines()

        for line in output:
            if "vPing OK" in line:
                logger.info("vPing detected!")
                status = "PASS"
                # we consider start time at VM1 booting
                stop_time = time.time()
                duration = round(stop_time - start_time, 1)
                logger.info("vPing duration:'%s' s." % duration)
                EXIT_CODE = 0
                flag = True
                break

            elif sec == PING_TIMEOUT:
                logger.info("Timeout reached.")
                flag = True
                break
        if flag:
            break
        logger.debug("Pinging %s. Waiting for response..." % test_ip)
        sec += 1

    if status == "PASS":
        logger.info("vPing OK")
    else:
        duration = 0
        logger.error("vPing FAILED")

    if args.report:
        try:
            logger.debug("Pushing vPing SSH results into DB...")
            functest_utils.push_results_to_db("functest",
                                              "vping_ssh",
                                              logger,
                                              start_time,
                                              stop_time,
                                              status,
                                              details={'timestart': start_time,
                                                       'duration': duration,
                                                       'status': status})
        except:
            logger.error("Error pushing results into Database '%s'"
                         % sys.exc_info()[0])

    exit(EXIT_CODE)
Esempio n. 31
0
def main():
    """Run the selected Rally scenario(s) and print a summary report.

    Prepares the OpenStack resources (volume type, image, shared network),
    runs one or all Rally task scenarios via ``run_task``, builds a
    tabular summary from SUMMARY, evaluates the overall success criteria
    (>= 90% average success) and optionally pushes the payload to the DB
    and cleans up the created resources.
    """
    global SUMMARY
    global network_dict
    start_time = time.time()
    stop_time = start_time

    # configure script
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    SUMMARY = []
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created succesfully ."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = openstack_utils.create_network_full(logger,
                                                       client_dict['neutron'],
                                                       PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        if not openstack_utils.update_neutron_net(client_dict['neutron'],
                                                  network_dict['net_id'],
                                                  shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)

    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    # average success rate across scenarios; guard against an empty run
    # (the original divided unconditionally -> ZeroDivisionError)
    if SUMMARY:
        success_avg = total_success / len(SUMMARY)
    else:
        success_avg = 0.0
    total_success_pct = "{:0.2f}".format(success_avg)
    total_success_str = "{0:<10}".format(total_success_pct + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success_pct}})

    # Evaluation of the success criteria
    status = "failed"
    # for Rally we decided that the overall success rate must be above 90%
    # BUGFIX: the original compared the *formatted string* with 90, which in
    # Python 2 is always True; compare the numeric average instead.
    if success_avg >= 90:
        status = "passed"

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",
                                          case_name,
                                          None,
                                          start_time,
                                          stop_time,
                                          status,
                                          payload)
    if args.noclean:
        exit(0)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        # NOTE(review): passing nova_client here looks suspicious — the image
        # was created with glance_client; confirm against openstack_utils.
        if not openstack_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")
Esempio n. 32
0
def run_task(test_name):
    """Launch Rally for a single task scenario and store its results.

    Runs ``rally task start`` on the opnfv scenario *test_name*, writes the
    HTML report and the JSON results under RESULTS_DIR and, when
    ``args.report`` is set, pushes the detailed results into the test DB.

    :param test_name: name of the rally test scenario
    :return: None (exits the process when task/scenario files are missing)
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()
    stop_time = start_time

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line,
                         stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR,
                         shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        # The task did not start; validate it so the failure reason is logged.
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " + "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line : {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        # BUGFIX: the original mixed a '%s' placeholder with str.format(),
        # which logged a literal '%s' instead of the directory name.
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    # parse the JSON result directly from the string we already hold
    # instead of re-reading the file we just wrote (same content)
    json_data = json.loads(json_results)

    status = "failed"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "passed"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push results in payload of testcase
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        functest_utils.push_results_to_db("functest", "Rally_details", logger,
                                          start_time, stop_time, status,
                                          json_data)
Esempio n. 33
0
def main():
    """Run the selected Rally scenario(s) and report the results.

    Sets up the required Cinder volume type, Glance image and Neutron
    network, runs the scenario(s) selected through ``args.test_name``,
    prints a summary table, optionally pushes the results to the test DB
    (``--report``) and cleans up the resources it created.

    Relies on module-level state: ``args``, ``tests``, ``logger``,
    ``client_dict`` and the GLANCE_*/PRIVATE_*/CINDER_* constants.
    Exits the process on unrecoverable setup errors.
    """
    global SUMMARY
    global network_dict
    start_time = time.time()
    stop_time = start_time

    # configure script
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    SUMMARY = []
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1,
                                        glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2',
                                        creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    # create a volume type only when none exists; volume_types stays
    # falsy in that case so the cleanup at the end knows to delete it
    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..." %
                         CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    # reuse an existing Glance image when possible; image_exists controls
    # whether the image is deleted during cleanup
    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." %
                     (GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created succesfully ." %
                         (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..." %
                     (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = openstack_utils.create_network_full(
        logger, client_dict['neutron'], PRIVATE_NET_NAME, PRIVATE_SUBNET_NAME,
        ROUTER_NAME, PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        if not openstack_utils.update_neutron_net(
                client_dict['neutron'], network_dict['net_id'], shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)

    # run either all scenarios or only the requested one; 'all' and 'vm'
    # are pseudo entries of the tests list, not runnable scenarios
    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" + "| " + name + " | " + duration + " | " + nb_tests +
                   " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({
            'module': name,
            'details': {
                'duration': s['overall_duration'],
                'nb tests': s['nb_tests'],
                'success': s['success']
            }
        })

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    # BUGFIX: keep the success rate numeric for the criteria check below.
    # The previous code rebound total_success to a formatted *string* and
    # compared it to the integer 90, which is always True on Python 2
    # (str sorts above int) and a TypeError on Python 3.  Also guard
    # against an empty SUMMARY (previously a ZeroDivisionError).
    if SUMMARY:
        success_rate = total_success / len(SUMMARY)
    else:
        # nothing ran: report 0% rather than dividing by zero
        success_rate = 0.0
    success_rate_fmt = "{:0.2f}".format(success_rate)
    total_success_str = "{0:<10}".format(success_rate_fmt + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({
        'summary': {
            'duration': total_duration,
            'nb tests': total_nb_tests,
            'nb success': success_rate_fmt
        }
    })

    # Evaluation of the success criteria
    status = "failed"
    # for Rally we decided that the overall success rate must be above 90%
    if success_rate >= 90:
        status = "passed"

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest", case_name, None,
                                          start_time, stop_time, status,
                                          payload)
    if args.noclean:
        exit(0)

    # cleanup: only delete what this run created
    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..." %
                     (GLANCE_IMAGE_NAME, image_id))
        if not openstack_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..." % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")
Esempio n. 34
0
def main():
    """Run the ODL-SFC functional tests (Fuel installer only).

    Boots a client and a server VM plus service-function VMs, installs
    classification rules through Tacker and verifies that SSH/HTTP
    traffic is blocked or allowed as expected, in two phases (before and
    after changing the classification).  Optionally pushes the results
    to the test DB (``--report``).  Exits 0 when all tests pass, 1
    otherwise.
    """
    installer_type = os.environ.get("INSTALLER_TYPE")
    if installer_type != "fuel":
        logger.error(
            '\033[91mCurrently supported only Fuel Installer type\033[0m')
        sys.exit(1)

    installer_ip = os.environ.get("INSTALLER_IP")
    if not installer_ip:
        logger.error(
            '\033[91minstaller ip is not set\033[0m')
        logger.error(
            '\033[91mexport INSTALLER_IP=<ip>\033[0m')
        sys.exit(1)

    start_time = time.time()
    status = "PASS"
    configure_iptables()
    download_image()
    _, custom_flv_id = os_utils.get_or_create_flavor(
        FLAVOR, 1500, 10, 1, public=True)
    if not custom_flv_id:
        logger.error("Failed to create custom flavor")
        sys.exit(1)

    glance_client = os_utils.get_glance_client()
    neutron_client = os_utils.get_neutron_client()
    nova_client = os_utils.get_nova_client()

    controller_clients = get_ssh_clients("controller")
    compute_clients = get_ssh_clients("compute")

    ovs_logger = ovs_utils.OVSLogger(
        os.path.join(os.getcwd(), 'ovs-logs'),
        FUNCTEST_RESULTS_DIR)

    image_id = setup_glance(glance_client)
    network_id = setup_neutron(neutron_client)
    sg_id = setup_security_groups(neutron_client)

    # NOTE(review): the client VM uses a hard-coded image UUID instead of
    # the image_id created above -- confirm whether this is intentional
    boot_instance(
        nova_client, CLIENT, "custom-3",
        "97a399a4-a736-449d-9d20-0cc92cf2cbe4", network_id, sg_id)
    srv_prv_ip = boot_instance(
        nova_client, SERVER, FLAVOR, image_id, network_id, sg_id)

    subprocess.call(TACKER_SCRIPT, shell=True)

    # Start measuring the time it takes to implement the classification rules
#    try:
#        thread.start_new_thread(capture_time_log,
#                                (ovs_logger, compute_clients,))
#    except Exception, e:
#        logger.error("Unable to start the thread that counts time %s" % e)

    server_ip, client_ip, sf1, sf2 = get_floating_ips(
        nova_client, neutron_client)

    if not check_ssh([sf1, sf2]):
        logger.error("Cannot establish SSH connection to the SFs")
        sys.exit(1)

    logger.info("Starting HTTP server on %s" % server_ip)
    if not start_http_server(server_ip):
        logger.error(
            '\033[91mFailed to start HTTP server on %s\033[0m' % server_ip)
        sys.exit(1)

    logger.info("Starting HTTP firewall on %s" % sf2)
    vxlan_firewall(sf2, port="80")
    logger.info("Starting SSH firewall on %s" % sf1)
    vxlan_firewall(sf1, port="22")

    logger.info("Wait for ODL to update the classification rules in OVS")
    time.sleep(120)

    logger.info("Test SSH")
    if is_ssh_blocked(srv_prv_ip, client_ip):
        logger.info('\033[92mTEST 1 [PASSED] ==> SSH BLOCKED\033[0m')
        update_json_results("Test 1: SSH Blocked", "Passed")
    else:
        error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m')
        logger.error(error)
        capture_err_logs(
            ovs_logger, controller_clients, compute_clients, error)
        update_json_results("Test 1: SSH Blocked", "Failed")

    logger.info("Test HTTP")
    if not is_http_blocked(srv_prv_ip, client_ip):
        logger.info('\033[92mTEST 2 [PASSED] ==> HTTP WORKS\033[0m')
        update_json_results("Test 2: HTTP works", "Passed")
    else:
        error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m')
        logger.error(error)
        capture_err_logs(
            ovs_logger, controller_clients, compute_clients, error)
        update_json_results("Test 2: HTTP works", "Failed")

    # BUGFIX: an unconditional sys.exit(-1) used to sit here (debugging
    # leftover), making tests 3/4, the DB push and the final exit paths
    # unreachable and forcing a failure exit code on every run.

    logger.info("Changing the classification")
    subprocess.call(TACKER_CHANGECLASSI, shell=True)

    # Start measuring the time it takes to implement the classification rules
#    try:
#        thread.start_new_thread(capture_time_log,
#                                (ovs_logger, compute_clients,))
#    except Exception, e:
#        logger.error("Unable to start the thread that counts time %s" % e)

    logger.info("Wait for ODL to update the classification rules in OVS")
    time.sleep(100)

    logger.info("Test HTTP")
    if is_http_blocked(srv_prv_ip, client_ip):
        logger.info('\033[92mTEST 3 [PASSED] ==> HTTP Blocked\033[0m')
        update_json_results("Test 3: HTTP Blocked", "Passed")
    else:
        error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
        logger.error(error)
        # BUGFIX: pass ovs_logger, consistent with the other
        # capture_err_logs calls above (was called with only 3 args)
        capture_err_logs(
            ovs_logger, controller_clients, compute_clients, error)
        update_json_results("Test 3: HTTP Blocked", "Failed")

    logger.info("Test SSH")
    if not is_ssh_blocked(srv_prv_ip, client_ip):
        logger.info('\033[92mTEST 4 [PASSED] ==> SSH Works\033[0m')
        update_json_results("Test 4: SSH Works", "Passed")
    else:
        error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m')
        logger.error(error)
        capture_err_logs(
            ovs_logger, controller_clients, compute_clients, error)
        update_json_results("Test 4: SSH Works", "Failed")

    if json_results["failures"]:
        status = "FAIL"
        logger.error('\033[91mSFC TESTS: %s :( FOUND %s FAIL \033[0m' % (
            status, json_results["failures"]))

    ovs_logger.create_artifact_archive()

    if args.report:
        stop_time = time.time()
        # BUGFIX: log message said "Promise" (copy-paste from another test)
        logger.debug("SFC Results json: " + str(json_results))
        ft_utils.push_results_to_db("sfc",
                                    "functest-odl-sfc",
                                    start_time,
                                    stop_time,
                                    status,
                                    json_results)

    if status == "PASS":
        logger.info('\033[92mSFC ALL TESTS: %s :)\033[0m' % status)
        sys.exit(0)

    sys.exit(1)
Esempio n. 35
0
def main():
    """Prepare OpenStack resources and run the Promise test suite.

    Creates a dedicated tenant/user, a Glance image, a Nova flavor and a
    private network, exports the environment variables the Promise npm
    test suite expects, runs ``npm run test`` and parses its JSON
    report.  With ``--report`` the outcome is pushed to the test DB.
    Exits the process on unrecoverable setup errors.
    """
    start_time = time.time()
    ks_creds = openstack_utils.get_credentials("keystone")
    nv_creds = openstack_utils.get_credentials("nova")
    nt_creds = openstack_utils.get_credentials("neutron")

    keystone = ksclient.Client(**ks_creds)

    user_id = openstack_utils.get_user_id(keystone, ks_creds['username'])
    if user_id == '':
        logger.error("Error : Failed to get id of %s user" %
                     ks_creds['username'])
        exit(-1)

    logger.info("Creating tenant '%s'..." % TENANT_NAME)
    tenant_id = openstack_utils.create_tenant(
        keystone, TENANT_NAME, TENANT_DESCRIPTION)
    if tenant_id == '':
        logger.error("Error : Failed to create %s tenant" % TENANT_NAME)
        exit(-1)
    logger.debug("Tenant '%s' created successfully." % TENANT_NAME)

    # the admin role is capitalized differently across installers
    roles_name = ["admin", "Admin"]
    role_id = ''
    for role_name in roles_name:
        if role_id == '':
            role_id = openstack_utils.get_role_id(keystone, role_name)

    if role_id == '':
        logger.error("Error : Failed to get id for %s role" % role_name)
        exit(-1)

    logger.info("Adding role '%s' to tenant '%s'..." % (role_id, TENANT_NAME))
    if not openstack_utils.add_role_user(keystone, user_id,
                                         role_id, tenant_id):
        logger.error("Error : Failed to add %s on tenant %s" %
                     (ks_creds['username'], TENANT_NAME))
        exit(-1)
    logger.debug("Role added successfully.")

    logger.info("Creating user '%s'..." % USER_NAME)
    user_id = openstack_utils.create_user(
        keystone, USER_NAME, USER_PWD, None, tenant_id)

    if user_id == '':
        logger.error("Error : Failed to create %s user" % USER_NAME)
        exit(-1)
    logger.debug("User '%s' created successfully." % USER_NAME)

    logger.info("Updating OpenStack credentials...")
    # NOTE(review): username AND password are both set to TENANT_NAME here
    # while the user above was created as USER_NAME/USER_PWD, and ks_creds
    # is never used again after this update -- confirm this is intended
    ks_creds.update({
        "username": TENANT_NAME,
        "password": TENANT_NAME,
        "tenant_name": TENANT_NAME,
    })

    nt_creds.update({
        "tenant_name": TENANT_NAME,
    })

    nv_creds.update({
        "project_id": TENANT_NAME,
    })

    glance_endpoint = keystone.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token)
    nova = nvclient.Client("2", **nv_creds)

    logger.info("Creating image '%s' from '%s'..." % (IMAGE_NAME,
                                                      GLANCE_IMAGE_PATH))
    image_id = openstack_utils.create_glance_image(glance,
                                                   IMAGE_NAME,
                                                   GLANCE_IMAGE_PATH)
    if not image_id:
        logger.error("Failed to create the Glance image...")
        exit(-1)
    logger.debug("Image '%s' with ID '%s' created successfully." % (IMAGE_NAME,
                                                                    image_id))
    flavor_id = openstack_utils.get_flavor_id(nova, FLAVOR_NAME)
    if flavor_id == '':
        logger.info("Creating flavor '%s'..." % FLAVOR_NAME)
        flavor_id = openstack_utils.create_flavor(nova,
                                                  FLAVOR_NAME,
                                                  FLAVOR_RAM,
                                                  FLAVOR_DISK,
                                                  FLAVOR_VCPUS)
        if not flavor_id:
            logger.error("Failed to create the Flavor...")
            exit(-1)
        logger.debug("Flavor '%s' with ID '%s' created successfully." %
                     (FLAVOR_NAME, flavor_id))
    else:
        logger.debug("Using existing flavor '%s' with ID '%s'..."
                     % (FLAVOR_NAME, flavor_id))

    neutron = ntclient.Client(**nt_creds)

    network_dic = openstack_utils.create_network_full(logger,
                                                      neutron,
                                                      NET_NAME,
                                                      SUBNET_NAME,
                                                      ROUTER_NAME,
                                                      SUBNET_CIDR)
    if network_dic is False:
        logger.error("Failed to create the private network...")
        exit(-1)

    # environment consumed by the Promise npm test suite
    logger.info("Exporting environment variables...")
    os.environ["NODE_ENV"] = "functest"
    os.environ["OS_TENANT_NAME"] = TENANT_NAME
    os.environ["OS_USERNAME"] = USER_NAME
    os.environ["OS_PASSWORD"] = USER_PWD
    os.environ["OS_TEST_IMAGE"] = image_id
    os.environ["OS_TEST_FLAVOR"] = flavor_id
    os.environ["OS_TEST_NETWORK"] = network_dic["net_id"]

    os.chdir(PROMISE_REPO)
    results_file_name = 'promise-results.json'
    cmd = 'npm run -s test -- --reporter json'

    logger.info("Running command: %s" % cmd)
    # IMPROVEMENT: with-statement guarantees the results file is closed
    # even if subprocess.call raises (was a manual open()/close() pair)
    with open(results_file_name, 'w+') as results_file:
        ret = subprocess.call(cmd, shell=True, stdout=results_file,
                              stderr=subprocess.STDOUT)

    if ret == 0:
        logger.info("The test succeeded.")
    else:
        logger.info("The command '%s' failed." % cmd)

    # Print output of file
    with open(results_file_name, 'r') as results_file:
        data = results_file.read()
        logger.debug("\n%s" % data)
        json_data = json.loads(data)

        suites = json_data["stats"]["suites"]
        tests = json_data["stats"]["tests"]
        passes = json_data["stats"]["passes"]
        pending = json_data["stats"]["pending"]
        failures = json_data["stats"]["failures"]
        start_time_json = json_data["stats"]["start"]
        end_time = json_data["stats"]["end"]
        # mocha reports duration in milliseconds; convert to seconds
        duration = float(json_data["stats"]["duration"]) / float(1000)

    logger.info("\n"
                "****************************************\n"
                "          Promise test report\n\n"
                "****************************************\n"
                " Suites:  \t%s\n"
                " Tests:   \t%s\n"
                " Passes:  \t%s\n"
                " Pending: \t%s\n"
                " Failures:\t%s\n"
                " Start:   \t%s\n"
                " End:     \t%s\n"
                " Duration:\t%s\n"
                "****************************************\n\n"
                % (suites, tests, passes, pending, failures,
                   start_time_json, end_time, duration))

    if args.report:
        stop_time = time.time()
        json_results = {"timestart": start_time, "duration": duration,
                        "tests": int(tests), "failures": int(failures)}
        logger.debug("Promise Results json: " + str(json_results))

        # criteria for Promise in Release B was 100% of tests OK
        status = "FAIL"
        if int(tests) > 32 and int(failures) < 1:
            status = "PASS"

        functest_utils.push_results_to_db("promise",
                                          "promise",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          json_results)
Esempio n. 36
0
def run_tempest(OPTION):
    """Launch Rally to run the Tempest suite selected by *OPTION*.

    Runs ``rally verify start``, captures stdout/stderr and the test
    environment into log files, parses the ``rally verify list`` output
    for test counts and duration, and (with ``--report``) pushes the
    results to the test DB.

    :param OPTION: tempest option string (e.g. "smoke")
    :return: void
    """
    logger.info("Starting Tempest test suite: '%s'." % OPTION)
    start_time = time.time()
    stop_time = start_time
    cmd_line = "rally verify start " + OPTION + " --system-wide"

    header = ("Tempest environment:\n"
              "  Installer: %s\n  Scenario: %s\n  Node: %s\n  Date: %s\n" %
              (os.getenv('INSTALLER_TYPE', 'Unknown'),
               os.getenv('DEPLOY_SCENARIO', 'Unknown'),
               os.getenv('NODE_NAME', 'Unknown'),
               time.strftime("%a %b %d %H:%M:%S %Z %Y")))

    # IMPROVEMENT: context managers guarantee the three log files are
    # closed even if the subprocess call raises (was open()/close() pairs)
    with open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+') as f_stdout, \
            open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+') \
            as f_stderr, \
            open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+') as f_env:
        f_env.write(header)
        subprocess.call(cmd_line, shell=True, stdout=f_stdout,
                        stderr=f_stderr)

    cmd_line = "rally verify show"
    ft_utils.execute_command(cmd_line, logger,
                             exit_on_error=True, info=True)

    cmd_line = "rally verify list"
    logger.debug('Executing command : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    # second-to-last line of the table holds the run we just started
    output = (((cmd.read()).splitlines()[-2]).replace(" ", "")).split("|")
    # Format:
    # | UUID | Deployment UUID | smoke | tests | failures | Created at |
    # Duration | Status  |
    num_tests = output[4]
    num_failures = output[5]
    time_start = output[6]
    duration = output[7]
    # Compute duration (lets assume it does not take more than 60 min)
    dur_min = int(duration.split(':')[1])
    dur_sec_float = float(duration.split(':')[2])
    dur_sec_int = int(round(dur_sec_float, 0))
    dur_sec_int = dur_sec_int + 60 * dur_min
    stop_time = time.time()
    # Push results in payload of testcase
    if args.report:
        logger.debug("Pushing tempest results into DB...")
        # Note criteria hardcoded...TODO move to testcase.yaml
        status = "FAIL"
        # IMPROVEMENT: narrow except (was a bare except:) -- only the
        # expected parse/zero-test failures map to a 0% success rate
        try:
            diff = (int(num_tests) - int(num_failures))
            success_rate = 100 * diff / int(num_tests)
        except (ValueError, ZeroDivisionError):
            success_rate = 0

        # For Tempest we assume that the success rate is above 90%
        if success_rate >= 90:
            status = "PASS"

        # add the test in error in the details sections
        # should be possible to do it during the test
        with open(TEMPEST_RESULTS_DIR + "/tempest.log", 'r') as myfile:
            output = myfile.read()
        error_logs = ""

        for match in re.findall('(.*?)[. ]*FAILED', output):
            error_logs += match

        # Generate json results for DB
        json_results = {"timestart": time_start, "duration": dur_sec_int,
                        "tests": int(num_tests), "failures": int(num_failures),
                        "errors": error_logs}
        logger.info("Results: " + str(json_results))
        # split Tempest smoke and full
        if "smoke" in args.mode:
            case_name = "tempest_smoke_serial"
        else:
            case_name = "tempest_full_parallel"

        # best-effort push: a DB outage must not kill the test run
        try:
            ft_utils.push_results_to_db("functest",
                                        case_name,
                                        None,
                                        start_time,
                                        stop_time,
                                        status,
                                        json_results)
        except Exception:
            logger.error("Error pushing results into Database '%s'"
                         % sys.exc_info()[0])
Esempio n. 37
0
def main():
    """Set up OpenStack resources, run the selected Rally scenario(s),
    print a summary table and optionally push the results to the DB.

    Reads module-level ``args``, ``tests`` and the RALLY_*/GLANCE_*/
    CINDER_* constants; stores clients and the network description in
    ``GlobalVariables`` for use by ``run_task``.  Exits the process with
    0 when the success criteria are met, non-zero otherwise.
    """

    GlobalVariables.nova_client = os_utils.get_nova_client()
    GlobalVariables.neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # configure script
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    GlobalVariables.SUMMARY = []

    # create a volume type only when none exists; volume_types stays
    # falsy in that case so the cleanup at the end knows to delete it
    volume_types = os_utils.list_volume_types(cinder_client, private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(cinder_client,
                                                  CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..." %
                         CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    # image_exists controls whether the image is deleted during cleanup
    image_exists, image_id = os_utils.get_or_create_image(
        GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH, GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % RALLY_PRIVATE_NET_NAME)
    GlobalVariables.network_dict = \
        os_utils.create_shared_network_full(RALLY_PRIVATE_NET_NAME,
                                            RALLY_PRIVATE_SUBNET_NAME,
                                            RALLY_ROUTER_NAME,
                                            RALLY_PRIVATE_SUBNET_CIDR)
    if not GlobalVariables.network_dict:
        exit(1)

    # run either all scenarios or only the requested one; 'all' and 'vm'
    # are pseudo entries of the tests list, not runnable scenarios
    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in GlobalVariables.SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" + "| " + name + " | " + duration + " | " + nb_tests +
                   " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({
            'module': name,
            'details': {
                'duration': s['overall_duration'],
                'nb tests': s['nb_tests'],
                'success': s['success']
            }
        })

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)

    # NOTE(review): an empty SUMMARY yields a 100% success rate, which
    # would mark a run with zero tests as passing -- confirm intended
    if len(GlobalVariables.SUMMARY):
        success_rate = total_success / len(GlobalVariables.SUMMARY)
    else:
        success_rate = 100
    success_rate = "{:0.2f}".format(success_rate)
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({
        'summary': {
            'duration': total_duration,
            'nb tests': total_nb_tests,
            'nb success': success_rate
        }
    })

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # Evaluation of the success criteria
    status = ft_utils.check_success_rate(case_name, success_rate)

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        ft_utils.push_results_to_db("functest", case_name, start_time,
                                    stop_time, status, payload)
    # --noclean: leave the created resources in place for debugging
    if args.noclean:
        exit(exit_code)

    # cleanup: only delete what this run created
    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..." %
                     (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(GlobalVariables.nova_client,
                                            image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..." % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")

    exit(exit_code)
Esempio n. 38
0
def main():
    """Set up OpenStack resources for the Promise test suite, run the
    npm-based tests and report the outcome.

    Creates a dedicated tenant and user, uploads a Glance image, ensures
    a flavor and a private network exist, exports the environment
    variables the Promise test runner expects, executes ``npm run test``
    and parses the JSON report it produces.  When ``args.report`` is set,
    the summary is pushed to the results DB.  Exits 0 only when
    reporting is enabled and the pass criteria (>32 tests, 0 failures)
    are met; exits -1 otherwise.
    """
    exit_code = -1
    start_time = time.time()
    ks_creds = openstack_utils.get_credentials("keystone")
    nv_creds = openstack_utils.get_credentials("nova")
    nt_creds = openstack_utils.get_credentials("neutron")

    keystone = ksclient.Client(**ks_creds)

    user_id = openstack_utils.get_user_id(keystone, ks_creds['username'])
    if user_id == '':
        logger.error("Error : Failed to get id of %s user" %
                     ks_creds['username'])
        exit(-1)

    logger.info("Creating tenant '%s'..." % PROMISE_TENANT_NAME)
    tenant_id = openstack_utils.create_tenant(keystone, PROMISE_TENANT_NAME,
                                              TENANT_DESCRIPTION)
    if not tenant_id:
        logger.error("Error : Failed to create %s tenant" %
                     PROMISE_TENANT_NAME)
        exit(-1)
    logger.debug("Tenant '%s' created successfully." % PROMISE_TENANT_NAME)

    # Deployments differ in the capitalization of the admin role; keep
    # the first spelling that resolves to an id.
    roles_name = ["admin", "Admin"]
    role_id = ''
    for role_name in roles_name:
        if role_id == '':
            role_id = openstack_utils.get_role_id(keystone, role_name)

    if role_id == '':
        logger.error("Error : Failed to get id for %s role" % role_name)
        exit(-1)

    logger.info("Adding role '%s' to tenant '%s'..." %
                (role_id, PROMISE_TENANT_NAME))
    if not openstack_utils.add_role_user(keystone, user_id, role_id,
                                         tenant_id):
        logger.error("Error : Failed to add %s on tenant %s" %
                     (ks_creds['username'], PROMISE_TENANT_NAME))
        exit(-1)
    logger.debug("Role added successfully.")

    logger.info("Creating user '%s'..." % PROMISE_USER_NAME)
    user_id = openstack_utils.create_user(keystone, PROMISE_USER_NAME,
                                          PROMISE_USER_PWD, None, tenant_id)

    if not user_id:
        logger.error("Error : Failed to create %s user" % PROMISE_USER_NAME)
        exit(-1)
    logger.debug("User '%s' created successfully." % PROMISE_USER_NAME)

    logger.info("Updating OpenStack credentials...")
    # NOTE(review): username and password are both set to the tenant
    # name here rather than PROMISE_USER_NAME / PROMISE_USER_PWD — this
    # looks suspicious; confirm against the Promise deployment docs.
    ks_creds.update({
        "username": PROMISE_TENANT_NAME,
        "password": PROMISE_TENANT_NAME,
        "tenant_name": PROMISE_TENANT_NAME,
    })

    nt_creds.update({
        "tenant_name": PROMISE_TENANT_NAME,
    })

    nv_creds.update({
        "project_id": PROMISE_TENANT_NAME,
    })

    glance = openstack_utils.get_glance_client()
    nova = nvclient.Client("2", **nv_creds)

    logger.info("Creating image '%s' from '%s'..." %
                (PROMISE_IMAGE_NAME, GLANCE_IMAGE_PATH))
    image_id = openstack_utils.create_glance_image(glance, PROMISE_IMAGE_NAME,
                                                   GLANCE_IMAGE_PATH)
    if not image_id:
        logger.error("Failed to create the Glance image...")
        exit(-1)
    logger.debug("Image '%s' with ID '%s' created successfully." %
                 (PROMISE_IMAGE_NAME, image_id))

    # Reuse an existing flavor when present, otherwise create one.
    flavor_id = openstack_utils.get_flavor_id(nova, PROMISE_FLAVOR_NAME)
    if flavor_id == '':
        logger.info("Creating flavor '%s'..." % PROMISE_FLAVOR_NAME)
        flavor_id = openstack_utils.create_flavor(nova, PROMISE_FLAVOR_NAME,
                                                  PROMISE_FLAVOR_RAM,
                                                  PROMISE_FLAVOR_DISK,
                                                  PROMISE_FLAVOR_VCPUS)
        if not flavor_id:
            logger.error("Failed to create the Flavor...")
            exit(-1)
        logger.debug("Flavor '%s' with ID '%s' created successfully." %
                     (PROMISE_FLAVOR_NAME, flavor_id))
    else:
        logger.debug("Using existing flavor '%s' with ID '%s'..." %
                     (PROMISE_FLAVOR_NAME, flavor_id))

    neutron = ntclient.Client(**nt_creds)

    network_dic = openstack_utils.create_network_full(neutron,
                                                      PROMISE_NET_NAME,
                                                      PROMISE_SUBNET_NAME,
                                                      PROMISE_ROUTER_NAME,
                                                      PROMISE_SUBNET_CIDR)
    if not network_dic:
        logger.error("Failed to create the private network...")
        exit(-1)

    # The Promise test runner reads its configuration from environment
    # variables.
    logger.info("Exporting environment variables...")
    os.environ["NODE_ENV"] = "functest"
    os.environ["OS_PASSWORD"] = PROMISE_USER_PWD
    os.environ["OS_TEST_IMAGE"] = image_id
    os.environ["OS_TEST_FLAVOR"] = flavor_id
    os.environ["OS_TEST_NETWORK"] = network_dic["net_id"]
    os.environ["OS_TENANT_NAME"] = PROMISE_TENANT_NAME
    os.environ["OS_USERNAME"] = PROMISE_USER_NAME

    os.chdir(PROMISE_REPO_DIR + '/source/')
    results_file_name = os.path.join(RESULTS_DIR, 'promise-results.json')
    cmd = 'npm run -s test -- --reporter json'

    logger.info("Running command: %s" % cmd)
    # 'with' guarantees the report file is closed even if the call
    # raises (the original leaked the handle in that case).
    with open(results_file_name, 'w+') as results_file:
        ret = subprocess.call(cmd,
                              shell=True,
                              stdout=results_file,
                              stderr=subprocess.STDOUT)

    if ret == 0:
        logger.info("The test succeeded.")
    else:
        logger.info("The command '%s' failed." % cmd)

    # Parse and log the JSON report written by the reporter.
    with open(results_file_name, 'r') as results_file:
        data = results_file.read()
        logger.debug("\n%s" % data)
        json_data = json.loads(data)

        suites = json_data["stats"]["suites"]
        tests = json_data["stats"]["tests"]
        passes = json_data["stats"]["passes"]
        pending = json_data["stats"]["pending"]
        failures = json_data["stats"]["failures"]
        start_time_json = json_data["stats"]["start"]
        end_time = json_data["stats"]["end"]
        # Reported duration is in milliseconds; convert to seconds.
        duration = float(json_data["stats"]["duration"]) / float(1000)

    logger.info("\n"
                "****************************************\n"
                "          Promise test report\n\n"
                "****************************************\n"
                " Suites:  \t%s\n"
                " Tests:   \t%s\n"
                " Passes:  \t%s\n"
                " Pending: \t%s\n"
                " Failures:\t%s\n"
                " Start:   \t%s\n"
                " End:     \t%s\n"
                " Duration:\t%s\n"
                "****************************************\n\n" %
                (suites, tests, passes, pending, failures, start_time_json,
                 end_time, duration))

    if args.report:
        stop_time = time.time()
        json_results = {
            "timestart": start_time,
            "duration": duration,
            "tests": int(tests),
            "failures": int(failures)
        }
        logger.debug("Promise Results json: " + str(json_results))

        # criteria for Promise in Release B was 100% of tests OK
        status = "FAIL"
        if int(tests) > 32 and int(failures) < 1:
            status = "PASS"
            exit_code = 0

        ft_utils.push_results_to_db("promise", "promise", start_time,
                                    stop_time, status, json_results)

    exit(exit_code)
# Example 39
def test_clearwater():
    """Run the Clearwater vIMS functional tests.

    Retrieves the Cloudify manager IP, fetches the deployment outputs
    (DNS and Ellis IPs), provisions a test account and number through
    Ellis, then runs the Clearwater 'rake' signup tests and pushes the
    aggregated results to the test DB.
    """
    # Shell snippet: activate the cloudify virtualenv and grep the
    # manager IP out of 'cfy status'.
    script = "source " + VIMS_DATA_DIR + "venv_cloudify/bin/activate; "
    script += "cd " + VIMS_DATA_DIR + "; "
    script += "cfy status | grep -Eo \"([0-9]{1,3}\.){3}[0-9]{1,3}\""
    cmd = "/bin/bash -c '" + script + "'"

    try:
        logger.debug("Trying to get clearwater manager IP ... ")
        mgr_ip = os.popen(cmd).read()
        mgr_ip = mgr_ip.splitlines()[0]
    except:  # noqa: E722 — any failure here means the manager is unreachable
        step_failure("sig_test", "Unable to retrieve the IP of the "
                     "cloudify manager server !")

    api_url = "http://" + mgr_ip + "/api/v2"
    dep_outputs = requests.get(api_url + "/deployments/" +
                               CW_DEPLOYMENT_NAME + "/outputs")
    dns_ip = dep_outputs.json()['outputs']['dns_ip']
    ellis_ip = dep_outputs.json()['outputs']['ellis_ip']

    ellis_url = "http://" + ellis_ip + "/"
    url = ellis_url + "accounts"

    params = {"password": "******",
              "full_name": "opnfv functest user",
              "email": "*****@*****.**",
              "signup_code": "secret"}

    # Fix: initialize cookies so the check further down does not raise
    # NameError when account creation never succeeds.
    cookies = ""

    # Account creation can lag behind deployment; retry up to 20 times
    # with a 10s pause.
    rq = requests.post(url, data=params)
    i = 20
    while rq.status_code != 201 and i > 0:
        rq = requests.post(url, data=params)
        i = i - 1
        time.sleep(10)

    if rq.status_code == 201:
        url = ellis_url + "session"
        rq = requests.post(url, data=params)
        cookies = rq.cookies

    url = ellis_url + "accounts/" + params['email'] + "/numbers"
    if cookies != "":
        # Number allocation can also take a while; retry up to 24 times
        # with a 25s pause.
        rq = requests.post(url, cookies=cookies)
        i = 24
        while rq.status_code != 200 and i > 0:
            rq = requests.post(url, cookies=cookies)
            i = i - 1
            time.sleep(25)

    if rq.status_code != 200:
        step_failure("sig_test", "Unable to create a number: %s"
                     % rq.json()['reason'])

    start_time_ts = time.time()
    end_time_ts = start_time_ts
    logger.info("vIMS functional test Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time_ts).strftime(
            '%Y-%m-%d %H:%M:%S')))
    # Preserve the current nameservers; the clearwater DNS server is
    # prepended below.
    nameservers = functest_utils.get_resolvconf_ns()
    resolvconf = ""
    for ns in nameservers:
        resolvconf += "\nnameserver " + ns

    if dns_ip != "":
        script = ('echo -e "nameserver ' + dns_ip + resolvconf +
                  '" > /etc/resolv.conf; ')
        script += 'source /etc/profile.d/rvm.sh; '
        script += 'cd ' + VIMS_TEST_DIR + '; '
        script += ('rake test[' + CW_INPUTS["public_domain"] +
                   '] SIGNUP_CODE="secret"')

        cmd = "/bin/bash -c '" + script + "'"
        output_file = "output.txt"
        # 'with' closes the handles even if the call raises (the
        # original also left the read handle open).
        with open(output_file, 'w+') as f:
            subprocess.call(cmd, shell=True, stdout=f,
                            stderr=subprocess.STDOUT)
        end_time_ts = time.time()
        duration = round(end_time_ts - start_time_ts, 1)
        logger.info("vIMS functional test duration:'%s'" % duration)
        with open(output_file, 'r') as f:
            result = f.read()
        if result != "" and logger:
            logger.debug(result)

        vims_test_result = ""
        try:
            logger.debug("Trying to load test results")
            with open(VIMS_TEST_DIR + "temp.json") as f:
                vims_test_result = json.load(f)
        except:  # noqa: E722 — missing/corrupt report: keep empty result
            logger.error("Unable to retrieve test results")

        set_result("sig_test", duration, vims_test_result)

        # success criteria for vIMS (for Brahmaputra)
        # - orchestrator deployed
        # - VNF deployed
        # TODO use test criteria defined in config file
        status = "FAIL"
        try:
            if (RESULTS['orchestrator']['duration'] > 0 and
                    RESULTS['vIMS']['duration'] > 0):
                status = "PASS"
        except:  # noqa: E722 — expected keys may be absent; default FAIL
            logger.error("Unable to set test status")

        functest_utils.push_results_to_db("functest",
                                          "vims",
                                          logger,
                                          TESTCASE_START_TIME,
                                          end_time_ts,
                                          status,
                                          RESULTS)

        # Best-effort cleanup of the temporary report.
        try:
            os.remove(VIMS_TEST_DIR + "temp.json")
        except:  # noqa: E722
            logger.error("Deleting file failed")
# Example 40
def main(argv):
    """Parse an ODL robot XML report and push the results to the test DB.

    Expected options: -x/--xml (robot output file), -p/--pod,
    -i/--installer, -s/--scenario; -h/--help prints usage.
    """
    (xml_file, pod, installer, scenario) = None, None, None, None
    try:
        opts, args = getopt.getopt(argv,
                                   'x:p:i:s:h',
                                   ['xml=', 'pod=',
                                    'installer=',
                                    'scenario=',
                                    'help'])
    except getopt.GetoptError:
        # NOTE(review): usage() presumably exits the process; otherwise
        # 'opts' below would be undefined — confirm.
        usage()

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
        elif opt in ('-x', '--xml'):
            xml_file = arg
        elif opt in ('-p', '--pod'):
            pod = arg
        elif opt in ('-i', '--installer'):
            installer = arg
        elif opt in ('-s', '--scenario'):
            scenario = arg
        else:
            usage()

    # All four options are mandatory.
    if not all(x is not None for x in (xml_file, pod, installer, scenario)):
        usage()

    # Read the robot report as a single line to simplify parsing.
    with open(xml_file, "r") as myfile:
        xml_input = myfile.read().replace('\n', '')

    # dictionary populated with data from xml file
    all_data = xmltodict.parse(xml_input)['robot']

    try:
        # Flatten the nested robot suites and attach DB metadata.
        data = parse_suites(all_data['suite']['suite'])
        data['description'] = all_data['suite']['@name']
        data['version'] = all_data['@generator']
        data['test_project'] = "functest"
        data['case_name'] = "odl"
        data['pod_name'] = pod
        data['installer'] = installer

        json.dumps(data, indent=4, separators=(',', ': '))

        # example:
        # python odlreport2db.py -x ~/Pictures/Perso/odl/output3.xml
        #                        -i fuel
        #                        -p opnfv-jump-2
        #                        -s os-odl_l2-ha

        # success criteria for ODL = 100% of tests OK
        status = "FAIL"
        # TODO as part of the tests are executed before in the bash
        # start and stoptime have no real meaning
        start_time = time.time()
        stop_time = start_time
        tests_passed = 0
        tests_failed = 0
        for v in data['details']:
            if v['test_status']['@status'] == "PASS":
                tests_passed += 1
            else:
                tests_failed += 1

        if (tests_failed < 1):
            status = "PASS"

        functest_utils.push_results_to_db(data['test_project'],
                                          data['case_name'],
                                          None,
                                          start_time,
                                          stop_time,
                                          status,
                                          data)

    except:
        print("Error pushing ODL results into DB '%s'" % sys.exc_info()[0])