Example #1
def integration_tests():
    """
    Loops over all valid tests as defined by ./validate_all.py. Runs them one by one and gives an update of the statuses at the end.
    """
    global test_count
    valid = valid_tests()
    if not valid:
        print pretty.WARNING("Integration tests skipped")
        return 0

    test_count += len(valid)
    print pretty.HEADER("Starting " + str(len(valid)) + " integration test(s)")
    processes = []

    fail_count = 0
    for path in valid:
        processes.append(Test(path, clean = args.clean).start())

    # Collect test results
    print pretty.HEADER("Collecting integration test results")

    for p in processes:
        fail_count += 1 if p.wait_status() else 0

    # Exit early if any tests failed
    if fail_count and args.fail:
        print pretty.FAIL(str(fail_count) + " integration tests failed")
        sys.exit(fail_count)

    return fail_count
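The Test class these runners call into is defined elsewhere in the project and is not shown here. For context, a minimal sketch of a wrapper exposing the same start()/wait_status() interface, built on subprocess, could look like the following; the class name, default command and constructor details are assumptions, not the project's actual implementation.

import subprocess

class TestSketch(object):
    """Hypothetical stand-in for the Test class used in these examples."""

    def __init__(self, path, command=None, name=None, clean=False):
        self._path = path
        self._command = command or ["python", "test.py"]  # assumed default command
        self._name = name or path
        self._clean = clean
        self._proc = None

    def start(self):
        # Optionally clean first, then launch asynchronously so several
        # tests can run in parallel, as Examples #1 and #3 do.
        if self._clean:
            subprocess.call(["make", "clean"], cwd=self._path)
        self._proc = subprocess.Popen(self._command, cwd=self._path)
        return self

    def wait_status(self):
        # Block until the process exits; a non-zero code counts as a failure.
        return self._proc.wait()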
Example #2
def load_configs(config_path="."):
    global vms

    # Clear out the default unconfigured vm
    if (not vms[0]._config):
        vms = []

    print color.HEADER("IncludeOS vmrunner loading VM configs")

    schema_path = package_path + "/vm.schema.json"

    print INFO, "Validating JSON according to schema ", schema_path

    validate_vm.load_schema(schema_path)
    validate_vm.load_configs(config_path)

    if validate_vm.valid_vms:
        print INFO, "Loaded VM specification(s) from JSON"
        for spec in validate_vm.valid_vms:
            print INFO, "Found VM spec: "
            print color.DATA(str(spec))
            vms.append(vm(spec))

    else:
        print color.WARNING(nametag), "No VM specification JSON found, trying default config"
        vms.append(vm(default_config))

    return vms
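load_configs expects JSON specs that validate against vm.schema.json; judging from the default_spec values further down (Examples #8 and #9), a minimal spec only names the boot image. A hypothetical way to produce such a config and hand it to the loader, assuming the validator picks up a vm.json file in config_path:

import json

# Hypothetical minimal spec, mirroring the default_spec seen in Example #9.
# A real config must validate against vm.schema.json and may need more keys;
# the vm.json filename is an assumption about what the validator looks for.
with open("vm.json", "w") as f:
    json.dump({"image": "service.img"}, f)

vms = load_configs(".")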
Example #3
def integration_tests(tests):
    """ Function that runs the tests that are passed to it.
    Filters out any invalid tests before running

    Arguments:
        tests: List containing test objects to be run

    Returns:
        integer: Number of tests that failed
    """

    # Only run the valid tests
    tests = [ x for x in tests if not x.skip_ and x.type_ == 'integration' ]

    # Print info before starting to run
    print pretty.HEADER("Starting " + str(len(tests)) + " integration test(s)")
    for test in tests:
        print pretty.INFO("Test"), "starting", test.name_

    processes = []
    fail_count = 0
    global test_count
    test_count += len(tests)

    # Start running tests in parallel
    for test in tests:
        processes.append(test.start())

    # Collect test results
    print pretty.HEADER("Collecting integration test results")

    for p in processes:
        fail_count += 1 if p.wait_status() else 0

    # Exit early if any tests failed
    if fail_count and args.fail:
        print pretty.FAIL(str(fail_count) + " integration tests failed")
        sys.exit(fail_count)

    return fail_count
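These runners all consult a module-level args object with skip, fail and clean attributes. Only the attribute names appear in the snippets; one plausible way to build such an object with argparse (the flag spellings and help texts are assumptions) is:

import argparse

parser = argparse.ArgumentParser(description="Test runner options (sketch)")
parser.add_argument("--skip", nargs="*", default=[],
                    help="categories to skip, e.g. unit, stress, examples, misc")
parser.add_argument("--fail", action="store_true",
                    help="exit as soon as any test fails")
parser.add_argument("--clean", action="store_true",
                    help="clean before building each test")
args = parser.parse_args()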
Example #4
def unit_tests():
  """Perform unit tests"""
  global test_count
  test_count += 1
  if ("unit" in args.skip):
    print pretty.WARNING("Unit tests skipped")
    return 0
  print pretty.HEADER("Building and running unit tests")
  build_status = Test(".", name="Unit test build", command=["make"], clean = args.clean).start().wait_status()
  unit_status = Test(".", name="Unit tests", command = ["./test.lest"]).start().wait_status()

  if (build_status or unit_status) and args.fail:
    print pretty.FAIL("Unit tests failed")
    sys.exit(max(build_status, unit_status))

  return max(build_status, unit_status)
Example #5
def examples_working():
  global test_count
  if ("examples" in args.skip):
    print pretty.WARNING("Examples test skipped")
    return 0

  examples_dir = '../examples'
  dirs = os.walk(examples_dir).next()[1]
  print pretty.HEADER("Building " + str(len(dirs)) + " examples")
  test_count += len(dirs)
  fail_count = 0
  for directory in dirs:
    example = examples_dir + "/" + directory
    print "Building Example ", example
    build = Test(example, command = ["make"], name = directory + " build").start().wait_status()
    run = 0 #TODO: Make a 'test' folder for each example, containing test.py, vm.json etc.
    fail_count += 1 if build or run else 0
  return fail_count
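Note that os.walk(examples_dir).next()[1] is Python 2 syntax: it takes the first triple yielded by the walk generator and keeps its list of immediate subdirectories. Using the built-in next() expresses the same thing in a form that works on Python 2.6+ and Python 3:

import os

def subdirectories(path):
    # The first item yielded by os.walk is (path, dirnames, filenames);
    # index 1 is the list of immediate subdirectories.
    return next(os.walk(path))[1]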
Example #6
def stress_test():
  """Perform stresstest"""
  global test_count
  test_count += 1
  if ("stress" in args.skip):
    print pretty.WARNING("Stress test skipped")
    return 0

  if (not validate_test.validate_path("stress")):
    raise Exception("Stress test failed validation")

  print pretty.HEADER("Starting stress test")
  stress = Test("stress", clean = args.clean).start().wait_status()

  if (stress and args.fail):
    print pretty.FAIL("Stress test failed")
    sys.exit(stress)

  return 1 if stress else 0
Example #7
def misc_working():
    global test_count
    if ("misc" in args.skip):
        print pretty.WARNING("Misc test skipped")
        return 0

    misc_dir = 'misc'
    dirs = os.walk(misc_dir).next()[1]
    dirs.sort()
    print pretty.HEADER("Building " + str(len(dirs)) + " misc")
    test_count += len(dirs)
    fail_count = 0
    for directory in dirs:
        misc = misc_dir + "/" + directory
        print "Building misc ", misc
        build = Test(misc, command=['./test.sh'],
                     name=directory).start().wait_status()
        run = 0  #TODO: Make a 'test' folder for each miscellaneous test, containing test.py, vm.json etc.
        fail_count += 1 if build or run else 0
    return fail_count
Example #8
        self._hyper.stop()
        if hasattr(self, "_timer") and self._timer:
            self._timer.cancel()
        return self

    def wait(self):
        if hasattr(self, "_timer") and self._timer:
            self._timer.join()
        self._hyper.wait()
        return self._exit_status

    def poll(self):
        return self._hyper.poll()


print color.HEADER("IncludeOS vmrunner initializing tests")
print color.INFO(nametag), "Validating test service"
validate_test.load_schema(
    os.environ.get(
        "INCLUDEOS_SRC",
        os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(
            __file__))).split('/test')[0]) + "/test/vm.schema.json")
validate_test.has_required_stuff(".")

default_spec = {"image": "test.img"}

# Provide a list of VM's with validated specs
vms = []

if validate_test.valid_vms:
    print
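The _timer that stop() cancels and wait() joins in this fragment behaves like a threading.Timer used as a watchdog. A sketch of how such a timeout could be armed on the VM object; the method name and the idea of calling stop() on expiry are assumptions beyond what the fragment shows.

import threading

# Hypothetical method on the VM class: force a stop if the guest runs
# longer than `seconds`; stop()/wait() above cancel and join this timer.
def _arm_timeout(self, seconds):
    self._timer = threading.Timer(seconds, self.stop)
    self._timer.daemon = True
    self._timer.start()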
Example #9
        if (self.poll() is None):
            self.stop()

        # We might have an exit status, e.g. set by a callback noticing something wrong with VM output
        if self._exit_status:
            self.exit(self._exit_status, self._exit_msg)

        # Process might have ended prematurely
        elif self.poll():
            self.exit(self._hyper.poll(), self._hyper.get_error_messages())

        # If everything went well we can return
        return self


print color.HEADER("IncludeOS vmrunner loading VM configs")

schema_path = package_path + "/vm.schema.json"
print color.INFO(nametag), "Validating JSON according to schema ",schema_path
validate_vm.load_schema(schema_path)
validate_vm.has_required_stuff(".")

default_spec = {"image" : "service.img"}

# Provide a list of VM's with validated specs
vms = []

if validate_vm.valid_vms:
    print color.INFO(nametag), "Loaded VM specification(s) from JSON"
    for spec in validate_vm.valid_vms:
        print color.INFO(nametag), "Found VM spec: "