Example #1
    def check_points_unchanged(self, target):
        """
        Check if points are unchanged
        """
        new_target = {}
        if platform.system() == 'Windows':
            mbeds = mbed_lstools.create()
            if target['serial_port'] != mbeds.get_mbed_com_port(
                    target['target_id']):
                new_target['serial_port'] = mbeds.get_mbed_com_port(
                    target['target_id'])

            return self._get_target(new_target=new_target, target=target)

        if platform.system() == 'Darwin':
            return self._get_target(new_target, target)

        return_code = self._check_serial_point_duplicates(
            target=target, new_target=new_target)
        if return_code:
            return return_code

        return_code = self._check_device_point_duplicates(
            target=target, new_target=new_target)
        if return_code:
            return return_code

        return_code = self._verify_mount_point(target=target,
                                               new_target=new_target)
        if return_code:
            return return_code

        return self._get_target(new_target, target)
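
All of these snippets revolve around the device dictionaries produced by mbed-ls. As a point of reference, here is a minimal sketch of that flow; the key names are the ones used throughout this listing, while the printed values are purely illustrative:

import mbed_lstools

# Enumerate the boards currently attached to the host.
mbeds = mbed_lstools.create()
for device in mbeds.list_mbeds():
    # Each entry is a plain dict describing one board; keys used throughout
    # this listing include 'platform_name', 'target_id', 'mount_point'
    # and 'serial_port'.
    print("%s (%s) mounted at %s, console on %s" % (device['platform_name'],
                                                    device['target_id'],
                                                    device['mount_point'],
                                                    device['serial_port']))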
Example #2
 def test_erase_with_all(self):
     eraser = Erase()
     mbeds = mbed_lstools.create()
     device_amount_before = len(mbeds.list_mbeds())
     ret = eraser.erase(target_id='all', method='simple')
     self.assertEqual(ret, 0)
     self.assertEqual(device_amount_before, len(mbeds.list_mbeds()))
Example #3
    def test_run_fail_binary(self, mock_stdout):
        mbeds = mbed_lstools.create()
        targets = mbeds.list_mbeds()
        target_id = None
        mount_point = None
        fail_bin_path = os.path.join('test', 'fail.bin')
        for target in targets:
            if target['platform_name'] == 'K64F':
                if 'target_id' in target and 'mount_point' in target:
                    target_id = target['target_id']
                    mount_point = target['mount_point']
                    break

        with open(fail_bin_path, 'w') as new_file:
            new_file.write("0000000000000000000000000000000000")

        with self.assertRaises(FlashError) as cm:
            flasher = Flash()
            flasher.flash(build=fail_bin_path,
                          target_id=target_id,
                          platform_name='K64F',
                          device_mapping_table=None,
                          method='simple')

        if platform.system() == 'Windows':
            os.system('del /F %s' % os.path.join(mount_point, 'FAIL.TXT'))
            os.system('del %s' % os.path.join(os.getcwd(), fail_bin_path))
        else:
            os.system('rm -f %s' % os.path.join(mount_point, 'FAIL.TXT'))
            os.system('rm %s' % os.path.join(os.getcwd(), fail_bin_path))

        self.assertEqual(cm.exception.return_code,
                         EXIT_CODE_DAPLINK_USER_ERROR)
Example #4
 def get_available_devices():
     """
     :return: list of available devices
     """
     mbeds = mbed_lstools.create()
     # device_type introduced in mbedls version 1.4.0
     return mbeds.list_mbeds(filter_function=lambda m: m['device_type'] == 'jlink')
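
As the comment above notes, the 'device_type' key only exists from mbed-ls 1.4.0 onwards, so the lambda can raise KeyError on older installations. A defensive sketch (the function name here is hypothetical, not part of the original code) would use dict.get() instead:

def get_available_jlink_devices():
    # Hypothetical defensive variant: tolerate mbed-ls releases older than
    # 1.4.0, where 'device_type' is missing from the device dictionaries.
    mbeds = mbed_lstools.create()
    return mbeds.list_mbeds(
        filter_function=lambda m: m.get('device_type') == 'jlink')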
Example #5
 def test_run_fail_file(self, mock_stdout):
     mbeds = mbed_lstools.create()
     targets = mbeds.list_mbeds()
     mount_point = None
     target_to_test = None
     fail_txt_path = os.path.join('test', 'failing.txt')
     for target in targets:
         if target['platform_name'] == 'K64F':
             if 'target_id' in target and 'mount_point' in target:
                 target_to_test = target
                 mount_point = target['mount_point']
                 break
     if target_to_test:
         flasher = FlasherMbed()
         flasher.FLASHING_VERIFICATION_TIMEOUT = 2
         with open(fail_txt_path, 'w') as new_file:
             new_file.write("0000000000000000000000000000000000")
         ret = flasher.flash(source=fail_txt_path,
                             target=target_to_test,
                             method='simple',
                             no_reset=False)
         if platform.system() == 'Windows':
             os.system('del %s' % os.path.join(mount_point, 'failing.txt'))
             os.system('del %s' % os.path.join(os.getcwd(), fail_txt_path))
         else:
             os.system('rm %s' % os.path.join(mount_point, 'failing.txt'))
             os.system('rm %s' % os.path.join(os.getcwd(), fail_txt_path))
         self.assertEqual(ret, -15)
     if mock_stdout:
         pass
Example #6
    def test_run_fail_file(self, mock_stdout):
        mbeds = mbed_lstools.create()
        targets = mbeds.list_mbeds()
        mount_point = None
        target_to_test = None
        fail_txt_path = os.path.join('test', 'failing.txt')
        for target in targets:
            if target['platform_name'] == 'K64F':
                if 'target_id' in target and 'mount_point' in target:
                    target_to_test = target
                    mount_point = target['mount_point']
                    break

        with open(fail_txt_path, 'w') as new_file:
            new_file.write("0000000000000000000000000000000000")

        with self.assertRaises(FlashError) as cm:
            flasher = FlasherMbed()
            flasher.flash(source=fail_txt_path,
                          target=target_to_test,
                          method='simple',
                          no_reset=False)

        if platform.system() == 'Windows':
            os.system('del %s' % os.path.join(mount_point, 'failing.txt'))
            os.system('del %s' % os.path.join(os.getcwd(), fail_txt_path))
        else:
            os.system('rm %s' % os.path.join(mount_point, 'failing.txt'))
            os.system('rm %s' % os.path.join(os.getcwd(), fail_txt_path))

        self.assertEqual(cm.exception.return_code,
                         EXIT_CODE_FILE_STILL_PRESENT)
Example #7
def get_serial_port_for_mbed(target_id):
    """
    Gets serial port address for the device with Mbed LS tool
    :param target_id: mbed device target_id
    :return: Serial port address
    """
    selected_mbed = None
    mbeds = mbed_lstools.create()
    mbed_devices = mbeds.list_mbeds(unique_names=True)
    if target_id:
        for dev in mbed_devices:
            if dev['target_id'] == target_id:
                selected_mbed = dev
                break
    else:
        if mbed_devices:
            log.debug('Found {} mbed device(s), taking the first one for test - '
                      'give "--target_id" argument to get specific device'.format(len(mbed_devices)))
            selected_mbed = mbed_devices[0]

    if selected_mbed:
        log.info('Using "{}: {}" device at "{}" port for tests'.format(selected_mbed['platform_name_unique'],
                                                                       selected_mbed['target_id'],
                                                                       selected_mbed['serial_port']))
        return selected_mbed['serial_port']
    log.error('Could not find any mbed devices, please make sure you have connected one with power on')
    return None
Example #8
def get_interface_version(mount_point):
    """ Function returns interface version from the target mounted on the specified mount point
    
        mount_point can be acquired via the following:
            muts = get_autodetected_MUTS_list()
            for mut in muts.values():
                mount_point = mut['disk']
                    
        @param mount_point Name of disk where platform is connected to host machine.
    """
    if get_module_avail('mbed_lstools'):
        try:
            mbeds = mbed_lstools.create()
            details_txt = mbeds.get_details_txt(mount_point)
            
            if 'Interface Version' in details_txt:
                return details_txt['Interface Version']
            
            elif 'Version' in details_txt:
                return details_txt['Version']
            
        except:
            return 'unknown'
        
    return 'unknown'
Example #9
 def get_available_devices():
     """
     :return: list of available devices
     """
     mbeds = mbed_lstools.create()
     # device_type introduced in mbedls version 1.4.0
     return mbeds.list_mbeds(filter_function=FlasherSTLink.can_flash)
Example #10
    def check_serial_port_ready(self, serial_port, target_id=None, timeout=60):
        """! Function checks (using mbed-ls) and updates serial port name information for DUT with specified target_id.
        If no target_id is specified function returns old serial port name.
        @param serial_port Current serial port name
        @param target_id Target ID of the device under test whose serial port will be checked and updated if needed
        @param timeout Serial port polling timeout in seconds
        @return Serial port name read from mbed-ls, or None if it could not be resolved within the timeout
        """
        # If serial port changed (check using mbed-ls), use new serial port
        new_serial_port = None

        if target_id:
            # Sometimes OSes take a long time to mount devices (up to one minute).
            # Current polling time: 120 x 500 ms = 1 minute
            self.print_plugin_info("Waiting up to %d sec for '%s' serial port (current is '%s')..."% (timeout, target_id, serial_port))
            timeout_step = 0.5
            timeout = int(timeout / timeout_step)
            for i in range(timeout):
                # mbed_lstools.create() should be done inside the loop. Otherwise it will loop on same data.
                mbeds = mbed_lstools.create()
                mbeds_by_tid = mbeds.list_mbeds_by_targetid()   # key: target_id, value mbedls_dict()
                if target_id in mbeds_by_tid:
                    if 'serial_port' in mbeds_by_tid[target_id]:
                        if mbeds_by_tid[target_id]['serial_port']:
                            # Only assign if serial port is known (not None)
                            new_serial_port = mbeds_by_tid[target_id]['serial_port']
                            if new_serial_port != serial_port:
                                # Serial port changed, update to new serial port from mbed-ls
                                self.print_plugin_info("Serial port for tid='%s' changed from '%s' to '%s'..." % (target_id, serial_port, new_serial_port))
                            break
                sleep(timeout_step)
        else:
            new_serial_port = serial_port

        return new_serial_port
Example #11
def get_mount_point(board_id):
    lstools = mbed_lstools.create()
    mbed_list = lstools.list_mbeds()
    for mbed in mbed_list:
        if mbed['target_id'] == board_id:
            return mbed['mount_point']
    # No matching board is attached; raise instead of silently returning None
    raise Exception("Board %s not found" % board_id)
Example #12
        def check_flash_error(target_id, disk, initial_remount_count):
            """! Check for flash errors
            @return Returns false if FAIL.TXT present, else true
            """
            if not target_id:
                self.logger.prn_wrn(
                    "Target ID not found: Skipping flash check and retry")
                return True

            bad_files = set(['FAIL.TXT'])
            # Re-try at max 5 times with 0.5 sec in delay
            for i in range(5):
                # mbed_lstools.create() should be done inside the loop. Otherwise it will loop on same data.
                mbeds = mbed_lstools.create()
                mbed_list = mbeds.list_mbeds()  #list of mbeds present
                # get first item in list with a matching target_id, if present
                mbed_target = next(
                    (x for x in mbed_list if x['target_id'] == target_id),
                    None)

                if mbed_target is not None:
                    if 'mount_point' in mbed_target and mbed_target[
                            'mount_point'] is not None:
                        if initial_remount_count is not None:
                            new_remount_count = get_remount_count(disk)
                            if new_remount_count is not None and new_remount_count == initial_remount_count:
                                sleep(0.5)
                                continue

                        common_items = []
                        try:
                            items = set([
                                x.upper()
                                for x in os.listdir(mbed_target['mount_point'])
                            ])
                            common_items = bad_files.intersection(items)
                        except OSError as e:
                            print("Failed to enumerate disk files, retrying")
                            continue

                        for common_item in common_items:
                            full_path = os.path.join(
                                mbed_target['mount_point'], common_item)
                            self.logger.prn_err("Found %s" % (full_path))
                            bad_file_contents = "[failed to read bad file]"
                            try:
                                with open(full_path, "r") as bad_file:
                                    bad_file_contents = bad_file.read()
                            except IOError as error:
                                self.logger.prn_err("Error opening '%s': %s" %
                                                    (full_path, error))

                            self.logger.prn_err("Error file contents:\n%s" %
                                                bad_file_contents)
                        if common_items:
                            return False
                sleep(0.5)
            return True
Example #13
def get_all_attached_daplink_boards():
    all_boards = []
    lstools = mbed_lstools.create()
    mbed_list = lstools.list_mbeds()
    for mbed in mbed_list:
        unique_id = mbed['target_id']
        board = DaplinkBoard(unique_id)
        all_boards.append(board)
    return all_boards
Example #14
def get_all_attached_daplink_boards():
    all_boards = []
    lstools = mbed_lstools.create()
    mbed_list = lstools.list_mbeds()
    for mbed in mbed_list:
        unique_id = mbed['target_id']
        board = DaplinkBoard(unique_id)
        all_boards.append(board)
    return all_boards
Example #15
 def refresh_target_once(target_id):
     """
     Refresh target once with help of mbedls.
     :param target_id: target_id to be searched for
     :return: list of targets
     """
     mbedls = mbed_lstools.create()
     return mbedls.list_mbeds(
         filter_function=lambda m: m["target_id"] == target_id)
Example #16
    def check_mount_point_ready(self, destination_disk, init_delay=0.2, loop_delay=0.25, target_id=None, timeout=60):
        """! Waits until destination_disk is ready and can be accessed by e.g. copy commands
        @return True if mount point was ready in given time, False otherwise
        @param destination_disk Mount point (disk) which will be checked for readiness
        @param init_delay - Initial delay time before first access check
        @param loop_delay - polling delay for access check
        @param timeout Mount point polling timeout in seconds
        """

        if target_id:
            # Wait for mount point to appear with mbed-ls
            # and if it does check if mount point for target_id changed
            # If mount point changed, use new mount point and check if it's ready (os.access)
            new_destination_disk = destination_disk

            # Sometimes OSes take a long time to mount devices (up to one minute).
            # Current polling time: 120 x 500 ms = 1 minute
            self.print_plugin_info("Waiting up to %d sec for '%s' mount point (current is '%s')..."% (timeout, target_id, destination_disk))
            timeout_step = 0.5
            timeout = int(timeout / timeout_step)
            for i in range(timeout):
                # mbed_lstools.create() should be done inside the loop.
                # Otherwise it will loop on same data.
                mbeds = mbed_lstools.create()
                mbed_list = mbeds.list_mbeds() #list of mbeds present
                # get first item in list with a matching target_id, if present
                mbed_target = next((x for x in mbed_list if x['target_id']==target_id), None)

                if mbed_target is not None:
                    # Only assign if mount point is present and known (not None)
                    if 'mount_point' in mbed_target and mbed_target['mount_point'] is not None:
                        new_destination_disk = mbed_target['mount_point']
                        break
                sleep(timeout_step)

            if new_destination_disk != destination_disk:
                # Mount point changed, update to new mount point from mbed-ls
                self.print_plugin_info("Mount point for '%s' changed from '%s' to '%s'..."% (target_id, destination_disk, new_destination_disk))
                destination_disk = new_destination_disk

        result = True
        # Check if mount point we've promoted to be valid one (by optional target_id check above)
        # Let's wait for 30 * loop_delay + init_delay max
        if not access(destination_disk, F_OK):
            self.print_plugin_info("Waiting for mount point '%s' to be ready..."% destination_disk, NL=False)
            sleep(init_delay)
            for i in range(30):
                if access(destination_disk, F_OK):
                    result = True
                    break
                sleep(loop_delay)
                self.print_plugin_char('.')
            else:
                self.print_plugin_error("mount {} is not accessible ...".format(destination_disk))
                result = False
        return (result, destination_disk)
Example #17
class MbedDeviceManager:
    def __init__(self):
        try:
            import mbed_lstools
        except ImportError as e:
            print("Error: Can't import 'mbed_lstools' module: %s"% e)
        mbed = mbed_lstools.create()
        self.mbed_list = mbed.list_mbeds()
        for dev in self.mbed_list:
            dev['Available'] = True
Example #18
 def read_mbed_devices(self):
     gt_log("auto-detecting connected devices...")
     self.mbeds = mbed_lstools.create()
     self.mbeds_list = self.mbeds.list_mbeds()
     self.platform_list = self.mbeds.list_platforms_ext()
     for mbed in self.platform_list:
         n = int(self.platform_list[mbed])
         gt_log_tab("found %d platform%s '%s'"% (n, '' if n==1 else 's', mbed))
     if not self.platform_list:
         gt_log_warn("failed to auto-detect any compatible device")
Example #19
 def test_reset_with_target_id_list(self):
     mbeds = mbed_lstools.create()
     devices = mbeds.list_mbeds()
     resetter = Reset()
     ret = None
     for item in devices:
         if item['target_id']:
             ret = resetter.reset(target_id=[item['target_id']],
                                  method='simple')
             break
     self.assertEqual(ret, 0)
Example #20
    def get_supported_targets():
        """
        :return: supported JLink types
        """
        if not FlasherJLink.supported_targets:
            mbeds = mbed_lstools.create()
            # @todo this is a workaround until mbed-ls provides a public api
            db_items = list(mbeds.plat_db.items(device_type='jlink'))
            FlasherJLink.supported_targets = sorted([i[1]["platform_name"] for i in db_items])

        return FlasherJLink.supported_targets
Example #21
 def __init__(self, platforms_supported: List[str], binaries: Mapping[str,
                                                                      str]):
     mbed_ls = mbed_lstools.create()
     boards = mbed_ls.list_mbeds(filter_function=lambda m: m[
         'platform_name'] in platforms_supported)
     self.board_description = boards
     self.binaries = binaries
     self.allocation = []  # type: List[BoardAllocation]
     self.flasher = None
     for desc in boards:
         self.allocation.append(BoardAllocation(desc))
Example #22
 def __init__(self, platforms_supported: List[str],
              serial_inter_byte_delay: float, baudrate: int):
     mbed_ls = mbed_lstools.create(list_unmounted=True)
     boards = mbed_ls.list_mbeds(filter_function=lambda m: m[
         'platform_name'] in platforms_supported)
     self.board_description = boards
     self.allocation = []
     self.serial_inter_byte_delay = serial_inter_byte_delay
     self.baudrate = baudrate
     for desc in boards:
         self.allocation.append(BoardAllocation(desc))
Example #23
 def test_erase_with_target_id(self):
     mbeds = mbed_lstools.create()
     devices = mbeds.list_mbeds()
     eraser = Erase()
     ret = None
     for item in devices:
         if item['target_id']:
             ret = eraser.erase(target_id=item['target_id'],
                                method='simple')
             break
     self.assertEqual(ret, 0)
     self.assertEqual(len(devices), len(mbeds.list_mbeds()))
Example #24
def get_all_attached_daplink_boards():
    all_boards = []
    lstools = mbed_lstools.create()
    mbed_list = lstools.list_mbeds()
    for mbed in mbed_list:
        unique_id = mbed['target_id']
        board = DaplinkBoard(unique_id)
        if board._mode is not None: #Valid daplink should have set this mode
            all_boards.append(board)
        else:
            print("Warning: DAPLink tests cannot be done on board %s" % board.unique_id)
    return all_boards
Example #25
def get_all_attached_daplink_boards():
    all_boards = []
    lstools = mbed_lstools.create()
    mbed_list = lstools.list_mbeds()
    for mbed in mbed_list:
        unique_id = mbed['target_id']
        board = DaplinkBoard(unique_id)
        if board._mode is not None:  #Valid daplink should have set this mode
            all_boards.append(board)
        else:
            print("Warning: DAPLink tests cannot be done on board %s" %
                  board.unique_id)
    return all_boards
Example #26
    def get_supported_targets():
        """
        Load target mapping information
        """
        if not FlasherMbed.supported_targets:
            mbeds = mbed_lstools.create()

            # this should work for >=v1.3.0
            # @todo this is a workaround until mbed-ls provides a public
            #       API to get the list of supported platform names
            FlasherMbed.supported_targets = sorted(set(name for id, name in mbeds.plat_db.items()))

        return FlasherMbed.supported_targets
Example #27
    def check_mount_point_ready(self, destination_disk, init_delay=0.2, loop_delay=0.25, target_id=None, timeout=60):
        """! Waits until destination_disk is ready and can be accessed by e.g. copy commands
        @return True if mount point was ready in given time, False otherwise
        @param destination_disk Mount point (disk) which will be checked for readiness
        @param init_delay - Initial delay time before first access check
        @param loop_delay - polling delay for access check
        @param timeout Mount point polling timeout in seconds
        """

        if target_id:
            # Wait for mount point to appear with mbed-ls
            # and if it does check if mount point for target_id changed
            # If mount point changed, use new mount point and check if it's ready (os.access)
            new_destination_disk = destination_disk

            # Sometimes OSes take a long time to mount devices (up to one minute).
            # Current polling time: 120 x 500 ms = 1 minute
            self.print_plugin_info("Waiting up to %d sec for '%s' mount point (current is '%s')..."% (timeout, target_id, destination_disk))
            timeout_step = 0.5
            timeout = int(timeout / timeout_step)
            for i in range(timeout):
                # mbed_lstools.create() should be done inside the loop.
                # Otherwise it will loop on same data.
                mbeds = mbed_lstools.create()
                mbeds_by_tid = mbeds.list_mbeds_by_targetid()   # key: target_id, value mbedls_dict()
                if target_id in mbeds_by_tid:
                    if 'mount_point' in mbeds_by_tid[target_id]:
                        if mbeds_by_tid[target_id]['mount_point']:
                            # Only assign if mount point is known (not None)
                            new_destination_disk = mbeds_by_tid[target_id]['mount_point']
                            break
                sleep(timeout_step)

            if new_destination_disk != destination_disk:
                # Mount point changed, update to new mount point from mbed-ls
                self.print_plugin_info("Mount point for '%s' changed from '%s' to '%s'..."% (target_id, destination_disk, new_destination_disk))
                destination_disk = new_destination_disk

        result = True
        # Check if mount point we've promoted to be valid one (by optional target_id check above)
        # Let's wait for 30 * loop_delay + init_delay max
        if not access(destination_disk, F_OK):
            self.print_plugin_info("Waiting for mount point '%s' to be ready..."% destination_disk, NL=False)
            sleep(init_delay)
            result = False
            for i in range(30):
                if access(destination_disk, F_OK):
                    result = True
                    break
                sleep(loop_delay)
                self.print_plugin_char('.')
        return (result, destination_disk)
Example #28
class MainTestCaseHW(unittest.TestCase):
    """ Basic true asserts to see that testing is executed
    """

    mbeds = mbed_lstools.create()

    def setUp(self):
        self.logging_patcher = mock.patch("mbed_flasher.main.logging")
        mock_logging = self.logging_patcher.start()
        mock_logging.getLogger = \
            mock.MagicMock(return_value=mock.Mock(spec=logging.Logger))
        # Mock logging
        # pylint: disable=no-member
        mock_logging.disable(logging.CRITICAL)
        Helper(platform_name='K64F', allowed_files=['DETAILS.TXT', 'MBED.HTM']).clear()

    def tearDown(self):
        Helper(platform_name='K64F', allowed_files=['DETAILS.TXT', 'MBED.HTM']).clear()

    def test_parser_invalid(self):
        with self.assertRaises(SystemExit) as context:
            FlasherCLI()
        self.assertEqual(context.exception.code, EXIT_CODE_MISUSE_CMD)

    # test name is meaningful
    # pylint: disable=invalid-name
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_reset_wrong_tid_with_device(self, mock_stdout):
        fcli = FlasherCLI(["reset", "--tid", "555"])
        with self.assertRaises(GeneralFatalError) as cm:
            fcli.execute()

        self.assertEqual(cm.exception.return_code, EXIT_CODE_COULD_NOT_MAP_DEVICE)
        six.assertRegex(self, mock_stdout.getvalue(),
                        r"Could not find given target_id from attached devices"
                        r"\nAvailable target_ids:\n\[u?(\'[0-9a-fA-F]+\')"
                        r"(,\su?\'[0-9a-fA-F]+\')*\]",
                        "Regex match failed")

    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_erase_wrong_tid_with_device(self, mock_stdout):
        fcli = FlasherCLI(["erase", "--tid", "555"])
        with self.assertRaises(GeneralFatalError) as cm:
            fcli.execute()

        self.assertEqual(cm.exception.return_code, EXIT_CODE_COULD_NOT_MAP_DEVICE)
        six.assertRegex(self, mock_stdout.getvalue(),
                        r"Could not find given target_id from attached devices"
                        r"\nAvailable target_ids:\n\[u?(\'[0-9a-fA-F]+\')"
                        r"(,\su?\'[0-9a-fA-F]+\')*\]",
                        "Regex match failed")
Example #29
    def _get_targets(self):
        """
        Return targets matching passed platform name
        """
        mbeds = mbed_lstools.create()
        targets = mbeds.list_mbeds()
        selected_targets = []

        if targets:
            for target in targets:
                if target['platform_name'] == self.platform_name:
                    selected_targets.append(target)

        return selected_targets
Example #30
def _get_board_endpoints(unique_id):
    """Return a tuple of unique_id, serial_port, mount_point"""
    lstools = mbed_lstools.create()
    mbed_list = lstools.list_mbeds()

    host_id = _unique_id_to_host_id(unique_id)
    for mbed in mbed_list:
        mbed_unique_id = mbed['target_id']
        mbed_serial_port = mbed['serial_port']
        mbed_mount_point = mbed['mount_point']
        mbed_host_id = _unique_id_to_host_id(mbed_unique_id)
        if mbed_host_id == host_id:
            return mbed_unique_id, mbed_serial_port, mbed_mount_point
    return None
Example #31
def _get_board_endpoints(unique_id):
    """Return a tuple of unique_id, serial_port, mount_point"""
    lstools = mbed_lstools.create()
    mbed_list = lstools.list_mbeds()

    host_id = _unique_id_to_host_id(unique_id)
    for mbed in mbed_list:
        mbed_unique_id = mbed['target_id']
        mbed_serial_port = mbed['serial_port']
        mbed_mount_point = mbed['mount_point']
        mbed_host_id = _unique_id_to_host_id(mbed_unique_id)
        if mbed_host_id == host_id:
            return mbed_unique_id, mbed_serial_port, mbed_mount_point
    return None
Example #32
    def test_verify_hw_flash_no_reset(self):
        mbeds = mbed_lstools.create()
        targets = mbeds.list_mbeds()
        flasher = Flash()
        resetter = Reset()
        target_id = None
        serial_port = None
        for target in targets:
            if target['platform_name'] == 'K64F':
                if 'serial_port' in target and 'target_id' in target:
                    target_id = target['target_id']
                    serial_port = target['serial_port']
                    break
        if target_id and serial_port:
            second_binary = find_second_binary()
            self.assertIsNotNone(second_binary, 'Second binary not found')
            ret = flasher.flash(build=second_binary,
                                target_id=target_id,
                                platform_name='K64F',
                                device_mapping_table=False,
                                method='simple')
            self.assertEqual(ret, 0)
            if not verify_output_per_device(serial_port, 'help', 'echo'):
                self.assertEqual(
                    verify_output_per_device(serial_port, 'help', 'echo'),
                    True)

            ret = flasher.flash(build=second_binary,
                                target_id=target_id,
                                platform_name='K64F',
                                device_mapping_table=False,
                                method='simple',
                                no_reset=True)
            self.assertEqual(ret, 0)
            self.assertEqual(
                verify_output_per_device(serial_port, 'help', 'echo'), False)
            ret = resetter.reset(target_id=target_id, method='simple')
            self.assertEqual(ret, 0)
            if not verify_output_per_device(serial_port, 'help', 'echo'):
                self.assertEqual(
                    verify_output_per_device(serial_port, 'help', 'echo'),
                    True)
            ret = flasher.flash(build=self.bin_path,
                                target_id=target_id,
                                platform_name='K64F',
                                device_mapping_table=False,
                                method='simple')
            self.assertEqual(ret, 0)
            self.assertEqual(
                verify_output_per_device(serial_port, 'help', 'echo'), False)
Example #33
 def __init__(self, platforms_supported: List[str],
              binaries: Mapping[str, str], serial_inter_byte_delay: float,
              baudrate: int, command_delay: float):
     mbed_ls = mbed_lstools.create()
     boards = mbed_ls.list_mbeds(filter_function=lambda m: m[
         'platform_name'] in platforms_supported)
     self.board_description = boards
     self.binaries = binaries
     self.allocation = []  # type: List[BoardAllocation]
     self.flasher = None
     self.serial_inter_byte_delay = serial_inter_byte_delay
     self.baudrate = baudrate
     self.command_delay = command_delay
     for desc in boards:
         self.allocation.append(BoardAllocation(desc))
Example #34
    def refresh_target(target_id):
        """
        Refresh target with help of mbedls.
        :param target_id: target_id to be searched for
        :return: target or None
        """
        mbedls = mbed_lstools.create()

        for _ in range(REFRESH_TARGET_RETRIES):
            mbeds = mbedls.list_mbeds(
                filter_function=lambda m: m["target_id"] == target_id)
            if mbeds:
                return mbeds[0]

            time.sleep(REFRESH_TARGET_SLEEP)

        return None
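
The two module-level constants used by the loop above are defined elsewhere in the original module; for a self-contained reading, assumed placeholder values would look like this:

REFRESH_TARGET_RETRIES = 10  # assumption: how many times to re-run mbed-ls
REFRESH_TARGET_SLEEP = 1     # assumption: seconds to wait between attempts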
Example #35
 def test_hw_flash(self):
     mbeds = mbed_lstools.create()
     targets = mbeds.list_mbeds()
     target_id = None
     for target in targets:
         if target['platform_name'] == 'K64F':
             if 'target_id' in target:
                 target_id = target['target_id']
                 break
     if target_id:
         flasher = Flash()
         ret = flasher.flash(build=self.bin_path,
                             target_id=target_id,
                             platform_name=False,
                             device_mapping_table=None,
                             method='simple')
         self.assertEqual(ret, EXIT_CODE_SUCCESS)
Example #36
    def update_device_info(self):
        """
        Updates device's port and disk using mbedls. Typically used after reset.

        :return:
        """
        for i in range(3):
            mbed_list = mbed_lstools.create().list_mbeds_ext()
            if mbed_list:
                for mut in mbed_list:
                    if mut['target_id'] == self.target_id:
                        self.port = mut['serial_port']
                        self.disk = mut['mount_point']
                        return True
            print "HOST: Failed to find target after reset. Retrying (%d)" % i
            time.sleep(1)
        return False
Example #37
def main():
    global should_exit
    lstools = mbed_lstools.create()
    mbed_list = lstools.list_mbeds()
    for thread_index, mbed in enumerate(mbed_list):
        msd_thread = threading.Thread(target=hid_main,
                                      args=(thread_index, mbed['target_id']))
        msd_thread.start()

    try:
        with exit_cond:
            while not should_exit:
                exit_cond.wait(1)
    except KeyboardInterrupt:
        pass
    should_exit = True

    sync_print("Exiting")
Example #38
    def run(self):
        """ Run tests, calculate overall score and print test results
        """
        mbeds = mbed_lstools.create()
        muts_list = mbeds.list_mbeds()
        test_base = IOperTestCaseBase()

        self.raw_test_results = {}
        for i, mut in enumerate(muts_list):
            result = []
            self.raw_test_results[mut['platform_name']] = []

            print "MBEDLS: Detected %s, port: %s, mounted: %s"% (mut['platform_name'],
                                                                 mut['serial_port'],
                                                                 mut['mount_point'])
            print "Running interoperability test suite, scope '%s'" % (self.requested_scope)
            for test_case in TEST_LIST:
                if self.scopes[self.requested_scope] >= self.scopes[test_case.scope]:
                    res = test_case.test(param=mut)
                    result.extend(res)
                    self.raw_test_results[mut['platform_name']].extend(res)

            columns = ['Platform', 'Test Case', 'Result', 'Scope', 'Description']
            pt = PrettyTable(columns)
            for col in columns:
                pt.align[col] = 'l'

            for tr in result:
                severity, tr_name, tr_scope, text = tr
                tr = (test_base.COLOR(severity, mut['platform_name']),
                      test_base.COLOR(severity, tr_name),
                      test_base.COLOR(severity, severity),
                      test_base.COLOR(severity, tr_scope),
                      test_base.COLOR(severity, text))
                pt.add_row(list(tr))
            print pt.get_string(border=True, sortby='Result')
            if i + 1 < len(muts_list):
                print
        return self.raw_test_results
Example #39
    def check_serial_port_ready(self, serial_port, target_id=None, timeout=60):
        """! Function checks (using mbed-ls) and updates serial port name information for DUT with specified target_id.
        If no target_id is specified function returns old serial port name.
        @param serial_port Current serial port name
        @param target_id Target ID of the device under test whose serial port will be checked and updated if needed
        @param timeout Serial port polling timeout in seconds
        @return Tuple with result (always True) and serial port read from mbed-ls
        """
        result = True

        if target_id:
            # If serial port changed (check using mbed-ls), use new serial port
            new_serial_port = serial_port

            # Sometimes OSes take a long time to mount devices (up to one minute).
            # Current polling time: 120 x 500 ms = 1 minute
            self.print_plugin_info("Waiting up to %d sec for '%s' serial port (current is '%s')..."% (timeout, target_id, serial_port))
            timeout_step = 0.5
            timeout = int(timeout / timeout_step)
            for i in range(timeout):
                # mbed_lstools.create() should be done inside the loop. Otherwise it will loop on same data.
                mbeds = mbed_lstools.create()
                mbeds_by_tid = mbeds.list_mbeds_by_targetid()   # key: target_id, value mbedls_dict()
                if target_id in mbeds_by_tid:
                    if 'serial_port' in mbeds_by_tid[target_id]:
                        if mbeds_by_tid[target_id]['serial_port']:
                            # Only assign if serial port is known (not None)
                            new_serial_port = mbeds_by_tid[target_id]['serial_port']
                            break
                sleep(timeout_step)

            if new_serial_port != serial_port:
                # Serial port changed, update to new serial port from mbed-ls
                self.print_plugin_info("Serial port for tid='%s' changed from '%s' to '%s'..."% (target_id, serial_port, new_serial_port))
                serial_port = new_serial_port

        return (result, serial_port)
Example #40
def get_interface_version(mount_point):
    """ Function returns interface version from the target mounted on the specified mount point
    
        mount_point can be acquired via the following:
            muts = get_autodetected_MUTS_list()
            for mut in muts.values():
                mount_point = mut['disk']
                    
        @param mount_point Name of disk where platform is connected to host machine.
    """
    if get_module_avail('mbed_lstools'):
        try:
            mbedls = mbed_lstools.create()
            mbeds = mbedls.list_mbeds(unique_names=True, read_details_txt=True)
            
            for mbed in mbeds:
                if mbed['mount_point'] == mount_point:
            
                    if 'daplink_version' in mbed:
                        return mbed['daplink_version']           
        except:
            return 'unknown'
        
    return 'unknown'
Example #41
    # Only prints matrix of supported toolchains
    if opts.supported_toolchains:
        print mcu_toolchain_matrix(platform_filter=opts.general_filter_regex)
        exit(0)

    test_spec = None
    MUTs = None

    if hasattr(opts, 'auto_detect') and opts.auto_detect:
        # If auto_detect attribute is present, we assume other auto-detection
        # parameters like 'toolchains_filter' are also set.
        print "MBEDLS: Detecting connected mbed-enabled devices... "

        if get_module_avail('mbed_lstools'):
            mbeds = mbed_lstools.create()
            muts_list = mbeds.list_mbeds_ext() if hasattr(mbeds, 'list_mbeds_ext') else mbeds.list_mbeds()
            for mut in muts_list:
                print "MBEDLS: Detected %s, port: %s, mounted: %s"% (mut['platform_name_unique'] if 'platform_name_unique' in mut else mut['platform_name'],
                                        mut['serial_port'],
                                        mut['mount_point'])

        # Set up parameters for test specification filter function (we need to set toolchains per target here)
        use_default_toolchain = 'default' in opts.toolchains_filter.split(',') if opts.toolchains_filter is not None else True
        use_supported_toolchains = 'all' in opts.toolchains_filter.split(',') if opts.toolchains_filter is not None else False
        toolchain_filter = opts.toolchains_filter
        platform_name_filter = opts.general_filter_regex.split(',') if opts.general_filter_regex is not None else opts.general_filter_regex
        # Test specification with information about each target and associated toolchain
        test_spec = get_autodetected_TEST_SPEC(muts_list,
                                               use_default_toolchain=use_default_toolchain,
                                               use_supported_toolchains=use_supported_toolchains,
Example #42
            libc.sync()
    except:
        check_call(['sync'], shell=True)


# only select the files that exist from what was given
files = []
for file in args.files:
    f = abspath(file)
    if isfile(f):
        files.append(f)
    else:
        print('{} does not exist'.format(file))

# get the array of mbeds that are connected
mbed_tool = mbedls.create()
mbed_tool.list_unmounted = True
mbeds = mbed_tool.list_mbeds()

if len(mbeds) == 0:
    print("No mbeds found", file=sys.stderr)
    sys.exit(1)


def find_mbed_disk(mbed):
    dirpath = '/dev/disk/by-id'
    for path in os.listdir(dirpath):
        if mbed['target_id'] in path:
            return os.path.join(dirpath, path)
    return None
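
find_mbed_disk() resolves a board to its block device through /dev/disk/by-id, so this part of the script is Linux-specific. A hypothetical call site, reusing the mbeds list built above:

# Hypothetical usage; assumes the Linux /dev/disk/by-id layout used above.
for mbed in mbeds:
    disk = find_mbed_disk(mbed)
    if disk is None:
        print('no block device found for {}'.format(mbed['target_id']),
              file=sys.stderr)
    else:
        print('{} -> {}'.format(mbed['target_id'], disk))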
Example #43
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements CLI workflow depending on CLI parameters inputed
    @return This function doesn't return, it exits to environment with proper success code
    """

    if not MBED_LMTOOLS:
        gt_logger.gt_log_err("error: mbed-ls proprietary module not installed")
        return (-1)

    if not MBED_HOST_TESTS:
        gt_logger.gt_log_err("error: mbed-host-tests proprietary module not installed")
        return (-1)

    # This is how you magically control colours in this piece of art software
    gt_logger.colorful(not opts.plain)

    # List available test binaries (names, no extension)
    if opts.list_binaries:
        list_binaries_for_targets()
        return (0)

    # Prints version and exits
    if opts.version:
        print_version()
        return (0)

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. in 'yotta test command'
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(None,
                                         None,
                                         None,
                                         None,
                                         None,
                                         hooks=greentea_hooks,
                                         digest_source=opts.digest_source,
                                         enum_host_tests_path=enum_host_tests_path,
                                         verbose=opts.verbose_test_result_only)

        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return (status)

    ### Read yotta module basic information
    yotta_module = YottaModule()
    yotta_module.init() # Read actual yotta module data

    # Check if NO greentea-client is in module.json of repo to test, if so abort
    if not yotta_module.check_greentea_client():
        gt_logger.gt_log("""
        *****************************************************************************************
        * We've noticed that NO 'greentea-client' module is specified in                        *
        * dependency/testDependency section of this module's 'module.json' file.                *
        *                                                                                       *
        * This version of Greentea requires 'greentea-client' module.                           *
        * Please downgrade to Greentea before v0.2.0:                                           *
        *                                                                                       *
        * $ pip install "mbed-greentea<0.2.0" --upgrade                                         *
        *                                                                                       *
        * or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78    *
        *****************************************************************************************
        """)
        return (0)

    ### Selecting yotta targets to process
    yt_targets = [] # List of yotta targets specified by user used to process during this run
    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_logger.gt_log("checking for yotta target in current directory")
        gt_logger.gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_logger.gt_log("assuming default target as '%s'"% gt_logger.gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_logger.gt_log_tab("yotta target in current directory is not set")
            gt_logger.gt_log_err("yotta target is not specified. Use '%s' or '%s' command to set target"%
            (
                gt_logger.gt_bright('mbedgt -t <yotta_target>'),
                gt_logger.gt_bright('yotta target <yotta_target>')
            ))
            return (-1)

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()

    ready_mbed_devices = [] # Devices which can be used (are fully detected)

    if mbeds_list:
        gt_logger.gt_log("detected %d device%s"% (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_logger.gt_log_err("can't detect all properties of the device!")
                for prop in mut:
                    if not mut[prop]:
                        gt_logger.gt_log_tab("property '%s' is '%s'"% (prop, str(mut[prop])))
            else:
                ready_mbed_devices.append(mut)
                gt_logger.gt_log_tab("detected '%s' -> '%s', console at '%s', mounted at '%s', target id '%s'"% (
                    gt_logger.gt_bright(mut['platform_name']),
                    gt_logger.gt_bright(mut['platform_name_unique']),
                    gt_logger.gt_bright(mut['serial_port']),
                    gt_logger.gt_bright(mut['mount_point']),
                    gt_logger.gt_bright(mut['target_id'])
                ))
    else:
        gt_logger.gt_log_err("no devices detected")
        return (RET_NO_DEVICES)

    ### Use yotta to search mapping between platform names and available platforms
    # Convert platform:target, ... mapping to data structure
    map_platform_to_yt_target = {}
    if opts.map_platform_to_yt_target:
        gt_logger.gt_log("user defined platform -> target supported mapping definition (specified with --map-target switch)")
        p_to_t_mappings = opts.map_platform_to_yt_target.split(',')
        for mapping in p_to_t_mappings:
            if len(mapping.split(':')) == 2:
                platform, yt_target = mapping.split(':')
                if platform not in map_platform_to_yt_target:
                    map_platform_to_yt_target[platform] = []
                map_platform_to_yt_target[platform].append(yt_target)
                gt_logger.gt_log_tab("mapped platform '%s' to be compatible with '%s'"% (
                    gt_logger.gt_bright(platform),
                    gt_logger.gt_bright(yt_target)
                ))
            else:
                gt_logger.gt_log_tab("unknown format '%s', use 'platform:target' format"% mapping)

    # Check if mbed classic target name can be translated to yotta target name

    mut_info_map = {}   # platform_name : mut_info_dict, extract yt_targets with e.g. [k["yotta_target"] for k in d['K64F']["yotta_targets"]]

    for mut in ready_mbed_devices:
        platfrom_name = mut['platform_name']
        if platfrom_name not in mut_info_map:
            mut_info = get_mbed_clasic_target_info(platfrom_name,
                                                   map_platform_to_yt_target,
                                                   use_yotta_registry=opts.yotta_search_for_mbed_target)
            if mut_info:
                mut_info_map[platfrom_name] = mut_info

    ### List of unique ready platform names
    unique_mbed_devices = list(set(mut_info_map.keys()))

    ### Identify which targets have to be built because their platforms are present
    yt_target_platform_map = {}     # yt_target_to_test : platforms to test on

    for yt_target in yt_targets:
        for platform_name in unique_mbed_devices:
            if yt_target in [k["yotta_target"] for k in mut_info_map[platform_name]["yotta_targets"]]:
                if yt_target not in yt_target_platform_map:
                    yt_target_platform_map[yt_target] = []
                if platform_name not in yt_target_platform_map[yt_target]:
                    yt_target_platform_map[yt_target].append(platform_name)

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log("filtering out target ids not on below list (specified with --use-tids switch)")
        accepted_target_ids = opts.use_target_ids.split(',')
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'"% gt_logger.gt_bright(tid))

    test_exec_retcode = 0       # Decrement this value each time test case result is not 'OK'
    test_platforms_match = 0    # Count how many tests were actually ran with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    test_report = {}            # Test report used to export to Junit, HTML etc...
    muts_to_test = []           # MUTs to actually be tested
    test_queue = Queue()        # contains information about test_bin and image_path for each test case
    test_result_queue = Queue() # used to store results of each thread
    execute_threads = []        # list of threads to run test cases

    ### Check that the argument of --parallel mode is an integer greater than or equal to 1
    try:
        parallel_test_exec = int(opts.parallel_test_exec)
        if parallel_test_exec < 1:
            parallel_test_exec = 1
    except ValueError:
        gt_logger.gt_log_err("argument of mode --parallel is not a int, disable parallel mode")
        parallel_test_exec = 1

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10 # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed), SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    for yotta_target_name in yt_target_platform_map:
        gt_logger.gt_log("processing '%s' yotta target compatible platforms..."% gt_logger.gt_bright(yotta_target_name))

        for platform_name in yt_target_platform_map[yotta_target_name]:
            gt_logger.gt_log("processing '%s' platform..."% gt_logger.gt_bright(platform_name))

            ### Select MUTS to test from list of available MUTS to start testing
            mut = None
            number_of_parallel_instances = 1
            for mbed_dev in ready_mbed_devices:
                if accepted_target_ids and mbed_dev['target_id'] not in accepted_target_ids:
                    continue

                if mbed_dev['platform_name'] == platform_name:
                    mut = mbed_dev
                    muts_to_test.append(mbed_dev)
                    gt_logger.gt_log("using platform '%s' for test:"% gt_logger.gt_bright(platform_name))
                    for k in mbed_dev:
                        gt_logger.gt_log_tab("%s = '%s'"% (k, mbed_dev[k]))
                    if number_of_parallel_instances < parallel_test_exec:
                        number_of_parallel_instances += 1
                    else:
                        break

            # Configuration print mode:
            if opts.verbose_test_configuration_only:
                continue

            if mut:
                target_platforms_match += 1

                # Demo mode: --run implementation (already added --run to mbedhtrun)
                # We want to pass the file name to mbedhtrun (--run NAME  =>  -f NAME) and run only one binary
                if opts.run_app:
                    gt_logger.gt_log("running '%s' for '%s'"% (gt_logger.gt_bright(opts.run_app), gt_logger.gt_bright(yotta_target_name)))
                    disk = mut['mount_point']
                    port = mut['serial_port']
                    micro = mut['platform_name']
                    program_cycle_s = mut_info_map[platform_name]['properties']['program_cycle_s']
                    copy_method = opts.copy_method if opts.copy_method else 'shell'
                    enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

                    yotta_config = YottaConfig()
                    yotta_config.init(yotta_target_name)

                    yotta_config_baudrate = yotta_config.get_baudrate()

                    # We will force configuration specific baudrate
                    if port:
                        port = "%s:%d"% (port, yotta_config_baudrate)

                    test_platforms_match += 1
                    host_test_result = run_host_test(opts.run_app,
                                                     disk,
                                                     port,
                                                     yotta_target_name,
                                                     mut['target_id'],
                                                     micro=micro,
                                                     copy_method=copy_method,
                                                     program_cycle_s=program_cycle_s,
                                                     digest_source=opts.digest_source,
                                                     json_test_cfg=opts.json_test_configuration,
                                                     run_app=opts.run_app,
                                                     enum_host_tests_path=enum_host_tests_path,
                                                     verbose=True)

                    single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
                    status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                    if single_test_result != TEST_RESULT_OK:
                        test_exec_retcode += 1
                    continue

                # Regression test mode:
                # Building sources for given target and perform normal testing

                yotta_result, yotta_ret = True, 0   # Skip build and assume 'yotta build' was successful
                if opts.skip_yotta_build:
                    gt_logger.gt_log("skipping calling yotta (specified with --skip-build option)")
                else:
                    yotta_result, yotta_ret = build_with_yotta(yotta_target_name,
                        verbose=opts.verbose,
                        build_to_release=opts.build_to_release,
                        build_to_debug=opts.build_to_debug)

                # We need to stop executing if yotta build fails
                if not yotta_result:
                    gt_logger.gt_log_err("yotta returned %d"% yotta_ret)
                    return (RET_YOTTA_BUILD_FAIL)

                if opts.only_build_tests:
                    continue

                # Build phase will be followed by test execution for each target
                if yotta_result and not opts.only_build_tests:
                    binary_type = mut_info_map[platform_name]['properties']['binary_type']
                    ctest_test_list = load_ctest_testsuite(os.path.join('.', 'build', yotta_target_name),
                        binary_type=binary_type)
                    #TODO no tests to execute

                filtered_ctest_test_list = create_filtered_test_list(ctest_test_list, opts.test_by_names, opts.skip_test)

                gt_logger.gt_log("running %d test%s for target '%s' and platform '%s'"% (
                    len(filtered_ctest_test_list),
                    "s" if len(filtered_ctest_test_list) != 1 else "",
                    gt_logger.gt_bright(yotta_target_name),
                    gt_logger.gt_bright(platform_name)
                ))

                # Test execution order can be shuffled (optionally with a provided random seed)
                # so that a test run can be reproduced.
                filtered_ctest_test_list_keys = filtered_ctest_test_list.keys()
                if opts.shuffle_test_order:
                    # We want to shuffle test names randomly
                    random.shuffle(filtered_ctest_test_list_keys, lambda: shuffle_random_seed)

                for test_bin in filtered_ctest_test_list_keys:
                    image_path = filtered_ctest_test_list[test_bin]
                    test = {"test_bin":test_bin, "image_path":image_path}
                    test_queue.put(test)

                number_of_threads = 0
                for mut in muts_to_test:
                    #################################################################
                    # Experimental, parallel test execution
                    #################################################################
                    if number_of_threads < parallel_test_exec:
                        args = (test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name, greentea_hooks)
                        t = Thread(target=run_test_thread, args=args)
                        execute_threads.append(t)
                        number_of_threads += 1

    gt_logger.gt_log_tab("use %s instance%s for testing" % (len(execute_threads), 's' if len(execute_threads) != 1 else ''))
    for t in execute_threads:
        t.daemon = True
        t.start()

    # merge partial test reports from different threads into the final test report
    for t in execute_threads:
        try:
            t.join() #blocking
            test_return_data = test_result_queue.get(False)
        except Exception as e:
            # No test report generated
            gt_logger.gt_log_err("could not generate test report" + str(e))
            test_exec_retcode += -1000
            return test_exec_retcode

        test_platforms_match += test_return_data['test_platforms_match']
        test_exec_retcode += test_return_data['test_exec_retcode']
        partial_test_report = test_return_data['test_report']
        # todo: find better solution, maybe use extend
        for report_key in partial_test_report.keys():
            if report_key not in test_report:
                test_report[report_key] = {}
                test_report.update(partial_test_report)
            else:
                test_report[report_key].update(partial_test_report[report_key])

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
        return (0)

    gt_logger.gt_log("all tests finished!")

    # We will execute post test hooks on tests
    for yotta_target in test_report:
        test_name_list = []    # All test case names for particular yotta target
        for test_name in test_report[yotta_target]:
            test = test_report[yotta_target][test_name]
            # Test was successful
            if test['single_test_result'] in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test['test_bin_name'],
                        "image_path": test['image_path'],
                        "build_path": test['build_path'],
                        "build_path_abs": test['build_path_abs'],
                        "yotta_target_name": yotta_target,
                    }
                    greentea_hooks.run_hook_ext('hook_post_test_end', format)
        if greentea_hooks:
            # Call hook executed for each yotta target, just after all tests are finished
            build_path = os.path.join("./build", yotta_target)
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_test_end')
            format = {
                "build_path": build_path,
                "build_path_abs": build_path_abs,
                "test_name_list": test_name_list,
                "yotta_target_name": yotta_target,
            }
            greentea_hooks.run_hook_ext('hook_post_all_test_end', format)

    # This tool is designed to work in CI
    # We want to return success codes based on tool actions,
    # only if tests were executed and all passed we want to
    # return 0 (success)
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f"% (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        # Reports (to file)
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUnit file '%s'..."% gt_logger.gt_bright(opts.report_junit_file_name))
            junit_report = exporter_testcase_junit(test_report, test_suite_properties=yotta_module.get_data())
            with open(opts.report_junit_file_name, 'w') as f:
                f.write(junit_report)
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to text '%s'..."% gt_logger.gt_bright(opts.report_text_file_name))

            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            with open(opts.report_text_file_name, 'w') as f:
                f.write('\n'.join([text_report, text_results, text_testcase_report, text_testcase_results]))

        # Reports (to console)
        if opts.report_json:
            # We will not print summary and json report together
            gt_logger.gt_log("json test report:")
            print exporter_json(test_report)
        else:
            # Final summary
            if test_report:
                # Test suite report
                gt_logger.gt_log("test suite report:")
                text_report, text_results = exporter_text(test_report)
                print text_report
                gt_logger.gt_log("test suite results: " + text_results)
                # test case detailed report
                gt_logger.gt_log("test case report:")
                text_testcase_report, text_testcase_results = exporter_testcase_text(test_report, test_suite_properties=yotta_module.get_data())
                print text_testcase_report
                gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn("no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no target matching platforms were found!")
            test_exec_retcode += -100

    return (test_exec_retcode)
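
The shuffle seed logged above exists so a shuffled test run can be reproduced by supplying the same seed back via the shuffle seed option. A minimal standalone sketch (the test names below are hypothetical) showing how feeding a fixed seed to random.shuffle yields the same order on every run:

import random

SHUFFLE_SEED_ROUND = 10  # Value used to round the float random seed
shuffle_random_seed = round(0.1234567891, SHUFFLE_SEED_ROUND)  # e.g. a seed supplied on the command line

tests = ['test-basic', 'test-timer', 'test-rtc', 'test-ticker']  # hypothetical test names
first_run, second_run = list(tests), list(tests)

# random.shuffle accepts a zero-argument function returning a float in [0.0, 1.0);
# a constant value produces the same permutation on every call.
random.shuffle(first_run, lambda: shuffle_random_seed)
random.shuffle(second_run, lambda: shuffle_random_seed)
assert first_run == second_run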
Ejemplo n.º 44
0
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements CLI workflow depending on CLI parameters inputed
    @return This function doesn't return, it exits to environment with proper success code
    """

    def filter_ready_devices(mbeds_list):
        """! Filters list of MUTs to check if all MUTs are correctly detected with mbed-ls module.
        @details This function logs a lot to help users figure out root cause of their problems
        @param mbeds_list List of MUTs to verify
        @return Tuple of (MUTS detected correctly, MUTs not detected fully)
        """
        ready_mbed_devices = []  # Devices which can be used (are fully detected)
        not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

        gt_logger.gt_log("detected %d device%s" % (len(mbeds_list), "s" if len(mbeds_list) != 1 else ""))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_logger.gt_log_err("mbed-ls was unable to enumerate correctly all properties of the device!")
                gt_logger.gt_log_tab(
                    "check with 'mbedls -j' command if all properties of your device are enumerated properly"
                )
                for prop in mut:
                    if not mut[prop]:
                        # Adding MUT to NOT DETECTED FULLY list
                        if mut not in not_ready_mbed_devices:
                            not_ready_mbed_devices.append(mut)
                        gt_logger.gt_log_err("mbed-ls property '%s' is '%s'" % (prop, str(mut[prop])))
                        if prop == "serial_port":
                            gt_logger.gt_log_tab("check if your serial port driver is correctly installed!")
                        if prop == "mount_point":
                            gt_logger.gt_log_tab("check if your OS can detect and mount mbed device mount point!")
            else:
                # Adding MUT to DETECTED CORRECTLY list
                ready_mbed_devices.append(mut)
        return (ready_mbed_devices, not_ready_mbed_devices)

    def get_parallel_value(value):
        """! Get correct value for parallel switch (--parallel)
        @param value Value passed from --parallel
        @return Refactored version of parallel number
        """
        try:
            parallel_test_exec = int(value)
            if parallel_test_exec < 1:
                parallel_test_exec = 1
        except ValueError:
            gt_logger.gt_log_err("argument of mode --parallel is not a int, disabled parallel mode")
            parallel_test_exec = 1
        return parallel_test_exec

    if not MBED_LMTOOLS:
        gt_logger.gt_log_err("error: mbed-ls proprietary module not installed")
        return -1

    if not MBED_HOST_TESTS:
        gt_logger.gt_log_err("error: mbed-host-tests proprietary module not installed")
        return -1

    # This is how you magically control colours in this piece of art software
    gt_logger.colorful(not opts.plain)

    # Prints version and exits
    if opts.version:
        print_version()
        return 0

    # Load test specification or print warnings / info messages and exit CLI mode
    test_spec, ret = get_test_spec(opts)
    if not test_spec:
        return ret

    # Verbose flag
    verbose = opts.verbose_test_result_only

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. by the 'yotta test' command
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(
            None,
            None,
            None,
            None,
            None,
            hooks=greentea_hooks,
            digest_source=opts.digest_source,
            enum_host_tests_path=enum_host_tests_path,
            verbose=verbose,
        )

        # Some error in htrun, abort test execution
        if isinstance(host_test_result, int):
            # int(host_test_result) > 0 - Call to mbedhtrun failed
            # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
            return host_test_result

        # If execution was successful 'run_host_test' return tuple with results
        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = (
            host_test_result
        )
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return status

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()

    if opts.global_resource_mgr:
        # Mocking available platform requested by --grm switch
        grm_values = parse_global_resource_mgr(opts.global_resource_mgr)
        if grm_values:
            gt_logger.gt_log_warn("entering global resource manager mbed-ls dummy mode!")
            grm_platform_name, grm_module_name, grm_ip_name, grm_port_name = grm_values
            mbeds_list = []
            mbeds_list.append(mbeds.get_dummy_platform(grm_platform_name))
            opts.global_resource_mgr = ":".join(grm_values[1:])
            gt_logger.gt_log_tab("adding dummy platform '%s'" % grm_platform_name)
        else:
            gt_logger.gt_log("global resource manager switch '--grm %s' in wrong format!" % opts.global_resource_mgr)
            return -1

    ready_mbed_devices = []  # Devices which can be used (are fully detected)
    not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

    if mbeds_list:
        ready_mbed_devices, not_ready_mbed_devices = filter_ready_devices(mbeds_list)
        if ready_mbed_devices:
            # Log devices in the form of a pretty-formatted table
            for line in log_mbed_devices_in_table(ready_mbed_devices).splitlines():
                gt_logger.gt_log_tab(line.strip(), print_text=verbose)
    else:
        gt_logger.gt_log_err("no compatible devices detected")
        return RET_NO_DEVICES

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log("filtering out target ids not on below list (specified with --use-tids switch)")
        accepted_target_ids = opts.use_target_ids.split(",")
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'" % gt_logger.gt_bright(tid))

    test_exec_retcode = 0  # Incremented each time a test case result is not 'OK'
    test_platforms_match = 0  # Count how many tests were actually ran with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    test_report = {}  # Test report used to export to Junit, HTML etc...
    muts_to_test = []  # MUTs to actually be tested
    test_queue = Queue()  # contains information about test_bin and image_path for each test case
    test_result_queue = Queue()  # used to store results of each thread
    execute_threads = []  # list of threads to run test cases

    ### Check if the argument of --parallel is an integer greater than or equal to 1

    parallel_test_exec = get_parallel_value(opts.parallel_test_exec)

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10  # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed), SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    # When a test spec is used (--test-spec switch), the command line option -t <list_of_targets>
    # is used to enumerate builds from the test spec we are supplying
    filter_test_builds = opts.list_of_targets.split(",") if opts.list_of_targets else None
    for test_build in test_spec.get_test_builds(filter_test_builds):
        platform_name = test_build.get_platform()
        gt_logger.gt_log(
            "processing target '%s' toolchain '%s' compatible platforms... (note: switch set to --parallel %d)"
            % (
                gt_logger.gt_bright(platform_name),
                gt_logger.gt_bright(test_build.get_toolchain()),
                int(opts.parallel_test_exec),
            )
        )

        baudrate = test_build.get_baudrate()

        ### Select MUTS to test from list of available MUTS to start testing
        mut = None
        number_of_parallel_instances = 1
        for mbed_dev in ready_mbed_devices:
            if accepted_target_ids and mbed_dev["target_id"] not in accepted_target_ids:
                continue

            if mbed_dev["platform_name"] == platform_name:
                # We will force configuration specific baudrate by adding baudrate to serial port
                # Only add baudrate decoration for serial port if it's not already there
                # Format used by mbedhtrun: 'serial_port' = '<serial_port_name>:<baudrate>'
                if not mbed_dev["serial_port"].endswith(str(baudrate)):
                    mbed_dev["serial_port"] = "%s:%d" % (mbed_dev["serial_port"], baudrate)
                mut = mbed_dev
                muts_to_test.append(mbed_dev)
                if number_of_parallel_instances < parallel_test_exec:
                    number_of_parallel_instances += 1
                else:
                    break

        # Log devices in the form of a pretty-formatted table
        for line in log_mbed_devices_in_table(muts_to_test).splitlines():
            gt_logger.gt_log_tab(line.strip(), print_text=verbose)

        # Configuration print mode:
        if opts.verbose_test_configuration_only:
            continue

        ### If we have at least one available device we can proceed
        if mut:
            target_platforms_match += 1

            build = test_build.get_name()
            build_path = test_build.get_path()

            # Demo mode: --run implementation (already added --run to mbedhtrun)
            # We want to pass the file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
            if opts.run_app:
                gt_logger.gt_log(
                    "running '%s' for '%s'-'%s'"
                    % (
                        gt_logger.gt_bright(opts.run_app),
                        gt_logger.gt_bright(platform_name),
                        gt_logger.gt_bright(test_build.get_toolchain()),
                    )
                )
                disk = mut["mount_point"]
                port = mut["serial_port"]
                micro = mut["platform_name"]
                program_cycle_s = get_platform_property(micro, "program_cycle_s")
                copy_method = opts.copy_method if opts.copy_method else "shell"
                enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

                test_platforms_match += 1
                host_test_result = run_host_test(
                    opts.run_app,
                    disk,
                    port,
                    build_path,
                    mut["target_id"],
                    micro=micro,
                    copy_method=copy_method,
                    program_cycle_s=program_cycle_s,
                    digest_source=opts.digest_source,
                    json_test_cfg=opts.json_test_configuration,
                    run_app=opts.run_app,
                    enum_host_tests_path=enum_host_tests_path,
                    verbose=True,
                )

                # Some error in htrun, abort test execution
                if isinstance(host_test_result, int):
                    # int(host_test_result) > 0 - Call to mbedhtrun failed
                    # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
                    return host_test_result

                # If execution was successful 'run_host_test' return tuple with results
                single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = (
                    host_test_result
                )
                status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                if single_test_result != TEST_RESULT_OK:
                    test_exec_retcode += 1

            test_list = test_build.get_tests()

            filtered_ctest_test_list = create_filtered_test_list(
                test_list, opts.test_by_names, opts.skip_test, test_spec=test_spec
            )

            gt_logger.gt_log(
                "running %d test%s for platform '%s' and toolchain '%s'"
                % (
                    len(filtered_ctest_test_list),
                    "s" if len(filtered_ctest_test_list) != 1 else "",
                    gt_logger.gt_bright(platform_name),
                    gt_logger.gt_bright(test_build.get_toolchain()),
                )
            )

            # Test execution order can be shuffled (optionally with a provided random seed)
            # so that a test run can be reproduced.
            filtered_ctest_test_list_keys = filtered_ctest_test_list.keys()
            if opts.shuffle_test_order:
                # We want to shuffle test names randomly
                random.shuffle(filtered_ctest_test_list_keys, lambda: shuffle_random_seed)

            for test_name in filtered_ctest_test_list_keys:
                image_path = (
                    filtered_ctest_test_list[test_name].get_binary(binary_type=TestBinary.BIN_TYPE_BOOTABLE).get_path()
                )
                if image_path is None:
                    gt_logger.gt_log_err("Failed to find test binary for test %s flash method %s" % (test_name, "usb"))
                else:
                    test = {"test_bin": test_name, "image_path": image_path}
                    test_queue.put(test)

            number_of_threads = 0
            for mut in muts_to_test:
                # Experimental, parallel test execution
                if number_of_threads < parallel_test_exec:
                    args = (test_result_queue, test_queue, opts, mut, build, build_path, greentea_hooks)
                    t = Thread(target=run_test_thread, args=args)
                    execute_threads.append(t)
                    number_of_threads += 1

        gt_logger.gt_log_tab(
            "use %s instance%s of execution threads for testing"
            % (len(execute_threads), "s" if len(execute_threads) != 1 else str()),
            print_text=verbose,
        )
        for t in execute_threads:
            t.daemon = True
            t.start()

        # merge partial test reports from different threads into the final test report
        for t in execute_threads:
            try:
                t.join()  # blocking
                test_return_data = test_result_queue.get(False)
            except Exception as e:
                # No test report generated
                gt_logger.gt_log_err("could not generate test report" + str(e))
                test_exec_retcode += -1000
                return test_exec_retcode

            test_platforms_match += test_return_data["test_platforms_match"]
            test_exec_retcode += test_return_data["test_exec_retcode"]
            partial_test_report = test_return_data["test_report"]
            # todo: find better solution, maybe use extend
            for report_key in partial_test_report.keys():
                if report_key not in test_report:
                    test_report[report_key] = {}
                    test_report.update(partial_test_report)
                else:
                    test_report[report_key].update(partial_test_report[report_key])

        execute_threads = []

        if opts.verbose_test_configuration_only:
            print
            print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
            return 0

        gt_logger.gt_log("all tests finished!")

    # We will execute post test hooks on tests
    for build_name in test_report:
        test_name_list = []  # All test case names for a particular build
        for test_name in test_report[build_name]:
            test = test_report[build_name][test_name]
            # Test was successful
            if test["single_test_result"] in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test["test_bin_name"],
                        "image_path": test["image_path"],
                        "build_path": test["build_path"],
                        "build_path_abs": test["build_path_abs"],
                    }
                    greentea_hooks.run_hook_ext("hook_post_test_end", format)
        if greentea_hooks:
            build = test_spec.get_test_build(build_name)
            assert build is not None, "Failed to find build info for build %s" % build_name

            # Call hook executed for each build, just after all tests are finished
            build_path = build.get_path()
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_test_end')
            format = {"build_path": build_path, "build_path_abs": build_path_abs, "test_name_list": test_name_list}
            greentea_hooks.run_hook_ext("hook_post_all_test_end", format)

    # This tool is designed to work in CI
    # We want to return success codes based on tool actions,
    # only if tests were executed and all passed we want to
    # return 0 (success)
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f" % (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        def dump_report_to_text_file(filename, content):
            """! Closure for report dumps to text files
            @param filename Name of destination file
            @param content Text content of the file to write
            @return True if write was successful, else return False
            """
            try:
                with open(filename, "w") as f:
                    f.write(content)
            except IOError as e:
                gt_logger.gt_log_err("can't export to '%s', reason:" % filename)
                gt_logger.gt_log_err(str(e))
                return False
            return True

        # Reports to JUNIT file
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUNIT file '%s'..." % gt_logger.gt_bright(opts.report_junit_file_name))
            # This test specification will be used by JUnit exporter to populate TestSuite.properties (useful meta-data for Viewer)
            test_suite_properties = {}
            for target_name in test_report:
                test_build_properties = get_test_build_properties(test_spec, target_name)
                if test_build_properties:
                    test_suite_properties[target_name] = test_build_properties
            junit_report = exporter_testcase_junit(test_report, test_suite_properties=test_suite_properties)
            dump_report_to_text_file(opts.report_junit_file_name, junit_report)

        # Reports to text file
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to TEXT '%s'..." % gt_logger.gt_bright(opts.report_text_file_name))
            # Useful text report for those who do not like to copy-paste result tables to files
            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            text_final_report = "\n".join([text_report, text_results, text_testcase_report, text_testcase_results])
            dump_report_to_text_file(opts.report_text_file_name, text_final_report)

        # Reports to JSON file
        if opts.report_json_file_name:
            # We will not print summary and json report together
            gt_logger.gt_log("exporting to JSON '%s'..." % gt_logger.gt_bright(opts.report_json_file_name))
            json_report = exporter_json(test_report)
            dump_report_to_text_file(opts.report_json_file_name, json_report)

        # Reports to HTML file
        if opts.report_html_file_name:
            gt_logger.gt_log("exporting to HTML file '%s'..." % gt_logger.gt_bright(opts.report_html_file_name))
            # Generate an HTML page displaying all of the results
            html_report = exporter_html(test_report)
            dump_report_to_text_file(opts.report_html_file_name, html_report)

        # Final summary
        if test_report:
            # Test suite report
            gt_logger.gt_log("test suite report:")
            text_report, text_results = exporter_text(test_report)
            print text_report
            gt_logger.gt_log("test suite results: " + text_results)
            # test case detailed report
            gt_logger.gt_log("test case report:")
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            print text_testcase_report
            gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn("no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no matching platforms were found!")
            test_exec_retcode += -100

    return test_exec_retcode
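
main_cli above decorates each MUT's 'serial_port' with the build's baudrate before handing it to mbedhtrun, using the '<serial_port_name>:<baudrate>' format. A minimal sketch of that decoration (the port names below are hypothetical, plain-string handling only):

def decorate_serial_port(serial_port, baudrate):
    # Append the baudrate only if it is not already appended,
    # matching the '<serial_port_name>:<baudrate>' format expected by mbedhtrun.
    if not serial_port.endswith(str(baudrate)):
        return "%s:%d" % (serial_port, baudrate)
    return serial_port

assert decorate_serial_port('/dev/ttyACM0', 115200) == '/dev/ttyACM0:115200'
assert decorate_serial_port('/dev/ttyACM0:115200', 115200) == '/dev/ttyACM0:115200'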
Ejemplo n.º 45
0
def main():
    """! This is main CLI function with all command line parameters

    @details This function also implements CLI workflow depending on CLI parameters inputed

    @return This function doesn't return, it exits to environment with proper success code
    """
    if not MBED_LMTOOLS:
        print "Error: mbed-lstools mbed proprietary module not installed"
        exit(-1)

    if not MBED_HOST_TESTS:
        print "Error: mbed-host-tests mbed proprietary module not installed"
        exit(-1)

    parser = optparse.OptionParser()

    parser.add_option('-t', '--target',
                    dest='list_of_targets',
                    help='You can specify a list of targets you want to build. Use comma to separate them')

    parser.add_option('-n', '--test-by-names',
                    dest='test_by_names',
                    help='Runs only tests enumerated in this switch. Use comma to separate test case names.')

    parser.add_option("-O", "--only-build",
                    action="store_true",
                    dest="only_build_tests",
                    default=False,
                    help="Only build repository and tests, skips actual test procedures (flashing etc.)")

    copy_methods_str = "Plugin support: " + ', '.join(mbed_host_tests.host_tests_plugins.get_plugin_caps('CopyMethod'))
    parser.add_option("-c", "--copy",
                    dest="copy_method",
                    help="Copy (flash the target) method selector. " + copy_methods_str,
                    metavar="COPY_METHOD")

    parser.add_option('', '--config',
                    dest='verbose_test_configuration_only',
                    default=False,
                    action="store_true",
                    help='Displays connected boards and detected targets and exits.')

    parser.add_option('', '--release',
                    dest='build_to_release',
                    default=False,
                    action="store_true",
                    help='If possible force build in release mode (yotta -r).')

    parser.add_option('', '--debug',
                    dest='build_to_debug',
                    default=False,
                    action="store_true",
                    help='If possible force build in debug mode (yotta -d).')

    parser.add_option('', '--list',
                    dest='list_binaries',
                    default=False,
                    action="store_true",
                    help='List available binaries')

    parser.add_option('', '--digest',
                    dest='digest_source',
                    help='Redirect where the test suite takes console input from. You can use stdin or a file name to supply the test case console output')

    parser.add_option('', '--test-cfg',
                    dest='json_test_configuration',
                    help='Pass data about the host test configuration to the host test')

    parser.add_option('', '--run',
                    dest='run_app',
                    help='Flash, reset and dump serial from selected binary application')

    parser.add_option('', '--report-junit',
                    dest='report_junit_file_name',
                    help='You can log test suite results in the form of a JUnit-compliant XML report')

    parser.add_option('', '--report-text',
                    dest='report_text_file_name',
                    help='You can log test suite results to a text file')

    parser.add_option('', '--report-json',
                    dest='report_json',
                    default=False,
                    action="store_true",
                    help='Outputs test results in JSON')

    parser.add_option('', '--report-fails',
                    dest='report_fails',
                    default=False,
                    action="store_true",
                    help='Prints console outputs for failed tests')

    parser.add_option('-V', '--verbose-test-result',
                    dest='verbose_test_result_only',
                    default=False,
                    action="store_true",
                    help='Prints test serial output')

    parser.add_option('-v', '--verbose',
                    dest='verbose',
                    default=False,
                    action="store_true",
                    help='Verbose mode (prints some extra information)')

    parser.add_option('', '--version',
                    dest='version',
                    default=False,
                    action="store_true",
                    help='Prints package version and exits')

    parser.description = """This automated test script is used to test mbed SDK 3.0 on mbed-enabled devices with support from yotta build tool"""
    parser.epilog = """Example: mbedgt --target frdm-k64f-gcc"""

    (opts, args) = parser.parse_args()

    # List available test binaries (names, no extension)
    if opts.list_binaries:
        list_binaries_for_targets()
        exit(0)

    # Prints version and exits
    if opts.version:
        import pkg_resources  # part of setuptools
        version = pkg_resources.require("mbed-greentea")[0].version
        print version
        exit(0)

    # Capture alternative test console inputs, used e.g. by the 'yotta test' command
    if opts.digest_source:
        host_test_result = run_host_test(image_path=None, disk=None, port=None,
                                    digest_source=opts.digest_source,
                                    verbose=opts.verbose_test_result_only)
        single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        sys.exit(status)

    # mbed-enabled devices auto-detection procedures
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds()

    current_target = get_mbed_target_from_current_dir()
    print "mbedgt: current yotta target is: %s"% (current_target if current_target is not None else 'not set')

    if opts.list_of_targets is None:
        if current_target is not None:
            opts.list_of_targets = current_target.split(',')[0]

    print "mbed-ls: detecting connected mbed-enabled devices... %s"% ("no devices detected" if not len(mbeds_list) else "")
    list_of_targets = opts.list_of_targets.split(',') if opts.list_of_targets is not None else None

    test_report = {}    # Test report used to export to Junit, HTML etc...

    if opts.list_of_targets is None:
        print "mbedgt: assuming default target to be '%s'"% (current_target)
        print "\treason: no --target switch set"
        list_of_targets = [current_target]

    test_exec_retcode = 0       # Incremented each time a test case result is not 'OK'
    test_platforms_match = 0    # Count how many tests were actually ran with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    for mut in mbeds_list:
        print "\tdetected %s, console at: %s, mounted at: %s"% (mut['platform_name'],
            mut['serial_port'],
            mut['mount_point'])

        # Check if mbed classic target name can be translated to yotta target name
        print "mbedgt: scan available targets for '%s' platform..."% (mut['platform_name'])
        mut_info = get_mbed_clasic_target_info(mut['platform_name'])

        if mut_info is not None:
            for yotta_target in mut_info['yotta_targets']:
                yotta_target_name = yotta_target['yotta_target']

                if yotta_target_name in list_of_targets:
                    target_platforms_match += 1

                # Configuration print mode:
                if opts.verbose_test_configuration_only:
                    continue

                # Demo mode: --run implementation (already added --run to mbedhtrun)
                # We want to pass the file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
                if opts.run_app and yotta_target_name in list_of_targets:
                    print "mbedgt: running '%s' for '%s'"% (opts.run_app, yotta_target_name)
                    disk = mut['mount_point']
                    port = mut['serial_port']
                    micro = mut['platform_name']
                    program_cycle_s = mut_info['properties']['program_cycle_s']
                    copy_method = opts.copy_method if opts.copy_method else 'shell'
                    verbose = opts.verbose_test_result_only

                    test_platforms_match += 1
                    host_test_result = run_host_test(opts.run_app, disk, port,
                                                micro=micro,
                                                copy_method=copy_method,
                                                program_cycle_s=program_cycle_s,
                                                digest_source=opts.digest_source,
                                                json_test_cfg=opts.json_test_configuration,
                                                run_app=opts.run_app,
                                                verbose=True)
                    single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
                    status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                    if single_test_result != TEST_RESULT_OK:
                        test_exec_retcode += 1
                    continue

                # Regression test mode:
                # Building sources for given target and perform normal testing
                if yotta_target_name in list_of_targets:
                    print "mbedgt: using '%s' target, prepare to build"% yotta_target_name
                    cmd = ['yotta'] # "yotta %s --target=%s,* build"% (yotta_verbose, yotta_target_name)
                    if opts.verbose is not None: cmd.append('-v')
                    cmd.append('--target=%s,*' % yotta_target_name)
                    cmd.append('build')
                    if opts.build_to_release:
                        cmd.append('-r')
                    elif opts.build_to_debug:
                        cmd.append('-d')

                    print "mbedgt: calling yotta to build your sources and tests: %s" % (' '.join(cmd))
                    yotta_result = run_cli_command(cmd, shell=False, verbose=opts.verbose)

                    print "mbedgt: yotta build %s"% ('successful' if yotta_result else 'failed')
                    # Build phase will be followed by test execution for each target
                    if yotta_result and not opts.only_build_tests:
                        binary_type = mut_info['properties']['binary_type']
                        ctest_test_list = load_ctest_testsuite(os.path.join('.', 'build', yotta_target_name),
                            binary_type=binary_type)

                        print "mbedgt: running tests for '%s' target" % yotta_target_name
                        test_list = None
                        if opts.test_by_names:
                            test_list = opts.test_by_names.lower().split(',')
                            print "mbedgt: test case filter: %s (specified with -n option)" % ', '.join(["'%s'"% t for t in test_list])

                            for test_n in test_list:
                                if test_n not in ctest_test_list:
                                    print "\ttest name '%s' not found (specified with -n option)"% test_n

                        for test_bin, image_path in ctest_test_list.iteritems():
                            test_result = 'SKIPPED'
                            # Skip test not mentioned in -n option
                            if opts.test_by_names:
                                if test_bin.lower() not in test_list:
                                    continue

                            if get_mbed_supported_test(test_bin):
                                disk = mut['mount_point']
                                port = mut['serial_port']
                                micro = mut['platform_name']
                                program_cycle_s = mut_info['properties']['program_cycle_s']
                                copy_method = opts.copy_method if opts.copy_method else 'shell'
                                verbose = opts.verbose_test_result_only

                                test_platforms_match += 1
                                print "\trunning host test..."
                                host_test_result = run_host_test(image_path, disk, port,
                                    micro=micro,
                                    copy_method=copy_method,
                                    program_cycle_s=program_cycle_s,
                                    digest_source=opts.digest_source,
                                    json_test_cfg=opts.json_test_configuration,
                                    verbose=verbose)
                                single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
                                test_result = single_test_result
                                if single_test_result != TEST_RESULT_OK:
                                    test_exec_retcode += 1

                                # Update report for optional reporting feature
                                test_name = test_bin.lower()
                                if yotta_target_name not in test_report:
                                    test_report[yotta_target_name] = {}
                                if test_name not in test_report[yotta_target_name]:
                                    test_report[yotta_target_name][test_name] = {}

                                test_report[yotta_target_name][test_name]['single_test_result'] = single_test_result
                                test_report[yotta_target_name][test_name]['single_test_output'] = single_test_output
                                test_report[yotta_target_name][test_name]['elapsed_time'] = single_testduration
                                test_report[yotta_target_name][test_name]['platform_name'] = micro
                                test_report[yotta_target_name][test_name]['copy_method'] = copy_method

                                if single_test_result != 'OK' and not verbose and opts.report_fails:
                                    # In some cases we want to print console to see why test failed
                                    # even if we are not in verbose mode
                                    print "\ttest failed, reporting console output (specified with --report-fails option)"
                                    print
                                    print single_test_output

                                print "\ttest '%s' %s"% (test_bin, '.' * (80 - len(test_bin))),
                                print " %s in %.2f sec"% (test_result, single_testduration)
                    # We need to stop executing if yotta build fails
                    if not yotta_result:
                        print "mbedgt: yotta build failed!"
                        test_exec_retcode = -1
                        exit(test_exec_retcode)
        else:
            print "mbed-ls: mbed classic target name '%s' is not in target database"% (mut['platform_name'])

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"

    # This tool is designed to work in CI
    # We want to return success codes based on tool actions,
    # only if tests were executed and all passed we want to
    # return 0 (success)
    if not opts.only_build_tests:
        # Reports (to file)
        if opts.report_junit_file_name:
            junit_report = exporter_junit(test_report)
            with open(opts.report_junit_file_name, 'w') as f:
                f.write(junit_report)
        if opts.report_text_file_name:
            print "mbedgt: exporting to junit '%s'..."% (opts.report_text_file_name)
            text_report = exporter_text(test_report)
            with open(opts.report_text_file_name, 'w') as f:
                f.write(text_report)
        # Reports (to console)
        if opts.report_json:
            # We will not print summary and json report together
            print "mbedgt: json test report:"
            print exporter_json(test_report)
        else:
            # Final summary
            print "mbedgt: test report:"
            print exporter_text(test_report)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            print "mbedgt: no target matching tests were found!"
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            print "mbedgt: no target matching platforms were found!"
            test_exec_retcode += -100

    exit(test_exec_retcode)
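
The test_report dictionary built in the loop above is the structure that exporter_text, exporter_json and exporter_junit consume: results are keyed first by yotta target name and then by lower-cased test binary name. A minimal sketch with hypothetical values:

test_report = {
    'frdm-k64f-gcc': {                       # yotta target name
        'mbed-drivers-test-basic': {         # test binary name, lower-cased
            'single_test_result': 'OK',      # one of TEST_RESULTS
            'single_test_output': '<captured device console output>',
            'elapsed_time': 12.34,           # seconds
            'platform_name': 'K64F',
            'copy_method': 'shell',
        },
    },
}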
Ejemplo n.º 46
0
def main_singletest_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements CLI workflow depending on CLI parameters inputed
    @return This function doesn't return, it exits to environment with proper success code
    """

    # List available test binaries (names, no extension)
    if opts.list_binaries:
        list_binaries_for_targets()
        return 0

    # Prints version and exits
    if opts.version:
        print_version()
        return 0

    # Capture alternative test console inputs, used e.g. by the 'yotta test' command
    if opts.digest_source:
        host_test_result = run_host_test(
            image_path=None,
            disk=None,
            port=None,
            digest_source=opts.digest_source,
            verbose=opts.verbose_test_result_only,
        )

        single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return status

    # mbed-enabled devices auto-detection procedures
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds()
    platform_list = mbeds.list_platforms_ext()

    # Option -t <opts.list_of_targets> supersedes yotta target set in current directory
    if opts.list_of_targets is None:
        if opts.verbose:
            gt_log("yotta target not set from command line (specified with -t option)")
        # Trying to use locally set yotta target
        current_target = get_mbed_target_from_current_dir()

        if current_target:
            gt_log("yotta target in current directory is set to '%s'" % gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            opts.list_of_targets = current_target.split(",")[0]
        else:
            gt_log("yotta target in current directory is not set")
            gt_log_err(
                "yotta target is not specified. Use '%s' or '%s' command to set target"
                % (gt_bright("mbedgt -t <target>"), gt_bright("yotta target <target>"))
            )
            return -1

    gt_log("detecting connected mbed-enabled devices... %s" % ("no devices detected" if not len(mbeds_list) else ""))
    if mbeds_list:
        gt_log("detected %d device%s" % (len(mbeds_list), "s" if len(mbeds_list) != 1 else ""))
    else:
        gt_log("no devices detected")

    list_of_targets = opts.list_of_targets.split(",") if opts.list_of_targets is not None else None

    test_report = {}  # Test report used to export to Junit, HTML etc...

    if opts.list_of_targets is None:
        gt_log("assuming default target as '%s'" % gt_bright(current_target))
        gt_log_tab("reason: no --target switch set")
        list_of_targets = [current_target]

    test_exec_retcode = 0  # Incremented each time a test case result is not 'OK'
    test_platforms_match = 0  # Count how many tests were actually ran with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    for mut in mbeds_list:
        platform_text = gt_bright(mut["platform_name"])
        serial_text = gt_bright(mut["serial_port"])
        mount_text = gt_bright(mut["mount_point"])
        platform_target_id = gt_bright(mut["target_id"])  # We can use it to do simple resource lock

        if not all([platform_text, serial_text, mount_text]):
            gt_log_err("can't detect all properties of the device!")
            gt_log_tab("detected '%s', console at '%s', mounted at '%s'" % (platform_text, serial_text, mount_text))
            continue

        gt_log_tab("detected '%s', console at '%s', mounted at '%s'" % (platform_text, serial_text, mount_text))

        # Check if mbed classic target name can be translated to yotta target name
        gt_log("scan available targets for '%s' platform..." % gt_bright(mut["platform_name"]))
        mut_info = get_mbed_clasic_target_info(mut["platform_name"])

        if mut_info is not None:
            for yotta_target in mut_info["yotta_targets"]:
                yotta_target_name = yotta_target["yotta_target"]

                if yotta_target_name in list_of_targets:
                    target_platforms_match += 1

                # Configuration print mode:
                if opts.verbose_test_configuration_only:
                    continue

                # Demo mode: --run implementation (already added --run to mbedhtrun)
                # We want to pass the file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
                if opts.run_app and yotta_target_name in list_of_targets:
                    gt_log("running '%s' for '%s'" % (gt_bright(opts.run_app), gt_bright(yotta_target_name)))
                    disk = mut["mount_point"]
                    port = mut["serial_port"]
                    micro = mut["platform_name"]
                    program_cycle_s = mut_info["properties"]["program_cycle_s"]
                    copy_method = opts.copy_method if opts.copy_method else "shell"
                    verbose = opts.verbose_test_result_only

                    test_platforms_match += 1
                    host_test_result = run_host_test(
                        opts.run_app,
                        disk,
                        port,
                        micro=micro,
                        copy_method=copy_method,
                        program_cycle_s=program_cycle_s,
                        digest_source=opts.digest_source,
                        json_test_cfg=opts.json_test_configuration,
                        run_app=opts.run_app,
                        verbose=True,
                    )

                    single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
                    status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                    if single_test_result != TEST_RESULT_OK:
                        test_exec_retcode += 1
                    continue

                # Regression test mode:
                # Building sources for given target and perform normal testing
                if yotta_target_name in list_of_targets:
                    gt_log("using '%s' target, prepare to build" % gt_bright(yotta_target_name))
                    cmd = ["yotta"]  # "yotta %s --target=%s,* build"% (yotta_verbose, yotta_target_name)
                    if opts.verbose is not None:
                        cmd.append("-v")
                    cmd.append("--target=%s,*" % yotta_target_name)
                    cmd.append("build")
                    if opts.build_to_release:
                        cmd.append("-r")
                    elif opts.build_to_debug:
                        cmd.append("-d")

                    if not opts.skip_yotta_build:
                        gt_log("building your sources and tests with yotta...")
                        gt_log_tab("calling yotta: %s" % " ".join(cmd))
                        yotta_result, yotta_ret = run_cli_command(cmd, shell=False, verbose=opts.verbose)
                        if yotta_result:
                            gt_log("yotta build for target '%s' was successful" % gt_bright(yotta_target_name))
                        else:
                            gt_log_err("yotta build failed!")
                    else:
                        gt_log("skipping calling yotta (specified with --skip-build option)")
                        yotta_result, yotta_ret = True, 0  # Skip build and assume 'yotta build' was successful

                    # Build phase will be followed by test execution for each target
                    if yotta_result and not opts.only_build_tests:
                        binary_type = mut_info["properties"]["binary_type"]
                        ctest_test_list = load_ctest_testsuite(
                            os.path.join(".", "build", yotta_target_name), binary_type=binary_type
                        )

                        test_list = None
                        if opts.test_by_names:
                            test_list = opts.test_by_names.split(",")
                            gt_log(
                                "test case filter: %s (specified with -n option)"
                                % ", ".join(["'%s'" % gt_bright(t) for t in test_list])
                            )

                            invalid_test_names = False
                            for test_n in test_list:
                                if test_n not in ctest_test_list:
                                    gt_log_tab(
                                        "test name '%s' not found in CTestTestFile.cmake (specified with -n option)"
                                        % gt_bright(test_n)
                                    )
                                    invalid_test_names = True
                            if invalid_test_names:
                                gt_log("invalid test case names (specified with -n option)")
                                gt_log_tab("note: test case names are case sensitive")
                                gt_log_tab("note: see list of available test cases below")
                                list_binaries_for_targets(verbose_footer=False)

                        gt_log("running tests for target '%s'" % gt_bright(yotta_target_name))
                        for test_bin, image_path in ctest_test_list.iteritems():
                            test_result = "SKIPPED"
                            # Skip test not mentioned in -n option
                            if opts.test_by_names:
                                if test_bin not in test_list:
                                    continue

                            if get_mbed_supported_test(test_bin):
                                disk = mut["mount_point"]
                                port = mut["serial_port"]
                                micro = mut["platform_name"]
                                program_cycle_s = mut_info["properties"]["program_cycle_s"]
                                copy_method = opts.copy_method if opts.copy_method else "shell"
                                verbose = opts.verbose_test_result_only

                                test_platforms_match += 1
                                gt_log_tab("running host test...")
                                host_test_result = run_host_test(
                                    image_path,
                                    disk,
                                    port,
                                    micro=micro,
                                    copy_method=copy_method,
                                    program_cycle_s=program_cycle_s,
                                    digest_source=opts.digest_source,
                                    json_test_cfg=opts.json_test_configuration,
                                    verbose=verbose,
                                )

                                single_test_result, single_test_output, single_testduration, single_timeout = (
                                    host_test_result
                                )
                                test_result = single_test_result
                                if single_test_result != TEST_RESULT_OK:
                                    test_exec_retcode += 1

                                # Update report for optional reporting feature
                                test_name = test_bin.lower()
                                if yotta_target_name not in test_report:
                                    test_report[yotta_target_name] = {}
                                if test_name not in test_report[yotta_target_name]:
                                    test_report[yotta_target_name][test_name] = {}

                                test_report[yotta_target_name][test_name]["single_test_result"] = single_test_result
                                test_report[yotta_target_name][test_name]["single_test_output"] = single_test_output
                                test_report[yotta_target_name][test_name]["elapsed_time"] = single_testduration
                                test_report[yotta_target_name][test_name]["platform_name"] = micro
                                test_report[yotta_target_name][test_name]["copy_method"] = copy_method

                                if single_test_result != "OK" and not verbose and opts.report_fails:
                                    # In some cases we want to print the console output to see why the test failed,
                                    # even if we are not in verbose mode
                                    gt_log_tab(
                                        "test failed, reporting console output (specified with --report-fails option)"
                                    )
                                    print
                                    print single_test_output

                                gt_log_tab(
                                    "test '%s' %s %s in %.2f sec"
                                    % (test_bin, "." * (80 - len(test_bin)), test_result, single_testduration)
                                )
                    # We need to stop executing if yotta build fails
                    if not yotta_result:
                        gt_log_err("yotta returned %d" % yotta_ret)
                        test_exec_retcode = -1
                        return test_exec_retcode
        else:
            gt_log_err("mbed classic target name '%s' is not in target database" % gt_bright(mut["platform_name"]))

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
        return 0

    # This tool is designed to work in CI.
    # We want to return success codes based on tool actions:
    # only if tests were executed and all of them passed do we
    # return 0 (success)
    if not opts.only_build_tests:
        # Reports (to file)
        if opts.report_junit_file_name:
            junit_report = exporter_junit(test_report)
            with open(opts.report_junit_file_name, "w") as f:
                f.write(junit_report)
        if opts.report_text_file_name:
            gt_log("exporting to junit '%s'..." % gt_bright(opts.report_text_file_name))
            text_report, text_results = exporter_text(test_report)
            with open(opts.report_text_file_name, "w") as f:
                f.write(text_report)
        # Reports (to console)
        if opts.report_json:
            # We will not print summary and json report together
            gt_log("json test report:")
            print exporter_json(test_report)
        else:
            # Final summary
            gt_log("test report:")
            text_report, text_results = exporter_text(test_report)
            print text_report
            print
            print "Result: " + text_results

        # The only_build_tests flag guards this block, so in 'build only' mode we expect only yotta errors
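        # -10 and -100 below are sentinel offsets flagging 'no matching tests' and 'no matching platforms'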
        if test_platforms_match == 0:
            # No tests were executed
            gt_log("no target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_log("no target matching platforms were found!")
            test_exec_retcode += -100

    return test_exec_retcode
Ejemplo n.º 47
0
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements CLI workflow depending on CLI parameters inputed
    @return This function doesn't return, it exits to environment with proper success code
    """

    if not MBED_LMTOOLS:
        gt_log_err("error: mbed-ls proprietary module not installed")
        return (-1)

    if not MBED_HOST_TESTS:
        gt_log_err("error: mbed-host-tests proprietary module not installed")
        return (-1)

    # List available test binaries (names, no extension)
    if opts.list_binaries:
        list_binaries_for_targets()
        return (0)

    # Prints version and exits
    if opts.version:
        print_version()
        return (0)

    # Capture alternative test console inputs, used e.g. by the 'yotta test' command
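    # When a digest source is given, test output is read from a file or stdin instead of a live device,
    # which is why no image, disk or port is passed to run_host_test() below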
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(image_path=None,
                                         disk=None,
                                         port=None,
                                         digest_source=opts.digest_source,
                                         enum_host_tests_path=enum_host_tests_path,
                                         verbose=opts.verbose_test_result_only)

        single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return (status)

    ### Selecting yotta targets to process
    yt_targets = [] # List of yotta targets specified by the user to process during this run
    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_log("checking for yotta target in current directory")
        gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_log("assuming default target as '%s'"% gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_log_tab("yotta target in current directory is not set")
            gt_log_err("yotta target is not specified. Use '%s' or '%s' command to set target"%
            (
                gt_bright('mbedgt -t <yotta_target>'),
                gt_bright('yotta target <yotta_target>')
            ))
            return (-1)

    #print "yt_targets:", yt_targets

    ### Query with mbedls for available mbed-enabled devices
    gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
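    # list_mbeds_ext() returns one dictionary per connected board, including
    # platform_name, platform_name_unique, serial_port, mount_point and target_id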
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()

    ready_mbed_devices = [] # Devices which can be used (are fully detected)

    if mbeds_list:
        gt_log("detected %d device%s"% (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_log_err("can't detect all properties of the device!")
            else:
                ready_mbed_devices.append(mut)
                gt_log_tab("detected '%s' -> '%s', console at '%s', mounted at '%s', target id '%s'"% (
                    gt_bright(mut['platform_name']),
                    gt_bright(mut['platform_name_unique']),
                    gt_bright(mut['serial_port']),
                    gt_bright(mut['mount_point']),
                    gt_bright(mut['target_id'])
                ))
    else:
        gt_log("no devices detected")
        return (RET_NO_DEVICES)

    ### Use yotta to search for the mapping between platform names and available yotta targets
    # Convert platform:target, ... mapping to data structure
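    # e.g. '--map-target K64F:frdm-k64f-gcc' would mark the K64F platform as compatible
    # with the frdm-k64f-gcc yotta target (illustrative values)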
    map_platform_to_yt_target = {}
    if opts.map_platform_to_yt_target:
        gt_log("user defined platform -> target supported mapping definition (specified with --map-target switch)")
        p_to_t_mappings = opts.map_platform_to_yt_target.split(',')
        for mapping in p_to_t_mappings:
            if len(mapping.split(':')) == 2:
                platform, yt_target = mapping.split(':')
                if platform not in map_platform_to_yt_target:
                    map_platform_to_yt_target[platform] = []
                map_platform_to_yt_target[platform].append(yt_target)
                gt_log_tab("mapped platform '%s' to be compatible with '%s'"% (
                    gt_bright(platform),
                    gt_bright(yt_target)
                ))
            else:
                gt_log_tab("unknown format '%s', use 'platform:target' format"% mapping)

    # Check if mbed classic target name can be translated to yotta target name

    mut_info_map = {}   # platform_name : mut_info_dict, extract yt_targets with e.g. [k["yotta_target"] for k in d['K64F']["yotta_targets"]]

    for mut in ready_mbed_devices:
        platform_name = mut['platform_name']
        if platform_name not in mut_info_map:
            mut_info = get_mbed_clasic_target_info(platform_name,
                                                   map_platform_to_yt_target,
                                                   use_yotta_registry=opts.yotta_search_for_mbed_target)
            if mut_info:
                mut_info_map[platform_name] = mut_info
    #print "mut_info_map:", json.dumps(mut_info_map, indent=2)

    ### List of unique ready platform names
    unique_mbed_devices = list(set(mut_info_map.keys()))
    #print "unique_mbed_devices", json.dumps(unique_mbed_devices, indent=2)

    ### Identify which yotta targets have to be built because their platforms are present
    yt_target_platform_map = {}     # yt_target_to_test : platforms to test on
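    # e.g. {'frdm-k64f-gcc': ['K64F']} once a matching device is connected (illustrative)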

    for yt_target in yt_targets:
        for platform_name in unique_mbed_devices:
            if yt_target in [k["yotta_target"] for k in mut_info_map[platform_name]["yotta_targets"]]:
                if yt_target not in yt_target_platform_map:
                    yt_target_platform_map[yt_target] = []
                if platform_name not in yt_target_platform_map[yt_target]:
                    yt_target_platform_map[yt_target].append(platform_name)
    #print "yt_target_platform_map", json.dumps(yt_target_platform_map, indent=2)

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_log("filtering out target ids not on below list (specified with --use-tids switch)")
        accepted_target_ids = opts.use_target_ids.split(',')
        for tid in accepted_target_ids:
            gt_log_tab("accepting target id '%s'"% gt_bright(tid))

    test_exec_retcode = 0       # Becomes non-zero each time a test case result is not 'OK'
    test_platforms_match = 0    # Count how many tests were actually run with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    test_report = {}            # Test report used to export to Junit, HTML etc...
    muts_to_test = []           # MUTs to actually be tested
    test_queue = Queue()        # contains information about test_bin and image_path for each test case
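    # each queued item is a dict: {'test_bin': <test name>, 'image_path': <path to built binary>}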
    test_result_queue = Queue() # used to store results of each thread
    execute_threads = []        # list of threads to run test cases

    ### Check if the --parallel argument is an integer greater than or equal to 1
    try:
        parallel_test_exec = int(opts.parallel_test_exec)
        if parallel_test_exec < 1:
            parallel_test_exec = 1
    except ValueError:
        gt_log_err("argument of mode --parallel is not a int, disable parallel mode")
        parallel_test_exec = 1


    ### Testing procedures, for each target, for each target's compatible platform
    for yotta_target_name in yt_target_platform_map:
        gt_log("processing '%s' yotta target compatible platforms..."% gt_bright(yotta_target_name))

        for platform_name in yt_target_platform_map[yotta_target_name]:
            gt_log("processing '%s' platform..."% gt_bright(platform_name))

            ### Select MUTs to test from the list of available MUTs
            mut = None
            number_of_parallel_instances = 1
            for mbed_dev in ready_mbed_devices:
                if accepted_target_ids and mbed_dev['target_id'] not in accepted_target_ids:
                    continue

                if mbed_dev['platform_name'] == platform_name:
                    mut = mbed_dev
                    muts_to_test.append(mbed_dev)
                    gt_log("using platform '%s' for test:"% gt_bright(platform_name))
                    for k in mbed_dev:
                        gt_log_tab("%s = '%s'"% (k, mbed_dev[k]))
                    if number_of_parallel_instances < parallel_test_exec:
                        number_of_parallel_instances += 1
                    else:
                        break

            # Configuration print mode:
            if opts.verbose_test_configuration_only:
                continue

            if mut:
                target_platforms_match += 1

                # Demo mode: --run implementation (--run was already added to mbedhtrun)
                # We want to pass the file name to mbedhtrun (--run NAME => -f NAME) and run only that one binary
                if opts.run_app:
                    gt_log("running '%s' for '%s'"% (gt_bright(opts.run_app), gt_bright(yotta_target_name)))
                    disk = mut['mount_point']
                    port = mut['serial_port']
                    micro = mut['platform_name']
                    program_cycle_s = mut_info_map[platform_name]['properties']['program_cycle_s']
                    copy_method = opts.copy_method if opts.copy_method else 'shell'
                    enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

                    yotta_config = YottaConfig()
                    yotta_config.init(yotta_target_name)

                    yotta_config_baudrate = yotta_config.get_baudrate()

                    # We will force the configuration-specific baudrate
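                    # e.g. '/dev/ttyACM0' becomes '/dev/ttyACM0:115200' when the yotta config
                    # requests a 115200 baudrate (illustrative values)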
                    if port:
                        port = "%s:%d"% (port, yotta_config_baudrate)

                    test_platforms_match += 1
                    host_test_result = run_host_test(opts.run_app,
                                                     disk,
                                                     port,
                                                     micro=micro,
                                                     copy_method=copy_method,
                                                     program_cycle_s=program_cycle_s,
                                                     digest_source=opts.digest_source,
                                                     json_test_cfg=opts.json_test_configuration,
                                                     run_app=opts.run_app,
                                                     enum_host_tests_path=enum_host_tests_path,
                                                     verbose=True)

                    single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
                    status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                    if single_test_result != TEST_RESULT_OK:
                        test_exec_retcode += 1
                    continue

                # Regression test mode:
                # Building sources for given target and perform normal testing

                yotta_result, yotta_ret = True, 0   # Skip build and assume 'yotta build' was successful
                if opts.skip_yotta_build:
                    gt_log("skipping calling yotta (specified with --skip-build option)")
                else:
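                    # build_with_yotta() is expected to wrap 'yotta --target=<target>,* build'
                    # (see the inline variant in the previous example) and return (success, return_code)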
                    yotta_result, yotta_ret = build_with_yotta(yotta_target_name,
                        verbose=opts.verbose,
                        build_to_release=opts.build_to_release,
                        build_to_debug=opts.build_to_debug)

                # We need to stop executing if yotta build fails
                if not yotta_result:
                    gt_log_err("yotta returned %d"% yotta_ret)
                    return (RET_YOTTA_BUILD_FAIL)

                if opts.only_build_tests:
                    continue

                # Build phase will be followed by test execution for each target
                if yotta_result and not opts.only_build_tests:
                    binary_type = mut_info_map[platform_name]['properties']['binary_type']
                    ctest_test_list = load_ctest_testsuite(os.path.join('.', 'build', yotta_target_name),
                        binary_type=binary_type)
                    #print json.dumps(ctest_test_list, indent=2)
                    #TODO no tests to execute

                filtered_ctest_test_list = ctest_test_list
                test_list = None
                if opts.test_by_names:
                    filtered_ctest_test_list = {}   # Subset of 'ctest_test_list'
                    test_list = opts.test_by_names.split(',')
                    gt_log("test case filter (specified with -n option)")

                    invalid_test_names = False
                    for test_name in test_list:
                        if test_name not in ctest_test_list:
                            gt_log_tab("test name '%s' not found in CTestTestFile.cmake (specified with -n option)"% gt_bright(test_name))
                            invalid_test_names = True
                        else:
                            gt_log_tab("test filtered in '%s'"% gt_bright(test_name))
                            filtered_ctest_test_list[test_name] = ctest_test_list[test_name]
                    if invalid_test_names:
                        gt_log("invalid test case names (specified with -n option)")
                        gt_log_tab("note: test case names are case sensitive")
                        gt_log_tab("note: see list of available test cases below")
                        list_binaries_for_targets(verbose_footer=False)

                gt_log("running %d test%s for target '%s' and platform '%s'"% (
                    len(filtered_ctest_test_list),
                    "s" if len(filtered_ctest_test_list) != 1 else "",
                    gt_bright(yotta_target_name),
                    gt_bright(platform_name)
                ))

                for test_bin, image_path in filtered_ctest_test_list.iteritems():
                    test = {"test_bin":test_bin, "image_path":image_path}
                    test_queue.put(test)

                number_of_threads = 0
                for mut in muts_to_test:
                    #################################################################
                    # Experimental, parallel test execution
                    #################################################################
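                    # Each worker runs run_test_thread(), pulling tests from test_queue and pushing
                    # its partial report and return code into test_result_queue (merged below)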
                    if number_of_threads < parallel_test_exec:
                        args = (test_result_queue, test_queue, opts, mut, mut_info_map[platform_name], yotta_target_name)
                        t = Thread(target=run_test_thread, args=args)
                        execute_threads.append(t)
                        number_of_threads += 1

    gt_log_tab("use %s instance%s for testing" % (len(execute_threads), 's' if len(execute_threads) != 1 else ''))
    for t in execute_threads:
        t.daemon = True
        t.start()
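    # Busy-wait until every worker thread has posted its single result item to the queue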
    while test_result_queue.qsize() != len(execute_threads):
        sleep(1)

    # Merge partial test reports from different threads into the final test report
    for t in execute_threads:
        t.join()
        test_return_data = test_result_queue.get(False)
        test_platforms_match += test_return_data['test_platforms_match']
        test_exec_retcode += test_return_data['test_exec_retcode']
        partial_test_report = test_return_data['test_report']
        # TODO: find a better solution for merging nested report dictionaries
        for report_key in partial_test_report.keys():
            if report_key not in test_report:
                # Copy only this key; a blanket update() would clobber results merged from other threads
                test_report[report_key] = partial_test_report[report_key]
            else:
                test_report[report_key].update(partial_test_report[report_key])

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
        return (0)

    # This tool is designed to work in CI.
    # We want to return success codes based on tool actions:
    # only if tests were executed and all of them passed do we
    # return 0 (success)
    if not opts.only_build_tests:
        # Reports (to file)
        if opts.report_junit_file_name:
            junit_report = exporter_junit(test_report)
            with open(opts.report_junit_file_name, 'w') as f:
                f.write(junit_report)
        if opts.report_text_file_name:
            gt_log("exporting to junit '%s'..."% gt_bright(opts.report_text_file_name))
            text_report, text_results = exporter_text(test_report)
            with open(opts.report_text_file_name, 'w') as f:
                f.write(text_report)

        # Reports (to console)
        if opts.report_json:
            # We will not print summary and json report together
            gt_log("json test report:")
            print exporter_json(test_report)
        else:
            # Final summary
            if test_report:
                gt_log("test report:")
                text_report, text_results = exporter_text(test_report)
                print text_report
                print
                print "Result: " + text_results

        # The only_build_tests flag guards this block, so in 'build only' mode we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_log("no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_log("no target matching platforms were found!")
            test_exec_retcode += -100

    return (test_exec_retcode)