Code Example #1
def search_jobs(keywords, page=1, data_collection=None):
    # Avoid a shared mutable default argument; results would otherwise persist across calls.
    if data_collection is None:
        data_collection = []
    search_template = "curl 'https://chalice-search-api.cloud.seek.com.au/search?siteKey=AU-Main&sourcesystem=houston&userqueryid=97ec6621797cfebd8a0f96a5bc59d139-1449239&userid=2734ad6f-4c64-49e0-aff0-6f615ec2cb82&usersessionid=2734ad6f-4c64-49e0-aff0-6f615ec2cb82&eventCaptureSessionId=bdbc2f3d-c84b-40c1-bf60-bad7564b3380&where=All+Australia&page=<PAGE>&seekSelectAllPages=true&keywords=<KEYWORDS>&include=seodata&isDesktop=true' -H 'Origin: https://www.seek.com.au' -H 'Accept-Encoding: gzip, deflate, br' -H 'Accept-Language: en-US,en;q=0.9' -H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36' -H 'Accept: application/json, text/plain, */*' -H 'Referer: https://www.seek.com.au/java-java-jobs' -H 'Connection: keep-alive' -H 'X-Seek-Site: Chalice' --compressed"
    search_replace_dict = {
        "<KEYWORDS>": [urllib.parse.quote_plus(keywords)],
        "<PAGE>": [str(page)],
    }
    if not do_query_listings:
        print(">> Loading queries from listings.json")
        with open(os.path.join(Config.data_path, "listings.json"),
                  'r') as infile:
            return json.load(infile)
    if Config.auto_y or input("Run search jobs (y)? ") == 'y':
        executor = CommandExecutor()
        raw_executor_results = executor.run_commands_on_workers(
            commands=CommandGenerator.generate_commands(
                search_template, search_replace_dict),
            workers=1)
        json_response = json.loads(
            raw_executor_results[0]['stdout'].decode('utf-8'))
        data_collection += json_response['data']
        print("Current data_collection size: ", len(data_collection))
        if len(data_collection) < json_response['totalCount']:
            return search_jobs(keywords, page + 1, data_collection)
        print(">> Saving to listings.json")
        with open(os.path.join(Config.data_path, "listings.json"),
                  'w') as outfile:
            json.dump(data_collection, outfile, indent=4)
        return data_collection
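Examples #1, #2, #5 and #13 all share one pattern: a curl command template containing placeholders such as <KEYWORDS>, <PAGE> or <ID>, a dict mapping each placeholder to a list of substitution values, and a worker pool that runs the expanded commands and captures their stdout. The CommandGenerator and CommandExecutor classes themselves are not shown in this listing, so the following is only a hypothetical sketch of that contract (expand_template and run_commands are invented names for illustration); the real classes may behave differently.

# Hypothetical sketch of the template-expansion / worker-execution pattern
# assumed by these examples; not the actual CommandGenerator/CommandExecutor code.
import itertools
import shlex
import subprocess
from concurrent.futures import ThreadPoolExecutor


def expand_template(template, replace_dict):
    # Produce one command per combination of placeholder values.
    keys = list(replace_dict)
    commands = []
    for values in itertools.product(*(replace_dict[k] for k in keys)):
        command = template
        for key, value in zip(keys, values):
            command = command.replace(key, value)
        commands.append(command)
    return commands


def run_commands(commands, workers=1):
    # Run each command and collect its stdout, mirroring the list of
    # {'stdout': bytes} results that run_commands_on_workers() appears to return.
    def run(command):
        completed = subprocess.run(shlex.split(command), capture_output=True)
        return {'stdout': completed.stdout}

    with ThreadPoolExecutor(max_workers=workers) as pool:
        return list(pool.map(run, commands))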
Code Example #2
def view_post(ids):
    view_template = 'curl "https://chalice-experience.cloud.seek.com.au/job/<ID>?isDesktop=true^&locale=AU" -H "Origin: https://www.seek.com.au" -H "Accept-Encoding: gzip, deflate, br" -H "Accept-Language: en-US,en;q=0.9" -H "User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36" -H "Accept: application/json, text/plain, */*" -H "Referer: https://www.seek.com.au/job/<ID>?type=standout" -H "If-None-Match: W/^\^"e86-I553tIAg7654QmbWH+FrADxPO7I^\^"" -H "Connection: keep-alive" -H "X-Seek-Site: Chalice" --compressed'
    view_replace_dict = {
        "<ID>": ids,
    }
    if not do_query_posts:
        print(">> Loading queries from posts.json")
        with open(os.path.join(Config.data_path, "posts.json"), 'r') as infile:
            return json.load(infile)
    if Config.auto_y or input("Run view posts (y)? ") == 'y':
        executor = CommandExecutor()
        raw_executor_results = executor.run_commands_on_workers(
            commands=CommandGenerator.generate_commands(
                view_template, view_replace_dict),
            workers=20)
        raw_results = [raw_result for raw_result in raw_executor_results]
        results = [
            json.loads(raw_result['stdout'].decode('utf-8'))
            for raw_result in raw_results
        ]
        print(">> Saving to posts.json")
        with open(os.path.join(Config.data_path, "posts.json"),
                  'w') as outfile:
            json.dump(results, outfile, indent=4)
        return results
Code Example #3
 def __init__(self, logger, disk_util, passphrase_filename, public_settings, distro_info):
     self.logger = logger
     self.executor = CommandExecutor(self.logger)
     self.disk_util = disk_util
     self.passphrase_filename = passphrase_filename  # WARNING: This may be None, in which case we mount the resource disk if it is unencrypted and do nothing if it is encrypted.
     self.public_settings = public_settings
     self.distro_info = distro_info
Code Example #4
        def handle_standard_command(message):

            command = Command()
            command.type = Command.CommandType.STANDARD_COMMAND
            command.message = message
            command.bot = self.__bot
            CommandExecutor.execute_command(command)
Code Example #5
def view_applications(ids):
    view_template = 'curl "https://ca-jobapply-ex-api.cloud.seek.com.au/jobs/<ID>/" -H "Origin: https://www.seek.com.au" -H "Accept-Encoding: gzip, deflate, br" -H "Accept-Language: en-US,en;q=0.9" -H "User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36" -H "Accept: application/json, text/plain, */*" -H "Referer: https://www.seek.com.au/job-apply/<ID>" -H "Connection: keep-alive" -H "X-Seek-Site: SEEK JobApply" --compressed'
    view_replace_dict = {
        "<ID>": ids,
    }
    if not do_query_applications:
        print(">> Loading queries from applications.json")
        with open(os.path.join(Config.data_path, "applications.json"),
                  'r') as infile:
            return json.load(infile)
    if Config.auto_y or input("Run view applications (y)? ") == 'y':
        executor = CommandExecutor()
        raw_executor_results = executor.run_commands_on_workers(
            commands=CommandGenerator.generate_commands(
                view_template, view_replace_dict),
            workers=20)
        raw_results = [raw_result for raw_result in raw_executor_results]
        results = [
            json.loads(raw_result['stdout'].decode('utf-8'))
            for raw_result in raw_results
        ]
        print(">> Saving to applications.json")
        with open(os.path.join(Config.data_path, "applications.json"),
                  'w') as outfile:
            json.dump(results, outfile, indent=4)
        return results
Code Example #6
File: Driver.py Project: arpitrathi/parking_lot
class Driver(object):
    def __init__(self):
        self.commandExecutor = CommandExecutor()

    def processFile(self, fileName):
        fileContents = open(fileName, "r")
        for commandStr in fileContents.readlines():
            try:
                self.commandExecutor.executeCommand(commandStr)
            except AssertionError:
                print("Please input valid commands")
            except Exception:
                traceback.print_exc()

    def processCommandLineArguments(self):
        while True:
            try:
                commandStr = raw_input()
                self.commandExecutor.executeCommand(commandStr)
            except AssertionError:
                print("Please input valid commands")
            except Exception:
                traceback.print_exc()

    def run(self):
        if len(sys.argv) > 1:
            fileName = sys.argv[1]
            self.processFile(fileName)
        else:
            self.processCommandLineArguments()
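Driver.run() takes commands either from a file named as the first command-line argument or interactively via raw_input (this is Python 2 code). The full project file is not shown here, but a typical entry point would presumably look like the following (the __main__ guard is an assumption, not part of the excerpt):

# Assumed entry point for the Driver class above (not shown in the excerpt).
if __name__ == "__main__":
    Driver().run()  # uses sys.argv[1] as an input file when given, else reads commands from stdin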
Code Example #7
 def validate_vfat(self):
     """ Check for vfat module using modprobe and raise exception if not found """
     try:
         executor = CommandExecutor(self.logger)
         executor.Execute("modprobe vfat", True)
     except:
         raise RuntimeError(
             'Incompatible system, prerequisite vfat module was not found.')
Code Example #8
    def __call__(self, args, executor: CommandExecutor):
        target_dir = self.fs.existing_dir(os.sep.join(['.', 'jmake_src', 'target']))

        def investigate_builds(log):
            builds_to_compare = None if not args.compare_builds else args.compare_builds.split(',')
            if builds_to_compare is not None and len(builds_to_compare) != 2:
                log.error('Argument compare_builds should be in format: BN-FIRST,BN-SECOND')
                log.error('Found: %s' % args.compare_builds)
                return Callable.failure

            ec, hits_dir_before = self.get_hits_for_build(log, target_dir, builds_to_compare[0])
            if ec != Callable.success:
                return ec

            ec, hits_dir_current = self.get_hits_for_build(log, target_dir, builds_to_compare[1])
            if ec != Callable.success:
                return ec

            metrics_to_compare = None if args.metrics is None else args.metrics.split(',')
            self.perform_diff(log, hits_dir_current, hits_dir_before, metrics_to_compare)

            return Callable.success

        def investigate_local(log):
            failed_metrics_file = os.sep.join(['target', '.jmake.eh-metrics.failed-metrics.txt'])

            if not self.fs.file_exists(failed_metrics_file):
                log.warn('The file %s doesn\'t exists. Did you run eh-metrics? Has it failed?' % failed_metrics_file)

            if (not any([args.metrics, args.build_number, args.compare_builds])) and self.fs.file_exists(failed_metrics_file):
                commit_hash_unused, metrics_string, build_number_before = self.fs.read_lines(failed_metrics_file)[0].split(':')
                metrics_to_compare = metrics_string.split(',')
            else:
                metrics_to_compare = None if args.metrics is None else args.metrics.split(',')
                build_number_before = args.build_number

            if build_number_before is None:
                log.error('I don\'t know the build number to compare with, sorry :(. '
                          'Did you run eh-metrics? Has it failed? You can always give me a build '
                          'number using --build-number. But this message is unlikely to appear.')
                return Callable.failure

            hits_dir_current = os.sep.join([target_dir, 'eh-metrics-hits'])
            ec, hits_dir_before = self.get_hits_for_build(log, target_dir, build_number_before)
            if ec != Callable.success:
                return ec

            if not self.fs.dir_exists(hits_dir_current):
                log.error('Could not find current eh-metrics hits, did you run ./jmake eh-metrics?')
                return Callable.failure

            self.perform_diff(log, hits_dir_current, hits_dir_before, metrics_to_compare)
            return Callable.success

        if args.compare_builds is not None:
            executor.append(investigate_builds)
        else:
            executor.append(investigate_local)
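Note that the CommandExecutor used here (and in the jmake.py example further down) follows a different contract from the curl runner above: callables that return Callable.success or Callable.failure are queued with executor.append() and presumably run later in order. The sketch below is only a guess at that minimal interface (the class name QueuedExecutor and its execute() method are invented); the real jmake class may differ.

# Hypothetical sketch of an executor that queues callables and runs them in order,
# stopping at the first failure; it only illustrates the usage seen in this example.
class Callable:
    success = 0
    failure = 1


class QueuedExecutor:
    def __init__(self):
        self.callables = []

    def append(self, fn):
        self.callables.append(fn)

    def execute(self, log):
        for fn in self.callables:
            result = fn(log)
            if result != Callable.success:
                return result
        return Callable.success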
Code Example #9
    def __init__(self, install_lib):
        self.__logger = ClientLogger.setup(phase='install')
        self.__command_executor = CommandExecutor(phase='install')

        self.__install_lib = install_lib
        self.__service_link = "/etc/init.d/{0}".format(
            PackageInfo.pip_package_name)
        self.__service_script = '{0}{1}/Service.py'.format(
            install_lib, PackageInfo.python_package_name)
Code Example #10
    def __init__(self, hutil, patching, logger, encryption_environment):
        self.encryption_environment = encryption_environment
        self.hutil = hutil
        self.distro_patcher = patching
        self.logger = logger
        self.ide_class_id = "{32412632-86cb-44a2-9b5c-50d1417354f5}"
        self.vmbus_sys_path = '/sys/bus/vmbus/devices'

        self.command_executor = CommandExecutor(self.logger)
Code Example #11
 def __init__(self, hutil, logger, distro_patcher):
     self.hutil = hutil
     self.logger = logger
     self.executor = CommandExecutor(self.logger)
     self.disk_util = DiskUtil(hutil=self.hutil,
                               patching=distro_patcher,
                               logger=self.logger,
                               encryption_environment=None)
     self.mapper_name = str(uuid.uuid4())
     self.mapper_path = self.DM_PREFIX + self.mapper_name
Code Example #12
    def __auto_update_timer_tick(self):
        if not self.__client.is_in_use():
            try:
                CommandExecutor().execute_commands([
                    '{0} update'.format(PackageInfo.pip_package_name)
                ])
            except Exception:
                pass

        self.__start_auto_update_timer()
Code Example #13
def search_seek(keywords,
                start=0,
                data_collection=None
                ):  # https://au.indeed.com/viewjob?jk=23b529ed5c26e9af
    if data_collection is None:  # avoid a shared mutable default argument
        data_collection = []
    page_entries = 50
    search_template = "curl 'https://au.indeed.com/jobs?as_and=<KEYWORDS>&as_phr=&as_any=&as_not=&as_ttl=&as_cmp=&jt=all&st=&as_src=&salary=&radius=100&l=Melbourne+City+Centre+VIC&fromage=any&limit=50&start=<START>&sort=&psf=advsrch' -H 'authority: au.indeed.com' -H 'upgrade-insecure-requests: 1' -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36' -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' -H 'accept-encoding: gzip, deflate, br' -H 'accept-language: en-US,en;q=0.9' --compressed"
    search_replace_dict = {
        "<KEYWORDS>": [urllib.parse.quote_plus(keywords)],
        "<PAGE>": [str(start)],
    }
    if not do_seek_search: pass
    #print(">> Loading queries from listings.json")
    #with open(os.path.join(Config.data_path, "listings.json"), 'r') as infile:
    #    return json.load(infile)
    if Config.auto_y or input("Run seek jobs (y)? ") == 'y':
        executor = CommandExecutor()
        raw_executor_results = executor.run_commands_on_workers(
            commands=CommandGenerator.generate_commands(
                search_template, search_replace_dict),
            workers=1)
Code Example #14
File: ClientDaemon.py Project: lucosmic/tinkerAccess
    def update(opts, args):
        if not ClientDaemon.__is_in_use(opts, args):
            logger = ClientLogger.setup(opts)
            requested_version = args[1] if len(args) >= 2 else None
            if ClientDaemon.__should_update(opts, requested_version):
                try:
                    requested_package = PackageInfo.pip_package_name

                    # BUG: There is a big fat bug with pip that is causing it to redirect to
                    # install the latest version, even when a specific version is installed.
                    # I'll look into this when I get time.

                    if requested_version:
                        requested_package = '{0}=={1}'.format(requested_package, requested_version)

                    ClientDaemon.stop(opts, args)
                    CommandExecutor().execute_commands([
                        'pip install --upgrade --force-reinstall --ignore-installed --no-cache-dir {0}'
                        .format(requested_package)
                    ])
                except Exception as e:
                    msg = '{0} update failed, remediation maybe required!'.format(PackageInfo.pip_package_name)
                    logger.error(msg)
                    logger.exception(e)
                    raise e
                finally:
                    if not ClientDaemon.status(opts, args):
                        ClientDaemon.restart(opts, args)
            else:

                force_update_hint = 'Use the --force-update option to bypass ' \
                                    'the version check and re-install from PyPi.\n'\
                                    .format(
                                        PackageInfo.pip_package_name,
                                        PackageInfo.version
                                    )

                if not PackageInfo.version:
                    return [
                        'You are currently using a non-versioned build...\n',
                        force_update_hint
                    ], 1

                version = 'latest' if not requested_version else 'requested'
                return [
                    '{0} v{1} already matches the {2} version. \n'
                    'Use the --force-update option to bypass the version check and re-install.\n'.format(
                        PackageInfo.pip_package_name,
                        PackageInfo.version,
                        version
                    )], 1
        else:
            return ['{0} is currently in use, try again later...\n'.format(PackageInfo.pip_package_name)], 1
Code Example #15
    def __init__(self,
                 logger,
                 hutil,
                 disk_util,
                 ongoing_item_config,
                 patching,
                 encryption_environment,
                 status_prefix=''):
        """
        copy_total_size is in bytes.
        """
        self.command_executer = CommandExecutor(logger)
        self.ongoing_item_config = ongoing_item_config
        self.total_size = self.ongoing_item_config.get_current_total_copy_size(
        )
        self.block_size = self.ongoing_item_config.get_current_block_size()
        self.source_dev_full_path = self.ongoing_item_config.get_current_source_path(
        )
        self.destination = self.ongoing_item_config.get_current_destination()
        self.current_slice_index = self.ongoing_item_config.get_current_slice_index(
        )
        self.from_end = self.ongoing_item_config.get_from_end()

        self.last_slice_size = self.total_size % self.block_size
        # we add 1 even if the last_slice_size is zero.
        self.total_slice_size = (
            (self.total_size - self.last_slice_size) / self.block_size) + 1

        self.status_prefix = status_prefix
        self.encryption_environment = encryption_environment
        self.logger = logger
        self.patching = patching
        self.disk_util = disk_util
        self.hutil = hutil
        self.tmpfs_mount_point = "/mnt/azure_encrypt_tmpfs"
        self.slice_file_path = self.tmpfs_mount_point + "/slice_file"
        self.copy_command = self.patching.dd_path
Code Example #16
    def run(opts, args):
        logger = ClientLogger.setup(opts)
        reboot_delay = opts.get(ClientOption.REBOOT_DELAY) * 60
        reboot_on_error = opts.get(ClientOption.REBOOT_ON_ERROR)

        try:
            with DeviceApi(opts) as device, \
                    Client(device, opts) as client, \
                    AutoUpdateTimer(client, opts) as auto_update_timer:

                device.on(
                    Channel.SERIAL,
                    direction=device.GPIO.IN,
                    call_back=client.handle_badge_code
                )

                device.on(
                    Channel.PIN,
                    pin=opts.get(ClientOption.PIN_LOGOUT),
                    direction=device.GPIO.RISING,
                    call_back=client.logout
                )

                client.idle()
                auto_update_timer.start()
                while not client.is_terminated():
                    logger.debug('%s is waiting...', PackageInfo.pip_package_name)
                    client.wait()

        except (KeyboardInterrupt, SystemExit) as e:
            pass

        except Exception as e:
            logger.exception(e)

            if reboot_on_error:

                # reboot is only supported on Raspberry PI devices
                # noinspection PyBroadException
                try:
                    # noinspection PyUnresolvedReferences
                    import RPi.GPIO
                    logger.error('Rebooting in %s minutes...', reboot_delay / 60)
                    CommandExecutor().execute_commands([
                        'sleep {0}s'.format(reboot_delay),
                        'reboot now'
                    ])
                except Exception:
                    pass
Code Example #17
File: ClientDaemon.py Project: lucosmic/tinkerAccess
 def remove(opts, args):
     try:
         CommandExecutor().execute_commands([
             '{'
             '\tset +e',
             '\tservice tinker-access-client stop',
             '\tupdate-rc.d -f tinker-access-client remove',
             '\trm -rf /etc/init.d/tinker-access-client',
             '\twhile [[ $(pip uninstall tinker-access-client -y) == 0 ]]; do :; done',
             '\tfind /usr/local/lib/python2.7/dist-packages/ -name \'tinker[_-]access[_-]client*\' -type d -exec sudo rm -r "{}" \;',
             '\tsed -i.bak \'/^\/.*\/tinker_access_client$/s///g\' /usr/local/lib/python2.7/dist-packages/easy-install.pth',
             '} &> /dev/null'
         ])
     except Exception:
         pass
Code Example #18
def run(vk=vk_init()):
    parser = argparse.ArgumentParser()
    parser.add_argument("-start_time", type=int)
    args = parser.parse_args()

    command_executor = CommandExecutor(args.start_time, VERSION, storage)

    print("STARTED")
    send = vk.rest.post(
        "messages.send",
        peer_id=vk.user_id,
        message=f"<Started {VERSION}. {time.strftime('%H:%M:%S')}>",
        random_id=random.randint(-2147483648, 2147483647))
    while True:
        parse_messages(vk, command_executor)
Code Example #19
class ServiceInstaller(object):
    def __init__(self, install_lib):
        self.__logger = ClientLogger.setup(phase='install')
        self.__command_executor = CommandExecutor(phase='install')

        self.__install_lib = install_lib
        self.__service_link = "/etc/init.d/{0}".format(
            PackageInfo.pip_package_name)
        self.__service_script = '{0}{1}/Service.py'.format(
            install_lib, PackageInfo.python_package_name)

    def install(self):
        try:
            self.__create_service()
            self.__configure_service()
            self.__restart_service()

        except Exception as e:
            self.__logger.debug('%s service installation failed.',
                                PackageInfo.pip_package_name)
            self.__logger.exception(e)
            raise e

    def __create_service(self):
        self.__command_executor.ensure_execute_permission(
            self.__service_script)

        # remove any existing service if it is a file or directory, and it is not a symlink
        if os.path.exists(self.__service_link) and not os.path.islink(
                self.__service_link):
            os.remove(self.__service_link)

        # remove the existing service if it is a symlink that does not point to the current target
        if os.path.lexists(self.__service_link) and os.readlink(
                self.__service_link) != self.__service_script:
            os.remove(self.__service_link)

        # create the symlink if it doesn't already exist
        if not os.path.lexists(self.__service_link):
            os.symlink(self.__service_script, self.__service_link)

    def __configure_service(self):
        time.sleep(5)
        self.__command_executor.execute_commands([
            'update-rc.d -f {0} defaults 91\n'.format(
                PackageInfo.pip_package_name)
        ])

    def __restart_service(self):
        time.sleep(5)
        self.__command_executor.execute_commands(
            ['service {0} restart\n'.format(PackageInfo.pip_package_name)])
Code Example #20
class TransactionalCopyTask(object):
    """
    copy_total_size and skip_target_size are in bytes;
    slice_size is in bytes (50 MB).
    """
    def __init__(self,
                 logger,
                 hutil,
                 disk_util,
                 ongoing_item_config,
                 patching,
                 encryption_environment,
                 status_prefix=''):
        """
        copy_total_size is in bytes.
        """
        self.command_executer = CommandExecutor(logger)
        self.ongoing_item_config = ongoing_item_config
        self.total_size = self.ongoing_item_config.get_current_total_copy_size(
        )
        self.block_size = self.ongoing_item_config.get_current_block_size()
        self.source_dev_full_path = self.ongoing_item_config.get_current_source_path(
        )
        self.destination = self.ongoing_item_config.get_current_destination()
        self.current_slice_index = self.ongoing_item_config.get_current_slice_index(
        )
        self.from_end = self.ongoing_item_config.get_from_end()

        self.last_slice_size = self.total_size % self.block_size
        # we add 1 even if the last_slice_size is zero.
        self.total_slice_size = (
            (self.total_size - self.last_slice_size) / self.block_size) + 1

        self.status_prefix = status_prefix
        self.encryption_environment = encryption_environment
        self.logger = logger
        self.patching = patching
        self.disk_util = disk_util
        self.hutil = hutil
        self.tmpfs_mount_point = "/mnt/azure_encrypt_tmpfs"
        self.slice_file_path = self.tmpfs_mount_point + "/slice_file"
        self.copy_command = self.patching.dd_path

    def resume_copy_internal(self, copy_slice_item_backup_file_size,
                             skip_block, original_total_copy_size):
        block_size_of_slice_item_backup = 512
        # copy the remaining part of the slice
        if copy_slice_item_backup_file_size <= original_total_copy_size:
            skip_of_slice_item_backup_file = copy_slice_item_backup_file_size / block_size_of_slice_item_backup
            left_count = (
                (original_total_copy_size - copy_slice_item_backup_file_size) /
                block_size_of_slice_item_backup)
            total_count = original_total_copy_size / block_size_of_slice_item_backup
            original_device_skip_count = (
                self.block_size * skip_block) / block_size_of_slice_item_backup
            if left_count != 0:
                dd_cmd = str(self.copy_command) \
                       + ' if=' + self.source_dev_full_path \
                       + ' of=' + self.encryption_environment.copy_slice_item_backup_file \
                       + ' bs=' + str(block_size_of_slice_item_backup) \
                       + ' skip=' + str(original_device_skip_count + skip_of_slice_item_backup_file) \
                       + ' seek=' + str(skip_of_slice_item_backup_file) \
                       + ' count=' + str(left_count)

                return_code = self.command_executer.Execute(dd_cmd)
                if return_code != CommonVariables.process_success:
                    return return_code

            dd_cmd = str(self.copy_command) \
                   + ' if=' + self.encryption_environment.copy_slice_item_backup_file \
                   + ' of=' + self.destination \
                   + ' bs=' + str(block_size_of_slice_item_backup) \
                   + ' seek=' + str(original_device_skip_count) \
                   + ' count=' + str(total_count)

            return_code = self.command_executer.Execute(dd_cmd)
            if return_code != CommonVariables.process_success:
                return return_code
            else:
                self.current_slice_index += 1
                self.ongoing_item_config.current_slice_index = self.current_slice_index
                self.ongoing_item_config.commit()
                if os.path.exists(self.encryption_environment.
                                  copy_slice_item_backup_file):
                    os.remove(self.encryption_environment.
                              copy_slice_item_backup_file)
                return return_code
        else:
            self.logger.log(
                msg=
                "copy_slice_item_backup_file_size is bigger than original_total_copy_size",
                level=CommonVariables.ErrorLevel)
            return CommonVariables.backup_slice_file_error

    def resume_copy(self):
        if self.from_end.lower() == 'true':
            skip_block = (self.total_slice_size - self.current_slice_index - 1)
        else:
            skip_block = self.current_slice_index

        return_code = CommonVariables.process_success

        if self.current_slice_index == 0:
            if self.last_slice_size > 0:
                if os.path.exists(self.encryption_environment.
                                  copy_slice_item_backup_file):
                    copy_slice_item_backup_file_size = os.path.getsize(
                        self.encryption_environment.copy_slice_item_backup_file
                    )
                    return_code = self.resume_copy_internal(
                        copy_slice_item_backup_file_size=
                        copy_slice_item_backup_file_size,
                        skip_block=skip_block,
                        original_total_copy_size=self.last_slice_size)
                else:
                    self.logger.log(
                        msg="1. the slice item backup file not exists.",
                        level=CommonVariables.WarningLevel)
            else:
                self.logger.log(msg="the last slice",
                                level=CommonVariables.WarningLevel)
        else:
            if os.path.exists(
                    self.encryption_environment.copy_slice_item_backup_file):
                copy_slice_item_backup_file_size = os.path.getsize(
                    self.encryption_environment.copy_slice_item_backup_file)
                return_code = self.resume_copy_internal(
                    copy_slice_item_backup_file_size,
                    skip_block=skip_block,
                    original_total_copy_size=self.block_size)
            else:
                self.logger.log(
                    msg=
                    "2. unfortunately the slice item backup file not exists.",
                    level=CommonVariables.WarningLevel)
        return return_code

    def copy_last_slice(self, skip_block):
        block_size_of_last_slice = 512
        skip_of_last_slice = (skip_block *
                              self.block_size) / block_size_of_last_slice
        count_of_last_slice = self.last_slice_size / block_size_of_last_slice

        copy_result = self.copy_internal(from_device=self.source_dev_full_path,
                                         to_device=self.destination,
                                         skip=skip_of_last_slice,
                                         seek=skip_of_last_slice,
                                         block_size=block_size_of_last_slice,
                                         count=count_of_last_slice)
        return copy_result

    def begin_copy(self):
        """
        check the device_item size first, cut it
        """
        self.resume_copy()
        if self.from_end.lower() == 'true':
            while self.current_slice_index < self.total_slice_size:
                skip_block = (self.total_slice_size -
                              self.current_slice_index - 1)

                if self.current_slice_index == 0:
                    if self.last_slice_size > 0:
                        copy_result = self.copy_last_slice(skip_block)
                        if copy_result != CommonVariables.process_success:
                            return copy_result
                    else:
                        self.logger.log(
                            msg=
                            "the last slice size is zero, so skip the 0 index."
                        )
                else:
                    copy_result = self.copy_internal(
                        from_device=self.source_dev_full_path,
                        to_device=self.destination,
                        skip=skip_block,
                        seek=skip_block,
                        block_size=self.block_size)

                    if copy_result != CommonVariables.process_success:
                        return copy_result

                self.current_slice_index += 1

                if self.status_prefix:
                    msg = self.status_prefix + ': ' \
                        + str(int(self.current_slice_index / (float)(self.total_slice_size) * 100.0)) \
                        + '%'

                    self.hutil.do_status_report(
                        operation='DataCopy',
                        status=CommonVariables.extension_success_status,
                        status_code=str(CommonVariables.success),
                        message=msg)

                self.ongoing_item_config.current_slice_index = self.current_slice_index
                self.ongoing_item_config.commit()

            return CommonVariables.process_success
        else:
            while self.current_slice_index < self.total_slice_size:
                skip_block = self.current_slice_index

                if self.current_slice_index == (self.total_slice_size - 1):
                    if self.last_slice_size > 0:
                        copy_result = self.copy_last_slice(skip_block)
                        if copy_result != CommonVariables.process_success:
                            return copy_result
                    else:
                        self.logger.log(
                            msg=
                            "the last slice size is zero, so skip the last slice index."
                        )
                else:
                    copy_result = self.copy_internal(
                        from_device=self.source_dev_full_path,
                        to_device=self.destination,
                        skip=skip_block,
                        seek=skip_block,
                        block_size=self.block_size)

                    if copy_result != CommonVariables.process_success:
                        return copy_result

                self.current_slice_index += 1

                if self.status_prefix:
                    msg = self.status_prefix + ': ' \
                        + str(int(self.current_slice_index / (float)(self.total_slice_size) * 100.0)) \
                        + '%'

                    self.hutil.do_status_report(
                        operation='DataCopy',
                        status=CommonVariables.extension_success_status,
                        status_code=str(CommonVariables.success),
                        message=msg)

                self.ongoing_item_config.current_slice_index = self.current_slice_index
                self.ongoing_item_config.commit()
            return CommonVariables.process_success

    """
    TODO: what happens if the copy fails?
    """

    def copy_internal(self,
                      from_device,
                      to_device,
                      block_size,
                      skip=0,
                      seek=0,
                      count=1):
        """
        first, copy the data to the middle cache
        """
        dd_cmd = str(self.copy_command) \
               + ' if=' + from_device \
               + ' of=' + self.slice_file_path \
               + ' bs=' + str(block_size) \
               + ' skip=' + str(skip) \
               + ' count=' + str(count)

        return_code = self.command_executer.Execute(dd_cmd)
        if return_code != CommonVariables.process_success:
            self.logger.log(msg="{0} is {1}".format(dd_cmd, return_code),
                            level=CommonVariables.ErrorLevel)
            return return_code
        else:
            slice_file_size = os.path.getsize(self.slice_file_path)
            self.logger.log(
                msg=("slice_file_size is: {0}".format(slice_file_size)))
            """
            second, copy the data in the middle cache to the backup slice.
            """
            backup_slice_item_cmd = str(self.copy_command) \
                                  + ' if=' + self.slice_file_path \
                                  + ' of=' + self.encryption_environment.copy_slice_item_backup_file \
                                  + ' bs=' + str(block_size) \
                                  + ' count=' + str(count)
            backup_slice_args = shlex.split(backup_slice_item_cmd)
            backup_process = Popen(backup_slice_args)
            self.logger.log(
                "backup_slice_item_cmd is:{0}".format(backup_slice_item_cmd))
            """
            third, copy the data in the middle cache to the target device.
            """
            dd_cmd = str(
                self.copy_command
            ) + ' if=' + self.slice_file_path + ' of=' + to_device + ' bs=' + str(
                block_size) + ' seek=' + str(seek) + ' count=' + str(count)
            return_code = self.command_executer.Execute(dd_cmd)
            if return_code != CommonVariables.process_success:
                self.logger.log(msg=("{0} is: {1}".format(dd_cmd,
                                                          return_code)),
                                level=CommonVariables.ErrorLevel)
            else:
                # the copy completed correctly, so clear the backup slice file item.
                backup_process.kill()
                if os.path.exists(self.encryption_environment.
                                  copy_slice_item_backup_file):
                    self.logger.log(msg="clean up the backup file")
                    os.remove(self.encryption_environment.
                              copy_slice_item_backup_file)
                if os.path.exists(self.slice_file_path):
                    self.logger.log(msg="clean up the slice file")
                    os.remove(self.slice_file_path)
            return return_code

    def prepare_mem_fs(self):
        self.disk_util.make_sure_path_exists(self.tmpfs_mount_point)
        commandToExecute = self.patching.mount_path + " -t tmpfs -o size=" + str(
            self.block_size + 1024) + " tmpfs " + self.tmpfs_mount_point
        self.logger.log(
            "prepare mem fs script is: {0}".format(commandToExecute))
        return_code = self.command_executer.Execute(commandToExecute)
        return return_code

    def clear_mem_fs(self):
        commandToExecute = self.patching.umount_path + " " + self.tmpfs_mount_point
        return_code = self.command_executer.Execute(commandToExecute)
        return return_code
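The slice bookkeeping in __init__ is worth a quick check: last_slice_size is the remainder of the total copy size modulo the block size, and total_slice_size is the number of whole blocks plus one extra slot for that remainder (added even when the remainder is zero). A small worked example, with made-up numbers:

# Illustrative numbers only; shows the slice arithmetic used by TransactionalCopyTask.
total_size = 1050  # bytes to copy
block_size = 100   # bytes per full slice

last_slice_size = total_size % block_size                             # 50
total_slice_size = (total_size - last_slice_size) // block_size + 1   # 10 full slices + 1 partial = 11

print(last_slice_size, total_slice_size)  # -> 50 11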
Code Example #21
    def __call__(self, args, executor: CommandExecutor):
        target_dir = self.fs.existing_dir(
            os.sep.join(['.', 'jmake_src', 'target']))

        def investigate_builds(log):
            builds_to_compare = None if not args.compare_builds else args.compare_builds.split(
                ',')
            if builds_to_compare is not None and len(builds_to_compare) != 2:
                log.error(
                    'Argument compare_builds should be in format: BN-FIRST,BN-SECOND'
                )
                log.error('Found: %s' % args.compare_builds)
                return Callable.failure

            ec, hits_dir_before = self.get_hits_for_build(
                log, target_dir, builds_to_compare[0])
            if ec != Callable.success:
                return ec

            ec, hits_dir_current = self.get_hits_for_build(
                log, target_dir, builds_to_compare[1])
            if ec != Callable.success:
                return ec

            metrics_to_compare = None if args.metrics is None else args.metrics.split(
                ',')
            self.perform_diff(log, hits_dir_current, hits_dir_before,
                              metrics_to_compare)

            return Callable.success

        def investigate_local(log):
            failed_metrics_file = os.sep.join(
                ['target', '.jmake.eh-metrics.failed-metrics.txt'])

            if not self.fs.file_exists(failed_metrics_file):
                log.warn(
                    'The file %s doesn\'t exists. Did you run eh-metrics? Has it failed?'
                    % failed_metrics_file)

            if (not any([args.metrics, args.build_number, args.compare_builds
                         ])) and self.fs.file_exists(failed_metrics_file):
                commit_hash_unused, metrics_string, build_number_before = self.fs.read_lines(
                    failed_metrics_file)[0].split(':')
                metrics_to_compare = metrics_string.split(',')
            else:
                metrics_to_compare = None if args.metrics is None else args.metrics.split(
                    ',')
                build_number_before = args.build_number

            if build_number_before is None:
                log.error(
                    'I don\'t know the build number to compare with, sorry :(. '
                    'Did you run eh-metrics? Has it failed? You can always give me a build '
                    'number using --build-number. But this message is unlikely to appear.'
                )
                return Callable.failure

            hits_dir_current = os.sep.join([target_dir, 'eh-metrics-hits'])
            ec, hits_dir_before = self.get_hits_for_build(
                log, target_dir, build_number_before)
            if ec != Callable.success:
                return ec

            if not self.fs.dir_exists(hits_dir_current):
                log.error(
                    'Could not find current eh-metrics hits, did you run ./jmake eh-metrics?'
                )
                return Callable.failure

            self.perform_diff(log, hits_dir_current, hits_dir_before,
                              metrics_to_compare)
            return Callable.success

        if args.compare_builds is not None:
            executor.append(investigate_builds)
        else:
            executor.append(investigate_local)
Code Example #22
File: jmake.py Project: linuxscn/mysource
#!/usr/bin/env python3
import sys
from CommandDispatcher import CommandDispatcher
from CommandExecutor import CommandExecutor
from Logger import LOG


dispatcher = CommandDispatcher(CommandExecutor().set_logger(LOG.set_debug()))
sys.exit(dispatcher.dispatch_from_params(sys.argv))
Code Example #23
class DiskUtil(object):
    os_disk_lvm = None
    sles_cache = {}
    device_id_cache = {}

    def __init__(self, hutil, patching, logger, encryption_environment):
        self.encryption_environment = encryption_environment
        self.hutil = hutil
        self.distro_patcher = patching
        self.logger = logger
        self.ide_class_id = "{32412632-86cb-44a2-9b5c-50d1417354f5}"
        self.vmbus_sys_path = '/sys/bus/vmbus/devices'

        self.command_executor = CommandExecutor(self.logger)

    def copy(self, ongoing_item_config, status_prefix=''):
        copy_task = TransactionalCopyTask(logger=self.logger,
                                          disk_util=self,
                                          hutil=self.hutil,
                                          ongoing_item_config=ongoing_item_config,
                                          patching=self.distro_patcher,
                                          encryption_environment=self.encryption_environment,
                                          status_prefix=status_prefix)
        try:
            mem_fs_result = copy_task.prepare_mem_fs()
            if mem_fs_result != CommonVariables.process_success:
                return CommonVariables.tmpfs_error
            else:
                return copy_task.begin_copy()
        except Exception as e:
            message = "Failed to perform dd copy: {0}, stack trace: {1}".format(e, traceback.format_exc())
            self.logger.log(msg=message, level=CommonVariables.ErrorLevel)
        finally:
            copy_task.clear_mem_fs()

    def format_disk(self, dev_path, file_system):
        mkfs_command = ""
        if file_system == "ext4":
            mkfs_command = "mkfs.ext4"
        elif file_system == "ext3":
            mkfs_command = "mkfs.ext3"
        elif file_system == "xfs":
            mkfs_command = "mkfs.xfs"
        elif file_system == "btrfs":
            mkfs_command = "mkfs.btrfs"
        mkfs_cmd = "{0} {1}".format(mkfs_command, dev_path)
        return self.command_executor.Execute(mkfs_cmd)

    def make_sure_path_exists(self, path):
        mkdir_cmd = self.distro_patcher.mkdir_path + ' -p ' + path
        self.logger.log("make sure path exists, executing: {0}".format(mkdir_cmd))
        return self.command_executor.Execute(mkdir_cmd)

    def touch_file(self, path):
        touch_cmd = self.distro_patcher.touch_path + ' ' + path
        self.logger.log("touching file, executing: {0}".format(touch_cmd))
        return self.command_executor.Execute(touch_cmd)

    def get_crypt_items(self):
        crypt_items = []
        rootfs_crypt_item_found = False

        if not os.path.exists(self.encryption_environment.azure_crypt_mount_config_path):
            self.logger.log("{0} does not exist".format(self.encryption_environment.azure_crypt_mount_config_path))
        else:
            with open(self.encryption_environment.azure_crypt_mount_config_path,'r') as f:
                for line in f:
                    if not line.strip():
                        continue

                    crypt_mount_item_properties = line.strip().split()

                    crypt_item = CryptItem()
                    crypt_item.mapper_name = crypt_mount_item_properties[0]
                    crypt_item.dev_path = crypt_mount_item_properties[1]

                    header_file_path = None
                    if crypt_mount_item_properties[2] and crypt_mount_item_properties[2] != "None":
                        header_file_path = crypt_mount_item_properties[2]

                    crypt_item.luks_header_path = header_file_path
                    crypt_item.mount_point = crypt_mount_item_properties[3]

                    if crypt_item.mount_point == "/":
                        rootfs_crypt_item_found = True

                    crypt_item.file_system = crypt_mount_item_properties[4]
                    crypt_item.uses_cleartext_key = True if crypt_mount_item_properties[5] == "True" else False

                    try:
                        crypt_item.current_luks_slot = int(crypt_mount_item_properties[6])
                    except IndexError:
                        crypt_item.current_luks_slot = -1

                    crypt_items.append(crypt_item)

        encryption_status = json.loads(self.get_encryption_status())

        if encryption_status["os"] == "Encrypted" and not rootfs_crypt_item_found:
            crypt_item = CryptItem()
            crypt_item.mapper_name = CommonVariables.osmapper_name

            proc_comm = ProcessCommunicator()
            grep_result = self.command_executor.ExecuteInBash("cryptsetup status {0} | grep device:".format(crypt_item.mapper_name), communicator=proc_comm)

            if grep_result == 0:
                crypt_item.dev_path = proc_comm.stdout.strip().split()[1]
            else:
                proc_comm = ProcessCommunicator()
                self.command_executor.Execute("dmsetup table --target crypt", communicator=proc_comm)

                for line in proc_comm.stdout.splitlines():
                    if crypt_item.mapper_name in line:
                        majmin = filter(lambda p: re.match(r'\d+:\d+', p), line.split())[0]
                        src_device = filter(lambda d: d.majmin == majmin, self.get_device_items(None))[0]
                        crypt_item.dev_path = '/dev/' + src_device.name
                        break

            rootfs_dev = next((m for m in self.get_mount_items() if m["dest"] == "/"))
            crypt_item.file_system = rootfs_dev["fs"]

            if not crypt_item.dev_path:
                raise Exception("Could not locate block device for rootfs")

            crypt_item.luks_header_path = "/boot/luks/osluksheader"

            if not os.path.exists(crypt_item.luks_header_path):
                crypt_item.luks_header_path = crypt_item.dev_path

            crypt_item.mount_point = "/"
            crypt_item.uses_cleartext_key = False
            crypt_item.current_luks_slot = -1

            crypt_items.append(crypt_item)

        return crypt_items

    def add_crypt_item(self, crypt_item):
        """
        TODO we should judge that the second time.
        format is like this:
        <target name> <source device> <key file> <options>
        """
        try:
            if not crypt_item.luks_header_path:
                crypt_item.luks_header_path = "None"

            mount_content_item = (crypt_item.mapper_name + " " +
                                  crypt_item.dev_path + " " +
                                  crypt_item.luks_header_path + " " +
                                  crypt_item.mount_point + " " +
                                  crypt_item.file_system + " " +
                                  str(crypt_item.uses_cleartext_key) + " " +
                                  str(crypt_item.current_luks_slot))

            if os.path.exists(self.encryption_environment.azure_crypt_mount_config_path):
                with open(self.encryption_environment.azure_crypt_mount_config_path,'r') as f:
                    existing_content = f.read()
                    if existing_content is not None and existing_content.strip() != "":
                        new_mount_content = existing_content + "\n" + mount_content_item
                    else:
                        new_mount_content = mount_content_item
            else:
                new_mount_content = mount_content_item

            with open(self.encryption_environment.azure_crypt_mount_config_path,'w') as wf:
                wf.write('\n')
                wf.write(new_mount_content)
                wf.write('\n')
            return True
        except Exception:
            return False

    def remove_crypt_item(self, crypt_item):
        if not os.path.exists(self.encryption_environment.azure_crypt_mount_config_path):
            return False

        try:
            mount_lines = []

            with open(self.encryption_environment.azure_crypt_mount_config_path, 'r') as f:
                mount_lines = f.readlines()

            filtered_mount_lines = filter(lambda line: not crypt_item.mapper_name in line, mount_lines)

            with open(self.encryption_environment.azure_crypt_mount_config_path, 'w') as wf:
                wf.write('\n')
                wf.write('\n'.join(filtered_mount_lines))
                wf.write('\n')

            return True

        except Exception:
            return False

    def update_crypt_item(self, crypt_item):
        self.logger.log("Updating entry for crypt item {0}".format(crypt_item))
        self.remove_crypt_item(crypt_item)
        self.add_crypt_item(crypt_item)

    def create_luks_header(self, mapper_name):
        luks_header_file_path = self.encryption_environment.luks_header_base_path + mapper_name
        if not os.path.exists(luks_header_file_path):
            dd_command = self.distro_patcher.dd_path + ' if=/dev/zero bs=33554432 count=1 > ' + luks_header_file_path
            self.command_executor.ExecuteInBash(dd_command, raise_exception_on_failure=True)
        return luks_header_file_path

    def create_cleartext_key(self, mapper_name):
        cleartext_key_file_path = self.encryption_environment.cleartext_key_base_path + mapper_name
        if not os.path.exists(cleartext_key_file_path):
            dd_command = self.distro_patcher.dd_path + ' if=/dev/urandom bs=128 count=1 > ' + cleartext_key_file_path
            self.command_executor.ExecuteInBash(dd_command, raise_exception_on_failure=True)
        return cleartext_key_file_path

    def encrypt_disk(self, dev_path, passphrase_file, mapper_name, header_file):
        return_code = self.luks_format(passphrase_file=passphrase_file, dev_path=dev_path, header_file=header_file)
        if return_code != CommonVariables.process_success:
            self.logger.log(msg=('cryptsetup luksFormat failed, return_code is:{0}'.format(return_code)), level=CommonVariables.ErrorLevel)
            return return_code
        else:
            return_code = self.luks_open(passphrase_file=passphrase_file,
                                        dev_path=dev_path,
                                        mapper_name=mapper_name,
                                        header_file=header_file,
                                        uses_cleartext_key=False)
            if return_code != CommonVariables.process_success:
                self.logger.log(msg=('cryptsetup luksOpen failed, return_code is:{0}'.format(return_code)), level=CommonVariables.ErrorLevel)
            return return_code

    def check_fs(self, dev_path):
        self.logger.log("checking fs:" + str(dev_path))
        check_fs_cmd = self.distro_patcher.e2fsck_path + " -f -y " + dev_path
        return self.command_executor.Execute(check_fs_cmd)

    def expand_fs(self, dev_path):
        expandfs_cmd = self.distro_patcher.resize2fs_path + " " + str(dev_path)
        return self.command_executor.Execute(expandfs_cmd)

    def shrink_fs(self, dev_path, size_shrink_to):
        """
        size_shrink_to is in sector (512 byte)
        """
        shrinkfs_cmd = self.distro_patcher.resize2fs_path + ' ' + str(dev_path) + ' ' + str(size_shrink_to) + 's'
        return self.command_executor.Execute(shrinkfs_cmd)

    def check_shrink_fs(self, dev_path, size_shrink_to):
        return_code = self.check_fs(dev_path)
        if return_code == CommonVariables.process_success:
            return_code = self.shrink_fs(dev_path = dev_path, size_shrink_to = size_shrink_to)
            return return_code
        else:
            return return_code

    def luks_format(self, passphrase_file, dev_path, header_file):
        """
        return the return code of the process for error handling.
        """
        self.hutil.log("dev path to cryptsetup luksFormat {0}".format(dev_path))
        # workaround for SLES SP3
        if self.distro_patcher.distro_info[0].lower() == 'suse' and self.distro_patcher.distro_info[1] == '11':
            proc_comm = ProcessCommunicator()
            passphrase_cmd = self.distro_patcher.cat_path + ' ' + passphrase_file
            self.command_executor.Execute(passphrase_cmd, communicator=proc_comm)
            passphrase = proc_comm.stdout

            cryptsetup_cmd = "{0} luksFormat {1} -q".format(self.distro_patcher.cryptsetup_path, dev_path)
            return self.command_executor.Execute(cryptsetup_cmd, input=passphrase)
        else:
            if header_file is not None:
                cryptsetup_cmd = "{0} luksFormat {1} --header {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path , dev_path , header_file , passphrase_file)
            else:
                cryptsetup_cmd = "{0} luksFormat {1} -d {2} -q".format(self.distro_patcher.cryptsetup_path , dev_path , passphrase_file)
            
            return self.command_executor.Execute(cryptsetup_cmd)
        
    def luks_add_key(self, passphrase_file, dev_path, mapper_name, header_file, new_key_path):
        """
        return the return code of the process for error handling.
        """
        self.hutil.log("new key path: " + (new_key_path))

        if not os.path.exists(new_key_path):
            self.hutil.error("new key does not exist")
            return None

        if header_file:
            cryptsetup_cmd = "{0} luksAddKey {1} {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path, header_file, new_key_path, passphrase_file)
        else:
            cryptsetup_cmd = "{0} luksAddKey {1} {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path, dev_path, new_key_path, passphrase_file)

        return self.command_executor.Execute(cryptsetup_cmd)
        
    def luks_remove_key(self, passphrase_file, dev_path, header_file):
        """
        return the return code of the process for error handling.
        """
        self.hutil.log("removing keyslot: {0}".format(passphrase_file))

        if header_file:
            cryptsetup_cmd = "{0} luksRemoveKey {1} -d {2} -q".format(self.distro_patcher.cryptsetup_path, header_file, passphrase_file)
        else:
            cryptsetup_cmd = "{0} luksRemoveKey {1} -d {2} -q".format(self.distro_patcher.cryptsetup_path, dev_path, passphrase_file)

        return self.command_executor.Execute(cryptsetup_cmd)
        
    def luks_kill_slot(self, passphrase_file, dev_path, header_file, keyslot):
        """
        return the return code of the process for error handling.
        """
        self.hutil.log("killing keyslot: {0}".format(keyslot))

        if header_file:
            cryptsetup_cmd = "{0} luksKillSlot {1} {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path, header_file, keyslot, passphrase_file)
        else:
            cryptsetup_cmd = "{0} luksKillSlot {1} {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path, dev_path, keyslot, passphrase_file)

        return self.command_executor.Execute(cryptsetup_cmd)
        
    def luks_add_cleartext_key(self, passphrase_file, dev_path, mapper_name, header_file):
        """
        return the return code of the process for error handling.
        """
        cleartext_key_file_path = self.encryption_environment.cleartext_key_base_path + mapper_name

        self.hutil.log("cleartext key path: " + (cleartext_key_file_path))

        return self.luks_add_key(passphrase_file, dev_path, mapper_name, header_file, cleartext_key_file_path)

    def luks_dump_keyslots(self, dev_path, header_file):
        cryptsetup_cmd = ""
        if header_file:
            cryptsetup_cmd = "{0} luksDump {1}".format(self.distro_patcher.cryptsetup_path, header_file)
        else:
            cryptsetup_cmd = "{0} luksDump {1}".format(self.distro_patcher.cryptsetup_path, dev_path)

        proc_comm = ProcessCommunicator()
        self.command_executor.Execute(cryptsetup_cmd, communicator=proc_comm)

        lines = filter(lambda l: "key slot" in l.lower(), proc_comm.stdout.split("\n"))
        keyslots = map(lambda l: "enabled" in l.lower(), lines)

        return keyslots

    def luks_open(self, passphrase_file, dev_path, mapper_name, header_file, uses_cleartext_key):
        """
        return the return code of the process for error handling.
        """
        self.hutil.log("dev mapper name to cryptsetup luksOpen " + (mapper_name))

        if uses_cleartext_key:
            passphrase_file = self.encryption_environment.cleartext_key_base_path + mapper_name

        self.hutil.log("keyfile: " + (passphrase_file))

        if header_file:
            cryptsetup_cmd = "{0} luksOpen {1} {2} --header {3} -d {4} -q".format(self.distro_patcher.cryptsetup_path , dev_path , mapper_name, header_file , passphrase_file)
        else:
            cryptsetup_cmd = "{0} luksOpen {1} {2} -d {3} -q".format(self.distro_patcher.cryptsetup_path , dev_path , mapper_name , passphrase_file)

        return self.command_executor.Execute(cryptsetup_cmd)

    def luks_close(self, mapper_name):
        """
        returns the exit code for cryptsetup process.
        """
        self.hutil.log("dev mapper name to cryptsetup luksOpen " + (mapper_name))
        cryptsetup_cmd = "{0} luksClose {1} -q".format(self.distro_patcher.cryptsetup_path, mapper_name)

        return self.command_executor.Execute(cryptsetup_cmd)

    #TODO error handling.
    def append_mount_info(self, dev_path, mount_point):
        shutil.copy2('/etc/fstab', '/etc/fstab.backup.' + str(uuid.uuid4()))
        mount_content_item = dev_path + " " + mount_point + "  auto defaults 0 0"
        new_mount_content = ""
        with open("/etc/fstab",'r') as f:
            existing_content = f.read()
            new_mount_content = existing_content + "\n" + mount_content_item
        with open("/etc/fstab",'w') as wf:
            wf.write(new_mount_content)

    def remove_mount_info(self, mount_point):
        if not mount_point:
            self.logger.log("remove_mount_info: mount_point is empty")
            return

        shutil.copy2('/etc/fstab', '/etc/fstab.backup.' + str(uuid.uuid4()))

        filtered_contents = []
        removed_lines = []

        with open('/etc/fstab', 'r') as f:
            for line in f.readlines():
                line = line.strip()
                pattern = r'\s' + re.escape(mount_point) + r'\s'

                if re.search(pattern, line):
                    self.logger.log("removing fstab line: {0}".format(line))
                    removed_lines.append(line)
                    continue

                filtered_contents.append(line)

        with open('/etc/fstab', 'w') as f:
            f.write('\n')
            f.write('\n'.join(filtered_contents))
            f.write('\n')

        self.logger.log("fstab updated successfully")

        with open('/etc/fstab.azure.backup', 'a+') as f:
            f.write('\n')
            f.write('\n'.join(removed_lines))
            f.write('\n')

        self.logger.log("fstab.azure.backup updated successfully")

    def restore_mount_info(self, mount_point):
        if not mount_point:
            self.logger.log("restore_mount_info: mount_point is empty")
            return

        shutil.copy2('/etc/fstab', '/etc/fstab.backup.' + str(uuid.uuid4()))

        filtered_contents = []
        removed_lines = []

        with open('/etc/fstab.azure.backup', 'r') as f:
            for line in f.readlines():
                line = line.strip()
                pattern = r'\s' + re.escape(mount_point) + r'\s'

                if re.search(pattern, line):
                    self.logger.log("removing fstab.azure.backup line: {0}".format(line))
                    removed_lines.append(line)
                    continue

                filtered_contents.append(line)

        with open('/etc/fstab.azure.backup', 'w') as f:
            f.write('\n')
            f.write('\n'.join(filtered_contents))
            f.write('\n')

        self.logger.log("fstab.azure.backup updated successfully")

        with open('/etc/fstab', 'a+') as f:
            f.write('\n')
            f.write('\n'.join(removed_lines))
            f.write('\n')

        self.logger.log("fstab updated successfully")

    def mount_bek_volume(self, bek_label, mount_point, option_string):
        """
        mount the BEK volume
        """
        self.make_sure_path_exists(mount_point)
        mount_cmd = self.distro_patcher.mount_path + ' -L "' + bek_label + '" ' + mount_point + ' -o ' + option_string
        return self.command_executor.Execute(mount_cmd)

    def mount_filesystem(self, dev_path, mount_point, file_system=None):
        """
        mount the file system.
        """
        self.make_sure_path_exists(mount_point)
        if file_system is None:
            mount_cmd = self.distro_patcher.mount_path + ' ' + dev_path + ' ' + mount_point
        else: 
            mount_cmd = self.distro_patcher.mount_path + ' ' + dev_path + ' ' + mount_point + ' -t ' + file_system

        return self.command_executor.Execute(mount_cmd)

    def mount_crypt_item(self, crypt_item, passphrase):
        self.logger.log("trying to mount the crypt item:" + str(crypt_item))
        mount_filesystem_result = self.mount_filesystem(os.path.join('/dev/mapper', crypt_item.mapper_name), crypt_item.mount_point, crypt_item.file_system)
        self.logger.log("mount file system result:{0}".format(mount_filesystem_result))

    def swapoff(self):
        return self.command_executor.Execute('swapoff -a')

    def umount(self, path):
        umount_cmd = self.distro_patcher.umount_path + ' ' + path
        return self.command_executor.Execute(umount_cmd)

    def umount_all_crypt_items(self):
        for crypt_item in self.get_crypt_items():
            self.logger.log("Unmounting {0}".format(crypt_item.mount_point))
            self.umount(crypt_item.mount_point)

    def mount_all(self):
        mount_all_cmd = self.distro_patcher.mount_path + ' -a'
        return self.command_executor.Execute(mount_all_cmd)

    def get_mount_items(self):
        items = []

        # note: the 'string_escape' decoding below is specific to Python 2 (the interpreter this module targets)
        for line in open('/proc/mounts'):
            line = [s.decode('string_escape') for s in line.split()]
            item = {
                "src": line[0],
                "dest": line[1],
                "fs": line[2]
            }
            items.append(item)

        return items
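    # Illustrative shape of one parsed /proc/mounts entry returned above, e.g.
    # for a line such as "/dev/sda1 /boot ext4 rw,relatime 0 0":
    #   {"src": "/dev/sda1", "dest": "/boot", "fs": "ext4"}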

    def get_encryption_status(self):
        encryption_status = {
            "data": "NotEncrypted",
            "os": "NotEncrypted"
        }

        mount_items = self.get_mount_items()

        os_drive_encrypted = False
        data_drives_found = False
        data_drives_encrypted = True
        osmapper_path = os.path.join(CommonVariables.dev_mapper_root, CommonVariables.osmapper_name)
        for mount_item in mount_items:
            if mount_item["fs"] in ["ext2", "ext4", "ext3", "xfs"] and \
                not "/mnt" == mount_item["dest"] and \
                not "/" == mount_item["dest"] and \
                not "/oldroot/mnt/resource" == mount_item["dest"] and \
                not "/oldroot/boot" == mount_item["dest"] and \
                not "/oldroot" == mount_item["dest"] and \
                not "/mnt/resource" == mount_item["dest"] and \
                not "/boot" == mount_item["dest"]:

                data_drives_found = True

                if CommonVariables.dev_mapper_root not in mount_item["src"]:
                    self.logger.log("Data volume {0} is mounted from {1}".format(mount_item["dest"], mount_item["src"]))
                    data_drives_encrypted = False

            if self.is_os_disk_lvm():
                grep_result = self.command_executor.ExecuteInBash('pvdisplay | grep {0}'.format(osmapper_path), suppress_logging=True)
                if grep_result == 0 and not os.path.exists('/volumes.lvm'):
                    self.logger.log("OS PV is encrypted")
                    os_drive_encrypted = True
            elif mount_item["dest"] == "/" and \
                CommonVariables.dev_mapper_root in mount_item["src"] or \
                "/dev/dm" in mount_item["src"]:
                self.logger.log("OS volume {0} is mounted from {1}".format(mount_item["dest"], mount_item["src"]))
                os_drive_encrypted = True
    
        if not data_drives_found:
            encryption_status["data"] = "NotMounted"
        elif data_drives_encrypted:
            encryption_status["data"] = "Encrypted"
        if os_drive_encrypted:
            encryption_status["os"] = "Encrypted"

        encryption_marker = EncryptionMarkConfig(self.logger, self.encryption_environment)
        decryption_marker = DecryptionMarkConfig(self.logger, self.encryption_environment)
        if decryption_marker.config_file_exists():
            encryption_status["data"] = "DecryptionInProgress"
        elif encryption_marker.config_file_exists():
            encryption_config = EncryptionConfig(self.encryption_environment, self.logger)
            volume_type = encryption_config.get_volume_type().lower()

            if volume_type == CommonVariables.VolumeTypeData.lower() or \
                volume_type == CommonVariables.VolumeTypeAll.lower():
                encryption_status["data"] = "EncryptionInProgress"

            if volume_type == CommonVariables.VolumeTypeOS.lower() or \
                volume_type == CommonVariables.VolumeTypeAll.lower():
                encryption_status["os"] = "EncryptionInProgress"
        elif os.path.exists(osmapper_path) and not os_drive_encrypted:
            encryption_status["os"] = "VMRestartPending"

        return json.dumps(encryption_status)
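    # Illustrative value of the JSON string returned above:
    #   '{"data": "Encrypted", "os": "NotEncrypted"}'
    # where "data" is one of NotEncrypted, NotMounted, Encrypted,
    # EncryptionInProgress or DecryptionInProgress, and "os" is one of
    # NotEncrypted, Encrypted, EncryptionInProgress or VMRestartPending.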

    def query_dev_sdx_path_by_scsi_id(self, scsi_number): 
        p = Popen([self.distro_patcher.lsscsi_path, scsi_number], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        identity, err = p.communicate()
        # identity sample: [5:0:0:0] disk Msft Virtual Disk 1.0 /dev/sdc
        self.logger.log("lsscsi output is: {0}\n".format(identity))
        vals = identity.split()
        if not vals:
            return None
        sdx_path = vals[-1]
        return sdx_path

    def query_dev_sdx_path_by_uuid(self, uuid):
        """
        return /dev/disk/by-id that maps to the sdx_path, otherwise return the original path
        """
        desired_uuid_path = os.path.join(CommonVariables.disk_by_uuid_root, uuid)
        for disk_by_uuid in os.listdir(CommonVariables.disk_by_uuid_root):
            disk_by_uuid_path = os.path.join(CommonVariables.disk_by_uuid_root, disk_by_uuid)

            if disk_by_uuid_path == desired_uuid_path:
                return os.path.realpath(disk_by_uuid_path)

        return desired_uuid_path

    def query_dev_id_path_by_sdx_path(self, sdx_path):
        """
        return /dev/disk/by-id that maps to the sdx_path, otherwise return the original path
        """
        for disk_by_id in os.listdir(CommonVariables.disk_by_id_root):
            disk_by_id_path = os.path.join(CommonVariables.disk_by_id_root, disk_by_id)
            if os.path.realpath(disk_by_id_path) == sdx_path:
                return disk_by_id_path

        return sdx_path

    def query_dev_uuid_path_by_sdx_path(self, sdx_path):
        """
        the behaviour is if we could get the uuid, then return, if not, just return the sdx.
        """
        self.logger.log("querying the sdx path of:{0}".format(sdx_path))
        #blkid path
        p = Popen([self.distro_patcher.blkid_path, sdx_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        identity, err = p.communicate()
        identity = identity.lower()
        self.logger.log("blkid output is: \n" + identity)
        uuid_pattern = 'uuid="'
        index_of_uuid = identity.find(uuid_pattern)
        identity = identity[index_of_uuid + len(uuid_pattern):]
        index_of_quote = identity.find('"')
        uuid = identity[0:index_of_quote]
        if uuid.strip() == "":
            #TODO this is strange?  BUGBUG
            return sdx_path
        return os.path.join("/dev/disk/by-uuid/", uuid)

    def query_dev_uuid_path_by_scsi_number(self, scsi_number):
        # find the scsi device using the lsscsi filter
        # TODO: figure out why disks formatted using fdisk do not have a uuid
        sdx_path = self.query_dev_sdx_path_by_scsi_id(scsi_number)
        return self.query_dev_uuid_path_by_sdx_path(sdx_path)

    def get_device_path(self, dev_name):
        device_path = None

        if os.path.exists("/dev/" + dev_name):
            device_path = "/dev/" + dev_name
        elif os.path.exists("/dev/mapper/" + dev_name):
            device_path = "/dev/mapper/" + dev_name

        return device_path

    def get_device_id(self, dev_path):
        if (dev_path) in DiskUtil.device_id_cache:
            return DiskUtil.device_id_cache[dev_path]

        udev_cmd = "udevadm info -a -p $(udevadm info -q path -n {0}) | grep device_id".format(dev_path)
        proc_comm = ProcessCommunicator()
        self.command_executor.ExecuteInBash(udev_cmd, communicator=proc_comm, suppress_logging=True)
        match = re.findall(r'"{(.*)}"', proc_comm.stdout.strip())
        DiskUtil.device_id_cache[dev_path] = match[0] if match else ""

        return DiskUtil.device_id_cache[dev_path]

    def get_device_items_property(self, dev_name, property_name):
        if (dev_name, property_name) in DiskUtil.sles_cache:
            return DiskUtil.sles_cache[(dev_name, property_name)]

        self.logger.log("getting property of device {0}".format(dev_name))

        device_path = self.get_device_path(dev_name)
        property_value = ""

        if property_name == "SIZE":
            get_property_cmd = self.distro_patcher.blockdev_path + " --getsize64 " + device_path
            proc_comm = ProcessCommunicator()
            self.command_executor.Execute(get_property_cmd, communicator=proc_comm, suppress_logging=True)
            property_value = proc_comm.stdout.strip()
        elif property_name == "DEVICE_ID":
            property_value = self.get_device_id(device_path)
        else:
            get_property_cmd = self.distro_patcher.lsblk_path + " " + device_path + " -b -nl -o NAME," + property_name
            proc_comm = ProcessCommunicator()
            self.command_executor.Execute(get_property_cmd, communicator=proc_comm, raise_exception_on_failure=True, suppress_logging=True)
            for line in proc_comm.stdout.splitlines():
                if line.strip():
                    disk_info_item_array = line.strip().split()
                    if dev_name == disk_info_item_array[0]:
                        if len(disk_info_item_array) > 1:
                            property_value = disk_info_item_array[1]

        DiskUtil.sles_cache[(dev_name, property_name)] = property_value
        return property_value

    def get_block_device_to_azure_udev_table(self):
        table = {}
        azure_links_dir = '/dev/disk/azure'
        
        if not os.path.exists(azure_links_dir):
            return table

        for top_level_item in os.listdir(azure_links_dir):
            top_level_item_full_path = os.path.join(azure_links_dir, top_level_item)
            if os.path.isdir(top_level_item_full_path):
                scsi_path = os.path.join(azure_links_dir, top_level_item)
                for symlink in os.listdir(scsi_path):
                    symlink_full_path = os.path.join(scsi_path, symlink)
                    table[os.path.realpath(symlink_full_path)] = symlink_full_path
            else:
                table[os.path.realpath(top_level_item_full_path)] = top_level_item_full_path
        return table

    def get_azure_symlinks(self):
        azure_udev_links = {}

        if os.path.exists('/dev/disk/azure'):
            wdbackup = os.getcwd()
            os.chdir('/dev/disk/azure')
            for symlink in os.listdir('/dev/disk/azure'):
                azure_udev_links[os.path.basename(symlink)] = os.path.realpath(symlink)
            os.chdir(wdbackup)

        return azure_udev_links

    def log_lsblk_output(self):
        lsblk_command = 'lsblk -o NAME,TYPE,FSTYPE,LABEL,SIZE,RO,MOUNTPOINT'
        proc_comm = ProcessCommunicator()
        self.command_executor.Execute(lsblk_command, communicator=proc_comm)
        self.logger.log('\n' + str(proc_comm.stdout) + '\n')

    def get_device_items_sles(self, dev_path):
        if dev_path:
            self.logger.log(msg=("getting blk info for: {0}".format(dev_path)))
        device_items_to_return = []
        device_items = []

        #first get all the device names
        if dev_path is None:
            lsblk_command = 'lsblk -b -nl -o NAME'
        else:
            lsblk_command = 'lsblk -b -nl -o NAME ' + dev_path

        proc_comm = ProcessCommunicator()
        self.command_executor.Execute(lsblk_command, communicator=proc_comm, raise_exception_on_failure=True)

        for line in proc_comm.stdout.splitlines():
            item_value_str = line.strip()
            if item_value_str:
                device_item = DeviceItem()
                device_item.name = item_value_str.split()[0]
                device_items.append(device_item)

        for device_item in device_items:
            device_item.file_system = self.get_device_items_property(dev_name=device_item.name, property_name='FSTYPE')
            device_item.mount_point = self.get_device_items_property(dev_name=device_item.name, property_name='MOUNTPOINT')
            device_item.label = self.get_device_items_property(dev_name=device_item.name, property_name='LABEL')
            device_item.uuid = self.get_device_items_property(dev_name=device_item.name, property_name='UUID')
            device_item.majmin = self.get_device_items_property(dev_name=device_item.name, property_name='MAJ:MIN')
            device_item.device_id = self.get_device_items_property(dev_name=device_item.name, property_name='DEVICE_ID')

            device_item.azure_name = ''
            for symlink, target in self.get_azure_symlinks().items():
                if device_item.name in target:
                    device_item.azure_name = symlink

            # get the type of device
            model_file_path = '/sys/block/' + device_item.name + '/device/model'

            if os.path.exists(model_file_path):
                with open(model_file_path, 'r') as f:
                    device_item.model = f.read().strip()
            else:
                self.logger.log(msg=("no model file found for device {0}".format(device_item.name)))

            if device_item.model == 'Virtual Disk':
                self.logger.log(msg="model is virtual disk")
                device_item.type = 'disk'
            else:
                partition_files = glob.glob('/sys/block/*/' + device_item.name + '/partition')
                if partition_files is not None and len(partition_files) > 0:
                    self.logger.log(msg="partition file exists for device {0}".format(device_item.name))
                    device_item.type = 'part'

            size_string = self.get_device_items_property(dev_name=device_item.name, property_name='SIZE')

            if size_string is not None and size_string != "":
                device_item.size = int(size_string)

            if device_item.type is None:
                device_item.type = ''

            if device_item.size is not None:
                device_items_to_return.append(device_item)
            else:
                self.logger.log(msg=("skip the device {0} because we could not get size of it.".format(device_item.name)))

        return device_items_to_return
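    # Note: on SLES 11 the properties above are gathered one call at a time
    # (blockdev/lsblk per property); other distros take the single
    # "lsblk -b -n -P" path in get_device_items() below.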

    def get_device_items(self, dev_path):
        if self.distro_patcher.distro_info[0].lower() == 'suse' and self.distro_patcher.distro_info[1] == '11':
            return self.get_device_items_sles(dev_path)
        else:
            if dev_path:
                self.logger.log(msg=("getting blk info for: " + str(dev_path)))

            if dev_path is None:
                lsblk_command = 'lsblk -b -n -P -o NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE,MAJ:MIN'
            else:
                lsblk_command = 'lsblk -b -n -P -o NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE,MAJ:MIN ' + dev_path
            
            proc_comm = ProcessCommunicator()
            self.command_executor.Execute(lsblk_command, communicator=proc_comm, raise_exception_on_failure=True, suppress_logging=True)
            
            device_items = []
            lvm_items = self.get_lvm_items()
            for line in proc_comm.stdout.splitlines():
                if line:
                    device_item = DeviceItem()

                    for disk_info_property in line.split():
                        property_item_pair = disk_info_property.split('=')
                        if property_item_pair[0] == 'SIZE':
                            device_item.size = int(property_item_pair[1].strip('"'))

                        if property_item_pair[0] == 'NAME':
                            device_item.name = property_item_pair[1].strip('"')

                        if property_item_pair[0] == 'TYPE':
                            device_item.type = property_item_pair[1].strip('"')

                        if property_item_pair[0] == 'FSTYPE':
                            device_item.file_system = property_item_pair[1].strip('"')
                        
                        if property_item_pair[0] == 'MOUNTPOINT':
                            device_item.mount_point = property_item_pair[1].strip('"')

                        if property_item_pair[0] == 'LABEL':
                            device_item.label = property_item_pair[1].strip('"')

                        if property_item_pair[0] == 'UUID':
                            device_item.uuid = property_item_pair[1].strip('"')

                        if property_item_pair[0] == 'MODEL':
                            device_item.model = property_item_pair[1].strip('"')

                        if property_item_pair[0] == 'MAJ:MIN':
                            device_item.majmin = property_item_pair[1].strip('"')

                    device_item.device_id = self.get_device_id(self.get_device_path(device_item.name))

                    if device_item.type is None:
                        device_item.type = ''

                    if device_item.type.lower() == 'lvm':
                        for lvm_item in lvm_items:
                            majmin = lvm_item.lv_kernel_major + ':' + lvm_item.lv_kernel_minor

                            if majmin == device_item.majmin:
                                device_item.name = lvm_item.vg_name + '/' + lvm_item.lv_name

                    device_item.azure_name = ''
                    for symlink, target in self.get_azure_symlinks().items():
                        if device_item.name in target:
                            device_item.azure_name = symlink

                    device_items.append(device_item)

            return device_items

    def get_lvm_items(self):
        lvs_command = 'lvs --noheadings --nameprefixes --unquoted -o lv_name,vg_name,lv_kernel_major,lv_kernel_minor'
        proc_comm = ProcessCommunicator()

        if self.command_executor.Execute(lvs_command, communicator=proc_comm):
            return []

        lvm_items = []

        for line in proc_comm.stdout.splitlines():
            if not line:
                continue

            lvm_item = LvmItem()

            for pair in line.strip().split():
                if len(pair.split('=')) != 2:
                    continue

                key, value = pair.split('=')

                if key == 'LVM2_LV_NAME':
                    lvm_item.lv_name = value

                if key == 'LVM2_VG_NAME':
                    lvm_item.vg_name = value

                if key == 'LVM2_LV_KERNEL_MAJOR':
                    lvm_item.lv_kernel_major = value

                if key == 'LVM2_LV_KERNEL_MINOR':
                    lvm_item.lv_kernel_minor = value

            lvm_items.append(lvm_item)

        return lvm_items

    def is_os_disk_lvm(self):
        if DiskUtil.os_disk_lvm is not None:
            return DiskUtil.os_disk_lvm

        device_items = self.get_device_items(None)

        if not any([item.type.lower() == 'lvm' for item in device_items]):
            DiskUtil.os_disk_lvm = False
            return False

        lvm_items = filter(lambda item: item.vg_name == "rootvg", self.get_lvm_items())

        current_lv_names = set([item.lv_name for item in lvm_items])

        DiskUtil.os_disk_lvm = False

        expected_lv_names = set(['homelv', 'optlv', 'rootlv', 'swaplv', 'tmplv', 'usrlv', 'varlv'])
        if expected_lv_names == current_lv_names:
            DiskUtil.os_disk_lvm = True

        expected_lv_names = set(['homelv', 'optlv', 'rootlv', 'tmplv', 'usrlv', 'varlv'])
        if expected_lv_names == current_lv_names:
            DiskUtil.os_disk_lvm = True

        return DiskUtil.os_disk_lvm

    def should_skip_for_inplace_encryption(self, device_item, encrypt_volume_type):
        """
        TYPE="raid0"
        TYPE="part"
        TYPE="crypt"

        first check whether there's one file system on it.
        if the type is disk, then to check whether it have child-items, say the part, lvm or crypt luks.
        if the answer is yes, then skip it.
        """

        if encrypt_volume_type.lower() == 'data':
            self.logger.log(msg="enabling encryption for data volumes", level=CommonVariables.WarningLevel)
            if device_item.device_id.startswith('00000000-0000'):
                self.logger.log(msg="skipping root disk", level=CommonVariables.WarningLevel)
                return True
            if device_item.device_id.startswith('00000000-0001'):
                self.logger.log(msg="skipping resource disk", level=CommonVariables.WarningLevel)
                return True

        if device_item.file_system is None or device_item.file_system == "":
            self.logger.log(msg=("there's no file system on this device: {0}, so skip it.").format(device_item))
            return True
        else:
            if device_item.size < CommonVariables.min_filesystem_size_support:
                self.logger.log(msg="the device size is too small," + str(device_item.size) + " so skip it.", level=CommonVariables.WarningLevel)
                return True

            supported_device_type = ["disk","part","raid0","raid1","raid5","raid10","lvm"]
            if device_item.type not in supported_device_type:
                self.logger.log(msg="the device type: " + str(device_item.type) + " is not supported yet, so skip it.", level=CommonVariables.WarningLevel)
                return True

            if device_item.uuid is None or device_item.uuid == "":
                self.logger.log(msg="the device do not have the related uuid, so skip it.", level=CommonVariables.WarningLevel)
                return True
            sub_items = self.get_device_items("/dev/" + device_item.name)
            if len(sub_items) > 1:
                self.logger.log(msg=("there's sub items for the device:{0} , so skip it.".format(device_item.name)), level=CommonVariables.WarningLevel)
                return True

            azure_blk_items = self.get_azure_devices()
            if device_item.type == "crypt":
                self.logger.log(msg=("device_item.type is:{0}, so skip it.".format(device_item.type)), level=CommonVariables.WarningLevel)
                return True

            if device_item.mount_point == "/":
                self.logger.log(msg=("the mountpoint is root:{0}, so skip it.".format(device_item)), level=CommonVariables.WarningLevel)
                return True
            for azure_blk_item in azure_blk_items:
                if azure_blk_item.name == device_item.name:
                    self.logger.log(msg="the mountpoint is the azure disk root or resource, so skip it.")
                    return True
            return False

    def get_azure_devices(self):
        ide_devices = self.get_ide_devices()
        blk_items = []
        for ide_device in ide_devices:
            current_blk_items = self.get_device_items("/dev/" + ide_device)
            for current_blk_item in current_blk_items:
                blk_items.append(current_blk_item)
        return blk_items

    def get_ide_devices(self):
        """
        return only the device names of IDE devices.
        """
        ide_devices = []
        for vmbus in os.listdir(self.vmbus_sys_path):
            with open('%s/%s/%s' % (self.vmbus_sys_path, vmbus, 'class_id'), 'r') as f:
                class_id = f.read()
            if class_id.strip() == self.ide_class_id:
                device_sdx_path = self.find_block_sdx_path(vmbus)
                self.logger.log("found one ide with vmbus: {0} and the sdx path is: {1}".format(vmbus,
                                                                                                device_sdx_path))
                ide_devices.append(device_sdx_path)
        return ide_devices

    def find_block_sdx_path(self, vmbus):
        device = None
        for root, dirs, files in os.walk(os.path.join(self.vmbus_sys_path , vmbus)):
            if root.endswith("/block"):
                device = dirs[0]
            else:  # older distros
                for d in dirs:
                    if ':' in d and "block" == d.split(':')[0]:
                        device = d.split(':')[1]
                        break
        return device
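To see how the LUKS helpers above are meant to be chained, here is a minimal sketch in the spirit of the ResourceDiskUtil class shown in コード例 #27 below. It assumes the hutil, logger and distro_patcher objects the extension handler already provides; the device, mapper and key-file paths are illustrative only.

disk_util = DiskUtil(hutil=hutil,
                     patching=distro_patcher,
                     logger=logger,
                     encryption_environment=None)

dev_path = '/dev/sdc1'                                    # example data partition
key_file = '/mnt/azure_bek_disk/LinuxPassPhraseFileName'  # example passphrase file
mapper_name = 'datadisk0'                                 # example dm-crypt name

# luksFormat the partition, open it under /dev/mapper, put a file system on the
# crypt layer and mount it (return codes follow CommonVariables.process_success)
if disk_util.luks_format(passphrase_file=key_file, dev_path=dev_path,
                         header_file=None) == CommonVariables.process_success:
    disk_util.luks_open(passphrase_file=key_file, dev_path=dev_path,
                        mapper_name=mapper_name, header_file=None,
                        uses_cleartext_key=False)
    disk_util.format_disk(dev_path='/dev/mapper/' + mapper_name,
                          file_system='ext4')
    disk_util.mount_filesystem('/dev/mapper/' + mapper_name, '/mnt/data0', 'ext4')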
コード例 #24
0
class CommandExecutorTest(TestCase):
    def void(self, ret_code): return lambda logger: ret_code

    def push(self, i, ret=0): return lambda logger: self.execs.append(i) or ret


    def setUp(self):
        self.executor = CommandExecutor().set_logger(Logger().set_none())
        self.executor.perform_console_reset = False
        self.execs = []


    def test_empty_executables(self):
        self.executor.execute()

    def test_executables_should_be_executed_in_order(self):
        self.executor.append(self.push(1))
        self.executor.append(self.push(2))
        self.executor.append(self.push(3))
        self.executor.append(self.push(4))
        self.executor.append(self.push(5))
        execution_ret = self.executor.execute()
        self.assertEqual(Callable.success, execution_ret)
        self.assertListEqual([1, 2, 3, 4, 5], self.execs)

    def post_execution_test_with_return_code(self, ret_code):
        self.executor.append(self.void(Callable.success))
        self.executor.append(self.push(1, ret_code))
        self.executor.append_post(self.push(7, Callable.success))
        execution_ret = self.executor.execute()
        self.assertEqual(ret_code, execution_ret)
        self.assertListEqual([1, 7], self.execs)

    def test_post_execution_should_happen_when_zero_return_code(self):
        self.post_execution_test_with_return_code(Callable.success)

    def test_post_execution_should_happen_when_non_zero_return_code(self):
        self.post_execution_test_with_return_code(1)

    def test_post_execution_should_happen_always_do_not_proceed_return_code(self):
        self.post_execution_test_with_return_code(Callable.do_not_proceed)

    def test_execution_should_stop_on_error(self):
        self.executor.append(self.push(1, Callable.success))
        self.executor.append(self.push(2, 1))
        self.executor.append(self.push(3, Callable.success))
        self.executor.append(self.push(4, 1))
        self.executor.append(self.push(5, Callable.success))
        self.executor.append_post(self.push(6, Callable.success))
        self.executor.append_post(self.push(8, Callable.success))
        self.executor.append_post(self.push(10, 1))
        self.executor.append_post(self.push(12, Callable.success))

        execution_ret = self.executor.execute()
        self.assertEqual(1, execution_ret)
        self.assertListEqual([1, 2, 6, 8, 10], self.execs)

    def test_execution_should_stop_on_do_not_proceed(self):
        self.executor.append_post(self.push(4, Callable.success))
        self.executor.append_post(self.push(5, Callable.do_not_proceed))
        self.executor.append_post(self.push(6, Callable.success))
        self.executor.append(self.push(1, Callable.success))
        self.executor.append(self.push(2, Callable.do_not_proceed))
        self.executor.append(self.push(3, Callable.success))

        execution_ret = self.executor.execute()
        self.assertEqual(Callable.do_not_proceed, execution_ret)
        self.assertListEqual([1, 2, 4, 5], self.execs)
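The tests above pin down the contract for anything handed to CommandExecutor.append / append_post: an executable is a callable that receives the logger and returns Callable.success, a non-zero error code, or Callable.do_not_proceed, and post-executables still run after a failure. A minimal sketch of that contract, reusing only names visible in this example (the debug() call on the logger is borrowed from コード例 #26):

executor = CommandExecutor().set_logger(Logger().set_none())
executor.perform_console_reset = False

def build_step(logger):
    logger.debug('running the build step')    # any real work happens here
    return Callable.success                   # lets the next step run

# regular steps stop the chain on failure; post steps run regardless
executor.append(build_step)
executor.append_post(lambda logger: Callable.success)

result = executor.execute()                   # Callable.success if every step passed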
コード例 #25
0
 def setUp(self):
     self.executor = CommandExecutor().set_logger(Logger().set_none())
     self.executor.perform_console_reset = False
     self.execs = []
コード例 #26
0
ファイル: EhMetrics.py プロジェクト: moink635/mysource
    def __call__(self, args, executor: CommandExecutor):
        def check_remotes(log):
            if len(self.git.get_remotes()) == 0:
                self.set_remote(log)
            return Callable.success

        executor.append(check_remotes)

        if not args.fast:
            executor.append(
                lambda log: Callable.success
                if self.git.fetch_notes("*") == 0
                else log.error("FATAL: git: Failure to fetch notes from origin.") or Callable.do_not_proceed
            )

        if args.branch:

            def branch_check(logger):
                current_branch = self.git.current_branch()
                if not current_branch == args.branch:
                    logger.error(
                        'Branch check failed. You seem to be on "%s"; switch to "%s" first!'
                        % (current_branch, args.branch)
                    )
                    return Callable.do_not_proceed
                else:
                    return Callable.success

            executor.append(branch_check)

        def check_workspace(log: Logger):
            if args.note or not args.non_interactive:
                if not self.git.is_clean_workspace():
                    if args.note:
                        log.error(
                            "I cannot write notes with local changes. Commit your work first, so that notes can "
                            "be attached to your commit."
                        )
                        return Callable.do_not_proceed
                    else:
                        log.warn(
                            "You have uncommitted changes - if engineering health metrics are increased, you will "
                            "not be able to add an exclusion note for the build."
                        )
            return Callable.success

        executor.append(check_workspace)

        def clean_logs(log: Logger):
            if self.fs.dir_exists(MetricsCollector.log_directory):
                log.debug("Removing directory: %s" % MetricsCollector.log_directory)
                self.fs.remove_dir(MetricsCollector.log_directory)
            return Callable.success

        executor.append(clean_logs)

        def record_commit(log: Logger):
            self.fs.write_lines(
                os.sep.join([self.fs.existing_dir(MetricsCollector.log_directory), ".commit"]),
                [self.git.current_commit()],
            )
            return Callable.success

        executor.append(record_commit)

        metrics = DataBean()

        modules_descriptions = [
            JIRADirectoryScanModulesDescription(args.fast, file_utils=self.fs),
            BundledPluginsModulesDescription(args.fast),
            JIRATestsModulesDescription(args.fast),
        ]

        executor.append(self.metrics_processor.process_metrics(args, modules_descriptions, metrics))
        executor.append(self.metrics_processor.generate_report(metrics, self.fs, self.git))
        executor.append(self.metrics_processor.check_values(args, metrics, self.git, self.fs))

        if args.note:
            executor.append(lambda log: self.git.set_user("jmake stats runner", "*****@*****.**"))
            executor.append(
                lambda log: self.git.put_notes(self.json_writer.as_str(metrics), STATS_REF_NAME, "HEAD", True)
            )
            executor.append(lambda log: self.git.push_notes(STATS_REF_NAME))
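For context, a command object like the one above does not run anything inside __call__; it only queues callables on the executor. A hedged sketch of how it is typically driven, where command stands for an already-constructed instance (its constructor is not shown here) and the argparse-style namespace mirrors the attributes __call__ reads (fast, branch, note, non_interactive):

from argparse import Namespace

args = Namespace(fast=False, branch='master', note=False, non_interactive=True)

executor = CommandExecutor().set_logger(Logger().set_none())
command(args, executor)       # __call__ only appends steps to the executor
result = executor.execute()   # the queued steps run here, in order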
コード例 #27
0
class ResourceDiskUtil(object):
    """ Resource Disk Encryption Utilities """

    RD_KEY_FILE = CommonVariables.PassphraseFileNameKey
    RD_MOUNT_POINT = '/mnt/resource'
    RD_BASE_DEV_PATH = '/dev/disk/azure/resource'
    RD_DEV_PATH = '/dev/disk/azure/resource-part1'
    DM_PREFIX = '/dev/mapper/'
    # todo: consolidate this and other key file path references
    # (BekUtil.py, ExtensionParameter.py, and dracut patches)
    RD_KEY_FILE = '/mnt/azure_bek_disk/LinuxPassPhraseFileName'
    RD_KEY_FILE_MOUNT_POINT = '/mnt/azure_bek_disk'
    RD_KEY_VOLUME_LABEL = 'BEK VOLUME'

    def __init__(self, hutil, logger, distro_patcher):
        self.hutil = hutil
        self.logger = logger
        self.executor = CommandExecutor(self.logger)
        self.disk_util = DiskUtil(hutil=self.hutil,
                                  patching=distro_patcher,
                                  logger=self.logger,
                                  encryption_environment=None)
        self.mapper_name = str(uuid.uuid4())
        self.mapper_path = self.DM_PREFIX + self.mapper_name

    def is_encrypt_format_all(self):
        """ return true if current encryption operation is EncryptFormatAll """
        try:
            public_settings_str = self.hutil._context._config[
                'runtimeSettings'][0]['handlerSettings'].get('publicSettings')
            if isinstance(public_settings_str, basestring):
                public_settings = json.loads(public_settings_str)
            else:
                public_settings = public_settings_str
            encryption_operation = public_settings.get(
                CommonVariables.EncryptionEncryptionOperationKey)
            if encryption_operation in [
                    CommonVariables.EnableEncryptionFormatAll
            ]:
                return True
        except:
            self.logger.log("unable to identify current encryption operation")
        return False

    def is_luks_device(self):
        """ checks if the device is set up with a luks header """
        if not self.resource_disk_partition_exists():
            return False
        cmd = 'cryptsetup isLuks ' + self.RD_DEV_PATH
        return (int)(self.executor.Execute(
            cmd, suppress_logging=True)) == CommonVariables.process_success

    def is_luks_device_opened(self):
        """ check for presence of luks uuid to see if device was already opened """
        # suppress logging to avoid log clutter if the device is not open yet
        if not self.resource_disk_partition_exists():
            return False
        cmd = 'test -b /dev/disk/by-uuid/$(cryptsetup luksUUID ' + self.RD_DEV_PATH + ')'
        return (int)(self.executor.ExecuteInBash(
            cmd, suppress_logging=True)) == CommonVariables.process_success

    def is_valid_key(self):
        """ test if current key can be used to open current partition """
        # suppress logging to avoid log clutter if the key doesn't match
        if not self.resource_disk_partition_exists():
            return False
        cmd = 'cryptsetup luksOpen ' + self.RD_DEV_PATH + ' --test-passphrase --key-file ' + self.RD_KEY_FILE
        return (int)(self.executor.Execute(
            cmd, suppress_logging=True)) == CommonVariables.process_success

    def resource_disk_exists(self):
        """ true if the udev name for resource disk exists """
        cmd = 'test -b ' + self.RD_BASE_DEV_PATH
        return (int)(self.executor.Execute(
            cmd, suppress_logging=True)) == CommonVariables.process_success

    def resource_disk_partition_exists(self):
        """ true if udev name for resource disk partition exists """
        cmd = 'test -b ' + self.RD_DEV_PATH
        return (int)(self.executor.Execute(
            cmd, suppress_logging=True)) == CommonVariables.process_success

    def format_luks(self):
        """ set up resource disk crypt device layer using disk util """
        if not self.resource_disk_partition_exists():
            self.logger.log(
                'LUKS format operation requested, but resource disk partition does not exist'
            )
            return False
        return (int)(self.disk_util.luks_format(
            passphrase_file=self.RD_KEY_FILE,
            dev_path=self.RD_DEV_PATH,
            header_file=None)) == CommonVariables.process_success

    def encrypt(self):
        """ use disk util with the appropriate device mapper """
        self.mount_key_volume()
        return (int)(self.disk_util.encrypt_disk(
            dev_path=self.RD_DEV_PATH,
            passphrase_file=self.RD_KEY_FILE,
            mapper_name=self.mapper_name,
            header_file=None)) == CommonVariables.process_success

    def make(self):
        """ make a default file system on top of the crypt layer """
        make_result = self.disk_util.format_disk(
            dev_path=self.mapper_path,
            file_system=CommonVariables.default_file_system)
        if make_result != CommonVariables.process_success:
            self.logger.log(msg="Failed to make file system on ephemeral disk",
                            level=CommonVariables.ErrorLevel)
            return False
        # todo - drop DATALOSS_WARNING_README.txt file to disk
        return True

    def mount_key_volume(self):
        """ attempt to mount the key volume and verify existence of key file"""
        if not os.path.exists(self.RD_KEY_FILE):
            self.disk_util.make_sure_path_exists(self.RD_KEY_FILE_MOUNT_POINT)
            key_volume_device_name = os.popen('blkid -L "' +
                                              self.RD_KEY_VOLUME_LABEL +
                                              '"').read().strip()
            self.disk_util.mount_filesystem(key_volume_device_name,
                                            self.RD_KEY_FILE_MOUNT_POINT)
        return os.path.exists(self.RD_KEY_FILE)

    def mount(self):
        """ mount the file system previously made on top of the crypt layer """
        #ensure that resource disk mount point directory has been created
        cmd = 'mkdir -p ' + self.RD_MOUNT_POINT
        if self.executor.Execute(
                cmd, suppress_logging=True) != CommonVariables.process_success:
            self.logger.log(msg='Failed to precreate mount point directory: ' +
                            cmd,
                            level=CommonVariables.ErrorLevel)
            return False

        # mount to mount point directory
        mount_result = self.disk_util.mount_filesystem(
            dev_path=self.mapper_path,
            mount_point=self.RD_MOUNT_POINT,
            file_system=CommonVariables.default_file_system)
        if mount_result != CommonVariables.process_success:
            self.logger.log(msg="Failed to mount file system on resource disk",
                            level=CommonVariables.ErrorLevel)
            return False
        return True

    def configure_waagent(self):
        """ turn off waagent.conf resource disk management  """
        # set ResourceDisk.MountPoint to standard mount point
        cmd = "sed -i.rdbak1 's|ResourceDisk.MountPoint=.*|ResourceDisk.MountPoint=" + self.RD_MOUNT_POINT + "|' /etc/waagent.conf"
        # set ResourceDiskFormat=n to ensure waagent does not attempt a simultaneous format
        cmd = "sed -i.rdbak2 's|ResourceDisk.Format=y|ResourceDisk.Format=n|' /etc/waagent.conf"
        if self.executor.ExecuteInBash(cmd) != CommonVariables.process_success:
            self.logger.log(
                msg="Failed to set ResourceDiskFormat in /etc/waagent.conf",
                level=CommonVariables.WarningLevel)
            return False
        # todo: restart waagent if necessary to ensure changes are picked up?
        return True

    def configure_fstab(self):
        """ remove resource disk from /etc/fstab if present """
        cmd = "sed -i.bak '/azure_resource-part1/d' /etc/fstab"
        if self.executor.ExecuteInBash(cmd) != CommonVariables.process_success:
            self.logger.log(
                msg="Failed to configure resource disk entry of /etc/fstab",
                level=CommonVariables.WarningLevel)
            return False
        return True

    def unmount_resource_disk(self):
        """ unmount resource disk """
        # after service healing multiple unmounts of key file mount point may be required
        self.disk_util.umount(self.RD_KEY_FILE_MOUNT_POINT)
        self.disk_util.umount(self.RD_KEY_FILE_MOUNT_POINT)
        self.disk_util.umount(self.RD_MOUNT_POINT)
        self.disk_util.umount('/mnt')

    def is_crypt_mounted(self):
        """ return true if mount point is already on a crypt layer """
        mount_items = self.disk_util.get_mount_items()
        for mount_item in mount_items:
            if mount_item["dest"] == self.RD_MOUNT_POINT and mount_item[
                    "src"].startswith(self.DM_PREFIX):
                return True
        return False

    def get_rd_device_mapper(self):
        """ retrieve current device mapper path backing the encrypted resource disk mount point """
        device_items = self.disk_util.get_device_items(self.RD_DEV_PATH)
        for device_item in device_items:
            if device_item.type.lower() == 'crypt':
                self.logger.log('Found device mapper: ' +
                                device_item.name.lower(),
                                level='Info')
                return device_item.name.lower()
        return None

    def remove_device_mapper(self):
        """ use dmsetup to remove the resource disk device mapper if it exists """
        dm_name = self.get_rd_device_mapper()
        if dm_name:
            cmd = 'dmsetup remove ' + self.DM_PREFIX + dm_name
            if self.executor.Execute(cmd) == CommonVariables.process_success:
                return True
            else:
                self.logger.log('failed to remove ' + dm_name)
        else:
            self.logger.log('no resource disk device mapper found')
        return False

    def prepare_partition(self):
        """ create partition on resource disk if missing """
        if self.resource_disk_partition_exists():
            return True
        self.logger.log("resource disk partition does not exist", level='Info')
        cmd = 'parted ' + self.RD_BASE_DEV_PATH + ' mkpart primary ext4 0% 100%'
        if self.executor.ExecuteInBash(cmd) == CommonVariables.process_success:
            # wait for the corresponding udev name to become available
            for i in range(0, 10):
                time.sleep(i)
                if self.resource_disk_partition_exists():
                    return True
        self.logger.log('unable to make resource disk partition')
        return False

    def clear_luks_header(self):
        """ clear luks header by overwriting with 10MB of entropy """
        if not self.resource_disk_partition_exists():
            self.logger.log(
                "resource partition does not exist, no luks header to clear")
            return True
        cmd = 'dd if=/dev/urandom of=' + self.RD_DEV_PATH + ' bs=512 count=20480'
        return self.executor.Execute(cmd) == CommonVariables.process_success

    def try_remount(self):
        """ mount encrypted resource disk if not already mounted"""
        if self.is_crypt_mounted():
            self.logger.log("resource disk already encrypted and mounted",
                            level='Info')
            return True

        if (self.resource_disk_exists() and
                self.resource_disk_partition_exists() and
                self.is_luks_device() and
                self.is_valid_key()):
            # store the currently associated path and name
            current_mapper_name = self.get_rd_device_mapper()
            if current_mapper_name:
                self.mapper_name = current_mapper_name
                self.mapper_path = self.DM_PREFIX + self.mapper_name
                if not self.is_luks_device_opened():
                    # attempt to open
                    self.disk_util.luks_open(passphrase_file=self.RD_KEY_FILE,
                                             dev_path=self.RD_DEV_PATH,
                                             mapper_name=self.mapper_name,
                                             header_file=None,
                                             uses_cleartext_key=False)
                    if not self.is_luks_device_opened():
                        return False
                # attempt mount
                return self.mount()

        # conditions required to re-mount were not met
        return False

    def prepare(self):
        """ prepare a non-encrypted resource disk to be encrypted """
        self.configure_waagent()
        self.configure_fstab()
        if self.resource_disk_partition_exists():
            self.disk_util.swapoff()
            self.unmount_resource_disk()
            self.remove_device_mapper()
            self.clear_luks_header()
        self.prepare_partition()
        return True

    def automount(self):
        """ encrypt resource disk """
        # try to remount if the disk was previously encrypted and is still valid
        if self.try_remount():
            return True

        # unencrypted or unusable
        if self.is_encrypt_format_all():
            return self.prepare() and self.encrypt() and self.make() and self.mount()
        else:
            self.logger.log(
                'EncryptionFormatAll not in use, resource disk will not be automatically formatted and encrypted.'
            )
            # make the implicit "nothing was done" result explicit for callers
            return False
コード例 #28
0
 def setUp(self):
     self.executor = CommandExecutor().set_logger(Logger().set_none())
     self.executor.perform_console_reset = False
     self.execs = []
コード例 #29
0
class ResourceDiskUtil(object):
    """ Resource Disk Encryption Utilities """

    RD_MOUNT_POINT = '/mnt/resource'
    RD_BASE_DEV_PATH = os.path.join(CommonVariables.azure_symlinks_dir,
                                    'resource')
    RD_DEV_PATH = os.path.join(CommonVariables.azure_symlinks_dir,
                               'resource-part1')
    DEV_DM_PREFIX = '/dev/dm-'
    # todo: consolidate this and other key file path references
    # (BekUtil.py, ExtensionParameter.py, and dracut patches)
    RD_MAPPER_NAME = 'resourceencrypt'
    RD_MAPPER_PATH = os.path.join(CommonVariables.dev_mapper_root,
                                  RD_MAPPER_NAME)

    def __init__(self, logger, disk_util, passphrase_filename, public_settings,
                 distro_info):
        self.logger = logger
        self.executor = CommandExecutor(self.logger)
        self.disk_util = disk_util
        self.passphrase_filename = passphrase_filename  # WARNING: This may be None, in which case we mount the resource disk if it is unencrypted and do nothing if it is already encrypted.
        self.public_settings = public_settings
        self.distro_info = distro_info

    def _is_encrypt_format_all(self):
        """ return true if current encryption operation is EncryptFormatAll """
        encryption_operation = self.public_settings.get(
            CommonVariables.EncryptionEncryptionOperationKey)
        if encryption_operation in [CommonVariables.EnableEncryptionFormatAll]:
            return True
        self.logger.log(
            "Current encryption operation is not EnableEncryptionFormatAll")
        return False

    def _is_luks_device(self):
        """ checks if the device is set up with a luks header """
        if not self._resource_disk_partition_exists():
            return False
        cmd = 'cryptsetup isLuks ' + self.RD_DEV_PATH
        return (int)(self.executor.Execute(
            cmd, suppress_logging=True)) == CommonVariables.process_success

    def _resource_disk_partition_exists(self):
        """ true if udev name for resource disk partition exists """
        cmd = 'test -b ' + self.RD_DEV_PATH
        return (int)(self.executor.Execute(
            cmd, suppress_logging=True)) == CommonVariables.process_success

    def _encrypt(self):
        """ use disk util with the appropriate device mapper """
        return (int)(self.disk_util.encrypt_disk(
            dev_path=self.RD_DEV_PATH,
            passphrase_file=self.passphrase_filename,
            mapper_name=self.RD_MAPPER_NAME,
            header_file=None)) == CommonVariables.process_success

    def _format_encrypted_partition(self):
        """ make a default file system on top of the crypt layer """
        make_result = self.disk_util.format_disk(
            dev_path=self.RD_MAPPER_PATH,
            file_system=CommonVariables.default_file_system)
        if make_result != CommonVariables.process_success:
            self.logger.log(msg="Failed to make file system on ephemeral disk",
                            level=CommonVariables.ErrorLevel)
            return False
        # todo - drop DATALOSS_WARNING_README.txt file to disk
        return True

    def _mount_resource_disk(self, dev_path):
        """ mount the file system previously made on top of the crypt layer """
        # ensure that resource disk mount point directory has been created
        cmd = 'mkdir -p ' + self.RD_MOUNT_POINT
        if self.executor.Execute(
                cmd, suppress_logging=True) != CommonVariables.process_success:
            self.logger.log(msg='Failed to precreate mount point directory: ' +
                            cmd,
                            level=CommonVariables.ErrorLevel)
            return False

        # mount to mount point directory
        mount_result = self.disk_util.mount_filesystem(
            dev_path=dev_path, mount_point=self.RD_MOUNT_POINT)
        if mount_result != CommonVariables.process_success:
            self.logger.log(msg="Failed to mount file system on resource disk",
                            level=CommonVariables.ErrorLevel)
            return False
        return True

    def _configure_waagent(self):
        """ turn off waagent.conf resource disk management  """
        # set ResourceDisk.MountPoint to standard mount point
        cmd = "sed -i.rdbak1 's|ResourceDisk.MountPoint=.*|ResourceDisk.MountPoint=" + self.RD_MOUNT_POINT + "|' /etc/waagent.conf"
        if self.executor.ExecuteInBash(cmd) != CommonVariables.process_success:
            self.logger.log(
                msg=
                "Failed to change ResourceDisk.MountPoint in /etc/waagent.conf",
                level=CommonVariables.WarningLevel)
            return False
        # set ResourceDiskFormat=n to ensure waagent does not attempt a simultaneous format
        cmd = "sed -i.rdbak2 's|ResourceDisk.Format=y|ResourceDisk.Format=n|' /etc/waagent.conf"
        if self.executor.ExecuteInBash(cmd) != CommonVariables.process_success:
            self.logger.log(
                msg="Failed to set ResourceDiskFormat in /etc/waagent.conf",
                level=CommonVariables.WarningLevel)
            return False
        # todo: restart waagent if necessary to ensure changes are picked up?
        return True

    def _configure_fstab(self):
        """ remove resource disk from /etc/fstab if present """
        cmd = "sed -i.bak '/azure_resource-part1/d' /etc/fstab"
        if self.executor.ExecuteInBash(cmd) != CommonVariables.process_success:
            self.logger.log(
                msg="Failed to configure resource disk entry of /etc/fstab",
                level=CommonVariables.WarningLevel)
            return False
        return True

    def _unmount_resource_disk(self):
        """ unmount resource disk """
        self.disk_util.umount(self.RD_MOUNT_POINT)
        self.disk_util.umount(CommonVariables.encryption_key_mount_point)
        self.disk_util.umount('/mnt')
        self.disk_util.make_sure_path_exists(
            CommonVariables.encryption_key_mount_point)
        self.disk_util.mount_bek_volume(
            "BEK VOLUME", CommonVariables.encryption_key_mount_point,
            "fmask=077")

    def _is_plain_mounted(self):
        """ return true if mount point is mounted from a non-crypt layer """
        mount_items = self.disk_util.get_mount_items()
        for mount_item in mount_items:
            if mount_item["dest"] == self.RD_MOUNT_POINT and not (
                    mount_item["src"].startswith(
                        CommonVariables.dev_mapper_root)
                    or mount_item["src"].startswith(self.DEV_DM_PREFIX)):
                return True
        return False

    def _is_crypt_mounted(self):
        """ return true if mount point is already on a crypt layer """
        mount_items = self.disk_util.get_mount_items()
        for mount_item in mount_items:
            if mount_item["dest"] == self.RD_MOUNT_POINT and (
                    mount_item["src"].startswith(
                        CommonVariables.dev_mapper_root)
                    or mount_item["src"].startswith(self.DEV_DM_PREFIX)):
                return True
        return False

    def _get_rd_device_mappers(self):
        """
        Retreive any device mapper device on the resource disk (e.g. /dev/dm-0).
        Can't imagine why there would be multiple device mappers here, but doesn't hurt to handle the case
        """
        device_items = self.disk_util.get_device_items(self.RD_DEV_PATH)
        device_mappers = []
        mapper_device_types = [
            "raid0", "raid1", "raid5", "raid10", "lvm", "crypt"
        ]
        for device_item in device_items:
            # fstype should be crypto_LUKS
            dev_path = self.disk_util.get_device_path(device_item.name)
            if device_item.type in mapper_device_types:
                device_mappers.append(device_item)
                self.logger.log('Found device mapper: ' + dev_path,
                                level='Info')
        return device_mappers

    def _remove_device_mappers(self):
        """
        Use dmsetup to remove the resource disk device mapper if it exists.
        This is to allow us to make sure that the resource disk is not being used by anything and we can
        safely luksFormat it.
        """

        # There could be a dependency between the device mappers (e.g. a crypt layer on top of an LVM).
        something_closed = True
        while something_closed is True:
            # The mappers might be dependent on each other, like a crypt on an LVM.
            # Instead of trying to figure out the dependency tree we will try to close anything we can
            # and if anything does get closed we will refresh the list of devices and try to close everything again.
            # In effect we repeat until we either close everything or we reach a point where we can't close anything.
            dm_items = self._get_rd_device_mappers()
            something_closed = False

            if len(dm_items) == 0:
                self.logger.log('no resource disk device mapper found')
            for dm_item in dm_items:
                # try luksClose
                cmd = 'cryptsetup luksClose ' + dm_item.name
                if self.executor.Execute(
                        cmd) == CommonVariables.process_success:
                    self.logger.log('Successfully closed cryptlayer: ' +
                                    dm_item.name)
                    something_closed = True
                else:
                    # try a dmsetup remove, in case it's a non-crypt device mapper (lvm, raid, something we don't know)
                    cmd = 'dmsetup remove ' + self.disk_util.get_device_path(
                        dm_item.name)
                    if self.executor.Execute(
                            cmd) == CommonVariables.process_success:
                        something_closed = True
                    else:
                        self.logger.log('failed to remove ' + dm_item.name)

    def _prepare_partition(self):
        """ create partition on resource disk if missing """
        if self._resource_disk_partition_exists():
            return True
        self.logger.log("resource disk partition does not exist", level='Info')
        cmd = 'parted ' + self.RD_BASE_DEV_PATH + ' mkpart primary ext4 0% 100%'
        if self.executor.ExecuteInBash(cmd) == CommonVariables.process_success:
            # wait for the corresponding udev name to become available
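            # sleep 0 + 1 + ... + 9 = 45 seconds in total before giving up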
            for i in range(0, 10):
                time.sleep(i)
                if self._resource_disk_partition_exists():
                    return True
        self.logger.log('unable to make resource disk partition')
        return False

    def _wipe_partition_header(self):
        """ clear any possible header (luke or filesystem) by overwriting with 10MB of entropy """
        if not self._resource_disk_partition_exists():
            self.logger.log(
                "resource partition does not exist, no header to clear")
            return True
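        # 512-byte blocks x 20480 = 10 MiB of random data written to the start of the partition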
        cmd = 'dd if=/dev/urandom of=' + self.RD_DEV_PATH + ' bs=512 count=20480'
        return self.executor.Execute(cmd) == CommonVariables.process_success

    def try_remount(self):
        """
        Mount the resource disk if not already mounted
        Returns true if the resource disk is mounted, false otherwise
        Throws an exception if anything goes wrong
        """
        self.logger.log("In try_remount")

        if self.passphrase_filename:
            self.logger.log(
                "passphrase_filename(value={0}) is not null, so trying to mount encrypted Resource Disk"
                .format(self.passphrase_filename))

            if self._is_crypt_mounted():
                self.logger.log("Resource disk already encrypted and mounted")
                # Add resource disk to crypttab if crypt mount is used
                # Scenario: RD is already crypt mounted and a crypt-mount-to-crypttab migration is initiated
                if not self.disk_util.should_use_azure_crypt_mount():
                    self.add_resource_disk_to_crypttab()
                return True

            if self._resource_disk_partition_exists() and self._is_luks_device(
            ):
                self.disk_util.luks_open(
                    passphrase_file=self.passphrase_filename,
                    dev_path=self.RD_DEV_PATH,
                    mapper_name=self.RD_MAPPER_NAME,
                    header_file=None,
                    uses_cleartext_key=False)
                self.logger.log("Trying to mount resource disk.")
                mount_retval = self._mount_resource_disk(self.RD_MAPPER_PATH)
                if mount_retval:
                    # The RD mounted successfully but was not auto-mounted,
                    # so try to enable auto-unlock for it going forward
                    self.add_resource_disk_to_crypttab()
                return mount_retval

        else:
            self.logger.log(
                "passphrase_filename(value={0}) is null, so trying to mount plain Resource Disk"
                .format(self.passphrase_filename))
            if self._is_plain_mounted():
                self.logger.log("Resource disk already encrypted and mounted")
                return True
            return self._mount_resource_disk(self.RD_DEV_PATH)

        # conditions required to re-mount were not met
        return False

    def prepare(self):
        """ prepare a non-encrypted resource disk to be encrypted """
        self._configure_waagent()
        self._configure_fstab()
        if self._resource_disk_partition_exists():
            self.disk_util.swapoff()
            self._unmount_resource_disk()
            self._remove_device_mappers()
            self._wipe_partition_header()
        self._prepare_partition()
        return True

    def add_to_fstab(self):
        with open("/etc/fstab") as f:
            lines = f.readlines()

        if not self.disk_util.is_bek_in_fstab_file(lines):
            lines.append(self.disk_util.get_fstab_bek_line())
            self.disk_util.add_bek_to_default_cryptdisks()

        if not any([line.startswith(self.RD_MAPPER_PATH) for line in lines]):
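            # upstart-based Ubuntu 14.x expects 'nobootwait'; systemd-based releases use the standard 'nofail'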
            if self.distro_info[0].lower(
            ) == 'ubuntu' and self.distro_info[1].startswith('14'):
                lines.append(
                    '{0} {1} auto defaults,discard,nobootwait 0 0\n'.format(
                        self.RD_MAPPER_PATH, self.RD_MOUNT_POINT))
            else:
                lines.append(
                    '{0} {1} auto defaults,discard,nofail 0 0\n'.format(
                        self.RD_MAPPER_PATH, self.RD_MOUNT_POINT))

        with open('/etc/fstab', 'w') as f:
            f.writelines(lines)

    def encrypt_format_mount(self):
        if not self.prepare():
            self.logger.log(
                "Failed to prepare VM for Resource Disk Encryption",
                CommonVariables.ErrorLevel)
            return False
        if not self._encrypt():
            self.logger.log("Failed to encrypt Resource Disk Encryption",
                            CommonVariables.ErrorLevel)
            return False
        if not self._format_encrypted_partition():
            self.logger.log(
                "Failed to format the encrypted Resource Disk Encryption",
                CommonVariables.ErrorLevel)
            return False
        if not self._mount_resource_disk(self.RD_MAPPER_PATH):
            self.logger.log(
                "Failed to mount after formatting and encrypting the Resource Disk Encryption",
                CommonVariables.ErrorLevel)
            return False
        # We haven't failed so far, so let's add the RD to crypttab
        self.add_resource_disk_to_crypttab()
        return True

    def add_resource_disk_to_crypttab(self):
        self.logger.log("Adding resource disk to the crypttab file")
        crypt_item = CryptItem()
        crypt_item.dev_path = self.RD_DEV_PATH
        crypt_item.mapper_name = self.RD_MAPPER_NAME
        crypt_item.uses_cleartext_key = False
        self.disk_util.remove_crypt_item(
            crypt_item)  # Remove old item in case it was already there
        self.disk_util.add_crypt_item_to_crypttab(crypt_item,
                                                  self.passphrase_filename)
        self.add_to_fstab()

    def automount(self):
        """
        Mount the resource disk (encrypted or not)
        or
        encrypt the resource disk and mount it if enable was called with EncryptionFormatAll (EFA)

        If False is returned, the resource disk is not mounted.
        """
        # try to remount if the disk was previously encrypted and is still valid
        if self.try_remount():
            return True
        # unencrypted or unusable
        elif self._is_encrypt_format_all():
            return self.encrypt_format_mount()
        else:
            self.logger.log(
                'EncryptionFormatAll not in use, resource disk will not be automatically formatted and encrypted.'
            )

        return self._is_crypt_mounted() or self._is_plain_mounted()
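The _remove_device_mappers loop above keeps retrying until nothing more can be closed. A minimal, self-contained sketch of that fixpoint pattern follows; the names and the toy dependency are illustrative only, not part of the extension:

def close_all(devices, try_close):
    """Keep closing whatever can be closed until a full pass closes nothing."""
    something_closed = True
    while something_closed:
        something_closed = False
        for dev in list(devices):
            if try_close(dev):            # e.g. luksClose first, then dmsetup remove
                devices.remove(dev)
                something_closed = True
    return devices                        # anything left could not be closed


# toy dependency: the LVM device cannot be removed while the crypt layer on top of it is open
state = {'crypt': True, 'lvm': True}

def try_close(dev):
    if dev == 'lvm' and state['crypt']:
        return False
    state[dev] = False
    return True

print(close_all(['lvm', 'crypt'], try_close))   # -> []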
Code example #30
0
File: EhMetrics.py Project: linuxscn/mysource
    def __call__(self, args, executor: CommandExecutor):
        def check_remotes(log):
            if len(self.git.get_remotes()) == 0:
                self.set_remote(log)
            return Callable.success

        executor.append(check_remotes)

        if not args.fast:
            executor.append(
                lambda log: Callable.success
                if self.git.fetch_notes('*') == 0 else log.error(
                    'FATAL: git: Failure to fetch notes from origin.') or
                Callable.do_not_proceed)

        if args.branch:

            def branch_check(logger):
                current_branch = self.git.current_branch()
                if not current_branch == args.branch:
                    logger.error(
                        'Branch check failed. You seem to be on "%s"; switch to "%s" first!'
                        % (current_branch, args.branch))
                    return Callable.do_not_proceed
                else:
                    return Callable.success

            executor.append(branch_check)

        def check_workspace(log: Logger):
            if args.note or not args.non_interactive:
                if not self.git.is_clean_workspace():
                    if args.note:
                        log.error(
                            'I cannot write notes with local changes. Commit your work first, so that notes can '
                            'be attached to your commit.')
                        return Callable.do_not_proceed
                    else:
                        log.warn(
                            'You have uncommitted changes - if engineering health metrics are increased, you will '
                            'not be able to add an exclusion note for the build.'
                        )
            return Callable.success

        executor.append(check_workspace)

        def clean_logs(log: Logger):
            if self.fs.dir_exists(MetricsCollector.log_directory):
                log.debug('Removing directory: %s' %
                          MetricsCollector.log_directory)
                self.fs.remove_dir(MetricsCollector.log_directory)
            return Callable.success

        executor.append(clean_logs)

        def record_commit(log: Logger):
            self.fs.write_lines(
                os.sep.join([
                    self.fs.existing_dir(MetricsCollector.log_directory),
                    '.commit'
                ]), [self.git.current_commit()])
            return Callable.success

        executor.append(record_commit)

        metrics = DataBean()

        modules_descriptions = [
            JIRADirectoryScanModulesDescription(args.fast, file_utils=self.fs),
            BundledPluginsModulesDescription(args.fast),
            JIRATestsModulesDescription(args.fast)
        ]

        executor.append(
            self.metrics_processor.process_metrics(args, modules_descriptions,
                                                   metrics))
        executor.append(
            self.metrics_processor.generate_report(metrics, self.fs, self.git))
        executor.append(
            self.metrics_processor.check_values(args, metrics, self.git,
                                                self.fs))

        if args.note:
            executor.append(lambda log: self.git.set_user(
                'jmake stats runner', '*****@*****.**'))
            executor.append(lambda log: self.git.put_notes(
                self.json_writer.as_str(metrics), STATS_REF_NAME, 'HEAD', True)
                            )
            executor.append(lambda log: self.git.push_notes(STATS_REF_NAME))
Code example #31
0
class CommandExecutorTest(TestCase):
    def void(self, ret_code):
        return lambda logger: ret_code

    def push(self, i, ret=0):
        return lambda logger: self.execs.append(i) or ret

    def setUp(self):
        self.executor = CommandExecutor().set_logger(Logger().set_none())
        self.executor.perform_console_reset = False
        self.execs = []

    def test_empty_executables(self):
        self.executor.execute()

    def test_executables_should_be_executed_in_order(self):
        self.executor.append(self.push(1))
        self.executor.append(self.push(2))
        self.executor.append(self.push(3))
        self.executor.append(self.push(4))
        self.executor.append(self.push(5))
        execution_ret = self.executor.execute()
        self.assertEqual(Callable.success, execution_ret)
        self.assertListEqual([1, 2, 3, 4, 5], self.execs)

    def post_execution_test_with_return_code(self, ret_code):
        self.executor.append(self.void(Callable.success))
        self.executor.append(self.push(1, ret_code))
        self.executor.append_post(self.push(7, Callable.success))
        execution_ret = self.executor.execute()
        self.assertEqual(ret_code, execution_ret)
        self.assertListEqual([1, 7], self.execs)

    def test_post_execution_should_happen_when_zero_return_code(self):
        self.post_execution_test_with_return_code(Callable.success)

    def test_post_execution_should_happen_when_non_zero_return_code(self):
        self.post_execution_test_with_return_code(1)

    def test_post_execution_should_happen_always_do_not_proceed_return_code(
            self):
        self.post_execution_test_with_return_code(Callable.do_not_proceed)

    def test_execution_should_stop_on_error(self):
        self.executor.append(self.push(1, Callable.success))
        self.executor.append(self.push(2, 1))
        self.executor.append(self.push(3, Callable.success))
        self.executor.append(self.push(4, 1))
        self.executor.append(self.push(5, Callable.success))
        self.executor.append_post(self.push(6, Callable.success))
        self.executor.append_post(self.push(8, Callable.success))
        self.executor.append_post(self.push(10, 1))
        self.executor.append_post(self.push(12, Callable.success))

        execution_ret = self.executor.execute()
        self.assertEqual(1, execution_ret)
        self.assertListEqual([1, 2, 6, 8, 10], self.execs)

    def test_execution_should_stop_on_do_not_proceed(self):
        self.executor.append_post(self.push(4, Callable.success))
        self.executor.append_post(self.push(5, Callable.do_not_proceed))
        self.executor.append_post(self.push(6, Callable.success))
        self.executor.append(self.push(1, Callable.success))
        self.executor.append(self.push(2, Callable.do_not_proceed))
        self.executor.append(self.push(3, Callable.success))

        execution_ret = self.executor.execute()
        self.assertEqual(Callable.do_not_proceed, execution_ret)
        self.assertListEqual([1, 2, 4, 5], self.execs)
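A short usage sketch of the contract these tests exercise: tasks added with append run in order and stop at the first non-success return code, while append_post tasks still run afterwards; execute() returns the non-success code. The lambda bodies below are made up for illustration.

executor = CommandExecutor().set_logger(Logger().set_none())
executor.perform_console_reset = False                    # as in setUp above

executor.append(lambda log: Callable.success)             # runs
executor.append(lambda log: Callable.do_not_proceed)      # runs and stops the main chain
executor.append(lambda log: Callable.success)             # skipped
executor.append_post(lambda log: Callable.success)        # post tasks still run

ret = executor.execute()                                  # ret == Callable.do_not_proceed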
Code example #32
0
File: Driver.py Project: arpitrathi/parking_lot
    def __init__(self):
        self.commandExecutor = CommandExecutor()