Example #1
0
 def test_prompt_for_confirmation(self):
     """Test :func:`humanfriendly.prompts.prompt_for_confirmation()`."""
     # Replies that should be recognized as agreement.
     for answer in ('yes', 'Yes', 'YES', 'y', 'Y'):
         with PatchedAttribute(prompts, 'interactive_prompt', lambda p, a=answer: a):
             assert prompt_for_confirmation("Are you sure?") is True
     # Replies that should be recognized as disagreement.
     for answer in ('no', 'No', 'NO', 'n', 'N'):
         with PatchedAttribute(prompts, 'interactive_prompt', lambda p, a=answer: a):
             assert prompt_for_confirmation("Are you sure?") is False
     # An empty reply should select whichever default was given.
     for fallback in (True, False):
         with PatchedAttribute(prompts, 'interactive_prompt', lambda p: ''):
             assert prompt_for_confirmation("Are you sure?", default=fallback) is fallback
     # An empty reply without a default should warn and prompt again.
     queued_replies = ['', 'y']
     with PatchedAttribute(prompts, 'interactive_prompt', lambda p: queued_replies.pop(0)):
         with CaptureOutput() as capturer:
             assert prompt_for_confirmation("Are you sure?") is True
             assert "there's no default choice" in capturer.get_text()
     # The default choice should be rendered in uppercase.
     with PatchedAttribute(prompts, 'interactive_prompt', lambda p: 'y'):
         for chosen_default, rendered in ((True, 'Y/n'), (False, 'y/N'), (None, 'y/n')):
             with CaptureOutput() as capturer:
                 assert prompt_for_confirmation("Are you sure?", default=chosen_default) is True
                 assert rendered in capturer.get_text()
     # Persistently invalid replies should eventually raise an exception.
     with PatchedAttribute(prompts, 'interactive_prompt', lambda p: ''):
         self.assertRaises(TooManyInvalidReplies, prompt_for_confirmation, "Are you sure?")
Example #2
0
    def run(self):
        """Prompt for a power operation and apply it to one VM or to every VM in a folder."""
        print("Enter the power operation you wish to perform")  # noqa: T001
        operation = prompt_for_choice(['on', 'off', 'reset', 'suspend'],
                                      padding=False)
        # Guest-OS operations are attempted for every VM processed below.
        attempt_guest = prompt_for_confirmation("Attempt to use guest OS "
                                                "operations, if available? ")

        if prompt_for_confirmation("Multiple VMs? ", default=True):
            # Batch mode: operate on every VM directly inside the chosen folder.
            folder, folder_name = resolve_path(self.server, "folder",
                                               "with VMs")
            vms = [VM(vm=x) for x in folder.childEntity if is_vm(x)]
            self._log.info("Found %d VMs in folder '%s'", len(vms),
                           folder_name)
            if prompt_for_confirmation("Show the status of the "
                                       "VMs in the folder? "):
                self._log.info(
                    "Folder structure: \n%s",
                    format_structure(
                        folder.enumerate(recursive=True, power_status=True)))
            if prompt_for_confirmation("Continue? ", default=True):
                # Progress bar over the VMs while the state changes are applied.
                pbar = tqdm.tqdm(vms,
                                 unit="VMs",
                                 desc="Performing power operation " +
                                 operation)
                for vm in pbar:
                    pbar.set_postfix_str(vm.name)
                    vm.change_state(operation, attempt_guest)
                pbar.close()

        else:
            # Single-VM mode: resolve one VM and change its power state.
            vm = resolve_path(self.server, "VM")[0]
            self._log.info("Changing power state of '%s' "
                           "to '%s'", vm.name, operation)
            vm.change_state(operation, attempt_guest)
Example #3
0
 def test_prompt_for_confirmation(self):
     """Test :func:`humanfriendly.prompts.prompt_for_confirmation()`."""
     # Each of these replies signals agreement.
     for positive in ['yes', 'Yes', 'YES', 'y', 'Y']:
         with PatchedAttribute(prompts, 'interactive_prompt', lambda _, r=positive: r):
             assert prompt_for_confirmation("Are you sure?") is True
     # Each of these replies signals disagreement.
     for negative in ['no', 'No', 'NO', 'n', 'N']:
         with PatchedAttribute(prompts, 'interactive_prompt', lambda _, r=negative: r):
             assert prompt_for_confirmation("Are you sure?") is False
     # Empty replies fall back to the supplied default choice.
     for preset in [True, False]:
         with PatchedAttribute(prompts, 'interactive_prompt', lambda _: ''):
             assert prompt_for_confirmation("Are you sure?", default=preset) is preset
     # With no default an empty reply triggers a warning and another prompt.
     pending = ['', 'y']
     with PatchedAttribute(prompts, 'interactive_prompt', lambda _: pending.pop(0)):
         with CaptureOutput() as capturer:
             assert prompt_for_confirmation("Are you sure?") is True
             assert "there's no default choice" in capturer.get_text()
     # The default reply is displayed in uppercase in the prompt text.
     with PatchedAttribute(prompts, 'interactive_prompt', lambda _: 'y'):
         for given_default, shown in [(True, 'Y/n'), (False, 'y/N'), (None, 'y/n')]:
             with CaptureOutput() as capturer:
                 assert prompt_for_confirmation("Are you sure?", default=given_default) is True
                 assert shown in capturer.get_text()
     # Repeatedly invalid input eventually raises TooManyInvalidReplies.
     with PatchedAttribute(prompts, 'interactive_prompt', lambda _: ''):
         self.assertRaises(TooManyInvalidReplies, prompt_for_confirmation, "Are you sure?")
Example #4
0
    def prompt_overwrite(filename):
        """Ask the user whether an existing output file may be overwritten."""
        question = 'Output file {} already exists, overwrite?'.format(filename)
        return prompts.prompt_for_confirmation(question, padding=False)
Example #5
0
def input_for_confirmation(player, info, question):
    """
    Obtain a yes/no answer to *question* from *player*.

    Human players are asked interactively via
    :func:`prompt_for_confirmation`; bot players delegate to their
    ``get_move()`` method, whose reply must be a :class:`bool`.

    :param player: the player being asked (``player.bot`` decides the path)
    :param info: type of decision; must be one of the recognized decision types
    :param question: the question to confirm
    :raises ValueError: if *info* is not a recognized decision type
    :raises BotError: if a bot returns something other than a bool
    :return: the player's boolean decision
    """
    if info not in (basic_types_of_decisions + cards_types_of_decisions):
        raise ValueError('type_of_decision not recognized.')
    if isinstance(player.bot, Human):
        return prompt_for_confirmation(question)
    result = player.bot.get_move('confirmation', info, question)
    # isinstance() is the idiomatic type check; bool cannot be subclassed,
    # so this is exactly equivalent to the old `type(result) != bool` test.
    if not isinstance(result, bool):
        raise BotError('Bot\'s choice is not bool type.')
    return result
Example #6
0
 def processImage(self):
     """Repeatedly display the image and process indices until the user declines."""
     print("You are about to see an image and evaluate whether you want to"
           " take action: {}".format(self.actionPrompt))
     while True:
         self.displayImage()
         if prompts.prompt_for_confirmation(self.actionPrompt):
             self.processIndices()
         else:
             print("Ending {}".format(self.__class__.__name__))
             break
Example #7
0
    def bp(self,
           address,
           callback=None,
           forced=False,
           **options) -> lldb.SBBreakpoint:
        """
        Add a breakpoint at the given address.

        :param address: address to set the breakpoint at
        :param callback: callback(hilda, frame, bp_loc, options) to be called
            when the breakpoint is hit
        :param forced: whether the breakpoint should be protected from usual removal.
        :param options: extra options stored alongside the breakpoint
        :return: the newly created breakpoint
        """
        # If a breakpoint already covers this address, offer to remove the old one(s).
        if address in [bp.address for bp in self.breakpoints.values()]:
            if prompts.prompt_for_confirmation(
                    'A breakpoint already exist in given location. '
                    'Would you like to delete the previous one?', True):
                breakpoints = list(self.breakpoints.items())
                for bp_id, bp in breakpoints:
                    if address == bp.address:
                        self.remove_hilda_breakpoint(bp_id)

        bp = self.target.BreakpointCreateByAddress(address)
        setattr(bp, 'hilda', self)

        # add into Hilda's internal list of breakpoints
        self.breakpoints[bp.id] = HildaClient.Breakpoint(address=address,
                                                         options=options,
                                                         forced=forced)

        if callback is not None:
            # Strip the callback's leading indentation (tabs normalized to
            # 4 spaces) so its source can be embedded as the breakpoint's
            # script body, then append the call that invokes the callback.
            callback_source = ''
            callback_source_lines = inspect.getsource(callback).split('\n')

            def_offset = callback_source_lines[0].index('def ')
            for line in callback_source_lines:
                line = line.replace('\t', '    ')
                callback_source += line[def_offset:] + '\n'
            callback_source += f'\n'
            # Expose the current frame via _bp_frame while the callback runs.
            callback_source += f'lldb.hilda_client._bp_frame = frame\n'
            callback_source += f'{callback.__name__}(lldb.hilda_client, frame, bp_loc, {repr(options)})\n'
            callback_source += f'lldb.hilda_client._bp_frame = None\n'

            err = bp.SetScriptCallbackBody(callback_source)
            if not err.Success():
                self.log_critical(
                    f'failed to set breakpoint script body: {err}')

        self.log_info(f'Breakpoint #{bp.id} has been set')
        return bp
Example #8
0
    def run(self):
        """Interactively show information on a VM, datastore, vCenter server, or folder."""
        print("What type of thing do you want to get information on?"
              )  # noqa: T001
        thing_type = prompt_for_choice(
            ['vm', 'datastore', 'vsphere', 'folder'], padding=False)

        # Single Virtual Machine
        if thing_type == "vm":
            vm = resolve_path(self.server, "vm",
                              "you want to get information on")[0]
            self._log.info(
                vm.get_info(detailed=True,
                            uuids=True,
                            snapshot=True,
                            vnics=True))

        # Datastore
        elif thing_type == "datastore":
            # An empty name falls back to the first datastore found.
            ds = self.server.get_datastore(
                input("Enter name of the Datastore [leave "
                      "blank for first datastore found]: "))
            self._log.info(ds.get_info())

        # vCenter server
        elif thing_type == "vsphere":
            self._log.info(str(self.server))

        # Folder
        elif thing_type == "folder":
            folder, folder_name = resolve_path(self.server, "folder")
            # Power status is included only when the folder can contain VMs
            # and the user opts in.
            if "VirtualMachine" in folder.childType \
                    and prompt_for_confirmation("Want to see power state "
                                                "of VMs in the folder?"):
                contents = folder.enumerate(recursive=True, power_status=True)
            else:
                contents = folder.enumerate(recursive=True, power_status=False)
            self._log.info(
                "Information for Folder %s\n"
                "Types of items folder can contain: %s\n%s", folder_name,
                str(folder.childType), format_structure(contents))

        # That's not a thing!
        else:
            self._log.info("Invalid selection: %s", thing_type)
Example #9
0
    def confirm_installation(self, requirement, missing_dependencies, install_command):
        """
        Ask the operator's permission to install missing system packages.

        :param requirement: A :class:`.Requirement` object.
        :param missing_dependencies: A list of strings with missing dependencies.
        :param install_command: A list of strings with the command line needed
                                to install the missing dependencies.
        :returns: :data:`True` if the operator agrees, :data:`False` otherwise
                  (Control-C counts as a refusal without interrupting the
                  program flow).
        """
        singular = len(missing_dependencies) == 1
        try:
            question = format(
                "Do you want me to install %s %s?",
                "this" if singular else "these",
                "dependency" if singular else "dependencies",
            )
            return prompt_for_confirmation(question, default=True)
        except KeyboardInterrupt:
            # Treat Control-C as a negative response instead of propagating it.
            return False
Example #10
0
    def confirm_installation(self, requirement, missing_dependencies,
                             install_command):
        """
        Ask the operator's permission to install missing system packages.

        :param requirement: A :class:`.Requirement` object.
        :param missing_dependencies: A list of strings with missing dependencies.
        :param install_command: A list of strings with the command line needed
                                to install the missing dependencies.
        :returns: :data:`True` when the operator agrees, :data:`False`
                  otherwise; a Control-C is interpreted as a refusal rather
                  than an interruption of the program flow.
        """
        is_single = len(missing_dependencies) == 1
        try:
            return prompt_for_confirmation(
                format(
                    "Do you want me to install %s %s?",
                    "this" if is_single else "these",
                    "dependency" if is_single else "dependencies",
                ),
                default=True,
            )
        except KeyboardInterrupt:
            # A negative response that doesn't abort the program.
            return False
    def bp(self,
           address,
           callback=None,
           forced=False,
           **options) -> lldb.SBBreakpoint:
        """
        Add a breakpoint at the given address.

        :param address: address to set the breakpoint at
        :param callback: callback(hilda, *args) to be called when the
            breakpoint is hit
        :param forced: whether the breakpoint should be protected from usual removal.
        :param options: extra options stored alongside the breakpoint
        :return: the newly created breakpoint
        """
        # If a breakpoint already covers this address, offer to remove the old one(s).
        if address in [bp.address for bp in self.breakpoints.values()]:
            if prompts.prompt_for_confirmation(
                    'A breakpoint already exist in given location. '
                    'Would you like to delete the previous one?', True):
                breakpoints = list(self.breakpoints.items())
                for bp_id, bp in breakpoints:
                    if address == bp.address:
                        self.remove_hilda_breakpoint(bp_id)

        bp = self.target.BreakpointCreateByAddress(address)

        # add into Hilda's internal list of breakpoints
        self.breakpoints[bp.id] = HildaClient.Breakpoint(address=address,
                                                         options=options,
                                                         forced=forced,
                                                         callback=callback)

        if callback is not None:
            # Hits are dispatched through bp_callback_router, which
            # presumably resolves the stored callback — confirm against
            # HildaClient.bp_callback_router.
            bp.SetScriptCallbackFunction(
                'lldb.hilda_client.bp_callback_router')

        self.log_info(f'Breakpoint #{bp.id} has been set')
        return bp
Example #12
0
    def __init__(self,
                 input_: str,
                 output: str = None,
                 cwl_version: str = 'v1.0',
                 token: str = None,
                 platform: str = None,
                 app_revision: str = None,
                 profile: str = 'default',
                 endpoint: str = None,
                 validate: bool = False,
                 update: bool = False,
                 decompose: bool = False):
        """
        Convert a CWL app and either dump it to a local file or install /
        update it on the platform.

        :param input_: path or app id of the app to convert
        :param output: path or app id for the converted app (required when
            *input_* is local)
        :param cwl_version: target CWL version
        :param token: developer token used to initialize the API
        :param platform: platform name passed to ``init_api``
        :param app_revision: specific app revision to convert (converted to
            int when given)
        :param profile: credentials profile passed to ``init_api``
        :param endpoint: API endpoint passed to ``init_api``
        :param validate: validate JS expressions after conversion
        :param update: install/update the app without prompting
        :param decompose: also install tools/subworkflows without prompting
        """
        msg = 'Converting...'
        logger.info(msg)
        print(colored(msg, 'green'))

        # For local input, output path/app must be specified.
        if is_local(input_) and not output:
            raise Exception('Output file not specified.')

        # Initialize API and user info
        self.api = None
        self.username = None
        self.input_ = input_
        self.output = output
        self.cwl_version = cwl_version
        self.validate = validate
        self.update = update
        self.decompose = decompose

        # The API is only needed when either endpoint is remote.
        if not (is_local(input_) and is_local(output)):
            self.api = init_api(profile=profile,
                                platform=platform,
                                dev_token=token,
                                endpoint=endpoint)
            self.username = self.api.users.me().username

        self.app_revision = int(app_revision) \
            if app_revision is not None else None

        # Perform conversion
        self.data = self._parse(self._load_input_cwl())

        # region remove batch
        # Remove batch information from cwl1 version
        def remove_batch(d: dict):
            # Recursively strip batch keys from workflows and their steps.
            if 'class' in d and d['class'] == 'Workflow':
                d.pop('sbg:batchInput', None)
                d.pop('sbg:batchBy', None)
                if 'inputs' in d:
                    for i in d['inputs']:
                        i.pop('batchType', None)
                if 'steps' in d:
                    for step in d['steps']:
                        if 'run' in step:
                            remove_batch(step['run'])

        remove_batch(self.data)
        # endregion

        msg = ("Please check javascript expressions and globs "
               "in your wrapper. Errors are possible due to "
               "unsupported backward compatibility.")
        print(colored(msg, 'yellow'))

        # Validate JS expressions for ES5.1 strict-mode syntax
        if validate:
            CWLValidator().validate(self.data)

        msg = 'Converting done.'
        logger.info(msg)
        print(colored(msg, 'green'))

        # Add contribution info
        if output and not is_local(output):
            # Build the full app id ('user/project/app') from however many
            # parts the caller supplied, counting the '/' separators.
            slash_count = output.count('/')
            if slash_count > 2:
                raise Exception('App id can\'t have more than 2 \'/\'.')
            elif slash_count == 2:
                full_id = output
            elif slash_count == 1:
                full_id = self.username + '/' + output
            else:
                split = input_.split('/')
                full_id = '/'.join([split[0], split[1], output])
            self.data['sbg:createdBy'] = self.username
            self.data['sbg:contributors'] = [self.username]
            self.data['sbg:project'] = '/'.join(
                [self.username, full_id.split('/')[1]])
        else:
            if not is_local(input_):
                full_id = input_
            else:
                full_id = None

            # Record the current user as a contributor if not already listed.
            if (self.username and 'sbg:contributors' in self.data
                    and self.username not in self.data['sbg:contributors']
                    and isinstance(self.data['sbg:contributors'], list)):
                self.data['sbg:contributors'].append(self.username)

        if full_id:
            self.data['sbg:id'] = self.data['id'] = full_id

        # Create output
        if is_local(output):
            # Dump output to file
            if output.endswith(tuple(yaml_ext())):
                dict_to_yaml(self.data, output)
            elif output.endswith(tuple(json_ext())):
                dict_to_json(self.data, output)
            # Decompose workflow
            if self.data['class'] == 'Workflow':
                if not decompose:
                    decompose = prompt_for_confirmation(
                        'Do you want to also install '
                        'all tools and subworkflows from this workflow?',
                        default=True)
                if decompose:
                    breakdown_wf_local(wf_path=output)
        else:
            # Add revision note
            rev_note = 'Upgraded to {} from {}'.format(self.data['cwlVersion'],
                                                       input_)
            if 'sbg:revision' in self.data:
                rev_note += ', revision {}'.format(self.data['sbg:revision'])
            data = add_revision_note(self.data, rev_note)

            # Install converted app
            try:  # Check if app exists and create new revision
                old_rev = self.api.apps.get(full_id, api=self.api)
                if not update:
                    update = prompt_for_confirmation(
                        'Do you want to update app with id: \'' + full_id +
                        '\'' + '?',
                        default=True)
                if update:
                    self.api.apps.create_revision(full_id,
                                                  old_rev.revision + 1,
                                                  data,
                                                  api=self.api)
                    msg = ("New revision has just been created.\nID: " +
                           full_id + "\nrevision: " +
                           str(old_rev.revision + 1))
                    print(colored(msg, 'green'))
                    logger.info(msg)
            except NotFound:  # Install if app does not exist
                if not update:
                    update = prompt_for_confirmation(
                        'Do you want to install app with id: \'' + full_id +
                        '\'' + '?',
                        default=True)
                if update:
                    self.api.apps.install_app(full_id, data, api=self.api)
                    msg = ("Application has just been installed.\nID: " +
                           full_id + "\nrevision: 0")
                    print(colored(msg, 'green'))
                    logger.info(msg)
            # Decompose and install parts if it's an installed/updated workflow
            if update and data['class'] == 'Workflow':
                if not decompose:
                    decompose = prompt_for_confirmation(
                        'Do you want to also install'
                        ' all tools and subworkflows from this workflow?',
                        default=True)
                if decompose:
                    breakdown_wf_sbg(wf_name=full_id.split('/')[2],
                                     project_id='/'.join(
                                         full_id.split('/')[:2]),
                                     wf_json=data,
                                     api=self.api)
Example #13
0
    def run(self):
        """Interactively destroy a single VM or selected VMs/folders inside a folder."""
        if prompt_for_confirmation("Multiple VMs? "):
            folder, folder_name = resolve_path(
                self.server, "folder", "that has the VMs/folders "
                "you want to destroy")

            # Display folder structure
            if prompt_for_confirmation("Display the folder structure? "):
                self._log.info(
                    "Folder structure: \n%s",
                    format_structure(
                        folder.enumerate(recursive=True, power_status=True)))

            # Prompt user to configure destruction options
            print("Answer the following questions to configure the cleanup"
                  )  # noqa: T001
            if prompt_for_confirmation("Destroy everything in and "
                                       "including the folder? "):
                # Destroy-all: empty prefixes match every item.
                vm_prefix = ''
                folder_prefix = ''
                recursive = True
                destroy_folders = True
                destroy_self = True
            else:
                # Selective destruction: prefixes filter which items match.
                vm_prefix = default_prompt(
                    "Prefix of VMs you wish to destroy"
                    " (CASE SENSITIVE!)",
                    default='')
                recursive = prompt_for_confirmation("Recursively descend "
                                                    "into folders? ")
                destroy_folders = prompt_for_confirmation("Destroy folders in "
                                                          "addition to VMs? ")
                if destroy_folders:
                    folder_prefix = default_prompt(
                        "Prefix of folders "
                        "you wish to destroy"
                        " (CASE SENSITIVE!)",
                        default='')
                    destroy_self = prompt_for_confirmation("Destroy the "
                                                           "folder itself? ")
                else:
                    folder_prefix = ''
                    destroy_self = False

            # Show user what options they selected
            self._log.info(
                "Options selected\nVM Prefix: %s\n"
                "Folder Prefix: %s\nRecursive: %s\n"
                "Folder-destruction: %s\nSelf-destruction: %s", str(vm_prefix),
                str(folder_prefix), recursive, destroy_folders, destroy_self)

            # Show how many items matched the options
            v, f = folder.retrieve_items(vm_prefix,
                                         folder_prefix,
                                         recursive=True)
            num_vms = len(v)
            if destroy_folders:
                num_folders = len(f)
                if destroy_self:
                    num_folders += 1
            else:
                num_folders = 0
            self._log.info("%d VMs and %d folders match the options", num_vms,
                           num_folders)

            # Confirm and destroy
            if prompt_for_confirmation("Continue with destruction? "):
                self._log.info("Destroying folder '%s'...", folder_name)
                folder.cleanup(vm_prefix=vm_prefix,
                               folder_prefix=folder_prefix,
                               recursive=recursive,
                               destroy_folders=destroy_folders,
                               destroy_self=destroy_self)
            else:
                self._log.info("Destruction cancelled")
        else:
            # Single-VM mode.
            vm = resolve_path(self.server, "vm", "to destroy")[0]

            if prompt_for_confirmation("Display VM info? "):
                self._log.info(
                    vm.get_info(detailed=True,
                                uuids=True,
                                snapshot=True,
                                vnics=True))

            if vm.is_template():  # Warn if template
                if not prompt_for_confirmation("VM '%s' is a Template. "
                                               "Continue? " % vm.name):
                    sys.exit(0)

            if prompt_for_confirmation("Continue with destruction? "):
                self._log.info("Destroying VM '%s'", vm.name)
                vm.destroy()
            else:
                self._log.info("Destruction cancelled")
Example #14
0
    def run(self):
        """Interactively perform a snapshot operation on one VM or every VM in a folder."""
        print("Enter Snapshot operation to perform")  # noqa: T001
        op = prompt_for_choice([
            'create', 'revert', 'revert-current', 'remove', 'remove-all',
            'get', 'get-current', 'get-all', 'disk-usage'
        ],
                               padding=False)
        # Operations that target a named snapshot need extra details up front.
        if op in ['create', 'revert', 'remove', 'get']:
            name = input("Name of snapshot to %s: " % op)
            if op == "create":
                desc = input("Description of snapshot to create: ")
                memory = prompt_for_confirmation("Include memory?")
                quiesce = prompt_for_confirmation(
                    "Quiesce disks? (Requires VMware "
                    "Tools to be running on the VM)")
            elif op == "remove":
                children = prompt_for_confirmation(
                    "Remove any children of the "
                    "snapshot?", default=True)

        if prompt_for_confirmation("Multiple VMs? ", default=True):
            # Batch mode: collect every VM directly inside the chosen folder.
            f, f_name = resolve_path(self.server, "folder", "with VMs")
            vms = [VM(vm=x) for x in f.childEntity if is_vm(x)]
            self._log.info("Found %d VMs in folder '%s'", len(vms), f_name)
            if prompt_for_confirmation("Show the status of the "
                                       "VMs in the folder? "):
                self._log.info(
                    "Folder structure: \n%s",
                    format_structure(
                        f.enumerate(recursive=True, power_status=True)))
            if not prompt_for_confirmation("Continue? ", default=True):
                self._log.info("User cancelled operation, exiting...")
                sys.exit(0)
        else:
            vms = [
                resolve_path(self.server, "vm",
                             "to perform snapshot operations on")[0]
            ]

        # Perform the operations
        pbar = tqdm.tqdm(vms,
                         total=len(vms),
                         unit="VMs",
                         desc="Taking snapshots")
        for vm in pbar:
            self._log.info("Performing operation '%s' on VM '%s'", op, vm.name)
            pbar.set_postfix_str(vm.name)
            if op == "create":
                vm.create_snapshot(name=name,
                                   description=desc,
                                   memory=memory,
                                   quiesce=quiesce)
            elif op == "revert":
                vm.revert_to_snapshot(snapshot=name)
            elif op == "revert-current":
                vm.revert_to_current_snapshot()
            elif op == "remove":
                vm.remove_snapshot(snapshot=name, remove_children=children)
            elif op == "remove-all":
                vm.remove_all_snapshots()
            elif op == "get":
                self._log.info(vm.get_snapshot_info(name))
            elif op == "get-current":
                self._log.info(vm.get_snapshot_info())
            elif op == "get-all":
                self._log.info(vm.get_all_snapshots_info())
            elif op == "disk-usage":
                self._log.info(vm.snapshot_disk_usage())
            else:
                self._log.error("Unknown operation: %s", op)
            pbar.update()
        pbar.close()
Example #15
0
    def run(self):
        """Clone one or more source VMs/templates into numbered instances."""
        vms = []
        vm_names = []

        # Single-vm source
        if prompt_for_confirmation("Do you want to clone from a single VM?"):
            v = resolve_path(self.server, "VM",
                             "or template you wish to clone")[0]
            vms.append(v)
            vm_names.append(input("Base name for instances to be created: "))
        # Multi-VM source
        else:
            folder_from, from_name = resolve_path(
                self.server, "folder", "you want to clone all VMs in")
            # Get VMs in the folder
            v = [VM(vm=x) for x in folder_from.childEntity if is_vm(x)]
            vms.extend(v)
            self._log.info("%d VMs found in source folder %s", len(v),
                           from_name)
            if not prompt_for_confirmation("Keep the same names? "):
                names = []
                for i in range(len(v)):
                    names.append(input("Enter base name for VM %d: " % i))
            else:
                names = list(map(lambda x: x.name, v))  # Same names as sources
            vm_names.extend(names)

        create_in, create_in_name = resolve_path(self.server, "folder",
                                                 "in which to create VMs")
        instance_folder_base = None
        if prompt_for_confirmation("Do you want to create a "
                                   "folder for each instance? "):
            instance_folder_base = input("Enter instance folder base name: ")

        num_instances = int(input("Number of instances to be created: "))

        # Determine what will be the default
        pool_name = self.server.get_pool().name
        pool_name = default_prompt(prompt="Resource pool to assign VMs to",
                                   default=pool_name)
        pool = self.server.get_pool(pool_name)

        datastore_name = default_prompt(prompt="Datastore to put clones on")
        datastore = self.server.get_datastore(datastore_name)

        self._log.info("Creating %d instances under folder %s", num_instances,
                       create_in_name)
        # Outer loop over instances, inner loop over the source VMs.
        for instance in tqdm.trange(num_instances,
                                    desc="Creating instances",
                                    unit="instances"):
            with tqdm.tqdm(total=len(vm_names),
                           leave=False,
                           desc="Creating VMs",
                           unit="VMs") as pbar:
                for vm, name in zip(vms, vm_names):
                    pbar.set_postfix_str(name)
                    if instance_folder_base:
                        # Create instance folders for a nested clone
                        f = self.server.create_folder(instance_folder_base +
                                                      pad(instance),
                                                      create_in=create_in)
                        vm_name = name
                    else:
                        f = create_in
                        vm_name = name + pad(
                            instance)  # Append instance number

                    new_vm = VM(name=vm_name,
                                folder=f,
                                resource_pool=pool,
                                datastore=datastore)
                    new_vm.create(template=vm.get_vim_vm())
                    pbar.update()
Example #16
0
def get_pref(line, result):
    """
    Insert `line` into the sorted list `result` based on operator preference.

    :param line: The new entry to insert (a string).
    :param result: A list kept in preference order (mutated in place).

    The insertion point is determined by repeatedly asking the operator which
    of two entries they prefer; the answers act as the comparison function.
    """
    # NOTE(review): this passes a two-argument comparator as the third
    # positional argument of insort_left() -- the stdlib bisect.insort_left()
    # instead takes a keyword-only one-argument `key`, so this must be a
    # project-local variant; confirm before replacing it with bisect.
    def key(x, y):
        # Fixed typo in the user-facing prompt: "better then" -> "better than".
        return prompt_for_confirmation(
            "Is '{}' better than '{}'?".format(y, x), padding=False)

    insort_left(result, line, key)
Example #17
0
 def run_benchmark(self):
     """
     Benchmark the effectiveness of the delta transfer implementation.

     Performs ``self.benchmark`` iterations, each time mutating the target
     file by an increasing percentage, synchronizing it with pdiffcopy and
     (when the ``PDIFFCOPY_BENCHMARK_RSYNC_*`` environment variables are set)
     with rsync as well, then renders a table comparing the runtimes.

     :raises: :exc:`BenchmarkAbortedError` when the operator doesn't give
              permission to run the (destructive) benchmark.
     """
     # Make sure the operator realizes what we're going to do, before it happens.
     if os.environ.get("PDIFFCOPY_BENCHMARK") != "allowed":
         logger.notice("Set $PDIFFCOPY_BENCHMARK=allowed to bypass the following interactive prompt.")
         question = """
             This will mutate the target file and then restore
             its original contents. Are you sure this is okay?
         """
         if not prompt_for_confirmation(compact(question), default=False):
             raise BenchmarkAbortedError("Permission to run benchmark denied.")
     samples = []
     logger.info("Performing initial synchronization to level the playing ground ..")
     self.synchronize_once()
     # If the target file didn't exist before we created it then
     # self.target.exists may have cached the value False.
     self.clear_cached_properties()
     # Get the rsync configuration from environment variables.
     rsync_server = os.environ.get("PDIFFCOPY_BENCHMARK_RSYNC_SERVER")
     rsync_module = os.environ.get("PDIFFCOPY_BENCHMARK_RSYNC_MODULE")
     rsync_root = os.environ.get("PDIFFCOPY_BENCHMARK_RSYNC_ROOT")
     have_rsync = rsync_server and rsync_module and rsync_root
     # Run the benchmark for the requested number of iterations.
     for i in range(1, self.benchmark + 1):
         # Initialize timers to compare pdiffcopy and rsync runtime.
         pdiffcopy_timer = Timer(resumable=True)
         rsync_timer = Timer(resumable=True)
         # Mutate the target file.
         difference = 100 / self.benchmark * i
         self.mutate_target(difference)
         # Synchronize using pdiffcopy. Reuse the timer created above (the
         # previous code created a second Timer in the 'with' statement,
         # leaving the first one as a dead store); this also matches how
         # rsync_timer is used below.
         with pdiffcopy_timer:
             num_blocks = self.synchronize_once()
         # Synchronize using rsync?
         if have_rsync:
             self.mutate_target(difference)
             with rsync_timer:
                 rsync_command_line = [
                     "rsync",
                     "--inplace",
                     format(
                         "rsync://%s/%s",
                         rsync_server,
                         os.path.join(rsync_module, os.path.relpath(self.source.filename, rsync_root)),
                     ),
                     self.target.filename,
                 ]
                 logger.info("Synchronizing changes using %s ..", " ".join(map(pipes.quote, rsync_command_line)))
                 subprocess.check_call(rsync_command_line)
                 logger.info("Synchronized changes using rsync in %s ..", rsync_timer)
         # Summarize the results of this iteration.
         metrics = ["%i%%" % difference]
         metrics.append(format_size(num_blocks * self.block_size, binary=True))
         metrics.append(str(pdiffcopy_timer))
         if have_rsync:
             metrics.append(str(rsync_timer))
         samples.append(metrics)
     # Render an overview of the results in the form of a table.
     column_names = ["Delta size", "Data transferred", "Runtime of pdiffcopy"]
     if have_rsync:
         column_names.append("Runtime of rsync")
     output(format_pretty_table(samples, column_names=column_names))
Example #18
0
def collect_packages(archives, directory, prompt=True, cache=None, concurrency=None):
    """
    Interactively copy packages and their dependencies.

    :param archives: An iterable of strings with the filenames of one or more
                     ``*.deb`` files.
    :param directory: The pathname of a directory where the package archives
                      and dependencies should be copied to (a string).
    :param prompt: :data:`True` (the default) to ask confirmation from the
                   operator (using a confirmation prompt rendered on the
                   terminal), :data:`False` to skip the prompt.
    :param cache: The :class:`.PackageCache` to use (defaults to :data:`None`).
    :param concurrency: Override the number of concurrent processes (defaults
                        to the number of `archives` given or to the value of
                        :func:`multiprocessing.cpu_count()`, whichever is
                        smaller).
    :raises: :exc:`~exceptions.ValueError` when no archives are given.

    When more than one archive is given a :mod:`multiprocessing` pool is used
    to collect related archives concurrently, in order to speed up the process
    of collecting large dependency sets.
    """
    archives = list(archives)
    if not archives:
        raise ValueError("At least one package archive is required!")
    # Seed the working set with the archives that were given to us.
    related = {parse_filename(fn) for fn in archives}
    if len(archives) == 1:
        # A single archive doesn't warrant a worker pool.
        related.update(collect_related_packages(archives[0], cache=cache))
    else:
        # Fan out over multiple processes to speed up dependency collection.
        with AutomaticSpinner(label="Collecting related packages"):
            pool_size = min(len(archives), concurrency or multiprocessing.cpu_count())
            workers = multiprocessing.Pool(pool_size)
            try:
                tasks = [(fn, cache) for fn in archives]
                for found in workers.map(collect_packages_worker, tasks, chunksize=1):
                    related.update(found)
            finally:
                workers.terminate()
    # Skip archives whose basename already exists in the target directory.
    to_copy = {
        pkg for pkg in related
        if not os.path.isfile(os.path.join(directory, os.path.basename(pkg.filename)))
    }
    if not to_copy:
        logger.info("Nothing to do! (%s previously copied)",
                    pluralize(len(related), "package archive"))
        return
    # Show the operator what was found, in sorted order.
    to_copy = sorted(to_copy)
    pluralized = pluralize(len(to_copy), "package archive", "package archives")
    say("Found %s:", pluralized)
    for pkg in to_copy:
        say(" - %s", format_path(pkg.filename))
    prompt_text = "Copy %s to %s?" % (pluralized, format_path(directory))
    if prompt and not prompt_for_confirmation(prompt_text, default=True, padding=False):
        logger.warning("Not copying archive(s) to %s! (aborted by user)", format_path(directory))
    else:
        # Link or copy the file(s).
        for pkg in to_copy:
            src = pkg.filename
            smart_copy(src, os.path.join(directory, os.path.basename(src)))
        logger.info("Done! Copied %s to %s.", pluralized, format_path(directory))
Example #19
0
def collect_packages(archives,
                     directory,
                     prompt=True,
                     cache=None,
                     concurrency=None):
    """
    Interactively copy packages and their dependencies.

    :param archives: An iterable of strings with the filenames of one or more
                     ``*.deb`` files.
    :param directory: The pathname of a directory where the package archives
                      and dependencies should be copied to (a string).
    :param prompt: :data:`True` (the default) to ask confirmation from the
                   operator (using a confirmation prompt rendered on the
                   terminal), :data:`False` to skip the prompt.
    :param cache: The :class:`.PackageCache` to use (defaults to :data:`None`).
    :param concurrency: Override the number of concurrent processes (defaults
                        to the number of `archives` given or to the value of
                        :func:`multiprocessing.cpu_count()`, whichever is
                        smaller).
    :raises: :exc:`~exceptions.ValueError` when no archives are given.

    When more than one archive is given a :mod:`multiprocessing` pool is used
    to collect related archives concurrently, in order to speed up the process
    of collecting large dependency sets.
    """
    archives = list(archives)
    if not archives:
        raise ValueError("At least one package archive is required!")
    # The working set starts out as the archives that were given to us.
    collected = set(parse_filename(a) for a in archives)
    if len(archives) == 1:
        # One archive: collect its related packages in this process.
        collected.update(
            collect_related_packages(archives[0], cache=cache))
    else:
        # Several archives: spread the work over a process pool.
        with AutomaticSpinner(label="Collecting related packages"):
            num_workers = min(len(archives),
                              concurrency or multiprocessing.cpu_count())
            worker_pool = multiprocessing.Pool(num_workers)
            try:
                jobs = [(a, cache) for a in archives]
                for subset in worker_pool.map(collect_packages_worker,
                                              jobs,
                                              chunksize=1):
                    collected.update(subset)
            finally:
                worker_pool.terminate()
    # Filter out archives already present in the target directory.
    missing = []
    for candidate in collected:
        target = os.path.join(directory,
                              os.path.basename(candidate.filename))
        if not os.path.isfile(target):
            missing.append(candidate)
    # Interactively move the package archives.
    if missing:
        missing.sort()
        pluralized = pluralize(len(missing), "package archive",
                               "package archives")
        say("Found %s:", pluralized)
        for candidate in missing:
            say(" - %s", format_path(candidate.filename))
        prompt_text = "Copy %s to %s?" % (pluralized, format_path(directory))
        if prompt and not prompt_for_confirmation(
                prompt_text, default=True, padding=False):
            logger.warning("Not copying archive(s) to %s! (aborted by user)",
                           format_path(directory))
        else:
            # Link or copy the file(s).
            for candidate in missing:
                src = candidate.filename
                dst = os.path.join(directory, os.path.basename(src))
                smart_copy(src, dst)
            logger.info("Done! Copied %s to %s.", pluralized,
                        format_path(directory))
    else:
        logger.info("Nothing to do! (%s previously copied)",
                    pluralize(len(collected), "package archive"))
Example #20
0
    def wait_for_pre_boot(self):
        """
        Wait for the pre-boot environment to become available.

        :returns: A :class:`ServerDetails` object.
        :raises: The following exceptions can be raised:

                 - :exc:`SystemUnreachableError` when :attr:`connect_timeout`
                   seconds have passed and we still haven't managed to query
                   the SSH server in the pre-boot environment.
                 - :exc:`UnlockAbortedError` when the post-boot environment is
                   detected and the operator aborts the unlock sequence.
        """
        # Tracks total elapsed time across all polling iterations (used for
        # the connect_timeout check below).
        method_timer = Timer()
        logger.info("Waiting for pre-boot environment to become available ..")
        with AutomaticSpinner("Waiting for pre-boot environment",
                              show_time=True):
            # Poll the SSH server until one of the break/raise conditions
            # below fires. Each pass through the loop is paced by
            # iteration_timer.sleep() at the bottom.
            while True:
                iteration_timer = Timer()
                server = self.scan_ssh_server(self.pre_boot)
                known_keys = self.get_known_host_keys('pre-boot-host-keys')
                # Preferred check: compare the SSH host keys we scanned
                # against previously recorded pre-boot host keys.
                if server.host_keys and known_keys:
                    logger.verbose(
                        "Checking if SSH host keys match known keys ..")
                    if server.host_keys & known_keys:
                        logger.info(
                            "Matched known SSH host keys of pre-boot environment."
                        )
                        break
                    else:
                        # Host keys are available but don't overlap with the
                        # known pre-boot keys: assume we reached the post-boot
                        # environment and keep polling.
                        logger.warning(
                            compact("""
                            Detected post-boot environment while waiting for
                            pre-boot environment to become available, will keep
                            retrying...
                        """))
                # Fallback checks based on the SSH server's banner header.
                elif server.match_header('dropbear'):
                    logger.info(
                        "Detected Dropbear in pre-boot environment (as expected)."
                    )
                    break
                elif server.match_header('openssh'):
                    logger.warning(
                        compact("""
                        Detected OpenSSH server while connecting to pre-boot
                        environment where I was expecting Dropbear instead!
                        Could it be that you're accidentally connecting
                        to the post-boot environment?
                    """))
                    if self.interactive:
                        if prompt_for_confirmation(
                                "Continue connecting anyway?"):
                            logger.info(
                                "Continuing unlock sequence with operator consent .."
                            )
                        else:
                            raise UnlockAbortedError(
                                "Unlock sequence aborted by operator.")
                    # NOTE: this break is reached both when the operator
                    # consents and when self.interactive is falsy -- OpenSSH
                    # detection ends the wait either way (unless aborted).
                    break
                # Give up once the total time spent exceeds connect_timeout.
                if method_timer.elapsed_time >= self.connect_timeout:
                    raise SystemUnreachableError(
                        format(
                            "Timed out waiting for pre-boot environment of %s to become available within %s!",
                            self.pre_context,
                            format_timespan(self.connect_timeout),
                        ))
                # Sleep for the remainder of retry_interval (accounting for
                # the time this iteration already took).
                iteration_timer.sleep(self.retry_interval)
        logger.info("Waited %s for pre-boot environment.", method_timer)
        return server
Example #21
0
def main():
    """Interactive entry point: configure credentials, connect to Google Drive and audit folders by URL."""
    testing = True
    #     version = '00.01 - 2019.05.28'
    version = constants.version
    audit_url = constants.audit_url
    appName = 'folderAudit'

    # Configuration file lives under ~/.config/folderAudit/folderAudit.ini.
    cfgfile = appName + '.ini'
    cfgpath = Path.home() / '.config' / appName
    cfgfile = cfgpath / cfgfile

    print('cfgpath', cfgpath)

    logger = logging.getLogger(__name__)
    loggingConfig = resource_path('resources/logging.json')
    setup_logging(default_config=loggingConfig,
                  default_level=logging.ERROR,
                  output_path='~/')

    # Log levels the user is allowed to select via the config file.
    levelNames = ['DEBUG', 'INFO', 'WARNING']

    # configuration
    myConfig = getConfiguration(cfgfile)
    updateConfig = False

    # set the terminal size to 50x90 characters
    print("\x1b[8;50;90t")

    # set up configuration
    if myConfig.has_option('Main', 'credentials'):
        credential_store = os.path.expanduser(
            myConfig.get('Main', 'credentials'))
    else:
        # No stored path: default to <cfgpath>/credentials and remember it.
        credential_store = os.path.expanduser(
            os.path.join(cfgpath, 'credentials'))
        updateConfig = True

    if myConfig.has_option('Main', 'useremail'):
        useremail = myConfig.get('Main', 'useremail')
    else:
        useremail = None

    if myConfig.has_option('Main', 'loglevel'):
        loglevel = myConfig.get('Main', 'loglevel')
        if loglevel in levelNames:
            logger.setLevel(loglevel)
    else:
        # No configured level: fall back to ERROR and persist the choice.
        loglevel = 'ERROR'
        logger.setLevel(loglevel)
        myConfig.set('Main', 'loglevel', loglevel)
        updateConfig = True

    # print the configuration if the logging level is high enough
    # (10 == logging.DEBUG).
    if logging.getLogger().getEffectiveLevel() <= 10:
        config_dict = {}
        for section in myConfig.sections():
            config_dict[section] = {}
            for option in myConfig.options(section):
                config_dict[section][option] = myConfig.get(section, option)

        logger.debug('current configuration:')
        logger.debug('\n{}'.format(config_dict))

    # Show the "about" text wrapped to 65 columns.
    about = resource_path('./resources/about.txt')
    about_list = fileToList(about, False)
    wrapper = textwrap.TextWrapper(replace_whitespace=True,
                                   drop_whitespace=True,
                                   width=65)
    print(('{} - Version: {}'.format(appName, version)))

    for line in about_list:
        print(('\n'.join(wrapper.wrap(text=line))))

    print(f'Audit Template: {audit_url}')
    #     logger.setLevel('DEBUG')

    # assume that the configuration is NOT ok and that user will want to reconfigure
    proceed = True
    # start with configuration settings from config file; if user chooses to reconfigure offer opportunity to change
    reconfigure = True

    # check configuration and credentials for google drive
    clientSecrets = resource_path('resources/client_secrets.json')
    try:
        credentials = getCredentials(storage_path=credential_store,
                                     client_secret=clientSecrets)
    except Exception as e:
        # NOTE(review): only logs the failure -- `credentials` stays unbound
        # and the googledrive() call below will raise NameError; verify.
        logging.critical(e)

    # configure google drive object

    try:
        myDrive = googledrive(credentials)
    except Exception as e:
        logger.error('Could not set up google drive connection: {}'.format(e))
        print(
            'Could not setup google drive connection. Run this program again.')
        # NOTE(review): `log_files` is not defined in this function --
        # presumably a module-level global; confirm it exists.
        print(('If this error persists, please check the logs: {}'.format(
            log_files)))
        print('cannot continue')
        doExit(testing=testing)

    if not useremail:
        # Fall back to the email address reported by the Drive API.
        logger.warning('No useremail set in configuration file')
        try:
            useremail = myDrive.userinfo['emailAddress']
        except Exception as e:
            logging.error(
                'Error retreving useremail address from drive configuration')
            print('Error fetching configuration information.')
            print('Run the program again')
            print(('If this error persists, please check the logs: {}'.format(
                log_files)))
            doExit(testing=testing)
        myConfig.set('Main', 'useremail', useremail)
        updateConfig = True

    # Persist any configuration changes made above.
    if updateConfig:
        configuration.create_config(cfgfile, myConfig)

    # Main interactive loop: audit one folder per iteration.
    while proceed:
        folderURL = prompts.prompt_for_input(
            'Paste full URL of Folder to Audit:\n')

        folderName = None
        folderID = None

        # Extract the folder ID (last path/query component) from the URL.
        #         match = re.match('https:\/\/drive.google.com(?:\/.*)+\/([a-zA-Z0-9-]+)\W{0,}$', folderURL)
        match = re.match(
            'https:\/\/drive.google.com\/.*(?:\w+\/|=)([a-zA-Z0-9_-]+)',
            folderURL)
        if not match:
            # NOTE(review): "Invlaid" typo in this user-facing message
            # (runtime string, left unchanged here).
            print('Invlaid URL; try again')
            continue
        else:
            folderID = match[1]
            print('folderID=', folderID)

        try:
            result = myDrive.getprops(fileId=folderID, fields="name, mimeType")
        except GDriveError as e:
            logger.error(
                'Trouble getting information for this folder. Try again.')
            logger.error(e)
            continue

        # Only folders can be audited; anything else restarts the loop.
        if not result['mimeType'] == myDrive.mimeTypes['folder']:
            logger.error('This is is not a folder. Try again.')
            continue
        else:
            print('Beginning audit of folder: {}'.format(result['name']))
            outputFile = auditFolder(myDrive=myDrive,
                                     parents=folderID,
                                     name=result['name'])
            if outputFile:
                file = uploadSheet(myDrive, outputFile)
            else:
                print('The audit returned no results')

        if not prompts.prompt_for_confirmation(
                'Would you like to audit another folder?', False):
            proceed = False
            # NOTE(review): `file` is only bound when outputFile was truthy
            # on some iteration -- this return can raise UnboundLocalError;
            # consider initializing `file = None` before the loop.
            return (file)