def test_get_fact_error(self):
    """A failing fact command with no error handling aborts with PyinfraError."""
    inv = make_inventory(hosts=('anotherhost', ))
    state = State(inv, Config())
    target = inv.get_host('anotherhost')
    connect_all(state)

    with patch('pyinfra.api.connectors.ssh.run_shell_command') as fake_run:
        # Simulate the remote command failing
        fake_run.return_value = False, MagicMock()

        with self.assertRaises(PyinfraError) as raised:
            get_facts(state, 'command', ('fail command', ))

        assert raised.exception.args[0] == 'No hosts remaining!'

        expected_kwargs = {
            'print_input': False,
            'print_output': False,
            'shell_executable': None,
            'su_user': None,
            'sudo': False,
            'sudo_user': None,
            'timeout': None,
            'use_sudo_password': False,
            'return_combined_output': True,
        }
        fake_run.assert_called_with(
            state, target, 'fail command', **expected_kwargs,
        )
def test_get_fact_error(self):
    """A failing fact command with no error handling aborts with PyinfraError."""
    inv = make_inventory(hosts=("anotherhost", ))
    state = State(inv, Config())
    target = inv.get_host("anotherhost")
    connect_all(state)

    with patch("pyinfra.connectors.ssh.run_shell_command") as fake_run:
        # Simulate the remote command failing
        fake_run.return_value = False, MagicMock()

        with self.assertRaises(PyinfraError) as raised:
            get_facts(state, Command, ("fail command", ))

        assert raised.exception.args[0] == "No hosts remaining!"

        fake_run.assert_called_with(
            state,
            target,
            "fail command",
            print_input=False,
            print_output=False,
            return_combined_output=True,
            **_get_executor_defaults(state, target),
        )
def test_get_fact(self):
    """get_facts runs the fact's command and maps each host to its output."""
    inv = make_inventory(hosts=('anotherhost', ))
    state = State(inv, Config())
    target = inv.get_host('anotherhost')
    connect_all(state)

    with patch('pyinfra.api.connectors.ssh.run_shell_command') as fake_run:
        fake_run.return_value = MagicMock(), [('stdout', 'some-output')]

        fact_data = get_facts(state, 'command', ('yes', ))
        assert fact_data == {target: 'some-output'}

        expected_kwargs = {
            'print_input': False,
            'print_output': False,
            'shell_executable': None,
            'su_user': None,
            'sudo': False,
            'sudo_user': None,
            'timeout': None,
            'use_sudo_password': False,
            'return_combined_output': True,
        }
        fake_run.assert_called_with(state, target, 'yes', **expected_kwargs)
def test_get_fact_error_ignore(self):
    """With ignore_errors set, a failing fact yields None instead of raising."""
    inv = make_inventory(hosts=("anotherhost", ))
    state = State(inv, Config())
    target = inv.get_host("anotherhost")
    connect_all(state)

    # Mark the in-flight operation as error-ignoring for this host
    target.current_op_global_kwargs = {"ignore_errors": True}

    with patch("pyinfra.connectors.ssh.run_shell_command") as fake_run:
        # Simulate the remote command failing
        fake_run.return_value = False, MagicMock()

        fact_data = get_facts(state, Command, ("fail command", ))
        assert fact_data == {target: None}

        fake_run.assert_called_with(
            state,
            target,
            "fail command",
            print_input=False,
            print_output=False,
            return_combined_output=True,
            **_get_executor_defaults(state, target),
        )
def test_get_fact(self):
    """get_facts runs the fact's command and maps each host to its output."""
    inv = make_inventory(hosts=("anotherhost", ))
    state = State(inv, Config())
    target = inv.get_host("anotherhost")
    connect_all(state)

    with patch("pyinfra.connectors.ssh.run_shell_command") as fake_run:
        fake_run.return_value = MagicMock(), [("stdout", "some-output")]

        fact_data = get_facts(state, Command, ("yes", ))
        assert fact_data == {target: "some-output"}

        fake_run.assert_called_with(
            state,
            target,
            "yes",
            print_input=False,
            print_output=False,
            return_combined_output=True,
            **_get_executor_defaults(state, target),
        )
def test_get_fact_cached(self):
    """A fact_hash hit returns the cached value and skips execution entirely."""
    inv = make_inventory(hosts=("anotherhost", ))
    state = State(inv, Config())

    fact_hash = "a-fact-hash"
    cached_fact = {"this is a cached fact"}

    # Pre-populate the host's fact cache under the hash key
    target = inv.get_host("anotherhost")
    target.facts[fact_hash] = cached_fact

    connect_all(state)

    with patch("pyinfra.connectors.ssh.run_shell_command") as fake_run:
        fake_run.return_value = MagicMock(), [("stdout", "some-output")]

        fact_data = get_facts(
            state,
            Command,
            args=("yes", ),
            kwargs={"_sudo": True},
            fact_hash=fact_hash,
        )

        assert fact_data == {target: cached_fact}
        # The cache hit means no command should ever be run
        fake_run.assert_not_called()
def test_get_fact_current_op_meta(self):
    """Global op kwargs on the state flow through to the fact's shell call."""
    inv = make_inventory(hosts=('anotherhost', ))
    state = State(inv, Config())
    target = inv.get_host('anotherhost')
    connect_all(state)

    state.current_op_global_kwargs = {
        'sudo': True,
        'sudo_user': '******',
        'use_sudo_password': True,
        'su_user': '******',
        'ignore_errors': False,
        'timeout': 10,
        'env': {'HELLO': 'WORLD'},
    }

    with patch('pyinfra.api.connectors.ssh.run_shell_command') as fake_run:
        fake_run.return_value = MagicMock(), [('stdout', 'some-output')]

        fact_data = get_facts(state, 'command', ('yes', ))
        assert fact_data == {target: 'some-output'}

        expected_kwargs = {
            'print_input': False,
            'print_output': False,
            'shell_executable': None,
            'su_user': '******',
            'sudo': True,
            'sudo_user': '******',
            'timeout': 10,
            'env': {'HELLO': 'WORLD'},
            'use_sudo_password': True,
            'return_combined_output': True,
        }
        fake_run.assert_called_with(state, target, 'yes', **expected_kwargs)
def test_get_fact_executor_mixed_arguments(self):
    """Executor kwargs from host data, op kwargs and fact kwargs all combine."""
    inv = make_inventory(hosts=("anotherhost", ))
    state = State(inv, Config())
    target = inv.get_host("anotherhost")

    # Host data level executor arguments
    target.data._sudo = True
    target.data._sudo_user = "******"
    target.data._su_user = "******"
    # Current operation level executor argument
    target.current_op_global_kwargs = {"su_user": "******"}

    connect_all(state)

    with patch("pyinfra.connectors.ssh.run_shell_command") as fake_run:
        fake_run.return_value = MagicMock(), [("stdout", "some-output")]

        # Fact call level executor argument
        fact_data = get_facts(
            state,
            Command,
            args=("yes", ),
            kwargs={"_sudo_user": "******"},
        )
        assert fact_data == {target: "some-output"}

        expected = _get_executor_defaults(state, target)
        expected.update({
            "sudo": True,
            "sudo_user": "******",
            "su_user": "******",
        })
        fake_run.assert_called_with(
            state,
            target,
            "yes",
            print_input=False,
            print_output=False,
            return_combined_output=True,
            **expected,
        )
def test_get_fact_error_ignore(self):
    """With ignore_errors set, a failing fact yields None instead of raising."""
    inv = make_inventory(hosts=('anotherhost', ))
    state = State(inv, Config())
    target = inv.get_host('anotherhost')
    connect_all(state)

    state.current_op_global_kwargs = {
        'sudo': False,
        'sudo_user': None,
        'use_sudo_password': False,
        'su_user': None,
        'ignore_errors': True,
        'timeout': None,
        'env': {},
    }

    with patch('pyinfra.api.connectors.ssh.run_shell_command') as fake_run:
        # Simulate the remote command failing
        fake_run.return_value = False, MagicMock()

        fact_data = get_facts(state, 'command', ('fail command', ))
        assert fact_data == {target: None}

        expected_kwargs = {
            'print_input': False,
            'print_output': False,
            'shell_executable': None,
            'su_user': None,
            'sudo': False,
            'sudo_user': None,
            'timeout': None,
            'env': {},
            'use_sudo_password': False,
            'return_combined_output': True,
        }
        fake_run.assert_called_with(
            state, target, 'fail command', **expected_kwargs,
        )
def test_get_fact_current_op_global_arguments(self):
    """Host-level current op kwargs are forwarded into run_shell_command."""
    inv = make_inventory(hosts=("anotherhost", ))
    state = State(inv, Config())
    target = inv.get_host("anotherhost")
    connect_all(state)

    op_kwargs = {
        "sudo": True,
        "sudo_user": "******",
        "use_sudo_password": True,
        "su_user": "******",
        "timeout": 10,
        "env": {"HELLO": "WORLD"},
    }
    target.current_op_global_kwargs = op_kwargs

    with patch("pyinfra.connectors.ssh.run_shell_command") as fake_run:
        fake_run.return_value = MagicMock(), [("stdout", "some-output")]

        fact_data = get_facts(state, Command, ("yes", ))
        assert fact_data == {target: "some-output"}

        expected = _get_executor_defaults(state, target)
        expected.update(op_kwargs)

        fake_run.assert_called_with(
            state,
            target,
            "yes",
            print_input=False,
            print_output=False,
            return_combined_output=True,
            **expected,
        )
group="pyinfra", mode="644", sudo=True, ) # Ensure the state of directories add_op( state, files.directory, path="/tmp/email", user="******", group="pyinfra", mode="755", sudo=True, ) # Copy local files to remote host add_op( state, files.put, src="files/motd", dest="/home/vagrant/motd", ) # And finally we run the ops run_ops(state) # We can also get facts for all the hosts facts = get_facts(state, "os") print(jsonify(facts, indent=4))
run_hook(state, 'before_connect', hook_data) # Connect to all the servers print('--> Connecting to hosts...') connect_all(state) print() # Run the before_connect hook if provided run_hook(state, 'before_facts', hook_data) # Just getting a fact? if arguments['fact']: fact_data = get_facts( state, arguments['fact'], args=arguments['fact_args'], sudo=arguments['sudo'], sudo_user=arguments['sudo_user'], su_user=arguments['su_user'] ) print_fact(fact_data) _exit() # We're building a deploy! print('--> Building deploy scripts...') # Deploy file if arguments['deploy']: def loop_hosts(): # This actually does the op build for host in inventory: pseudo_host.set(host) exec_file(arguments['deploy'])
'pyinfra2', sudo=True) # Ensure the state of files add_op(state, files.file, '/var/log/pyinfra.log', user='******', group='pyinfra', mode='644', sudo=True) # Ensure the state of directories add_op(state, files.directory, '/tmp/email', user='******', group='pyinfra', mode='755', sudo=True) # Copy local files to remote host add_op(state, files.put, 'files/file.txt', '/home/vagrant/file.txt') # And finally we run the ops run_ops(state) # We can also get facts for all the hosts facts = get_facts(state, 'os') print(json.dumps(facts, indent=4))
def _main(
    inventory,
    operations,
    verbosity,
    user,
    port,
    key,
    key_password,
    password,
    sudo,
    sudo_user,
    su_user,
    parallel,
    fail_percent,
    dry,
    limit,
    no_wait,
    serial,
    debug,
    debug_data,
    debug_facts,
    debug_operations,
    facts=None,
    print_operations=None,
):
    '''
    CLI entry point: resolve the deploy directory, classify the requested
    operations (facts / exec / deploy files / single op), load config +
    inventory, connect to hosts, build operations and run them.

    NOTE(review): ``facts`` and ``print_operations`` look unused here —
    presumably kept for CLI-flag signature compatibility; confirm against
    the caller.
    '''
    print()
    print('### {0}'.format(click.style('Welcome to pyinfra', bold=True)))
    print()

    # Setup logging
    log_level = logging.DEBUG if debug else logging.INFO
    setup_logging(log_level)

    # Bootstrap any virtualenv
    init_virtualenv()

    deploy_dir = getcwd()
    potential_deploy_dirs = []

    # This is the most common case: we have a deploy file so use it's
    # pathname - we only look at the first file as we can't have multiple
    # deploy directories.
    if operations[0].endswith('.py'):
        deploy_file_dir, _ = path.split(operations[0])
        above_deploy_file_dir, _ = path.split(deploy_file_dir)

        deploy_dir = deploy_file_dir

        potential_deploy_dirs.extend((
            deploy_file_dir, above_deploy_file_dir,
        ))

    # If we have a valid inventory, look in it's path and it's parent for
    # group_data or config.py to indicate deploy_dir (--fact, --run).
    if inventory.endswith('.py') and path.isfile(inventory):
        inventory_dir, _ = path.split(inventory)
        above_inventory_dir, _ = path.split(inventory_dir)

        potential_deploy_dirs.extend((
            inventory_dir, above_inventory_dir,
        ))

    # First candidate directory containing group_data/ or config.py wins
    for potential_deploy_dir in potential_deploy_dirs:
        logger.debug('Checking potential directory: {0}'.format(
            potential_deploy_dir,
        ))

        if any((
            path.isdir(path.join(potential_deploy_dir, 'group_data')),
            path.isfile(path.join(potential_deploy_dir, 'config.py')),
        )):
            logger.debug(
                'Setting directory to: {0}'.format(potential_deploy_dir))
            deploy_dir = potential_deploy_dir
            break

    # List facts
    if operations[0] == 'fact':
        command = 'fact'

        fact_names = operations[1:]
        facts = []

        for name in fact_names:
            args = None

            # Fact args come colon-separated: "fact:arg1,arg2"
            if ':' in name:
                name, args = name.split(':', 1)
                args = args.split(',')

            if not is_fact(name):
                raise CliError('No fact: {0}'.format(name))

            facts.append((name, args))

        operations = facts

    # Execute a raw command with server.shell
    elif operations[0] == 'exec':
        command = 'exec'
        operations = operations[1:]

    # Deploy files(s)
    elif all(cmd.endswith('.py') for cmd in operations):
        command = 'deploy'
        operations = operations[0:]

        # Check each file exists
        for file in operations:
            if not path.exists(file):
                raise CliError('No deploy file: {0}'.format(file))

    # Operation w/optional args (<module>.<op> ARG1 ARG2 ...)
    elif len(operations[0].split('.')) == 2:
        command = 'op'
        operations = get_operation_and_args(operations)

    else:
        raise CliError('''Invalid operations: {0}

Operation usage:
pyinfra INVENTORY deploy_web.py [deploy_db.py]...
pyinfra INVENTORY server.user pyinfra home=/home/pyinfra
pyinfra INVENTORY exec -- echo "hello world"
pyinfra INVENTORY fact os [users]...'''.format(operations))

    # Create an empty/unitialised state object
    state = State()
    pseudo_state.set(state)

    # Setup printing on the new state
    print_output = verbosity > 0
    print_fact_output = verbosity > 1

    state.print_output = print_output  # -v
    state.print_fact_info = print_output  # -v
    state.print_fact_output = print_fact_output  # -vv

    print('--> Loading config...')

    # Load up any config.py from the filesystem
    config = load_config(deploy_dir)

    # Load any hooks/config from the deploy file
    if command == 'deploy':
        load_deploy_config(operations[0], config)

    # Arg based config overrides
    if sudo:
        config.SUDO = True
    if sudo_user:
        config.SUDO_USER = sudo_user

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    print('--> Loading inventory...')

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        deploy_dir=deploy_dir,
        ssh_port=port,
        ssh_user=user,
        ssh_key=key,
        ssh_key_password=key_password,
        ssh_password=password,
    )

    # Apply any --limit to the inventory
    limit_hosts = None

    if limit:
        try:
            # --limit may name an inventory group...
            limit_hosts = inventory.get_group(limit)
        except NoGroupError:
            # ...otherwise it is comma separated hostname glob patterns
            limits = limit.split(',')

            limit_hosts = [
                host for host in inventory
                if any(fnmatch(host.name, limit) for limit in limits)
            ]

    # Attach to pseudo inventory
    pseudo_inventory.set(inventory)

    # Initialise the state, passing any initial --limit
    state.init(inventory, config, initial_limit=limit_hosts)

    # If --debug-data dump & exit
    if debug_data:
        print_inventory(state)
        _exit()

    # Set the deploy directory
    state.deploy_dir = deploy_dir

    # Setup the data to be passed to config hooks
    hook_data = FallbackDict(
        state.inventory.get_override_data(),
        state.inventory.get_group_data(inventory_group),
        state.inventory.get_data(),
    )

    # Run the before_connect hook if provided
    run_hook(state, 'before_connect', hook_data)

    # Connect to all the servers
    print('--> Connecting to hosts...')
    connect_all(state)

    # Run the before_connect hook if provided
    run_hook(state, 'before_facts', hook_data)

    # Just getting a fact?
    #
    if command == 'fact':
        print()
        print('--> Gathering facts...')

        # Print facts as we get them
        state.print_fact_info = True

        # Print fact output with -v
        state.print_fact_output = print_output

        fact_data = {}

        for i, command in enumerate(operations):
            name, args = command
            fact_data[name] = get_facts(
                state, name,
                args=args,
            )

        print_facts(fact_data)
        _exit()

    # Prepare the deploy!
    #

    # Execute a raw command with server.shell
    if command == 'exec':
        # Print the output of the command
        state.print_output = True

        add_op(
            state, server.shell,
            ' '.join(operations),
        )

    # Deploy files(s)
    elif command == 'deploy':
        print()
        print('--> Preparing operations...')

        # Number of "steps" to make = number of files * number of hosts
        for i, filename in enumerate(operations):
            logger.info('Loading: {0}'.format(click.style(filename, bold=True)))
            state.current_op_file = i
            load_deploy_file(state, filename)

    # Operation w/optional args
    elif command == 'op':
        print()
        print('--> Preparing operation...')

        op, args = operations
        add_op(state, op, *args[0], **args[1])

    # Always show meta output
    print()
    print('--> Proposed changes:')
    print_meta(state)

    # If --debug-facts or --debug-operations, print and exit
    if debug_facts or debug_operations:
        if debug_facts:
            print_state_facts(state)
        if debug_operations:
            print_state_operations(state)
        _exit()

    # Run the operations we generated with the deploy file
    if dry:
        _exit()

    print()

    # Run the before_deploy hook if provided
    run_hook(state, 'before_deploy', hook_data)

    print('--> Beginning operation run...')
    run_ops(state, serial=serial, no_wait=no_wait)

    # Run the after_deploy hook if provided
    run_hook(state, 'after_deploy', hook_data)

    print('--> Results:')
    print_results(state)

    # Triggers any executor disconnect requirements
    disconnect_all(state)

    _exit()
def _main(
    inventory,
    commands,
    verbosity,
    user,
    port,
    key,
    key_password,
    password,
    sudo,
    sudo_user,
    su_user,
    parallel,
    fail_percent,
    dry,
    limit,
    no_wait,
    serial,
    debug,
    debug_data,
    debug_state,
    facts=None,
    operations=None,
):
    '''
    CLI entry point: resolve the deploy directory, classify the commands
    (facts / exec / deploy files / single op), load config + inventory,
    connect to hosts (with a progress spinner), build operations and run
    them.

    NOTE(review): ``facts`` and ``operations`` look unused here —
    presumably kept for CLI-flag signature compatibility; confirm against
    the caller.
    '''
    print()
    print('### {0}'.format(click.style('Welcome to pyinfra', bold=True)))
    print()

    # Setup logging
    log_level = logging.DEBUG if debug else logging.INFO
    setup_logging(log_level)

    deploy_dir = getcwd()
    potential_deploy_dirs = []

    # This is the most common case: we have a deploy file so use it's
    # pathname - we only look at the first file as we can't have multiple
    # deploy directories.
    if commands[0].endswith('.py'):
        deploy_file_dir, _ = path.split(commands[0])
        above_deploy_file_dir, _ = path.split(deploy_file_dir)

        deploy_dir = deploy_file_dir

        potential_deploy_dirs.extend((
            deploy_file_dir, above_deploy_file_dir,
        ))

    # If we have a valid inventory, look in it's path and it's parent for
    # group_data or config.py to indicate deploy_dir (--fact, --run).
    if inventory.endswith('.py') and path.isfile(inventory):
        inventory_dir, _ = path.split(inventory)
        above_inventory_dir, _ = path.split(inventory_dir)

        potential_deploy_dirs.extend((
            inventory_dir, above_inventory_dir,
        ))

    # First candidate directory containing group_data/ or config.py wins
    for potential_deploy_dir in potential_deploy_dirs:
        logger.debug('Checking potential directory: {0}'.format(
            potential_deploy_dir,
        ))

        if any((
            path.isdir(path.join(potential_deploy_dir, 'group_data')),
            path.isfile(path.join(potential_deploy_dir, 'config.py')),
        )):
            logger.debug(
                'Setting directory to: {0}'.format(potential_deploy_dir))
            deploy_dir = potential_deploy_dir
            break

    # List facts
    if commands[0] == 'fact':
        command = 'fact'

        fact_names = commands[1:]
        facts = []

        for name in fact_names:
            args = None

            # Fact args come colon-separated: "fact:arg1,arg2"
            if ':' in name:
                name, args = name.split(':', 1)
                args = args.split(',')

            if not is_fact(name):
                raise CliError('No fact: {0}'.format(name))

            facts.append((name, args))

        commands = facts

    # Execute a raw command with server.shell
    elif commands[0] == 'exec':
        command = 'exec'
        commands = commands[1:]

    # Deploy files(s)
    elif all(cmd.endswith('.py') for cmd in commands):
        command = 'deploy'
        commands = commands[0:]

        # Check each file exists
        for file in commands:
            if not path.exists(file):
                raise CliError('No deploy file: {0}'.format(file))

    # Operation w/optional args
    elif len(commands) == 2:
        command = 'op'
        commands = get_operation_and_args(
            commands[0], commands[1],
        )

    else:
        raise CliError('''Invalid commands: {0}

Command usage:
pyinfra INVENTORY deploy_web.py [deploy_db.py]...
pyinfra INVENTORY server.user pyinfra,home=/home/pyinfra
pyinfra INVENTORY exec -- echo "hello world"
pyinfra INVENTORY fact os [users]...'''.format(commands))

    print('--> Loading config...')

    # Load up any config.py from the filesystem
    config = load_config(deploy_dir)

    # Load any hooks/config from the deploy file
    if command == 'deploy':
        load_deploy_config(commands[0], config)

    # Arg based config overrides
    if sudo:
        config.SUDO = True
    if sudo_user:
        config.SUDO_USER = sudo_user

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    print('--> Loading inventory...')

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        deploy_dir=deploy_dir,
        limit=limit,
        ssh_user=user,
        ssh_key=key,
        ssh_key_password=key_password,
        ssh_password=password,
        ssh_port=port,
    )

    # If --debug-data dump & exit
    if debug_data:
        print_inventory(inventory)
        _exit()

    # Attach to pseudo inventory
    pseudo_inventory.set(inventory)

    # Create/set the state
    state = State(inventory, config)
    state.is_cli = True
    state.print_lines = True
    state.deploy_dir = deploy_dir

    # Setup printing on the new state
    print_output = verbosity > 0
    print_fact_output = verbosity > 1

    state.print_output = print_output  # -v
    state.print_fact_info = print_output  # -v
    state.print_fact_output = print_fact_output  # -vv

    # Attach to pseudo state
    pseudo_state.set(state)

    # Setup the data to be passed to config hooks
    hook_data = FallbackAttrData(
        state.inventory.get_override_data(),
        state.inventory.get_group_data(inventory_group),
        state.inventory.get_data(),
    )

    # Run the before_connect hook if provided
    run_hook(state, 'before_connect', hook_data)

    # Connect to all the servers
    print('--> Connecting to hosts...')
    with progress_spinner(state.inventory) as progress:
        connect_all(state, progress=progress)

    # Run the before_connect hook if provided
    run_hook(state, 'before_facts', hook_data)

    # Just getting a fact?
    #
    if command == 'fact':
        print()
        print('--> Gathering facts...')

        # Print facts as we get them
        state.print_fact_info = True

        # Print fact output with -v
        state.print_fact_output = print_output

        fact_data = {}

        with progress_spinner(commands) as progress:
            for i, (name, args) in enumerate(commands):
                fact_data[name] = get_facts(
                    state, name,
                    args=args,
                )
                progress()

        print_facts(fact_data)
        _exit()

    # Prepare the deploy!
    #

    # Execute a raw command with server.shell
    if command == 'exec':
        # Print the output of the command
        state.print_output = True

        add_op(
            state, server.shell,
            ' '.join(commands),
        )

    # Deploy files(s)
    elif command == 'deploy':
        print()
        print('--> Preparing operations...')

        # Number of "steps" to make = number of files * number of hosts
        prepare_steps = len(commands) * len(state.inventory)

        with progress_spinner(prepare_steps) as progress:
            for filename in commands:
                load_deploy_file(state, filename, progress=progress)
                progress()

    # Operation w/optional args
    elif command == 'op':
        print()
        print('--> Preparing operation...')

        op, args = commands
        add_op(state, op, *args[0], **args[1])

    # Always show meta output
    print()
    print('--> Proposed changes:')
    print_meta(state, inventory)

    # If --debug-state, dump state (ops, op order, op meta) now & exit
    if debug_state:
        dump_state(state)
        _exit()

    # Run the operations we generated with the deploy file
    if dry:
        _exit()

    print()

    # Run the before_deploy hook if provided
    run_hook(state, 'before_deploy', hook_data)

    print('--> Beginning operation run...')

    # Number of "steps" to make = number of operations * number of hosts
    operation_steps = len(state.op_order) * len(state.inventory)

    with progress_spinner(operation_steps) as progress:
        run_ops(
            state,
            serial=serial,
            no_wait=no_wait,
            progress=progress,
        )

    # Run the after_deploy hook if provided
    run_hook(state, 'after_deploy', hook_data)

    print('--> Results:')
    print_results(state, inventory)

    _exit()
def _main(
    inventory,
    operations,
    verbosity,
    user,
    port,
    key,
    key_password,
    password,
    winrm_username,
    winrm_password,
    winrm_port,
    shell_executable,
    sudo,
    sudo_user,
    use_sudo_password,
    su_user,
    parallel,
    fail_percent,
    dry,
    limit,
    no_wait,
    serial,
    quiet,
    debug,
    debug_data,
    debug_facts,
    debug_operations,
    facts=None,
    print_operations=None,
    support=None,
):
    '''
    CLI entry point: resolve the deploy directory, classify the requested
    operations (debug-inventory / facts / exec / deploy files / single op),
    load config + inventory (SSH and WinRM), connect to hosts, build
    operations and run them.

    NOTE(review): ``facts``, ``print_operations`` and ``support`` look
    unused here — presumably kept for CLI-flag signature compatibility;
    confirm against the caller.
    '''
    if not debug and not sys.warnoptions:
        warnings.simplefilter('ignore')

    # Setup logging
    log_level = logging.INFO

    if debug:
        log_level = logging.DEBUG
    elif quiet:
        log_level = logging.WARNING

    setup_logging(log_level)

    # Bootstrap any virtualenv
    init_virtualenv()

    deploy_dir = getcwd()
    potential_deploy_dirs = []

    # This is the most common case: we have a deploy file so use it's
    # pathname - we only look at the first file as we can't have multiple
    # deploy directories.
    if operations[0].endswith('.py'):
        deploy_file_dir, _ = path.split(operations[0])
        above_deploy_file_dir, _ = path.split(deploy_file_dir)

        deploy_dir = deploy_file_dir

        potential_deploy_dirs.extend((
            deploy_file_dir, above_deploy_file_dir,
        ))

    # If we have a valid inventory, look in it's path and it's parent for
    # group_data or config.py to indicate deploy_dir (--fact, --run).
    if inventory.endswith('.py') and path.isfile(inventory):
        inventory_dir, _ = path.split(inventory)
        above_inventory_dir, _ = path.split(inventory_dir)

        potential_deploy_dirs.extend((
            inventory_dir, above_inventory_dir,
        ))

    # First candidate directory containing group_data/ or config.py wins
    for potential_deploy_dir in potential_deploy_dirs:
        logger.debug('Checking potential directory: {0}'.format(
            potential_deploy_dir,
        ))

        if any((
            path.isdir(path.join(potential_deploy_dir, 'group_data')),
            path.isfile(path.join(potential_deploy_dir, 'config.py')),
        )):
            logger.debug(
                'Setting directory to: {0}'.format(potential_deploy_dir))
            deploy_dir = potential_deploy_dir
            break

    # Create an empty/unitialised state object
    state = State()

    # Set the deploy directory
    state.deploy_dir = deploy_dir

    pseudo_state.set(state)

    if verbosity > 0:
        state.print_fact_info = True
        state.print_noop_info = True

    if verbosity > 1:
        state.print_input = state.print_fact_input = True

    if verbosity > 2:
        state.print_output = state.print_fact_output = True

    if not quiet:
        click.echo('--> Loading config...', err=True)

    # Load up any config.py from the filesystem
    config = load_config(deploy_dir)

    # Make a copy before we overwrite
    original_operations = operations

    # Debug (print) inventory + group data
    if operations[0] == 'debug-inventory':
        command = 'debug-inventory'

    # Get all non-arg facts
    elif operations[0] == 'all-facts':
        command = 'fact'

        fact_names = []
        for fact_name in get_fact_names():
            fact_class = get_fact_class(fact_name)
            # Skip short facts and facts whose command needs arguments
            if (
                not issubclass(fact_class, ShortFactBase)
                and not callable(fact_class.command)
            ):
                fact_names.append(fact_name)

        operations = [(name, None) for name in fact_names]

    # Get one or more facts
    elif operations[0] == 'fact':
        command = 'fact'

        fact_names = operations[1:]
        facts = []

        for name in fact_names:
            args = None

            # Fact args come colon-separated: "fact:arg1,arg2"
            if ':' in name:
                name, args = name.split(':', 1)
                args = args.split(',')

            if not is_fact(name):
                raise CliError('No fact: {0}'.format(name))

            facts.append((name, args))

        operations = facts

    # Execute a raw command with server.shell
    elif operations[0] == 'exec':
        command = 'exec'
        operations = operations[1:]

    # Execute one or more deploy files
    elif all(cmd.endswith('.py') for cmd in operations):
        command = 'deploy'
        operations = operations[0:]

        # Check each file exists
        for file in operations:
            if not path.exists(file):
                raise CliError('No deploy file: {0}'.format(file))

    # Operation w/optional args (<module>.<op> ARG1 ARG2 ...)
    elif len(operations[0].split('.')) == 2:
        command = 'op'
        operations = get_operation_and_args(operations)

    else:
        raise CliError('''Invalid operations: {0}

Operation usage:
pyinfra INVENTORY deploy_web.py [deploy_db.py]...
pyinfra INVENTORY server.user pyinfra home=/home/pyinfra
pyinfra INVENTORY exec -- echo "hello world"
pyinfra INVENTORY fact os [users]...'''.format(operations))

    # Load any hooks/config from the deploy file
    if command == 'deploy':
        load_deploy_config(operations[0], config)

    # Arg based config overrides
    if sudo:
        config.SUDO = True
    if sudo_user:
        config.SUDO_USER = sudo_user
    if use_sudo_password:
        config.USE_SUDO_PASSWORD = use_sudo_password

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if shell_executable:
        config.SHELL = shell_executable

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    if not quiet:
        click.echo('--> Loading inventory...', err=True)

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        deploy_dir=deploy_dir,
        ssh_port=port,
        ssh_user=user,
        ssh_key=key,
        ssh_key_password=key_password,
        ssh_password=password,
        winrm_username=winrm_username,
        winrm_password=winrm_password,
        winrm_port=winrm_port,
    )

    # Attach to pseudo inventory
    pseudo_inventory.set(inventory)

    # Now that we have inventory, apply --limit config override
    initial_limit = None
    if limit:
        all_limit_hosts = []

        for limiter in limit:
            try:
                # Each --limit may name an inventory group...
                limit_hosts = inventory.get_group(limiter)
            except NoGroupError:
                # ...otherwise treat it as hostname glob pattern(s)
                limits = limiter.split(',')
                if len(limits) > 1:
                    logger.warning((
                        'Specifying comma separated --limit values is deprecated, '
                        'please use multiple --limit options.'))

                limit_hosts = [
                    host for host in inventory
                    if any(fnmatch(host.name, match) for match in limits)
                ]

            all_limit_hosts.extend(limit_hosts)

        # De-duplicate hosts matched by more than one --limit
        initial_limit = list(set(all_limit_hosts))

    # Initialise the state
    state.init(inventory, config, initial_limit=initial_limit)

    # If --debug-data dump & exit
    if command == 'debug-inventory' or debug_data:
        if debug_data:
            logger.warning((
                '--debug-data is deprecated, '
                'please use `pyinfra INVENTORY debug-inventory` instead.'))
        print_inventory(state)
        _exit()

    # Connect to all the servers
    if not quiet:
        click.echo(err=True)
        click.echo('--> Connecting to hosts...', err=True)
    connect_all(state)

    # Just getting a fact?
    #
    if command == 'fact':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Gathering facts...', err=True)

        # Print facts as we get them
        state.print_fact_info = True

        fact_data = {}

        for i, command in enumerate(operations):
            name, args = command
            fact_key = name

            # Disambiguate the same fact requested with different args
            if args:
                fact_key = '{0}{1}'.format(name, tuple(args))

            try:
                fact_data[fact_key] = get_facts(
                    state,
                    name,
                    args=args,
                    apply_failed_hosts=False,
                )
            except PyinfraError:
                # Host failures are not fatal when just gathering facts
                pass

        print_facts(fact_data)
        _exit()

    # Prepare the deploy!
    #

    # Execute a raw command with server.shell
    if command == 'exec':
        # Print the output of the command
        state.print_output = True

        add_op(
            state,
            server.shell,
            ' '.join(operations),
            _allow_cli_mode=True,
        )

    # Deploy files(s)
    elif command == 'deploy':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Preparing operations...', err=True)

        # Number of "steps" to make = number of files * number of hosts
        for i, filename in enumerate(operations):
            logger.info('Loading: {0}'.format(click.style(filename, bold=True)))
            state.current_op_file = i
            load_deploy_file(state, filename)

    # Operation w/optional args
    elif command == 'op':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Preparing operation...', err=True)

        op, args = operations
        args, kwargs = args
        kwargs['_allow_cli_mode'] = True

        def print_host_ready(host):
            # Log each host as its operation is prepared
            logger.info('{0}{1} {2}'.format(
                host.print_prefix,
                click.style('Ready:', 'green'),
                click.style(original_operations[0], bold=True),
            ))

        kwargs['_after_host_callback'] = print_host_ready

        add_op(state, op, *args, **kwargs)

    # Always show meta output
    if not quiet:
        click.echo(err=True)
        click.echo('--> Proposed changes:', err=True)
    print_meta(state)

    # If --debug-facts or --debug-operations, print and exit
    if debug_facts or debug_operations:
        if debug_facts:
            print_state_facts(state)
        if debug_operations:
            print_state_operations(state)
        _exit()

    # Run the operations we generated with the deploy file
    if dry:
        _exit()

    if not quiet:
        click.echo(err=True)

    if not quiet:
        click.echo('--> Beginning operation run...', err=True)
    run_ops(state, serial=serial, no_wait=no_wait)

    if not quiet:
        click.echo('--> Results:', err=True)
    print_results(state)

    _exit()
def _main(
    inventory,
    operations,
    verbosity,
    chdir,
    ssh_user,
    ssh_port,
    ssh_key,
    ssh_key_password,
    ssh_password,
    winrm_username,
    winrm_password,
    winrm_port,
    winrm_transport,
    shell_executable,
    sudo,
    sudo_user,
    use_sudo_password,
    su_user,
    parallel,
    fail_percent,
    data,
    group_data,
    config_filename,
    dry,
    limit,
    no_wait,
    serial,
    quiet,
    debug,
    debug_facts,
    debug_operations,
    support=None,  # NOTE(review): accepted but never read in this body — presumably consumed by the CLI wrapper; confirm
):
    """Core CLI entry point: parse the requested command out of ``operations``,
    build state/config/inventory, connect to hosts and execute.

    Dispatches on the first positional argument into one of five commands:
    ``debug-inventory`` (print inventory & exit), ``fact`` (gather & print
    facts), ``exec`` (raw shell command via ``server.shell``), ``deploy``
    (one or more ``.py`` deploy files) or ``op`` (``<module>.<op> ARGS``).

    Always terminates the process via ``_exit()``; raises ``CliError`` for
    invalid operations or missing deploy files before any hosts are touched.
    """

    # Setup working directory
    #
    if chdir:
        os_chdir(chdir)

    # Setup logging
    #
    # Silence Python warnings unless debugging, and only when the user hasn't
    # already configured warnings themselves (via -W / PYTHONWARNINGS).
    if not debug and not sys.warnoptions:
        warnings.simplefilter("ignore")

    log_level = logging.INFO
    if debug:
        log_level = logging.DEBUG
    elif quiet:
        log_level = logging.WARNING

    setup_logging(log_level)

    # Bootstrap any virtualenv
    init_virtualenv()

    # Check operations are valid and setup command
    #

    # Make a copy before we overwrite (used below by the "op" command's
    # per-host ready-callback to show the operation exactly as typed)
    original_operations = operations

    # Debug (print) inventory + group data
    if operations[0] == "debug-inventory":
        command = "debug-inventory"

    # Get one or more facts
    elif operations[0] == "fact":
        command = "fact"
        operations = get_facts_and_args(operations[1:])

    # Execute a raw command with server.shell
    elif operations[0] == "exec":
        command = "exec"
        operations = operations[1:]

    # Execute one or more deploy files
    elif all(cmd.endswith(".py") for cmd in operations):
        command = "deploy"
        filenames = []

        for filename in operations[0:]:
            if path.exists(filename):
                filenames.append(filename)
                continue

            # The path may have been given relative to the original cwd;
            # rewrite it relative to the --chdir target if that could resolve it.
            if chdir and filename.startswith(chdir):
                correct_filename = path.relpath(filename, chdir)
                logger.warning(
                    (
                        "Fixing deploy filename under `--chdir` argument: "
                        # NOTE(review): this message looks like it should
                        # interpolate the original filename here — confirm
                        # the intended f-string placeholder
                        f"(unknown) -> {correct_filename}"
                    ),
                )
                filenames.append(correct_filename)
                continue

            raise CliError(
                "No deploy file: {0}".format(
                    path.join(chdir, filename) if chdir else filename,
                ),
            )

        operations = filenames

    # Operation w/optional args (<module>.<op> ARG1 ARG2 ...)
    elif len(operations[0].split(".")) == 2:
        command = "op"
        operations = get_operation_and_args(operations)

    else:
        raise CliError(
            """Invalid operations: {0}

    Operation usage:
    pyinfra INVENTORY deploy_web.py [deploy_db.py]...
    pyinfra INVENTORY server.user pyinfra home=/home/pyinfra
    pyinfra INVENTORY exec -- echo "hello world"
    pyinfra INVENTORY fact os [users]...""".format(
                operations,
            ),
        )

    # Setup state, config & inventory
    #
    cwd = getcwd()
    if cwd not in sys.path:  # ensure cwd is present in sys.path
        sys.path.append(cwd)

    state = State()
    state.cwd = cwd
    ctx_state.set(state)

    # Verbosity levels progressively enable more printing:
    # -v fact/noop info, -vv command input, -vvv full command output
    if verbosity > 0:
        state.print_fact_info = True
        state.print_noop_info = True

    if verbosity > 1:
        state.print_input = state.print_fact_input = True

    if verbosity > 2:
        state.print_output = state.print_fact_output = True

    if not quiet:
        click.echo("--> Loading config...", err=True)

    config = Config()
    ctx_config.set(config)

    # Load up any config.py from the filesystem
    config_filename = path.join(state.cwd, config_filename)
    if path.exists(config_filename):
        exec_file(config_filename)

    # Lock the current config, this allows us to restore this version after
    # executing deploy files that may alter them.
    config.lock_current_state()

    # Arg based config overrides
    if sudo:
        config.SUDO = True
        if sudo_user:
            config.SUDO_USER = sudo_user

    if use_sudo_password:
        config.USE_SUDO_PASSWORD = use_sudo_password

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if shell_executable:
        # Allow explicitly disabling the shell wrapper with "None"/"null"
        config.SHELL = None if shell_executable in ("None", "null") else shell_executable

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    if not quiet:
        click.echo("--> Loading inventory...", err=True)

    # --data KEY=VALUE overrides, values parsed like any other CLI argument
    override_data = {}
    for arg in data:
        key, value = arg.split("=", 1)
        override_data[key] = value
    override_data = {key: parse_cli_arg(value) for key, value in override_data.items()}

    # Connection-specific CLI flags also become inventory override data
    for key, value in (
        ("ssh_user", ssh_user),
        ("ssh_key", ssh_key),
        ("ssh_key_password", ssh_key_password),
        ("ssh_port", ssh_port),
        ("ssh_password", ssh_password),
        ("winrm_username", winrm_username),
        ("winrm_password", winrm_password),
        ("winrm_port", winrm_port),
        ("winrm_transport", winrm_transport),
    ):
        if value:
            override_data[key] = value

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        cwd=state.cwd,
        override_data=override_data,
        group_data_directories=group_data,
    )
    ctx_inventory.set(inventory)

    # Now that we have inventory, apply --limit config override
    initial_limit = None
    if limit:
        all_limit_hosts = []

        for limiter in limit:
            try:
                # A limiter may name an inventory group...
                limit_hosts = inventory.get_group(limiter)
            except NoGroupError:
                # ...or be a glob pattern matched against host names
                limit_hosts = [host for host in inventory if fnmatch(host.name, limiter)]

            if not limit_hosts:
                logger.warning("No host matches found for --limit pattern: {0}".format(limiter))

            all_limit_hosts.extend(limit_hosts)

        # De-duplicate hosts matched by more than one limiter
        initial_limit = list(set(all_limit_hosts))

    # Initialise the state
    state.init(inventory, config, initial_limit=initial_limit)

    # debug-inventory needs no connections — print and exit immediately
    if command == "debug-inventory":
        print_inventory(state)
        _exit()

    # Connect to the hosts & start handling the user commands
    #
    if not quiet:
        click.echo(err=True)
        click.echo("--> Connecting to hosts...", err=True)

    connect_all(state)

    if command == "fact":
        if not quiet:
            click.echo(err=True)
            click.echo("--> Gathering facts...", err=True)

        state.print_fact_info = True
        fact_data = {}

        # NOTE: the loop variable shadows the outer ``command``; harmless only
        # because this branch always exits via _exit() below.
        for i, command in enumerate(operations):
            fact_cls, args, kwargs = command
            fact_key = fact_cls.name

            # Distinguish repeated facts called with different args/kwargs
            if args or kwargs:
                fact_key = "{0}{1}{2}".format(
                    fact_cls.name,
                    args or "",
                    " ({0})".format(get_kwargs_str(kwargs)) if kwargs else "",
                )

            try:
                fact_data[fact_key] = get_facts(
                    state,
                    fact_cls,
                    args=args,
                    kwargs=kwargs,
                    apply_failed_hosts=False,
                )
            except PyinfraError:
                # Best-effort: keep gathering the remaining facts if one fails
                pass

        print_facts(fact_data)
        _exit()

    if command == "exec":
        # Print the output of the command
        state.print_output = True
        add_op(
            state,
            server.shell,
            " ".join(operations),
            _allow_cli_mode=True,
        )

    elif command == "deploy":
        if not quiet:
            click.echo(err=True)
            click.echo("--> Preparing operations...", err=True)

        # Number of "steps" to make = number of files * number of hosts
        for i, filename in enumerate(operations):
            logger.info("Loading: {0}".format(click.style(filename, bold=True)))
            state.current_op_file_number = i
            load_deploy_file(state, filename)
            # Remove any config changes introduced by the deploy file & any includes
            config.reset_locked_state()

    elif command == "op":
        if not quiet:
            click.echo(err=True)
            click.echo("--> Preparing operation...", err=True)

        op, args = operations
        args, kwargs = args
        kwargs["_allow_cli_mode"] = True

        def print_host_ready(host):
            # Per-host "Ready" log line, echoing the op exactly as the user typed it
            logger.info(
                "{0}{1} {2}".format(
                    host.print_prefix,
                    click.style("Ready:", "green"),
                    click.style(original_operations[0], bold=True),
                ),
            )

        kwargs["_after_host_callback"] = print_host_ready

        add_op(state, op, *args, **kwargs)

    # Print proposed changes, execute unless --dry, and exit
    #
    if not quiet:
        click.echo(err=True)
        click.echo("--> Proposed changes:", err=True)
    print_meta(state)

    # If --debug-facts or --debug-operations, print and exit
    if debug_facts or debug_operations:
        if debug_facts:
            print_state_facts(state)

        if debug_operations:
            print_state_operations(state)

        _exit()

    if dry:
        _exit()

    if not quiet:
        click.echo(err=True)

    if not quiet:
        click.echo("--> Beginning operation run...", err=True)
    run_ops(state, serial=serial, no_wait=no_wait)

    if not quiet:
        click.echo("--> Results:", err=True)
    print_results(state)

    _exit()