def test_connect_exceptions(self, fake_key_open):
    for exception_class in (
        AuthenticationException,
        SSHException,
        gaierror,
        socket_error,
        EOFError,
    ):
        state = State(make_inventory(hosts=(
            ('somehost', {'ssh_key': 'testkey'}),
        )), Config())

        def raise_exception(*args, **kwargs):
            raise exception_class

        self.fake_connect_mock.side_effect = raise_exception

        with self.assertRaises(PyinfraError):
            connect_all(state)

        assert len(state.active_hosts) == 0
def test_api_op_line_numbers(self):
    inventory = make_inventory()
    state = State(inventory, Config())
    connect_all(state)

    another_host = inventory.get_host("anotherhost")

    def add_another_op():
        return add_op(state, server.shell, "echo second-op")[another_host].hash

    first_op_hash = add_op(state, server.shell, "echo first-op")[another_host].hash
    second_op_hash = add_another_op()  # note `add_op` will be called on an earlier line

    op_order = state.get_op_order()
    assert len(op_order) == 2

    assert op_order[0] == first_op_hash
    assert op_order[1] == second_op_hash
def test_op_line_numbers(self):
    inventory = make_inventory()
    state = State(inventory, Config())
    connect_all(state)

    # Add op to both hosts
    add_op(state, server.shell, 'echo "hi"')

    # Add op to just the second host - using the pseudo modules such that
    # it replicates a deploy file.
    pseudo_state.set(state)
    pseudo_host.set(inventory['anotherhost'])
    first_pseudo_hash = server.user('anotherhost_user').hash
    first_pseudo_call_line = getframeinfo(currentframe()).lineno - 1

    # Add op to just the first host - using the pseudo modules such that
    # it replicates a deploy file.
    pseudo_state.set(state)
    pseudo_host.set(inventory['somehost'])
    second_pseudo_hash = server.user('somehost_user').hash
    second_pseudo_call_line = getframeinfo(currentframe()).lineno - 1

    pseudo_state.reset()
    pseudo_host.reset()

    # Ensure there are three ops
    op_order = state.get_op_order()
    assert len(op_order) == 3

    # And that the two ops above were called in the expected order
    assert op_order[1] == first_pseudo_hash
    assert op_order[2] == second_pseudo_hash

    # And that they have the expected line numbers
    assert state.op_line_numbers_to_hash.get((0, first_pseudo_call_line)) == first_pseudo_hash
    assert state.op_line_numbers_to_hash.get((0, second_pseudo_call_line)) == second_pseudo_hash

    # Ensure each host has the two ops that apply to it
    assert len(state.ops[inventory.get_host('somehost')]) == 2
    assert len(state.ops[inventory.get_host('anotherhost')]) == 2
def test_get_fact_error_ignore(self):
    inventory = make_inventory(hosts=('anotherhost',))
    state = State(inventory, Config())
    anotherhost = inventory.get_host('anotherhost')

    connect_all(state)

    state.current_op_global_kwargs = {
        'sudo': False,
        'sudo_user': None,
        'use_sudo_password': False,
        'su_user': None,
        'ignore_errors': True,
        'timeout': None,
        'env': {},
    }

    with patch('pyinfra.api.connectors.ssh.run_shell_command') as fake_run_command:
        fake_run_command.return_value = False, MagicMock()
        fact_data = get_facts(state, 'command', ('fail command',))

    assert fact_data == {anotherhost: None}

    fake_run_command.assert_called_with(
        state,
        anotherhost,
        'fail command',
        print_input=False,
        print_output=False,
        shell_executable=None,
        su_user=None,
        sudo=False,
        sudo_user=None,
        timeout=None,
        env={},
        use_sudo_password=False,
        return_combined_output=True,
    )
def test_connect_with_rsa_ssh_key(self):
    state = State(make_inventory(hosts=(
        ('somehost', {'ssh_key': 'testkey'}),
    )), Config())

    with patch('pyinfra.api.connectors.ssh.path.isfile', lambda *args, **kwargs: True), \
            patch('pyinfra.api.connectors.ssh.RSAKey.from_private_key_file') as fake_key_open:

        fake_key = MagicMock()
        fake_key_open.return_value = fake_key

        state.deploy_dir = '/'

        connect_all(state)

        # Check the key was created properly
        fake_key_open.assert_called_with(filename='testkey')

        # Check the certificate file was then loaded
        fake_key.load_certificate.assert_called_with('testkey.pub')

        # And check the Paramiko SSH call was correct
        self.fake_connect_mock.assert_called_with(
            'somehost',
            allow_agent=False,
            look_for_keys=False,
            pkey=fake_key,
            timeout=10,
            username='******',
        )

        # Check that loading the same key again is cached in the state
        second_state = State(make_inventory(hosts=(
            ('somehost', {'ssh_key': 'testkey'}),
        )), Config())
        second_state.private_keys = state.private_keys

        connect_all(second_state)
def test_get_fact_current_op_meta(self):
    inventory = make_inventory(hosts=('anotherhost',))
    state = State(inventory, Config())
    anotherhost = inventory.get_host('anotherhost')

    connect_all(state)

    state.current_op_hash = 'abc'
    state.op_meta['abc'] = {
        'sudo': True,
        'sudo_user': '******',
        'use_sudo_password': True,
        'su_user': '******',
        'ignore_errors': False,
        'timeout': 10,
    }

    with patch('pyinfra.api.connectors.ssh.run_shell_command') as fake_run_command:
        fake_run_command.return_value = MagicMock(), [('stdout', 'some-output')]
        fact_data = get_facts(state, 'command', ('yes',))

    assert fact_data == {anotherhost: 'some-output'}

    fake_run_command.assert_called_with(
        state,
        anotherhost,
        'yes',
        print_input=False,
        print_output=False,
        shell_executable=None,
        su_user='******',
        sudo=True,
        sudo_user='******',
        timeout=10,
        use_sudo_password=True,
        return_combined_output=True,
    )
def test_run_once_serial_op(self):
    inventory = make_inventory()
    state = State(inventory, Config())
    connect_all(state)

    # Add a run once op
    add_op(state, server.shell, 'echo "hi"', run_once=True, serial=True)

    # Ensure it's added to op_order
    assert len(state.get_op_order()) == 1

    somehost = inventory.get_host('somehost')
    anotherhost = inventory.get_host('anotherhost')

    # Ensure between the two hosts we only run the one op
    assert len(state.ops[somehost]) + len(state.ops[anotherhost]) == 1

    # Check run works
    run_ops(state)

    assert (
        state.results[somehost]['success_ops']
        + state.results[anotherhost]['success_ops']
    ) == 1
def test_op_hosts_limit(self):
    inventory = make_inventory()
    state = State(inventory, Config())
    connect_all(state)

    # Add op to both hosts
    add_op(state, server.shell, 'echo "hi"')

    # Add op to just the first host
    add_op(
        state, server.user,
        'somehost_user',
        hosts=inventory['somehost'],
    )

    # Ensure there are two ops
    self.assertEqual(len(state.get_op_order()), 2)

    # Ensure somehost has two ops and anotherhost only has the one
    self.assertEqual(len(state.ops[inventory.get_host('somehost')]), 2)
    self.assertEqual(len(state.ops[inventory.get_host('anotherhost')]), 1)
def test_file_download_op(self):
    inventory = make_inventory()
    state = State(inventory, Config())
    connect_all(state)

    with patch('pyinfra.operations.files.os_path.isfile', lambda *args, **kwargs: True):
        add_op(
            state, files.get,
            name='First op name',
            src='/home/vagrant/file.txt',
            dest='files/file.txt',
        )

    op_order = state.get_op_order()

    assert len(op_order) == 1

    first_op_hash = op_order[0]
    assert state.op_meta[first_op_hash]['names'] == {'First op name'}

    somehost = inventory.get_host('somehost')
    anotherhost = inventory.get_host('anotherhost')

    # Ensure first op has the right (download) command
    assert state.ops[somehost][first_op_hash]['commands'] == [
        FileDownloadCommand('/home/vagrant/file.txt', 'files/file.txt'),
    ]

    with patch('pyinfra.api.util.open', mock_open(read_data='test!'), create=True):
        run_ops(state)

    assert state.results[somehost]['success_ops'] == 1
    assert state.results[somehost]['ops'] == 1
    assert state.results[anotherhost]['success_ops'] == 1
    assert state.results[anotherhost]['ops'] == 1

    assert state.results[somehost]['error_ops'] == 0
    assert state.results[anotherhost]['error_ops'] == 0
def test_get_fact_current_op_global_arguments(self):
    inventory = make_inventory(hosts=("anotherhost",))
    state = State(inventory, Config())
    anotherhost = inventory.get_host("anotherhost")

    connect_all(state)

    anotherhost.current_op_global_kwargs = {
        "sudo": True,
        "sudo_user": "******",
        "use_sudo_password": True,
        "su_user": "******",
        "timeout": 10,
        "env": {"HELLO": "WORLD"},
    }

    with patch("pyinfra.connectors.ssh.run_shell_command") as fake_run_command:
        fake_run_command.return_value = MagicMock(), [("stdout", "some-output")]
        fact_data = get_facts(state, Command, ("yes",))

    assert fact_data == {anotherhost: "some-output"}

    defaults = _get_executor_defaults(state, anotherhost)
    defaults.update(anotherhost.current_op_global_kwargs)

    fake_run_command.assert_called_with(
        state,
        anotherhost,
        "yes",
        print_input=False,
        print_output=False,
        return_combined_output=True,
        **defaults,
    )
def test_rsync_op(self):
    inventory = make_inventory(hosts=('somehost',))
    state = State(inventory, Config())
    connect_all(state)

    with patch('pyinfra.api.connectors.ssh.check_can_rsync'):
        add_op(state, files.rsync, 'src', 'dest', sudo=True, sudo_user='******')

    assert len(state.get_op_order()) == 1

    with patch('pyinfra.api.connectors.ssh.run_local_process') as fake_run_local_process:
        fake_run_local_process.return_value = 0, []
        run_ops(state)

    fake_run_local_process.assert_called_with(
        (
            'rsync -ax --delete --rsh '
            "'ssh -o BatchMode=yes -o StrictHostKeyChecking=no '"
            " --rsync-path 'sudo -u root rsync' src vagrant@somehost:dest"
        ),
        print_output=False,
        print_prefix=inventory.get_host('somehost').print_prefix,
    )
def test_ignore_errors_op_fail(self):
    inventory = make_inventory()
    state = State(inventory, Config())
    connect_all(state)

    add_op(state, server.shell, 'echo "hi"', ignore_errors=True)

    with patch('pyinfra.api.connectors.ssh.run_shell_command') as fake_run_command:
        fake_channel = FakeChannel(1)
        fake_run_command.return_value = (
            False,
            FakeBuffer('', fake_channel),
            FakeBuffer('', fake_channel),
        )

        # This should run OK
        run_ops(state)

        # Ensure the op was added to results
        self.assertEqual(state.results['somehost']['ops'], 1)
        self.assertEqual(state.results['somehost']['error_ops'], 1)
        # But not as a success
        self.assertEqual(state.results['somehost']['success_ops'], 0)
def test_connect_with_rsa_ssh_key_missing_password(self):
    state = State(make_inventory(hosts=(
        ("somehost", {"ssh_key": "testkey"}),
    )), Config())

    with patch("pyinfra.connectors.ssh.path.isfile", lambda *args, **kwargs: True), patch(
        "pyinfra.connectors.ssh.RSAKey.from_private_key_file",
    ) as fake_key_open:
        fake_key_open.side_effect = make_raise_exception_function(PasswordRequiredException)

        fake_key = MagicMock()
        fake_key_open.return_value = fake_key

        with self.assertRaises(PyinfraError) as e:
            connect_all(state)

        assert e.exception.args[0] == (
            "Private key file (testkey) is encrypted, set ssh_key_password "
            "to use this key"
        )
def test_full_op_fail(self):
    inventory = make_inventory()
    state = State(inventory, Config())
    connect_all(state)

    add_op(state, server.shell, 'echo "hi"')

    with patch('pyinfra.api.connectors.ssh.run_shell_command') as fake_run_command:
        fake_channel = FakeChannel(1)
        fake_run_command.return_value = (
            False,
            FakeBuffer('', fake_channel),
            FakeBuffer('', fake_channel),
        )

        with self.assertRaises(PyinfraError) as e:
            run_ops(state)

        self.assertEqual(e.exception.args[0], 'No hosts remaining!')

        # Ensure the op was not flagged as success
        self.assertEqual(state.results['somehost']['success_ops'], 0)
        # And was flagged as an error
        self.assertEqual(state.results['somehost']['error_ops'], 1)
def test_deploy(self):
    inventory = make_inventory()
    somehost = inventory.get_host("somehost")
    anotherhost = inventory.get_host("anotherhost")

    state = State(inventory, Config())

    # Enable printing on this test to catch any exceptions in the formatting
    state.print_output = True
    state.print_input = True
    state.print_fact_info = True
    state.print_noop_info = True

    connect_all(state)

    @deploy
    def test_deploy(state=None, host=None):
        server.shell(commands=["echo first command"])
        server.shell(commands=["echo second command"])

    add_deploy(state, test_deploy)

    op_order = state.get_op_order()

    # Ensure we have the two ops
    assert len(op_order) == 2

    first_op_hash = op_order[0]
    assert state.op_meta[first_op_hash]["names"] == {"test_deploy | Server/Shell"}
    assert state.ops[somehost][first_op_hash]["commands"] == [
        StringCommand("echo first command"),
    ]
    assert state.ops[anotherhost][first_op_hash]["commands"] == [
        StringCommand("echo first command"),
    ]

    second_op_hash = op_order[1]
    assert state.op_meta[second_op_hash]["names"] == {"test_deploy | Server/Shell"}
    assert state.ops[somehost][second_op_hash]["commands"] == [
        StringCommand("echo second command"),
    ]
    assert state.ops[anotherhost][second_op_hash]["commands"] == [
        StringCommand("echo second command"),
    ]

    # Ensure run ops works
    run_ops(state)

    # Ensure ops completed OK
    assert state.results[somehost]["success_ops"] == 2
    assert state.results[somehost]["ops"] == 2
    assert state.results[anotherhost]["success_ops"] == 2
    assert state.results[anotherhost]["ops"] == 2

    # And w/o errors
    assert state.results[somehost]["error_ops"] == 0
    assert state.results[anotherhost]["error_ops"] == 0

    # And with the different modes
    run_ops(state, serial=True)
    run_ops(state, no_wait=True)

    disconnect_all(state)
def test_file_upload_op(self):
    inventory = make_inventory()
    state = State(inventory, Config())
    connect_all(state)

    with patch('pyinfra.operations.files.path.isfile', lambda *args, **kwargs: True):
        # Test normal
        add_op(
            state, files.put,
            {'First op name'},
            'files/file.txt',
            '/home/vagrant/file.txt',
        )

        # And with sudo
        add_op(
            state, files.put,
            'files/file.txt',
            '/home/vagrant/file.txt',
            sudo=True,
            sudo_user='******',
        )

        # And with su
        add_op(
            state, files.put,
            'files/file.txt',
            '/home/vagrant/file.txt',
            sudo=True,
            su_user='******',
        )

    op_order = state.get_op_order()

    # Ensure we have all ops
    assert len(op_order) == 3

    first_op_hash = op_order[0]

    # Ensure first op is the right one
    assert state.op_meta[first_op_hash]['names'] == {'First op name'}

    somehost = inventory.get_host('somehost')
    anotherhost = inventory.get_host('anotherhost')

    # Ensure first op has the right (upload) command
    assert state.ops[somehost][first_op_hash]['commands'] == [
        ('upload', 'files/file.txt', '/home/vagrant/file.txt'),
    ]

    # Ensure second op has sudo/sudo_user
    assert state.op_meta[op_order[1]]['sudo'] is True
    assert state.op_meta[op_order[1]]['sudo_user'] == 'pyinfra'

    # Ensure third has su_user
    assert state.op_meta[op_order[2]]['su_user'] == 'pyinfra'

    # Check run ops works
    with patch('pyinfra.api.util.open', mock_open(read_data='test!'), create=True):
        run_ops(state)

    # Ensure ops completed OK
    assert state.results[somehost]['success_ops'] == 3
    assert state.results[somehost]['ops'] == 3
    assert state.results[anotherhost]['success_ops'] == 3
    assert state.results[anotherhost]['ops'] == 3

    # And w/o errors
    assert state.results[somehost]['error_ops'] == 0
    assert state.results[anotherhost]['error_ops'] == 0
def test_op(self):
    inventory = make_inventory()
    somehost = inventory.get_host('somehost')
    anotherhost = inventory.get_host('anotherhost')

    state = State(inventory, Config())

    # Enable printing on this test to catch any exceptions in the formatting
    state.print_output = True
    state.print_fact_info = True
    state.print_fact_output = True

    connect_all(state)

    add_op(
        state, files.file,
        '/var/log/pyinfra.log',
        user='******',
        group='pyinfra',
        mode='644',
        sudo=True,
        sudo_user='******',
        su_user='******',
        ignore_errors=True,
        env={
            'TEST': 'what',
        },
    )

    op_order = state.get_op_order()

    # Ensure we have an op
    assert len(op_order) == 1

    first_op_hash = op_order[0]

    # Ensure the op name
    assert state.op_meta[first_op_hash]['names'] == {'Files/File'}

    # Ensure the commands
    assert state.ops[somehost][first_op_hash]['commands'] == [
        'touch /var/log/pyinfra.log',
        'chmod 644 /var/log/pyinfra.log',
        'chown pyinfra:pyinfra /var/log/pyinfra.log',
    ]

    # Ensure the meta
    meta = state.op_meta[first_op_hash]
    assert meta['sudo'] is True
    assert meta['sudo_user'] == 'test_sudo'
    assert meta['su_user'] == 'test_su'
    assert meta['ignore_errors'] is True

    # Ensure run ops works
    run_ops(state)

    # Ensure ops completed OK
    assert state.results[somehost]['success_ops'] == 1
    assert state.results[somehost]['ops'] == 1
    assert state.results[anotherhost]['success_ops'] == 1
    assert state.results[anotherhost]['ops'] == 1

    # And w/o errors
    assert state.results[somehost]['error_ops'] == 0
    assert state.results[anotherhost]['error_ops'] == 0

    # And with the different modes
    run_ops(state, serial=True)
    run_ops(state, no_wait=True)
def test_connect_all(self):
    inventory = make_inventory(hosts=('@docker/not-an-image',))
    state = State(inventory, Config())
    connect_all(state)

    self.assertEqual(len(state.active_hosts), 1)
def test_connect_all_error(self):
    inventory = make_inventory(hosts=('@docker/a-broken-image',))
    state = State(inventory, Config())

    with self.assertRaises(PyinfraError):
        connect_all(state)
def connect_to_hosts(self):
    connect_all(self.state)
# the first argument is a tuple of (list of all hosts, global/ALL data)
inventory = Inventory((hosts, {}), **groups)

# Now we create a new config (w/optional args)
config = Config(
    FAIL_PERCENT=81,
    CONNECT_TIMEOUT=5,
)

# Setup the pyinfra state for this deploy
state = State(inventory, config)
state.add_callback_handler(StateCallback())

# Connect to all the hosts
print("Connecting...")
connect_all(state)

# Start adding operations
print("Generating operations...")
add_op(
    state,
    server.user,
    user="******",
    home="/home/pyinfra",
    shell="/bin/bash",
    sudo=True,
)

add_op(
    state,
    server.group,
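The example above is cut off mid-way through its second `add_op` call, so the remainder of that script is not shown here. As a minimal, hedged sketch (not the original code; it assumes the same `state` object and the `run_ops`/`disconnect_all` helpers used in the other examples in this listing), an API-driven deploy of this shape typically finishes by executing the queued operations and then disconnecting:

# Sketch only, assuming the queued `state` from above
# Execute every operation added via add_op, in order
print("Running operations...")
run_ops(state)

# Close the connections opened by connect_all
disconnect_all(state)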
def test_nested_deploy(self):
    inventory = make_inventory()
    somehost = inventory.get_host('somehost')

    state = State(inventory, Config())

    # Enable printing on this test to catch any exceptions in the formatting
    state.print_output = True
    state.print_input = True
    state.print_fact_info = True
    state.print_noop_info = True

    connect_all(state)

    @deploy
    def test_nested_deploy(state=None, host=None):
        server.shell(
            commands=['echo nested command'],
            state=state,
            host=host,
        )

    @deploy
    def test_deploy(state=None, host=None):
        server.shell(
            commands=['echo first command'],
            state=state,
            host=host,
        )
        test_nested_deploy(
            state=state,
            host=host,
        )
        server.shell(
            commands=['echo second command'],
            state=state,
            host=host,
        )

    add_deploy(state, test_deploy)

    op_order = state.get_op_order()

    # Ensure we have the three ops
    assert len(op_order) == 3

    first_op_hash = op_order[0]
    assert state.op_meta[first_op_hash]['names'] == {'test_deploy | Server/Shell'}
    assert state.ops[somehost][first_op_hash]['commands'] == [
        StringCommand('echo first command'),
    ]

    second_op_hash = op_order[1]
    assert state.op_meta[second_op_hash]['names'] == {
        'test_deploy | test_nested_deploy | Server/Shell',
    }
    assert state.ops[somehost][second_op_hash]['commands'] == [
        StringCommand('echo nested command'),
    ]

    third_op_hash = op_order[2]
    assert state.op_meta[third_op_hash]['names'] == {'test_deploy | Server/Shell'}
    assert state.ops[somehost][third_op_hash]['commands'] == [
        StringCommand('echo second command'),
    ]
def test_connect_all(self):
    inventory = make_inventory(hosts=("@dockerssh/somehost:not-an-image",))
    state = State(inventory, Config())
    connect_all(state)

    assert len(state.active_hosts) == 1
def test_connect_with_dss_ssh_key_password(self):
    state = State(
        make_inventory(hosts=(
            ("somehost", {"ssh_key": "testkey", "ssh_key_password": "******"}),
        )),
        Config(),
    )

    with patch("pyinfra.connectors.ssh.path.isfile", lambda *args, **kwargs: True), patch(
        "pyinfra.connectors.ssh.RSAKey.from_private_key_file",
    ) as fake_rsa_key_open, patch(
        "pyinfra.connectors.ssh.DSSKey.from_private_key_file",
    ) as fake_dss_key_open:  # noqa

        def fake_rsa_key_open_fail(*args, **kwargs):
            if "password" not in kwargs:
                raise PasswordRequiredException
            raise SSHException

        fake_rsa_key_open.side_effect = fake_rsa_key_open_fail

        fake_dss_key = MagicMock()

        def fake_dss_key_func(*args, **kwargs):
            if "password" not in kwargs:
                raise PasswordRequiredException
            return fake_dss_key

        fake_dss_key_open.side_effect = fake_dss_key_func

        connect_all(state)

        # Check the key was created properly
        fake_dss_key_open.assert_called_with(filename="testkey", password="******")

        # And check the Paramiko SSH call was correct
        self.fake_connect_mock.assert_called_with(
            "somehost",
            allow_agent=False,
            look_for_keys=False,
            pkey=fake_dss_key,
            timeout=10,
            username="******",
            _pyinfra_ssh_forward_agent=None,
            _pyinfra_ssh_config_file=None,
            _pyinfra_ssh_known_hosts_file=None,
            _pyinfra_ssh_strict_host_key_checking=None,
            _pyinfra_ssh_paramiko_connect_kwargs=None,
        )

        # Check that loading the same key again is cached in the state
        second_state = State(
            make_inventory(hosts=(
                ("somehost", {"ssh_key": "testkey"}),
            )),
            Config(),
        )
        second_state.private_keys = state.private_keys

        connect_all(second_state)
def _main(
    inventory, commands, verbosity,
    user, port, key, key_password, password,
    sudo, sudo_user, su_user,
    parallel, fail_percent,
    dry, limit, no_wait, serial,
    debug, debug_data, debug_state,
    facts=None,
    operations=None,
):
    print()
    print('### {0}'.format(click.style('Welcome to pyinfra', bold=True)))
    print()

    # Setup logging
    log_level = logging.DEBUG if debug else logging.INFO
    setup_logging(log_level)

    deploy_dir = getcwd()
    potential_deploy_dirs = []

    # This is the most common case: we have a deploy file so use its
    # pathname - we only look at the first file as we can't have multiple
    # deploy directories.
    if commands[0].endswith('.py'):
        deploy_file_dir, _ = path.split(commands[0])
        above_deploy_file_dir, _ = path.split(deploy_file_dir)

        deploy_dir = deploy_file_dir

        potential_deploy_dirs.extend((
            deploy_file_dir, above_deploy_file_dir,
        ))

    # If we have a valid inventory, look in its path and its parent for
    # group_data or config.py to indicate deploy_dir (--fact, --run).
    if inventory.endswith('.py') and path.isfile(inventory):
        inventory_dir, _ = path.split(inventory)
        above_inventory_dir, _ = path.split(inventory_dir)

        potential_deploy_dirs.extend((
            inventory_dir, above_inventory_dir,
        ))

    for potential_deploy_dir in potential_deploy_dirs:
        logger.debug('Checking potential directory: {0}'.format(
            potential_deploy_dir,
        ))

        if any((
            path.isdir(path.join(potential_deploy_dir, 'group_data')),
            path.isfile(path.join(potential_deploy_dir, 'config.py')),
        )):
            logger.debug('Setting directory to: {0}'.format(potential_deploy_dir))
            deploy_dir = potential_deploy_dir
            break

    # List facts
    if commands[0] == 'fact':
        command = 'fact'

        fact_names = commands[1:]
        facts = []

        for name in fact_names:
            args = None

            if ':' in name:
                name, args = name.split(':', 1)
                args = args.split(',')

            if not is_fact(name):
                raise CliError('No fact: {0}'.format(name))

            facts.append((name, args))

        commands = facts

    # Execute a raw command with server.shell
    elif commands[0] == 'exec':
        command = 'exec'
        commands = commands[1:]

    # Deploy file(s)
    elif all(cmd.endswith('.py') for cmd in commands):
        command = 'deploy'
        commands = commands[0:]

        # Check each file exists
        for file in commands:
            if not path.exists(file):
                raise CliError('No deploy file: {0}'.format(file))

    # Operation w/optional args
    elif len(commands) == 2:
        command = 'op'
        commands = get_operation_and_args(
            commands[0], commands[1],
        )

    else:
        raise CliError('''Invalid commands: {0}

    Command usage:
    pyinfra INVENTORY deploy_web.py [deploy_db.py]...
    pyinfra INVENTORY server.user pyinfra,home=/home/pyinfra
    pyinfra INVENTORY exec -- echo "hello world"
    pyinfra INVENTORY fact os [users]...'''.format(commands))

    print('--> Loading config...')

    # Load up any config.py from the filesystem
    config = load_config(deploy_dir)

    # Load any hooks/config from the deploy file
    if command == 'deploy':
        load_deploy_config(commands[0], config)

    # Arg based config overrides
    if sudo:
        config.SUDO = True

    if sudo_user:
        config.SUDO_USER = sudo_user

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    print('--> Loading inventory...')

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        deploy_dir=deploy_dir,
        limit=limit,
        ssh_user=user,
        ssh_key=key,
        ssh_key_password=key_password,
        ssh_password=password,
        ssh_port=port,
    )

    # If --debug-data dump & exit
    if debug_data:
        print_inventory(inventory)
        _exit()

    # Attach to pseudo inventory
    pseudo_inventory.set(inventory)

    # Create/set the state
    state = State(inventory, config)
    state.is_cli = True
    state.print_lines = True
    state.deploy_dir = deploy_dir

    # Setup printing on the new state
    print_output = verbosity > 0
    print_fact_output = verbosity > 1

    state.print_output = print_output  # -v
    state.print_fact_info = print_output  # -v
    state.print_fact_output = print_fact_output  # -vv

    # Attach to pseudo state
    pseudo_state.set(state)

    # Setup the data to be passed to config hooks
    hook_data = FallbackAttrData(
        state.inventory.get_override_data(),
        state.inventory.get_group_data(inventory_group),
        state.inventory.get_data(),
    )

    # Run the before_connect hook if provided
    run_hook(state, 'before_connect', hook_data)

    # Connect to all the servers
    print('--> Connecting to hosts...')
    with progress_spinner(state.inventory) as progress:
        connect_all(state, progress=progress)

    # Run the before_facts hook if provided
    run_hook(state, 'before_facts', hook_data)

    # Just getting a fact?
    #
    if command == 'fact':
        print()
        print('--> Gathering facts...')

        # Print facts as we get them
        state.print_fact_info = True

        # Print fact output with -v
        state.print_fact_output = print_output

        fact_data = {}

        with progress_spinner(commands) as progress:
            for i, (name, args) in enumerate(commands):
                fact_data[name] = get_facts(
                    state, name,
                    args=args,
                )
                progress()

        print_facts(fact_data)
        _exit()

    # Prepare the deploy!
    #

    # Execute a raw command with server.shell
    if command == 'exec':
        # Print the output of the command
        state.print_output = True

        add_op(
            state, server.shell,
            ' '.join(commands),
        )

    # Deploy file(s)
    elif command == 'deploy':
        print()
        print('--> Preparing operations...')

        # Number of "steps" to make = number of files * number of hosts
        prepare_steps = len(commands) * len(state.inventory)

        with progress_spinner(prepare_steps) as progress:
            for filename in commands:
                load_deploy_file(state, filename, progress=progress)
                progress()

    # Operation w/optional args
    elif command == 'op':
        print()
        print('--> Preparing operation...')

        op, args = commands
        add_op(state, op, *args[0], **args[1])

    # Always show meta output
    print()
    print('--> Proposed changes:')
    print_meta(state, inventory)

    # If --debug-state, dump state (ops, op order, op meta) now & exit
    if debug_state:
        dump_state(state)
        _exit()

    # Run the operations we generated with the deploy file
    if dry:
        _exit()

    print()

    # Run the before_deploy hook if provided
    run_hook(state, 'before_deploy', hook_data)

    print('--> Beginning operation run...')

    # Number of "steps" to make = number of operations * number of hosts
    operation_steps = len(state.op_order) * len(state.inventory)

    with progress_spinner(operation_steps) as progress:
        run_ops(
            state,
            serial=serial,
            no_wait=no_wait,
            progress=progress,
        )

    # Run the after_deploy hook if provided
    run_hook(state, 'after_deploy', hook_data)

    print('--> Results:')
    print_results(state, inventory)

    _exit()
def _main(
    inventory, operations, verbosity,
    user, port, key, key_password, password,
    sudo, sudo_user, su_user,
    parallel, fail_percent,
    dry, limit, no_wait, serial,
    debug, debug_data, debug_facts, debug_operations,
    facts=None,
    print_operations=None,
):
    print()
    print('### {0}'.format(click.style('Welcome to pyinfra', bold=True)))
    print()

    # Setup logging
    log_level = logging.DEBUG if debug else logging.INFO
    setup_logging(log_level)

    # Bootstrap any virtualenv
    init_virtualenv()

    deploy_dir = getcwd()
    potential_deploy_dirs = []

    # This is the most common case: we have a deploy file so use its
    # pathname - we only look at the first file as we can't have multiple
    # deploy directories.
    if operations[0].endswith('.py'):
        deploy_file_dir, _ = path.split(operations[0])
        above_deploy_file_dir, _ = path.split(deploy_file_dir)

        deploy_dir = deploy_file_dir

        potential_deploy_dirs.extend((
            deploy_file_dir, above_deploy_file_dir,
        ))

    # If we have a valid inventory, look in its path and its parent for
    # group_data or config.py to indicate deploy_dir (--fact, --run).
    if inventory.endswith('.py') and path.isfile(inventory):
        inventory_dir, _ = path.split(inventory)
        above_inventory_dir, _ = path.split(inventory_dir)

        potential_deploy_dirs.extend((
            inventory_dir, above_inventory_dir,
        ))

    for potential_deploy_dir in potential_deploy_dirs:
        logger.debug('Checking potential directory: {0}'.format(
            potential_deploy_dir,
        ))

        if any((
            path.isdir(path.join(potential_deploy_dir, 'group_data')),
            path.isfile(path.join(potential_deploy_dir, 'config.py')),
        )):
            logger.debug('Setting directory to: {0}'.format(potential_deploy_dir))
            deploy_dir = potential_deploy_dir
            break

    # List facts
    if operations[0] == 'fact':
        command = 'fact'

        fact_names = operations[1:]
        facts = []

        for name in fact_names:
            args = None

            if ':' in name:
                name, args = name.split(':', 1)
                args = args.split(',')

            if not is_fact(name):
                raise CliError('No fact: {0}'.format(name))

            facts.append((name, args))

        operations = facts

    # Execute a raw command with server.shell
    elif operations[0] == 'exec':
        command = 'exec'
        operations = operations[1:]

    # Deploy file(s)
    elif all(cmd.endswith('.py') for cmd in operations):
        command = 'deploy'
        operations = operations[0:]

        # Check each file exists
        for file in operations:
            if not path.exists(file):
                raise CliError('No deploy file: {0}'.format(file))

    # Operation w/optional args (<module>.<op> ARG1 ARG2 ...)
    elif len(operations[0].split('.')) == 2:
        command = 'op'
        operations = get_operation_and_args(operations)

    else:
        raise CliError('''Invalid operations: {0}

    Operation usage:
    pyinfra INVENTORY deploy_web.py [deploy_db.py]...
    pyinfra INVENTORY server.user pyinfra home=/home/pyinfra
    pyinfra INVENTORY exec -- echo "hello world"
    pyinfra INVENTORY fact os [users]...'''.format(operations))

    # Create an empty/uninitialised state object
    state = State()
    pseudo_state.set(state)

    # Setup printing on the new state
    print_output = verbosity > 0
    print_fact_output = verbosity > 1

    state.print_output = print_output  # -v
    state.print_fact_info = print_output  # -v
    state.print_fact_output = print_fact_output  # -vv

    print('--> Loading config...')

    # Load up any config.py from the filesystem
    config = load_config(deploy_dir)

    # Load any hooks/config from the deploy file
    if command == 'deploy':
        load_deploy_config(operations[0], config)

    # Arg based config overrides
    if sudo:
        config.SUDO = True

    if sudo_user:
        config.SUDO_USER = sudo_user

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    print('--> Loading inventory...')

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        deploy_dir=deploy_dir,
        ssh_port=port,
        ssh_user=user,
        ssh_key=key,
        ssh_key_password=key_password,
        ssh_password=password,
    )

    # Apply any --limit to the inventory
    limit_hosts = None

    if limit:
        try:
            limit_hosts = inventory.get_group(limit)
        except NoGroupError:
            limits = limit.split(',')
            limit_hosts = [
                host for host in inventory
                if any(fnmatch(host.name, limit) for limit in limits)
            ]

    # Attach to pseudo inventory
    pseudo_inventory.set(inventory)

    # Initialise the state, passing any initial --limit
    state.init(inventory, config, initial_limit=limit_hosts)

    # If --debug-data dump & exit
    if debug_data:
        print_inventory(state)
        _exit()

    # Set the deploy directory
    state.deploy_dir = deploy_dir

    # Setup the data to be passed to config hooks
    hook_data = FallbackDict(
        state.inventory.get_override_data(),
        state.inventory.get_group_data(inventory_group),
        state.inventory.get_data(),
    )

    # Run the before_connect hook if provided
    run_hook(state, 'before_connect', hook_data)

    # Connect to all the servers
    print('--> Connecting to hosts...')
    connect_all(state)

    # Run the before_facts hook if provided
    run_hook(state, 'before_facts', hook_data)

    # Just getting a fact?
    #
    if command == 'fact':
        print()
        print('--> Gathering facts...')

        # Print facts as we get them
        state.print_fact_info = True

        # Print fact output with -v
        state.print_fact_output = print_output

        fact_data = {}

        for i, command in enumerate(operations):
            name, args = command
            fact_data[name] = get_facts(
                state, name,
                args=args,
            )

        print_facts(fact_data)
        _exit()

    # Prepare the deploy!
    #

    # Execute a raw command with server.shell
    if command == 'exec':
        # Print the output of the command
        state.print_output = True

        add_op(
            state, server.shell,
            ' '.join(operations),
        )

    # Deploy file(s)
    elif command == 'deploy':
        print()
        print('--> Preparing operations...')

        # Number of "steps" to make = number of files * number of hosts
        for i, filename in enumerate(operations):
            logger.info('Loading: {0}'.format(click.style(filename, bold=True)))
            state.current_op_file = i
            load_deploy_file(state, filename)

    # Operation w/optional args
    elif command == 'op':
        print()
        print('--> Preparing operation...')

        op, args = operations
        add_op(state, op, *args[0], **args[1])

    # Always show meta output
    print()
    print('--> Proposed changes:')
    print_meta(state)

    # If --debug-facts or --debug-operations, print and exit
    if debug_facts or debug_operations:
        if debug_facts:
            print_state_facts(state)

        if debug_operations:
            print_state_operations(state)

        _exit()

    # Run the operations we generated with the deploy file
    if dry:
        _exit()

    print()

    # Run the before_deploy hook if provided
    run_hook(state, 'before_deploy', hook_data)

    print('--> Beginning operation run...')
    run_ops(state, serial=serial, no_wait=no_wait)

    # Run the after_deploy hook if provided
    run_hook(state, 'after_deploy', hook_data)

    print('--> Results:')
    print_results(state)

    # Triggers any executor disconnect requirements
    disconnect_all(state)

    _exit()
def test_connect_all(self):
    inventory = make_inventory(hosts=('@local',))
    state = State(inventory, Config())
    connect_all(state)

    assert len(state.active_hosts) == 1
def test_pipelining_active_works(self):
    state = State(make_inventory(), Config())
    connect_all(state)

    state.pipelining = True
    add_op(state, server.shell, 'echo "hi"')
def _main(
    inventory, operations, verbosity,
    user, port, key, key_password, password,
    winrm_username, winrm_password, winrm_port,
    shell_executable,
    sudo, sudo_user, use_sudo_password, su_user,
    parallel, fail_percent,
    dry, limit, no_wait, serial,
    quiet, debug, debug_data, debug_facts, debug_operations,
    facts=None,
    print_operations=None,
    support=None,
):
    if not debug and not sys.warnoptions:
        warnings.simplefilter('ignore')

    # Setup logging
    log_level = logging.INFO
    if debug:
        log_level = logging.DEBUG
    elif quiet:
        log_level = logging.WARNING
    setup_logging(log_level)

    # Bootstrap any virtualenv
    init_virtualenv()

    deploy_dir = getcwd()
    potential_deploy_dirs = []

    # This is the most common case: we have a deploy file so use its
    # pathname - we only look at the first file as we can't have multiple
    # deploy directories.
    if operations[0].endswith('.py'):
        deploy_file_dir, _ = path.split(operations[0])
        above_deploy_file_dir, _ = path.split(deploy_file_dir)

        deploy_dir = deploy_file_dir

        potential_deploy_dirs.extend((
            deploy_file_dir, above_deploy_file_dir,
        ))

    # If we have a valid inventory, look in its path and its parent for
    # group_data or config.py to indicate deploy_dir (--fact, --run).
    if inventory.endswith('.py') and path.isfile(inventory):
        inventory_dir, _ = path.split(inventory)
        above_inventory_dir, _ = path.split(inventory_dir)

        potential_deploy_dirs.extend((
            inventory_dir, above_inventory_dir,
        ))

    for potential_deploy_dir in potential_deploy_dirs:
        logger.debug('Checking potential directory: {0}'.format(
            potential_deploy_dir,
        ))

        if any((
            path.isdir(path.join(potential_deploy_dir, 'group_data')),
            path.isfile(path.join(potential_deploy_dir, 'config.py')),
        )):
            logger.debug('Setting directory to: {0}'.format(potential_deploy_dir))
            deploy_dir = potential_deploy_dir
            break

    # Create an empty/uninitialised state object
    state = State()

    # Set the deploy directory
    state.deploy_dir = deploy_dir

    pseudo_state.set(state)

    if verbosity > 0:
        state.print_fact_info = True
        state.print_noop_info = True

    if verbosity > 1:
        state.print_input = state.print_fact_input = True

    if verbosity > 2:
        state.print_output = state.print_fact_output = True

    if not quiet:
        click.echo('--> Loading config...', err=True)

    # Load up any config.py from the filesystem
    config = load_config(deploy_dir)

    # Make a copy before we overwrite
    original_operations = operations

    # Debug (print) inventory + group data
    if operations[0] == 'debug-inventory':
        command = 'debug-inventory'

    # Get all non-arg facts
    elif operations[0] == 'all-facts':
        command = 'fact'

        fact_names = []

        for fact_name in get_fact_names():
            fact_class = get_fact_class(fact_name)
            if (
                not issubclass(fact_class, ShortFactBase)
                and not callable(fact_class.command)
            ):
                fact_names.append(fact_name)

        operations = [(name, None) for name in fact_names]

    # Get one or more facts
    elif operations[0] == 'fact':
        command = 'fact'

        fact_names = operations[1:]
        facts = []

        for name in fact_names:
            args = None

            if ':' in name:
                name, args = name.split(':', 1)
                args = args.split(',')

            if not is_fact(name):
                raise CliError('No fact: {0}'.format(name))

            facts.append((name, args))

        operations = facts

    # Execute a raw command with server.shell
    elif operations[0] == 'exec':
        command = 'exec'
        operations = operations[1:]

    # Execute one or more deploy files
    elif all(cmd.endswith('.py') for cmd in operations):
        command = 'deploy'
        operations = operations[0:]

        for file in operations:
            if not path.exists(file):
                raise CliError('No deploy file: {0}'.format(file))

    # Operation w/optional args (<module>.<op> ARG1 ARG2 ...)
    elif len(operations[0].split('.')) == 2:
        command = 'op'
        operations = get_operation_and_args(operations)

    else:
        raise CliError('''Invalid operations: {0}

    Operation usage:
    pyinfra INVENTORY deploy_web.py [deploy_db.py]...
    pyinfra INVENTORY server.user pyinfra home=/home/pyinfra
    pyinfra INVENTORY exec -- echo "hello world"
    pyinfra INVENTORY fact os [users]...'''.format(operations))

    # Load any hooks/config from the deploy file
    if command == 'deploy':
        load_deploy_config(operations[0], config)

    # Arg based config overrides
    if sudo:
        config.SUDO = True

    if sudo_user:
        config.SUDO_USER = sudo_user

    if use_sudo_password:
        config.USE_SUDO_PASSWORD = use_sudo_password

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if shell_executable:
        config.SHELL = shell_executable

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    if not quiet:
        click.echo('--> Loading inventory...', err=True)

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        deploy_dir=deploy_dir,
        ssh_port=port,
        ssh_user=user,
        ssh_key=key,
        ssh_key_password=key_password,
        ssh_password=password,
        winrm_username=winrm_username,
        winrm_password=winrm_password,
        winrm_port=winrm_port,
    )

    # Attach to pseudo inventory
    pseudo_inventory.set(inventory)

    # Now that we have inventory, apply --limit config override
    initial_limit = None
    if limit:
        all_limit_hosts = []

        for limiter in limit:
            try:
                limit_hosts = inventory.get_group(limiter)
            except NoGroupError:
                limits = limiter.split(',')
                if len(limits) > 1:
                    logger.warning((
                        'Specifying comma separated --limit values is deprecated, '
                        'please use multiple --limit options.'
                    ))

                limit_hosts = [
                    host for host in inventory
                    if any(fnmatch(host.name, match) for match in limits)
                ]

            all_limit_hosts.extend(limit_hosts)

        initial_limit = list(set(all_limit_hosts))

    # Initialise the state
    state.init(inventory, config, initial_limit=initial_limit)

    # If --debug-data dump & exit
    if command == 'debug-inventory' or debug_data:
        if debug_data:
            logger.warning((
                '--debug-data is deprecated, '
                'please use `pyinfra INVENTORY debug-inventory` instead.'
            ))
        print_inventory(state)
        _exit()

    # Connect to all the servers
    if not quiet:
        click.echo(err=True)
        click.echo('--> Connecting to hosts...', err=True)
    connect_all(state)

    # Just getting a fact?
    #
    if command == 'fact':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Gathering facts...', err=True)

        state.print_fact_info = True
        fact_data = {}

        for i, command in enumerate(operations):
            name, args = command
            fact_key = name
            if args:
                fact_key = '{0}{1}'.format(name, tuple(args))
            try:
                fact_data[fact_key] = get_facts(
                    state, name,
                    args=args,
                    apply_failed_hosts=False,
                )
            except PyinfraError:
                pass

        print_facts(fact_data)
        _exit()

    # Prepare the deploy!
    #

    # Execute a raw command with server.shell
    if command == 'exec':
        # Print the output of the command
        state.print_output = True

        add_op(
            state, server.shell,
            ' '.join(operations),
            _allow_cli_mode=True,
        )

    # Deploy file(s)
    elif command == 'deploy':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Preparing operations...', err=True)

        # Number of "steps" to make = number of files * number of hosts
        for i, filename in enumerate(operations):
            logger.info('Loading: {0}'.format(click.style(filename, bold=True)))
            state.current_op_file = i
            load_deploy_file(state, filename)

    # Operation w/optional args
    elif command == 'op':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Preparing operation...', err=True)

        op, args = operations
        args, kwargs = args
        kwargs['_allow_cli_mode'] = True

        def print_host_ready(host):
            logger.info('{0}{1} {2}'.format(
                host.print_prefix,
                click.style('Ready:', 'green'),
                click.style(original_operations[0], bold=True),
            ))

        kwargs['_after_host_callback'] = print_host_ready

        add_op(state, op, *args, **kwargs)

    # Always show meta output
    if not quiet:
        click.echo(err=True)
        click.echo('--> Proposed changes:', err=True)
    print_meta(state)

    # If --debug-facts or --debug-operations, print and exit
    if debug_facts or debug_operations:
        if debug_facts:
            print_state_facts(state)

        if debug_operations:
            print_state_operations(state)

        _exit()

    # Run the operations we generated with the deploy file
    if dry:
        _exit()

    if not quiet:
        click.echo(err=True)

    if not quiet:
        click.echo('--> Beginning operation run...', err=True)
    run_ops(state, serial=serial, no_wait=no_wait)

    if not quiet:
        click.echo('--> Results:', err=True)
    print_results(state)

    _exit()
def test_connect_all(self):
    inventory = make_inventory()
    state = State(inventory, Config())
    connect_all(state)

    self.assertEqual(len(state.active_hosts), 2)