def test_connect_with_ssh_key_password(self):
    state = State(make_inventory(hosts=(
        ('somehost', {'ssh_key': 'testkey', 'ssh_key_password': '******'}),
    )), Config())

    with patch('pyinfra.api.ssh.path.isfile', lambda *args, **kwargs: True), \
            patch('pyinfra.api.ssh.RSAKey.from_private_key_file') as fake_key_open:

        def fake_key_open_fail(*args, **kwargs):
            if 'password' not in kwargs:
                raise PasswordRequiredException()

        fake_key_open.side_effect = fake_key_open_fail

        fake_key = FakeRSAKey()
        fake_key_open.return_value = fake_key

        state.deploy_dir = '/'

        connect_all(state)

        # Check the key was created properly
        fake_key_open.assert_called_with(filename='testkey', password='******')
def test_connect_with_ssh_key_missing_password(self):
    state = State(make_inventory(hosts=(
        ('somehost', {'ssh_key': 'testkey'}),
    )), Config())

    with patch(
        'pyinfra.api.connectors.ssh.path.isfile',
        lambda *args, **kwargs: True,
    ), patch(
        'pyinfra.api.connectors.ssh.RSAKey.from_private_key_file',
    ) as fake_key_open:

        def fake_key_open_fail(*args, **kwargs):
            raise PasswordRequiredException

        fake_key_open.side_effect = fake_key_open_fail

        fake_key = FakeRSAKey()
        fake_key_open.return_value = fake_key

        state.deploy_dir = '/'

        with self.assertRaises(PyinfraError) as e:
            connect_all(state)

        assert e.exception.args[0] == (
            'Private key file (testkey) is encrypted, set ssh_key_password '
            'to use this key'
        )
def test_connect_with_ssh_key(self):
    state = State(make_inventory(hosts=(
        ('somehost', {'ssh_key': 'testkey'}),
    )), Config())

    with patch('pyinfra.api.ssh.path.isfile', lambda *args, **kwargs: True), \
            patch('pyinfra.api.ssh.RSAKey.from_private_key_file') as fake_key_open:

        fake_key = FakeRSAKey()
        fake_key_open.return_value = fake_key

        state.deploy_dir = '/'

        connect_all(state)

        # Check the key was created properly
        fake_key_open.assert_called_with(filename='testkey')

        # And check the Paramiko SSH call was correct
        self.fake_connect_mock.assert_called_with(
            'somehost',
            allow_agent=False,
            look_for_keys=False,
            pkey=fake_key,
            port=22,
            timeout=10,
            username='******',
        )
def test_connect_with_ssh_key_wrong_password(self):
    state = State(make_inventory(hosts=(
        ('somehost', {'ssh_key': 'testkey', 'ssh_key_password': '******'}),
    )), Config())

    with patch(
        'pyinfra.api.connectors.ssh.path.isfile',
        lambda *args, **kwargs: True,
    ), patch(
        'pyinfra.api.connectors.ssh.RSAKey.from_private_key_file',
    ) as fake_key_open:

        def fake_key_open_fail(*args, **kwargs):
            if 'password' not in kwargs:
                raise PasswordRequiredException
            raise SSHException

        fake_key_open.side_effect = fake_key_open_fail

        fake_key = FakeRSAKey()
        fake_key_open.return_value = fake_key

        state.deploy_dir = '/'

        with self.assertRaises(PyinfraError) as e:
            connect_all(state)

        assert e.exception.args[0] == 'Incorrect password for private key: testkey'
def test_connect_with_rsa_ssh_key_password_from_prompt(self):
    state = State(make_inventory(hosts=(
        ('somehost', {'ssh_key': 'testkey'}),
    )), Config())

    with patch(
        'pyinfra.api.connectors.ssh.path.isfile',
        lambda *args, **kwargs: True,
    ), patch(
        'pyinfra.api.connectors.ssh.getpass',
        lambda *args, **kwargs: 'testpass',
    ), patch(
        'pyinfra.api.connectors.ssh.RSAKey.from_private_key_file',
    ) as fake_key_open:

        fake_key = MagicMock()

        def fake_key_open_fail(*args, **kwargs):
            if 'password' not in kwargs:
                raise PasswordRequiredException()
            return fake_key

        fake_key_open.side_effect = fake_key_open_fail

        state.deploy_dir = '/'

        pyinfra.is_cli = True
        connect_all(state)
        pyinfra.is_cli = False

        # Check the key was created properly
        fake_key_open.assert_called_with(filename='testkey', password='******')

        # Check the certificate file was then loaded
        fake_key.load_certificate.assert_called_with('testkey.pub')
def test_connect_with_dss_ssh_key_password(self):
    state = State(make_inventory(hosts=(
        ('somehost', {'ssh_key': 'testkey', 'ssh_key_password': '******'}),
    )), Config())

    with patch('pyinfra.api.connectors.ssh.path.isfile', lambda *args, **kwargs: True), \
            patch('pyinfra.api.connectors.ssh.RSAKey.from_private_key_file') as fake_rsa_key_open, \
            patch('pyinfra.api.connectors.ssh.DSSKey.from_private_key_file') as fake_dss_key_open:  # noqa

        def fake_rsa_key_open_fail(*args, **kwargs):
            if 'password' not in kwargs:
                raise PasswordRequiredException
            raise SSHException

        fake_rsa_key_open.side_effect = fake_rsa_key_open_fail

        fake_dss_key = MagicMock()

        def fake_dss_key_func(*args, **kwargs):
            if 'password' not in kwargs:
                raise PasswordRequiredException
            return fake_dss_key

        fake_dss_key_open.side_effect = fake_dss_key_func

        state.deploy_dir = '/'

        connect_all(state)

        # Check the key was created properly
        fake_dss_key_open.assert_called_with(filename='testkey', password='******')

        # And check the Paramiko SSH call was correct
        self.fake_connect_mock.assert_called_with(
            'somehost',
            allow_agent=False,
            look_for_keys=False,
            pkey=fake_dss_key,
            timeout=10,
            username='******',
        )

    # Check that loading the same key again is cached in the state
    second_state = State(make_inventory(hosts=(
        ('somehost', {'ssh_key': 'testkey'}),
    )), Config())
    second_state.private_keys = state.private_keys

    connect_all(second_state)
def _do_test_deploy(self):
    correct_op_name_and_host_names = [
        ('First main operation', True),  # true for all hosts
        ('Second main operation', ('somehost',)),
        ('tests/test_deploy/a_task.py | First task operation', ('anotherhost',)),
        ('tests/test_deploy/a_task.py | Second task operation', ('anotherhost',)),
        ('tests/test_deploy/a_task.py | First task operation', True),
        ('tests/test_deploy/a_task.py | Second task operation', True),
        ('Loop-0 main operation', True),
        ('Loop-1 main operation', True),
        ('Third main operation', True),
    ]

    hosts = ['somehost', 'anotherhost', 'someotherhost']
    shuffle(hosts)

    inventory = make_inventory(hosts=hosts)
    state = State(inventory, Config())
    state.deploy_dir = path.join('tests', 'test_deploy')

    connect_all(state)
    pseudo_state.set(state)

    pyinfra.is_cli = True
    load_deploy_file(state, path.join(state.deploy_dir, 'deploy.py'))
    pyinfra.is_cli = False

    op_order = state.get_op_order()

    for i, (correct_op_name, correct_host_names) in enumerate(
        correct_op_name_and_host_names,
    ):
        op_hash = op_order[i]
        op_meta = state.op_meta[op_hash]

        self.assertEqual(list(op_meta['names'])[0], correct_op_name)

        for host in inventory:
            op_hashes = state.meta[host]['op_hashes']
            if correct_host_names is True or host.name in correct_host_names:
                self.assertIn(op_hash, op_hashes)
            else:
                self.assertNotIn(op_hash, op_hashes)
def test_connect_with_rsa_ssh_key(self):
    state = State(make_inventory(hosts=(
        ('somehost', {'ssh_key': 'testkey'}),
    )), Config())

    with patch('pyinfra.api.connectors.ssh.path.isfile', lambda *args, **kwargs: True), \
            patch('pyinfra.api.connectors.ssh.RSAKey.from_private_key_file') as fake_key_open:

        fake_key = MagicMock()
        fake_key_open.return_value = fake_key

        state.deploy_dir = '/'

        connect_all(state)

        # Check the key was created properly
        fake_key_open.assert_called_with(filename='testkey')

        # Check the certificate file was then loaded
        fake_key.load_certificate.assert_called_with('testkey.pub')

        # And check the Paramiko SSH call was correct
        self.fake_connect_mock.assert_called_with(
            'somehost',
            allow_agent=False,
            look_for_keys=False,
            pkey=fake_key,
            timeout=10,
            username='******',
        )

    # Check that loading the same key again is cached in the state
    second_state = State(make_inventory(hosts=(
        ('somehost', {'ssh_key': 'testkey'}),
    )), Config())
    second_state.private_keys = state.private_keys

    connect_all(second_state)
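# The SSH key tests above all lean on the same mock pattern: patch paramiko's
# RSAKey.from_private_key_file with a side_effect that raises
# PasswordRequiredException when no password kwarg is given and returns a fake
# key otherwise. Below is a minimal, self-contained sketch of that pattern;
# the _load_key helper, _example_encrypted_key_mock function and fake_loader
# names are illustrative only and are not part of pyinfra or the tests above.

from unittest.mock import MagicMock, patch

from paramiko import PasswordRequiredException


def _load_key(loader, filename, password=None):
    # Try without a password first, then retry with one - mirroring the
    # encrypted-key handling the tests exercise.
    try:
        return loader(filename=filename)
    except PasswordRequiredException:
        if password is None:
            raise
        return loader(filename=filename, password=password)


def _example_encrypted_key_mock():
    fake_key = MagicMock()

    def fake_loader(*args, **kwargs):
        # Emulate an encrypted key file: fail unless a password is supplied
        if 'password' not in kwargs:
            raise PasswordRequiredException()
        return fake_key

    with patch(
        'paramiko.RSAKey.from_private_key_file', side_effect=fake_loader,
    ) as fake_key_open:
        key = _load_key(fake_key_open, 'testkey', password='testpass')

    assert key is fake_key
    fake_key_open.assert_called_with(filename='testkey', password='testpass')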
def _main(
    inventory, operations, verbosity,
    user, port, key, key_password, password,
    winrm_username, winrm_password, winrm_port,
    shell_executable,
    sudo, sudo_user, use_sudo_password, su_user,
    parallel, fail_percent,
    dry, limit, no_wait, serial,
    quiet, debug, debug_data, debug_facts, debug_operations,
    facts=None,
    print_operations=None,
    support=None,
):
    if not debug and not sys.warnoptions:
        warnings.simplefilter('ignore')

    # Setup logging
    log_level = logging.INFO
    if debug:
        log_level = logging.DEBUG
    elif quiet:
        log_level = logging.WARNING

    setup_logging(log_level)

    # Bootstrap any virtualenv
    init_virtualenv()

    deploy_dir = getcwd()
    potential_deploy_dirs = []

    # This is the most common case: we have a deploy file so use its
    # pathname - we only look at the first file as we can't have multiple
    # deploy directories.
    if operations[0].endswith('.py'):
        deploy_file_dir, _ = path.split(operations[0])
        above_deploy_file_dir, _ = path.split(deploy_file_dir)

        deploy_dir = deploy_file_dir

        potential_deploy_dirs.extend((
            deploy_file_dir,
            above_deploy_file_dir,
        ))

    # If we have a valid inventory, look in its path and its parent for
    # group_data or config.py to indicate deploy_dir (--fact, --run).
    if inventory.endswith('.py') and path.isfile(inventory):
        inventory_dir, _ = path.split(inventory)
        above_inventory_dir, _ = path.split(inventory_dir)

        potential_deploy_dirs.extend((
            inventory_dir,
            above_inventory_dir,
        ))

    for potential_deploy_dir in potential_deploy_dirs:
        logger.debug('Checking potential directory: {0}'.format(
            potential_deploy_dir,
        ))

        if any((
            path.isdir(path.join(potential_deploy_dir, 'group_data')),
            path.isfile(path.join(potential_deploy_dir, 'config.py')),
        )):
            logger.debug('Setting directory to: {0}'.format(potential_deploy_dir))
            deploy_dir = potential_deploy_dir
            break

    # Create an empty/uninitialised state object
    state = State()

    # Set the deploy directory
    state.deploy_dir = deploy_dir

    pseudo_state.set(state)

    if verbosity > 0:
        state.print_fact_info = True
        state.print_noop_info = True

    if verbosity > 1:
        state.print_input = state.print_fact_input = True

    if verbosity > 2:
        state.print_output = state.print_fact_output = True

    if not quiet:
        click.echo('--> Loading config...', err=True)

    # Load up any config.py from the filesystem
    config = load_config(deploy_dir)

    # Make a copy before we overwrite
    original_operations = operations

    # Debug (print) inventory + group data
    if operations[0] == 'debug-inventory':
        command = 'debug-inventory'

    # Get all non-arg facts
    elif operations[0] == 'all-facts':
        command = 'fact'

        fact_names = []

        for fact_name in get_fact_names():
            fact_class = get_fact_class(fact_name)
            if (
                not issubclass(fact_class, ShortFactBase)
                and not callable(fact_class.command)
            ):
                fact_names.append(fact_name)

        operations = [(name, None) for name in fact_names]

    # Get one or more facts
    elif operations[0] == 'fact':
        command = 'fact'

        fact_names = operations[1:]
        facts = []

        for name in fact_names:
            args = None

            if ':' in name:
                name, args = name.split(':', 1)
                args = args.split(',')

            if not is_fact(name):
                raise CliError('No fact: {0}'.format(name))

            facts.append((name, args))

        operations = facts

    # Execute a raw command with server.shell
    elif operations[0] == 'exec':
        command = 'exec'
        operations = operations[1:]

    # Execute one or more deploy files
    elif all(cmd.endswith('.py') for cmd in operations):
        command = 'deploy'
        operations = operations[0:]

        for file in operations:
            if not path.exists(file):
                raise CliError('No deploy file: {0}'.format(file))

    # Operation w/optional args (<module>.<op> ARG1 ARG2 ...)
    elif len(operations[0].split('.')) == 2:
        command = 'op'
        operations = get_operation_and_args(operations)

    else:
        raise CliError('''Invalid operations: {0}

    Operation usage:
    pyinfra INVENTORY deploy_web.py [deploy_db.py]...
    pyinfra INVENTORY server.user pyinfra home=/home/pyinfra
    pyinfra INVENTORY exec -- echo "hello world"
    pyinfra INVENTORY fact os [users]...'''.format(operations))

    # Load any hooks/config from the deploy file
    if command == 'deploy':
        load_deploy_config(operations[0], config)

    # Arg based config overrides
    if sudo:
        config.SUDO = True

    if sudo_user:
        config.SUDO_USER = sudo_user

    if use_sudo_password:
        config.USE_SUDO_PASSWORD = use_sudo_password

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if shell_executable:
        config.SHELL = shell_executable

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    if not quiet:
        click.echo('--> Loading inventory...', err=True)

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        deploy_dir=deploy_dir,
        ssh_port=port,
        ssh_user=user,
        ssh_key=key,
        ssh_key_password=key_password,
        ssh_password=password,
        winrm_username=winrm_username,
        winrm_password=winrm_password,
        winrm_port=winrm_port,
    )

    # Attach to pseudo inventory
    pseudo_inventory.set(inventory)

    # Now that we have inventory, apply --limit config override
    initial_limit = None
    if limit:
        all_limit_hosts = []

        for limiter in limit:
            try:
                limit_hosts = inventory.get_group(limiter)
            except NoGroupError:
                limits = limiter.split(',')
                if len(limits) > 1:
                    logger.warning((
                        'Specifying comma separated --limit values is deprecated, '
                        'please use multiple --limit options.'
                    ))

                limit_hosts = [
                    host for host in inventory
                    if any(fnmatch(host.name, match) for match in limits)
                ]

            all_limit_hosts.extend(limit_hosts)

        initial_limit = list(set(all_limit_hosts))

    # Initialise the state
    state.init(inventory, config, initial_limit=initial_limit)

    # If --debug-data dump & exit
    if command == 'debug-inventory' or debug_data:
        if debug_data:
            logger.warning((
                '--debug-data is deprecated, '
                'please use `pyinfra INVENTORY debug-inventory` instead.'
            ))
        print_inventory(state)
        _exit()

    # Connect to all the servers
    if not quiet:
        click.echo(err=True)
        click.echo('--> Connecting to hosts...', err=True)

    connect_all(state)

    # Just getting a fact?
    #
    if command == 'fact':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Gathering facts...', err=True)

        state.print_fact_info = True
        fact_data = {}

        for i, command in enumerate(operations):
            name, args = command
            fact_key = name

            if args:
                fact_key = '{0}{1}'.format(name, tuple(args))

            try:
                fact_data[fact_key] = get_facts(
                    state,
                    name,
                    args=args,
                    apply_failed_hosts=False,
                )
            except PyinfraError:
                pass

        print_facts(fact_data)
        _exit()

    # Prepare the deploy!
    #

    # Execute a raw command with server.shell
    if command == 'exec':
        # Print the output of the command
        state.print_output = True

        add_op(
            state,
            server.shell,
            ' '.join(operations),
            _allow_cli_mode=True,
        )

    # Deploy file(s)
    elif command == 'deploy':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Preparing operations...', err=True)

        # Number of "steps" to make = number of files * number of hosts
        for i, filename in enumerate(operations):
            logger.info('Loading: {0}'.format(click.style(filename, bold=True)))
            state.current_op_file = i
            load_deploy_file(state, filename)

    # Operation w/optional args
    elif command == 'op':
        if not quiet:
            click.echo(err=True)
            click.echo('--> Preparing operation...', err=True)

        op, args = operations
        args, kwargs = args
        kwargs['_allow_cli_mode'] = True

        def print_host_ready(host):
            logger.info('{0}{1} {2}'.format(
                host.print_prefix,
                click.style('Ready:', 'green'),
                click.style(original_operations[0], bold=True),
            ))

        kwargs['_after_host_callback'] = print_host_ready

        add_op(state, op, *args, **kwargs)

    # Always show meta output
    if not quiet:
        click.echo(err=True)
        click.echo('--> Proposed changes:', err=True)

    print_meta(state)

    # If --debug-facts or --debug-operations, print and exit
    if debug_facts or debug_operations:
        if debug_facts:
            print_state_facts(state)

        if debug_operations:
            print_state_operations(state)

        _exit()

    # Run the operations we generated with the deploy file
    if dry:
        _exit()

    if not quiet:
        click.echo(err=True)

    if not quiet:
        click.echo('--> Beginning operation run...', err=True)

    run_ops(state, serial=serial, no_wait=no_wait)

    if not quiet:
        click.echo('--> Results:', err=True)

    print_results(state)

    _exit()
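# A standalone sketch of the --limit resolution order used above: each --limit
# value is first tried as an inventory group and only then treated as a
# (possibly comma separated, deprecated) set of fnmatch globs over host names.
# This uses plain dicts and lists instead of pyinfra's Inventory object; the
# resolve_limit name is illustrative only.

from fnmatch import fnmatch


def resolve_limit(limiters, groups, host_names):
    # groups: {group_name: [host_name, ...]}; host_names: [host_name, ...]
    matched = []

    for limiter in limiters:
        if limiter in groups:
            matched.extend(groups[limiter])
        else:
            patterns = limiter.split(',')
            matched.extend(
                name for name in host_names
                if any(fnmatch(name, pattern) for pattern in patterns)
            )

    return sorted(set(matched))


# Group names win over glob matching; unknown values fall back to globs.
assert resolve_limit(
    ['db', 'web-*'],
    groups={'db': ['db-1', 'db-2']},
    host_names=['db-1', 'db-2', 'web-1', 'web-2'],
) == ['db-1', 'db-2', 'web-1', 'web-2']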
def _main(
    inventory, commands, verbosity,
    user, port, key, key_password, password,
    sudo, sudo_user, su_user,
    parallel, fail_percent,
    dry, limit, no_wait, serial,
    debug, debug_data, debug_state,
    facts=None,
    operations=None,
):
    print()
    print('### {0}'.format(click.style('Welcome to pyinfra', bold=True)))
    print()

    # Setup logging
    log_level = logging.DEBUG if debug else logging.INFO
    setup_logging(log_level)

    deploy_dir = getcwd()
    potential_deploy_dirs = []

    # This is the most common case: we have a deploy file so use its
    # pathname - we only look at the first file as we can't have multiple
    # deploy directories.
    if commands[0].endswith('.py'):
        deploy_file_dir, _ = path.split(commands[0])
        above_deploy_file_dir, _ = path.split(deploy_file_dir)

        deploy_dir = deploy_file_dir

        potential_deploy_dirs.extend((
            deploy_file_dir,
            above_deploy_file_dir,
        ))

    # If we have a valid inventory, look in its path and its parent for
    # group_data or config.py to indicate deploy_dir (--fact, --run).
    if inventory.endswith('.py') and path.isfile(inventory):
        inventory_dir, _ = path.split(inventory)
        above_inventory_dir, _ = path.split(inventory_dir)

        potential_deploy_dirs.extend((
            inventory_dir,
            above_inventory_dir,
        ))

    for potential_deploy_dir in potential_deploy_dirs:
        logger.debug('Checking potential directory: {0}'.format(
            potential_deploy_dir,
        ))

        if any((
            path.isdir(path.join(potential_deploy_dir, 'group_data')),
            path.isfile(path.join(potential_deploy_dir, 'config.py')),
        )):
            logger.debug('Setting directory to: {0}'.format(potential_deploy_dir))
            deploy_dir = potential_deploy_dir
            break

    # List facts
    if commands[0] == 'fact':
        command = 'fact'

        fact_names = commands[1:]
        facts = []

        for name in fact_names:
            args = None

            if ':' in name:
                name, args = name.split(':', 1)
                args = args.split(',')

            if not is_fact(name):
                raise CliError('No fact: {0}'.format(name))

            facts.append((name, args))

        commands = facts

    # Execute a raw command with server.shell
    elif commands[0] == 'exec':
        command = 'exec'
        commands = commands[1:]

    # Deploy file(s)
    elif all(cmd.endswith('.py') for cmd in commands):
        command = 'deploy'
        commands = commands[0:]

        # Check each file exists
        for file in commands:
            if not path.exists(file):
                raise CliError('No deploy file: {0}'.format(file))

    # Operation w/optional args
    elif len(commands) == 2:
        command = 'op'
        commands = get_operation_and_args(
            commands[0], commands[1],
        )

    else:
        raise CliError('''Invalid commands: {0}

    Command usage:
    pyinfra INVENTORY deploy_web.py [deploy_db.py]...
    pyinfra INVENTORY server.user pyinfra,home=/home/pyinfra
    pyinfra INVENTORY exec -- echo "hello world"
    pyinfra INVENTORY fact os [users]...'''.format(commands))

    print('--> Loading config...')

    # Load up any config.py from the filesystem
    config = load_config(deploy_dir)

    # Load any hooks/config from the deploy file
    if command == 'deploy':
        load_deploy_config(commands[0], config)

    # Arg based config overrides
    if sudo:
        config.SUDO = True

    if sudo_user:
        config.SUDO_USER = sudo_user

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    print('--> Loading inventory...')

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        deploy_dir=deploy_dir,
        limit=limit,
        ssh_user=user,
        ssh_key=key,
        ssh_key_password=key_password,
        ssh_password=password,
        ssh_port=port,
    )

    # If --debug-data dump & exit
    if debug_data:
        print_inventory(inventory)
        _exit()

    # Attach to pseudo inventory
    pseudo_inventory.set(inventory)

    # Create/set the state
    state = State(inventory, config)
    state.is_cli = True
    state.print_lines = True
    state.deploy_dir = deploy_dir

    # Setup printing on the new state
    print_output = verbosity > 0
    print_fact_output = verbosity > 1

    state.print_output = print_output  # -v
    state.print_fact_info = print_output  # -v
    state.print_fact_output = print_fact_output  # -vv

    # Attach to pseudo state
    pseudo_state.set(state)

    # Setup the data to be passed to config hooks
    hook_data = FallbackAttrData(
        state.inventory.get_override_data(),
        state.inventory.get_group_data(inventory_group),
        state.inventory.get_data(),
    )

    # Run the before_connect hook if provided
    run_hook(state, 'before_connect', hook_data)

    # Connect to all the servers
    print('--> Connecting to hosts...')
    with progress_spinner(state.inventory) as progress:
        connect_all(state, progress=progress)

    # Run the before_facts hook if provided
    run_hook(state, 'before_facts', hook_data)

    # Just getting a fact?
    #
    if command == 'fact':
        print()
        print('--> Gathering facts...')

        # Print facts as we get them
        state.print_fact_info = True

        # Print fact output with -v
        state.print_fact_output = print_output

        fact_data = {}

        with progress_spinner(commands) as progress:
            for i, (name, args) in enumerate(commands):
                fact_data[name] = get_facts(
                    state,
                    name,
                    args=args,
                )
                progress()

        print_facts(fact_data)
        _exit()

    # Prepare the deploy!
    #

    # Execute a raw command with server.shell
    if command == 'exec':
        # Print the output of the command
        state.print_output = True

        add_op(
            state,
            server.shell,
            ' '.join(commands),
        )

    # Deploy file(s)
    elif command == 'deploy':
        print()
        print('--> Preparing operations...')

        # Number of "steps" to make = number of files * number of hosts
        prepare_steps = len(commands) * len(state.inventory)

        with progress_spinner(prepare_steps) as progress:
            for filename in commands:
                load_deploy_file(state, filename, progress=progress)
                progress()

    # Operation w/optional args
    elif command == 'op':
        print()
        print('--> Preparing operation...')

        op, args = commands
        add_op(state, op, *args[0], **args[1])

    # Always show meta output
    print()
    print('--> Proposed changes:')
    print_meta(state, inventory)

    # If --debug-state, dump state (ops, op order, op meta) now & exit
    if debug_state:
        dump_state(state)
        _exit()

    # Run the operations we generated with the deploy file
    if dry:
        _exit()

    print()

    # Run the before_deploy hook if provided
    run_hook(state, 'before_deploy', hook_data)

    print('--> Beginning operation run...')

    # Number of "steps" to make = number of operations * number of hosts
    operation_steps = len(state.op_order) * len(state.inventory)

    with progress_spinner(operation_steps) as progress:
        run_ops(
            state,
            serial=serial,
            no_wait=no_wait,
            progress=progress,
        )

    # Run the after_deploy hook if provided
    run_hook(state, 'after_deploy', hook_data)

    print('--> Results:')
    print_results(state, inventory)

    _exit()
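# All of the _main variants here share the same deploy directory heuristic:
# start from the current working directory, then prefer the first candidate
# directory (the deploy file's directory/parent or the inventory's
# directory/parent) containing a group_data/ directory or a config.py. A
# compact sketch of just that check; detect_deploy_dir is an illustrative
# name, not a pyinfra function.

from os import getcwd, path


def detect_deploy_dir(candidate_dirs):
    for candidate in candidate_dirs:
        if any((
            path.isdir(path.join(candidate, 'group_data')),
            path.isfile(path.join(candidate, 'config.py')),
        )):
            return candidate

    # Nothing matched - fall back to the current working directory
    return getcwd()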
# If --debug-data dump & exit
if arguments['debug_data']:
    print_data(inventory)
    _exit()

# Load any hooks/config from the deploy file w/fake state & host
pseudo_state.set(FakeState())
pseudo_host.set(FakeHost())

load_deploy_config(arguments['deploy'], config)

pseudo_host.reset()
pseudo_state.reset()

# Create/set the state
state = State(inventory, config)
state.is_cli = True
state.deploy_dir = deploy_dir

# Setup printing on the new state
print_output = arguments['verbose'] > 0

if arguments['deploy'] is None and arguments['op'] is None:
    print_fact_output = print_output
else:
    print_fact_output = arguments['verbose'] > 1

state.print_output = print_output  # -v
state.print_fact_info = print_output  # -v
state.print_fact_output = print_fact_output  # -vv
state.print_lines = True

# Attach to pseudo state
pseudo_state.set(state)
def _main(
    inventory, operations, verbosity,
    user, port, key, key_password, password,
    sudo, sudo_user, su_user,
    parallel, fail_percent,
    dry, limit, no_wait, serial,
    debug, debug_data, debug_facts, debug_operations,
    facts=None,
    print_operations=None,
):
    print()
    print('### {0}'.format(click.style('Welcome to pyinfra', bold=True)))
    print()

    # Setup logging
    log_level = logging.DEBUG if debug else logging.INFO
    setup_logging(log_level)

    # Bootstrap any virtualenv
    init_virtualenv()

    deploy_dir = getcwd()
    potential_deploy_dirs = []

    # This is the most common case: we have a deploy file so use its
    # pathname - we only look at the first file as we can't have multiple
    # deploy directories.
    if operations[0].endswith('.py'):
        deploy_file_dir, _ = path.split(operations[0])
        above_deploy_file_dir, _ = path.split(deploy_file_dir)

        deploy_dir = deploy_file_dir

        potential_deploy_dirs.extend((
            deploy_file_dir,
            above_deploy_file_dir,
        ))

    # If we have a valid inventory, look in its path and its parent for
    # group_data or config.py to indicate deploy_dir (--fact, --run).
    if inventory.endswith('.py') and path.isfile(inventory):
        inventory_dir, _ = path.split(inventory)
        above_inventory_dir, _ = path.split(inventory_dir)

        potential_deploy_dirs.extend((
            inventory_dir,
            above_inventory_dir,
        ))

    for potential_deploy_dir in potential_deploy_dirs:
        logger.debug('Checking potential directory: {0}'.format(
            potential_deploy_dir,
        ))

        if any((
            path.isdir(path.join(potential_deploy_dir, 'group_data')),
            path.isfile(path.join(potential_deploy_dir, 'config.py')),
        )):
            logger.debug('Setting directory to: {0}'.format(potential_deploy_dir))
            deploy_dir = potential_deploy_dir
            break

    # List facts
    if operations[0] == 'fact':
        command = 'fact'

        fact_names = operations[1:]
        facts = []

        for name in fact_names:
            args = None

            if ':' in name:
                name, args = name.split(':', 1)
                args = args.split(',')

            if not is_fact(name):
                raise CliError('No fact: {0}'.format(name))

            facts.append((name, args))

        operations = facts

    # Execute a raw command with server.shell
    elif operations[0] == 'exec':
        command = 'exec'
        operations = operations[1:]

    # Deploy file(s)
    elif all(cmd.endswith('.py') for cmd in operations):
        command = 'deploy'
        operations = operations[0:]

        # Check each file exists
        for file in operations:
            if not path.exists(file):
                raise CliError('No deploy file: {0}'.format(file))

    # Operation w/optional args (<module>.<op> ARG1 ARG2 ...)
    elif len(operations[0].split('.')) == 2:
        command = 'op'
        operations = get_operation_and_args(operations)

    else:
        raise CliError('''Invalid operations: {0}

    Operation usage:
    pyinfra INVENTORY deploy_web.py [deploy_db.py]...
    pyinfra INVENTORY server.user pyinfra home=/home/pyinfra
    pyinfra INVENTORY exec -- echo "hello world"
    pyinfra INVENTORY fact os [users]...'''.format(operations))

    # Create an empty/uninitialised state object
    state = State()
    pseudo_state.set(state)

    # Setup printing on the new state
    print_output = verbosity > 0
    print_fact_output = verbosity > 1

    state.print_output = print_output  # -v
    state.print_fact_info = print_output  # -v
    state.print_fact_output = print_fact_output  # -vv

    print('--> Loading config...')

    # Load up any config.py from the filesystem
    config = load_config(deploy_dir)

    # Load any hooks/config from the deploy file
    if command == 'deploy':
        load_deploy_config(operations[0], config)

    # Arg based config overrides
    if sudo:
        config.SUDO = True

    if sudo_user:
        config.SUDO_USER = sudo_user

    if su_user:
        config.SU_USER = su_user

    if parallel:
        config.PARALLEL = parallel

    if fail_percent is not None:
        config.FAIL_PERCENT = fail_percent

    print('--> Loading inventory...')

    # Load up the inventory from the filesystem
    inventory, inventory_group = make_inventory(
        inventory,
        deploy_dir=deploy_dir,
        ssh_port=port,
        ssh_user=user,
        ssh_key=key,
        ssh_key_password=key_password,
        ssh_password=password,
    )

    # Apply any --limit to the inventory
    limit_hosts = None

    if limit:
        try:
            limit_hosts = inventory.get_group(limit)
        except NoGroupError:
            limits = limit.split(',')

            limit_hosts = [
                host for host in inventory
                if any(fnmatch(host.name, limit) for limit in limits)
            ]

    # Attach to pseudo inventory
    pseudo_inventory.set(inventory)

    # Initialise the state, passing any initial --limit
    state.init(inventory, config, initial_limit=limit_hosts)

    # If --debug-data dump & exit
    if debug_data:
        print_inventory(state)
        _exit()

    # Set the deploy directory
    state.deploy_dir = deploy_dir

    # Setup the data to be passed to config hooks
    hook_data = FallbackDict(
        state.inventory.get_override_data(),
        state.inventory.get_group_data(inventory_group),
        state.inventory.get_data(),
    )

    # Run the before_connect hook if provided
    run_hook(state, 'before_connect', hook_data)

    # Connect to all the servers
    print('--> Connecting to hosts...')
    connect_all(state)

    # Run the before_facts hook if provided
    run_hook(state, 'before_facts', hook_data)

    # Just getting a fact?
    #
    if command == 'fact':
        print()
        print('--> Gathering facts...')

        # Print facts as we get them
        state.print_fact_info = True

        # Print fact output with -v
        state.print_fact_output = print_output

        fact_data = {}

        for i, command in enumerate(operations):
            name, args = command
            fact_data[name] = get_facts(
                state,
                name,
                args=args,
            )

        print_facts(fact_data)
        _exit()

    # Prepare the deploy!
    #

    # Execute a raw command with server.shell
    if command == 'exec':
        # Print the output of the command
        state.print_output = True

        add_op(
            state,
            server.shell,
            ' '.join(operations),
        )

    # Deploy file(s)
    elif command == 'deploy':
        print()
        print('--> Preparing operations...')

        # Number of "steps" to make = number of files * number of hosts
        for i, filename in enumerate(operations):
            logger.info('Loading: {0}'.format(click.style(filename, bold=True)))
            state.current_op_file = i
            load_deploy_file(state, filename)

    # Operation w/optional args
    elif command == 'op':
        print()
        print('--> Preparing operation...')

        op, args = operations
        add_op(state, op, *args[0], **args[1])

    # Always show meta output
    print()
    print('--> Proposed changes:')
    print_meta(state)

    # If --debug-facts or --debug-operations, print and exit
    if debug_facts or debug_operations:
        if debug_facts:
            print_state_facts(state)

        if debug_operations:
            print_state_operations(state)

        _exit()

    # Run the operations we generated with the deploy file
    if dry:
        _exit()

    print()

    # Run the before_deploy hook if provided
    run_hook(state, 'before_deploy', hook_data)

    print('--> Beginning operation run...')
    run_ops(state, serial=serial, no_wait=no_wait)

    # Run the after_deploy hook if provided
    run_hook(state, 'after_deploy', hook_data)

    print('--> Results:')
    print_results(state)

    # Triggers any executor disconnect requirements
    disconnect_all(state)

    _exit()