def verify_certificate_chain(self, base=None, crt=None, csr=None, key=None):
    """
    Confirms the key, CSR, and certificate files all match.

    Either ``base`` (a common path prefix, to which '.crt', '.csr' and '.key'
    are appended) or all three of ``crt``, ``csr`` and ``key`` paths must be
    provided.

    Raises Exception if the three files' moduli do not match.
    """
    from burlap.common import get_verbose, print_fail, print_success
    r = self.local_renderer
    if base:
        crt = base + '.crt'
        csr = base + '.csr'
        key = base + '.key'
    else:
        assert crt and csr and key, 'If base not provided, crt and csr and key must be given.'
    assert os.path.isfile(crt)
    assert os.path.isfile(csr)
    assert os.path.isfile(key)
    # A matching key/CSR/certificate triple shares the same RSA modulus, so
    # comparing the MD5 of each modulus is sufficient to detect a mismatch.
    csr_md5 = r.local('openssl req -noout -modulus -in %s | openssl md5' % csr, capture=True)
    key_md5 = r.local('openssl rsa -noout -modulus -in %s | openssl md5' % key, capture=True)
    crt_md5 = r.local('openssl x509 -noout -modulus -in %s | openssl md5' % crt, capture=True)
    match = crt_md5 == csr_md5 == key_md5
    if self.verbose or not match:
        print('crt:', crt_md5)
        print('csr:', csr_md5)
        print('key:', key_md5)
    if match:
        print_success('Files look good!')
    else:
        # Fixed typo: the message previously read "Files no not match!".
        print_fail('Files do not match!')
        raise Exception('Files do not match!')
def test_connection(self):
    """
    Attempts to authenticate against the configured JIRA server and run a
    trivial query, reporting success or failure to the console.
    """
    from jira import JIRA
    from burlap.common import print_success, print_fail
    server = self.env.server
    username = self.env.basic_auth_username
    try:
        print('Connecting to %s with user %s...' % (server, username))
        jira = JIRA(
            {'server': server},
            basic_auth=(username, self.env.basic_auth_password))
        # A simple search is enough to prove the credentials work.
        jira.search_issues('status=resolved')
        print_success('OK')
    except Exception as exc:
        print_fail('ERROR: %s' % exc)
def test_connection(self):
    """
    Attempts to authenticate against the configured JIRA server and run a
    trivial query, reporting success or failure to the console.
    """
    from jira import JIRA, JIRAError
    from burlap.common import print_success, print_fail
    try:
        print('Connecting to %s with user %s...' % (self.env.server, self.env.basic_auth_username))
        jira = JIRA({
            'server': self.env.server
        }, basic_auth=(self.env.basic_auth_username, self.env.basic_auth_password))
        # A simple search is enough to prove the credentials work.
        jira.search_issues('status=resolved')
        print_success('OK')
    except JIRAError as exc:
        # JIRAError was imported but unused; catch API-level failures
        # (bad credentials, bad JQL, etc.) explicitly.
        print_fail('ERROR: %s' % exc)
    except Exception as exc:
        # Anything else, e.g. network/DNS errors.
        print_fail('ERROR: %s' % exc)
def update_dns_godaddy(self, domain, record_type, record):
    """
    Adds a DNS record to a GoDaddy-hosted domain.

    domain -- the domain name; must be one managed by the API key configured
        in ``self.genv.godaddy_api_keys``.
    record_type -- e.g. 'A' or 'CNAME' (upper-cased before submission).
    record -- dict with 'name', 'ttl' and either 'ip' or 'alias' keys.

    Duplicate-record responses from GoDaddy are ignored; transient ValueErrors
    are retried with a short delay.
    """
    from godaddypy import Client, Account
    from godaddypy.client import BadResponse

    def get_domains(client):
        # Collect all domain names; throttle to avoid GoDaddy rate limits.
        a = set()
        for d in client.get_domains():
            time.sleep(0.25)
            a.add(d)
        return a

    key = self.genv.godaddy_api_keys[domain]['key']
    secret = self.genv.godaddy_api_keys[domain]['secret']
    my_acct = Account(api_key=key, api_secret=secret)
    client = Client(my_acct)
    allowed_domains = get_domains(client)
    assert domain in allowed_domains, \
        'Domain %s is invalid this account. Only domains %s are allowed.' % (domain, ', '.join(sorted(allowed_domains)))
    print('Adding record:', domain, record_type, record)
    if not self.dryrun:
        try:
            max_retries = 10
            # Fixed: xrange is Python 2 only; range works on both 2 and 3.
            for retry in range(max_retries):
                try:
                    client.add_record(
                        domain, {
                            'data': record.get('ip', record.get('alias')),
                            'name': record['name'],
                            'ttl': record['ttl'],
                            'type': record_type.upper()
                        })
                    print_success('Record added!')
                    break
                except ValueError as exc:
                    print(
                        'Error adding DNS record on attempt %i of %i: %s' % (retry + 1, max_retries, exc))
                    if retry + 1 == max_retries:
                        raise
                    else:
                        time.sleep(3)
        except BadResponse as e:
            if e._message['code'] == 'DUPLICATE_RECORD':
                print('Ignoring duplicate record.')
            else:
                raise
def update_dns_godaddy(self, domain, record_type, record):
    """
    Adds a DNS record to a GoDaddy-hosted domain.

    domain -- the domain name; must be one managed by the API key configured
        in ``self.genv.godaddy_api_keys``.
    record_type -- e.g. 'A' or 'CNAME' (upper-cased before submission).
    record -- dict with 'name', 'ttl' and either 'ip' or 'alias' keys.
    """
    from godaddypy import Client, Account
    from godaddypy.client import BadResponse

    def _list_domains(api_client):
        # Collect all domain names; throttle to avoid GoDaddy rate limits.
        found = set()
        for name in api_client.get_domains():
            time.sleep(0.25)
            found.add(name)
        return found

    creds = self.genv.godaddy_api_keys[domain]
    account = Account(api_key=creds['key'], api_secret=creds['secret'])
    api_client = Client(account)
    allowed_domains = _list_domains(api_client)
    assert domain in allowed_domains, \
        'Domain %s is invalid this account. Only domains %s are allowed.' % (domain, ', '.join(sorted(allowed_domains)))
    print('Adding record:', domain, record_type, record)
    if self.dryrun:
        return
    max_retries = 10
    try:
        for attempt in six.moves.range(max_retries):
            try:
                payload = {
                    'data': record.get('ip', record.get('alias')),
                    'name': record['name'],
                    'ttl': record['ttl'],
                    'type': record_type.upper(),
                }
                api_client.add_record(domain, payload)
                print_success('Record added!')
                break
            except ValueError as exc:
                print('Error adding DNS record on attempt %i of %i: %s' % (attempt+1, max_retries, exc))
                # On the final attempt, propagate; otherwise back off and retry.
                if attempt + 1 == max_retries:
                    raise
                time.sleep(3)
    except BadResponse as e:
        if e._message['code'] == 'DUPLICATE_RECORD':
            print('Ignoring duplicate record.')
        else:
            raise
def status(self):
    """
    Reports the health of the software RAID array.

    Compares the drives present on the system against those active in the
    array, and if the array has degraded, prints either a diagnosis or the
    exact commands needed to re-add a replaced drive.
    """
    r = self.local_renderer
    all_drives_in_system = r.run('ls /dev/sd*')
    all_drives_in_system = set(
        ALL_DRIVES_IN_SYSTEM_REGEX.findall(all_drives_in_system))
    print('all drives in system:', all_drives_in_system)
    ret = self.raw_status()
    all_drives_in_array = set(ALL_DRIVES_IN_ARRAY_REGEX.findall(ret))
    print('all drives in array:', all_drives_in_array)
    # Drives visible to the OS but missing from the array need re-adding.
    drives_needing_to_be_readded = all_drives_in_system.difference(
        all_drives_in_array)
    print('drives_needing_to_be_readded:', drives_needing_to_be_readded)
    all_partitions_in_array = set(
        ALL_PARTITIONS_IN_ARRAY_REGEX.findall(ret))
    print('all partitions in array:', all_partitions_in_array)
    bad_drives = set(FAILED_DRIVES_REGEX.findall(ret))
    print('bad_drives:', bad_drives)
    # An '_' in the mdstat status string marks a missing member slot.
    empty_slots = [_ for _ in EMPTY_SLOTS_REGEX.findall(ret) if '_' in _]
    print('empty_slots:', empty_slots)
    if not all_drives_in_system:
        print_fail('NO DRIVES FOUND! Something is very wrong!')
    if not all_drives_in_array:
        print_fail('NO DRIVES FOUND IN ARRAY! Something is very wrong!')
    elif bad_drives:
        # Fixed: print_fail() was passed bad_drives as a second positional
        # argument; every other call site passes a single message string, so
        # format the drive list into the message instead.
        print_fail(
            'RAID has degraded! Shutdown and replace drive: %s'
            % ', '.join(sorted(bad_drives)))
    elif empty_slots:
        if drives_needing_to_be_readded:
            drive_str = ', '.join(sorted(drives_needing_to_be_readded))
            print_fail((
                'RAID has degraded, the failed drive %s has been replaced, '
                'but it needs to be re-added.') \
                % drive_str)
            print('''
# copy partitioning scheme from good drive onto bad drive
sudo sfdisk -d /dev/{good_drive} | sudo sfdisk /dev/{new_drive}

# add drive back into raid
sudo mdadm --manage /dev/md0 --add /dev/{new_drive}1
sudo mdadm --manage /dev/md1 --add /dev/{new_drive}2

# wait until drives synced
cat /proc/mdstat

#finally install grub on new disk
sudo grub-install /dev/{new_drive}
'''.format(**dict(
                good_drive=list(all_drives_in_array)[0],
                new_drive=list(drives_needing_to_be_readded)[0],
            )))
        else:
            print_fail(
                'RAID has degraded, the failed drive appears to have been removed, '
                'but no new drive has been added.')
    else:
        print_success('RAID is good.')
def status(self):
    """
    Reports the health of the software RAID array.

    Compares the drives present on the system against those active in the
    array, and if the array has degraded, prints either a diagnosis or the
    exact commands needed to re-add a replaced drive.
    """
    r = self.local_renderer
    all_drives_in_system = r.run('ls /dev/sd*')
    all_drives_in_system = set(ALL_DRIVES_IN_SYSTEM_REGEX.findall(all_drives_in_system))
    print('all drives in system:', all_drives_in_system)
    ret = self.raw_status()
    all_drives_in_array = set(ALL_DRIVES_IN_ARRAY_REGEX.findall(ret))
    print('all drives in array:', all_drives_in_array)
    # Drives visible to the OS but missing from the array need re-adding.
    drives_needing_to_be_readded = all_drives_in_system.difference(all_drives_in_array)
    print('drives_needing_to_be_readded:', drives_needing_to_be_readded)
    all_partitions_in_array = set(ALL_PARTITIONS_IN_ARRAY_REGEX.findall(ret))
    print('all partitions in array:', all_partitions_in_array)
    bad_drives = set(FAILED_DRIVES_REGEX.findall(ret))
    print('bad_drives:', bad_drives)
    # An '_' in the mdstat status string marks a missing member slot.
    empty_slots = [_ for _ in EMPTY_SLOTS_REGEX.findall(ret) if '_' in _]
    print('empty_slots:', empty_slots)
    if not all_drives_in_system:
        print_fail('NO DRIVES FOUND! Something is very wrong!')
    if not all_drives_in_array:
        print_fail('NO DRIVES FOUND IN ARRAY! Something is very wrong!')
    elif bad_drives:
        # Fixed: print_fail() was passed bad_drives as a second positional
        # argument; every other call site passes a single message string, so
        # format the drive list into the message instead.
        print_fail('RAID has degraded! Shutdown and replace drive: %s' % ', '.join(sorted(bad_drives)))
    elif empty_slots:
        if drives_needing_to_be_readded:
            drive_str = ', '.join(sorted(drives_needing_to_be_readded))
            print_fail((
                'RAID has degraded, the failed drive %s has been replaced, '
                'but it needs to be re-added.') \
                % drive_str)
            print('''
# copy partitioning scheme from good drive onto bad drive
sudo sfdisk -d /dev/{good_drive} | sudo sfdisk /dev/{new_drive}

# add drive back into raid
sudo mdadm --manage /dev/md0 --add /dev/{new_drive}1
sudo mdadm --manage /dev/md1 --add /dev/{new_drive}2

# wait until drives synced
cat /proc/mdstat

#finally install grub on new disk
sudo grub-install /dev/{new_drive}
'''.format(**dict(
                good_drive=list(all_drives_in_array)[0],
                new_drive=list(drives_needing_to_be_readded)[0],
            )))
        else:
            print_fail('RAID has degraded, the failed drive appears to have been removed, '
                'but no new drive has been added.')
    else:
        print_success('RAID is good.')