def create_cmd(cell, hostname, instance_profile, instance_type, subnet, image, disk):
    """Create cell ZooKeeper server(s)."""
    ec2_conn = awscontext.GLOBAL.ec2
    ipa_client = awscontext.GLOBAL.ipaclient
    cell_admin = admin.Cell(context.GLOBAL.ldap.conn)
    masters = cell_admin.get(cell, dirty=True)['masters']

    # Narrow down to a single master when an explicit hostname is given.
    if hostname:
        masters = [m for m in masters if m['hostname'] == hostname]
        if not masters:
            cli.bad_exit('%s not found in the cell config', hostname)

    for zk_master in masters:
        zk_hostname = zk_master['hostname']
        try:
            existing = ec2client.get_instance(
                ec2_conn, hostnames=[zk_hostname]
            )
        except exc.NotFoundError:
            # No EC2 instance backing this master yet - create it.
            hostmanager.create_zk(
                ec2_conn=ec2_conn,
                ipa_client=ipa_client,
                master=zk_master,
                subnet_id=subnet,
                instance_type=instance_type,
                instance_profile=instance_profile,
                image_id=image,
                disk=disk
            )
            cli.out('Created: %s', zk_hostname)
        else:
            cli.out('%s EC2 instance already exists', zk_hostname)
            _LOGGER.debug(existing)
def zk_cmd(cell, instance_profile, rotate):
    """Manage Zookeeper servers"""
    ec2_conn = awscontext.GLOBAL.ec2
    ipa_client = awscontext.GLOBAL.ipaclient
    cell_admin = admin.Cell(context.GLOBAL.ldap.conn)
    masters = cell_admin.get(cell, dirty=True)['masters']

    # Check that Zookeepers exist; create if missing
    found_instances = []
    for zk_master in masters:
        try:
            instance = ec2client.get_instance(
                ec2_conn, hostnames=[zk_master['hostname']]
            )
            found_instances.append(instance)
        except exc.NotFoundError:
            cli.out(
                hostmanager.create_zk(
                    ec2_conn=ec2_conn,
                    instance_profile=instance_profile,
                    ipa_client=ipa_client,
                    master=zk_master
                )
            )

    # Only rotate when every configured master has a live instance.
    if rotate and len(found_instances) == len(masters):
        # Refuse to rotate if quorum would be broken
        if len(found_instances) < 3:
            cli.out('Not enough healthy Zookeepers to rotate')
            return
        cli.out(
            hostmanager.rotate_zk(
                ec2_conn=ec2_conn,
                instance_profile=instance_profile,
                ipa_client=ipa_client,
                ec2_instances=found_instances,
                masters=masters
            )
        )
def configure(instance):
    """Configure instance"""
    # Default to the instance this code is running on, identified
    # via the EC2 metadata service.
    if not instance:
        instance = {'ids': [metadata.instance_id()]}
    ec2_conn = awscontext.GLOBAL.ec2
    cli.out(formatter(ec2client.get_instance(ec2_conn, **instance)))
def test_get_matching_hostname(self):
    """ Test list_instances call to AWS with full hostname """
    ec2_conn = mock.MagicMock()
    ec2_conn.describe_instances = mock.MagicMock()

    # Simplified AWS result
    # TODO: this does not seem like correct test. Hostname is derived from
    # tag (Name), not from instance_id.
    fake_instance = {'InstanceId': 'host1.foo.com'}
    ec2_conn.describe_instances.return_value = {
        'Reservations': [{'Instances': [fake_instance]}]
    }

    actual = ec2client.get_instance(ec2_conn, hostnames=['host1.foo.com'])

    self.assertEqual(ec2_conn.describe_instances.call_count, 1)
    self.assertEqual(actual, fake_instance)
def rotate_cmd(cell, hostname, instance_profile, instance_type, subnet, image, disk):
    """Rotate cell ZooKeeper server."""
    ec2_conn = awscontext.GLOBAL.ec2
    ipa_client = awscontext.GLOBAL.ipaclient
    cell_admin = admin.Cell(context.GLOBAL.ldap.conn)
    masters = cell_admin.get(cell, dirty=True)['masters']

    # The hostname must refer to a configured master of this cell.
    matching = [m for m in masters if m['hostname'] == hostname]
    if not matching:
        cli.bad_exit('%s not found in the cell config', hostname)
    zk_master = matching[0]

    try:
        old_instance = ec2client.get_instance(ec2_conn, hostnames=[hostname])
        _LOGGER.debug(old_instance)
    except exc.NotFoundError:
        cli.bad_exit('%s EC2 instance does not exist', hostname)

    hostmanager.delete_hosts(ec2_conn, ipa_client, [hostname])
    cli.out('Deleted: %s', hostname)

    # Copy subnet, type and image from the old instance unless we override.
    hostmanager.create_zk(
        ec2_conn=ec2_conn,
        ipa_client=ipa_client,
        master=zk_master,
        subnet_id=subnet or old_instance['SubnetId'],
        instance_type=instance_type or old_instance['InstanceType'],
        instance_profile=instance_profile,
        image_id=image or old_instance['ImageId'],
        disk=disk
    )
    cli.out('Created: %s', hostname)
def create(instance, device, reboot, tries, interval, dry_run, name):
    """Create snapshot from instance.

    If --reboot is given and the instance is running, the instance is
    stopped, the EBS volume attached at *device* is snapshotted, and the
    instance is started again.  Without --reboot the instance must already
    be stopped.  The snapshot is tagged with the attributes needed to
    later rebuild an image (architecture, ENA support, virtualization
    type), then re-fetched and printed.

    :param instance: kwargs identifying the instance for
        ``ec2client.get_instance`` (e.g. ``{'ids': [...]}``).
    :param device: block device name whose volume is snapshotted.
    :param reboot: stop/start the instance around the snapshot.
    :param tries: retry count for the stop/start state polls.
    :param interval: delay between state polls.
    :param dry_run: pass DryRun to EC2 (no snapshot actually taken).
    :param name: value of the snapshot's Name tag.
    :raises SnapshotError: when the instance state is incompatible with
        the requested --reboot mode.
    """
    from datetime import timezone

    ec2_conn = awscontext.GLOBAL.ec2
    instance_obj = ec2client.get_instance(ec2_conn, **instance)

    initial_state = instance_obj['State']['Name']
    hostname = _instance_tag(instance_obj, 'Name')

    if reboot:
        if initial_state == 'running':
            _LOGGER.info('%s is in state [running], will stop instance, '
                         'take snapshot, start instance.', hostname)
            do_reboot = True
        else:
            raise SnapshotError('--reboot requires instance to be in '
                                'state [running], instance is in '
                                'state [%s]' % initial_state)
    else:
        if initial_state == 'stopped':
            _LOGGER.info('%s is in state [stopped], will take snapshot '
                         '(no reboot required).', hostname)
            do_reboot = False
        else:
            raise SnapshotError('instance is in state [%s], use --reboot '
                                'to stop instance, take snapshot, then '
                                'start instance.' % initial_state)

    if do_reboot:
        _stop_instance(ec2_conn, instance_obj, tries, interval, dry_run)

    volume_id = _get_volume_id_from_instance(instance_obj, device)

    # Use UTC so the trailing 'Z' designator in the timestamp is accurate;
    # previously local time was stamped with a 'Z' suffix.
    timestamp = datetime.now(timezone.utc).strftime('%Y%m%d%H%M%SZ')
    sn_type = 'backup'
    desc = '%s %s %s' % (sn_type, hostname, timestamp)

    architecture = instance_obj['Architecture']
    ena_support = '%r' % instance_obj['EnaSupport']
    virtualization_type = instance_obj['VirtualizationType']

    tagspecs = [
        {
            'ResourceType': 'snapshot',
            'Tags': [
                {'Key': 'Name', 'Value': name},
                {'Key': 'Hostname', 'Value': hostname},
                # Reuse sn_type so the tag stays in sync with desc above.
                {'Key': 'Type', 'Value': sn_type},
                {'Key': 'Device', 'Value': device},
                {'Key': 'Architecture', 'Value': architecture},
                {'Key': 'EnaSupport', 'Value': ena_support},
                {'Key': 'VirtualizationType', 'Value': virtualization_type},
            ]
        }
    ]

    kwargs = {
        'Description': desc,
        'VolumeId': volume_id,
        'TagSpecifications': tagspecs,
    }
    if dry_run:
        kwargs['DryRun'] = True

    response = ec2_conn.create_snapshot(**kwargs)

    # Re-fetch the snapshot so the formatted output reflects server-side
    # attributes (state, progress, ...), not just the creation response.
    kwargs = {'Filters': [{'Name': 'snapshot-id',
                           'Values': [response['SnapshotId']]}]}
    response = ec2_conn.describe_snapshots(**kwargs)
    cli.out(formatter(response['Snapshots'][0]))

    if do_reboot:
        _start_instance(ec2_conn, instance_obj, tries, interval, dry_run)