def process(self, instances):
    """Start instances, batched per (instance type, availability zone).

    Grouping by type and AZ plays nice around AWS having insufficient
    capacity for a given type/zone. Records failing batches and raises
    RuntimeError with a summary if any batch could not be started.
    """
    instances = self._filter_ec2_with_volumes(
        self.filter_instance_state(instances))
    if not len(instances):
        return
    client = utils.local_session(
        self.manager.session_factory).client('ec2')
    failures = {}
    by_type = utils.group_by(instances, 'InstanceType')
    for itype, typed in by_type.items():
        by_zone = utils.group_by(typed, 'Placement.AvailabilityZone')
        for izone, zoned in by_zone.items():
            for batch in utils.chunks(zoned, self.batch_size):
                # A truthy result marks the whole batch as failed.
                if self.process_instance_set(client, batch, itype, izone):
                    failures["%s %s" % (itype, izone)] = [
                        i['InstanceId'] for i in batch]
    if failures:
        fail_count = sum(map(len, failures.values()))
        msg = "Could not start %d of %d instances %s" % (
            fail_count, len(instances), utils.dumps(failures))
        self.log.warning(msg)
        raise RuntimeError(msg)
def test_group_by(self):
    """group_by buckets items under a (possibly dotted) key; missing -> None."""
    # Dict ordering is undefined on Python 2, so sort keys there before
    # comparing; on Python 3 insertion order is already deterministic.
    if sys.version_info.major == 2:
        sorter = sorted
    else:
        def sorter(x):
            return x
    items = [{}, {"Type": "a"}, {"Type": "a"}, {"Type": "b"}]
    self.assertEqual(
        sorter(list(utils.group_by(items, "Type").keys())),
        [None, "a", "b"])
    # Dotted key path descends into nested dicts.
    items = [{}] + [{"Type": {"Part": p}} for p in ("a", "a", "b")]
    self.assertEqual(
        sorter(list(utils.group_by(items, "Type.Part").keys())),
        [None, "a", "b"])
def process(self, instances):
    """Start instances in per-type, per-AZ batches; raise on failures.

    Batching by instance type and availability zone plays nice around
    AWS having insufficient capacity for a given combination.
    """
    instances = self._filter_ec2_with_volumes(
        self.filter_instance_state(instances))
    if len(instances) == 0:
        return
    session = utils.local_session(self.manager.session_factory)
    client = session.client('ec2')
    failures = {}
    for itype, t_instances in utils.group_by(
            instances, 'InstanceType').items():
        zone_groups = utils.group_by(
            t_instances, 'Placement.AvailabilityZone')
        for izone, z_instances in zone_groups.items():
            for batch in utils.chunks(z_instances, self.batch_size):
                fails = self.process_instance_set(
                    client, batch, itype, izone)
                if not fails:
                    continue
                # The whole batch is recorded as failed.
                failures["%s %s" % (itype, izone)] = [
                    i['InstanceId'] for i in batch]
    if not failures:
        return
    fail_count = sum(len(ids) for ids in failures.values())
    msg = "Could not start %d of %d instances %s" % (
        fail_count, len(instances), utils.dumps(failures))
    self.log.warning(msg)
    raise RuntimeError(msg)
def test_group_by(self):
    """group_by buckets items under a (possibly dotted) key; missing -> None."""
    items = [{}, {"Type": "a"}, {"Type": "a"}, {"Type": "b"}]
    grouped = utils.group_by(items, "Type")
    # Iterating the mapping yields its keys in insertion order.
    self.assertEqual(list(grouped), [None, "a", "b"])
    # Dotted key path descends into nested dicts.
    items = [{}] + [{"Type": {"Part": p}} for p in ("a", "a", "b")]
    nested = utils.group_by(items, "Type.Part")
    self.assertEqual(list(nested), [None, "a", "b"])
def test_group_by(self):
    """group_by buckets items under a (possibly dotted) key; missing -> None."""
    # Py2 dict ordering is undefined, so sort keys there before comparing.
    sorter = lambda x: x  # NOQA E731
    # Fixed: compare ints with ==, not `is` — identity of small ints is a
    # CPython implementation detail, not a language guarantee.
    sorter = sys.version_info.major == 2 and sorted or sorter
    items = [{}, {'Type': 'a'}, {'Type': 'a'}, {'Type': 'b'}]
    self.assertEqual(
        sorter(list(utils.group_by(items, 'Type').keys())),
        [None, 'a', 'b'])
    # Dotted key path descends into nested dicts.
    items = [
        {},
        {'Type': {'Part': 'a'}},
        {'Type': {'Part': 'a'}},
        {'Type': {'Part': 'b'}}]
    self.assertEqual(
        sorter(list(utils.group_by(items, 'Type.Part').keys())),
        [None, 'a', 'b'])
def test_group_by(self):
    """group_by buckets items under a (possibly dotted) key; missing -> None."""
    # Py2 dict ordering is undefined, so sort keys there before comparing.
    sorter = lambda x: x  # NOQA E731
    # Fixed: compare ints with ==, not `is` — identity of small ints is a
    # CPython implementation detail, not a language guarantee.
    sorter = sys.version_info.major == 2 and sorted or sorter
    items = [{}, {'Type': 'a'}, {'Type': 'a'}, {'Type': 'b'}]
    self.assertEqual(
        sorter(list(utils.group_by(items, 'Type').keys())),
        [None, 'a', 'b'])
    # Dotted key path descends into nested dicts.
    items = [
        {},
        {'Type': {'Part': 'a'}},
        {'Type': {'Part': 'a'}},
        {'Type': {'Part': 'b'}}]
    self.assertEqual(
        sorter(list(utils.group_by(items, 'Type.Part').keys())),
        [None, 'a', 'b'])
def test_group_by(self):
    """group_by buckets items under a (possibly dotted) key; missing -> None."""
    # Py2 dict ordering is undefined, so sort keys there before comparing.
    sorter = lambda x: x  # NOQA E731
    # Fixed: compare ints with ==, not `is` — identity of small ints is a
    # CPython implementation detail, not a language guarantee.
    sorter = sys.version_info.major == 2 and sorted or sorter
    items = [{}, {"Type": "a"}, {"Type": "a"}, {"Type": "b"}]
    self.assertEqual(
        sorter(list(utils.group_by(items, "Type").keys())), [None, "a", "b"]
    )
    # Dotted key path descends into nested dicts.
    items = [
        {},
        {"Type": {"Part": "a"}},
        {"Type": {"Part": "a"}},
        {"Type": {"Part": "b"}},
    ]
    self.assertEqual(
        sorter(list(utils.group_by(items, "Type.Part").keys())), [None, "a", "b"]
    )
def process(self, instances):
    """Process instances in per-type, per-AZ batches.

    Batching by instance type and availability zone plays nice around
    AWS having insufficient capacity for a given combination.
    """
    instances = self._filter_ec2_with_volumes(
        self.filter_instance_state(instances))
    if not len(instances):
        return
    client = utils.local_session(
        self.manager.session_factory).client('ec2')
    type_groups = utils.group_by(instances, 'InstanceType')
    for itype, t_instances in type_groups.items():
        zone_groups = utils.group_by(t_instances, 'AvailabilityZone')
        for izone, z_instances in zone_groups.items():
            for batch in utils.chunks(z_instances, self.batch_size):
                self.process_instance_set(client, batch, itype, izone)
def process(self, resources):
    """Process container instances grouped by their cluster.

    For each cluster, collects the container instance ARNs whose status
    differs from the configured target state and hands them to
    process_cluster.

    Fixed: `results` was unbound when `resources` was empty, so the
    final `return results` raised UnboundLocalError; it now returns
    None in that case.

    NOTE(review): only the LAST cluster's process_cluster result is
    returned, as in the original — confirm that is intentional.
    """
    results = None
    cluster_map = group_by(resources, 'c7n:cluster')
    # Hoisted: the target state is loop-invariant.
    state = self.data.get('state')
    for cluster in cluster_map:
        c_instances = [
            i['containerInstanceArn']
            for i in cluster_map[cluster]
            if i['status'] != state]
        results = self.process_cluster(cluster, c_instances)
    return results
def process(self, resources):
    """Process container instances grouped by their cluster.

    For each cluster, collects the container instance ARNs whose status
    differs from the configured target state and hands them to
    process_cluster.

    Fixed: `results` was unbound when `resources` was empty, so the
    final `return results` raised UnboundLocalError; it now returns
    None in that case.

    NOTE(review): only the LAST cluster's process_cluster result is
    returned, as in the original — confirm that is intentional.
    """
    results = None
    cluster_map = group_by(resources, 'c7n:cluster')
    # Hoisted: the target state is loop-invariant.
    state = self.data.get('state')
    for cluster in cluster_map:
        c_instances = [
            i['containerInstanceArn']
            for i in cluster_map[cluster]
            if i['status'] != state]
        results = self.process_cluster(cluster, c_instances)
    return results
def process(self, instances):
    """Process instances in per-type, per-AZ batches.

    Batching by instance type and availability zone plays nice around
    AWS having insufficient capacity for a given combination.
    """
    candidates = self._filter_ec2_with_volumes(
        self.filter_instance_state(instances))
    if len(candidates) == 0:
        return
    session = utils.local_session(self.manager.session_factory)
    client = session.client('ec2')
    for itype, typed in utils.group_by(candidates, 'InstanceType').items():
        zoned = utils.group_by(typed, 'AvailabilityZone')
        for izone, members in zoned.items():
            for batch in utils.chunks(members, self.batch_size):
                self.process_instance_set(client, batch, itype, izone)
def process(self, instances):
    """Process instances in per-type, per-AZ batches, then re-raise any
    non-capacity error recorded during batch processing.

    Batching by instance type and availability zone plays nice around
    AWS having insufficient capacity for a given combination.
    """
    instances = self._filter_ec2_with_volumes(
        self.filter_instance_state(instances))
    if not len(instances):
        return
    client = utils.local_session(
        self.manager.session_factory).client('ec2')
    # Play nice around aws having insufficient capacity...
    for itype, t_instances in utils.group_by(
            instances, 'InstanceType').items():
        for izone, z_instances in utils.group_by(
                t_instances, 'AvailabilityZone').items():
            for batch in utils.chunks(z_instances, self.batch_size):
                self.process_instance_set(client, batch, itype, izone)
    # Raise an exception after all batches process
    if self.exception:
        # Fixed: ('InsufficientInstanceCapacity') without a trailing comma
        # is a plain string, so `not in` performed a SUBSTRING test and
        # silently swallowed any error code that happened to be a
        # substring of it; a real one-element tuple compares codes.
        if self.exception.response['Error']['Code'] not in (
                'InsufficientInstanceCapacity',):
            self.log.exception("Error while starting instances error %s",
                self.exception)
            raise self.exception