def create_scaling_group(self, log, tenant_id, config, launch, policies=None):
    """
    see :meth:`otter.models.interface.IScalingGroupCollection.create_scaling_group`
    """
    scaling_group_id = generate_key_str('scalinggroup')
    log.bind(tenant_id=tenant_id,
             scaling_group_id=scaling_group_id).msg("Creating scaling group")

    queries = [
        _cql_insert.format(cf=self.config_table, name=":scaling"),
        _cql_insert.format(cf=self.launch_table, name=":launch"),
        _cql_create_group_state.format(cf=self.state_table)]

    data = {"tenantId": tenant_id,
            "groupId": scaling_group_id,
            "scaling": serialize_json_data(config, 1),
            "launch": serialize_json_data(launch, 1)}

    outpolicies = {}
    _build_policies(policies, self.policies_table, self.event_table,
                    queries, data, outpolicies)

    b = Batch(queries, data,
              consistency=get_consistency_level('create', 'group'))
    d = b.execute(self.connection)
    d.addCallback(lambda _: {
        'groupConfiguration': config,
        'launchConfiguration': launch,
        'scalingPolicies': outpolicies,
        'id': scaling_group_id
    })
    return d
def create_scaling_group(self, log, tenant_id, config, launch, policies=None):
    """
    see :meth:`otter.models.interface.IScalingGroupCollection.create_scaling_group`
    """
    scaling_group_id = generate_key_str('scalinggroup')
    log.bind(tenant_id=tenant_id,
             scaling_group_id=scaling_group_id).msg("Creating scaling group")

    queries = [_cql_create_group.format(cf=self.group_table)]

    data = {"tenantId": tenant_id,
            "groupId": scaling_group_id,
            "group_config": serialize_json_data(config, 1),
            "launch_config": serialize_json_data(launch, 1),
            "active": '{}',
            "pending": '{}',
            "policyTouched": '{}',
            "paused": False,
            "created_at": datetime.utcnow()}

    outpolicies = {}
    _build_policies(policies, self.policies_table, self.event_table,
                    queries, data, outpolicies)

    b = Batch(queries, data,
              consistency=get_consistency_level('create', 'group'))
    d = b.execute(self.connection)
    d.addCallback(lambda _: {
        'groupConfiguration': config,
        'launchConfiguration': launch,
        'scalingPolicies': outpolicies,
        'id': scaling_group_id
    })
    return d
def _do_update_config(lastRev):
    queries = [_cql_update.format(cf=self.config_table, name=":scaling")]

    b = Batch(queries,
              {"tenantId": self.tenant_id,
               "groupId": self.uuid,
               "scaling": serialize_json_data(data, 1)},
              consistency=get_consistency_level('update', 'partial'))
    return b.execute(self.connection)
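# serialize_json_data is used throughout these snippets but is not shown.  A
# plausible sketch follows, assuming it simply stamps the blob with a schema
# version before JSON-encoding it; the '_ver' key and the exact behaviour are
# assumptions for illustration, not taken from the real implementation.
import json


def serialize_json_data(data, ver):
    """JSON-encode ``data`` with a version marker so it can be round-tripped."""
    blob = dict(data)
    blob['_ver'] = ver  # assumed version key
    return json.dumps(blob)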
def _do_update(_):
    queries, data = list(), dict()
    for i, event in enumerate(update_events):
        polname = 'policy{}'.format(i)
        queries.append(_cql_insert_event_batch.format(cf=self.event_table,
                                                      name=':' + polname))
        data.update({polname + key: event[key] for key in event})
    b = Batch(queries, data, get_consistency_level('insert', 'event'))
    return b.execute(self.connection)
def _do_update_policy(_):
    queries = [_cql_update_policy.format(cf=self.policies_table, name=":policy")]
    b = Batch(queries,
              {"tenantId": self.tenant_id,
               "groupId": self.uuid,
               "policyId": policy_id,
               "policy": serialize_json_data(data, 1)},
              consistency=get_consistency_level('update', 'policy'))
    return b.execute(self.connection)
def _do_delete(_):
    queries = [
        _cql_delete_all_in_policy.format(cf=self.policies_table),
        _cql_delete_all_in_policy.format(cf=self.webhooks_table),
        _cql_delete_policy_events.format(cf=self.event_table)]
    b = Batch(queries,
              {"tenantId": self.tenant_id,
               "groupId": self.uuid,
               "policyId": policy_id},
              consistency=get_consistency_level('delete', 'policy'))
    return b.execute(self.connection)
def _do_update(_):
    queries, data = list(), dict()
    for i, event in enumerate(update_events):
        polname = 'policy{}'.format(i)
        queries.append(
            _cql_insert_event_batch.format(cf=self.event_table,
                                           name=':' + polname))
        data.update({polname + key: event[key] for key in event})
    b = Batch(queries, data, get_consistency_level('insert', 'event'))
    return b.execute(self.connection)
def _do_update_launch(lastRev):
    queries = [_cql_update.format(cf=self.group_table,
                                  column='launch_config', name=":launch")]

    b = Batch(queries,
              {"tenantId": self.tenant_id,
               "groupId": self.uuid,
               "launch": serialize_json_data(data, 1)},
              consistency=get_consistency_level('update', 'partial'))
    d = b.execute(self.connection)
    return d
def test_batch(self):
    """
    Test a simple batch
    """
    batch = Batch(['INSERT * INTO BLAH', 'INSERT * INTO BLOO'], {})
    d = batch.execute(self.connection)
    self.successResultOf(d)
    expected = 'BEGIN BATCH INSERT * INTO BLAH'
    expected += ' INSERT * INTO BLOO APPLY BATCH;'
    self.connection.execute.assert_called_once_with(
        expected, {}, ConsistencyLevel.ONE)
def test_batch_ts(self):
    """
    Test a simple batch with timestamp set
    """
    batch = Batch(['INSERT * INTO BLAH'], {}, timestamp=123)
    d = batch.execute(self.connection)
    self.successResultOf(d)
    expected = 'BEGIN BATCH USING TIMESTAMP 123'
    expected += ' INSERT * INTO BLAH APPLY BATCH;'
    self.connection.execute.assert_called_once_with(
        expected, {}, ConsistencyLevel.ONE)
def test_batch_ts(self):
    """
    Test a simple batch with timestamp set
    """
    batch = Batch(['INSERT * INTO BLAH'], {}, timestamp=123)
    d = batch.execute(self.connection)
    self.successResultOf(d)
    expected = 'BEGIN BATCH USING TIMESTAMP 123'
    expected += ' INSERT * INTO BLAH APPLY BATCH;'
    self.connection.execute.assert_called_once_with(expected, {},
                                                    ConsistencyLevel.ONE)
def test_batch(self):
    """
    Test a simple batch
    """
    batch = Batch(['INSERT * INTO BLAH', 'INSERT * INTO BLOO'], {})
    d = batch.execute(self.connection)
    self.successResultOf(d)
    expected = 'BEGIN BATCH INSERT * INTO BLAH'
    expected += ' INSERT * INTO BLOO APPLY BATCH;'
    self.connection.execute.assert_called_once_with(expected, {},
                                                    ConsistencyLevel.ONE)
def test_batch_consistency(self):
    """
    Test a simple batch with consistency set
    """
    batch = Batch(['INSERT * INTO BLAH'], {},
                  consistency=ConsistencyLevel.QUORUM)
    d = batch.execute(self.connection)
    self.successResultOf(d)
    expected = 'BEGIN BATCH'
    expected += ' INSERT * INTO BLAH APPLY BATCH;'
    self.connection.execute.assert_called_once_with(
        expected, {}, ConsistencyLevel.QUORUM)
def test_batch_param(self):
    """
    Test a simple batch with params
    """
    params = {"blah": "ff"}
    batch = Batch(['INSERT :blah INTO BLAH', 'INSERT * INTO BLOO'], params)
    d = batch.execute(self.connection)
    self.successResultOf(d)
    expected = 'BEGIN BATCH INSERT :blah INTO BLAH'
    expected += ' INSERT * INTO BLOO APPLY BATCH;'
    self.connection.execute.assert_called_once_with(
        expected, params, ConsistencyLevel.ONE)
def _do_create_pol(lastRev):
    queries = []
    cqldata = {"tenantId": self.tenant_id, "groupId": self.uuid}
    outpolicies = _build_policies(data, self.policies_table, self.event_table,
                                  queries, cqldata)

    b = Batch(queries, cqldata,
              consistency=get_consistency_level('create', 'policy'))
    d = b.execute(self.connection)
    return d.addCallback(lambda _: outpolicies)
def test_batch_param(self):
    """
    Test a simple batch with params
    """
    params = {"blah": "ff"}
    batch = Batch(['INSERT :blah INTO BLAH', 'INSERT * INTO BLOO'], params)
    d = batch.execute(self.connection)
    self.successResultOf(d)
    expected = 'BEGIN BATCH INSERT :blah INTO BLAH'
    expected += ' INSERT * INTO BLOO APPLY BATCH;'
    self.connection.execute.assert_called_once_with(expected, params,
                                                    ConsistencyLevel.ONE)
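# The model snippets and the test_batch* cases above all go through a Batch
# helper.  What follows is a minimal sketch of such a class, inferred only
# from what the tests expect (query composition, params, consistency,
# optional timestamp).  It is an assumption rather than the codebase's actual
# implementation, and the ConsistencyLevel import path is likewise assumed.
from silverberg.client import ConsistencyLevel  # assumed import path


class Batch(object):
    """Collect CQL statements and run them as a single CQL batch."""

    def __init__(self, statements, params, consistency=ConsistencyLevel.ONE,
                 timestamp=None):
        self.statements = statements
        self.params = params
        self.consistency = consistency
        self.timestamp = timestamp

    def execute(self, connection):
        # Compose 'BEGIN BATCH [USING TIMESTAMP n] <stmt> <stmt> APPLY BATCH;'
        # and hand it to the connection with the bound parameters and the
        # requested consistency level; the connection returns a Deferred.
        batch = 'BEGIN BATCH '
        if self.timestamp is not None:
            batch += 'USING TIMESTAMP {0} '.format(self.timestamp)
        batch += ' '.join(self.statements)
        batch += ' APPLY BATCH;'
        return connection.execute(batch, self.params, self.consistency)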
def _do_create_pol(lastRev):
    queries = []
    cqldata = {"tenantId": self.tenant_id, "groupId": self.uuid}
    outpolicies = {}
    _build_policies(data, self.policies_table, self.event_table,
                    queries, cqldata, outpolicies)

    b = Batch(queries, cqldata,
              consistency=get_consistency_level('create', 'policy'))
    d = b.execute(self.connection)
    return d.addCallback(lambda _: outpolicies)
def _do_update_policy(_):
    queries = [
        _cql_update_policy.format(cf=self.policies_table, name=":policy")
    ]
    b = Batch(queries,
              {"tenantId": self.tenant_id,
               "groupId": self.uuid,
               "policyId": policy_id,
               "policy": serialize_json_data(data, 1)},
              consistency=get_consistency_level('update', 'policy'))
    return b.execute(self.connection)
def _do_delete(_):
    queries = [
        _cql_delete_all_in_policy.format(cf=self.policies_table),
        _cql_delete_all_in_policy.format(cf=self.webhooks_table),
        _cql_delete_policy_events.format(cf=self.event_table)
    ]
    b = Batch(queries,
              {"tenantId": self.tenant_id,
               "groupId": self.uuid,
               "policyId": policy_id},
              consistency=get_consistency_level('delete', 'policy'))
    return b.execute(self.connection)
def _do_create(lastRev):
    queries = []
    cql_params = {"tenantId": self.tenant_id,
                  "groupId": self.uuid,
                  "policyId": policy_id}
    output = _build_webhooks(data, self.webhooks_table, queries, cql_params)

    b = Batch(queries, cql_params,
              consistency=get_consistency_level('create', 'webhook'))
    d = b.execute(self.connection)
    return d.addCallback(lambda _: output)
def _do_update_config(lastRev):
    queries = [
        _cql_update.format(cf=self.group_table,
                           column='group_config', name=":scaling")
    ]

    b = Batch(queries,
              {"tenantId": self.tenant_id,
               "groupId": self.uuid,
               "scaling": serialize_json_data(data, 1)},
              consistency=get_consistency_level('update', 'partial'))
    return b.execute(self.connection)
def update_events_trigger(self, policy_and_triggers):
    """
    see :meth:`otter.models.interface.IScalingScheduleCollection.update_events_trigger`
    """
    queries = []
    data = {}
    for i, (policy_id, trigger) in enumerate(policy_and_triggers):
        queries.append(
            _cql_update_event.format(cf=self.event_table,
                                     trigger=':trigger{0}'.format(i),
                                     policy_id=':policyid{0}'.format(i)))
        data.update({'trigger{0}'.format(i): trigger,
                     'policyid{0}'.format(i): policy_id})
    b = Batch(queries, data, get_consistency_level('update', 'events'))
    return b.execute(self.connection)
def _do_update_launch(lastRev):
    if "type" in lastRev:
        if lastRev["type"] != data["type"]:
            raise ValidationError("Cannot change type of a scaling policy")

    queries = [_cql_update_policy.format(cf=self.policies_table, name=":policy")]
    b = Batch(queries,
              {"tenantId": self.tenant_id,
               "groupId": self.uuid,
               "policyId": policy_id,
               "policy": serialize_json_data(data, 1)},
              consistency=get_consistency_level('update', 'policy'))
    d = b.execute(self.connection)
    return d
def _do_create(lastRev):
    queries = []
    cql_params = {"tenantId": self.tenant_id,
                  "groupId": self.uuid,
                  "policyId": policy_id}
    output = {}
    _build_webhooks(data, self.webhooks_table, queries, cql_params, output)

    b = Batch(queries, cql_params,
              consistency=get_consistency_level('create', 'webhook'))
    d = b.execute(self.connection)
    return d.addCallback(lambda _: output)
def create_scaling_group(self, log, tenant_id, config, launch, policies=None):
    """
    see :meth:`otter.models.interface.IScalingGroupCollection.create_scaling_group`
    """
    scaling_group_id = generate_key_str('scalinggroup')
    log.bind(
        tenant_id=tenant_id,
        scaling_group_id=scaling_group_id).msg("Creating scaling group")

    queries = [_cql_create_group.format(cf=self.group_table)]

    data = {
        "tenantId": tenant_id,
        "groupId": scaling_group_id,
        "group_config": serialize_json_data(config, 1),
        "launch_config": serialize_json_data(launch, 1),
        "active": '{}',
        "pending": '{}',
        "policyTouched": '{}',
        "paused": False,
        "created_at": datetime.utcnow()
    }

    outpolicies = {}
    _build_policies(policies, self.policies_table, self.event_table,
                    queries, data, outpolicies)

    b = Batch(queries, data,
              consistency=get_consistency_level('create', 'group'))
    d = b.execute(self.connection)
    d.addCallback(
        lambda _: {
            'groupConfiguration': config,
            'launchConfiguration': launch,
            'scalingPolicies': outpolicies,
            'id': scaling_group_id
        })
    return d
def _do_update_launch(lastRev):
    if "type" in lastRev:
        if lastRev["type"] != data["type"]:
            raise ValidationError("Cannot change type of a scaling policy")
        # TODO: Fix in https://issues.rax.io/browse/AUTO-467
        if lastRev["type"] == 'schedule':
            if lastRev["args"] != data["args"]:
                raise ValidationError("Cannot change scheduled args")

    queries = [_cql_update_policy.format(cf=self.policies_table, name=":policy")]
    b = Batch(queries,
              {"tenantId": self.tenant_id,
               "groupId": self.uuid,
               "policyId": policy_id,
               "policy": serialize_json_data(data, 1)},
              consistency=get_consistency_level('update', 'policy'))
    d = b.execute(self.connection)
    return d
def _delete_everything(policies):
    params = {
        'tenantId': self.tenant_id,
        'groupId': self.uuid
    }
    queries = [
        _cql_delete_all_in_group.format(cf=table) for table in
        (self.group_table, self.policies_table, self.webhooks_table)]

    if len(policies) > 0:
        events_query, events_params = _delete_many_query_and_params(
            self.event_table, '"policyId"', [p['id'] for p in policies])
        queries.append(events_query)
        params.update(events_params)

    b = Batch(queries, params,
              consistency=get_consistency_level('delete', 'group'))
    return b.execute(self.connection)
def _delete_everything(policies):
    params = {'tenantId': self.tenant_id, 'groupId': self.uuid}
    queries = [
        _cql_delete_all_in_group.format(cf=table) for table in
        (self.group_table, self.policies_table, self.webhooks_table)
    ]

    if len(policies) > 0:
        events_query, events_params = _delete_many_query_and_params(
            self.event_table, '"policyId"', policies.keys())
        queries.append(events_query)
        params.update(events_params)

    b = Batch(queries, params,
              consistency=get_consistency_level('delete', 'group'))
    return b.execute(self.connection)
def _delete_everything(policies):
    params = {
        'tenantId': self.tenant_id,
        'groupId': self.uuid
    }
    queries = [
        _cql_delete_all_in_group.format(cf=table) for table in
        (self.config_table, self.launch_table, self.policies_table,
         self.webhooks_table, self.state_table)]

    if len(policies) > 0:
        events_query, events_params = _delete_events_query_and_params(
            policies.keys(), self.event_table)
        queries.append(events_query.rstrip(';'))
        params.update(events_params)

    b = Batch(queries, params,
              consistency=get_consistency_level('delete', 'group'))
    return b.execute(self.connection)