def publish_docs_main(args, base_path, top_level):
    """
    Parse command-line options and publish release documentation to S3.

    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = PublishDocsOptions()
    try:
        options.parseOptions(args)
    except UsageError as e:
        # Conventional "<program>: <error>" report, non-zero exit.
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)
    try:
        # boto_dispatcher performs the S3 intents; base_dispatcher handles
        # the generic effect intents.
        sync_perform(
            dispatcher=ComposedDispatcher([boto_dispatcher, base_dispatcher]),
            effect=publish_docs(
                flocker_version=options['flocker-version'],
                doc_version=options['doc-version'],
                environment=options.environment,
            ))
    except NotARelease:
        sys.stderr.write("%s: Can't publish non-release.\n"
                         % (base_path.basename(),))
        raise SystemExit(1)
    except NotTagged:
        sys.stderr.write(
            "%s: Can't publish non-tagged version to production.\n"
            % (base_path.basename(),))
        raise SystemExit(1)
def perform_run_remotely(base_dispatcher, intent):
    """
    Run a series of commands on a remote host.

    :param base_dispatcher: Fallback dispatcher for any intents other than
        the remote-command intents handled here.
    :param intent: Intent carrying ``username``, ``address`` and the
        ``commands`` effect to perform over the SSH connection.
    """
    # Handle the four remote-command intents locally; everything else is
    # delegated to the caller-supplied dispatcher.
    dispatcher = ComposedDispatcher([
        TypeDispatcher({
            Run: perform_run,
            Sudo: perform_sudo,
            Put: perform_put,
            Comment: perform_comment,
        }),
        base_dispatcher,
    ])
    host_string = "%s@%s" % (intent.username, intent.address)
    # NOTE(review): 24 attempts x 5s timeout gives the host roughly two
    # minutes to accept connections — presumably tuned for freshly booted
    # machines; confirm against the provisioning flow.
    with settings(
            connection_attempts=24,
            timeout=5,
            pty=False,
            host_string=host_string):
        sync_perform(dispatcher, intent.commands)
    disconnect_all()
def update_repo(self, aws, yum, rpm_directory, target_bucket, target_key,
                source_repo, packages, flocker_version, distro_name,
                distro_version):
    """
    Call :func:``update_repo``, interacting with a fake AWS and yum
    utilities.

    :param FakeAWS aws: Fake AWS to interact with.
    :param FakeYum yum: Fake yum utilities to interact with.

    See :py:func:`update_repo` for other parameter documentation.
    """
    # The fakes' dispatchers are listed first so they intercept their
    # intents before the base dispatcher sees them.
    dispatchers = [aws.get_dispatcher(), yum.get_dispatcher(),
                   base_dispatcher]
    sync_perform(
        ComposedDispatcher(dispatchers),
        update_repo(
            rpm_directory=rpm_directory,
            target_bucket=target_bucket,
            target_key=target_key,
            source_repo=source_repo,
            packages=packages,
            flocker_version=flocker_version,
            distro_name=distro_name,
            distro_version=distro_version,
        )
    )
def test_basic_usage(self):
    """
    Using an implementation of a ziffect interface, effects dispatched to
    that implementation invoke it and record the calls with the arguments
    they were given.
    """
    utils_effects = ziffect.effects(Utils)
    my_call_logger = RecordCallsUtils()
    # The recording fake must itself provide the Utils interface.
    self.expectThat(my_call_logger, ziffect.matchers.Provides(Utils))
    dispatcher = ziffect.dispatcher({
        Utils: my_call_logger
    })
    sync_perform(
        dispatcher,
        utils_effects.add(operator_a=12, operator_b=23)
    )
    self.expectThat(
        my_call_logger.calls['add'], Equals([(12, 23)]))
    sync_perform(
        dispatcher,
        utils_effects.concat(operator_a='me', operator_b='ow')
    )
    self.expectThat(
        my_call_logger.calls['concat'], Equals([('me', 'ow')])
    )
def run_for_docs(effect):
    """
    Perform ``effect``, recording the shell commands it would run instead
    of executing them.

    :param effect: Effect composed of ``Run``/``Sudo``/``Comment``/``Put``
        intents.
    :return: ``list`` of recorded commands; a ``Put`` intent is recorded
        as a list of heredoc lines.
    """
    commands = []

    @sync_performer
    def run(dispatcher, intent):
        commands.append(intent.command)

    @sync_performer
    def sudo(dispatcher, intent):
        commands.append("sudo %s" % (intent.command,))

    @sync_performer
    def comment(dispatcher, intent):
        commands.append("# %s" % (intent.comment))

    @sync_performer
    def put(dispatcher, intent):
        # Record a file upload as the equivalent heredoc invocation.
        commands.append(["cat <<EOF > %s" % (intent.path,)]
                        + intent.content.splitlines()
                        + ["EOF"])

    sync_perform(
        ComposedDispatcher([TypeDispatcher({Run: run,
                                            Sudo: sudo,
                                            Comment: comment,
                                            Put: put}),
                            base_dispatcher]),
        effect,
    )
    return commands
def publish_docs_main(args, base_path, top_level):
    """
    Parse command-line options and publish release documentation to S3.

    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = PublishDocsOptions()
    try:
        options.parseOptions(args)
    except UsageError as e:
        # Conventional "<program>: <error>" report, non-zero exit.
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)
    try:
        # boto_dispatcher performs the S3 intents; base_dispatcher handles
        # the generic effect intents.
        sync_perform(dispatcher=ComposedDispatcher(
            [boto_dispatcher, base_dispatcher]),
            effect=publish_docs(
                flocker_version=options['flocker-version'],
                doc_version=options['doc-version'],
                environment=options.environment,
            ))
    except NotARelease:
        sys.stderr.write("%s: Can't publish non-release.\n"
                         % (base_path.basename(), ))
        raise SystemExit(1)
    except NotTagged:
        sys.stderr.write(
            "%s: Can't publish non-tagged version to production.\n"
            % (base_path.basename(), ))
        raise SystemExit(1)
def test_print(self):
    """The program performs exactly one Print of the quest prompt."""
    expected_interactions = [
        (Print('What... is your quest?'), lambda _: None),
    ]
    dispatcher = SequenceDispatcher(expected_interactions)
    with dispatcher.consume():
        sync_perform(dispatcher, program())
def publish_artifacts_main(args, base_path, top_level):
    """
    Publish release artifacts.

    :param list args: The arguments passed to the scripts.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = UploadOptions()
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)
    # NOTE(review): these handlers imply parseOptions validates the
    # version and raises NotARelease/DocumentationRelease itself — confirm
    # in UploadOptions.
    except NotARelease:
        sys.stderr.write("%s: Can't publish artifacts for a non-release.\n"
                         % (base_path.basename(),))
        raise SystemExit(1)
    except DocumentationRelease:
        sys.stderr.write("%s: Can't publish artifacts for a documentation "
                         "release.\n" % (base_path.basename(),))
        raise SystemExit(1)
    dispatcher = ComposedDispatcher([boto_dispatcher, yum_dispatcher,
                                     base_dispatcher])
    # One scratch area per artifact type, cleaned up unconditionally below.
    scratch_directory = FilePath(tempfile.mkdtemp(
        prefix=b'flocker-upload-'))
    scratch_directory.child('packages').createDirectory()
    scratch_directory.child('python').createDirectory()
    scratch_directory.child('pip').createDirectory()
    try:
        sync_perform(
            dispatcher=dispatcher,
            effect=sequence([
                upload_packages(
                    scratch_directory=scratch_directory.child('packages'),
                    target_bucket=options['target'],
                    version=options['flocker-version'],
                    build_server=options['build-server'],
                    top_level=top_level,
                ),
                upload_python_packages(
                    scratch_directory=scratch_directory.child('python'),
                    target_bucket=options['target'],
                    top_level=top_level,
                    output=sys.stdout,
                    error=sys.stderr,
                ),
                upload_pip_index(
                    scratch_directory=scratch_directory.child('pip'),
                    target_bucket=options['target'],
                ),
            ]),
        )
    finally:
        scratch_directory.remove()
def assert_handles_nova_rate_limiting(self, intent, effect):
    """
    If the provided intent returns a response consistent with Nova
    rate-limiting requests, then performing the effect will raise a
    :class:`NovaRateLimitError`.
    """
    # Body shaped like Nova's 413 "overLimit" error document.
    failure_body = {
        "overLimit": {
            "code": 413,
            "message": "OverLimit Retry...",
            "details": "Error Details...",
            "retryAfter": "2015-02-27T23:42:27Z"
        }
    }
    dispatcher = EQFDispatcher([
        (intent, service_request_eqf(
            stub_pure_response(json.dumps(failure_body), 413)))
    ])
    with self.assertRaises(NovaRateLimitError) as cm:
        sync_perform(dispatcher, effect)
    # The raised error carries the "message" field of the response body.
    self.assertEqual(cm.exception,
                     NovaRateLimitError("OverLimit Retry..."))
def test_print(self):
    """Exactly one Print of the quest prompt is performed."""
    script = SequenceDispatcher([
        (Print('What... is your quest?'), lambda _: None),
    ])
    with script.consume():
        sync_perform(script, program())
def test_err_from_tuple(self):
    """
    exc_info tuple can be passed as failure when constructing LogErr
    in which case failure will be constructed from the tuple
    """
    # Pass a raw (type, value, traceback) triple rather than a Failure.
    eff = err((ValueError, ValueError("a"), None), "why")
    sync_perform(self.disp, eff)
    self.log.err.assert_called_once_with(CheckFailureValue(ValueError("a")),
                                         "why", f1="v")
def test_echo(self, line):
    """Whatever line is read back is printed after the prompt."""
    script = SequenceDispatcher([
        (Print('What... is your quest?'), lambda _: None),
        (Readline(), lambda _: line),
        (Print(line), lambda _: None),
    ])
    with script.consume():
        sync_perform(script, echo())
def test_echo(self, line):
    """The echoed output equals the line supplied to Readline."""
    interactions = [
        (Print('What... is your quest?'), lambda _: None),
        (Readline(), lambda _: line),
        (Print(line), lambda _: None),
    ]
    dispatcher = SequenceDispatcher(interactions)
    with dispatcher.consume():
        sync_perform(dispatcher, echo())
def test_add_clb_nodes(self):
    """
    Produce a request for adding nodes to a load balancer, which returns
    a successful result on a 202.

    Parse the common CLB errors, and a :class:`CLBDuplicateNodesError`.
    """
    nodes = [{"address": "1.1.1.1", "port": 80, "condition": "ENABLED"},
             {"address": "1.1.1.2", "port": 80, "condition": "ENABLED"},
             {"address": "1.1.1.5", "port": 81, "condition": "ENABLED"}]
    eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes)
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS,
        'POST',
        'loadbalancers/{0}/nodes'.format(self.lb_id),
        data={'nodes': nodes},
        success_pred=has_code(202))

    # success: 202 passes straight through and the request is logged
    seq = [
        (expected.intent, lambda i: stub_json_response({}, 202, {})),
        (log_intent('request-add-clb-nodes', {}), lambda _: None)]
    self.assertEqual(perform_sequence(seq, eff),
                     (StubResponse(202, {}), {}))

    # CLBDuplicateNodesError failure: CLB reports duplicates as a 422
    msg = ("Duplicate nodes detected. One or more nodes already "
           "configured on load balancer.")
    duplicate_nodes = stub_pure_response(
        json.dumps({'message': msg, 'code': 422}), 422)
    dispatcher = EQFDispatcher([(
        expected.intent, service_request_eqf(duplicate_nodes))])
    with self.assertRaises(CLBDuplicateNodesError) as cm:
        sync_perform(dispatcher, eff)
    self.assertEqual(
        cm.exception,
        CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id)))

    # CLBNodeLimitError failure: node limit exceeded is a 413
    msg = "Nodes must not exceed 25 per load balancer."
    limit = stub_pure_response(
        json.dumps({'message': msg, 'code': 413}), 413)
    dispatcher = EQFDispatcher([(
        expected.intent, service_request_eqf(limit))])
    with self.assertRaises(CLBNodeLimitError) as cm:
        sync_perform(dispatcher, eff)
    self.assertEqual(
        cm.exception,
        CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id),
                          node_limit=25))

    # all the common failures
    assert_parses_common_clb_errors(self, expected.intent, eff, "123456")
def publish_dev_box_main(args, base_path, top_level):
    """
    Publish a development Vagrant box.

    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = PublishDevBoxOptions()
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)
    scratch_directory = FilePath(tempfile.mkdtemp(
        prefix=b'flocker-upload-'))
    scratch_directory.child('vagrant').createDirectory()
    box_type = "flocker-dev"
    prefix = 'vagrant/dev/'
    box_name = "{box_type}-{version}.box".format(
        box_type=box_type,
        version=options['flocker-version'],
    )
    # Public URL the metadata will point at once the key is copied over.
    box_url = "https://{bucket}.s3.amazonaws.com/{key}".format(
        bucket=options['target'],
        key=prefix + box_name,
    )
    sync_perform(
        dispatcher=ComposedDispatcher([boto_dispatcher, base_dispatcher]),
        effect=sequence([
            # Copy the already-built box from the dev archive into the
            # target bucket, then regenerate the Vagrant metadata that
            # points at it.
            Effect(
                CopyS3Keys(
                    source_bucket=DEV_ARCHIVE_BUCKET,
                    source_prefix=prefix,
                    destination_bucket=options['target'],
                    destination_prefix=prefix,
                    keys=[box_name],
                )
            ),
            publish_vagrant_metadata(
                version=options['flocker-version'],
                box_url=box_url,
                scratch_directory=scratch_directory.child('vagrant'),
                box_name=box_type,
                target_bucket=options['target'],
            ),
        ]),
    )
def test_err_from_tuple(self):
    """
    exc_info tuple can be passed as failure when constructing LogErr
    in which case failure will be constructed from the tuple
    """
    # Pass a raw (type, value, traceback) triple rather than a Failure.
    eff = err((ValueError, ValueError("a"), None), "why")
    sync_perform(self.disp, eff)
    self.log.err.assert_called_once_with(CheckFailureValue(
        ValueError('a')), 'why', f1='v')
def test_change_clb_node(self):
    """
    Produce a request for modifying a node on a load balancer, which
    returns a successful result on 202.

    Parse the common CLB errors, and :class:`NoSuchCLBNodeError`.
    """
    eff = change_clb_node(lb_id=self.lb_id, node_id='1234',
                          condition="DRAINING", weight=50,
                          _type='SECONDARY')
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS,
        'PUT',
        'loadbalancers/{0}/nodes/1234'.format(self.lb_id),
        data={'node': {'condition': 'DRAINING',
                       'weight': 50, 'type': 'SECONDARY'}},
        success_pred=has_code(202))

    # success: a 202 with an empty body maps to (None, 202)
    dispatcher = EQFDispatcher([
        (expected.intent,
         service_request_eqf(stub_pure_response('', 202)))
    ])
    self.assertEqual(sync_perform(dispatcher, eff),
                     stub_pure_response(None, 202))

    # NoSuchCLBNode failure: a 404 with CLB's message is re-raised as a
    # typed error carrying the lb and node ids
    msg = "Node with id #1234 not found for loadbalancer #{0}".format(
        self.lb_id)
    no_such_node = stub_pure_response(
        json.dumps({'message': msg, 'code': 404}), 404)
    dispatcher = EQFDispatcher([(expected.intent,
                                 service_request_eqf(no_such_node))])
    with self.assertRaises(NoSuchCLBNodeError) as cm:
        sync_perform(dispatcher, eff)
    self.assertEqual(
        cm.exception,
        NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id),
                           node_id=u'1234'))

    # all the common failures
    assert_parses_common_clb_errors(self, expected.intent, eff, "123456")
def publish_docs(self, aws, flocker_version, doc_version, environment):
    """
    Call :func:``publish_docs``, interacting with a fake AWS.

    :param FakeAWS aws: Fake AWS to interact with.
    :param flocker_version: See :py:func:`publish_docs`.
    :param doc_version: See :py:func:`publish_docs`.
    :param environment: See :py:func:`environment`.
    """
    # The fake AWS dispatcher intercepts S3 intents before the base
    # dispatcher sees them.
    sync_perform(
        ComposedDispatcher([aws.get_dispatcher(), base_dispatcher]),
        publish_docs(flocker_version, doc_version,
                     environment=environment))
def test_challenge(self, line):
    """The challenge repeats the quest prompt until the right answer."""
    script = SequenceDispatcher([
        (Print('What... is your quest?'), lambda _: None),
        (Readline(), lambda _: line),
        (Print('What... is your quest?'), lambda _: None),
        (Readline(), lambda _: 'To seek the Holy Grail.\n'),
        (Print('What... is your favourite colour?'), lambda _: None),
    ])
    with script.consume():
        full_dispatcher = ComposedDispatcher([
            script,
            base_dispatcher,
        ])
        sync_perform(full_dispatcher, challenge())
def test_print(self):
    """Performing the program emits exactly the quest prompt."""
    captured = []

    @sync_performer
    def record_print(dispatcher, intent):
        captured.append(intent.line)

    dispatcher = ComposedDispatcher([
        TypeDispatcher({
            Print: record_print,
        }),
        base_dispatcher])
    sync_perform(dispatcher, program())
    self.assertEqual(["What... is your quest?"], captured)
def test_err_from_context(self):
    """
    When None is passed as the failure, the exception comes from the
    context at the time of creating the intent, not the time at which the
    intent is performed.
    """
    try:
        raise RuntimeError("original")
    except RuntimeError:
        # Intent created while "original" is the active exception.
        eff = err(None, "why")
    try:
        raise RuntimeError("performing")
    except RuntimeError:
        # Performed while a *different* exception is active; the logged
        # failure must still be "original".
        sync_perform(self.disp, eff)
    self.log.err.assert_called_once_with(
        CheckFailureValue(RuntimeError("original")), "why", f1="v")
def test_msg(self):
    """
    message is logged with original field
    """
    outcome = sync_perform(self.disp, msg("yo!"))
    self.assertIsNone(outcome)
    self.log.msg.assert_called_once_with("yo!", f1='v')
def test_invalidate_token(self):
    """Performing causes a call to authenticator.invalidate."""
    mock_auth = iMock(ICachingAuthenticator)
    mock_auth.invalidate.return_value = None
    eff = Effect(InvalidateToken(mock_auth, 'tenant_id1'))
    # Performing the intent returns None and delegates to the
    # authenticator with the tenant id from the intent.
    self.assertEqual(sync_perform(get_simple_dispatcher(None), eff), None)
    mock_auth.invalidate.assert_called_once_with('tenant_id1')
def test_multiple_msg(self):
    """
    Multiple messages are logged when there are multiple log effects
    """
    chained = msg("yo", a="b").on(lambda _: msg("goo", d="c"))
    self.assertIsNone(sync_perform(self.disp, chained))
    expected_calls = [mock.call("yo", f1="v", a="b"),
                      mock.call("goo", f1="v", d="c")]
    self.log.msg.assert_has_calls(expected_calls)
def _stop_acceptance_cluster():
    """
    Stop the Flocker cluster configured for the acceptance tests.

    XXX https://clusterhq.atlassian.net/browse/FLOC-1563 Flocker doesn't
    support using flocker-deploy along-side flocker-control and
    flocker-agent. Since flocker-deploy (in it's SSH using incarnation) is
    going away, we do the hack of stopping the cluster before running
    tests that use flocker-deploy. This introduces an order dependency on
    the acceptance test-suite.

    This also removes the environment variables associated with the
    cluster, so that tests attempting to use it will be skipped.

    :return: A ``Deferred`` which fires when the cluster is stopped.
    """
    # pop() removes the variables so later tests see no configured cluster.
    control_node = environ.pop("FLOCKER_ACCEPTANCE_CONTROL_NODE", None)
    agent_nodes_env_var = environ.pop("FLOCKER_ACCEPTANCE_AGENT_NODES", "")
    # list(...) so the emptiness check below works on Python 3, where
    # filter() returns a lazy iterator that is always truthy.
    agent_nodes = list(filter(None, agent_nodes_env_var.split(':')))
    if control_node and agent_nodes:
        return succeed(
            sync_perform(dispatcher,
                         stop_cluster(control_node, agent_nodes)))
    else:
        return succeed(None)
def test_returns_flat_list_of_rcv3nodes(self):
    """
    All the nodes returned are in a flat list.
    """
    # Two pools ("0" and "1"), each reporting two nodes.
    dispatcher = self.get_dispatcher([
        (service_request(ServiceType.RACKCONNECT_V3, 'GET',
                         'load_balancer_pools').intent,
         (None, [{'id': str(i)} for i in range(2)])),
        (service_request(ServiceType.RACKCONNECT_V3, 'GET',
                         'load_balancer_pools/0/nodes').intent,
         (None, [{'id': "0node{0}".format(i),
                  'cloud_server': {'id': '0server{0}'.format(i)}}
                 for i in range(2)])),
        (service_request(ServiceType.RACKCONNECT_V3, 'GET',
                         'load_balancer_pools/1/nodes').intent,
         (None, [{'id': "1node{0}".format(i),
                  'cloud_server': {'id': '1server{0}'.format(i)}}
                 for i in range(2)])),
    ])
    # Sorted comparison: only flatness and contents matter, not order.
    self.assertEqual(
        sorted(sync_perform(dispatcher, get_rcv3_contents())),
        sorted(
            [RCv3Node(node_id='0node0', cloud_server_id='0server0',
                      description=RCv3Description(lb_id='0')),
             RCv3Node(node_id='0node1', cloud_server_id='0server1',
                      description=RCv3Description(lb_id='0')),
             RCv3Node(node_id='1node0', cloud_server_id='1server0',
                      description=RCv3Description(lb_id='1')),
             RCv3Node(node_id='1node1', cloud_server_id='1server1',
                      description=RCv3Description(lb_id='1'))]))
def _stop_acceptance_cluster():
    """
    Stop the Flocker cluster configured for the acceptance tests.

    XXX https://clusterhq.atlassian.net/browse/FLOC-1563 Flocker doesn't
    support using flocker-deploy along-side flocker-control and
    flocker-agent. Since flocker-deploy (in it's SSH using incarnation) is
    going away, we do the hack of stopping the cluster before running
    tests that use flocker-deploy. This introduces an order dependency on
    the acceptance test-suite.

    This also removes the environment variables associated with the
    cluster, so that tests attempting to use it will be skipped.

    :return: A ``Deferred`` which fires when the cluster is stopped.
    """
    # pop() removes the variables so later tests see no configured cluster.
    control_node = environ.pop("FLOCKER_ACCEPTANCE_CONTROL_NODE", None)
    agent_nodes_env_var = environ.pop("FLOCKER_ACCEPTANCE_AGENT_NODES", "")
    # list(...) so the emptiness check below works on Python 3, where
    # filter() returns a lazy iterator that is always truthy.
    agent_nodes = list(filter(None, agent_nodes_env_var.split(':')))
    if control_node and agent_nodes:
        return succeed(sync_perform(
            dispatcher,
            stop_cluster(control_node, agent_nodes)
        ))
    else:
        return succeed(None)
def test_change_clb_node_default_type(self):
    """
    Produce a request for modifying a node on a load balancer with the
    default type, which returns a successful result on 202.
    """
    # No _type argument: the request body must default to 'PRIMARY'.
    eff = change_clb_node(lb_id=self.lb_id, node_id='1234',
                          condition="DRAINING", weight=50)
    expected = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS,
        'PUT',
        'loadbalancers/{0}/nodes/1234'.format(self.lb_id),
        data={'node': {'condition': 'DRAINING',
                       'weight': 50, 'type': 'PRIMARY'}},
        success_pred=has_code(202))
    dispatcher = EQFDispatcher([
        (expected.intent,
         service_request_eqf(stub_pure_response('', 202)))
    ])
    # An empty 202 body maps to (None, 202).
    self.assertEqual(sync_perform(dispatcher, eff),
                     stub_pure_response(None, 202))
def test_throttling(self):
    """
    When the throttler function returns a bracketing function, it's used
    to throttle the request.
    """
    def throttler(stype, method, tid):
        # Only this exact (service, method, tenant) combination is
        # throttled; any other combination implicitly returns None.
        if (stype == ServiceType.CLOUD_SERVERS and
                method == 'get' and
                tid == 1):
            return bracket
    bracket = object()
    svcreq = service_request(
        ServiceType.CLOUD_SERVERS, 'GET', 'servers').intent
    response = stub_pure_response({}, 200)
    # The auth + HTTP request must happen *inside* the _Throttle intent.
    seq = SequenceDispatcher([
        (_Throttle(bracket=bracket, effect=mock.ANY),
         nested_sequence([
             (Authenticate(authenticator=self.authenticator,
                           tenant_id=1,
                           log=self.log),
              lambda i: ('token', fake_service_catalog)),
             (Request(method='GET',
                      url='http://dfw.openstack/servers',
                      headers=headers('token'),
                      log=self.log),
              lambda i: response),
         ])),
    ])
    eff = self._concrete(svcreq, throttler=throttler)
    with seq.consume():
        result = sync_perform(seq, eff)
    self.assertEqual(result, (response[0], {}))
def test_msg_with_params(self):
    """
    message is logged with its fields combined
    """
    outcome = sync_perform(self.disp, msg("yo!", a='b'))
    self.assertIsNone(outcome)
    self.log.msg.assert_called_once_with("yo!", f1='v', a='b')
def test_configuration(self):
    """
    Source AMI ID, build region, and target regions can all be overridden
    in a chosen template.
    """
    expected_build_region = AWS_REGIONS.EU_WEST_1
    expected_publish_regions = [AWS_REGIONS.AP_NORTHEAST_1,
                                AWS_REGIONS.AP_SOUTHEAST_1,
                                AWS_REGIONS.AP_SOUTHEAST_2]
    expected_source_ami = random_name(self)
    intent = PackerConfigure(
        build_region=expected_build_region,
        publish_regions=expected_publish_regions,
        source_ami=expected_source_ami,
        template=u"docker",
        distribution=u"ubuntu-14.04",
    )
    # Call the performer; it writes a packer JSON config and returns its
    # path.
    packer_configuration_path = sync_perform(
        dispatcher=RealPerformers(
            working_directory=self.make_temporary_directory()
        ).dispatcher(),
        effect=Effect(intent=intent),
    )
    with packer_configuration_path.open("r") as f:
        packer_configuration = json.load(f)
    # The template is expected to contain exactly one builder and one
    # provisioner.
    [builder] = packer_configuration["builders"]
    build_region = builder["region"]
    build_source_ami = builder["source_ami"]
    publish_regions = builder["ami_regions"]
    [provisioner] = packer_configuration["provisioners"]
    # Region ordering is not significant, hence the set comparison.
    self.assertEqual(
        (expected_build_region.value,
         set(c.value for c in expected_publish_regions),
         expected_source_ami),
        (build_region, set(publish_regions), build_source_ami),
    )
def test_fold_effect_empty():
    """
    Returns an Effect resulting in the initial value when there are no
    effects.
    """
    folded = fold_effect(operator.add, 0, [])
    assert sync_perform(base_dispatcher, folded) == 0
def test_throttling(self):
    """
    When the throttler function returns a bracketing function, it's used
    to throttle the request.
    """
    def throttler(stype, method, tid):
        # Only this exact (service, method, tenant) combination is
        # throttled; any other combination implicitly returns None.
        if (stype == ServiceType.CLOUD_SERVERS and
                method == 'get' and tid == 1):
            return bracket
    bracket = object()
    svcreq = service_request(ServiceType.CLOUD_SERVERS, 'GET',
                             'servers').intent
    response = stub_pure_response({}, 200)
    # The auth + HTTP request must happen *inside* the _Throttle intent.
    seq = SequenceDispatcher([
        (_Throttle(bracket=bracket, effect=mock.ANY),
         nested_sequence([
             (Authenticate(authenticator=self.authenticator,
                           tenant_id=1, log=self.log),
              lambda i: ('token', fake_service_catalog)),
             (Request(method='GET',
                      url='http://dfw.openstack/servers',
                      headers=headers('token'), log=self.log),
              lambda i: response),
         ])),
    ])
    eff = self._concrete(svcreq, throttler=throttler)
    with seq.consume():
        result = sync_perform(seq, eff)
    self.assertEqual(result, (response[0], {}))
def test_nested_msg(self):
    """
    message is logged when nested inside other effects
    """
    chained = Effect(Constant("foo")).on(lambda _: msg("yo", a="b"))
    chained = chained.on(lambda _: Effect(Constant("goo")))
    self.assertEqual(sync_perform(self.disp, chained), "goo")
    self.log.msg.assert_called_once_with("yo", f1="v", a="b")
def test_publish_autoscale_event(self):
    """
    Publish an event to cloudfeeds. Successfully handle non-JSON data.
    """
    _log = object()
    eff = cf.publish_autoscale_event({'event': 'stuff'}, log=_log)
    expected = service_request(
        ServiceType.CLOUD_FEEDS, 'POST',
        'autoscale/events',
        headers={
            'content-type': ['application/vnd.rackspace.atom+json']},
        data={'event': 'stuff'}, log=_log,
        success_pred=has_code(201),
        json_response=False)

    # success: the XML body is returned verbatim (not JSON-decoded)
    dispatcher = EQFDispatcher([(
        expected.intent,
        service_request_eqf(stub_pure_response('<this is xml>', 201)))])
    resp, body = sync_perform(dispatcher, eff)
    self.assertEqual(body, '<this is xml>')

    # Add regression test that 202 should be an API error because this
    # is a bug in CF
    dispatcher = EQFDispatcher([(
        expected.intent,
        service_request_eqf(stub_pure_response('<this is xml>', 202)))])
    self.assertRaises(APIError, sync_perform, dispatcher, eff)
def wrapper(*args, **kwargs):
    """Invoke the wrapped handler, performing any Effect it returns."""
    current_request = request._get_current_object()
    outcome = handler(current_request, *args, **kwargs)
    if not isinstance(outcome, Effect):
        return outcome
    return sync_perform(dispatcher, outcome)
def publish_rpms_main(args, base_path, top_level):
    """
    The ClusterHQ yum repository contains packages for Flocker, as well as
    the dependencies which aren't available in Fedora 20 or CentOS 7. It
    is currently hosted on Amazon S3. When doing a release, we want to add
    the new Flocker packages, while preserving the existing packages in
    the repository. To do this, we download the current repository, add
    the new package, update the metadata, and then upload the repository.

    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = UploadOptions()
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)
    dispatcher = ComposedDispatcher([boto_dispatcher, yum_dispatcher,
                                     base_dispatcher])
    # Create the scratch directory *before* entering the try block: the
    # original assigned it inside the try, so a failing mkdtemp would have
    # raised NameError from the finally clause.
    scratch_directory = FilePath(tempfile.mkdtemp(
        prefix=b'flocker-upload-rpm-'))
    try:
        sync_perform(
            dispatcher=dispatcher,
            effect=upload_rpms(
                scratch_directory=scratch_directory,
                target_bucket=options['target'],
                version=options['flocker-version'],
                build_server=options['build-server'],
            ))
    except NotARelease:
        # Trailing newlines added: the original messages ran into the
        # next line of terminal output.
        sys.stderr.write("%s: Can't upload RPMs for a non-release.\n"
                         % (base_path.basename(),))
        raise SystemExit(1)
    except DocumentationRelease:
        sys.stderr.write("%s: Can't upload RPMs for a documentation "
                         "release.\n" % (base_path.basename(),))
        raise SystemExit(1)
    finally:
        scratch_directory.remove()
def test_create(self):
    """CreateNode stores the value in the model and returns the path."""
    model = ZKCrudModel()
    model.create_makepath = False
    create_effect = Effect(zk.CreateNode(path='/foo', value="v"))
    outcome = sync_perform(get_zk_dispatcher(model), create_effect)
    self.assertEqual(model.nodes, {"/foo": ("v", 0)})
    self.assertEqual(outcome, '/foo')
def test_err(self):
    """
    error is logged with original field
    """
    failure = object()
    outcome = sync_perform(self.disp, err(failure, "yo!"))
    self.assertIsNone(outcome)
    self.log.err.assert_called_once_with(failure, "yo!", f1='v')
def test_returns_retry(self):
    """
    `ConvergeLater.as_effect` returns effect with RETRY
    """
    step = ConvergeLater(reasons=['building'])
    outcome = sync_perform(base_dispatcher, step.as_effect())
    self.assertEqual(outcome, (StepResult.RETRY, ['building']))
def test_perform_retry(self):
    """
    When the specified effect is successful, its result is propagated.
    """
    # should_retry would raise ZeroDivisionError if consulted; success
    # must never invoke it.
    retry_intent = Retry(
        effect=Effect(Constant('foo')),
        should_retry=lambda e: 1 / 0,
    )
    self.assertEqual(
        sync_perform(self.dispatcher, Effect(retry_intent)), 'foo')
def test_err_with_params(self):
    """
    error is logged with its fields combined
    """
    failure = object()
    outcome = sync_perform(self.disp, err(failure, "yo!", a='b'))
    self.assertIsNone(outcome)
    self.log.err.assert_called_once_with(failure, "yo!", f1='v', a='b')
def test_nested_msg(self):
    """
    message is logged when nested inside other effects
    """
    first = Effect(Constant("foo"))
    chained = first.on(lambda _: msg("yo", a='b')).on(
        lambda _: Effect(Constant("goo")))
    self.assertEqual(sync_perform(self.disp, chained), "goo")
    self.log.msg.assert_called_once_with("yo", f1='v', a='b')
def publish_rpms_main(args, base_path, top_level):
    """
    The ClusterHQ yum repository contains packages for Flocker, as well as
    the dependencies which aren't available in Fedora 20 or CentOS 7. It
    is currently hosted on Amazon S3. When doing a release, we want to add
    the new Flocker packages, while preserving the existing packages in
    the repository. To do this, we download the current repository, add
    the new package, update the metadata, and then upload the repository.

    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = UploadOptions()
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)
    dispatcher = ComposedDispatcher(
        [boto_dispatcher, yum_dispatcher, base_dispatcher])
    # Create the scratch directory *before* entering the try block: the
    # original assigned it inside the try, so a failing mkdtemp would have
    # raised NameError from the finally clause.
    scratch_directory = FilePath(
        tempfile.mkdtemp(prefix=b'flocker-upload-rpm-'))
    try:
        sync_perform(dispatcher=dispatcher,
                     effect=upload_rpms(
                         scratch_directory=scratch_directory,
                         target_bucket=options['target'],
                         version=options['flocker-version'],
                         build_server=options['build-server'],
                     ))
    except NotARelease:
        # Trailing newlines added: the original messages ran into the
        # next line of terminal output.
        sys.stderr.write("%s: Can't upload RPMs for a non-release.\n"
                         % (base_path.basename(), ))
        raise SystemExit(1)
    except DocumentationRelease:
        sys.stderr.write("%s: Can't upload RPMs for a documentation "
                         "release.\n" % (base_path.basename(), ))
        raise SystemExit(1)
    finally:
        scratch_directory.remove()