def prepare():
    """Build a DockerBuildWorkflow wired with mocked OSBS and environment."""
    def set_annotations_on_build(build_id, labels, namespace='default'):
        # Stubbed OSBS call: verify the namespace parsed from the mocked
        # BUILD env var is propagated through.
        assert namespace == 'namespace'

    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = '''
{
  "metadata": {
    "name": "asd",
    "namespace": "namespace"
  }
}
'''
    flexmock(OSBS, set_annotations_on_build=set_annotations_on_build)
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"},
                                   "test-image")
    workflow.push_conf.add_pulp_registry("test", LOCALHOST_REGISTRY)
    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    registry = workflow.push_conf.add_docker_registry(DOCKER0_REGISTRY)
    registry.digests[TEST_IMAGE] = DIGEST1
    registry.digests["namespace/image:asd123"] = DIGEST2

    setattr(workflow, 'builder', X)
    setattr(workflow, '_base_image_inspect', {'Id': '01234567'})
    workflow.build_logs = ["a", "b"]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")
    return workflow
def test_message_results_in_post(self):
    # The xmpp client is mocked out in previous tests, so it never calls
    # xmpp_message for us — exercise the handler directly here.
    fake_conn = flexmock(name='fake_conn')

    fake_from = flexmock(name='fake_from')
    fake_from.should_receive('getStripped').and_return('me@public1')

    fake_event = flexmock(name='fake_event')
    fake_event.should_receive('getFrom').and_return(fake_from)
    fake_event.should_receive('getBody').and_return('doesnt matter')
    fake_event.should_receive('getType').and_return('chat')

    # Mock out the curl call to the AppLoadBalancer, slipping in our own
    # ip to send the XMPP message to.
    fake_curl = flexmock(name='curl_result')
    fake_curl.should_receive('read').and_return('Location: http://public2')
    flexmock(os)
    os.should_receive('popen').with_args(re.compile('curl')) \
        .and_return(fake_curl)

    # Finally mock out the urllib call.
    flexmock(urllib)
    urllib.should_receive('urlopen').with_args(
        "http://public2/_ah/xmpp/message/chat/", str).and_return()

    receiver = XMPPReceiver(self.appid, self.login_ip, self.password)
    receiver.xmpp_message(fake_conn, fake_event)
def prepare():
    """Create a minimal DockerBuildWorkflow with OSBS and env mocked out."""
    def set_annotations_on_build(build_id, labels, namespace='default'):
        # Replacement for OSBS.set_annotations_on_build: only check that the
        # namespace from the BUILD env var is passed through.
        assert namespace == 'namespace'

    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = '''
{
  "metadata": {
    "name": "asd",
    "namespace": "namespace"
  }
}
'''
    flexmock(OSBS, set_annotations_on_build=set_annotations_on_build)
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"},
                                   "test-image")
    workflow.push_conf.add_pulp_registry("test", LOCALHOST_REGISTRY)
    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    setattr(workflow, 'builder', X)
    workflow.build_logs = ["a", "b"]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")
    return workflow
def test_java_good_sdk_version(self):
    """is_sdk_mismatch must report no mismatch when the supported SDK jar is present."""
    target_jar = AppEngineHelper.JAVA_SDK_JAR_PREFIX + '-' \
        + AppEngineHelper.SUPPORTED_SDK_VERSION + '.jar'
    good_jars = ['test.jar', target_jar]
    flexmock(os)
    os.should_receive('listdir').with_args('/war/WEB-INF/lib') \
        .and_return(good_jars)
    # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
    self.assertEqual(False, AppEngineHelper.is_sdk_mismatch(''))
def prepare(pulp_registries=None, docker_registries=None, before_dockerfile=False):
    """Assemble a mocked DockerBuildWorkflow.

    pulp_registries / docker_registries default to a single local test
    registry each; before_dockerfile selects a builder without an inspected
    base image.
    """
    if pulp_registries is None:
        pulp_registries = (("test", LOCALHOST_REGISTRY),)
    if docker_registries is None:
        docker_registries = (DOCKER0_REGISTRY,)

    def set_annotations_on_build(build_id, annotations):
        pass  # no-op replacement for the OSBS call

    def update_labels_on_build(build_id, labels):
        pass  # no-op replacement for the OSBS call

    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = dedent('''\
        {
          "metadata": {
            "name": "asd",
            "namespace": "namespace"
          }
        }
        ''')
    flexmock(OSBS, set_annotations_on_build=set_annotations_on_build)
    flexmock(OSBS, update_labels_on_build=update_labels_on_build)
    (flexmock(osbs.conf)
        .should_call("Configuration")
        .with_args(namespace="namespace", conf_file=None, verify_ssl=True,
                   openshift_url="http://example.com/",
                   openshift_uri="http://example.com/",
                   use_auth=True))
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"},
                                   "test-image")
    for name, crane_uri in pulp_registries:
        workflow.push_conf.add_pulp_registry(name, crane_uri)
    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")
    for docker_registry in docker_registries:
        registry = workflow.push_conf.add_docker_registry(docker_registry)
        registry.digests[TEST_IMAGE] = ManifestDigest(v1=DIGEST_NOT_USED,
                                                      v2=DIGEST1)
        registry.digests["namespace/image:asd123"] = ManifestDigest(
            v1=DIGEST_NOT_USED, v2=DIGEST2)

    if before_dockerfile:
        setattr(workflow, 'builder', XBeforeDockerfile())
    else:
        setattr(workflow, 'builder', X)
        setattr(workflow, '_base_image_inspect', {'Id': '01234567'})

    workflow.build_logs = ["a", "b"]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")
    return workflow
def test_java_good_sdk_version(self):
    """is_sdk_mismatch must report no mismatch when the supported SDK jar is present."""
    target_jar = AppEngineHelper.JAVA_SDK_JAR_PREFIX + '-' \
        + AppEngineHelper.SUPPORTED_SDK_VERSION + '.jar'
    good_jars = ['test.jar', target_jar]
    flexmock(os)
    os.should_receive('listdir').with_args('/war/WEB-INF/lib').and_return(
        good_jars)
    # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
    self.assertEqual(False, AppEngineHelper.is_sdk_mismatch(''))
def addMockForNoAppScalefile(self, appscale):
    """Stub out open() so that reading the AppScalefile raises IOError."""
    flexmock(os)
    os.should_receive('getcwd').and_return('/boo')

    mock = flexmock(sys.modules['__builtin__'])
    mock.should_call('open')  # everything else falls through to real open
    (mock.should_receive('open')
         .with_args('/boo/' + appscale.APPSCALEFILE)
         .and_raise(IOError))
def addMockForNoAppScalefile(self, appscale):
    """Stub out open() so that reading the AppScalefile raises IOError."""
    flexmock(os)
    os.should_receive('getcwd').and_return('/boo')

    mock = flexmock(sys.modules['__builtin__'])
    mock.should_call('open')  # set the fall-through
    (mock.should_receive('open').with_args(
        '/boo/' + appscale.APPSCALEFILE).and_raise(IOError))
# Parametrized test of the koji-delegate plugin's skip paths (delegation
# disabled, not an autorebuild, task already delegated, missing/dead
# koji-task-id label).
# NOTE(review): this definition has been whitespace-collapsed onto a single
# line; the nesting of the trailing `if not koji_task_id:` assertions relative
# to the `elif triggered_task and task_open:` branch is ambiguous from this
# view, so the code is left byte-identical. Recover the original formatting
# from version control before editing.
def test_skip_delegate_build(self, tmpdir, caplog, delegate_task, is_auto, triggered_task, task_open, koji_task_id, task_exists): class MockedClientSession(object): def __init__(self, hub, opts=None): pass def getBuild(self, build_info): return None def krb_login(self, *args, **kwargs): return True def getTaskInfo(self, task_id, request=False): if not task_exists: return None if task_open: return {'state': koji.TASK_STATES['OPEN']} else: return {'state': koji.TASK_STATES['CLOSED']} session = MockedClientSession('') flexmock(koji, ClientSession=session) new_environ = deepcopy(os.environ) build_json = { "metadata": { "name": "auto-123456", "labels": {} } } if koji_task_id: build_json['metadata']['labels']['koji-task-id'] = 12345 new_environ["BUILD"] = json.dumps(build_json) flexmock(os) os.should_receive("environ").and_return(new_environ) # pylint: disable=no-member plugin = self.prepare(tmpdir, is_auto=is_auto, delegate_task=delegate_task, triggered_after_koji_task=triggered_task) plugin.run() if delegate_task: assert plugin.workflow.triggered_after_koji_task == triggered_task else: assert plugin.workflow.triggered_after_koji_task is None if not delegate_task: assert "delegate_task not enabled, skipping plugin" in caplog.text elif not is_auto: assert "not autorebuild, skipping plugin" in caplog.text elif triggered_task and task_open: assert "koji task already delegated, skipping plugin" in caplog.text if not koji_task_id: assert "koji-task-id label doesn't exist on build" in caplog.text elif not task_exists: assert "koji-task-id label on build, doesn't exist in koji" in caplog.text
def test_get_route_with_s3_credentials(self):
    """Users can fetch data via S3 when only S3 credentials are supplied."""
    server = RESTServer()
    server.request = flexmock()

    # Presume that the user has only specified S3 credentials.
    server.request.should_receive('get').with_args('name').and_return('s3')
    server.request.should_receive('get').with_args('AWS_ACCESS_KEY') \
        .and_return('access')
    server.request.should_receive('get').with_args('AWS_SECRET_KEY') \
        .and_return('secret')
    server.request.should_receive('get').with_args('GCS_ACCESS_KEY') \
        .and_return('')
    server.request.should_receive('get').with_args('GCS_SECRET_KEY') \
        .and_return('')
    server.request.should_receive('get').with_args('S3_URL').and_return('')
    server.request.should_receive('get').with_args('AZURE_ACCOUNT_NAME') \
        .and_return('')
    server.request.should_receive('get').with_args('AZURE_ACCOUNT_KEY') \
        .and_return('')

    # Mock out writing the file contents that were sent over.
    flexmock(uuid)
    uuid.should_receive('uuid4').and_return('123')

    fake_file = flexmock(name='fake_file')
    fake_file.should_receive('read').and_return('file contents')

    fake_builtins = flexmock(sys.modules['__builtin__'])
    fake_builtins.should_call('open')  # set the fall-through
    fake_builtins.should_receive('open').with_args('/tmp/magik-temp-123', 'r') \
        .and_return(fake_file)

    # Mock out interacting with S3.
    fake_storage = flexmock(name='fake_storage')
    fake_storage.should_receive('download_files').with_args([{
        'source': '/baz/gbaz.txt',
        'destination': '/tmp/magik-temp-123'
    }]).and_return([{'success': True}])

    flexmock(StorageFactory)
    StorageFactory.should_receive('get_storage').with_args(dict) \
        .and_return(fake_storage)

    # Mock out writing the response.
    server.response = flexmock()
    server.response.should_receive('write').and_return()

    # Finally, mock out removing the tempfile we created.
    flexmock(os)
    os.should_receive('remove').with_args('/tmp/magik-temp-123')

    # assertEquals(None, ...) is a deprecated alias plus a roundabout None
    # check; assertIsNone is the idiomatic, better-reporting form.
    self.assertIsNone(server.get('/baz/gbaz.txt'))
def prepare(pulp_registries=None, docker_registries=None):
    """Assemble a mocked DockerBuildWorkflow with configurable registries."""
    if pulp_registries is None:
        pulp_registries = (("test", LOCALHOST_REGISTRY),)
    if docker_registries is None:
        docker_registries = (DOCKER0_REGISTRY,)

    def set_annotations_on_build(build_id, annotations):
        pass  # no-op replacement for the OSBS call

    def update_labels_on_build(build_id, labels):
        pass  # no-op replacement for the OSBS call

    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = dedent('''\
        {
          "metadata": {
            "name": "asd",
            "namespace": "namespace"
          }
        }
        ''')
    flexmock(OSBS, set_annotations_on_build=set_annotations_on_build)
    flexmock(OSBS, update_labels_on_build=update_labels_on_build)
    (flexmock(osbs.conf)
        .should_call("Configuration")
        .with_args(namespace="namespace", conf_file=None, verify_ssl=True,
                   openshift_url="http://example.com/",
                   openshift_uri="http://example.com/",
                   use_auth=True))
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"},
                                   "test-image")
    for name, crane_uri in pulp_registries:
        workflow.push_conf.add_pulp_registry(name, crane_uri)
    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")
    for docker_registry in docker_registries:
        registry = workflow.push_conf.add_docker_registry(docker_registry)
        registry.digests[TEST_IMAGE] = ManifestDigest(v1='not-used',
                                                      v2=DIGEST1)
        registry.digests["namespace/image:asd123"] = ManifestDigest(
            v1='not-used', v2=DIGEST2)

    setattr(workflow, 'builder', X)
    setattr(workflow, '_base_image_inspect', {'Id': '01234567'})
    workflow.build_logs = ["a", "b"]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")
    return workflow
def test_java_good_sdk_version(self):
    """is_sdk_mismatch must report no mismatch when the supported SDK jar is present."""
    target_jar = AppEngineHelper.JAVA_SDK_JAR_PREFIX + '-' \
        + AppEngineHelper.SUPPORTED_SDK_VERSION + '.jar'
    good_jars = ['test.jar', target_jar]
    aeh = flexmock(AppEngineHelper)
    aeh.should_receive('get_appengine_lib_locations').and_return(['blah'])
    flexmock(os)
    os.should_receive('listdir').and_return(good_jars)
    # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
    self.assertEqual(False, AppEngineHelper.is_sdk_mismatch(''))
def addMockForAppScalefile(self, appscale, contents):
    """Stub out open() so that reading the AppScalefile yields `contents`."""
    flexmock(os)
    os.should_receive('getcwd').and_return('/boo')

    mock = flexmock(sys.modules['__builtin__'])
    mock.should_call('open')  # everything else falls through to real open
    (mock.should_receive('open')
         .with_args('/boo/' + appscale.APPSCALEFILE)
         .and_return(flexmock(read=lambda: contents)))
    return mock
def addMockForAppScalefile(self, appscale, contents):
    """Stub out open() so that reading the AppScalefile yields `contents`."""
    flexmock(os)
    os.should_receive('getcwd').and_return('/boo')
    mock = flexmock(sys.modules['__builtin__'])
    mock.should_call('open')  # set the fall-through
    fake_file = flexmock(read=lambda: contents)
    mock.should_receive('open').with_args(
        '/boo/' + appscale.APPSCALEFILE).and_return(fake_file)
    return mock
def testInitWithAppScalefile(self):
    # 'appscale init cloud' must throw up and die when an AppScalefile
    # already exists in the current working directory.
    appscale = AppScale()
    flexmock(os)
    os.should_receive('getcwd').and_return('/boo')
    flexmock(os.path)
    (os.path.should_receive('exists')
        .with_args('/boo/' + appscale.APPSCALEFILE)
        .and_return(True))
    self.assertRaises(AppScalefileException, appscale.init, 'cloud')
def testInitWithAppScalefile(self):
    # 'appscale init' must throw up and die when an AppScalefile already
    # exists in the current working directory.
    appscale = AppScale()
    flexmock(os)
    os.should_receive('getcwd').and_return('/boo')
    flexmock(os.path)
    (os.path.should_receive('exists')
        .with_args('/boo/' + appscale.APPSCALEFILE)
        .and_return(True))
    self.assertRaises(AppScalefileException, appscale.init)
def test_make_appscale_directory_creation(self):
    # Pretend our ~/.appscale directory does not exist yet ...
    (os.path.should_receive('exists')
        .with_args(LocalState.LOCAL_APPSCALE_PATH)
        .and_return(False)
        .once())
    # ... so creating it must be mocked out.
    (os.should_receive('mkdir')
        .with_args(LocalState.LOCAL_APPSCALE_PATH)
        .and_return())
    LocalState.make_appscale_directory()
def test_extract_app_to_dir(self):
    """extract_app_to_dir should untar the app from its absolute path."""
    flexmock(os)
    os.should_receive('mkdir').and_return()
    flexmock(os.path)
    (os.path.should_receive('abspath')
        .with_args('relative/app.tar.gz')
        .and_return('/tmp/relative/app.tar.gz'))
    flexmock(LocalState)
    (LocalState.should_receive('shell')
        .with_args(re.compile('tar zxvf /tmp/relative/app.tar.gz'), False)
        .and_return())
    LocalState.extract_app_to_dir('relative/app.tar.gz', False)
def test_keep_temperature_history_init(self, predictor_mock_class: type) -> None:
    """Test initialization of temperature history."""
    assert "THOTH_ADVISER_NO_HISTORY" not in os.environ

    predictor = predictor_mock_class()
    assert predictor.keep_history is False, "Temperature history not kept by default"

    # getenv is consulted once per construction: first "0", then "1".
    flexmock(os)
    (os.should_receive("getenv")
        .with_args("THOTH_ADVISER_HISTORY", 0)
        .and_return("0")
        .and_return("1")
        .twice())
    predictor = predictor_mock_class()
    assert predictor.keep_history is False
    predictor = predictor_mock_class()
    assert predictor.keep_history is True
def prepare():
    """Mocked workflow fixture: OSBS annotations, config check and BUILD env."""
    def set_annotations_on_build(build_id, labels, namespace='default'):
        pass  # no-op replacement for the OSBS call

    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = '''
{
  "metadata": {
    "name": "asd",
    "namespace": "namespace"
  }
}
'''
    flexmock(OSBS, set_annotations_on_build=set_annotations_on_build)
    (flexmock(osbs.conf)
        .should_call("Configuration")
        .with_args(namespace="namespace", conf_file=None, verify_ssl=True,
                   openshift_url="http://example.com/",
                   openshift_uri="http://example.com/",
                   use_auth=True))
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"},
                                   "test-image")
    workflow.push_conf.add_pulp_registry("test", LOCALHOST_REGISTRY)
    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    registry = workflow.push_conf.add_docker_registry(DOCKER0_REGISTRY)
    registry.digests[TEST_IMAGE] = DIGEST1
    registry.digests["namespace/image:asd123"] = DIGEST2

    setattr(workflow, 'builder', X)
    setattr(workflow, '_base_image_inspect', {'Id': '01234567'})
    workflow.build_logs = ["a", "b"]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")
    return workflow
def test_release_label_already_set(self, tmpdir, caplog, scratch,
                                   build_exists, release_label,
                                   reactor_config_map):
    class MockedClientSession(object):
        def __init__(self, hub, opts=None):
            pass

        def getBuild(self, build_info):
            # Pretend the NVR is (or is not) already in Koji.
            if build_exists:
                return {'id': 12345}
            return build_exists

        def krb_login(self, *args, **kwargs):
            return True

    session = MockedClientSession('')
    flexmock(koji, ClientSession=session)

    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = dedent('''\
        {
          "metadata": {
            "labels": {}
          }
        }
        ''')
    if scratch:
        new_environ["BUILD"] = dedent('''\
            {
              "metadata": {
                "labels": {"scratch": "true"}
              }
            }
            ''')
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    # pylint: disable=no-member
    plugin = self.prepare(tmpdir,
                          labels={release_label: '1',
                                  'com.redhat.component': 'component',
                                  'version': 'version'},
                          reactor_config_map=reactor_config_map)
    if build_exists and not scratch:
        with pytest.raises(RuntimeError) as exc:
            plugin.run()
        assert 'build already exists in Koji: ' in str(exc)
    else:
        plugin.run()
        assert 'not incrementing' in caplog.text
def testInitWithNoAppScalefile(self):
    # 'appscale init cloud' with no AppScalefile in the local directory
    # should write a new cloud config file there.
    appscale = AppScale()

    flexmock(os)
    os.should_receive('getcwd').and_return('/boo').once()
    flexmock(os.path)
    (os.path.should_receive('exists')
        .with_args('/boo/' + appscale.APPSCALEFILE)
        .and_return(False)
        .once())

    # mock out the actual writing of the template file
    flexmock(shutil)
    (shutil.should_receive('copy')
        .with_args(appscale.TEMPLATE_CLOUD_APPSCALEFILE,
                   '/boo/' + appscale.APPSCALEFILE)
        .and_return()
        .once())

    appscale.init('cloud')
def test_metadata_plugin(tmpdir):
    """StoreMetadataInOSv3Plugin should publish all expected annotation keys."""
    def set_annotations_on_build(build_id, labels):
        pass  # no-op stand-in for the OSBS call

    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = '{"metadata": {"name": "asd"}}'
    flexmock(OSBS, set_annotations_on_build=set_annotations_on_build)
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"},
                                   "test-image")
    workflow.push_conf.add_pulp_registry("test", LOCALHOST_REGISTRY)
    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")
    setattr(workflow, 'builder', X)
    workflow.build_logs = ["a", "b"]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")

    workflow.prebuild_results = {
        CpDockerfilePlugin.key: "dockerfile-content",
        DistgitFetchArtefactsPlugin.key: "artefact1\nartefact2",
    }
    workflow.postbuild_results = {
        PostBuildRPMqaPlugin.key: "rpm1\nrpm2",
    }

    runner = PostBuildPluginsRunner(
        None,
        workflow,
        [{
            'name': StoreMetadataInOSv3Plugin.key,
            "args": {"url": "http://example.com/"},
        }],
    )
    output = runner.run()
    assert StoreMetadataInOSv3Plugin.key in output

    labels = output[StoreMetadataInOSv3Plugin.key]
    for expected_key in ("dockerfile", "artefacts", "logs",
                         "rpm-packages", "repositories", "commit_id"):
        assert expected_key in labels
def test_extract_tgz_app_to_dir(self):
    """extract_tgz_app_to_dir should unpack into the tarball's single folder."""
    flexmock(os)
    os.should_receive('mkdir').and_return()
    flexmock(os.path)
    os.path.should_receive('abspath').with_args('relative/app.tar.gz') \
        .and_return('/tmp/relative/app.tar.gz')

    flexmock(LocalState)
    LocalState.should_receive('shell') \
        .with_args(re.compile("tar zxvf '/tmp/relative/app.tar.gz'"), False) \
        .and_return()

    os.should_receive('listdir').and_return(['one_folder'])
    os.path.should_receive('isdir').with_args(re.compile('one_folder')) \
        .and_return(True)

    location = LocalState.extract_tgz_app_to_dir('relative/app.tar.gz', False)
    # assertEquals(True, x in y) is a deprecated alias plus a roundabout
    # membership check; assertIn is equivalent and reports failures better.
    self.assertIn('one_folder', location)
def test_extract_tgz_app_to_dir(self):
    """extract_tgz_app_to_dir should unpack into the tarball's single folder."""
    flexmock(os)
    os.should_receive('mkdir').and_return()
    flexmock(os.path)
    os.path.should_receive('abspath').with_args('relative/app.tar.gz') \
        .and_return('/tmp/relative/app.tar.gz')

    flexmock(LocalState)
    LocalState.should_receive('shell') \
        .with_args(re.compile('tar zxvf /tmp/relative/app.tar.gz'), False) \
        .and_return()

    os.should_receive('listdir').and_return(['one_folder'])
    os.path.should_receive('isdir').with_args(re.compile('one_folder')) \
        .and_return(True)

    location = LocalState.extract_tgz_app_to_dir('relative/app.tar.gz', False)
    # assertEquals(True, x in y) is a deprecated alias plus a roundabout
    # membership check; assertIn is equivalent and reports failures better.
    self.assertIn('one_folder', location)
def prepare():
    """Mocked workflow fixture with OSBS annotation/label calls stubbed out."""
    def set_annotations_on_build(build_id, labels):
        pass  # no-op replacement for the OSBS call

    def update_labels_on_build(build_id, labels):
        pass  # no-op replacement for the OSBS call

    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = '''
{
  "metadata": {
    "name": "asd",
    "namespace": "namespace"
  }
}
'''
    flexmock(OSBS, set_annotations_on_build=set_annotations_on_build)
    flexmock(OSBS, update_labels_on_build=update_labels_on_build)
    (flexmock(osbs.conf)
        .should_call("Configuration")
        .with_args(namespace="namespace", conf_file=None, verify_ssl=True,
                   openshift_url="http://example.com/",
                   openshift_uri="http://example.com/",
                   use_auth=True))
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"},
                                   "test-image")
    workflow.push_conf.add_pulp_registry("test", LOCALHOST_REGISTRY)
    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    registry = workflow.push_conf.add_docker_registry(DOCKER0_REGISTRY)
    registry.digests[TEST_IMAGE] = DIGEST1
    registry.digests["namespace/image:asd123"] = DIGEST2

    setattr(workflow, 'builder', X)
    setattr(workflow, '_base_image_inspect', {'Id': '01234567'})
    workflow.build_logs = ["a", "b"]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")
    return workflow
def testInitWithNoAppScalefile(self):
    # 'appscale init' with no AppScalefile in the local directory should
    # write a new config file there from the template.
    appscale = AppScale()

    flexmock(os)
    os.should_receive('getcwd').and_return('/boo')
    flexmock(os.path)
    (os.path.should_receive('exists')
        .with_args('/boo/' + appscale.APPSCALEFILE)
        .and_return(False))

    # mock out the actual writing of the template file
    flexmock(shutil)
    (shutil.should_receive('copy')
        .with_args(appscale.TEMPLATE_APPSCALEFILE,
                   '/boo/' + appscale.APPSCALEFILE)
        .and_return())

    appscale.init()
def test_appscale_in_two_node_virt_deployment(self):
    # pretend that the place we're going to put logs into doesn't exist
    flexmock(os.path)
    os.path.should_call('exists')  # set the fall-through
    os.path.should_receive('exists').with_args('/tmp/foobaz').and_return(False)

    # and mock out the mkdir operation
    flexmock(os)
    os.should_receive('mkdir').with_args('/tmp/foobaz').and_return()

    # next, mock out finding the login ip address
    os.path.should_receive('exists').with_args(
        LocalState.get_locations_json_location(self.keyname)).and_return(True)
    fake_nodes_json = flexmock(name="fake_nodes_json")
    fake_nodes_json.should_receive('read').and_return(json.dumps([{
        "public_ip": "public1",
        "private_ip": "private1",
        "jobs": ["shadow", "login"]
    }]))
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')
    builtins.should_receive('open').with_args(
        LocalState.get_locations_json_location(self.keyname), 'r') \
        .and_return(fake_nodes_json)

    # mock out writing the secret key to ~/.appscale, as well as reading it
    # later
    secret_key_location = LocalState.get_secret_key_location(self.keyname)
    fake_secret = flexmock(name="fake_secret")
    fake_secret.should_receive('read').and_return('the secret')
    builtins.should_receive('open').with_args(secret_key_location, 'r') \
        .and_return(fake_secret)

    # and slip in a fake appcontroller to report on the two IP addrs
    fake_appcontroller = flexmock(name='fake_appcontroller')
    fake_appcontroller.should_receive('get_all_public_ips').with_args(
        'the secret').and_return(json.dumps(['public1', 'public2']))
    flexmock(SOAPpy)
    SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
        .and_return(fake_appcontroller)

    # fake the creation of the log directories locally
    os.should_receive('mkdir').with_args('/tmp/foobaz/public1').and_return()
    os.should_receive('mkdir').with_args('/tmp/foobaz/public2').and_return()

    # finally, fake the copying of the log files
    flexmock(subprocess)
    subprocess.should_receive('Popen').with_args(
        re.compile('/var/log/appscale'), shell=True,
        stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)

    argv = ["--keyname", self.keyname, "--location", "/tmp/foobaz"]
    options = ParseArgs(argv, self.function).args
    AppScaleTools.gather_logs(options)
def test_appscale_in_two_node_virt_deployment(self):
    # the target log directory shouldn't exist yet ...
    flexmock(os.path)
    os.path.should_call('exists')  # fall through to the real os.path.exists
    (os.path.should_receive('exists')
        .with_args('/tmp/foobaz')
        .and_return(False))

    # ... so creating it must be mocked out
    flexmock(os)
    os.should_receive('mkdir').with_args('/tmp/foobaz').and_return()

    # mock out finding the login ip address from the locations json
    (os.path.should_receive('exists')
        .with_args(LocalState.get_locations_json_location(self.keyname))
        .and_return(True))
    fake_nodes_json = flexmock(name="fake_nodes_json")
    fake_nodes_json.should_receive('read').and_return(json.dumps([{
        "public_ip": "public1",
        "private_ip": "private1",
        "jobs": ["shadow", "login"]
    }]))
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')
    (builtins.should_receive('open')
        .with_args(LocalState.get_locations_json_location(self.keyname), 'r')
        .and_return(fake_nodes_json))

    # mock out reading the secret key back from ~/.appscale
    secret_key_location = LocalState.get_secret_key_location(self.keyname)
    fake_secret = flexmock(name="fake_secret")
    fake_secret.should_receive('read').and_return('the secret')
    (builtins.should_receive('open')
        .with_args(secret_key_location, 'r')
        .and_return(fake_secret))

    # slip in a fake appcontroller that reports the two node IPs
    fake_appcontroller = flexmock(name='fake_appcontroller')
    (fake_appcontroller.should_receive('get_all_public_ips')
        .with_args('the secret')
        .and_return(json.dumps(['public1', 'public2'])))
    flexmock(SOAPpy)
    (SOAPpy.should_receive('SOAPProxy')
        .with_args('https://public1:17443')
        .and_return(fake_appcontroller))

    # fake the creation of the per-node log directories locally
    os.should_receive('mkdir').with_args('/tmp/foobaz/public1').and_return()
    os.should_receive('mkdir').with_args('/tmp/foobaz/public2').and_return()

    # and the copying of the log files themselves
    flexmock(subprocess)
    (subprocess.should_receive('Popen')
        .with_args(re.compile('/var/log/appscale'), shell=True,
                   stdout=self.fake_temp_file, stderr=subprocess.STDOUT)
        .and_return(self.success))

    argv = ["--keyname", self.keyname, "--location", "/tmp/foobaz"]
    options = ParseArgs(argv, self.function).args
    AppScaleTools.gather_logs(options)
def test_terminate_in_cloud_and_succeeds(self):
    # A locations.yaml file exists, meaning AppScale is running, so the
    # services on each box should get terminated.
    flexmock(os.path)
    os.path.should_call('exists')  # set up the fall-through
    os.path.should_receive('exists').with_args(
        LocalState.get_secret_key_location(self.keyname)).and_return(True)

    # mock out reading the locations.yaml file, and pretend that we're on
    # a virtualized cluster
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')
    fake_yaml_file = flexmock(name='fake_file')
    fake_yaml_file.should_receive('read').and_return(yaml.dump({
        'infrastructure': 'ec2',
        'group': self.group,
    }))
    builtins.should_receive('open').with_args(
        LocalState.get_locations_yaml_location(self.keyname), 'r') \
        .and_return(fake_yaml_file)

    # mock out reading the json file, pretending we run a two node deployment
    fake_json_file = flexmock(name='fake_file')
    fake_json_file.should_receive('read').and_return(json.dumps([
        {'public_ip': 'public1', 'jobs': ['shadow']},
        {'public_ip': 'public2', 'jobs': ['appengine']},
    ]))
    builtins.should_receive('open').with_args(
        LocalState.get_locations_json_location(self.keyname), 'r') \
        .and_return(fake_json_file)

    # and slip in a fake secret file
    fake_secret_file = flexmock(name='fake_file')
    fake_secret_file.should_receive('read').and_return('the secret')
    builtins.should_receive('open').with_args(
        LocalState.get_secret_key_location(self.keyname), 'r') \
        .and_return(fake_secret_file)

    # mock out talking to EC2: three instances are running, two of them in
    # our deployment
    fake_ec2 = flexmock(name='fake_ec2')
    fake_one_running = flexmock(name='fake_one', key_name=self.keyname,
                                state='running', id='i-ONE',
                                public_dns_name='public1',
                                private_dns_name='private1')
    fake_two_running = flexmock(name='fake_two', key_name=self.keyname,
                                state='running', id='i-TWO',
                                public_dns_name='public2',
                                private_dns_name='private2')
    fake_three_running = flexmock(name='fake_three', key_name='abcdefg',
                                  state='running', id='i-THREE',
                                  public_dns_name='public3',
                                  private_dns_name='private3')
    fake_reservation_running = flexmock(name='fake_reservation',
                                        instances=[fake_one_running,
                                                   fake_two_running,
                                                   fake_three_running])

    fake_one_terminated = flexmock(name='fake_one', key_name=self.keyname,
                                   state='terminated', id='i-ONE',
                                   public_dns_name='public1',
                                   private_dns_name='private1')
    fake_two_terminated = flexmock(name='fake_two', key_name=self.keyname,
                                   state='terminated', id='i-TWO',
                                   public_dns_name='public2',
                                   private_dns_name='private2')
    fake_three_terminated = flexmock(name='fake_three', key_name='abcdefg',
                                     state='terminated', id='i-THREE',
                                     public_dns_name='public3',
                                     private_dns_name='private3')
    fake_reservation_terminated = flexmock(name='fake_reservation',
                                           instances=[fake_one_terminated,
                                                      fake_two_terminated,
                                                      fake_three_terminated])

    (fake_ec2.should_receive('get_all_instances')
        .and_return(fake_reservation_running)
        .and_return(fake_reservation_terminated))
    flexmock(boto.ec2)
    boto.ec2.should_receive('connect_to_region').and_return(fake_ec2)

    # and mock out the call to kill the instances
    (fake_ec2.should_receive('terminate_instances')
        .with_args(['i-ONE', 'i-TWO'])
        .and_return([fake_one_terminated, fake_two_terminated]))

    # mock out the call to delete the keypair
    fake_ec2.should_receive('delete_key_pair').and_return()

    # and the call to delete the security group - let's say that we can't
    # delete the group the first time, and can the second
    (fake_ec2.should_receive('delete_security_group')
        .and_return(False)
        .and_return(True))

    # finally, mock out removing the yaml file, json file, and secret key
    # from this machine
    flexmock(os)
    os.should_receive('remove').with_args(
        LocalState.get_locations_yaml_location(self.keyname)).and_return()
    os.should_receive('remove').with_args(
        LocalState.get_locations_json_location(self.keyname)).and_return()
    os.should_receive('remove').with_args(
        LocalState.get_secret_key_location(self.keyname)).and_return()

    # also mock out asking the user for confirmation on shutting down
    # their cloud
    builtins.should_receive('raw_input').and_return('yes')

    argv = ["--keyname", self.keyname]
    options = ParseArgs(argv, self.function).args
    AppScaleTools.terminate_instances(options)
def test_upload_app_when_app_exists_on_virt_cluster(self):
    """Re-uploading an app that is already running on a virtualized cluster
    should succeed and report the host/port serving it.

    NOTE(review): parts of this test were mangled by a secret-scrubbing pass
    (the '*****' tokens and the truncated SOAPProxy call below) — verify the
    missing mock setup against version control before relying on this test.
    """
    # we do let you upload an app if it's already running
    # add in mocks so that there is an app.yaml with an appid set
    flexmock(os.path)
    os.path.should_call('exists')  # set the fall-through for unexpected paths
    app_yaml_location = AppEngineHelper.get_app_yaml_location(self.app_dir)
    os.path.should_receive('exists').with_args(app_yaml_location) \
        .and_return(True)
    # mock out reading the app.yaml file (Python 2: patch __builtin__.open)
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')  # set the fall-through
    fake_app_yaml = flexmock(name="fake_app_yaml")
    fake_app_yaml.should_receive('read').and_return(
        yaml.dump({
            'application': 'baz',
            'runtime': 'python27'
        }))
    builtins.should_receive('open').with_args(app_yaml_location, 'r') \
        .and_return(fake_app_yaml)
    # Mock out service host and port
    app_data = {
        'owner': '*****@*****.**',
        'hosts': {
            '192.168.1.1': {
                'http': 8080,
                'https': 4380
            }
        }
    }
    app_stats_data = {
        'apps': {
            'baz': {
                'http': 8080,
                'language': 'python27',
                'total_reqs': 'no_change',
                'appservers': 1,
                'https': 4380,
                'reqs_enqueued': None
            }
        }
    }
    # mock out the SOAP call to the AppController and assume it succeeded
    fake_appcontroller = flexmock(name='fake_appcontroller')
    fake_appcontroller.should_receive('status').with_args('the secret') \
        .and_return('Database is at public1')
    fake_appcontroller.should_receive('done_uploading').with_args(
        'baz', '/opt/appscale/apps/baz.tar.gz', 'the secret').and_return('OK')
    fake_appcontroller.should_receive('update').with_args(
        ['baz'], 'the secret').and_return('OK')
    fake_appcontroller.should_receive('does_user_exist').with_args(
        '*****@*****.**', 'the secret').and_return('true')
    fake_appcontroller.should_receive('does_user_exist').with_args(
        'a@public1', 'the secret').and_return('true')
    fake_appcontroller.should_receive('does_app_exist').with_args(
        'baz', 'the secret').and_return(json.dumps(app_data))
    fake_appcontroller.should_receive('get_app_data').with_args(
        'baz', 'the secret').and_return(json.dumps(app_data))
    fake_appcontroller.should_receive('get_all_stats').with_args(
        'the secret').and_return(json.dumps(app_stats_data))
    flexmock(SOAPpy)
    # NOTE(review): the next statement is truncated mid-call (mismatched
    # quotes); the scrubber appears to have swallowed the lines that returned
    # fake_appcontroller and mocked the user's email prompt — restore from VCS.
    SOAPpy.should_receive('SOAPProxy').with_args('https://*****:*****@a.com")
    flexmock(getpass)
    getpass.should_receive('getpass').and_return('aaaaaa')
    # mock out making the remote app directory
    flexmock(subprocess)
    subprocess.should_receive('Popen').with_args(re.compile('mkdir -p'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    # and mock out tarring and copying the app
    subprocess.should_receive('Popen').with_args(re.compile('tar -czhf'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    subprocess.should_receive('Popen').with_args(re.compile(
        '/tmp/appscale-app-baz.tar.gz'), shell=True,
        stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    # as well as removing the tar'ed app once we're done copying it
    flexmock(os)
    os.should_receive('remove').with_args('/tmp/appscale-app-baz-1234.tar.gz') \
        .and_return()
    # and slap in a mock that says the app comes up after waiting for it
    # three times (two connect failures, then success)
    fake_socket = flexmock(name='fake_socket')
    fake_socket.should_receive('connect').with_args(('public1',
        8080)).and_raise(Exception).and_raise(Exception) \
        .and_return(None)
    flexmock(socket)
    socket.should_receive('socket').and_return(fake_socket)
    argv = ["--keyname", self.keyname, "--file", self.app_dir]
    options = ParseArgs(argv, self.function).args
    (host, port) = AppScaleTools.upload_app(options)
    self.assertEquals('public1', host)
    self.assertEquals(8080, port)
def test_delegate_build(self, tmpdir, caplog, cancel_isolated_autorebuild,
                        user_params, koji_task_id, original_koji_task_id,
                        triggered_task, task_open, task_priority):
    """An autorebuild that must be delegated spawns an intermediate koji
    buildContainer task and then cancels the current build.

    The parametrized arguments (presumably supplied by a pytest.mark.parametrize
    decorator outside this view — confirm) drive whether the parent task is
    still open, what priority the delegated task gets, and whether an isolated
    autorebuild is cancelled instead of delegated.
    """
    class MockedClientSession(object):
        # Minimal stand-in for koji.ClientSession; buildContainer asserts the
        # exact request the plugin is expected to make.
        def __init__(self, hub, opts=None):
            pass

        def getBuild(self, build_info):
            return None

        def krb_login(self, *args, **kwargs):
            return True

        def getTaskInfo(self, task_id, request=False):
            # Parent task state comes from the test parameter, not task_id.
            if task_open:
                return {'state': koji.TASK_STATES['OPEN']}
            else:
                return {'state': koji.TASK_STATES['CLOSED']}

        def buildContainer(self, source, container_target, task_opts,
                           priority=None):
            # Verify the delegated task mirrors the user params exactly.
            expect_source = "%s#%s" % (user_params.get('git_uri'),
                                       user_params.get('git_ref'))
            assert source == expect_source
            assert container_target == user_params.get('koji_target')
            assert priority == task_priority
            expect_opts = {
                'git_branch': user_params.get('git_branch'),
                'triggered_after_koji_task': original_koji_task_id,
            }
            if user_params.get('yum_repourls'):
                expect_opts['yum_repourls'] = user_params.get('yum_repourls')
            if user_params.get('signing_intent'):
                expect_opts['signing_intent'] = user_params.get('signing_intent')
            if user_params.get('compose_ids'):
                expect_opts['compose_ids'] = user_params.get('compose_ids')
            if user_params.get('flatpak'):
                expect_opts['flatpak'] = user_params.get('flatpak')
            # Fall back to the current task id (or 0) when no original task
            # id was recorded on the triggering build.
            if not expect_opts['triggered_after_koji_task']:
                expect_opts['triggered_after_koji_task'] = koji_task_id or 0
            assert expect_opts == task_opts
            return 987654321  # id of the newly-created intermediate task

    session = MockedClientSession('')
    flexmock(koji, ClientSession=session)
    build_name = "auto-123456"
    # Fake the BUILD environment variable OpenShift injects into the pod.
    new_environ = deepcopy(os.environ)
    build_json = {
        "metadata": {
            "name": build_name,
            "labels": {
                "koji-task-id": koji_task_id
            }
        }
    }
    if original_koji_task_id:
        build_json['metadata']['labels'][
            'original-koji-task-id'] = original_koji_task_id
    new_environ["BUILD"] = json.dumps(build_json)
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)
    # Whichever path is taken, the current build must be cancelled exactly once.
    # pylint: disable=no-member
    flexmock(OSBS).should_receive('cancel_build').with_args(
        build_name).once()
    plugin = self.prepare(tmpdir, is_auto=True, delegate_task=True,
                          delegated_priority=task_priority,
                          triggered_after_koji_task=triggered_task)
    plugin.workflow.cancel_isolated_autorebuild = cancel_isolated_autorebuild
    plugin.workflow.user_params = user_params
    # Delegation (or isolated-autorebuild cancellation) always ends the
    # current build.
    with pytest.raises(BuildCanceledException):
        plugin.run()
    if cancel_isolated_autorebuild:
        assert "ignoring isolated build for autorebuild, the build will be cancelled" in \
            caplog.text
        assert 'Build was delegated, the build will be cancelled' not in caplog.text
    else:
        assert 'Created intermediate task: 987654321' in caplog.text
        assert 'Build was delegated, the build will be cancelled' in caplog.text
def test_upload_tar_gz_app_successfully(self):
    """Uploading a .tar.gz app bundle extracts it to /tmp, uploads it, and
    waits for the app to come up.

    NOTE(review): the secret-scrubbing pass corrupted this test — the
    creation of ``fake_userappserver`` and the first SOAPProxy return value
    are missing (see the truncated calls below); restore from VCS.
    """
    # mock out generating a random app dir, for later mocks
    flexmock(uuid)
    uuid.should_receive('uuid4').and_return('12345678')
    app_dir = '/tmp/appscale-app-12345678'
    # add in mocks so that the gzip'ed file gets extracted to /tmp
    # as well as for removing it later
    flexmock(os)
    os.should_receive('mkdir').with_args(app_dir) \
        .and_return(True)
    flexmock(shutil)
    shutil.should_receive('rmtree').with_args(app_dir).and_return()
    local_state = flexmock(LocalState)
    local_state.should_receive('shell')\
        .with_args(re.compile('tar zxvf'),False)\
        .and_return()
    # add in mocks so that there is an app.yaml, but with no appid set
    flexmock(os.path)
    os.path.should_call('exists')  # set the fall-through
    app_yaml_location = AppEngineHelper.get_app_yaml_location(app_dir)
    os.path.should_receive('exists').with_args(app_yaml_location) \
        .and_return(True)
    # mock out reading the app.yaml file (Python 2 __builtin__.open)
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')  # set the fall-through
    fake_app_yaml = flexmock(name="fake_app_yaml")
    fake_app_yaml.should_receive('read').and_return(yaml.dump({
        'application' : 'baz',
        'runtime' : 'python'
    }))
    builtins.should_receive('open').with_args(app_yaml_location, 'r') \
        .and_return(fake_app_yaml)
    # mock out the SOAP call to the AppController and assume it succeeded
    fake_appcontroller = flexmock(name='fake_appcontroller')
    fake_appcontroller.should_receive('status').with_args('the secret') \
        .and_return('Database is at public1')
    fake_appcontroller.should_receive('done_uploading').with_args('baz',
        '/var/apps/baz/app/baz.tar.gz', 'the secret').and_return()
    fake_appcontroller.should_receive('update').with_args(['baz'],
        'the secret').and_return()
    # app reported not-running once, then running (drives the wait loop)
    fake_appcontroller.should_receive('is_app_running').with_args('baz',
        'the secret').and_return(False).and_return(True)
    flexmock(SOAPpy)
    # NOTE(review): corrupted by scrubbing — original presumably returned
    # fake_appcontroller here and then created fake_userappserver with a
    # does_user_exist expectation; the fused fragment below is what remains.
    SOAPpy.should_receive('SOAPProxy').with_args('https://*****:*****@a.com', 'the secret').and_return('false')
    fake_userappserver.should_receive('commit_new_user').with_args(
        '*****@*****.**', str, 'xmpp_user', 'the secret').and_return('true')
    fake_userappserver.should_receive('commit_new_user').with_args(
        'a@public1', str, 'xmpp_user', 'the secret').and_return('true')
    # first read has no ports assigned yet, then stable app data three times
    fake_userappserver.should_receive('get_app_data').with_args(
        'baz', 'the secret').and_return('\n\nnum_ports:0\n') \
        .and_return(app_data).and_return(app_data).and_return(app_data)
    fake_userappserver.should_receive('commit_new_app').with_args(
        'baz', '*****@*****.**', 'python', 'the secret').and_return('true')
    # NOTE(review): also truncated mid-call by the scrubbing pass.
    SOAPpy.should_receive('SOAPProxy').with_args('https://*****:*****@a.com")
    flexmock(getpass)
    getpass.should_receive('getpass').and_return('aaaaaa')
    # mock out making the remote app directory
    local_state.should_receive('shell')\
        .with_args(re.compile('^ssh'),False,5,stdin=re.compile('^mkdir -p'))\
        .and_return()
    # and mock out tarring and copying the app
    local_state.should_receive('shell')\
        .with_args(re.compile('tar -czf'),False)\
        .and_return()
    local_state.should_receive('shell')\
        .with_args(re.compile('/tmp/appscale-app-baz.tar.gz'),False,5)\
        .and_return()
    # as well as removing the tar'ed app once we're done copying it
    flexmock(os)
    os.should_receive('remove').with_args('/tmp/appscale-app-baz.tar.gz') \
        .and_return()
    # and slap in a mock that says the app comes up after waiting for it
    # three times (two connect failures, then success)
    fake_socket = flexmock(name='fake_socket')
    fake_socket.should_receive('connect').with_args(('public1',
        8080)).and_raise(Exception).and_raise(Exception) \
        .and_return(None)
    flexmock(socket)
    socket.should_receive('socket').and_return(fake_socket)
    argv = [
        "--keyname", self.keyname,
        "--file", self.app_dir + ".tar.gz"
    ]
    options = ParseArgs(argv, self.function).args
    AppScaleTools.upload_app(options)
def test_terminate_in_virtual_cluster_and_succeeds(self):
    """Terminating a running virtualized (Xen) deployment stops the remote
    services on each box and removes the local state files."""
    # let's say that there is a locations.yaml file, which means appscale is
    # running, so we should terminate the services on each box
    flexmock(os.path)
    os.path.should_call("exists")  # set up the fall-through
    os.path.should_receive("exists").with_args(
        LocalState.get_secret_key_location(self.keyname)).and_return(True)
    # mock out reading the locations.yaml file, and pretend that we're on
    # a virtualized cluster
    builtins = flexmock(sys.modules["__builtin__"])
    builtins.should_call("open")  # fall-through for unexpected files
    fake_yaml_file = flexmock(name="fake_file")
    fake_yaml_file.should_receive("read").and_return(
        yaml.dump({"infrastructure": "xen"}))
    builtins.should_receive("open").with_args(
        LocalState.get_locations_yaml_location(self.keyname), "r").and_return(
        fake_yaml_file
    )
    # mock out reading the json file, and pretend that we're running in a
    # two node deployment
    os.path.should_receive("exists").with_args(
        LocalState.get_locations_json_location(self.keyname)).and_return(
        True
    )
    fake_json_file = flexmock(name="fake_file")
    fake_json_file.should_receive("read").and_return(
        json.dumps([{"public_ip": "public1", "jobs": ["shadow"]},
                    {"public_ip": "public2", "jobs": ["appengine"]}])
    )
    builtins.should_receive("open").with_args(
        LocalState.get_locations_json_location(self.keyname), "r").and_return(
        fake_json_file
    )
    # and slip in a fake secret file
    fake_secret_file = flexmock(name="fake_file")
    fake_secret_file.should_receive("read").and_return("the secret")
    builtins.should_receive("open").with_args(
        LocalState.get_secret_key_location(self.keyname), "r").and_return(
        fake_secret_file
    )
    # mock out talking to the appcontroller, and assume that it tells us there
    # there are still two machines in this deployment
    fake_appcontroller = flexmock(name="fake_appcontroller")
    fake_appcontroller.should_receive("get_all_public_ips").with_args(
        "the secret").and_return(
        json.dumps(["public1", "public2"])
    )
    flexmock(SOAPpy)
    SOAPpy.should_receive("SOAPProxy").with_args(
        "https://public1:17443").and_return(fake_appcontroller)
    # and mock out the ssh call to kill the remote appcontroller, assuming that
    # it fails the first time and passes the second
    flexmock(subprocess)
    subprocess.should_receive("Popen").with_args(
        re.compile("controller stop"), shell=True, stdout=self.fake_temp_file,
        stderr=subprocess.STDOUT
    ).and_return(self.failed).and_return(self.success)
    # next, mock out our checks to see how the stopping process is going and
    # assume that it has stopped
    flexmock(subprocess)
    subprocess.should_receive("Popen").with_args(
        re.compile("ps x"), shell=True, stdout=self.fake_temp_file,
        stderr=subprocess.STDOUT
    ).and_return(self.success)
    # finally, mock out removing the yaml file, json file, and secret key from
    # this machine
    flexmock(os)
    os.should_receive("remove").with_args(
        LocalState.get_locations_yaml_location(self.keyname)).and_return()
    os.should_receive("remove").with_args(
        LocalState.get_locations_json_location(self.keyname)).and_return()
    os.should_receive("remove").with_args(
        LocalState.get_secret_key_location(self.keyname)).and_return()
    # also mock out asking the user for confirmation on shutting down
    # their cloud
    builtins.should_receive("raw_input").and_return("yes")
    argv = ["--keyname", self.keyname]
    options = ParseArgs(argv, self.function).args
    AppScaleTools.terminate_instances(options)
def test_java_bad_sdk_version(self):
    """A WEB-INF/lib containing an appengine-api 1.7.3 SDK jar must be
    reported as an SDK version mismatch."""
    jars_on_disk = ['test.jar', 'appengine-api-1.0-sdk-1.7.3.jar']
    flexmock(os)
    os.should_receive('listdir') \
        .with_args('/war/WEB-INF/lib') \
        .and_return(jars_on_disk)
    self.assertEquals(True, AppEngineHelper.is_sdk_mismatch(''))
def test_upload_app_successfully(self):
    """Uploading an app directory succeeds and reports the serving host/port.

    NOTE(review): the secret-scrubbing pass corrupted this test — the
    creation of ``fake_userappserver`` and the SOAPProxy return values are
    missing (see the truncated calls below); restore from VCS.
    """
    # add in mocks so that there is an app.yaml, but with no appid set
    flexmock(os.path)
    os.path.should_call('exists')  # set the fall-through
    app_yaml_location = AppEngineHelper.get_app_yaml_location(self.app_dir)
    os.path.should_receive('exists').with_args(app_yaml_location) \
        .and_return(True)
    # mock out reading the app.yaml file (Python 2 __builtin__.open)
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')  # set the fall-through
    fake_app_yaml = flexmock(name="fake_app_yaml")
    fake_app_yaml.should_receive('read').and_return(
        yaml.dump({
            'application': 'baz',
            'runtime': 'python27'
        }))
    builtins.should_receive('open').with_args(app_yaml_location, 'r') \
        .and_return(fake_app_yaml)
    # mock out the SOAP call to the AppController and assume it succeeded
    fake_appcontroller = flexmock(name='fake_appcontroller')
    fake_appcontroller.should_receive('status').with_args('the secret') \
        .and_return('Database is at public1')
    fake_appcontroller.should_receive('done_uploading').with_args(
        'baz', '/opt/appscale/apps/baz.tar.gz', 'the secret').and_return()
    fake_appcontroller.should_receive('update').with_args(
        ['baz'], 'the secret').and_return()
    # app reported not-running once, then running (drives the wait loop)
    fake_appcontroller.should_receive('is_app_running').with_args(
        'baz', 'the secret').and_return(False).and_return(True)
    flexmock(SOAPpy)
    # NOTE(review): corrupted by scrubbing — original presumably returned
    # fake_appcontroller here and created fake_userappserver with a
    # does_user_exist expectation; the fused fragment below is what remains.
    SOAPpy.should_receive('SOAPProxy').with_args('https://*****:*****@a.com', 'the secret').and_return('false')
    fake_userappserver.should_receive('does_user_exist').with_args(
        'a@public1', 'the secret').and_return('false')
    fake_userappserver.should_receive('commit_new_user').with_args(
        '*****@*****.**', str, 'xmpp_user', 'the secret').and_return('true')
    fake_userappserver.should_receive('commit_new_user').with_args(
        'a@public1', str, 'xmpp_user', 'the secret').and_return('true')
    # first read has no ports assigned yet, then stable app data three times
    fake_userappserver.should_receive('get_app_data').with_args(
        'baz', 'the secret').and_return('\n\nnum_ports:0\n') \
        .and_return(app_data).and_return(app_data).and_return(app_data)
    fake_userappserver.should_receive('commit_new_app').with_args(
        'baz', '*****@*****.**', 'python27', 'the secret').and_return('true')
    # NOTE(review): also truncated mid-call by the scrubbing pass.
    SOAPpy.should_receive('SOAPProxy').with_args('https://*****:*****@a.com")
    flexmock(getpass)
    getpass.should_receive('getpass').and_return('aaaaaa')
    # mock out making the remote app directory
    flexmock(subprocess)
    subprocess.should_receive('Popen').with_args(re.compile('mkdir -p'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    # and mock out tarring and copying the app
    subprocess.should_receive('Popen').with_args(re.compile('tar -czhf'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    subprocess.should_receive('Popen').with_args(re.compile(
        '/tmp/appscale-app-baz.tar.gz'), shell=True,
        stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    # as well as removing the tar'ed app once we're done copying it
    flexmock(os)
    os.should_receive('remove').with_args('/tmp/appscale-app-baz-1234.tar.gz') \
        .and_return()
    # and slap in a mock that says the app comes up after waiting for it
    # three times (two connect failures, then success)
    fake_socket = flexmock(name='fake_socket')
    fake_socket.should_receive('connect').with_args(('public1',
        8080)).and_raise(Exception).and_raise(Exception) \
        .and_return(None)
    flexmock(socket)
    socket.should_receive('socket').and_return(fake_socket)
    argv = ["--keyname", self.keyname, "--file", self.app_dir]
    options = ParseArgs(argv, self.function).args
    (host, port) = AppScaleTools.upload_app(options)
    self.assertEquals('public1', host)
    self.assertEquals(8080, port)
def setUp(self):
    """Common fixture: silences logging/sleeps, fakes AWS credentials and the
    whole boto EC2 surface (keypair, security group, run/get instances), and
    provides shared shell/temp-file mocks used by the tests."""
    # mock out all logging, since it clutters our output
    flexmock(AppScaleLogger)
    AppScaleLogger.should_receive('log').and_return()
    # mock out all sleeps, as they aren't necessary for unit testing
    flexmock(time)
    time.should_receive('sleep').and_return()
    # set up some fake options so that we don't have to generate them via
    # ParseArgs
    self.options = flexmock(infrastructure='ec2',
                            group='boogroup',
                            machine='ami-ABCDEFG',
                            instance_type='m1.large',
                            keyname='bookey',
                            table='cassandra',
                            verbose=False,
                            test=False,
                            use_spot_instances=False,
                            zone='my-zone-1b',
                            static_ip=None,
                            replication=None,
                            appengine=None,
                            autoscale=None,
                            user_commands=[],
                            flower_password='',
                            max_memory='400',
                            ips=FOUR_NODE_CLOUD)
    self.my_id = "12345"
    self.node_layout = NodeLayout(self.options)
    # set up phony AWS credentials for each test
    # ones that test not having them present can
    # remove them
    for credential in EucalyptusAgent.REQUIRED_EC2_CREDENTIALS:
        os.environ[credential] = "baz"
    os.environ['EC2_URL'] = "http://boo"
    # mock out calls to EC2
    # begin by assuming that our ssh keypair doesn't exist, and thus that we
    # need to create it
    key_contents = "key contents here"
    fake_key = flexmock(name="fake_key", material=key_contents)
    fake_key.should_receive('save').with_args(
        os.environ['HOME'] + '/.appscale').and_return(None)
    fake_ec2 = flexmock(name="fake_ec2")
    fake_ec2.should_receive('get_key_pair').with_args('bookey') \
        .and_return(None)
    fake_ec2.should_receive('create_key_pair').with_args('bookey') \
        .and_return(fake_key)
    # mock out writing the secret key (Python 2 __builtin__.open)
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')  # set the fall-through
    secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret"
    fake_secret = flexmock(name="fake_secret")
    fake_secret.should_receive('write').and_return()
    builtins.should_receive('open').with_args(secret_key_location, 'w') \
        .and_return(fake_secret)
    # also, mock out the keypair writing and chmod'ing
    ssh_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.key"
    fake_file = flexmock(name="fake_file")
    fake_file.should_receive('write').with_args(key_contents).and_return()
    builtins.should_receive('open').with_args(ssh_key_location, 'w') \
        .and_return(fake_file)
    flexmock(os)
    # 0600 is a Python 2 octal literal (owner read/write only)
    os.should_receive('chmod').with_args(ssh_key_location, 0600).and_return()
    # next, assume there are no security groups up at first, but then it gets
    # created.
    udp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='udp')
    tcp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='tcp')
    icmp_rule = flexmock(from_port=-1, to_port=-1, ip_protocol='icmp')
    group = flexmock(name='boogroup', rules=[tcp_rule, udp_rule, icmp_rule])
    fake_ec2.should_receive(
        'get_all_security_groups').with_args().and_return([])
    fake_ec2.should_receive('get_all_security_groups').with_args(
        'boogroup').and_return([group])
    # and then assume we can create and open our security group fine
    fake_ec2.should_receive('create_security_group').with_args(
        'boogroup', 'AppScale security group').and_return()
    fake_ec2.should_receive('authorize_security_group').and_return()
    # next, add in mocks for run_instances
    # the first time around, let's say that no machines are running
    # the second time around, let's say that our machine is pending
    # and that it's up the third time around
    fake_pending_instance = flexmock(state='pending')
    # NOTE(review): 'instances' is a single mock here, not a list as in the
    # running case below — confirm the agent only checks reservation state
    # before iterating, or this is a latent inconsistency.
    fake_pending_reservation = flexmock(instances=fake_pending_instance)
    fake_running_instance = flexmock(state='running',
                                     key_name='bookey',
                                     id='i-12345678',
                                     ip_address=IP_1,
                                     private_ip_address=IP_1)
    fake_running_reservation = flexmock(instances=fake_running_instance)
    fake_ec2.should_receive('get_all_instances').and_return([]) \
        .and_return([]) \
        .and_return([fake_pending_reservation]) \
        .and_return([fake_running_reservation])
    # next, assume that our run_instances command succeeds
    fake_ec2.should_receive('run_instances').and_return()
    # finally, inject our mocked EC2
    flexmock(boto.ec2)
    boto.ec2.should_receive('connect_to_region').and_return(fake_ec2)
    # assume that ssh comes up on the third attempt
    fake_socket = flexmock(name='fake_socket')
    fake_socket.should_receive('connect').with_args(('public1',
        RemoteHelper.SSH_PORT)).and_raise(Exception).and_raise(Exception) \
        .and_return(None)
    flexmock(socket)
    socket.should_receive('socket').and_return(fake_socket)
    # throw some default mocks together for when invoking via shell succeeds
    # and when it fails
    self.fake_temp_file = flexmock(name='fake_temp_file')
    self.fake_temp_file.should_receive('seek').with_args(0).and_return()
    self.fake_temp_file.should_receive('read').and_return('boo out')
    self.fake_temp_file.should_receive('close').and_return()
    flexmock(tempfile)
    tempfile.should_receive('NamedTemporaryFile')\
        .and_return(self.fake_temp_file)
    self.success = flexmock(name='success', returncode=0)
    self.success.should_receive('wait').and_return(0)
    # NOTE(review): labelled 'success' but models a failed process; the name
    # only affects flexmock's error messages, but 'failed' would be clearer.
    self.failed = flexmock(name='success', returncode=1)
    self.failed.should_receive('wait').and_return(1)
    # assume that root login isn't already enabled
    local_state = flexmock(LocalState)
    local_state.should_receive('shell') \
        .with_args(re.compile('^ssh .*root'), False, 1, stdin='ls') \
        .and_return(
        'Please login as the user "ubuntu" rather than the user "root"')
    # and assume that we can ssh in as ubuntu to enable root login
    local_state = flexmock(LocalState)
    local_state.should_receive('shell')\
        .with_args(re.compile('^ssh .*ubuntu'),False,5)\
        .and_return()
    # also assume that we can scp over our ssh keys
    local_state.should_receive('shell')\
        .with_args(re.compile('scp .*/root/.ssh/id_'),False,5)\
        .and_return()
    local_state.should_receive('shell')\
        .with_args(re.compile('scp .*/root/.appscale/bookey.key'),False,5)\
        .and_return()
def test_terminate_in_gce_and_succeeds(self):
    """Terminating a running GCE deployment deletes both instances, the
    firewall, and the network, then removes the local state files.

    Mocks the full oauth2client flow, httplib2, and the apiclient compute
    service object (instances/zoneOperations/firewalls/globalOperations/
    networks resources).
    """
    # let's say that there is a locations.yaml file, which means appscale is
    # running, so we should terminate the services on each box
    flexmock(os.path)
    os.path.should_call("exists")  # set up the fall-through
    os.path.should_receive("exists").with_args(
        LocalState.get_secret_key_location(self.keyname)).and_return(True)
    os.path.should_receive("exists").with_args(
        LocalState.get_locations_yaml_location(self.keyname)).and_return(
        True
    )
    os.path.should_receive("exists").with_args(
        LocalState.get_client_secrets_location(self.keyname)).and_return(
        True
    )
    # mock out reading the locations.yaml file, and pretend that we're on
    # GCE
    project_id = "1234567890"
    zone = "my-zone-1b"
    builtins = flexmock(sys.modules["__builtin__"])
    builtins.should_call("open")  # fall-through for unexpected files
    fake_yaml_file = flexmock(name="fake_file")
    fake_yaml_file.should_receive("read").and_return(
        yaml.dump({"infrastructure": "gce", "group": self.group,
                   "project": project_id, "zone": zone})
    )
    builtins.should_receive("open").with_args(
        LocalState.get_locations_yaml_location(self.keyname), "r").and_return(
        fake_yaml_file
    )
    # mock out reading the json file, and pretend that we're running in a
    # two node deployment
    fake_json_file = flexmock(name="fake_file")
    fake_json_file.should_receive("read").and_return(
        json.dumps([{"public_ip": "public1", "jobs": ["shadow"]},
                    {"public_ip": "public2", "jobs": ["appengine"]}])
    )
    builtins.should_receive("open").with_args(
        LocalState.get_locations_json_location(self.keyname), "r").and_return(
        fake_json_file
    )
    # and slip in a fake secret file
    fake_secret_file = flexmock(name="fake_file")
    fake_secret_file.should_receive("read").and_return("the secret")
    builtins.should_receive("open").with_args(
        LocalState.get_secret_key_location(self.keyname), "r").and_return(
        fake_secret_file
    )
    # also add in a fake client-secrets file for GCE
    client_secrets = LocalState.get_client_secrets_location(self.keyname)
    # mock out talking to GCE
    # first, mock out the oauth library calls
    fake_flow = flexmock(name="fake_flow")
    flexmock(oauth2client.client)
    oauth2client.client.should_receive("flow_from_clientsecrets").with_args(
        client_secrets, scope=str).and_return(
        fake_flow
    )
    fake_storage = flexmock(name="fake_storage")
    fake_storage.should_receive("get").and_return(None)
    flexmock(oauth2client.file)
    oauth2client.file.should_receive("Storage").with_args(str).and_return(
        fake_storage)
    fake_credentials = flexmock(name="fake_credentials")
    flexmock(oauth2client.tools)
    oauth2client.tools.should_receive("run").with_args(
        fake_flow, fake_storage).and_return(fake_credentials)
    # next, mock out http calls to GCE
    fake_http = flexmock(name="fake_http")
    fake_authorized_http = flexmock(name="fake_authorized_http")
    flexmock(httplib2)
    httplib2.should_receive("Http").and_return(fake_http)
    fake_credentials.should_receive("authorize").with_args(
        fake_http).and_return(fake_authorized_http)
    fake_gce = flexmock(name="fake_gce")
    # let's say that two instances are running
    # (the '*****@*****.**' emails below are artifacts of a secret-scrubbing
    # pass over this file; only equality with the mocked response matters)
    instance_one_info = {
        u"status": u"RUNNING",
        u"kind": u"compute#instance",
        u"machineType": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/machineTypes/n1-standard-1",
        u"name": u"appscale-bazboogroup-one",
        u"zone": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b",
        u"tags": {u"fingerprint": u"42WmSpB8rSM="},
        u"image": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/images/lucid64",
        u"disks": [{u"index": 0, u"kind": u"compute#attachedDisk",
                    u"type": u"EPHEMERAL", u"mode": u"READ_WRITE"}],
        u"canIpForward": False,
        u"serviceAccounts": [
            {u"scopes": [GCEAgent.GCE_SCOPE], u"email": u"*****@*****.**"}
        ],
        u"metadata": {u"kind": u"compute#metadata",
                      u"fingerprint": u"42WmSpB8rSM="},
        u"creationTimestamp": u"2013-05-22T11:52:33.254-07:00",
        u"id": u"8684033495853907982",
        u"selfLink": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/appscale-bazboogroup-feb10b11-62bc-4536-ac25-9734f2267d6d",
        u"networkInterfaces": [
            {
                u"accessConfigs": [
                    {
                        u"kind": u"compute#accessConfig",
                        u"type": u"ONE_TO_ONE_NAT",
                        u"name": u"External NAT",
                        u"natIP": u"public1",
                    }
                ],
                u"networkIP": u"private1",
                u"network": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/networks/bazboogroup",
                u"name": u"nic0",
            }
        ],
    }
    instance_two_info = {
        u"status": u"RUNNING",
        u"kind": u"compute#instance",
        u"machineType": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/machineTypes/n1-standard-1",
        u"name": u"appscale-bazboogroup-two",
        u"zone": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b",
        u"tags": {u"fingerprint": u"42WmSpB8rSM="},
        u"image": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/images/lucid64",
        u"disks": [{u"index": 0, u"kind": u"compute#attachedDisk",
                    u"type": u"EPHEMERAL", u"mode": u"READ_WRITE"}],
        u"canIpForward": False,
        u"serviceAccounts": [
            {u"scopes": [GCEAgent.GCE_SCOPE], u"email": u"*****@*****.**"}
        ],
        u"metadata": {u"kind": u"compute#metadata",
                      u"fingerprint": u"42WmSpB8rSM="},
        u"creationTimestamp": u"2013-05-22T11:52:33.254-07:00",
        u"id": u"8684033495853907982",
        u"selfLink": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/appscale-bazboogroup-feb10b11-62bc-4536-ac25-9734f2267d6d",
        u"networkInterfaces": [
            {
                u"accessConfigs": [
                    {
                        u"kind": u"compute#accessConfig",
                        u"type": u"ONE_TO_ONE_NAT",
                        u"name": u"External NAT",
                        u"natIP": u"public1",
                    }
                ],
                u"networkIP": u"private1",
                u"network": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/networks/bazboogroup",
                u"name": u"nic0",
            }
        ],
    }
    list_instance_info = {
        u"items": [instance_one_info, instance_two_info],
        u"kind": u"compute#instanceList",
        u"id": u"projects/appscale.com:appscale/zones/my-zone-1b/instances",
        u"selfLink": u"https://www.googleapis.com/compute/v1beta14/projects/961228229472/zones/my-zone-1b/instances",
    }
    fake_list_instance_request = flexmock(name="fake_list_instance_request")
    fake_list_instance_request.should_receive("execute").with_args(
        http=fake_authorized_http).and_return(
        list_instance_info
    )
    fake_instances = flexmock(name="fake_instances")
    fake_instances.should_receive("list").with_args(
        project=project_id, filter="name eq appscale-bazboogroup-.*",
        zone=zone
    ).and_return(fake_list_instance_request)
    fake_gce.should_receive("instances").and_return(fake_instances)
    # And assume that we can kill both of our instances fine
    delete_instance = u"operation-1369676691806-4ddb6b4ab6f39-a095d3de"
    delete_instance_info_one = {
        u"status": u"PENDING",
        u"kind": u"compute#operation",
        u"name": delete_instance,
        u"zone": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b",
        u"startTime": u"2013-05-27T10:44:51.849-07:00",
        u"insertTime": u"2013-05-27T10:44:51.806-07:00",
        u"targetId": u"12912855597472179535",
        u"targetLink": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/appscale-appscalecgb20-0cf89267-5887-4048-b774-ca20de47a07f",
        u"operationType": u"delete",
        u"progress": 0,
        u"id": u"11114355109942058217",
        u"selfLink": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/operations/operation-1369676691806-4ddb6b4ab6f39-a095d3de",
        u"user": u"*****@*****.**",
    }
    delete_instance_info_two = {
        u"status": u"PENDING",
        u"kind": u"compute#operation",
        u"name": delete_instance,
        u"zone": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b",
        u"startTime": u"2013-05-27T10:44:51.849-07:00",
        u"insertTime": u"2013-05-27T10:44:51.806-07:00",
        u"targetId": u"12912855597472179535",
        u"targetLink": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/appscale-appscalecgb20-0cf89267-5887-4048-b774-ca20de47a07f",
        u"operationType": u"delete",
        u"progress": 0,
        u"id": u"11114355109942058217",
        u"selfLink": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/operations/operation-1369676691806-4ddb6b4ab6f39-a095d3de",
        u"user": u"*****@*****.**",
    }
    fake_delete_instance_request_one = flexmock(
        name="fake_delete_instance_request_one")
    fake_delete_instance_request_one.should_receive("execute").with_args(
        http=fake_authorized_http).and_return(
        delete_instance_info_one
    )
    fake_instances.should_receive("delete").with_args(
        project=project_id, zone=zone, instance="appscale-bazboogroup-one"
    ).and_return(fake_delete_instance_request_one)
    fake_delete_instance_request_two = flexmock(
        name="fake_delete_instance_request_two")
    fake_delete_instance_request_two.should_receive("execute").with_args(
        http=fake_authorized_http).and_return(
        delete_instance_info_two
    )
    fake_instances.should_receive("delete").with_args(
        project=project_id, zone=zone, instance="appscale-bazboogroup-two"
    ).and_return(fake_delete_instance_request_two)
    # mock out our waiting for the instances to be deleted
    all_done = {u"status": u"DONE"}
    fake_instance_checker = flexmock(name="fake_instance_checker")
    fake_instance_checker.should_receive("execute").and_return(all_done)
    fake_blocker = flexmock(name="fake_blocker")
    fake_blocker.should_receive("get").with_args(
        project=project_id, operation=delete_instance, zone=zone
    ).and_return(fake_instance_checker)
    fake_gce.should_receive("zoneOperations").and_return(fake_blocker)
    # mock out the call to delete the firewall
    delete_firewall = u"operation-1369677695390-4ddb6f07cc611-5a8f1654"
    fake_delete_firewall_info = {
        u"status": u"PENDING",
        u"kind": u"compute#operation",
        u"name": delete_firewall,
        u"startTime": u"2013-05-27T11:01:35.482-07:00",
        u"insertTime": u"2013-05-27T11:01:35.390-07:00",
        u"targetId": u"11748720697396371259",
        u"targetLink": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/firewalls/appscalecgb20",
        u"operationType": u"delete",
        u"progress": 0,
        u"id": u"15574488986772298961",
        u"selfLink": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/operations/operation-1369677695390-4ddb6f07cc611-5a8f1654",
        u"user": u"*****@*****.**",
    }
    fake_delete_firewall_request = flexmock(
        name="fake_delete_firewall_request")
    fake_delete_firewall_request.should_receive("execute").and_return(
        fake_delete_firewall_info)
    fake_firewalls = flexmock(name="fake_firewalls")
    fake_firewalls.should_receive("delete").with_args(
        project=project_id, firewall=self.group).and_return(
        fake_delete_firewall_request
    )
    fake_gce.should_receive("firewalls").and_return(fake_firewalls)
    # mock out the call to make sure the firewall was deleted
    fake_firewall_checker = flexmock(name="fake_firewall_checker")
    fake_firewall_checker.should_receive("execute").and_return(all_done)
    fake_blocker.should_receive("get").with_args(
        project=project_id, operation=delete_firewall).and_return(
        fake_firewall_checker
    )
    fake_gce.should_receive("globalOperations").and_return(fake_blocker)
    # and the call to delete the network
    delete_network = u"operation-1369677749954-4ddb6f3bd1849-056cf8ca"
    fake_delete_network_info = {
        u"status": u"PENDING",
        u"kind": u"compute#operation",
        u"name": delete_network,
        u"startTime": u"2013-05-27T11:02:30.012-07:00",
        u"insertTime": u"2013-05-27T11:02:29.954-07:00",
        u"targetId": u"17688075350400527692",
        u"targetLink": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/networks/appscalecgb20",
        u"operationType": u"delete",
        u"progress": 0,
        u"id": u"12623697331874594836",
        u"selfLink": u"https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/operations/operation-1369677749954-4ddb6f3bd1849-056cf8ca",
        u"user": u"*****@*****.**",
    }
    fake_delete_network_request = flexmock(name="fake_delete_network_request")
    fake_delete_network_request.should_receive("execute").and_return(
        fake_delete_network_info)
    fake_networks = flexmock(name="fake_networks")
    fake_networks.should_receive("delete").with_args(
        project=project_id, network=self.group).and_return(
        fake_delete_network_request
    )
    fake_gce.should_receive("networks").and_return(fake_networks)
    # mock out the call to make sure the network was deleted
    fake_network_checker = flexmock(name="fake_network_checker")
    fake_network_checker.should_receive("execute").and_return(all_done)
    fake_blocker.should_receive("get").with_args(
        project=project_id, operation=delete_network).and_return(
        fake_network_checker
    )
    # finally, inject our fake GCE connection
    flexmock(apiclient.discovery)
    apiclient.discovery.should_receive("build").with_args(
        "compute", GCEAgent.API_VERSION).and_return(fake_gce)
    # finally, mock out removing the yaml file, json file, and secret key from
    # this machine
    flexmock(os)
    os.should_receive("remove").with_args(
        LocalState.get_locations_yaml_location(self.keyname)).and_return()
    os.should_receive("remove").with_args(
        LocalState.get_locations_json_location(self.keyname)).and_return()
    os.should_receive("remove").with_args(
        LocalState.get_secret_key_location(self.keyname)).and_return()
    argv = ["--keyname", self.keyname, "--test"]
    options = ParseArgs(argv, self.function).args
    AppScaleTools.terminate_instances(options)
def test_appscale_with_ips_layout_flag_and_success(self):
  """Verify that `appscale-add-keypair --ips_layout ...` succeeds end-to-end.

  Mocks out every external interaction (ssh reachability checks, the
  ssh-keygen/ssh-copy-id subprocesses, filesystem checks, key copying and
  chmod), then runs AppScaleTools.add_keypair with a base64-encoded
  ips_layout and expects it to complete without raising.

  NOTE(review): flexmock expectations are matched by argument pattern, so
  the order of the should_receive() calls below is significant — do not
  reorder them.
  """
  # assume that ssh is running on each machine in the layout
  fake_socket = flexmock(name='socket')
  fake_socket.should_receive('connect').with_args(('1.2.3.4', 22)) \
    .and_return(None)
  fake_socket.should_receive('connect').with_args(('1.2.3.5', 22)) \
    .and_return(None)
  fake_socket.should_receive('connect').with_args(('1.2.3.6', 22)) \
    .and_return(None)
  flexmock(socket)
  socket.should_receive('socket').and_return(fake_socket)

  # assume that we have ssh-keygen and ssh-copy-id installed locally
  flexmock(subprocess)
  subprocess.should_receive('Popen').with_args(re.compile('which ssh-keygen'),
    shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
    .and_return(self.success)

  flexmock(subprocess)
  subprocess.should_receive('Popen').with_args(re.compile('which ssh-copy-id'),
    shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
    .and_return(self.success)

  # assume that we have a ~/.appscale directory
  flexmock(os.path)
  os.path.should_call('exists')
  os.path.should_receive('exists').with_args(LocalState.LOCAL_APPSCALE_PATH) \
    .and_return(True)

  # and assume that we don't have public and private keys already made
  path = LocalState.LOCAL_APPSCALE_PATH + self.keyname
  public_key = LocalState.LOCAL_APPSCALE_PATH + self.keyname + '.pub'
  private_key = LocalState.LOCAL_APPSCALE_PATH + self.keyname + '.key'
  os.path.should_receive('exists').with_args(public_key).and_return(
    False)
  os.path.should_receive('exists').with_args(private_key).and_return(
    False)

  # next, assume that ssh-keygen ran fine
  flexmock(subprocess)
  subprocess.should_receive('Popen').with_args(re.compile('ssh-keygen'),
    shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
    .and_return(self.success)

  # assume that we can rename (copy) the private key into place
  flexmock(shutil)
  shutil.should_receive('copy').with_args(path, private_key).and_return()

  # finally, assume that we can chmod 0600 those files fine
  flexmock(os)
  os.should_receive('chmod').with_args(public_key, 0600).and_return()
  os.should_receive('chmod').with_args(path, 0600).and_return()

  # and assume that we can ssh-copy-id to each of the three IPs below
  flexmock(subprocess)
  subprocess.should_receive('Popen').with_args(re.compile('ssh-copy-id'),
    shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
    .and_return(self.success)

  # also, we should be able to copy over our new public and private keys fine
  # NOTE(review): the regex 'id_rsa[.pub]?' is a character class, not a
  # literal '.pub' suffix, but it still matches the intended commands since
  # they all contain 'id_rsa' — confirm before tightening it.
  flexmock(subprocess)
  subprocess.should_receive('Popen').with_args(re.compile('id_rsa[.pub]?'),
    shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
    .and_return(self.success)

  # don't use a 192.168.X.Y IP here, since sometimes we set our virtual
  # machines to boot with those addresses (and that can mess up our tests).
  ips_layout = yaml.safe_load("""
    master : 1.2.3.4
    database: 1.2.3.4
    zookeeper: 1.2.3.5
    appengine:  1.2.3.6
    """)

  argv = [
    "--ips_layout", base64.b64encode(yaml.dump(ips_layout)),
    "--keyname", self.keyname
  ]
  options = ParseArgs(argv, self.function).args
  AppScaleTools.add_keypair(options)
def test_terminate_in_gce_and_succeeds(self):
  """Verify that terminate_instances tears down a running GCE deployment.

  Fakes a locations.json file (with the 'infrastructure_info' key set to a
  GCE deployment), the OAuth2 handshake, and every GCE API call needed to
  list and delete two instances, the firewall, and the network. Also stubs
  out RemoteHelper.terminate_cloud_infrastructure and local cleanup of the
  json/secret files, then expects AppScaleTools.terminate_instances to
  complete without raising.
  """
  # let's say that there is a locations.json file with key
  # 'infrastructure_info', which means appscale is running, so we should
  # terminate the services on each box
  flexmock(os.path)
  os.path.should_call('exists')  # set up the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_client_secrets_location(self.keyname)).and_return(True)
  os.path.should_receive('exists').with_args(
    LocalState.get_secret_key_location(self.keyname)).and_return(True)

  # mock out reading the locations.json file with key
  # 'infrastructure_info', and pretend that we're on GCE
  project_id = "1234567890"
  zone = 'my-zone-1b'
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')

  # Assume persistent disks are used.
  flexmock(LocalState).should_receive('are_disks_used').and_return(True)

  # mock out reading the json file, and pretend that we're running in a
  # two node deployment
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)
  fake_json_file = flexmock(name='fake_file')
  fake_json_file.should_receive('read').and_return(json.dumps({
    "infrastructure_info": {
      'infrastructure': 'gce',
      'group': self.group,
      'project': project_id,
      'zone': zone
    },
    "node_info": [
      {
        'public_ip': 'public1',
        'jobs': ['shadow']
      },
      {
        'public_ip': 'public2',
        'jobs': ['appengine']
      }
    ]
  }))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_json_file)

  # and slip in a fake secret file
  fake_secret_file = flexmock(name='fake_file')
  fake_secret_file.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(
    LocalState.get_secret_key_location(self.keyname), 'r') \
    .and_return(fake_secret_file)

  # also add in a fake client-secrets file for GCE
  client_secrets = LocalState.get_client_secrets_location(self.keyname)

  # mock out talking to GCE
  # first, mock out the oauth library calls
  fake_flow = flexmock(name='fake_flow')
  flexmock(oauth2client.client)
  oauth2client.client.should_receive('flow_from_clientsecrets').with_args(
    client_secrets, scope=str).and_return(fake_flow)

  fake_storage = flexmock(name='fake_storage')
  fake_storage.should_receive('get').and_return(None)

  fake_flags = oauth2client.tools.argparser.parse_args(args=[])

  flexmock(oauth2client.file)
  oauth2client.file.should_receive('Storage').with_args(str).and_return(
    fake_storage)

  fake_credentials = flexmock(name='fake_credentials')
  flexmock(oauth2client.tools)
  oauth2client.tools.should_receive('run_flow').with_args(fake_flow,
    fake_storage, fake_flags).and_return(fake_credentials)

  # next, mock out http calls to GCE
  fake_http = flexmock(name='fake_http')
  fake_authorized_http = flexmock(name='fake_authorized_http')

  flexmock(httplib2)
  httplib2.should_receive('Http').and_return(fake_http)
  fake_credentials.should_receive('authorize').with_args(fake_http) \
    .and_return(fake_authorized_http)

  fake_gce = flexmock(name='fake_gce')

  # let's say that two instances are running
  instance_one_info = {
    u'status': u'RUNNING',
    u'kind': u'compute#instance',
    u'machineType': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/machineTypes/n1-standard-1',
    u'name': u'bazboogroup-one',
    u'zone': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b',
    u'tags': {u'fingerprint': u'42WmSpB8rSM='},
    u'image': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/images/lucid64',
    u'disks': [{
      u'index': 0,
      u'kind': u'compute#attachedDisk',
      u'type': u'EPHEMERAL',
      u'mode': u'READ_WRITE'
    }],
    u'canIpForward': False,
    u'serviceAccounts': [{
      u'scopes': [GCEAgent.GCE_SCOPE],
      u'email': u'*****@*****.**'
    }],
    u'metadata': {
      u'kind': u'compute#metadata',
      u'fingerprint': u'42WmSpB8rSM='
    },
    u'creationTimestamp': u'2013-05-22T11:52:33.254-07:00',
    u'id': u'8684033495853907982',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/bazboogroup-feb10b11-62bc-4536-ac25-9734f2267d6d',
    u'networkInterfaces': [{
      u'accessConfigs': [{
        u'kind': u'compute#accessConfig',
        u'type': u'ONE_TO_ONE_NAT',
        u'name': u'External NAT',
        u'natIP': u'public1'
      }],
      u'networkIP': u'private1',
      u'network': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/networks/bazboogroup',
      u'name': u'nic0'
    }]
  }

  instance_two_info = {
    u'status': u'RUNNING',
    u'kind': u'compute#instance',
    u'machineType': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/machineTypes/n1-standard-1',
    u'name': u'bazboogroup-two',
    u'zone': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b',
    u'tags': {u'fingerprint': u'42WmSpB8rSM='},
    u'image': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/images/lucid64',
    u'disks': [{
      u'index': 0,
      u'kind': u'compute#attachedDisk',
      u'type': u'EPHEMERAL',
      u'mode': u'READ_WRITE'
    }],
    u'canIpForward': False,
    u'serviceAccounts': [{
      u'scopes': [GCEAgent.GCE_SCOPE],
      u'email': u'*****@*****.**'
    }],
    u'metadata': {
      u'kind': u'compute#metadata',
      u'fingerprint': u'42WmSpB8rSM='
    },
    u'creationTimestamp': u'2013-05-22T11:52:33.254-07:00',
    u'id': u'8684033495853907982',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/bazboogroup-feb10b11-62bc-4536-ac25-9734f2267d6d',
    u'networkInterfaces': [{
      u'accessConfigs': [{
        u'kind': u'compute#accessConfig',
        u'type': u'ONE_TO_ONE_NAT',
        u'name': u'External NAT',
        u'natIP': u'public1'
      }],
      u'networkIP': u'private1',
      u'network': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/networks/bazboogroup',
      u'name': u'nic0'
    }]
  }

  list_instance_info = {
    u'items': [instance_one_info, instance_two_info],
    u'kind': u'compute#instanceList',
    u'id': u'projects/appscale.com:appscale/zones/my-zone-1b/instances',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/961228229472/zones/my-zone-1b/instances'
  }

  fake_list_instance_request = flexmock(name='fake_list_instance_request')
  fake_list_instance_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(list_instance_info)

  fake_instances = flexmock(name='fake_instances')
  fake_instances.should_receive('list').with_args(project=project_id,
    filter="name eq bazboogroup-.*", zone=zone) \
    .and_return(fake_list_instance_request)
  fake_gce.should_receive('instances').and_return(fake_instances)

  # And assume that we can kill both of our instances fine
  delete_instance = u'operation-1369676691806-4ddb6b4ab6f39-a095d3de'
  delete_instance_info_one = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': delete_instance,
    u'zone': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b',
    u'startTime': u'2013-05-27T10:44:51.849-07:00',
    u'insertTime': u'2013-05-27T10:44:51.806-07:00',
    u'targetId': u'12912855597472179535',
    u'targetLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/appscalecgb20-0cf89267-5887-4048-b774-ca20de47a07f',
    u'operationType': u'delete',
    u'progress': 0,
    u'id': u'11114355109942058217',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/operations/operation-1369676691806-4ddb6b4ab6f39-a095d3de',
    u'user': u'*****@*****.**'
  }

  delete_instance_info_two = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': delete_instance,
    u'zone': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b',
    u'startTime': u'2013-05-27T10:44:51.849-07:00',
    u'insertTime': u'2013-05-27T10:44:51.806-07:00',
    u'targetId': u'12912855597472179535',
    u'targetLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/appscalecgb20-0cf89267-5887-4048-b774-ca20de47a07f',
    u'operationType': u'delete',
    u'progress': 0,
    u'id': u'11114355109942058217',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/operations/operation-1369676691806-4ddb6b4ab6f39-a095d3de',
    u'user': u'*****@*****.**'
  }

  fake_delete_instance_request_one = flexmock(name='fake_delete_instance_request_one')
  fake_delete_instance_request_one.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(delete_instance_info_one)
  fake_instances.should_receive('delete').with_args(project=project_id,
    zone=zone, instance='bazboogroup-one').and_return(
    fake_delete_instance_request_one)

  fake_delete_instance_request_two = flexmock(name='fake_delete_instance_request_two')
  fake_delete_instance_request_two.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(delete_instance_info_two)
  fake_instances.should_receive('delete').with_args(project=project_id,
    zone=zone, instance='bazboogroup-two').and_return(
    fake_delete_instance_request_two)

  # mock out our waiting for the instances to be deleted
  all_done = {
    u'status' : u'DONE'
  }
  fake_instance_checker = flexmock(name='fake_instance_checker')
  fake_instance_checker.should_receive('execute').and_return(all_done)

  fake_blocker = flexmock(name='fake_blocker')
  fake_blocker.should_receive('get').with_args(project=project_id,
    operation=delete_instance, zone=zone).and_return(
    fake_instance_checker)
  fake_gce.should_receive('zoneOperations').and_return(fake_blocker)

  # mock out the call to delete the firewall
  delete_firewall = u'operation-1369677695390-4ddb6f07cc611-5a8f1654'
  fake_delete_firewall_info = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': delete_firewall,
    u'startTime': u'2013-05-27T11:01:35.482-07:00',
    u'insertTime': u'2013-05-27T11:01:35.390-07:00',
    u'targetId': u'11748720697396371259',
    u'targetLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/firewalls/appscalecgb20',
    u'operationType': u'delete',
    u'progress': 0,
    u'id': u'15574488986772298961',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/operations/operation-1369677695390-4ddb6f07cc611-5a8f1654',
    u'user': u'*****@*****.**'
  }
  fake_delete_firewall_request = flexmock(name='fake_delete_firewall_request')
  fake_delete_firewall_request.should_receive('execute').and_return(fake_delete_firewall_info)

  fake_firewalls = flexmock(name='fake_firewalls')
  fake_firewalls.should_receive('delete').with_args(project=project_id,
    firewall=self.group).and_return(fake_delete_firewall_request)
  fake_gce.should_receive('firewalls').and_return(fake_firewalls)

  # mock out the call to make sure the firewall was deleted
  fake_firewall_checker = flexmock(name='fake_firewall_checker')
  fake_firewall_checker.should_receive('execute').and_return(all_done)
  fake_blocker.should_receive('get').with_args(project=project_id,
    operation=delete_firewall).and_return(fake_firewall_checker)
  fake_gce.should_receive('globalOperations').and_return(fake_blocker)

  # and the call to delete the network
  delete_network = u'operation-1369677749954-4ddb6f3bd1849-056cf8ca'
  fake_delete_network_info = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': delete_network,
    u'startTime': u'2013-05-27T11:02:30.012-07:00',
    u'insertTime': u'2013-05-27T11:02:29.954-07:00',
    u'targetId': u'17688075350400527692',
    u'targetLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/networks/appscalecgb20',
    u'operationType': u'delete',
    u'progress': 0,
    u'id': u'12623697331874594836',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/operations/operation-1369677749954-4ddb6f3bd1849-056cf8ca',
    u'user': u'*****@*****.**'
  }
  fake_delete_network_request = flexmock(name='fake_delete_network_request')
  fake_delete_network_request.should_receive('execute').and_return(fake_delete_network_info)

  fake_networks = flexmock(name='fake_networks')
  fake_networks.should_receive('delete').with_args(project=project_id,
    network=self.group).and_return(fake_delete_network_request)
  fake_gce.should_receive('networks').and_return(fake_networks)

  # mock out the call to make sure the network was deleted
  fake_network_checker = flexmock(name='fake_network_checker')
  fake_network_checker.should_receive('execute').and_return(all_done)
  fake_blocker.should_receive('get').with_args(project=project_id,
    operation=delete_network).and_return(fake_network_checker)

  # finally, inject our fake GCE connection
  flexmock(apiclient.discovery)
  apiclient.discovery.should_receive('build').with_args('compute',
    GCEAgent.API_VERSION).and_return(fake_gce)

  flexmock(GCEAgent).should_receive('get_secrets_type')\
    .and_return(CredentialTypes.OAUTH)

  # finally, mock out removing the yaml file, json file, and secret key from
  # this machine
  flexmock(os)
  os.should_receive('remove').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return()
  os.should_receive('remove').with_args(
    LocalState.get_secret_key_location(self.keyname)).and_return()

  flexmock(RemoteHelper).should_receive('terminate_cloud_infrastructure')

  argv = [
    "--keyname", self.keyname,
    "--test"
  ]
  options = ParseArgs(argv, self.function).args
  AppScaleTools.terminate_instances(options)
def test_terminate_in_cloud_and_succeeds(self):
  """Verify that terminate_instances tears down a running EC2 deployment.

  Fakes a locations.json file (with 'infrastructure_info' set to an ec2
  deployment), a boto EC2 connection with three instances (two belonging
  to this keyname, one not), and the terminate/delete-keypair/
  delete-security-group calls. The security-group delete is set to fail
  once and then succeed, exercising the retry path. Expects
  AppScaleTools.terminate_instances to complete without raising.
  """
  # let's say that there is a locations.json file with key
  # 'infrastructure_info', which means appscale is running, so we should
  # terminate the services on each box
  flexmock(os.path)
  os.path.should_call('exists')  # set up the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_secret_key_location(self.keyname)).and_return(True)

  # mock out reading the locations.json file with key
  # 'infrastructure_info', and pretend that we're on a virtualized cluster
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')

  # Assume persistent disks are used.
  flexmock(LocalState).should_receive('are_disks_used').and_return(True)

  # mock out reading the json file, and pretend that we're running in a
  # two node deployment
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)
  fake_json_file = flexmock(name='fake_file')
  fake_json_file.should_receive('read').and_return(json.dumps({
    "infrastructure_info": {
      'infrastructure': 'ec2',
      'group': self.group,
    },
    "node_info": [
      {
        'public_ip': 'public1',
        'jobs': ['shadow']
      },
      {
        'public_ip': 'public2',
        'jobs': ['appengine']
      }
    ]
  }))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_json_file)

  # and slip in a fake secret file
  fake_secret_file = flexmock(name='fake_file')
  fake_secret_file.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(
    LocalState.get_secret_key_location(self.keyname), 'r') \
    .and_return(fake_secret_file)

  # mock out talking to EC2
  fake_ec2 = flexmock(name='fake_ec2')

  # let's say that three instances are running, and that two of them are in
  # our deployment (the third has a different key_name, so it must survive)
  fake_one_running = flexmock(name='fake_one', key_name=self.keyname,
    state='running', id='i-ONE', ip_address='1.2.3.4',
    private_ip_address='1.2.3.4')
  fake_two_running = flexmock(name='fake_two', key_name=self.keyname,
    state='running', id='i-TWO', ip_address='1.2.3.4',
    private_ip_address='1.2.3.4')
  fake_three_running = flexmock(name='fake_three', key_name='abcdefg',
    state='running', id='i-THREE', ip_address='1.2.3.4',
    private_ip_address='1.2.3.4')
  fake_reservation_running = flexmock(name='fake_reservation',
    instances=[fake_one_running, fake_two_running, fake_three_running])

  fake_one_terminated = flexmock(name='fake_one', key_name=self.keyname,
    state='terminated', id='i-ONE', ip_address='1.2.3.4',
    private_ip_address='1.2.3.4')
  fake_two_terminated = flexmock(name='fake_two', key_name=self.keyname,
    state='terminated', id='i-TWO', ip_address='1.2.3.4',
    private_ip_address='1.2.3.4')
  fake_three_terminated = flexmock(name='fake_three', key_name='abcdefg',
    state='terminated', id='i-THREE', ip_address='1.2.3.4',
    private_ip_address='1.2.3.4')
  fake_reservation_terminated = flexmock(name='fake_reservation',
    instances=[fake_one_terminated, fake_two_terminated,
    fake_three_terminated])

  # first call sees the instances running, second call sees them terminated
  fake_ec2.should_receive('get_all_instances').and_return(fake_reservation_running) \
    .and_return(fake_reservation_terminated)
  flexmock(boto.ec2)
  boto.ec2.should_receive('connect_to_region').and_return(fake_ec2)

  # and mock out the call to kill the instances
  fake_ec2.should_receive('terminate_instances').with_args(['i-ONE',
    'i-TWO']).and_return([fake_one_terminated, fake_two_terminated])

  # mock out the call to delete the keypair
  fake_ec2.should_receive('delete_key_pair').and_return()

  # and the call to delete the security group - let's say that we can't
  # delete the group the first time, and can the second
  fake_ec2.should_receive('delete_security_group').and_return(False) \
    .and_return(True)

  # finally, mock out removing the yaml file, json file, and secret key from
  # this machine
  flexmock(os)
  os.should_receive('remove').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return()
  os.should_receive('remove').with_args(
    LocalState.get_secret_key_location(self.keyname)).and_return()

  flexmock(RemoteHelper).should_receive('terminate_cloud_infrastructure')

  # also mock out asking the user for confirmation on shutting down
  # their cloud
  builtins.should_receive('raw_input').and_return('yes')

  argv = [
    "--keyname", self.keyname
  ]
  options = ParseArgs(argv, self.function).args
  AppScaleTools.terminate_instances(options)
def test_terminate_in_gce_and_succeeds(self):
  """Verify that terminate_instances tears down a legacy GCE deployment.

  Older variant of the GCE termination test: deployment metadata comes
  from a locations.yaml file (plus a separate locations.json node list)
  instead of a single json file, and the instances carry the 'appscale-'
  name prefix. Mocks the OAuth2 handshake and every GCE API call needed
  to list/delete two instances, the firewall, and the network, then
  expects AppScaleTools.terminate_instances to complete without raising.
  """
  # let's say that there is a locations.yaml file, which means appscale is
  # running, so we should terminate the services on each box
  flexmock(os.path)
  os.path.should_call('exists')  # set up the fall-through
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_yaml_location(
      self.keyname)).and_return(True)
  os.path.should_receive('exists').with_args(
    LocalState.get_client_secrets_location(
      self.keyname)).and_return(True)
  os.path.should_receive('exists').with_args(
    LocalState.get_secret_key_location(self.keyname)).and_return(True)

  # mock out reading the locations.yaml file, and pretend that we're on
  # GCE
  project_id = "1234567890"
  zone = 'my-zone-1b'
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')
  fake_yaml_file = flexmock(name='fake_file')
  fake_yaml_file.should_receive('read').and_return(
    yaml.dump({
      'infrastructure': 'gce',
      'group': self.group,
      'project': project_id,
      'zone': zone
    }))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_yaml_location(self.keyname), 'r') \
    .and_return(fake_yaml_file)

  # mock out reading the json file, and pretend that we're running in a
  # two node deployment
  fake_json_file = flexmock(name='fake_file')
  fake_json_file.should_receive('read').and_return(
    json.dumps([{
      'public_ip': 'public1',
      'jobs': ['shadow']
    }, {
      'public_ip': 'public2',
      'jobs': ['appengine']
    }]))
  builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(fake_json_file)

  # and slip in a fake secret file
  fake_secret_file = flexmock(name='fake_file')
  fake_secret_file.should_receive('read').and_return('the secret')
  builtins.should_receive('open').with_args(
    LocalState.get_secret_key_location(self.keyname), 'r') \
    .and_return(fake_secret_file)

  # also add in a fake client-secrets file for GCE
  client_secrets = LocalState.get_client_secrets_location(self.keyname)

  # mock out talking to GCE
  # first, mock out the oauth library calls
  fake_flow = flexmock(name='fake_flow')
  flexmock(oauth2client.client)
  oauth2client.client.should_receive(
    'flow_from_clientsecrets').with_args(
    client_secrets, scope=str).and_return(fake_flow)

  fake_storage = flexmock(name='fake_storage')
  fake_storage.should_receive('get').and_return(None)

  fake_flags = oauth2client.tools.argparser.parse_args(args=[])

  flexmock(oauth2client.file)
  oauth2client.file.should_receive('Storage').with_args(str).and_return(
    fake_storage)

  fake_credentials = flexmock(name='fake_credentials')
  flexmock(oauth2client.tools)
  oauth2client.tools.should_receive('run_flow').with_args(
    fake_flow, fake_storage, fake_flags).and_return(fake_credentials)

  # next, mock out http calls to GCE
  fake_http = flexmock(name='fake_http')
  fake_authorized_http = flexmock(name='fake_authorized_http')

  flexmock(httplib2)
  httplib2.should_receive('Http').and_return(fake_http)
  fake_credentials.should_receive('authorize').with_args(fake_http) \
    .and_return(fake_authorized_http)

  fake_gce = flexmock(name='fake_gce')

  # let's say that two instances are running
  instance_one_info = {
    u'status': u'RUNNING',
    u'kind': u'compute#instance',
    u'machineType': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/machineTypes/n1-standard-1',
    u'name': u'appscale-bazboogroup-one',
    u'zone': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b',
    u'tags': {
      u'fingerprint': u'42WmSpB8rSM='
    },
    u'image': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/images/lucid64',
    u'disks': [{
      u'index': 0,
      u'kind': u'compute#attachedDisk',
      u'type': u'EPHEMERAL',
      u'mode': u'READ_WRITE'
    }],
    u'canIpForward': False,
    u'serviceAccounts': [{
      u'scopes': [GCEAgent.GCE_SCOPE],
      u'email': u'*****@*****.**'
    }],
    u'metadata': {
      u'kind': u'compute#metadata',
      u'fingerprint': u'42WmSpB8rSM='
    },
    u'creationTimestamp': u'2013-05-22T11:52:33.254-07:00',
    u'id': u'8684033495853907982',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/appscale-bazboogroup-feb10b11-62bc-4536-ac25-9734f2267d6d',
    u'networkInterfaces': [{
      u'accessConfigs': [{
        u'kind': u'compute#accessConfig',
        u'type': u'ONE_TO_ONE_NAT',
        u'name': u'External NAT',
        u'natIP': u'public1'
      }],
      u'networkIP': u'private1',
      u'network': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/networks/bazboogroup',
      u'name': u'nic0'
    }]
  }

  instance_two_info = {
    u'status': u'RUNNING',
    u'kind': u'compute#instance',
    u'machineType': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/machineTypes/n1-standard-1',
    u'name': u'appscale-bazboogroup-two',
    u'zone': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b',
    u'tags': {
      u'fingerprint': u'42WmSpB8rSM='
    },
    u'image': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/images/lucid64',
    u'disks': [{
      u'index': 0,
      u'kind': u'compute#attachedDisk',
      u'type': u'EPHEMERAL',
      u'mode': u'READ_WRITE'
    }],
    u'canIpForward': False,
    u'serviceAccounts': [{
      u'scopes': [GCEAgent.GCE_SCOPE],
      u'email': u'*****@*****.**'
    }],
    u'metadata': {
      u'kind': u'compute#metadata',
      u'fingerprint': u'42WmSpB8rSM='
    },
    u'creationTimestamp': u'2013-05-22T11:52:33.254-07:00',
    u'id': u'8684033495853907982',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/appscale-bazboogroup-feb10b11-62bc-4536-ac25-9734f2267d6d',
    u'networkInterfaces': [{
      u'accessConfigs': [{
        u'kind': u'compute#accessConfig',
        u'type': u'ONE_TO_ONE_NAT',
        u'name': u'External NAT',
        u'natIP': u'public1'
      }],
      u'networkIP': u'private1',
      u'network': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/networks/bazboogroup',
      u'name': u'nic0'
    }]
  }

  list_instance_info = {
    u'items': [instance_one_info, instance_two_info],
    u'kind': u'compute#instanceList',
    u'id': u'projects/appscale.com:appscale/zones/my-zone-1b/instances',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/961228229472/zones/my-zone-1b/instances'
  }

  fake_list_instance_request = flexmock(
    name='fake_list_instance_request')
  fake_list_instance_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(list_instance_info)

  fake_instances = flexmock(name='fake_instances')
  fake_instances.should_receive('list').with_args(project=project_id,
    filter="name eq appscale-bazboogroup-.*", zone=zone) \
    .and_return(fake_list_instance_request)
  fake_gce.should_receive('instances').and_return(fake_instances)

  # And assume that we can kill both of our instances fine
  delete_instance = u'operation-1369676691806-4ddb6b4ab6f39-a095d3de'
  delete_instance_info_one = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': delete_instance,
    u'zone': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b',
    u'startTime': u'2013-05-27T10:44:51.849-07:00',
    u'insertTime': u'2013-05-27T10:44:51.806-07:00',
    u'targetId': u'12912855597472179535',
    u'targetLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/appscale-appscalecgb20-0cf89267-5887-4048-b774-ca20de47a07f',
    u'operationType': u'delete',
    u'progress': 0,
    u'id': u'11114355109942058217',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/operations/operation-1369676691806-4ddb6b4ab6f39-a095d3de',
    u'user': u'*****@*****.**'
  }

  delete_instance_info_two = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': delete_instance,
    u'zone': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b',
    u'startTime': u'2013-05-27T10:44:51.849-07:00',
    u'insertTime': u'2013-05-27T10:44:51.806-07:00',
    u'targetId': u'12912855597472179535',
    u'targetLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/instances/appscale-appscalecgb20-0cf89267-5887-4048-b774-ca20de47a07f',
    u'operationType': u'delete',
    u'progress': 0,
    u'id': u'11114355109942058217',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/zones/my-zone-1b/operations/operation-1369676691806-4ddb6b4ab6f39-a095d3de',
    u'user': u'*****@*****.**'
  }

  fake_delete_instance_request_one = flexmock(
    name='fake_delete_instance_request_one')
  fake_delete_instance_request_one.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(delete_instance_info_one)
  fake_instances.should_receive('delete').with_args(
    project=project_id,
    zone=zone,
    instance='appscale-bazboogroup-one').and_return(
    fake_delete_instance_request_one)

  fake_delete_instance_request_two = flexmock(
    name='fake_delete_instance_request_two')
  fake_delete_instance_request_two.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(delete_instance_info_two)
  fake_instances.should_receive('delete').with_args(
    project=project_id,
    zone=zone,
    instance='appscale-bazboogroup-two').and_return(
    fake_delete_instance_request_two)

  # mock out our waiting for the instances to be deleted
  all_done = {u'status': u'DONE'}
  fake_instance_checker = flexmock(name='fake_instance_checker')
  fake_instance_checker.should_receive('execute').and_return(all_done)

  fake_blocker = flexmock(name='fake_blocker')
  fake_blocker.should_receive('get').with_args(
    project=project_id,
    operation=delete_instance,
    zone=zone).and_return(fake_instance_checker)
  fake_gce.should_receive('zoneOperations').and_return(fake_blocker)

  # mock out the call to delete the firewall
  delete_firewall = u'operation-1369677695390-4ddb6f07cc611-5a8f1654'
  fake_delete_firewall_info = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': delete_firewall,
    u'startTime': u'2013-05-27T11:01:35.482-07:00',
    u'insertTime': u'2013-05-27T11:01:35.390-07:00',
    u'targetId': u'11748720697396371259',
    u'targetLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/firewalls/appscalecgb20',
    u'operationType': u'delete',
    u'progress': 0,
    u'id': u'15574488986772298961',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/operations/operation-1369677695390-4ddb6f07cc611-5a8f1654',
    u'user': u'*****@*****.**'
  }
  fake_delete_firewall_request = flexmock(
    name='fake_delete_firewall_request')
  fake_delete_firewall_request.should_receive('execute').and_return(
    fake_delete_firewall_info)

  fake_firewalls = flexmock(name='fake_firewalls')
  fake_firewalls.should_receive('delete').with_args(
    project=project_id,
    firewall=self.group).and_return(fake_delete_firewall_request)
  fake_gce.should_receive('firewalls').and_return(fake_firewalls)

  # mock out the call to make sure the firewall was deleted
  fake_firewall_checker = flexmock(name='fake_firewall_checker')
  fake_firewall_checker.should_receive('execute').and_return(all_done)
  fake_blocker.should_receive('get').with_args(
    project=project_id,
    operation=delete_firewall).and_return(fake_firewall_checker)
  fake_gce.should_receive('globalOperations').and_return(fake_blocker)

  # and the call to delete the network
  delete_network = u'operation-1369677749954-4ddb6f3bd1849-056cf8ca'
  fake_delete_network_info = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': delete_network,
    u'startTime': u'2013-05-27T11:02:30.012-07:00',
    u'insertTime': u'2013-05-27T11:02:29.954-07:00',
    u'targetId': u'17688075350400527692',
    u'targetLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/networks/appscalecgb20',
    u'operationType': u'delete',
    u'progress': 0,
    u'id': u'12623697331874594836',
    u'selfLink': u'https://www.googleapis.com/compute/v1beta14/projects/appscale.com:appscale/global/operations/operation-1369677749954-4ddb6f3bd1849-056cf8ca',
    u'user': u'*****@*****.**'
  }
  fake_delete_network_request = flexmock(
    name='fake_delete_network_request')
  fake_delete_network_request.should_receive('execute').and_return(
    fake_delete_network_info)

  fake_networks = flexmock(name='fake_networks')
  fake_networks.should_receive('delete').with_args(
    project=project_id,
    network=self.group).and_return(fake_delete_network_request)
  fake_gce.should_receive('networks').and_return(fake_networks)

  # mock out the call to make sure the network was deleted
  fake_network_checker = flexmock(name='fake_network_checker')
  fake_network_checker.should_receive('execute').and_return(all_done)
  fake_blocker.should_receive('get').with_args(
    project=project_id,
    operation=delete_network).and_return(fake_network_checker)

  # finally, inject our fake GCE connection
  flexmock(apiclient.discovery)
  apiclient.discovery.should_receive('build').with_args(
    'compute', GCEAgent.API_VERSION).and_return(fake_gce)

  # finally, mock out removing the yaml file, json file, and secret key from
  # this machine
  flexmock(os)
  os.should_receive('remove').with_args(
    LocalState.get_locations_yaml_location(self.keyname)).and_return()
  os.should_receive('remove').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return()
  os.should_receive('remove').with_args(
    LocalState.get_secret_key_location(self.keyname)).and_return()

  argv = ["--keyname", self.keyname, "--test"]
  options = ParseArgs(argv, self.function).args
  AppScaleTools.terminate_instances(options)
def test_upload_php_app_successfully(self):
  """Happy-path upload of a PHP app via AppScaleTools.upload_app.

  Mocks out every external interaction (filesystem, tar extraction, app.yaml
  parsing, SOAP calls to the AppController, scp/ssh shells, and the socket
  poll that waits for the app to serve), then asserts that upload_app
  reports the app as reachable at public1:8080.
  """
  app_dir = '/tmp/appscale-app-1234'

  # add in mocks so that the gzip'ed file gets extracted to /tmp
  # as well as for removing it later
  flexmock(os)
  os.should_receive('mkdir').with_args(app_dir) \
    .and_return(True)
  flexmock(shutil)
  shutil.should_receive('rmtree').with_args(app_dir).and_return()
  local_state = flexmock(LocalState)
  local_state.should_receive('shell')\
    .with_args(re.compile('tar zxvf'),False)\
    .and_return()

  # add in mocks so that there is an app.yaml, but with no appid set
  flexmock(os.path)
  os.path.should_call('exists')
  app_yaml_location = AppEngineHelper.get_app_yaml_location(app_dir)
  os.path.should_receive('exists').with_args(app_yaml_location) \
    .and_return(True)

  # mock out reading the app.yaml file
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through
  fake_app_yaml = flexmock(name="fake_app_yaml")
  fake_app_yaml.should_receive('read').and_return(
    yaml.dump({ 'application': 'baz', 'runtime': 'php' }))
  builtins.should_receive('open').with_args(app_yaml_location, 'r') \
    .and_return(fake_app_yaml)

  # Mock out service host and port
  app_data = { 'owner': '*****@*****.**',
    'hosts': { '192.168.1.1': { 'http': 8080, 'https': 4380 } } }
  app_stats_data = { 'apps': { 'baz': { 'http': 8080,
    'language': 'python27', 'total_reqs': 'no_change', 'appservers': 1,
    'https': 4380, 'reqs_enqueued': None } } }
  remote_tarball = '/opt/appscale/apps/baz.tar.gz'

  # mock out the SOAP call to the AppController and assume it succeeded
  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('status').with_args('the secret') \
    .and_return('Database is at public1')
  fake_appcontroller.should_receive('done_uploading').with_args(
    'baz', remote_tarball, 'the secret').and_return()
  fake_appcontroller.should_receive('update').with_args(
    ['baz'], 'the secret').and_return()
  fake_appcontroller.should_receive('does_user_exist').with_args(
    '*****@*****.**', 'the secret').and_return('true')
  fake_appcontroller.should_receive('does_user_exist').with_args(
    'a@public1', 'the secret').and_return('true')
  fake_appcontroller.should_receive('does_app_exist').with_args(
    'baz', 'the secret').and_return(json.dumps(app_data))
  fake_appcontroller.should_receive('get_app_data').with_args(
    'baz', 'the secret').and_return(json.dumps(app_data))
  fake_appcontroller.should_receive('get_all_stats').with_args(
    'the secret').and_return(json.dumps(app_stats_data))
  flexmock(SOAPpy)
  # NOTE(review): the following span was mangled by a credential scrubber
  # ('*****') and pieces of the original code (the SOAPProxy URL, its
  # .and_return(fake_appcontroller), and likely some user-lookup mocks)
  # are missing; it will not parse as-is. Recover from version control.
  SOAPpy.should_receive('SOAPProxy').with_args('https://*****:*****@a.com")
  flexmock(getpass)
  getpass.should_receive('getpass').and_return('aaaaaa')

  # mock out making the remote app directory
  local_state.should_receive('shell') \
    .with_args(re.compile('^ssh'), False, 5, stdin=re.compile('^mkdir -p')) \
    .and_return()

  # and mock out tarring and copying the app
  local_state.should_receive('shell') \
    .with_args(re.compile('tar -czf'), False) \
    .and_return()
  local_state.should_receive('shell') \
    .with_args(re.compile('/tmp/appscale-app-baz.tar.gz'), False, 5) \
    .and_return()

  # as well as removing the tar'ed app once we're done copying it
  flexmock(os)
  os.should_receive('remove').with_args('/tmp/appscale-app-baz-1234.tar.gz') \
    .and_return()
  os.should_receive('listdir').and_return(['app.yaml', 'index.py'])

  # and slap in a mock that says the app comes up after waiting for it
  # three times (two connect failures, then success)
  fake_socket = flexmock(name='fake_socket')
  fake_socket.should_receive('connect').with_args(('public1',
    8080)).and_raise(Exception).and_raise(Exception) \
    .and_return(None)
  flexmock(socket)
  socket.should_receive('socket').and_return(fake_socket)

  flexmock(RemoteHelper).should_receive('copy_app_to_host').\
    and_return(remote_tarball)

  argv = ["--keyname", self.keyname, "--file", self.app_dir + ".tar.gz"]
  options = ParseArgs(argv, self.function).args
  (host, port) = AppScaleTools.upload_app(options)
  self.assertEquals('public1', host)
  self.assertEquals(8080, port)
def test_terminate_in_virtual_cluster_and_succeeds(self):
  """Terminating a virtualized (Xen) deployment stops services on each box.

  A locations.yaml file exists, so AppScale is considered running; the test
  fakes out the yaml/json/secret files, the AppController SOAP endpoint, the
  remote ssh invocations, and the local cleanup, then drives
  AppScaleTools.terminate_instances end to end.
  """
  # The secret key file exists -> AppScale is running. Everything else
  # falls through to the real os.path.exists.
  flexmock(os.path)
  os.path.should_call('exists')
  os.path.should_receive('exists').with_args(
    LocalState.get_secret_key_location(self.keyname)).and_return(True)

  # Fake out file reads: locations.yaml says we're on a Xen cluster.
  fake_builtins = flexmock(sys.modules['__builtin__'])
  fake_builtins.should_call('open')  # fall-through for everything else
  yaml_file_mock = flexmock(name='fake_file')
  yaml_file_mock.should_receive('read').and_return(
    yaml.dump({'infrastructure': 'xen'}))
  fake_builtins.should_receive('open').with_args(
    LocalState.get_locations_yaml_location(self.keyname), 'r') \
    .and_return(yaml_file_mock)

  # locations.json describes a two-node deployment.
  os.path.should_receive('exists').with_args(
    LocalState.get_locations_json_location(self.keyname)).and_return(True)
  json_file_mock = flexmock(name='fake_file')
  json_file_mock.should_receive('read').and_return(json.dumps([
    {'public_ip': 'public1', 'jobs': ['shadow']},
    {'public_ip': 'public2', 'jobs': ['appengine']}
  ]))
  fake_builtins.should_receive('open').with_args(
    LocalState.get_locations_json_location(self.keyname), 'r') \
    .and_return(json_file_mock)

  # And a fake secret key file.
  secret_file_mock = flexmock(name='fake_file')
  secret_file_mock.should_receive('read').and_return('the secret')
  fake_builtins.should_receive('open').with_args(
    LocalState.get_secret_key_location(self.keyname), 'r') \
    .and_return(secret_file_mock)

  # The AppController reports both machines as still present.
  appcontroller_mock = flexmock(name='fake_appcontroller')
  appcontroller_mock.should_receive('get_all_public_ips') \
    .with_args('the secret') \
    .and_return(json.dumps(['public1', 'public2']))
  flexmock(SOAPpy)
  SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
    .and_return(appcontroller_mock)

  # Killing the remote appcontroller fails once, then succeeds.
  flexmock(subprocess)
  subprocess.should_receive('Popen').with_args(
    re.compile('controller stop'), shell=True, stdout=self.fake_temp_file,
    stderr=subprocess.STDOUT) \
    .and_return(self.failed).and_return(self.success)

  # The subsequent "is it stopped yet?" check says it has stopped.
  flexmock(subprocess)
  subprocess.should_receive('Popen').with_args(
    re.compile('ps x'), shell=True, stdout=self.fake_temp_file,
    stderr=subprocess.STDOUT) \
    .and_return(self.success)

  # Local cleanup: yaml, json, and secret key files are removed.
  flexmock(os)
  for location in (LocalState.get_locations_yaml_location(self.keyname),
                   LocalState.get_locations_json_location(self.keyname),
                   LocalState.get_secret_key_location(self.keyname)):
    os.should_receive('remove').with_args(location).and_return()

  # The user confirms the shutdown when prompted.
  fake_builtins.should_receive('raw_input').and_return('yes')

  argv = ["--keyname", self.keyname]
  options = ParseArgs(argv, self.function).args
  AppScaleTools.terminate_instances(options)
def test_upload_app_when_app_exists_on_virt_cluster(self):
  """Re-uploading an already-running app on a virtualized cluster succeeds.

  We do let you upload an app if it's already running: this mocks the
  app.yaml read, the AppController SOAP calls, the remote shell commands,
  and the readiness socket poll, then asserts upload_app reports
  public1:8080.
  """
  # add in mocks so that there is an app.yaml with an appid set
  flexmock(os.path)
  os.path.should_call('exists')
  app_yaml_location = AppEngineHelper.get_app_yaml_location(self.app_dir)
  os.path.should_receive('exists').with_args(app_yaml_location) \
    .and_return(True)

  # mock out reading the app.yaml file
  builtins = flexmock(sys.modules['__builtin__'])
  builtins.should_call('open')  # set the fall-through
  fake_app_yaml = flexmock(name="fake_app_yaml")
  fake_app_yaml.should_receive('read').and_return(yaml.dump({
    'application' : 'baz',
    'runtime' : 'python27'
  }))
  builtins.should_receive('open').with_args(app_yaml_location, 'r') \
    .and_return(fake_app_yaml)

  # mock out the SOAP call to the AppController and assume it succeeded
  fake_appcontroller = flexmock(name='fake_appcontroller')
  fake_appcontroller.should_receive('status').with_args('the secret') \
    .and_return('Database is at public1')
  fake_appcontroller.should_receive('done_uploading').with_args(
    'baz', '/opt/appscale/apps/baz.tar.gz', 'the secret').and_return('OK')
  fake_appcontroller.should_receive('update').with_args(
    ['baz'], 'the secret').and_return('OK')
  flexmock(SOAPpy)
  # NOTE(review): this span was mangled by a credential scrubber ('*****');
  # the original SOAPProxy URL, the creation of fake_userappserver, and the
  # definition of app_data were lost and the text below will not parse.
  # Recover the original lines from version control.
  SOAPpy.should_receive('SOAPProxy').with_args('https://*****:*****@a.com', 'the secret').and_return('false')
  fake_userappserver.should_receive('does_user_exist').with_args(
    'a@public1', 'the secret').and_return('false')
  fake_userappserver.should_receive('commit_new_user').with_args(
    '*****@*****.**', str, 'xmpp_user', 'the secret').and_return('true')
  fake_userappserver.should_receive('commit_new_user').with_args(
    'a@public1', str, 'xmpp_user', 'the secret').and_return('true')
  fake_userappserver.should_receive('get_app_data').with_args(
    'baz', 'the secret').and_return(app_data)
  # NOTE(review): second scrubbed span — the UserAppServer SOAPProxy URL and
  # its .and_return(...) are missing here as well.
  SOAPpy.should_receive('SOAPProxy').with_args('https://*****:*****@a.com")
  flexmock(getpass)
  getpass.should_receive('getpass').and_return('aaaaaa')

  # mock out making the remote app directory
  flexmock(subprocess)
  subprocess.should_receive('Popen').with_args(re.compile('mkdir -p'),
    shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
    .and_return(self.success)

  # and mock out tarring and copying the app
  subprocess.should_receive('Popen').with_args(re.compile('tar -czhf'),
    shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
    .and_return(self.success)
  subprocess.should_receive('Popen').with_args(re.compile(
    '/tmp/appscale-app-baz.tar.gz'), shell=True,
    stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
    .and_return(self.success)

  # as well as removing the tar'ed app once we're done copying it
  flexmock(os)
  os.should_receive('remove').with_args('/tmp/appscale-app-baz-1234.tar.gz') \
    .and_return()

  # and slap in a mock that says the app comes up after waiting for it
  # three times (two connect failures, then success)
  fake_socket = flexmock(name='fake_socket')
  fake_socket.should_receive('connect').with_args(('public1',
    8080)).and_raise(Exception).and_raise(Exception) \
    .and_return(None)
  flexmock(socket)
  socket.should_receive('socket').and_return(fake_socket)

  argv = [ "--keyname", self.keyname, "--file", self.app_dir ]
  options = ParseArgs(argv, self.function).args
  (host, port) = AppScaleTools.upload_app(options)
  self.assertEquals('public1', host)
  self.assertEquals(8080, port)
def test_appscale_in_one_node_gce_deployment(self):
  """Full run_instances flow on a one-node Google Compute Engine deployment.

  Mocks, in order: local ssh-key generation, the oauth2/httplib2 handshake,
  every GCE API collection the agent touches (projects, images, addresses,
  zones, disks, networks, firewalls, instances, operations), the remote
  shell commands that bootstrap the node, and the final locations/uaserver
  bookkeeping — then drives AppScaleTools.run_instances with a
  --min 1 --max 1 GCE argv.
  """
  # presume that our client_secrets file exists
  project_id = "appscale.com:appscale"
  client_secrets = "/boo/client_secrets.json"
  instance_type = 'n1-standard-8'
  zone = 'my-zone1-b'
  # NOTE(review): flexmock(os.path) is presumably applied in setUp —
  # confirm, since should_receive is used here without a visible flexmock.
  os.path.should_receive('exists').with_args(client_secrets).and_return(True)

  # and that the user does not have an ssh key set up, forcing us to create
  # one for them
  private_key = '{0}{1}.key'.format(LocalState.LOCAL_APPSCALE_PATH,
    self.keyname)
  public_key = '{0}{1}.pub'.format(LocalState.LOCAL_APPSCALE_PATH,
    self.keyname)
  os.path.should_receive('exists').with_args(private_key).and_return(False)
  os.path.should_receive('exists').with_args(public_key).and_return(False)
  self.local_state.should_receive('shell').with_args(
    re.compile('^ssh-keygen'), False).and_return()
  flexmock(os)
  original_private_key = LocalState.LOCAL_APPSCALE_PATH + self.keyname
  # 0600 is a Python 2 octal literal (rw for owner only)
  os.should_receive('chmod').with_args(original_private_key, 0600)
  os.should_receive('chmod').with_args(public_key, 0600)
  flexmock(shutil)
  shutil.should_receive('copy').with_args(original_private_key, private_key)

  # also, we should be able to copy over our secret.json file fine
  shutil.should_receive('copy').with_args(client_secrets,
    LocalState.get_client_secrets_location(self.keyname))

  # let's say that appscale isn't already running
  self.local_state.should_receive('ensure_appscale_isnt_running').and_return()
  self.local_state.should_receive('make_appscale_directory').and_return()

  # mock out talking to logs.appscale.com
  fake_connection = flexmock(name='fake_connection')
  fake_connection.should_receive('request').with_args('POST', '/upload', str,
    AppScaleLogger.HEADERS).and_return()
  flexmock(httplib)
  httplib.should_receive('HTTPConnection').with_args('logs.appscale.com') \
    .and_return(fake_connection)

  # mock out generating the secret key
  flexmock(uuid)
  uuid.should_receive('uuid4').and_return('the secret')

  # mock out writing the secret key to ~/.appscale, as well as reading it
  # later
  secret_key_location = LocalState.get_secret_key_location(self.keyname)
  fake_secret = flexmock(name="fake_secret")
  fake_secret.should_receive('read').and_return('the secret')
  fake_secret.should_receive('write').and_return()
  self.builtins.should_receive('open').with_args(secret_key_location, 'r') \
    .and_return(fake_secret)
  self.builtins.should_receive('open').with_args(secret_key_location, 'w') \
    .and_return(fake_secret)

  # mock out interactions with GCE
  # first, mock out the oauth library calls
  fake_flow = flexmock(name='fake_flow')
  flexmock(oauth2client.client)
  oauth2client.client.should_receive('flow_from_clientsecrets').with_args(
    client_secrets, scope=str).and_return(fake_flow)

  fake_storage = flexmock(name='fake_storage')
  fake_storage.should_receive('get').and_return(None)
  flexmock(oauth2client.file)
  oauth2client.file.should_receive('Storage').with_args(str).and_return(
    fake_storage)

  fake_credentials = flexmock(name='fake_credentials')
  flexmock(oauth2client.tools)
  oauth2client.tools.should_receive('run').with_args(fake_flow,
    fake_storage).and_return(fake_credentials)

  # next, mock out http calls to GCE
  fake_http = flexmock(name='fake_http')
  fake_authorized_http = flexmock(name='fake_authorized_http')
  flexmock(httplib2)
  httplib2.should_receive('Http').and_return(fake_http)
  fake_credentials.should_receive('authorize').with_args(fake_http) \
    .and_return(fake_authorized_http)

  # presume that there is an ssh key stored, but it isn't ours
  metadata_info = {
    u'kind': u'compute#project',
    u'description': u'',
    u'commonInstanceMetadata': {
      u'items': [{
        u'value': u'cgb:ssh-rsa keyinfo myhost',
        u'key': u'sshKeys'}],
      u'kind': u'compute#metadata'},
  }
  fake_metadata_request = flexmock(name='fake_metadata_request')
  fake_metadata_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(metadata_info)
  fake_projects = flexmock(name='fake_projects')
  fake_projects.should_receive('get').with_args(project=project_id) \
    .and_return(fake_metadata_request)
  fake_gce = flexmock(name='fake_gce')
  fake_gce.should_receive('projects').and_return(fake_projects)

  # thus we will need to set the metadata with our ssh key
  fake_ssh_pub_key = flexmock(name="fake_ssh_pub_key")
  fake_ssh_pub_key.should_receive('read').and_return('ssh-rsa key2info myhost')
  self.builtins.should_receive('open').with_args(public_key).and_return(
    fake_ssh_pub_key)

  # expected merged metadata (kept for documentation; the matcher below
  # only checks body=dict)
  new_metadata_body = {
    "items": [{
      "value" : u'cgb:ssh-rsa key2info myhost\ncgb:ssh-rsa keyinfo myhost',
      "key" : "sshKeys"
    }],
    "kind": "compute#metadata"
  }
  set_metadata_name = u'operation-222222-4dd41ec7d6c11-8013657f'
  set_metadata = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': set_metadata_name,
    u'operationType': u'insert',
    u'progress': 0,
    u'selfLink': unicode(GCEAgent.GCE_URL) + \
      u'appscale.com:appscale/global/operations/' + \
      u'operation-1369175117235-4dd41ec7d6c11-8013657f',
    u'user': u'*****@*****.**'
  }
  fake_set_metadata_request = flexmock(name='fake_set_metadata_request')
  fake_set_metadata_request.should_receive('execute').and_return(set_metadata)
  fake_projects.should_receive('setCommonInstanceMetadata').with_args(
    project=project_id, body=dict).and_return(fake_set_metadata_request)

  updated_metadata_info = { u'status': u'DONE' }
  fake_metadata_checker = flexmock(name='fake_network_checker')
  fake_metadata_checker.should_receive('execute').and_return(
    updated_metadata_info)
  fake_blocker = flexmock(name='fake_blocker')
  fake_blocker.should_receive('get').with_args(project=project_id,
    operation=set_metadata_name).and_return(fake_metadata_checker)

  # presume that our image does exist in GCE, with some fake data
  # acquired by running a not mocked version of this code
  image_name = 'appscale-image-name'
  image_info = {
    u'kind': u'compute#image',
    u'description': u'',
    u'rawDisk': {u'containerType': u'TAR', u'source': u''},
    u'preferredKernel': unicode(GCEAgent.GCE_URL) + \
      u'/google/global/kernels/gce-v20130515',
    u'sourceType': u'RAW',
    u'creationTimestamp': u'2013-05-21T08:05:12.198-07:00',
    u'id': u'4235320207849085220',
    u'selfLink': unicode(GCEAgent.GCE_URL) + \
      u'961228229472/global/images/' + unicode(image_name),
    u'name': unicode(image_name)
  }
  fake_image_request = flexmock(name='fake_image_request')
  fake_image_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(image_info)
  fake_images = flexmock(name='fake_images')
  fake_images.should_receive('get').with_args(project=project_id,
    image=image_name).and_return(fake_image_request)
  fake_gce.should_receive('images').and_return(fake_images)

  # next, presume that the static ip we want to use exists
  address_name = 'static-ip'
  address_info = {'items':[]}
  region_name = 'my-zone1'
  fake_address_request = flexmock(name='fake_address_request')
  fake_address_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(address_info)
  fake_addresses = flexmock(name='fake_addresses')
  fake_addresses.should_receive('list').with_args(project=project_id,
    filter="address eq static-ip", region=region_name).and_return(
    fake_address_request)
  fake_gce.should_receive('addresses').and_return(fake_addresses)

  # next, presume that the zone we want to use exists
  zone_name = 'my-zone1-b'
  zone_info = {}
  fake_zone_request = flexmock(name='fake_zone_request')
  fake_zone_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(zone_info)
  fake_zones = flexmock(name='fake_zones')
  fake_zones.should_receive('get').with_args(project=project_id,
    zone=zone_name).and_return(fake_zone_request)
  fake_gce.should_receive('zones').and_return(fake_zones)

  # next, presume that the persistent disk we want to use exists
  disk_name = 'my-persistent-disk-1'
  disk_info = {'status':'DONE'}
  fake_disk_request = flexmock(name='fake_disk_request')
  fake_disk_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(disk_info)
  fake_disks = flexmock(name='fake_disks')
  fake_disks.should_receive('get').with_args(project=project_id,
    disk=disk_name, zone=zone).and_return(fake_disk_request)
  fake_disks.should_receive('insert').with_args(project=project_id,
    sourceImage=str, body=dict, zone=zone_name).and_return(fake_disk_request)
  fake_gce.should_receive('disks').and_return(fake_disks)

  # next, presume that the network doesn't exist yet
  fake_network_request = flexmock(name='fake_network_request')
  fake_network_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_raise(apiclient.errors.HttpError, None,
    None)
  fake_networks = flexmock(name='fake_networks')
  fake_networks.should_receive('get').with_args(project=project_id,
    network='bazgroup').and_return(fake_network_request)
  fake_gce.should_receive('networks').and_return(fake_networks)

  # next, presume that the firewall doesn't exist yet
  fake_firewall_request = flexmock(name='fake_firewall_request')
  fake_firewall_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_raise(apiclient.errors.HttpError, None,
    None)
  fake_firewalls = flexmock(name='fake_firewalls')
  fake_firewalls.should_receive('get').with_args(project=project_id,
    firewall='bazgroup').and_return(fake_firewall_request)
  fake_gce.should_receive('firewalls').and_return(fake_firewalls)

  # presume that we can create the network fine
  create_network = u'operation-1369175117235-4dd41ec7d6c11-8013657f'
  network_info = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': create_network,
    u'startTime': u'2013-05-21T15:25:17.308-07:00',
    u'insertTime': u'2013-05-21T15:25:17.235-07:00',
    u'targetLink': unicode(GCEAgent.GCE_URL) + \
      u'appscale.com:appscale/global/networks/bazgroup',
    u'operationType': u'insert',
    u'progress': 0,
    u'id': u'4904874319704759670',
    u'selfLink': unicode(GCEAgent.GCE_URL) + \
      u'appscale.com:appscale/global/operations/' + \
      u'operation-1369175117235-4dd41ec7d6c11-8013657f',
    u'user': u'*****@*****.**'
  }
  fake_network_insert_request = flexmock(name='fake_network_insert_request')
  fake_network_insert_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(network_info)
  fake_networks.should_receive('insert').with_args(project=project_id,
    body=dict).and_return(fake_network_insert_request)

  created_network_info = { u'status': u'DONE' }
  fake_network_checker = flexmock(name='fake_network_checker')
  fake_network_checker.should_receive('execute').and_return(
    created_network_info)
  fake_blocker.should_receive('get').with_args(project=project_id,
    operation=create_network).and_return(fake_network_checker)
  fake_gce.should_receive('globalOperations').and_return(fake_blocker)

  # and presume that we can create the firewall fine
  create_firewall = u'operation-1369176378310-4dd4237a84021-68e4dfa6'
  firewall_info = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': create_firewall,
    u'startTime': u'2013-05-21T15:46:18.402-07:00',
    u'insertTime': u'2013-05-21T15:46:18.310-07:00',
    u'targetLink': unicode(GCEAgent.GCE_URL) + \
      u'appscale.com:appscale/global/firewalls/bazgroup',
    u'operationType': u'insert',
    u'progress': 0,
    u'id': u'13248349431060541723',
    u'selfLink': unicode(GCEAgent.GCE_URL) + \
      u'appscale.com:appscale/global/operations/' + \
      u'operation-1369176378310-4dd4237a84021-68e4dfa6',
    u'user': u'*****@*****.**'
  }
  fake_firewall_insert_request = flexmock(name='fake_firewall_insert_request')
  fake_firewall_insert_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(firewall_info)
  fake_firewalls.should_receive('insert').with_args(project=
    u'appscale.com:appscale', body=dict).and_return(
    fake_firewall_insert_request)

  created_firewall_info = { u'status': u'DONE' }
  fake_firewall_checker = flexmock(name='fake_network_checker')
  fake_firewall_checker.should_receive('execute').and_return(
    created_firewall_info)
  fake_blocker.should_receive('get').with_args(project=project_id,
    operation=create_firewall).and_return(fake_firewall_checker)

  # we only need to create one node, so set up mocks for that
  add_instance = u'operation-1369248752891-4dd5311848461-afc55a20'
  instance_id = 'appscale-bazgroup-feb10b11-62bc-4536-ac25-9734f2267d6d'
  add_instance_info = {
    u'status': u'PENDING',
    u'kind': u'compute#operation',
    u'name': add_instance,
    u'azone': unicode(GCEAgent.GCE_URL) + \
      u'appscale.com:appscale/zones/us-central1-a',
    u'startTime': u'2013-05-22T11:52:32.939-07:00',
    u'insertTime': u'2013-05-22T11:52:32.891-07:00',
    u'targetLink': unicode(GCEAgent.GCE_URL) + \
      u'appscale.com:appscale/zones/us-central1-a/instances/' + instance_id,
    u'operationType': u'insert',
    u'progress': 0,
    u'id': u'6663616273628949255',
    u'selfLink': unicode(GCEAgent.GCE_URL) + \
      u'appscale.com:appscale/zones/us-central1-a/operations/operation-1369248752891-4dd5311848461-afc55a20',
    u'user': u'*****@*****.**'
  }
  fake_add_instance_request = flexmock(name='fake_add_instance_request')
  fake_add_instance_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(add_instance_info)
  fake_instances = flexmock(name='fake_instances')
  fake_gce.should_receive('instances').and_return(fake_instances)
  fake_instances.should_receive('insert').with_args(project=project_id,
    body=dict, zone=str).and_return(fake_add_instance_request)

  created_instance_info = { u'status': u'DONE' }
  fake_instance_checker = flexmock(name='fake_network_checker')
  fake_instance_checker.should_receive('execute').and_return(
    created_instance_info)
  fake_blocker.should_receive('get').with_args(project=project_id,
    operation=add_instance).and_return(fake_instance_checker)

  # add some fake data in where no instances are initially running, then one
  # is (in response to our insert request)
  no_instance_info = { }
  list_instance_info = {
    u'items': [{
      u'status': u'RUNNING',
      u'kind': u'compute#instance',
      u'machineType': u'https://www.googleapis.com/compute/v1/projects/appscale.com:appscale/zones/us-central1-a/machineTypes/' + instance_type,
      u'name': instance_id,
      u'zone': u'https://www.googleapis.com/compute/v1/projects/appscale.com:appscale/zones/us-central1-a',
      u'tags': {u'fingerprint': u'42WmSpB8rSM='},
      u'image': u'https://www.googleapis.com/compute/v1/projects/appscale.com:appscale/global/images/lucid64',
      u'disks': [{
        u'index': 0,
        u'kind': u'compute#attachedDisk',
        u'type': u'EPHEMERAL',
        u'mode': u'READ_WRITE'
      }],
      u'canIpForward': False,
      u'serviceAccounts': [{
        u'scopes': [GCEAgent.GCE_SCOPE],
        u'email': u'*****@*****.**'
      }],
      u'metadata': {
        u'kind': u'compute#metadata',
        u'fingerprint': u'42WmSpB8rSM='
      },
      u'creationTimestamp': u'2013-05-22T11:52:33.254-07:00',
      u'id': u'8684033495853907982',
      u'selfLink': u'https://www.googleapis.com/compute/v1/projects/appscale.com:appscale/zones/us-central1-a/instances/' + instance_id,
      u'networkInterfaces': [{
        u'accessConfigs': [{
          u'kind': u'compute#accessConfig',
          u'type': u'ONE_TO_ONE_NAT',
          u'name': u'External NAT',
          u'natIP': u'public1'
        }],
        u'networkIP': u'private1',
        u'network': u'https://www.googleapis.com/compute/v1/projects/appscale.com:appscale/global/networks/bazgroup',
        u'name': u'nic0'
      }]
    }],
    u'kind': u'compute#instanceList',
    u'id': u'projects/appscale.com:appscale/zones/us-central1-a/instances',
    u'selfLink': u'https://www.googleapis.com/compute/v1/projects/961228229472/zones/us-central1-a/instances'
  }
  fake_list_instance_request = flexmock(name='fake_list_instance_request')
  # two empty answers, then our instance appears
  fake_list_instance_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return(no_instance_info).and_return(
    no_instance_info).and_return(list_instance_info)
  fake_instances.should_receive('list').with_args(project=project_id,
    filter="name eq appscale-bazgroup-.*", zone=zone) \
    .and_return(fake_list_instance_request)
  fake_instances.should_receive('list').with_args(project=project_id,
    zone=zone).and_return(fake_list_instance_request)

  # mock out deleting the public IP from the instance
  fake_delete_access_request = flexmock(name='fake_delete_access_request')
  fake_delete_access_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return()
  fake_instances.should_receive('deleteAccessConfig').with_args(
    project=project_id, accessConfig=str, instance=instance_id,
    networkInterface=str, zone=zone).and_return(fake_delete_access_request)

  # as well as adding in the new, static IP to the instance
  fake_add_access_request = flexmock(name='fake_add_access_request')
  fake_add_access_request.should_receive('execute').with_args(
    http=fake_authorized_http).and_return()
  fake_instances.should_receive('addAccessConfig').with_args(
    project=project_id, instance=instance_id, networkInterface=str,
    zone=zone, body=dict).and_return(fake_add_access_request)

  # finally, inject our fake GCE connection
  flexmock(apiclient.discovery)
  apiclient.discovery.should_receive('build').with_args('compute', str) \
    .and_return(fake_gce)

  # assume that root login is not enabled
  self.local_state.should_receive('shell').with_args(re.compile('ssh'),
    False, 5, stdin='ls').and_raise(ShellException)

  # assume that we can enable root login
  self.local_state.should_receive('shell').with_args(re.compile('ssh'),
    False, 5, stdin=re.compile('sudo cp')).and_return()

  # and assume that we can copy over our ssh keys fine
  self.local_state.should_receive('shell').with_args(
    re.compile('scp .*[r|d]sa'), False, 5).and_return()
  self.local_state.should_receive('shell').with_args(re.compile('scp .*{0}'
    .format(self.keyname)), False, 5).and_return()

  self.setup_appscale_compatibility_mocks()

  # mock out generating the private key
  self.local_state.should_receive('shell').with_args(re.compile('openssl'),
    False, stdin=None)

  # assume that we started monit fine
  self.local_state.should_receive('shell').with_args(re.compile('ssh'),
    False, 5, stdin=re.compile('monit'))

  # and that we copied over the AppController's monit file
  self.local_state.should_receive('shell').with_args(re.compile('scp'),
    False, 5, stdin=re.compile('controller-17443.cfg'))

  self.setup_socket_mocks('static-ip')
  self.setup_appcontroller_mocks('static-ip', 'private1')

  # mock out reading the locations.json file, and slip in our own json
  self.local_state.should_receive('get_local_nodes_info').and_return(
    json.loads(json.dumps([{
      "public_ip" : "static-ip",
      "private_ip" : "private1",
      "jobs" : ["shadow", "login"]
    }])))

  # copying over the locations yaml and json files should be fine
  self.local_state.should_receive('shell').with_args(re.compile('scp'),
    False, 5, stdin=re.compile('locations-{0}'.format(self.keyname)))

  # same for the secret key
  self.local_state.should_receive('shell').with_args(re.compile('scp'),
    False, 5, stdin=re.compile('{0}.secret'.format(self.keyname)))

  self.setup_uaserver_mocks('static-ip')

  # Finally, pretend we're using a single persistent disk.
  disk_layout = yaml.safe_load("""
node-1: my-persistent-disk-1
  """)

  argv = [
    "--min", "1",
    "--max", "1",
    "--disks", base64.b64encode(yaml.dump(disk_layout)),
    "--group", self.group,
    "--infrastructure", "gce",
    "--gce_instance_type", instance_type,
    "--machine", image_name,
    "--keyname", self.keyname,
    "--client_secrets", client_secrets,
    "--project", project_id,
    "--test",
    "--zone", "my-zone1-b",
    "--static_ip", "static-ip"
  ]

  options = ParseArgs(argv, self.function).args
  AppScaleTools.run_instances(options)
def prepare(pulp_registries=None, docker_registries=None, before_dockerfile=False,  # noqa
            reactor_config_map=False):
    """Build a DockerBuildWorkflow fixture with mocked OSBS/registry state.

    :param pulp_registries: iterable of (name, crane_uri) pairs; defaults to
        one entry using LOCALHOST_REGISTRY
    :param docker_registries: iterable of docker registry URIs; defaults to
        (DOCKER0_REGISTRY,)
    :param before_dockerfile: if True, use the XBeforeDockerfile builder stub
        (workflow state before the Dockerfile is available)
    :param reactor_config_map: if True, install a ReactorConfig with an
        'openshift' section into the workflow's plugin workspace
    :return: a configured DockerBuildWorkflow ready for plugin tests
    """
    if pulp_registries is None:
        pulp_registries = (("test", LOCALHOST_REGISTRY), )

    if docker_registries is None:
        docker_registries = (DOCKER0_REGISTRY, )

    # No-op stand-ins for OSBS build-object updates so no real API is hit.
    def update_annotations_on_build(build_id, annotations):
        pass

    def update_labels_on_build(build_id, labels):
        pass

    # Inject a fake BUILD environment variable so the plugin under test sees
    # an OpenShift build named "asd" in namespace "namespace".
    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = dedent('''\
        {
          "metadata": {
            "name": "asd",
            "namespace": "namespace"
          }
        }
        ''')
    flexmock(OSBS, update_annotations_on_build=update_annotations_on_build)
    flexmock(OSBS, update_labels_on_build=update_labels_on_build)
    # Expected osbs.conf.Configuration kwargs; should_call verifies the plugin
    # constructs its Configuration with exactly these arguments.
    config_kwargs = {
        'namespace': 'namespace',
        'verify_ssl': True,
        'openshift_url': 'http://example.com/',
        'use_auth': True,
        'conf_file': None,
        'build_json_dir': None
    }
    (flexmock(osbs.conf.Configuration).should_call("__init__").with_args(
        **config_kwargs))
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    workflow = DockerBuildWorkflow({
        "provider": "git",
        "uri": "asd"
    }, "test-image")

    if reactor_config_map:
        openshift_map = {
            'url': 'http://example.com/',
            'insecure': False,
            'auth': {
                'enable': True
            },
        }
        workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
        workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
            ReactorConfig({'version': 1, 'openshift': openshift_map})

    for name, crane_uri in pulp_registries:
        workflow.push_conf.add_pulp_registry(name, crane_uri)

    workflow.tag_conf.add_primary_image(TEST_IMAGE)
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    # Register each docker registry with fixed v2 digests for both images.
    for docker_registry in docker_registries:
        r = workflow.push_conf.add_docker_registry(docker_registry)
        r.digests[TEST_IMAGE] = ManifestDigest(v1=DIGEST_NOT_USED, v2=DIGEST1)
        r.digests["namespace/image:asd123"] = ManifestDigest(
            v1=DIGEST_NOT_USED, v2=DIGEST2)

    if before_dockerfile:
        setattr(workflow, 'builder', XBeforeDockerfile())
    else:
        setattr(workflow, 'builder', X)
        # Base image inspect data only exists once the Dockerfile stage ran.
        setattr(workflow, '_base_image_inspect', {'Id': '01234567'})

    workflow.build_logs = [
        "a", "b",
    ]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    workflow.source.lg.should_receive("_commit_id").and_return("commit")
    return workflow
def test_terminate_in_cloud_and_succeeds(self):
    """terminate_instances on an EC2 deployment kills only our instances.

    Mocks the locations yaml/json and secret files, a boto EC2 connection
    with three running instances (two owned by this keyname, one not), and
    asserts termination proceeds: only i-ONE/i-TWO are terminated, the
    keypair is deleted, the security group is retried once, and local state
    files are removed.
    """
    # let's say that there is a locations.yaml file, which means appscale is
    # running, so we should terminate the services on each box
    flexmock(os.path)
    os.path.should_call('exists')  # set up the fall-through
    os.path.should_receive('exists').with_args(
        LocalState.get_locations_yaml_location(self.keyname)).and_return(True)

    # mock out reading the locations.yaml file, and pretend that we're on
    # a virtualized cluster
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')
    fake_yaml_file = flexmock(name='fake_file')
    fake_yaml_file.should_receive('read').and_return(yaml.dump({
        'infrastructure' : 'ec2',
        'group' : 'bazboogroup'
    }))
    builtins.should_receive('open').with_args(
        LocalState.get_locations_yaml_location(self.keyname), 'r') \
        .and_return(fake_yaml_file)

    # mock out reading the json file, and pretend that we're running in a
    # two node deployment
    fake_json_file = flexmock(name='fake_file')
    fake_json_file.should_receive('read').and_return(json.dumps([
        {
            'public_ip' : 'public1',
            'jobs' : ['shadow']
        },
        {
            'public_ip' : 'public2',
            'jobs' : ['appengine']
        }
    ]))
    builtins.should_receive('open').with_args(
        LocalState.get_locations_json_location(self.keyname), 'r') \
        .and_return(fake_json_file)

    # and slip in a fake secret file
    fake_secret_file = flexmock(name='fake_file')
    fake_secret_file.should_receive('read').and_return('the secret')
    builtins.should_receive('open').with_args(
        LocalState.get_secret_key_location(self.keyname), 'r') \
        .and_return(fake_secret_file)

    # mock out talking to EC2
    fake_ec2 = flexmock(name='fake_ec2')

    # let's say that three instances are running, and that two of them are in
    # our deployment
    fake_one = flexmock(name='fake_one', key_name=self.keyname, state='running',
        id='i-ONE', public_dns_name='public1', private_dns_name='private1')
    fake_two = flexmock(name='fake_two', key_name=self.keyname, state='running',
        id='i-TWO', public_dns_name='public2', private_dns_name='private2')
    # fake_three uses a different keypair, so it must be left running
    fake_three = flexmock(name='fake_three', key_name='abcdefg', state='running',
        id='i-THREE', public_dns_name='public3', private_dns_name='private3')
    fake_reservation = flexmock(name='fake_reservation', instances=[fake_one,
        fake_two, fake_three])
    # NOTE(review): other tests in this suite wrap the reservation in a list
    # before returning it from get_all_instances; confirm the tools code
    # tolerates a bare reservation here.
    fake_ec2.should_receive('get_all_instances').and_return(fake_reservation)

    flexmock(boto)
    boto.should_receive('connect_ec2').with_args('baz', 'baz') \
        .and_return(fake_ec2)

    # and mock out the call to kill the instances
    fake_ec2.should_receive('terminate_instances').with_args(['i-ONE',
        'i-TWO']).and_return([fake_one, fake_two])

    # mock out the call to delete the keypair
    fake_ec2.should_receive('delete_key_pair').and_return()

    # and the call to delete the security group - let's say that we can't
    # delete the group the first time, and can the second
    fake_ec2.should_receive('delete_security_group').and_return(False) \
        .and_return(True)

    # finally, mock out removing the yaml file, json file, and secret key from
    # this machine
    flexmock(os)
    os.should_receive('remove').with_args(
        LocalState.get_locations_yaml_location(self.keyname)).and_return()
    os.should_receive('remove').with_args(
        LocalState.get_locations_json_location(self.keyname)).and_return()
    os.should_receive('remove').with_args(
        LocalState.get_secret_key_location(self.keyname)).and_return()

    argv = [
        "--keyname", self.keyname
    ]
    options = ParseArgs(argv, self.function).args
    AppScaleTools.terminate_instances(options)
def test_increment(self, tmpdir, component, version, next_release, include_target, reactor_config_map): class MockedClientSession(object): def __init__(self, hub, opts=None): pass def getNextRelease(self, build_info): assert build_info['name'] == list(component.values())[0] assert build_info['version'] == list(version.values())[0] return next_release['actual'] def getBuild(self, build_info): assert build_info['name'] == list(component.values())[0] assert build_info['version'] == list(version.values())[0] if build_info['release'] in next_release['builds']: return True return None def ssl_login(self, cert=None, ca=None, serverca=None, proxyuser=None): self.ca_path = ca self.cert_path = cert self.serverca_path = serverca return True def krb_login(self, *args, **kwargs): return True session = MockedClientSession('') flexmock(koji, ClientSession=session) labels = {} labels.update(component) labels.update(version) plugin = self.prepare(tmpdir, labels=labels, include_target=include_target, certs=True, reactor_config_map=reactor_config_map) new_environ = deepcopy(os.environ) new_environ["BUILD"] = dedent('''\ { "metadata": { "labels": {} } } ''') if next_release['scratch']: new_environ = deepcopy(os.environ) new_environ["BUILD"] = dedent('''\ { "metadata": { "labels": {"scratch": "true"} } } ''') if next_release['build_name']: new_environ["BUILD"] = dedent('''\ { "metadata": { "name": "scratch-123456", "labels": {"scratch": "true"} } } ''') flexmock(os) os.should_receive("environ").and_return(new_environ) # pylint: disable=no-member plugin.run() for file_path, expected in [(session.cert_path, 'cert'), (session.serverca_path, 'serverca')]: assert os.path.isfile(file_path) with open(file_path, 'r') as fd: assert fd.read() == expected parser = df_parser(plugin.workflow.builder.df_path, workflow=plugin.workflow) assert parser.labels['release'] == next_release['expected'] # Old-style spellings should not be asserted assert 'Release' not in parser.labels
def test_terminate_in_virtual_cluster_and_succeeds(self):
    """terminate_instances on a Xen cluster stops remote controllers.

    Mocks the locations yaml/json and secret files for a virtualized ('xen')
    deployment, a SOAP appcontroller reporting two nodes, and ssh/ps calls
    (controller stop fails once then succeeds); finally verifies the local
    yaml, json, and secret files are removed.
    """
    # let's say that there is a locations.yaml file, which means appscale is
    # running, so we should terminate the services on each box
    flexmock(os.path)
    os.path.should_call('exists')  # set up the fall-through
    os.path.should_receive('exists').with_args(
        LocalState.get_locations_yaml_location(self.keyname)).and_return(True)

    # mock out reading the locations.yaml file, and pretend that we're on
    # a virtualized cluster
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')
    fake_yaml_file = flexmock(name='fake_file')
    fake_yaml_file.should_receive('read').and_return(yaml.dump({
        'infrastructure' : 'xen'
    }))
    builtins.should_receive('open').with_args(
        LocalState.get_locations_yaml_location(self.keyname), 'r') \
        .and_return(fake_yaml_file)

    # mock out reading the json file, and pretend that we're running in a
    # two node deployment
    os.path.should_receive('exists').with_args(
        LocalState.get_locations_json_location(self.keyname)).and_return(True)
    fake_json_file = flexmock(name='fake_file')
    fake_json_file.should_receive('read').and_return(json.dumps([
        {
            'public_ip' : 'public1',
            'jobs' : ['shadow']
        },
        {
            'public_ip' : 'public2',
            'jobs' : ['appengine']
        }
    ]))
    builtins.should_receive('open').with_args(
        LocalState.get_locations_json_location(self.keyname), 'r') \
        .and_return(fake_json_file)

    # and slip in a fake secret file
    fake_secret_file = flexmock(name='fake_file')
    fake_secret_file.should_receive('read').and_return('the secret')
    builtins.should_receive('open').with_args(
        LocalState.get_secret_key_location(self.keyname), 'r') \
        .and_return(fake_secret_file)

    # mock out talking to the appcontroller, and assume that it tells us there
    # there are still two machines in this deployment
    fake_appcontroller = flexmock(name='fake_appcontroller')
    fake_appcontroller.should_receive('get_all_public_ips').with_args('the secret') \
        .and_return(json.dumps(['public1', 'public2']))
    flexmock(SOAPpy)
    SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
        .and_return(fake_appcontroller)

    # and mock out the ssh call to kill the remote appcontroller, assuming that
    # it fails the first time and passes the second
    flexmock(subprocess)
    subprocess.should_receive('Popen').with_args(re.compile('controller stop'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.failed).and_return(self.success)

    # next, mock out our checks to see how the stopping process is going and
    # assume that it has stopped
    flexmock(subprocess)
    subprocess.should_receive('Popen').with_args(re.compile('ps x'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)

    # finally, mock out removing the yaml file, json file, and secret key from
    # this machine
    flexmock(os)
    os.should_receive('remove').with_args(
        LocalState.get_locations_yaml_location(self.keyname)).and_return()
    os.should_receive('remove').with_args(
        LocalState.get_locations_json_location(self.keyname)).and_return()
    os.should_receive('remove').with_args(
        LocalState.get_secret_key_location(self.keyname)).and_return()

    argv = [
        "--keyname", self.keyname
    ]
    options = ParseArgs(argv, self.function).args
    AppScaleTools.terminate_instances(options)
def test_upload_php_app_successfully(self):
    """upload_app succeeds for a PHP app and returns its serving host/port.

    Mocks app extraction to /tmp, an app.yaml declaring app 'baz' with the
    php runtime, the AppController SOAP calls for upload/update/status, the
    scp/ssh shell calls, and a socket that refuses twice before the app port
    answers; asserts the tool reports ('public1', 8080).
    """
    app_dir = '/tmp/appscale-app-1234'

    # add in mocks so that the gzip'ed file gets extracted to /tmp
    # as well as for removing it later
    flexmock(os)
    os.should_receive('mkdir').with_args(app_dir) \
        .and_return(True)
    flexmock(shutil)
    shutil.should_receive('rmtree').with_args(app_dir).and_return()
    local_state = flexmock(LocalState)
    local_state.should_receive('shell')\
        .with_args(re.compile('tar zxvf'),False)\
        .and_return()

    # add in mocks so that there is an app.yaml, but with no appid set
    flexmock(os.path)
    os.path.should_call('exists')
    app_yaml_location = AppEngineHelper.get_app_yaml_location(app_dir)
    os.path.should_receive('exists').with_args(app_yaml_location) \
        .and_return(True)

    # mock out reading the app.yaml file
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')  # set the fall-through
    fake_app_yaml = flexmock(name="fake_app_yaml")
    fake_app_yaml.should_receive('read').and_return(yaml.dump({
        'application' : 'baz',
        'runtime' : 'php'
    }))
    builtins.should_receive('open').with_args(app_yaml_location, 'r') \
        .and_return(fake_app_yaml)

    # Mock out service host and port
    # NOTE(review): the email below appears redacted ('*****@*****.**') in
    # this copy of the file; the original test data likely used a real-looking
    # address. Confirm against version control before relying on it.
    app_data = {'owner' : '*****@*****.**',
        'hosts' : {'192.168.1.1' : { 'http' : 8080, 'https' : 4380 }}}

    app_stats_data = {'apps': {'baz': {'http': 8080, 'language': 'python27',
        'total_reqs': 'no_change', 'appservers': 1, 'https': 4380,
        'reqs_enqueued': None}}}

    # mock out the SOAP call to the AppController and assume it succeeded
    fake_appcontroller = flexmock(name='fake_appcontroller')
    fake_appcontroller.should_receive('status').with_args('the secret') \
        .and_return('Database is at public1')
    fake_appcontroller.should_receive('done_uploading').with_args('baz',
        '/opt/appscale/apps/baz.tar.gz', 'the secret').and_return()
    fake_appcontroller.should_receive('update').with_args(['baz'],
        'the secret').and_return()
    fake_appcontroller.should_receive('is_app_running').with_args('baz',
        'the secret').and_return(False).and_return(True)
    fake_appcontroller.should_receive('does_user_exist').with_args(
        '*****@*****.**', 'the secret').and_return('true')
    fake_appcontroller.should_receive('does_user_exist').with_args(
        'a@public1', 'the secret').and_return('true')
    fake_appcontroller.should_receive('does_app_exist').with_args(
        'baz', 'the secret').and_return(json.dumps(app_data))
    fake_appcontroller.should_receive('get_app_data').with_args(
        'baz', 'the secret').and_return(json.dumps(app_data))
    fake_appcontroller.should_receive('get_all_stats').with_args(
        'the secret').and_return(json.dumps(app_stats_data))
    flexmock(SOAPpy)
    # FIXME(review): the line below is corrupted in this copy of the file —
    # a credential-scrubber appears to have removed a span of code between
    # the SOAPProxy URL and the trailing '@a.com")'. The original most likely
    # mocked SOAPProxy to return fake_appcontroller and set up user/login
    # lookups. Restore this span from version-control history; as written it
    # is not valid Python.
    SOAPpy.should_receive('SOAPProxy').with_args('https://*****:*****@a.com")
    flexmock(getpass)
    getpass.should_receive('getpass').and_return('aaaaaa')

    # mock out making the remote app directory
    local_state.should_receive('shell') \
        .with_args(re.compile('^ssh'), False, 5, stdin=re.compile('^mkdir -p')) \
        .and_return()

    # and mock out tarring and copying the app
    local_state.should_receive('shell') \
        .with_args(re.compile('tar -czf'), False) \
        .and_return()
    local_state.should_receive('shell') \
        .with_args(re.compile('/tmp/appscale-app-baz.tar.gz'), False, 5) \
        .and_return()

    # as well as removing the tar'ed app once we're done copying it
    flexmock(os)
    os.should_receive('remove').with_args('/tmp/appscale-app-baz-1234.tar.gz') \
        .and_return()
    os.should_receive('listdir').and_return(['app.yaml','index.py'])

    # and slap in a mock that says the app comes up after waiting for it
    # three times
    fake_socket = flexmock(name='fake_socket')
    fake_socket.should_receive('connect').with_args(('public1',
        8080)).and_raise(Exception).and_raise(Exception) \
        .and_return(None)
    flexmock(socket)
    socket.should_receive('socket').and_return(fake_socket)

    argv = [
        "--keyname", self.keyname,
        "--file", self.app_dir + ".tar.gz"
    ]
    options = ParseArgs(argv, self.function).args
    (host, port) = AppScaleTools.upload_app(options)
    self.assertEquals('public1', host)
    self.assertEquals(8080, port)
def test_source_build_release(self, tmpdir, next_release, reserve_build,
                              init_fails, reactor_config_map):
    """Source-container release bumping and optional koji build reservation.

    Fakes a koji session whose getBuild answers differ for NVR-dict queries
    (existing -source builds) vs. build-ID queries (the original build), and
    whose CGInitBuild either reserves a build or raises per ``init_fails``.
    Verifies the failure path raises RuntimeError, and the success path
    records the reserved build id/token and source URL on the workflow.
    """
    build_id = '123456'
    token = 'token_123456'
    koji_name = 'component'
    koji_version = '3.0'
    koji_release = '1'
    koji_source = 'git_reg/repo'

    class MockedClientSession(object):
        """Koji session double closing over the test's fixtures."""
        def __init__(self, hub, opts=None):
            self.ca_path = None
            self.cert_path = None
            self.serverca_path = None

        def getBuild(self, build_info):
            if isinstance(build_info, dict):
                # NVR query: does a "<name>-source" build already exist
                # at this release?
                assert build_info['name'] == "%s-source" % koji_name
                assert build_info['version'] == koji_version
                if build_info['release'] in next_release['builds']:
                    return {'state': koji.BUILD_STATES['COMPLETE']}
                return None
            else:
                # Non-dict query: return the original (binary) build info.
                return {
                    'name': koji_name,
                    'version': koji_version,
                    'release': koji_release,
                    'source': koji_source
                }

        def ssl_login(self, cert=None, ca=None, serverca=None, proxyuser=None):
            # Record paths so the test can verify the cert files were written.
            self.ca_path = ca
            self.cert_path = cert
            self.serverca_path = serverca
            return True

        def krb_login(self, *args, **kwargs):
            return True

        def CGInitBuild(self, cg_name, nvr_data):
            assert cg_name == PROG
            assert nvr_data['name'] == "%s-source" % koji_name
            assert nvr_data['version'] == koji_version
            # ``init_fails`` is an exception class (or falsy) supplied by
            # the parametrize fixture.
            if init_fails:
                raise init_fails(
                    'unable to pre-declare build {}'.format(nvr_data))
            return {'build_id': build_id, 'token': token}

    session = MockedClientSession('')
    flexmock(time).should_receive('sleep').and_return(None)
    flexmock(koji, ClientSession=session)
    plugin = self.prepare(tmpdir, certs=True,
                          reactor_config_map=reactor_config_map,
                          reserve_build=reserve_build,
                          fetch_source=True)

    new_environ = deepcopy(os.environ)
    if next_release['scratch']:
        new_environ = deepcopy(os.environ)
        new_environ["BUILD"] = dedent('''\
            {
              "metadata": {
                "labels": {"scratch": "true"}
              }
            }
            ''')
    else:
        new_environ["BUILD"] = dedent('''\
            {
              "metadata": {
                "labels": {}
              }
            }
            ''')
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)

    # pylint: disable=no-member
    if init_fails and reserve_build and reactor_config_map and not next_release[
            'scratch']:
        # Reservation failure must surface as a RuntimeError from the plugin.
        with pytest.raises(RuntimeError) as exc:
            plugin.run()
        assert 'unable to pre-declare build ' in str(exc.value)
        return

    plugin.run()

    # ssl_login must have written the cert/serverca files from prepare().
    for file_path, expected in [(session.cert_path, 'cert'),
                                (session.serverca_path, 'serverca')]:
        assert os.path.isfile(file_path)
        with open(file_path, 'r') as fd:
            assert fd.read() == expected

    if reserve_build and reactor_config_map and not next_release['scratch']:
        assert plugin.workflow.reserved_build_id == build_id
        assert plugin.workflow.reserved_token == token
        assert plugin.workflow.koji_source_source_url == koji_source
        expected_nvr = {
            'name': "%s-source" % koji_name,
            'version': koji_version,
            'release': next_release['expected']
        }
        # NOTE(review): this ASSIGNS instead of asserting — it looks like it
        # was meant to be `assert plugin.workflow.koji_source_nvr ==
        # expected_nvr`. As written, the expected NVR is never checked.
        plugin.workflow.koji_source_nvr = expected_nvr
def test_appscale_in_two_node_virt_deployment(self):
    """gather_logs collects logs from every node and builds symlink dirs.

    Mocks the locations json/secret files, an appcontroller reporting three
    nodes with their roles, the per-node/per-role log directory creation,
    the role symlinks, and the scp of each remote log path; then invokes
    AppScaleTools.gather_logs.

    NOTE(review): despite the name saying "two node", the fixture below
    describes a three-node deployment — confirm whether the name is stale.
    """
    # pretend that the place we're going to put logs into doesn't exist
    flexmock(os.path)
    os.path.should_call('exists')  # set the fall-through
    os.path.should_receive('exists').with_args('/tmp/foobaz').and_return(
        False)

    # and mock out the mkdir operation
    flexmock(os)
    os.should_receive('mkdir').with_args('/tmp/foobaz').and_return()

    # next, mock out finding the login ip address
    os.path.should_receive('exists').with_args(
        LocalState.get_locations_json_location(
            self.keyname)).and_return(True)

    fake_nodes_json = flexmock(name="fake_nodes_json")
    nodes_info = {
        "node_info": [
            {
                "public_ip": "public1",
                "private_ip": "private1",
                "jobs": [
                    "load_balancer", "taskqueue_master", "zookeeper",
                    "db_master", "taskqueue", "shadow", "login"
                ]
            },
            {
                "public_ip": "public2",
                "private_ip": "private2",
                "jobs": ["memcache", "appengine", "zookeeper"]
            },
            {
                "public_ip": "public3",
                "private_ip": "private3",
                "jobs": ["memcache", "appengine"]
            },
        ]
    }
    fake_nodes_json.should_receive('read').and_return(
        json.dumps(nodes_info))
    builtins = flexmock(sys.modules['__builtin__'])
    builtins.should_call('open')
    builtins.should_receive('open').with_args(
        LocalState.get_locations_json_location(self.keyname), 'r') \
        .and_return(fake_nodes_json)

    # mock out writing the secret key to ~/.appscale, as well as reading it
    # later
    secret_key_location = LocalState.get_secret_key_location(self.keyname)
    fake_secret = flexmock(name="fake_secret")
    fake_secret.should_receive('read').and_return('the secret')
    builtins.should_receive('open').with_args(secret_key_location, 'r') \
        .and_return(fake_secret)

    # and slip in a fake appcontroller to report on the two IP addrs
    fake_appcontroller = flexmock(name='fake_appcontroller')
    fake_appcontroller.should_receive('get_all_public_ips').with_args(
        'the secret').and_return(
        json.dumps(['public1', 'public2', 'public3']))
    fake_appcontroller.should_receive('get_role_info').with_args(
        'the secret').and_return(json.dumps(nodes_info['node_info']))
    flexmock(SOAPpy)
    SOAPpy.should_receive('SOAPProxy').with_args('https://public1:17443') \
        .and_return(fake_appcontroller)

    # fake the creation of the log directories locally
    flexmock(utils)
    utils.should_receive('mkdir').with_args(
        '/tmp/foobaz/symlinks/private-ips')
    # one directory per node, plus cassandra/rabbitmq subdirs for each
    utils.should_receive('mkdir').with_args('/tmp/foobaz/public1')
    utils.should_receive('mkdir').with_args(
        '/tmp/foobaz/public1/cassandra')
    utils.should_receive('mkdir').with_args('/tmp/foobaz/public1/rabbitmq')
    utils.should_receive('mkdir').with_args('/tmp/foobaz/public2')
    utils.should_receive('mkdir').with_args(
        '/tmp/foobaz/public2/cassandra')
    utils.should_receive('mkdir').with_args('/tmp/foobaz/public2/rabbitmq')
    utils.should_receive('mkdir').with_args('/tmp/foobaz/public3')
    utils.should_receive('mkdir').with_args(
        '/tmp/foobaz/public3/cassandra')
    utils.should_receive('mkdir').with_args('/tmp/foobaz/public3/rabbitmq')
    # one symlink directory per role appearing in nodes_info
    utils.should_receive('mkdir').with_args(
        '/tmp/foobaz/symlinks/load_balancer')
    utils.should_receive('mkdir').with_args(
        '/tmp/foobaz/symlinks/taskqueue_master')
    utils.should_receive('mkdir').with_args(
        '/tmp/foobaz/symlinks/zookeeper')
    utils.should_receive('mkdir').with_args(
        '/tmp/foobaz/symlinks/db_master')
    utils.should_receive('mkdir').with_args(
        '/tmp/foobaz/symlinks/taskqueue')
    utils.should_receive('mkdir').with_args('/tmp/foobaz/symlinks/shadow')
    utils.should_receive('mkdir').with_args('/tmp/foobaz/symlinks/login')
    utils.should_receive('mkdir').with_args(
        '/tmp/foobaz/symlinks/memcache')
    utils.should_receive('mkdir').with_args(
        '/tmp/foobaz/symlinks/appengine')

    # fake creation of symlink to for friendly navigation
    links_mapping = {
        '../../public1': [
            '/tmp/foobaz/symlinks/private-ips/private1',
            '/tmp/foobaz/symlinks/load_balancer/public1',
            '/tmp/foobaz/symlinks/taskqueue_master/public1',
            '/tmp/foobaz/symlinks/zookeeper/public1',
            '/tmp/foobaz/symlinks/db_master/public1',
            '/tmp/foobaz/symlinks/taskqueue/public1',
            '/tmp/foobaz/symlinks/shadow/public1',
            '/tmp/foobaz/symlinks/login/public1',
        ],
        '../../public2': [
            '/tmp/foobaz/symlinks/private-ips/private2',
            '/tmp/foobaz/symlinks/zookeeper/public2',
            '/tmp/foobaz/symlinks/appengine/public2',
            '/tmp/foobaz/symlinks/memcache/public2',
        ],
        '../../public3': [
            '/tmp/foobaz/symlinks/private-ips/private3',
            '/tmp/foobaz/symlinks/appengine/public3',
            '/tmp/foobaz/symlinks/memcache/public3',
        ]
    }
    for original_dir, expected_links in links_mapping.iteritems():
        for expected_link in expected_links:
            os.should_receive('symlink').with_args(original_dir,
                expected_link)

    # finally, fake the copying of the log files
    flexmock(subprocess)
    subprocess.should_receive('Popen').with_args(re.compile('/var/log/appscale'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    subprocess.should_receive('Popen').with_args(re.compile('/var/log/kern.log*'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    subprocess.should_receive('Popen').with_args(re.compile('/var/log/monit*'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    subprocess.should_receive('Popen').with_args(
        re.compile('/var/log/haproxy*'), shell=True,
        stdout=self.fake_temp_file,
        stderr=subprocess.STDOUT).and_return(self.success)
    subprocess.should_receive('Popen').with_args(re.compile('/var/log/nginx'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    subprocess.should_receive('Popen').with_args(
        re.compile('/var/log/rabbitmq'), shell=True,
        stdout=self.fake_temp_file,
        stderr=subprocess.STDOUT).and_return(self.success)
    subprocess.should_receive('Popen').with_args(re.compile('/var/log/syslog*'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    subprocess.should_receive('Popen').with_args(re.compile('/var/log/zookeeper'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)
    subprocess.should_receive('Popen').with_args(
        re.compile('/opt/cassandra/cassandra/logs'), shell=True,
        stdout=self.fake_temp_file,
        stderr=subprocess.STDOUT).and_return(self.success)

    argv = ["--keyname", self.keyname, "--location", "/tmp/foobaz"]
    options = ParseArgs(argv, self.function).args
    AppScaleTools.gather_logs(options)
def prepare(docker_registries=None, before_dockerfile=False):
    """Build a DockerBuildWorkflow fixture with mocked OSBS/registry state.

    :param docker_registries: iterable of docker registry URIs; defaults to
        (LOCALHOST_REGISTRY, DOCKER0_REGISTRY)
    :param before_dockerfile: if True, use the XBeforeDockerfile builder stub
        with an empty base_image_inspect (workflow state before the
        Dockerfile is available)
    :return: a configured DockerBuildWorkflow ready for plugin tests
    """
    if docker_registries is None:
        docker_registries = (
            LOCALHOST_REGISTRY,
            DOCKER0_REGISTRY,
        )

    # No-op stand-ins for OSBS build-object updates so no real API is hit.
    def update_annotations_on_build(build_id, annotations):
        pass

    def update_labels_on_build(build_id, labels):
        pass

    # Inject a fake BUILD environment variable so the plugin under test sees
    # an OpenShift build named "asd" in namespace "namespace".
    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = dedent('''\
        {
          "metadata": {
            "name": "asd",
            "namespace": "namespace"
          }
        }
        ''')
    flexmock(OSBS, update_annotations_on_build=update_annotations_on_build)
    flexmock(OSBS, update_labels_on_build=update_labels_on_build)
    # Expected osbs.conf.Configuration kwargs; should_call verifies the plugin
    # constructs its Configuration with exactly these arguments.
    config_kwargs = {
        'namespace': 'namespace',
        'verify_ssl': True,
        'openshift_url': 'http://example.com/',
        'use_auth': True,
        'conf_file': None,
        'build_json_dir': None
    }
    (flexmock(osbs.conf.Configuration).should_call("__init__").with_args(
        **config_kwargs))
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)  # pylint: disable=no-member

    workflow = DockerBuildWorkflow(source=MOCK_SOURCE)
    openshift_map = {
        'url': 'http://example.com/',
        'insecure': False,
        'auth': {
            'enable': True
        },
    }
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
        ReactorConfig({'version': 1, 'openshift': openshift_map})
    add_koji_map_in_workflow(workflow, hub_url='/', root_url='')

    workflow.tag_conf.add_floating_image(TEST_IMAGE)
    workflow.tag_conf.add_primary_image("namespace/image:version-release")
    workflow.tag_conf.add_unique_image("namespace/image:asd123")

    # Register each docker registry with fixed v2 digests for both images.
    for docker_registry in docker_registries:
        r = workflow.push_conf.add_docker_registry(docker_registry)
        r.digests[TEST_IMAGE_NAME] = ManifestDigest(v1=DIGEST_NOT_USED,
                                                    v2=DIGEST1)
        r.digests["namespace/image:asd123"] = ManifestDigest(
            v1=DIGEST_NOT_USED, v2=DIGEST2)

    if before_dockerfile:
        setattr(workflow, 'builder', XBeforeDockerfile())
        setattr(workflow.builder, 'base_image_inspect', {})
    else:
        setattr(workflow, 'builder', X())
        setattr(workflow.builder, 'base_image_inspect', {'Id': '01234567'})

    workflow.build_logs = [
        "a", "b",
    ]
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    # pylint: disable=no-member
    workflow.source.lg.should_receive("_commit_id").and_return("commit")
    # pylint: enable=no-member
    return workflow
def test_java_bad_sdk_version(self): bad_jars = ['test.jar', 'appengine-api-1.0-sdk-1.7.3.jar'] flexmock(os) os.should_receive('listdir').and_return(bad_jars) self.assertEquals(True, AppEngineHelper.is_sdk_mismatch(''))
def test_appscale_with_ips_layout_flag_and_success(self):
    """add_keypair succeeds when given an --ips_layout of reachable hosts.

    Mocks ssh reachability for the three layout IPs, the presence of
    ssh-keygen/ssh-copy-id, fresh key generation plus chmod/copy of the key
    files, and key distribution; then invokes AppScaleTools.add_keypair.
    """
    # assume that ssh is running on each machine
    fake_socket = flexmock(name='socket')
    fake_socket.should_receive('connect').with_args(('1.2.3.4', 22)) \
        .and_return(None)
    fake_socket.should_receive('connect').with_args(('1.2.3.5', 22)) \
        .and_return(None)
    fake_socket.should_receive('connect').with_args(('1.2.3.6', 22)) \
        .and_return(None)
    flexmock(socket)
    socket.should_receive('socket').and_return(fake_socket)

    # assume that we have ssh-keygen and ssh-copy-id
    flexmock(subprocess)
    subprocess.should_receive('Popen').with_args(re.compile('which ssh-keygen'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)

    flexmock(subprocess)
    subprocess.should_receive('Popen').with_args(re.compile('which ssh-copy-id'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)

    # assume that we have a ~/.appscale
    flexmock(os.path)
    os.path.should_call('exists')  # set up the fall-through
    os.path.should_receive('exists').with_args(LocalState.LOCAL_APPSCALE_PATH) \
        .and_return(True)

    # and assume that we don't have public and private keys already made
    path = LocalState.LOCAL_APPSCALE_PATH + self.keyname
    public_key = LocalState.LOCAL_APPSCALE_PATH + self.keyname + '.pub'
    private_key = LocalState.LOCAL_APPSCALE_PATH + self.keyname + '.key'
    os.path.should_receive('exists').with_args(public_key).and_return(False)
    os.path.should_receive('exists').with_args(private_key).and_return(False)

    # next, assume that ssh-keygen ran fine
    flexmock(subprocess)
    subprocess.should_receive('Popen').with_args(re.compile('ssh-keygen'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)

    # assume that we can rename the private key
    flexmock(shutil)
    shutil.should_receive('copy').with_args(path, private_key).and_return()

    # finally, assume that we can chmod 0600 those files fine
    # (0600 is a Python 2 octal literal; this file targets Python 2)
    flexmock(os)
    os.should_receive('chmod').with_args(public_key, 0600).and_return()
    os.should_receive('chmod').with_args(path, 0600).and_return()

    # and assume that we can ssh-copy-id to each of the three IPs below
    flexmock(subprocess)
    subprocess.should_receive('Popen').with_args(re.compile('ssh-copy-id'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)

    # also, we should be able to copy over our new public and private keys fine
    flexmock(subprocess)
    subprocess.should_receive('Popen').with_args(re.compile('id_rsa[.pub]?'),
        shell=True, stdout=self.fake_temp_file, stderr=subprocess.STDOUT) \
        .and_return(self.success)

    # don't use a 192.168.X.Y IP here, since sometimes we set our virtual
    # machines to boot with those addresses (and that can mess up our tests).
    ips_layout = yaml.safe_load("""
      master : 1.2.3.4
      database: 1.2.3.4
      zookeeper: 1.2.3.5
      appengine:  1.2.3.6
    """)

    argv = [
        "--ips_layout", base64.b64encode(yaml.dump(ips_layout)),
        "--keyname", self.keyname
    ]
    options = ParseArgs(argv, self.function).args
    AppScaleTools.add_keypair(options)
def setUp(self): # mock out all logging, since it clutters our output flexmock(AppScaleLogger) AppScaleLogger.should_receive('log').and_return() # mock out all sleeps, as they aren't necessary for unit testing flexmock(time) time.should_receive('sleep').and_return() # set up some fake options so that we don't have to generate them via # ParseArgs self.options = flexmock(infrastructure='ec2', group='boogroup', machine='ami-ABCDEFG', instance_type='m1.large', keyname='bookey', table='cassandra', verbose=False, test=False, use_spot_instances=False, zone='my-zone-1b', static_ip=None) self.my_id = "12345" self.node_layout = NodeLayout(self.options) # set up phony AWS credentials for each test # ones that test not having them present can # remove them for credential in EucalyptusAgent.REQUIRED_EC2_CREDENTIALS: os.environ[credential] = "baz" os.environ['EC2_URL'] = "http://boo" # mock out calls to EC2 # begin by assuming that our ssh keypair doesn't exist, and thus that we # need to create it key_contents = "key contents here" fake_key = flexmock(name="fake_key", material=key_contents) fake_key.should_receive('save').with_args(os.environ['HOME']+'/.appscale').and_return(None) fake_ec2 = flexmock(name="fake_ec2") fake_ec2.should_receive('get_key_pair').with_args('bookey') \ .and_return(None) fake_ec2.should_receive('create_key_pair').with_args('bookey') \ .and_return(fake_key) # mock out writing the secret key builtins = flexmock(sys.modules['__builtin__']) builtins.should_call('open') # set the fall-through secret_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.secret" fake_secret = flexmock(name="fake_secret") fake_secret.should_receive('write').and_return() builtins.should_receive('open').with_args(secret_key_location, 'w') \ .and_return(fake_secret) # also, mock out the keypair writing and chmod'ing ssh_key_location = LocalState.LOCAL_APPSCALE_PATH + "bookey.key" fake_file = flexmock(name="fake_file") 
fake_file.should_receive('write').with_args(key_contents).and_return() builtins.should_receive('open').with_args(ssh_key_location, 'w') \ .and_return(fake_file) flexmock(os) os.should_receive('chmod').with_args(ssh_key_location, 0600).and_return() # next, assume there are no security groups up at first, but then it gets # created. udp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='udp') tcp_rule = flexmock(from_port=1, to_port=65535, ip_protocol='tcp') icmp_rule = flexmock(from_port=-1, to_port=-1, ip_protocol='icmp') group = flexmock(name='boogroup', rules=[tcp_rule, udp_rule, icmp_rule]) fake_ec2.should_receive('get_all_security_groups').with_args().and_return([]) fake_ec2.should_receive('get_all_security_groups').with_args('boogroup').and_return([group]) # and then assume we can create and open our security group fine fake_ec2.should_receive('create_security_group').with_args('boogroup', 'AppScale security group').and_return() fake_ec2.should_receive('authorize_security_group').and_return() # next, add in mocks for run_instances # the first time around, let's say that no machines are running # the second time around, let's say that our machine is pending # and that it's up the third time around fake_pending_instance = flexmock(state='pending') fake_pending_reservation = flexmock(instances=fake_pending_instance) fake_running_instance = flexmock(state='running', key_name='bookey', id='i-12345678', public_dns_name='public1', private_dns_name='private1') fake_running_reservation = flexmock(instances=fake_running_instance) fake_ec2.should_receive('get_all_instances').and_return([]) \ .and_return([]) \ .and_return([fake_pending_reservation]) \ .and_return([fake_running_reservation]) # next, assume that our run_instances command succeeds fake_ec2.should_receive('run_instances').and_return() # finally, inject our mocked EC2 flexmock(boto.ec2) boto.ec2.should_receive('connect_to_region').and_return(fake_ec2) # assume that ssh comes up on the third attempt 
fake_socket = flexmock(name='fake_socket') fake_socket.should_receive('connect').with_args(('public1', RemoteHelper.SSH_PORT)).and_raise(Exception).and_raise(Exception) \ .and_return(None) flexmock(socket) socket.should_receive('socket').and_return(fake_socket) # throw some default mocks together for when invoking via shell succeeds # and when it fails self.fake_temp_file = flexmock(name='fake_temp_file') self.fake_temp_file.should_receive('seek').with_args(0).and_return() self.fake_temp_file.should_receive('read').and_return('boo out') self.fake_temp_file.should_receive('close').and_return() flexmock(tempfile) tempfile.should_receive('NamedTemporaryFile')\ .and_return(self.fake_temp_file) self.success = flexmock(name='success', returncode=0) self.success.should_receive('wait').and_return(0) self.failed = flexmock(name='success', returncode=1) self.failed.should_receive('wait').and_return(1) # assume that root login isn't already enabled local_state = flexmock(LocalState) local_state.should_receive('shell') \ .with_args(re.compile('^ssh .*root'), False, 1, stdin='ls') \ .and_return(RemoteHelper.LOGIN_AS_UBUNTU_USER) # and assume that we can ssh in as ubuntu to enable root login local_state = flexmock(LocalState) local_state.should_receive('shell')\ .with_args(re.compile('^ssh .*ubuntu'),False,5)\ .and_return() # also assume that we can scp over our ssh keys local_state.should_receive('shell')\ .with_args(re.compile('scp .*/root/.ssh/id_'),False,5)\ .and_return() local_state.should_receive('shell')\ .with_args(re.compile('scp .*/root/.appscale/bookey.key'),False,5)\ .and_return()