def cluster_update(self, context, cluster, values):
    """Set the given properties on cluster and update it.

    If 'shares' is among the updated values, any share that is new
    relative to the stored cluster triggers a mount pass, and any share
    dropped from the list is unmounted.

    :param context: request context
    :param cluster: cluster identifier accepted by the db layer
    :param values: property dict to apply; deep-copied so the caller's
        nested structures are never mutated
    :returns: the updated cluster object
    """
    values = copy.deepcopy(values)
    update_shares = values.get('shares')
    if update_shares:
        # Snapshot the share list before the update overwrites it.
        original_shares = (
            self.db.cluster_get(context, cluster).get('shares', []))
    updated_cluster = self.db.cluster_update(context, cluster, values)
    if update_shares:
        # Only call mount_shares if at least one share is genuinely new;
        # mount_shares operates on the whole cluster, so once is enough.
        if any(share not in original_shares for share in update_shares):
            shares.mount_shares(r.ClusterResource(updated_cluster))
        # Any shares that were on the original, but not on the updated
        # list will be unmounted.
        unmount_list = [share for share in original_shares
                        if share not in update_shares]
        if unmount_list:
            shares.unmount_shares(r.ClusterResource(updated_cluster),
                                  unmount_list)
    return updated_cluster
def cluster_update(self, context, cluster, values):
    """Set the given properties on cluster and update it."""
    values = copy.deepcopy(values)
    new_share_list = values.get('shares')
    if new_share_list:
        # Remember what was mounted before applying the update.
        old_share_list = self.db.cluster_get(
            context, cluster).get('shares', [])
    refreshed = self.db.cluster_update(context, cluster, values)
    if new_share_list:
        # A single previously unseen share is enough reason to run
        # mount_shares once over the whole cluster.
        for candidate in new_share_list:
            if candidate not in old_share_list:
                shares.mount_shares(r.ClusterResource(refreshed))
                break
        # Shares present before but missing now must be unmounted.
        stale = [entry for entry in old_share_list
                 if entry not in new_share_list]
        if len(stale) > 0:
            shares.unmount_shares(r.ClusterResource(refreshed), stale)
    return refreshed
def test_acl_exists_no_recreate(self, f_manilaclient, f_context):
    """No allow() call is made when the share already grants access."""
    acl = [mock.Mock(access_level='rw', access_to=ip, access_type='ip')
           for ip in _NAMENODE_IPS]
    existing_share = _FakeShare(access_list=acl)
    f_manilaclient.return_value = mock.Mock(
        shares=mock.Mock(get=mock.Mock(return_value=existing_share)))

    share_config = {'id': '12345678-1234-1234-1234-123456789012',
                    'access_level': 'ro',
                    'path': '/mnt/localpath'}
    namenode_group, namenode_executors = _mock_node_group(
        _NAMENODE_IPS, [share_config])
    datanode_group, datanode_executors = _mock_node_group(
        _DATANODE_IPS, [])
    cluster = mock.Mock(node_groups=[namenode_group, datanode_group],
                        shares=[])

    shares.mount_shares(cluster)

    # Access rules already exist, so none should be (re)created.
    self.assertEqual(0, existing_share.allow.call_count)
    mount_calls = (_setup_calls() +
                   _expected_calls('/mnt/localpath',
                                   '192.168.122.1:/path', '-r'))
    for executor in namenode_executors:
        executor.assert_has_calls(mount_calls)
    for executor in datanode_executors:
        self.assertEqual(0, executor.call_count)
def test_mount_nfs_shares_to_ng(self, f_manilaclient, f_context):
    """A node-group share is ACL'd and mounted on that group only."""
    fake_share = _FakeShare()
    f_manilaclient.return_value = mock.Mock(
        shares=mock.Mock(get=mock.Mock(return_value=fake_share)))

    ng_share = {'id': '12345678-1234-1234-1234-123456789012',
                'access_level': 'rw',
                'path': '/mnt/localpath'}
    namenode_group, namenode_executors = _mock_node_group(
        _NAMENODE_IPS, [ng_share])
    datanode_group, datanode_executors = _mock_node_group(
        _DATANODE_IPS, [])
    cluster = mock.Mock(node_groups=[namenode_group, datanode_group],
                        shares=[])

    shares.mount_shares(cluster)

    # Each namenode IP must be granted rw access to the share.
    expected_grants = [mock.call('ip', ip, 'rw') for ip in _NAMENODE_IPS]
    fake_share.allow.assert_has_calls(expected_grants, any_order=True)
    mount_calls = (_setup_calls() +
                   _expected_calls('/mnt/localpath',
                                   '192.168.122.1:/path', '-w'))
    for executor in namenode_executors:
        executor.assert_has_calls(mount_calls)
    # Datanodes carry no shares, so nothing runs there.
    for executor in datanode_executors:
        self.assertEqual(0, executor.call_count)
def test_share_does_not_exist(self, f_manilaclient, f_context):
    """mount_shares surfaces NotFoundException when manila lacks a share."""
    f_manilaclient.return_value = mock.Mock(
        shares=mock.Mock(get=mock.Mock(side_effect=manila_ex.NotFound)))

    ng_shares = [{'id': '12345678-1234-1234-1234-123456789012',
                  'access_level': 'rw',
                  'path': '/mnt/localpath'},
                 {'id': 'DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF'}]
    namenode_group, namenode_executors = _mock_node_group(
        ['192.168.122.3', '192.168.122.4'], ng_shares)
    datanode_group, datanode_executors = _mock_node_group(
        ['192.168.122.5', '192.168.122.6', '192.168.122.7'], [])
    cluster_shares = [{'id': '12345678-1234-1234-1234-123456789012',
                       'access_level': 'ro',
                       'path': '/mnt/somanylocalpaths'}]
    cluster = mock.Mock(node_groups=[namenode_group, datanode_group],
                        shares=cluster_shares)

    # Manila's NotFound should be translated into Sahara's exception.
    with testtools.ExpectedException(exceptions.NotFoundException):
        shares.mount_shares(cluster)
def test_mount_nfs_shares_to_cluster(self, f_manilaclient, f_context):
    """Cluster-wide and group-local shares get the right ACLs and mounts."""
    global_share = _FakeShare()
    namenode_only_share = _FakeShare(
        id='DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF',
        export_location='192.168.122.2:/path')
    by_id = {s.id: s for s in (global_share, namenode_only_share)}
    f_manilaclient.return_value = mock.Mock(
        shares=mock.Mock(get=mock.Mock(
            side_effect=lambda x: by_id[x])))

    ng_shares = [{'id': '12345678-1234-1234-1234-123456789012',
                  'access_level': 'rw',
                  'path': '/mnt/localpath'},
                 {'id': 'DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF'}]
    namenode_group, namenode_executors = _mock_node_group(
        ['192.168.122.3', '192.168.122.4'], ng_shares)
    datanode_group, datanode_executors = _mock_node_group(
        ['192.168.122.5', '192.168.122.6', '192.168.122.7'], [])
    cluster = mock.Mock(
        node_groups=[namenode_group, datanode_group],
        shares=[{'id': '12345678-1234-1234-1234-123456789012',
                 'access_level': 'ro',
                 'path': '/mnt/somanylocalpaths'}])

    shares.mount_shares(cluster)

    # Every node gets read-only access to the cluster-wide share.
    global_share.allow.assert_has_calls(
        [mock.call('ip', ip, 'ro')
         for ip in _NAMENODE_IPS + _DATANODE_IPS],
        any_order=True)
    # Only namenodes get read-write access to the group-local share.
    namenode_only_share.allow.assert_has_calls(
        [mock.call('ip', ip, 'rw') for ip in _NAMENODE_IPS],
        any_order=True)

    ro_mount = _expected_calls('/mnt/somanylocalpaths',
                               '192.168.122.1:/path', '-r')
    rw_mount = _expected_calls('/mnt/DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF',
                               '192.168.122.2:/path', '-w')
    for executor in namenode_executors:
        executor.assert_has_calls(_setup_calls() + ro_mount + rw_mount,
                                  any_order=True)
        self.assertEqual(6, executor.call_count)
    for executor in datanode_executors:
        executor.assert_has_calls(_setup_calls() + ro_mount)
        self.assertEqual(4, executor.call_count)
def _provision_cluster(cluster_id):
    """Drive a newly requested cluster through the full provisioning flow.

    The sequence is order-sensitive: each phase transitions the cluster
    status (INFRAUPDATING -> CONFIGURING -> STARTING -> ACTIVE) before
    performing that phase's work, and re-reads the cluster object after
    steps that persist changes.
    """
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)
    cluster = _update_sahara_info(ctx, cluster)

    # updating cluster infra
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_INFRAUPDATING)
    plugin.update_infra(cluster)

    # creating instances and configuring them
    # Re-fetch: update_infra may have persisted changes we must see.
    cluster = conductor.cluster_get(ctx, cluster_id)
    context.set_step_type(_("Engine: create cluster"))
    INFRA.create_cluster(cluster)
    # NTP is set up right after the instances exist, before any
    # plugin-level configuration runs.
    ntp_service.configure_ntp(cluster_id)

    # configure cluster
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_CONFIGURING)
    context.set_step_type(_("Plugin: configure cluster"))
    # Not every plugin implements image validation; call it only if present.
    if hasattr(plugin, 'validate_images'):
        plugin.validate_images(cluster, test_only=False)
    shares.mount_shares(cluster)
    plugin.configure_cluster(cluster)

    # starting prepared and configured cluster
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_STARTING)
    context.set_step_type(_("Plugin: start cluster"))
    plugin.start_cluster(cluster)

    # cluster is now up and ready
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_ACTIVE)

    # schedule execution pending job for cluster
    for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
        job_manager.run_job(je.id)

    _refresh_health_for_cluster(cluster_id)
def _provision_cluster(cluster_id):
    """Drive a newly requested cluster through the full provisioning flow.

    The sequence is order-sensitive: each phase transitions the cluster
    status (INFRAUPDATING -> CONFIGURING -> STARTING -> ACTIVE) before
    performing that phase's work, and re-reads the cluster object after
    steps that persist changes.
    """
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)
    cluster = _update_sahara_info(ctx, cluster)

    # updating cluster infra
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_INFRAUPDATING)
    plugin.update_infra(cluster)

    # creating instances and configuring them
    # Re-fetch: update_infra may have persisted changes we must see.
    cluster = conductor.cluster_get(ctx, cluster_id)
    context.set_step_type(_("Engine: create cluster"))
    INFRA.create_cluster(cluster)

    # configure cluster
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_CONFIGURING)
    context.set_step_type(_("Plugin: configure cluster"))
    # Not every plugin implements image validation; call it only if present.
    if hasattr(plugin, 'validate_images'):
        plugin.validate_images(cluster, test_only=False)
    shares.mount_shares(cluster)
    plugin.configure_cluster(cluster)

    # starting prepared and configured cluster
    # NOTE: NTP is configured after plugin configuration here, just
    # before the cluster is started.
    ntp_service.configure_ntp(cluster_id)
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_STARTING)
    context.set_step_type(_("Plugin: start cluster"))
    plugin.start_cluster(cluster)

    # cluster is now up and ready
    cluster = c_u.change_cluster_status(
        cluster, c_u.CLUSTER_STATUS_ACTIVE)

    # schedule execution pending job for cluster
    for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
        job_manager.run_job(je.id)

    _refresh_health_for_cluster(cluster_id)
def mount_share_at_default_path(url, cluster):
    """Automount the share named in *url* on *cluster* at its default path.

    url example: 'manila://ManilaShare-uuid/path_to_file'
    """
    share_id = six.moves.urllib.parse.urlparse(url).netloc
    # Copy the existing share entries (if any) so we can extend the list.
    current = [dict(s) for s in cluster.shares] if cluster.shares else []
    current.append({
        'id': share_id,
        'path': shares_service.default_mount(share_id),
        'access_level': 'rw'
    })
    cluster = conductor.cluster_update(context.ctx(), cluster,
                                       {'shares': current})
    shares_service.mount_shares(cluster)
    return shares_service.get_share_path(url, cluster.shares)
def mount_share_at_default_path(url, cluster):
    """Automount the share referenced by *url* using its default path.

    url example: 'manila://ManilaShare-uuid/path_to_file'
    """
    share_id = six.moves.urllib.parse.urlparse(url).netloc
    if cluster.shares:
        share_entries = [dict(entry) for entry in cluster.shares]
    else:
        share_entries = []
    new_entry = {'id': share_id,
                 'path': shares_service.default_mount(share_id),
                 'access_level': 'rw'}
    share_entries.append(new_entry)
    cluster = conductor.cluster_update(
        context.ctx(), cluster, {'shares': share_entries})
    shares_service.mount_shares(cluster)
    return shares_service.get_share_path(url, cluster.shares)
def test_mount_nfs_shares_to_cluster(self, f_manilaclient, f_context):
    """Cluster-level and node-group shares are ACL'd and mounted correctly."""
    global_share = _FakeShare()
    namenode_only_share = _FakeShare(
        id='DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF',
        export_location='192.168.122.2:/path')
    share_lookup = {share.id: share
                    for share in (global_share, namenode_only_share)}
    f_manilaclient.return_value = mock.Mock(
        shares=mock.Mock(get=mock.Mock(
            side_effect=lambda x: share_lookup[x])))

    namenode_group, namenode_executors = _mock_node_group(
        ['192.168.122.3', '192.168.122.4'],
        [{'id': '12345678-1234-1234-1234-123456789012',
          'access_level': 'rw',
          'path': '/mnt/localpath'},
         {'id': 'DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF'}])
    datanode_group, datanode_executors = _mock_node_group(
        ['192.168.122.5', '192.168.122.6', '192.168.122.7'], [])
    cluster_level_shares = [{'id': '12345678-1234-1234-1234-123456789012',
                             'access_level': 'ro',
                             'path': '/mnt/somanylocalpaths'}]
    cluster = mock.Mock(node_groups=[namenode_group, datanode_group],
                        shares=cluster_level_shares)

    shares.mount_shares(cluster)

    # The cluster-wide share is opened read-only to all nodes.
    every_ip = _NAMENODE_IPS + _DATANODE_IPS
    global_share.allow.assert_has_calls(
        [mock.call('ip', ip, 'ro') for ip in every_ip], any_order=True)
    # The namenode-only share is opened read-write to namenodes alone.
    namenode_only_share.allow.assert_has_calls(
        [mock.call('ip', ip, 'rw') for ip in _NAMENODE_IPS],
        any_order=True)

    read_only_mount = _expected_calls(
        '/mnt/somanylocalpaths', '192.168.122.1:/path', '-r')
    read_write_mount = _expected_calls(
        '/mnt/DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF',
        '192.168.122.2:/path', '-w')
    for executor in namenode_executors:
        executor.assert_has_calls(
            _setup_calls() + read_only_mount + read_write_mount,
            any_order=True)
        self.assertEqual(6, executor.call_count)
    for executor in datanode_executors:
        executor.assert_has_calls(_setup_calls() + read_only_mount)
        self.assertEqual(4, executor.call_count)