def test_save_mount_points(self, mock_os_isdir, mock_get_mount_point,
                           mock_write_data_dir_to_mount_in_file, log_error, log_info):
  """
  Test when all mounts are on root.
  """
  mock_get_mount_point.side_effect = ["/", "/", "/"] * 2
  mock_os_isdir.side_effect = [False, False, False] + [True, True, True]
  mock_write_data_dir_to_mount_in_file.return_value = True

  # Function under test
  dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, self.params, update_cache=False)

  for (name, args, kwargs) in log_info.mock_calls:
    print args[0]
  for (name, args, kwargs) in log_error.mock_calls:
    print args[0]

  self.assertEquals(0, log_error.call_count)
  mock_write_data_dir_to_mount_in_file.assert_called_once_with(
      self.params, {self.grid0: "/", self.grid1: "/", self.grid2: "/"})
def test_grid_becomes_unmounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point,
                                mock_get_data_dir_to_mount_from_file, log_error, log_info):
  """
  Test when grid2 becomes unmounted.
  """
  mock_os_exists.return_value = True  # Indicate that the history file exists

  # Initially, all grids were mounted
  mock_get_data_dir_to_mount_from_file.return_value = {self.grid0: "/dev0",
                                                       self.grid1: "/dev1",
                                                       self.grid2: "/dev2"}

  # Grid2 then becomes unmounted
  mock_get_mount_point.side_effect = ["/dev0", "/dev1", "/"] * 2
  mock_os_isdir.side_effect = [False, False, False] + [True, True, True]

  # Function under test
  dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, self.params, update_cache=False)

  for (name, args, kwargs) in log_info.mock_calls:
    print args[0]

  error_logs = []
  for (name, args, kwargs) in log_error.mock_calls:
    error_logs.append(args[0])  # args is a one-tuple
  error_msg = "".join(error_logs)

  self.assertEquals(1, log_error.call_count)
  self.assertTrue("Directory /grid/2/data does not exist and became unmounted from /dev2" in error_msg)
def test_normalized(self, mock_write_data_dir_to_file, log_error, log_info):
  """
  Test that the data dirs are normalized by stripping leading and trailing
  whitespace while remaining case sensitive.
  """
  params = StubParams()
  params.data_dir_mount_file = "/etc/hadoop/conf/dfs_data_dir_mount.hist"
  params.dfs_data_dir = "/grid/0/data , /grid/1/data ,/GRID/2/Data/"

  # Function under test
  dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, params, update_cache=False)

  for (name, args, kwargs) in log_info.mock_calls:
    print args[0]
  for (name, args, kwargs) in log_error.mock_calls:
    print args[0]

  log_info.assert_any_call("Forcefully creating directory: /grid/0/data")
  log_info.assert_any_call("Forcefully creating directory: /grid/1/data")
  log_info.assert_any_call("Forcefully creating directory: /GRID/2/Data/")
  self.assertEquals(0, log_error.call_count)
def test_grid_becomes_remounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point,
                                mock_get_data_dir_to_mount_from_file, log_error, log_info):
  """
  Test when grid2 becomes remounted.
  """
  mock_os_exists.return_value = True  # Indicate that the history file exists

  # Initially, grid2 was unmounted (it resolved to the root mount)
  mock_get_data_dir_to_mount_from_file.return_value = {self.grid0: "/dev0",
                                                       self.grid1: "/dev1",
                                                       self.grid2: "/"}

  # Grid2 then becomes remounted on /dev2
  mock_get_mount_point.side_effect = ["/dev0", "/dev1", "/dev2"] * 2
  mock_os_isdir.side_effect = [False, False, False] + [True, True, True]

  # Function under test
  dfs_datanode_helper.handle_dfs_data_dir(fake_create_dir, self.params, update_cache=False)

  for (name, args, kwargs) in log_info.mock_calls:
    print args[0]
  for (name, args, kwargs) in log_error.mock_calls:
    print args[0]

  self.assertEquals(0, log_error.call_count)
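# The tests above rely on scaffolding that is not shown here. Below is a minimal
# sketch of what that scaffolding could look like: StubParams and fake_create_dir
# are names taken from the tests themselves, but their bodies, the fixture values
# marked as inferred, and the patch targets are assumptions made for illustration.
import unittest


class StubParams(object):
  """Empty attribute bag; each test assigns only the attributes it needs."""
  pass


def fake_create_dir(directory, *args):
  # Stand-in for the directory-creation callback passed to handle_dfs_data_dir.
  # The exact signature expected by the real helper is assumed here.
  print "Fake function creating directory %s" % directory


class TestDatanodeHelper(unittest.TestCase):
  # grid2 and data_dir_mount_file appear in the tests above; grid0 and grid1
  # are inferred by analogy.
  grid0 = "/grid/0/data"
  grid1 = "/grid/1/data"
  grid2 = "/grid/2/data"

  params = StubParams()
  params.data_dir_mount_file = "/etc/hadoop/conf/dfs_data_dir_mount.hist"
  params.dfs_data_dir = "{0},{1},{2}".format(grid0, grid1, grid2)

  # The mock arguments in each test signature come from mock.patch decorators
  # applied bottom-up, e.g. (patch targets are assumptions):
  #
  #   @patch.object(Logger, "info")
  #   @patch.object(Logger, "error")
  #   @patch.object(dfs_datanode_helper, "get_data_dir_to_mount_from_file")
  #   @patch.object(dfs_datanode_helper, "get_mount_point_for_dir")
  #   @patch("os.path.isdir")
  #   @patch("os.path.exists")
  #   def test_grid_becomes_unmounted(self, mock_os_exists, mock_os_isdir, ...):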
def datanode(action=None):
  import params
  if action == "configure":
    Directory(params.dfs_domain_socket_dir,
              recursive=True,
              mode=0751,
              owner=params.hdfs_user,
              group=params.user_group)
    handle_dfs_data_dir(create_dirs, params)
  elif action == "start" or action == "stop":
    service(action=action, name="datanode",
            user=params.hdfs_user,
            create_pid_dir=True,
            create_log_dir=True)
def datanode(action=None):
  if action == "configure":
    import params
    Directory(params.dfs_domain_socket_dir,
              create_parents=True,
              mode=0751,
              owner=params.hdfs_user,
              group=params.user_group)
    if not os.path.isdir(os.path.dirname(params.data_dir_mount_file)):
      Directory(os.path.dirname(params.data_dir_mount_file),
                create_parents=True,
                mode=0755,
                owner=params.hdfs_user,
                group=params.user_group)
    data_dir_to_mount_file_content = handle_dfs_data_dir(create_dirs, params)
    File(params.data_dir_mount_file,
         owner=params.hdfs_user,
         group=params.user_group,
         mode=0644,
         content=data_dir_to_mount_file_content)
  elif action == "start" or action == "stop":
    import params
    service(action=action, name="datanode",
            user=params.hdfs_user,
            create_pid_dir=True,
            create_log_dir=True)
  elif action == "status":
    import status_params
    check_process_status(status_params.datanode_pid_file)
def datanode(action=None):
  if action == "configure":
    import params
    Directory(params.dfs_domain_socket_dir,
              recursive=True,
              mode=0751,
              owner=params.hdfs_user,
              group=params.user_group)
    if not os.path.isdir(os.path.dirname(params.data_dir_mount_file)):
      Directory(os.path.dirname(params.data_dir_mount_file),
                recursive=True,
                mode=0755,
                owner=params.hdfs_user,
                group=params.user_group)
    data_dir_to_mount_file_content = handle_dfs_data_dir(create_dirs, params)
    File(params.data_dir_mount_file,
         owner=params.hdfs_user,
         group=params.user_group,
         mode=0644,
         content=data_dir_to_mount_file_content)
  elif action == "start" or action == "stop":
    import params
    service(action=action, name="datanode",
            user=params.hdfs_user,
            create_pid_dir=True,
            create_log_dir=True)
  elif action == "status":
    import status_params
    check_process_status(status_params.datanode_pid_file)
def datanode(action=None):
  import params
  if action == "configure":
    Directory(params.dfs_domain_socket_dir,
              recursive=True,
              mode=0751,
              owner=params.hdfs_user,
              group=params.user_group)
    handle_dfs_data_dir(create_dirs, params)
  elif action == "start" or action == "stop":
    # At this point the HDP code uses a much more complex execution path;
    # I assume that is for standardization purposes and to avoid relying on
    # /etc/init.d. Here the init script is simply invoked via subprocess.
    from subprocess import Popen, PIPE, STDOUT
    cmd = Popen(['service', 'hadoop-hdfs-datanode', action], stdout=PIPE, stderr=STDOUT)
    out, err = cmd.communicate()
    rc = cmd.returncode
    Logger.info("Datanode service %s: %s" % (action, rc == 0))
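# An alternative sketch for the start/stop branch above: rather than calling
# Popen directly, the same init-script invocation can go through the
# resource_management Execute resource, which logs the command and raises on a
# non-zero exit code. This is an illustration of that design choice, assuming
# the usual resource_management package layout; it is not the upstream code.
from resource_management.core.resources.system import Execute

def datanode_service(action):
  # Runs "service hadoop-hdfs-datanode <action>"; a non-zero exit code makes
  # the surrounding Ambari command fail instead of being silently logged.
  Execute("service hadoop-hdfs-datanode " + action)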
def datanode(action=None):
  if action == "configure":
    import params
    Directory(params.dfs_domain_socket_dir,
              recursive=True,
              mode=0751,
              owner=params.hdfs_user,
              group=params.user_group)
    handle_dfs_data_dir(create_dirs, params)
  elif action == "start" or action == "stop":
    import params
    Directory(params.hadoop_pid_dir_prefix,
              mode=0755,
              owner=params.hdfs_user,
              group=params.user_group)
    service(action=action, name="datanode",
            user=params.hdfs_user,
            create_pid_dir=True,
            create_log_dir=True)
  elif action == "status":
    import status_params
    check_process_status(status_params.datanode_pid_file)
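# For context, a minimal sketch of how a datanode(action=...) helper such as the
# variants above is typically wired into an Ambari service script. The class and
# method bodies below follow the common Script-subclass pattern but are an
# assumption for illustration, not copied from the upstream scripts.
from resource_management.libraries.script.script import Script


class DataNode(Script):
  def configure(self, env):
    import params
    env.set_params(params)
    datanode(action="configure")

  def start(self, env):
    import params
    env.set_params(params)
    self.configure(env)  # regenerate configs before starting the daemon
    datanode(action="start")

  def stop(self, env):
    import params
    env.set_params(params)
    datanode(action="stop")

  def status(self, env):
    datanode(action="status")


if __name__ == "__main__":
  DataNode().execute()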