def test_blocksize(self):
    """blocksize() returns statvfs f_bsize, propagates errors, and the
    fallback case resolves a missing leaf via an existing ancestor."""
    # Happy path: f_bsize reported by os.statvfs is returned directly.
    with patch('os.statvfs', return_value=MagicMock(f_bsize=1024)):
        self.assertEqual(hcc.blocksize('/'), 1024)
    # OSError from statvfs (missing path, permission denied) propagates.
    for err_code in (errno.ENOENT, errno.EACCES):
        with patch('os.statvfs', side_effect=OSError(err_code, 'message')):
            self.assertRaises(OSError, hcc.blocksize, '/')
    # Non-OSError exceptions propagate unchanged as well.
    with patch('os.statvfs', side_effect=RuntimeError):
        self.assertRaises(RuntimeError, hcc.blocksize, '/')

    def statvfs_stub(path):
        # Only the parent '/abc' exists; every other path is missing.
        if path == '/abc':
            return MagicMock(f_bsize=4096)
        raise OSError(errno.ENOENT, 'message')

    # A missing leaf falls back to the blocksize of an existing ancestor.
    with patch('os.statvfs', side_effect=statvfs_stub):
        self.assertEqual(hcc.blocksize('/abc/def'), 4096)
def core_site_xml_defaults(workdir, node_info):
    """Build the default key/value entries for the core-site.xml config file.

    ``node_info`` is accepted for interface parity but not consulted here.

    NOTE(review): this file defines core_site_xml_defaults twice; the later
    definition shadows this one at import time — confirm which is intended.
    """
    defaults = {}
    defaults['dfs.replication'] = 1
    defaults['fs.defaultFS'] = 'hdfs://$masterhostaddress:54310'
    defaults['fs.inmemory.size.mb'] = 200
    defaults['hadoop.rpc.socket.factory.class.default'] = (
        'org.apache.hadoop.net.StandardSocketFactory')
    defaults['hadoop.tmp.dir'] = '$localworkdir'
    # If there is hdfs, probably don't set to this blocksize.
    defaults['io.file.buffer.size'] = blocksize(workdir)
    defaults['io.sort.factor'] = 64
    defaults['io.sort.mb'] = 256
    return defaults
def core_site_xml_defaults(workdir, node_info):
    """Return the default entries for the core-site.xml config file.

    ``node_info`` is part of the signature but unused in this implementation.

    NOTE(review): core_site_xml_defaults is defined twice in this file; this
    later definition is the one that takes effect — confirm which is intended.
    """
    # If there is hdfs, probably don't set to this blocksize.
    io_buffer_size = blocksize(workdir)
    return {
        "dfs.replication": 1,
        "fs.defaultFS": "file:///",
        "fs.inmemory.size.mb": 200,
        "hadoop.rpc.socket.factory.class.default":
            "org.apache.hadoop.net.StandardSocketFactory",
        "hadoop.tmp.dir": "$localworkdir",
        "io.file.buffer.size": io_buffer_size,
        "io.sort.factor": 64,
        "io.sort.mb": 256,
    }