def main():
    """Delete an HDFS path from the cluster via the coordinator."""
    common.setup()
    # Require exactly "-f <hdfs_path>"; the mandatory flag is a deliberate
    # speed bump for a destructive operation.
    args = sys.argv
    if len(args) != 3 or args[1] != '-f':
        print('USAGE: {0} -f hdfs_path'.format(common.script_name()))
        print('WARNING: This deletes hdfs_path from your cluster.')
        sys.exit(1)
    target_path = args[2]
    common.send_coordinator('/job/clean', {'path': target_path}, verify=True)
def main():
    """Add worker nodes to a running Hadoop cluster.

    Usage: <script> num_slaves
    """
    common.setup()
    if len(sys.argv) != 2:
        print('USAGE: {0} num_slaves'.format(common.script_name()))
        sys.exit(1)
    try:
        num_slaves = int(sys.argv[1])
    except ValueError:
        # A non-numeric argument previously died with a traceback from
        # int(); show the usage line and exit cleanly instead.
        print('USAGE: {0} num_slaves'.format(common.script_name()))
        sys.exit(1)
    print('Adding {0} slaves...'.format(num_slaves))
    common.send_coordinator('/hadoop/add_slaves', {'num_slaves': num_slaves})
def main():
    """Launch a Hadoop cluster with the requested number of slaves.

    Usage: <script> num_slaves

    Exits with status 1 on bad arguments or when num_slaves is below the
    configured minimum needed for filesystem replication.
    """
    common.setup()
    if len(sys.argv) != 2:
        print('USAGE: {0} num_slaves'.format(common.script_name()))
        sys.exit(1)
    try:
        num_slaves = int(sys.argv[1])
    except ValueError:
        # Non-numeric input previously crashed in int(); report usage instead.
        print('USAGE: {0} num_slaves'.format(common.script_name()))
        sys.exit(1)
    if num_slaves < cfg.needed_slaves:
        # The original applied .format() to the parenthesized string AFTER the
        # py2 print statement -- legal but fragile; build the message first.
        msg = ('Hadoop needs at least {0} slaves for filesystem '
               'replication').format(cfg.needed_slaves)
        print(msg)
        sys.exit(1)
    print('Setting up Hadoop...')
    common.send_coordinator('/hadoop/launch', {'num_slaves': num_slaves})
    common.wait_for_hadoop()
def main():
    """Launch a Hadoop cluster with the requested number of slaves.

    Usage: <script> num_slaves

    Exits with status 1 on bad arguments or when num_slaves is below the
    configured minimum needed for filesystem replication.
    """
    common.setup()
    usage = 'USAGE: {0} num_slaves'.format(common.script_name())
    if len(sys.argv) != 2:
        print(usage)
        sys.exit(1)
    try:
        num_slaves = int(sys.argv[1])
    except ValueError:
        # int() on a non-numeric argument used to raise an unhandled
        # ValueError; print the usage line instead of a traceback.
        print(usage)
        sys.exit(1)
    if num_slaves < cfg.needed_slaves:
        # Format the message before printing; the original chained .format()
        # onto the parenthesized string after a py2 print statement, which
        # only parses correctly under Python 2.
        error = ('Hadoop needs at least {0} slaves for filesystem '
                 'replication').format(cfg.needed_slaves)
        print(error)
        sys.exit(1)
    print('Setting up Hadoop...')
    common.send_coordinator('/hadoop/launch', {'num_slaves': num_slaves})
    common.wait_for_hadoop()
def main():
    """Fetch the cluster status from the coordinator and pretty-print it."""
    try:
        common.setup()
        status = common.send_coordinator('/status/cluster', {})
        common.pprint_status(status)
    except TypeError:
        # NOTE(review): presumably send_coordinator yields an unusable value
        # (e.g. None) when the coordinator is down or the secret is wrong,
        # and pprint_status then raises TypeError -- confirm against common.
        print('The coordinator is not running, or you sent the wrong secret.')