def create(self):
    """
    Create a Hadoop cluster in ~okeanos.

    Builds the cluster specification from self.opts, POSTs it to the orka
    server and polls the resulting celery task until the cluster is active.
    On success prints cluster_id, master_IP and the root password and exits
    with SUCCESS; on a server rejection or any error, logs the message and
    exits with error_fatal.
    """
    try:
        payload = {"clusterchoice": {
            "project_name": self.opts['project_name'],
            "cluster_name": self.opts['name'],
            "cluster_size": self.opts['cluster_size'],
            "cpu_master": self.opts['cpu_master'],
            "ram_master": self.opts['ram_master'],
            "disk_master": self.opts['disk_master'],
            "cpu_slaves": self.opts['cpu_slave'],
            "ram_slaves": self.opts['ram_slave'],
            "disk_slaves": self.opts['disk_slave'],
            "disk_template": self.opts['disk_template'],
            "os_choice": self.opts['image'],
            "replication_factor": self.opts['replication_factor'],
            "dfs_blocksize": self.opts['dfs_blocksize']}}
        yarn_cluster_req = ClusterRequest(self.escience_token, self.server_url,
                                          payload, action='cluster')
        response = yarn_cluster_req.create_cluster()
        if 'task_id' in response['clusterchoice']:
            task_id = response['clusterchoice']['task_id']
        else:
            # Server rejected the request; its message explains why.
            logging.error(response['clusterchoice']['message'])
            exit(error_fatal)
        # Block until the celery task reports the cluster as active.
        result = task_message(task_id, self.escience_token, self.server_url,
                              wait_timer_create)
        logging.log(SUMMARY, "YARN Cluster is active, you can access it through {0}:8088/cluster,"
                    " and has the following properties:".format(result['master_IP']))
        stdout.write("cluster_id: {0}\nmaster_IP: {1}\n"
                     "root password: {2}\n".format(result['cluster_id'],
                                                   result['master_IP'],
                                                   result['master_VM_password']))
        exit(SUCCESS)
    except Exception as e:  # fix: 'as' syntax works on Python 2.6+ and 3
        # Clear any pending progress dots before reporting the error.
        stderr.write('{0}'.format('\r'))
        # Guard e.args[0]: some exceptions carry empty args.
        logging.error(str(e.args[0]) if e.args else str(e))
        exit(error_fatal)
def task_message(task_id, escience_token, server_url, wait_timer, task='not_progress_bar'):
    """
    Poll a create/destroy celery task on the orka server and log its state.

    Retrieves the task status in a loop: every new state is logged (or, when
    task == 'has_progress_bar', rewritten in place on stderr), while an
    unchanged state prints a progress dot. Returns the task's 'success'
    payload when it finishes; on an 'error' payload the error is logged and
    the process exits with error_fatal.
    """
    payload = {"job": {"task_id": task_id}}
    poller = ClusterRequest(escience_token, server_url, payload, action='job')
    last_state = 'placeholder'
    current = poller.retrieve()
    while 'state' in current['job']:
        state = current['job']['state']
        if state.replace('\r', '') == last_state.replace('\r', ''):
            # State unchanged since last poll: show progress with a dot.
            stderr.write('{0}'.format('.'))
        elif task == 'has_progress_bar':
            # Rewrite the same terminal line with the latest state.
            stderr.write(u'{0}\r'.format(state))
            stderr.flush()
            last_state = state
        else:
            stderr.write('{0}'.format('\r'))
            logging.log(SUMMARY, '{0}'.format(state))
            last_state = state
        sleep(wait_timer)
        current = poller.retrieve()
        stderr.flush()
    job = current['job']
    if 'success' in job:
        stderr.write('{0}'.format('\r'))
        return job['success']
    elif 'error' in job:
        stderr.write('{0}'.format('\r'))
        logging.error(job['error'])
        exit(error_fatal)
def hadoop_action(self):
    """
    Apply an action (e.g. start/stop) to Hadoop on an existing cluster.

    Finds the cluster given by opts['cluster_id'] among the user's clusters
    and requires it to be active. Rejects no-op requests (starting Hadoop
    that is already started, stopping Hadoop that is already stopped), then
    submits the action and waits for the celery task to finish.
    """
    # Idiomatic instance method call instead of unbound str.lower(...).
    action = self.opts['hadoop_status'].lower()
    clusters = get_user_clusters(self.opts['token'], self.opts['server_url'])
    active_cluster = None
    for cluster in clusters:
        if cluster['id'] == self.opts['cluster_id']:
            active_cluster = cluster
            if cluster['cluster_status'] == const_cluster_status_active:
                break
    else:
        # for-else: no break means no active cluster with that id was found.
        logging.error('Hadoop can only be managed for an active cluster.')
        exit(error_fatal)
    if active_cluster:
        if (active_cluster['hadoop_status'] == const_hadoop_status_started
                and action == "start"):
            logging.error('Hadoop already started.')
            exit(error_fatal)
        elif (active_cluster['hadoop_status'] == const_hadoop_status_stopped
                and action == "stop"):
            logging.error('Hadoop already stopped.')
            exit(error_fatal)
    try:
        payload = {"clusterchoice": {"id": self.opts['cluster_id'],
                                     "hadoop_status": action}}
        yarn_cluster_req = ClusterRequest(self.escience_token, self.server_url,
                                          payload, action='cluster')
        response = yarn_cluster_req.create_cluster()
        task_id = response['clusterchoice']['task_id']
        result = task_message(task_id, self.escience_token, self.server_url,
                              wait_timer_delete)
        logging.log(SUMMARY, result)
        exit(SUCCESS)
    except Exception as e:  # fix: 'as' syntax works on Python 2.6+ and 3
        stderr.write('{0}'.format('\r'))
        # Guard e.args[0]: some exceptions carry empty args.
        logging.error(str(e.args[0]) if e.args else str(e))
        exit(error_fatal)
def create(self):
    """
    Create a Hadoop cluster in ~okeanos.

    Builds the cluster specification from self.opts, POSTs it to the orka
    server and polls the resulting celery task until the cluster is active,
    then logs the master IP and the master VM root password. On a server
    rejection or any error, logs the message and exits with error_fatal.
    """
    try:
        payload = {"clusterchoice": {
            "project_name": self.opts['project_name'],
            "cluster_name": self.opts['name'],
            "cluster_size": self.opts['cluster_size'],
            "cpu_master": self.opts['cpu_master'],
            "ram_master": self.opts['ram_master'],
            "disk_master": self.opts['disk_master'],
            "cpu_slaves": self.opts['cpu_slave'],
            "ram_slaves": self.opts['ram_slave'],
            "disk_slaves": self.opts['disk_slave'],
            "disk_template": self.opts['disk_template'],
            "os_choice": self.opts['image'],
            "replication_factor": self.opts['replication_factor'],
            "dfs_blocksize": self.opts['dfs_blocksize']}}
        yarn_cluster_req = ClusterRequest(self.escience_token, payload,
                                          action='cluster')
        response = yarn_cluster_req.create_cluster()
        if 'task_id' in response['clusterchoice']:
            task_id = response['clusterchoice']['task_id']
        else:
            # Server rejected the request; its message explains why.
            logging.error(response['clusterchoice']['message'])
            exit(error_fatal)
        # Block until the celery task reports the cluster as active.
        result = task_message(task_id, self.escience_token, wait_timer_create)
        logging.log(SUMMARY, " Yarn Cluster is active.You can access it through " +
                    result['master_IP'] + ":8088/cluster")
        logging.log(SUMMARY, " The root password of your master VM is " +
                    result['master_VM_password'])
    except Exception as e:  # fix: 'as' syntax works on Python 2.6+ and 3
        # Guard e.args[0]: some exceptions carry empty args.
        logging.error(' Fatal error: ' + (str(e.args[0]) if e.args else str(e)))
        exit(error_fatal)
def destroy(self):
    """
    Delete a Hadoop cluster in ~okeanos.

    Requires the cluster given by opts['cluster_id'] to be active; submits
    the delete request and waits for the celery task, then logs the name of
    the destroyed cluster. Logs and exits with error_fatal on any failure.
    """
    clusters = get_user_clusters(self.opts['token'])
    for cluster in clusters:
        if (cluster['id'] == self.opts['cluster_id']) and \
                cluster['cluster_status'] == const_cluster_status_active:
            break
    else:
        # for-else: no break means no active cluster with that id was found.
        logging.error(' Only active clusters can be destroyed.')
        exit(error_fatal)
    try:
        payload = {"clusterchoice": {"id": self.opts['cluster_id']}}
        yarn_cluster_req = ClusterRequest(self.escience_token, payload,
                                          action='cluster')
        response = yarn_cluster_req.delete_cluster()
        task_id = response['clusterchoice']['task_id']
        # Block until the celery task reports the deletion finished.
        result = task_message(task_id, self.escience_token, wait_timer_delete)
        logging.log(SUMMARY, ' Cluster with name "%s" and all its resources deleted' % (result))
    except Exception as e:  # fix: 'as' syntax works on Python 2.6+ and 3
        # Guard e.args[0]: some exceptions carry empty args.
        logging.error(str(e.args[0]) if e.args else str(e))
        exit(error_fatal)
def destroy(self):
    """
    Delete a Hadoop cluster in ~okeanos.

    Requires the cluster given by opts['cluster_id'] to be active; submits
    the delete request and waits for the celery task, then logs the name of
    the destroyed cluster and exits with SUCCESS. Logs and exits with
    error_fatal on any failure.
    """
    clusters = get_user_clusters(self.opts['token'], self.opts['server_url'])
    for cluster in clusters:
        if (cluster['id'] == self.opts['cluster_id']) and \
                cluster['cluster_status'] == const_cluster_status_active:
            break
    else:
        # for-else: no break means no active cluster with that id was found.
        logging.error('Only active clusters can be destroyed.')
        exit(error_fatal)
    try:
        payload = {"clusterchoice": {"id": self.opts['cluster_id']}}
        yarn_cluster_req = ClusterRequest(self.escience_token, self.server_url,
                                          payload, action='cluster')
        response = yarn_cluster_req.delete_cluster()
        task_id = response['clusterchoice']['task_id']
        # Block until the celery task reports the deletion finished.
        result = task_message(task_id, self.escience_token, self.server_url,
                              wait_timer_delete)
        logging.log(SUMMARY, 'Cluster with name "{0}" and all its resources deleted'.format(result))
        exit(SUCCESS)
    except Exception as e:  # fix: 'as' syntax works on Python 2.6+ and 3
        # Clear any pending progress dots before reporting the error.
        stderr.write('{0}'.format('\r'))
        # Guard e.args[0]: some exceptions carry empty args.
        logging.error(str(e.args[0]) if e.args else str(e))
        exit(error_fatal)
def destroy_vre_machine(self):
    """
    Delete a VRE server in ~okeanos.

    Requires the server given by opts['server_id'] to be active; submits
    the delete request and waits for the celery task, then logs the name of
    the destroyed server and exits with SUCCESS. Logs and exits with
    error_fatal on any failure.
    """
    vre_servers = get_user_clusters(self.opts['token'], self.opts['server_url'],
                                    choice='vreservers')
    for server in vre_servers:
        if (server['id'] == self.opts['server_id']) and \
                server['server_status'] == const_cluster_status_active:
            break
    else:
        # for-else: no break means no active server with that id was found.
        logging.error('Only active VRE servers can be destroyed.')
        exit(error_fatal)
    try:
        payload = {"vreserver": {"id": self.opts['server_id']}}
        yarn_cluster_req = ClusterRequest(self.escience_token, self.server_url,
                                          payload, action='vre')
        response = yarn_cluster_req.delete_cluster()
        task_id = response['vreserver']['task_id']
        # Block until the celery task reports the deletion finished.
        result = task_message(task_id, self.escience_token, self.server_url,
                              wait_timer_delete)
        logging.log(SUMMARY, 'VRE server with name "{0}" and its IP were deleted'.format(result))
        exit(SUCCESS)
    except Exception as e:  # fix: 'as' syntax works on Python 2.6+ and 3
        # Clear any pending progress dots before reporting the error.
        stderr.write('{0}'.format('\r'))
        # Guard e.args[0]: some exceptions carry empty args.
        logging.error(str(e.args[0]) if e.args else str(e))
        exit(error_fatal)
def put_from_server(self):
    """
    Copy a file from an ftp/http server into the cluster's HDFS.

    POSTs a request to the orka app server asking it to transfer
    opts['source'] to opts['destination'] (with the given ftp/http
    credentials), then follows the transfer task with a progress bar.
    Returns the task result (0 indicates a completed transfer).
    """
    request_data = {"hdfs": {"id": self.opts['cluster_id'],
                             "source": "\'{0}\'".format(self.opts['source']),
                             "dest": "\'{0}\'".format(self.opts['destination']),
                             "user": self.opts['user'],
                             "password": self.opts['password']}}
    hdfs_request = ClusterRequest(self.escience_token, self.server_url,
                                  request_data, action='hdfs')
    reply = hdfs_request.post()
    # Early exit when the server rejected the request.
    if 'task_id' not in reply['hdfs']:
        logging.error(reply['hdfs']['message'])
        exit(error_fatal)
    task_id = reply['hdfs']['task_id']
    logging.log(SUMMARY, 'Starting file transfer')
    result = task_message(task_id, self.escience_token, self.server_url,
                          wait_timer_delete, task='has_progress_bar')
    if result == 0:
        stdout.flush()
        logging.log(SUMMARY, 'Transfered file to Hadoop filesystem')
    return result
def create_vre_machine(self):
    """
    Create a VRE server in ~okeanos.

    Builds the server specification from self.opts, POSTs it to the orka
    server and polls the resulting celery task until the server is active.
    On success prints server_id, server_IP and the root password and exits
    with SUCCESS; on a server rejection or any error, logs the message and
    exits with error_fatal.
    """
    try:
        payload = {"vreserver": {
            "project_name": self.opts['project_name'],
            "server_name": self.opts['name'],
            "cpu": self.opts['cpu'],
            "ram": self.opts['ram'],
            "disk": self.opts['disk'],
            "disk_template": self.opts['disk_template'],
            "os_choice": self.opts['image']}}
        yarn_cluster_req = ClusterRequest(self.escience_token, self.server_url,
                                          payload, action='vre')
        response = yarn_cluster_req.post()
        if 'task_id' in response['vreserver']:
            task_id = response['vreserver']['task_id']
        else:
            # Server rejected the request; its message explains why.
            logging.error(response['vreserver']['message'])
            exit(error_fatal)
        # Block until the celery task reports the server as active.
        result = task_message(task_id, self.escience_token, self.server_url,
                              wait_timer_create)
        logging.log(SUMMARY, "VRE server is active and has the following properties:")
        stdout.write("server_id: {0}\nserver_IP: {1}\n"
                     "root password: {2}\n".format(result['server_id'],
                                                   result['server_IP'],
                                                   result['VRE_VM_password']))
        exit(SUCCESS)
    except Exception as e:  # fix: 'as' syntax works on Python 2.6+ and 3
        # Clear any pending progress dots before reporting the error.
        stderr.write('{0}'.format('\r'))
        # Guard e.args[0]: some exceptions carry empty args.
        logging.error(str(e.args[0]) if e.args else str(e))
        exit(error_fatal)