Example #1
File: orka.py Project: amathilda/e-science
 def hadoop_action(self):
     """ Method for applying an action to a Hadoop cluster"""
     action = self.opts['hadoop_status'].lower()
     clusters = get_user_clusters(self.opts['token'], self.opts['server_url'])
     active_cluster = None
     for cluster in clusters:
         if (cluster['id'] == self.opts['cluster_id']):
             active_cluster = cluster
             if cluster['cluster_status'] == const_cluster_status_active:
                 break
     else:
         logging.error('Hadoop can only be managed for an active cluster.')
         exit(error_fatal)
     if active_cluster:
         if (active_cluster['hadoop_status'] == const_hadoop_status_started and action == "start"):
             logging.error('Hadoop already started.')
             exit(error_fatal)
         elif (active_cluster['hadoop_status'] == const_hadoop_status_stopped and action == "stop"):
             logging.error('Hadoop already stopped.')
             exit(error_fatal)
     try:
         payload = {"clusterchoice":{"id": self.opts['cluster_id'], "hadoop_status": action}}
         yarn_cluster_req = ClusterRequest(self.escience_token, self.server_url, payload, action='cluster')
         response = yarn_cluster_req.create_cluster()
         task_id = response['clusterchoice']['task_id']
         result = task_message(task_id, self.escience_token, self.server_url, wait_timer_delete)
         logging.log(SUMMARY, result)
         exit(SUCCESS)
     except Exception as e:
         stderr.write('\r')  # return the cursor to the start of the line before logging
         logging.error(str(e.args[0]))
         exit(error_fatal)
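Note: the lookup above relies on Python's for/else idiom, where the else block runs only if the loop finished without hitting break; that is what turns "no matching active cluster" into a fatal error. A minimal, self-contained illustration of the idiom (the cluster data below is made up):

    clusters = [{'id': 1, 'cluster_status': '0'}, {'id': 2, 'cluster_status': '1'}]
    wanted_id, ACTIVE = 2, '1'
    for cluster in clusters:
        if cluster['id'] == wanted_id and cluster['cluster_status'] == ACTIVE:
            print('found active cluster', cluster['id'])
            break
    else:
        # runs only when the loop completed without break
        print('no active cluster with that id')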
Example #2
 def file_action(self):
     """ Method for taking actions to and from Hadoop filesystem """
     # safe getters, defaults to False if the option is not set
     opt_filelist = self.opts.get('filelist', False)
     opt_fileput = self.opts.get('fileput', False)
     opt_fileget = self.opts.get('fileget', False)
     if opt_filelist:
         self.list_pithos_files()
     else:
         clusters = get_user_clusters(self.opts['token'])
         active_cluster = None
         for cluster in clusters:
             if (cluster['id'] == self.opts['cluster_id']):
                 if cluster['hadoop_status'] == const_hadoop_status_started:
                     active_cluster = cluster
                     break
         else:
             logging.error(' You can take file actions on active clusters with started hadoop only.')
             exit(error_fatal)
         source_path = self.opts['source'].split("/")
         self.source_filename = source_path[-1]
         if opt_fileput:
             try:
                 if is_period(self.opts['destination']) or is_default_dir(self.opts['destination']):
                     self.opts['destination'] = self.source_filename
                 file_protocol, remain = get_file_protocol(self.opts['source'], 'fileput', 'source')
                 self.check_hdfs_destination(active_cluster)
                 if file_protocol == 'http-ftp':
                     self.put_from_server()
                 elif file_protocol == 'file':
                     self.put_from_local(active_cluster)
                 elif file_protocol == 'pithos':
                     kamaki_filespec = remain
                     self.put_from_pithos(active_cluster,kamaki_filespec)
                 else:
                     logging.error(' Error: Unrecognized source filespec.')
                     exit(error_fatal)
             except Exception as e:
                 logging.error(str(e.args[0]))
                 exit(error_fatal)
         elif opt_fileget:
             try:
                 if is_period(self.opts['destination']):
                     self.opts['destination'] = os.getcwd()
                 file_protocol, remain = get_file_protocol(self.opts['destination'], 'fileget', 'destination')
                 if file_protocol == 'pithos':
                     self.get_from_hadoop_to_pithos(active_cluster, remain)
                 elif file_protocol == 'file' or file_protocol == "folder":
                     self.get_from_hadoop_to_local(active_cluster)
                 else:
                     logging.error(' Error: Unrecognized destination filespec.')
                     exit(error_fatal)
             except Exception as e:
                 logging.error(str(e.args[0]))
                 exit(error_fatal)
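Note: the callers above only assume that get_file_protocol splits a filespec into a protocol tag ('file', 'pithos', 'http-ftp', ...) plus the remainder after the scheme. A rough sketch of that contract, not the project's actual implementation (the real helper also reports 'folder' for fileget destinations, which this sketch omits):

    def get_file_protocol(filespec, action, kind):
        # hypothetical sketch: classify a filespec by its scheme prefix
        # and return (protocol_tag, remainder)
        schemes = (('pithos://', 'pithos'), ('http://', 'http-ftp'),
                   ('https://', 'http-ftp'), ('ftp://', 'http-ftp'))
        for prefix, tag in schemes:
            if filespec.startswith(prefix):
                return tag, filespec[len(prefix):]
        return 'file', filespec  # default: treat as a local path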
Example #3
 def destroy(self):
     """ Method for deleting Hadoop clusters in~okeanos."""
     clusters = get_user_clusters(self.opts['token'])
     for cluster in clusters:
         if (cluster['id'] == self.opts['cluster_id']) and cluster['cluster_status'] == const_cluster_status_active:
             break
     else:
         logging.error(' Only active clusters can be destroyed.')
         exit(error_fatal)
     try:
         payload = {"clusterchoice":{"id": self.opts['cluster_id']}}
         yarn_cluster_req = ClusterRequest(self.escience_token, payload, action='cluster')
         response = yarn_cluster_req.delete_cluster()
         task_id = response['clusterchoice']['task_id']
         result = task_message(task_id, self.escience_token, wait_timer_delete)
         logging.log(SUMMARY, ' Cluster with name "%s" and all its resources deleted' %(result))
     except Exception as e:
         logging.error(str(e.args[0]))
         exit(error_fatal)
Example #4
File: orka.py Project: amathilda/e-science
 def destroy(self):
     """ Method for deleting Hadoop clusters in~okeanos."""
     clusters = get_user_clusters(self.opts['token'], self.opts['server_url'])
     for cluster in clusters:
         if (cluster['id'] == self.opts['cluster_id']) and cluster['cluster_status'] == const_cluster_status_active:
             break
     else:
         logging.error('Only active clusters can be destroyed.')
         exit(error_fatal)
     try:
         payload = {"clusterchoice":{"id": self.opts['cluster_id']}}
         yarn_cluster_req = ClusterRequest(self.escience_token, self.server_url, payload, action='cluster')
         response = yarn_cluster_req.delete_cluster()
         task_id = response['clusterchoice']['task_id']
         result = task_message(task_id, self.escience_token, self.server_url, wait_timer_delete)
         logging.log(SUMMARY, 'Cluster with name "{0}" and all its resources deleted'.format(result))
         exit(SUCCESS)
     except Exception as e:
         stderr.write('\r')
         logging.error(str(e.args[0]))
         exit(error_fatal)
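Note: every mutating call in these examples follows the same shape: send a payload through ClusterRequest, read a task_id out of the response, then block in task_message until the backend task settles. A generic sketch of such a polling loop, assuming a callable that reports task state (the status names and response layout here are assumptions, not orka's actual protocol):

    import time

    def poll_task(fetch_state, task_id, wait_timer):
        # fetch_state is any callable returning {'status': ..., 'result': ...}
        while True:
            state = fetch_state(task_id)
            if state['status'] == 'SUCCESS':
                return state['result']
            if state['status'] == 'FAILURE':
                raise RuntimeError(state['result'])
            time.sleep(wait_timer)  # wait before asking again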
Example #5
File: orka.py Project: amathilda/e-science
 def destroy_vre_machine(self):
     """ Method for deleting VRE servers in~okeanos."""
     vre_servers = get_user_clusters(self.opts['token'], self.opts['server_url'], choice='vreservers')
     for server in vre_servers:
         if (server['id'] == self.opts['server_id']) and server['server_status'] == const_cluster_status_active:
             break
     else:
         logging.error('Only active VRE servers can be destroyed.')
         exit(error_fatal)
     try:
         payload = {"vreserver":{"id": self.opts['server_id']}}
         yarn_cluster_req = ClusterRequest(self.escience_token, self.server_url, payload, action='vre')
         response = yarn_cluster_req.delete_cluster()
         task_id = response['vreserver']['task_id']
         result = task_message(task_id, self.escience_token, self.server_url, wait_timer_delete)
         logging.log(SUMMARY, 'VRE server with name "{0}" and its IP were deleted'.format(result))
         exit(SUCCESS)
     except Exception as e:
         stderr.write('\r')
         logging.error(str(e.args[0]))
         exit(error_fatal)
Example #6
File: orka.py Project: amathilda/e-science
 def list(self):
     try:
         self.data.extend(get_user_clusters(self.opts['token'], self.opts['server_url']))
     except ClientError as e:
         logging.error(e.message)
         exit(error_fatal)
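Note: list() only appends the fetched cluster records to self.data; printing is left to the caller. A hypothetical driver, where the HadoopCluster class name and the opts keys are assumptions based on how the methods above read them:

    # hypothetical usage: class name and option keys are assumed for illustration
    cmd = HadoopCluster(opts={'token': TOKEN, 'server_url': SERVER_URL})
    cmd.data = []  # list() extends this in place
    cmd.list()
    for cluster in cmd.data:
        print(cluster['id'], cluster['cluster_status'])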
Example #7
File: orka.py Project: amathilda/e-science
 def file_action(self):
     """ Method for taking actions to and from Hadoop filesystem """
     # safe getters, defaults to False if the option is not set
     opt_filelist = self.opts.get('filelist', False)
     opt_fileput = self.opts.get('fileput', False)
     opt_fileget = self.opts.get('fileget', False)
     opt_filemkdir = self.opts.get('filemkdir', False)
     if opt_filelist:
         self.list_pithos_files()
     else:
         clusters = get_user_clusters(self.opts['token'], self.opts['server_url'])
         active_cluster = None
         for cluster in clusters:
             if (cluster['id'] == self.opts['cluster_id']):
                 if cluster['hadoop_status'] == const_hadoop_status_started:
                     active_cluster = cluster
                     break
         else:
             logging.error('You can take file actions on active clusters with started hadoop only.')
             exit(error_fatal)
         if opt_fileput:
             try:
                 # 'destination' collects any extra sources plus the actual
                 # destination as its final element, so unpack it first
                 sourcesLength = len(self.opts['destination'])
                 sources = [self.opts['source']]
                 destination = self.opts['destination'][-1]
                 if sourcesLength > 1:
                     # several sources: make sure the destination is a directory path
                     if not destination.endswith("/"):
                         destination += '/'
                     for source in self.opts['destination'][:-1]:
                         sources.append(source)
                 for self.opts['source'] in sources:
                     self.opts['destination'] = destination
                     source_path = self.opts['source'].split("/")
                     self.source_filename = source_path[-1]
                     if is_period(self.opts['destination']) or is_default_dir(self.opts['destination']):
                         self.opts['destination'] = self.source_filename
                     file_protocol, remain = get_file_protocol(self.opts['source'], 'fileput', 'source')
                     self.check_hdfs_destination(active_cluster)
                     if file_protocol == 'http-ftp':
                         self.put_from_server()
                     elif file_protocol == 'file':
                         self.put_from_local(active_cluster)
                     elif file_protocol == 'pithos':
                         kamaki_filespec = remain
                         self.put_from_pithos(active_cluster, kamaki_filespec)
                     else:
                         logging.error('Unrecognized source filespec.')
                         exit(error_fatal)
             except Exception as e:
                 stderr.write('\r')
                 logging.error(str(e.args[0]))
                 exit(error_fatal)
         elif opt_fileget:
             try:
                 if is_period(self.opts['destination']):
                     self.opts['destination'] = os.getcwd()
                 file_protocol, remain = get_file_protocol(self.opts['destination'], 'fileget', 'destination')
                 if file_protocol == 'pithos':
                     self.get_from_hadoop_to_pithos(active_cluster, remain)
                 elif file_protocol == 'file' or file_protocol == "folder":
                     self.get_from_hadoop_to_local(active_cluster)
                 else:
                     logging.error('Unrecognized destination filespec.')
                     exit(error_fatal)
             except Exception as e:
                 stderr.write('\r')
                 logging.error(str(e.args[0]))
                 exit(error_fatal)
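Note: the unusual unpacking at the top of the fileput branch, where self.opts['destination'] carries any extra sources plus the real destination as its last element, is what a greedy positional CLI argument produces. A hedged sketch of how such a parser could be declared (the argument names match the opts keys above, but this is not necessarily orka's actual parser):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('source')                  # first source file
    parser.add_argument('destination', nargs='+')  # extra sources, destination last
    opts = vars(parser.parse_args(['a.txt', 'b.txt', '/user/hdfs/']))
    # opts == {'source': 'a.txt', 'destination': ['b.txt', '/user/hdfs/']}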