def __launch__(self, config, node_list=None):
    """
    Send the zcat job to SGE via the parent launcher and record pertinent
    information.

    :param config: configuration object providing the ``Zcat`` section.
    :param node_list: SGE nodes to target; defaults to the ``Zcat``/``nodes``
        config value.  NOTE(review): currently resolved but not forwarded to
        the parent launcher -- confirm whether node targeting is intended
        (see the disabled call below).
    :return: ``True`` once the job has been submitted.
    """
    if node_list is None:
        node_list = config.get('Zcat', 'nodes')
    SampleQsubProcess.__launch__(self, config)
    # Disabled alternative that would pin the job to node_list on the
    # 'single' queue:
    # SampleQsubProcess.__launch__(self,config,node_list=node_list,queue_name='single')
    return True
def __launch__(self, config, command=None, **kwargs):
    """
    Send the job to SGE and record pertinent information.

    :param config: configuration object forwarded to the parent launcher.
    :param command: submission command; when omitted, a 30-second sleep is
        prefixed ahead of ``qsub``.
    :return: whatever the parent ``__launch__`` returns.
    """
    launch_command = ['sleep 30;', 'qsub'] if command is None else command
    return SampleQsubProcess.__launch__(self, config, command=launch_command, **kwargs)
def __launch__(self, configs, storage_device, node_list=None):
    """
    Launch the backup job on SGE, provided the storage device has room.

    Checks to make sure there is enough storage.  If not, sends an email
    and aborts.  If so, sends the job to SGE and records pertinent
    information.

    :param configs: dict of config objects; uses the ``pipeline`` config's
        ``Storage``/``Backup`` sections and the ``system`` config for launch.
    :param storage_device: object exposing ``__is_full__`` and
        ``__is_available__`` capacity checks.
    :param node_list: SGE nodes to target; defaults to the
        ``Backup``/``nodes`` config value.
    :return: ``True`` if the job was submitted, ``False`` if aborted
        because of insufficient storage.
    """
    required_size = configs['pipeline'].get('Storage', 'required_fastq_size')
    # If the storage device is full, send a notification and abort.
    if storage_device.__is_full__(required_size):
        send_email(self.__generate_full_error_text__(configs, storage_device))
        return False
    # This differs from the previous check by the fact that the previous
    # does not account for jobs that are currently being copied.  This
    # error is not as restrictive due to the fact that required_fastq_size
    # should be larger than the actual fastq size, thus leaving additional
    # storage once complete.  The failure email is only sent once.
    # NOTE(review): once fail_reported is True, an unavailable device no
    # longer blocks the launch -- confirm this fall-through is intended.
    if not storage_device.__is_available__(required_size) and not self.fail_reported:
        send_email(self.__generate_storage_error_text__(configs, storage_device))
        self.fail_reported = True
        return False
    if node_list is None:
        node_list = configs['pipeline'].get('Backup', 'nodes')
    SampleQsubProcess.__launch__(self, configs['system'], node_list=node_list, queue_name='single')
    return True