def _wait_for_burst_ops(self, burst_config):
    """
    Sleep until at least one operation of the burst is created, or timeout.

    :param burst_config: BurstConfiguration whose operations we poll for.
    :returns: list of operations found for the burst (may be empty if the
              ~50s timeout expired before any operation appeared).
    """
    waited = 1
    timeout = 50
    operations = dao.get_operations_in_burst(burst_config.id)
    # Poll once per second until operations show up or we give up.
    while not len(operations) and waited <= timeout:
        sleep(1)
        waited += 1
        operations = dao.get_operations_in_burst(burst_config.id)
    # Fix: the original issued one more redundant, identical DB query here
    # before returning; the loop already leaves `operations` up to date.
    return operations
def test_remove_burst_operation(self):
    """
    Launch a long-running burst, wait for its operation to start, then stop
    it with remove=True and check the operation row is gone.
    """
    burst_config = self._long_burst_launch()
    # Fix: reuse the shared polling helper instead of duplicating its
    # wait-loop inline, and reuse the returned list instead of re-querying.
    operations = self._wait_for_burst_ops(burst_config)
    operation = operations[0]
    self.assertEqual(operation.status, model.STATUS_STARTED)
    # Third argument True -> remove the operation after stopping it.
    self.flow_c.stop_burst_operation(operation.id, 0, True)
    operation = dao.get_operation_by_id(operation.id)
    self.assertTrue(operation is None)
def test_stop_burst_operation_group(self):
    """
    Launch a long-running group burst, wait for its operations to start,
    stop the whole group, then check every operation was canceled.
    """
    burst_config = self._long_burst_launch(True)
    # Fix: reuse the shared polling helper instead of duplicating its
    # wait-loop inline, and drop the redundant re-query after the loop.
    operations = self._wait_for_burst_ops(burst_config)
    for operation in operations:
        self.assertEqual(operation.status, model.STATUS_STARTED)
    # NOTE(review): stop is issued once, using the group id taken from the
    # last operation — presumably all operations share one operation group;
    # second argument 1 -> stop by group, False -> cancel without removing.
    self.flow_c.stop_burst_operation(operation.fk_operation_group, 1, False)
    for operation in operations:
        operation = dao.get_operation_by_id(operation.id)
        self.assertEqual(operation.status, model.STATUS_CANCELED)
def cancel_or_remove_burst(self, burst_id):
    """
    Cancel (if burst is still running) or Remove the burst given by burst_id.

    :param burst_id: id of the BurstConfiguration to cancel or remove.
    :returns True when Remove operation was done and False when Cancel
    :raises RemoveDataTypeException: when the burst row, or one of its linked
            operations, could not be dropped.
    """
    burst_entity = dao.get_burst_by_id(burst_id)
    if burst_entity.status == burst_entity.BURST_RUNNING:
        # Running bursts only get a stop request; nothing is deleted.
        self.stop_burst(burst_entity)
        return False
    service = ProjectService()
    ## Remove each DataType in current burst.
    ## We can not leave all on cascade, because it won't work on SQLite for mapped dataTypes.
    datatypes = dao.get_all_datatypes_in_burst(burst_id)
    ## Get operations linked to current burst before removing the burst or else
    ## the burst won't be there to identify operations any more.
    remaining_ops = dao.get_operations_in_burst(burst_id)
    #Remove burst first to delete work-flow steps which still hold foreign keys to operations.
    correct = dao.remove_entity(burst_entity.__class__, burst_id)
    if not correct:
        raise RemoveDataTypeException("Could not remove Burst entity!")
    for datatype in datatypes:
        service.remove_datatype(burst_entity.fk_project, datatype.gid, False)
    ## Remove all Operations remained.
    correct = True
    remaining_op_groups = set()
    project = dao.get_project_by_id(burst_entity.fk_project)
    for oper in remaining_ops:
        is_remaining = dao.get_generic_entity(oper.__class__, oper.id)
        if len(is_remaining) == 0:
            ### Operation removed cascaded.
            continue
        # Remove each operation group at most once (tracked in remaining_op_groups).
        if oper.fk_operation_group is not None and oper.fk_operation_group not in remaining_op_groups:
            is_remaining = dao.get_generic_entity(model.OperationGroup, oper.fk_operation_group)
            if len(is_remaining) > 0:
                remaining_op_groups.add(oper.fk_operation_group)
                # NOTE(review): the `and` short-circuits — once one removal fails,
                # later remove_entity calls are skipped; confirm this is intended.
                correct = correct and dao.remove_entity(model.OperationGroup, oper.fk_operation_group)
        correct = correct and dao.remove_entity(oper.__class__, oper.id)
        # On-disk operation folder is cleaned regardless of DB removal success.
        service.structure_helper.remove_operation_data(project.name, oper.id)
    if not correct:
        raise RemoveDataTypeException("Could not remove Burst because a linked operation could not be dropped!!")
    return True
def cancel_or_remove_burst(self, burst_id):
    """
    Cancel the burst identified by `burst_id` when it is still running;
    otherwise remove it, together with its datatypes and operations.

    :returns True when Remove operation was done and False when Cancel
    """
    burst_entity = dao.get_burst_by_id(burst_id)
    if burst_entity.status == burst_entity.BURST_RUNNING:
        # Still running: just request a stop, nothing gets deleted.
        self.stop_burst(burst_entity)
        return False

    project_service = ProjectService()
    # Collect datatypes and operations BEFORE deleting the burst row, since it
    # is the only handle by which they can be identified. Cascade alone is not
    # enough: it does not work on SQLite for mapped dataTypes.
    burst_datatypes = dao.get_all_datatypes_in_burst(burst_id)
    linked_operations = dao.get_operations_in_burst(burst_id)

    # The burst goes first, so the work-flow steps holding foreign keys
    # towards operations disappear before the operations themselves.
    if not dao.remove_entity(burst_entity.__class__, burst_id):
        raise RemoveDataTypeException("Could not remove Burst entity!")

    for datatype in burst_datatypes:
        project_service.remove_datatype(burst_entity.fk_project, datatype.gid, False)

    # Drop whatever operations (and their groups) survived the cascade.
    all_removed = True
    processed_group_ids = set()
    project = dao.get_project_by_id(burst_entity.fk_project)
    for operation in linked_operations:
        if len(dao.get_generic_entity(operation.__class__, operation.id)) == 0:
            # Operation was already removed by cascade.
            continue
        group_id = operation.fk_operation_group
        if group_id is not None and group_id not in processed_group_ids:
            if len(dao.get_generic_entity(model.OperationGroup, group_id)) > 0:
                processed_group_ids.add(group_id)
                if all_removed:
                    all_removed = dao.remove_entity(model.OperationGroup, group_id)
        if all_removed:
            all_removed = dao.remove_entity(operation.__class__, operation.id)
        project_service.structure_helper.remove_operation_data(project.name, operation.id)

    if not all_removed:
        raise RemoveDataTypeException("Could not remove Burst because a linked operation could not be dropped!!")
    return True
def mark_burst_finished(self, burst_entity, error=False, success=False, cancel=False, error_message=None):
    """
    Mark Burst status field.
    Also compute 'weight' for current burst: no of operations inside, estimate time on disk...

    :param burst_entity: BurstConfiguration to be updated, at finish time.
    :param error: When True, burst will be marked as finished with error.
    :param success: When True, burst will be marked successfully.
    :param cancel: When True, burst will be marked as user-canceled.
    :param error_message: optional message stored on the burst entity.
    """
    try:
        linked_ops_number = dao.get_operations_in_burst(burst_entity.id, is_count=True)
        linked_datatypes = dao.get_generic_entity(model.DataType, burst_entity.id, "fk_parent_burst")
        # 1KB for each dataType, considered for operation.xml files
        disk_size = linked_ops_number
        dt_group_sizes = {}
        for dtype in linked_datatypes:
            if dtype.disk_size is not None:
                disk_size = disk_size + dtype.disk_size
                ### Prepare and compute DataTypeGroup sizes, in case of ranges.
                # Fix: dict.get replaces the manual `x[k] if k in x else 0` pattern.
                if dtype.fk_datatype_group:
                    dt_group_sizes[dtype.fk_datatype_group] = dt_group_sizes.get(dtype.fk_datatype_group, 0) + dtype.disk_size
        ### If there are any DataType Groups in current Burst, update their counter.
        # Fix: dropped the redundant `if len(...) > 0` guard — iterating an
        # empty list is a no-op anyway.
        burst_dt_groups = dao.get_generic_entity(model.DataTypeGroup, burst_entity.id, "fk_parent_burst")
        for dt_group in burst_dt_groups:
            dt_group.count_results = dao.count_datatypes_in_group(dt_group.id)
            dt_group.disk_size = dt_group_sizes.get(dt_group.id, 0)
            dao.store_entity(dt_group)
        ### Update actual Burst entity fields
        burst_entity.disk_size = disk_size    # In KB
        burst_entity.datatypes_number = len(linked_datatypes)
        burst_entity.workflows_number = len(dao.get_workflows_for_burst(burst_entity.id))
        burst_entity.mark_status(success=success, error=error, cancel=cancel)
        burst_entity.error_message = error_message
        dao.store_entity(burst_entity)
    # Fix: `except Exception, excep` is Python-2-only; the `as` form is valid
    # on Python 2.6+ and Python 3.
    except Exception as excep:
        self.logger.error(excep)
        self.logger.exception("Could not correctly update Burst status and meta-data!")
        # Best-effort fallback: mark the burst as errored so it does not hang.
        burst_entity.mark_status(error=True)
        burst_entity.error_message = "Error when updating Burst Status"
        dao.store_entity(burst_entity)
def mark_burst_finished(self, burst_entity, burst_status=None, error_message=None):
    """
    Mark Burst status field.
    Also compute 'weight' for current burst: no of operations inside, estimate time on disk...

    :param burst_entity: BurstConfiguration to be updated, at finish time.
    :param burst_status: BurstConfiguration status. By default BURST_FINISHED
    :param error_message: If given, set the status to error and perpetuate the message.
    """
    # An error message forces error status; otherwise default to FINISHED.
    if error_message is not None:
        burst_status = model.BurstConfiguration.BURST_ERROR
    elif burst_status is None:
        burst_status = model.BurstConfiguration.BURST_FINISHED
    try:
        # Refresh counters and sizes on every DataTypeGroup of this burst.
        datatype_groups = dao.get_generic_entity(model.DataTypeGroup, burst_entity.id, "fk_parent_burst")
        for group in datatype_groups:
            group.count_results = dao.count_datatypes_in_group(group.id)
            group.disk_size, group.subject = dao.get_summary_for_group(group.id)
            dao.store_entity(group)
        # 1KB for each dataType, considered for operation.xml files
        operation_count = dao.get_operations_in_burst(burst_entity.id, is_count=True)
        burst_entity.disk_size = operation_count + dao.get_disk_size_for_burst(burst_entity.id)    # In KB
        burst_entity.datatypes_number = dao.count_datatypes_in_burst(burst_entity.id)
        burst_entity.workflows_number = dao.get_workflows_for_burst(burst_entity.id, is_count=True)
        burst_entity.status = burst_status
        burst_entity.error_message = error_message
        burst_entity.finish_time = datetime.now()
        dao.store_entity(burst_entity)
    except Exception:
        self.logger.exception("Could not correctly update Burst status and meta-data!")
        # Best-effort fallback so the burst does not stay in a running state.
        burst_entity.status = burst_status
        burst_entity.error_message = "Error when updating Burst Status"
        burst_entity.finish_time = datetime.now()
        dao.store_entity(burst_entity)
def mark_burst_finished(self, burst_entity, error=False, success=False, cancel=False, error_message=None):
    """
    Mark Burst status field.
    Also compute 'weight' for current burst: no of operations inside, estimate time on disk...

    :param burst_entity: BurstConfiguration to be updated, at finish time.
    :param error: When True, burst will be marked as finished with error.
    :param success: When True, burst will be marked successfully.
    :param cancel: When True, burst will be marked as user-canceled.
    """
    try:
        # Start from one 1KB unit per operation (the operation.xml files).
        op_count = dao.get_operations_in_burst(burst_entity.id, is_count=True)
        datatypes = dao.get_generic_entity(model.DataType, burst_entity.id, "fk_parent_burst")
        total_size = op_count
        group_sizes = {}
        for datatype in datatypes:
            if datatype.disk_size is not None:
                total_size += datatype.disk_size
                # Track per-DataTypeGroup sizes as well, in case of ranges.
                if datatype.fk_datatype_group:
                    group_sizes[datatype.fk_datatype_group] = group_sizes.get(datatype.fk_datatype_group, 0) + datatype.disk_size
        # When DataType Groups exist in the current Burst, refresh their counters.
        datatype_groups = dao.get_generic_entity(model.DataTypeGroup, burst_entity.id, "fk_parent_burst")
        if len(datatype_groups) > 0:
            for group in datatype_groups:
                group.count_results = dao.count_datatypes_in_group(group.id)
                group.disk_size = group_sizes.get(group.id, 0)
                dao.store_entity(group)
        # Finally update the Burst entity itself.
        burst_entity.disk_size = total_size    # In KB
        burst_entity.datatypes_number = len(datatypes)
        burst_entity.workflows_number = len(dao.get_workflows_for_burst(burst_entity.id))
        burst_entity.mark_status(success=success, error=error, cancel=cancel)
        burst_entity.error_message = error_message
        dao.store_entity(burst_entity)
    except Exception as excep:
        self.logger.error(excep)
        self.logger.exception("Could not correctly update Burst status and meta-data!")
        # Best-effort fallback: mark the burst as errored so it does not hang.
        burst_entity.mark_status(error=True)
        burst_entity.error_message = "Error when updating Burst Status"
        dao.store_entity(burst_entity)