def snapshotall_seq(self, paras):
    """Snapshot every blob in *paras.blobs*, one at a time.

    Returns a 5-tuple:
        (snapshot_result, snapshot_info_array, all_failed,
         is_inconsistent, exceptOccurred)
    where all_failed flips to False as soon as one snapshot succeeds and
    exceptOccurred is True only when the whole loop aborted on an exception.
    """
    exceptOccurred = False
    self.logger.log("doing snapshotall now in sequence...")
    snapshot_result = SnapshotResult()
    snapshot_info_array = []
    all_failed = True
    is_inconsistent = False
    try:
        blobs = paras.blobs
        if blobs is None:
            # Nothing to snapshot; report the untouched defaults.
            self.logger.log("the blobs are None")
            return snapshot_result, snapshot_info_array, all_failed, is_inconsistent, exceptOccurred
        for blob_index, blob in enumerate(blobs):
            blobUri = blob.split("?")[0]
            self.logger.log("index: " + str(blob_index) + " blobUri: " + str(blobUri))
            # Placeholder entry; filled in below from the indexer object.
            snapshot_info_array.append(Status.SnapshotInfoObj(False, blobUri, None))
            snapshotError, snapshot_info_indexer = self.snapshot_seq(blob, blob_index, paras.backup_metadata)
            if snapshotError.errorcode != CommonVariables.success:
                snapshot_result.errors.append(snapshotError)
            # update snapshot_info_array element properties from snapshot_info_indexer object
            self.get_snapshot_info(snapshot_info_indexer, snapshot_info_array[blob_index])
            if snapshot_info_array[blob_index].isSuccessful == True:
                all_failed = False
        return snapshot_result, snapshot_info_array, all_failed, is_inconsistent, exceptOccurred
    except Exception as e:
        self.logger.log("Unable to perform sequential snapshot with exception" + str(e))
        exceptOccurred = True
        return snapshot_result, snapshot_info_array, all_failed, is_inconsistent, exceptOccurred
def get_snapshot_info(self, responseBody):
    """Deserialize a snapshot REST response body into SnapshotInfoObj entries.

    Parameters:
        responseBody: JSON string whose top-level 'snapshotInfo' key holds a
            list of objects with 'isSuccessful', 'snapshotUri',
            'errorMessage' and 'statusCode' fields. May be None.

    Returns:
        (snapshotinfo_array, all_failed): the parsed entries, and a flag that
        is False as soon as any entry reports success. Deserialization errors
        are logged and the partial results returned.
    """
    snapshotinfo_array = []
    all_failed = True
    try:
        if responseBody is not None:
            json_reponseBody = json.loads(responseBody)
            for snapshot_info in json_reponseBody['snapshotInfo']:
                snapshotinfo_array.append(
                    Status.SnapshotInfoObj(snapshot_info['isSuccessful'],
                                           snapshot_info['snapshotUri'],
                                           snapshot_info['errorMessage']))
                self.logger.log(
                    "IsSuccessful:{0}, SnapshotUri:{1}, ErrorMessage:{2}, StatusCode:{3}"
                    .format(snapshot_info['isSuccessful'],
                            snapshot_info['snapshotUri'],
                            snapshot_info['errorMessage'],
                            snapshot_info['statusCode']))
                # BUG FIX: json.loads turns a JSON boolean into Python True,
                # which the old string-only comparison ("== 'true'") never
                # matched, leaving all_failed stuck at True. Accept both the
                # boolean and the legacy string form (backward compatible).
                if snapshot_info['isSuccessful'] in (True, 'true'):
                    all_failed = False
    except Exception as e:
        errorMsg = " deserialization of response body failed with error: %s, stack trace: %s" % (
            str(e), traceback.format_exc())
        self.logger.log(errorMsg)
    return snapshotinfo_array, all_failed
def snapshotall_seq(self, paras, freezer, thaw_done, g_fsfreeze_on):
    """Snapshot every blob sequentially, then thaw the filesystems if needed.

    Parameters:
        paras: request object carrying .blobs and .backup_metadata
        freezer: object providing thaw_safe() to unfreeze filesystems
        thaw_done: True when the thaw has already been performed by a caller
        g_fsfreeze_on: whether fsfreeze is active for this backup

    Returns an 8-tuple:
        (snapshot_result, snapshot_info_array, all_failed, exceptOccurred,
         is_inconsistent, thaw_done_local, unable_to_sleep,
         all_snapshots_failed)
    """
    exceptOccurred = False
    self.logger.log("doing snapshotall now in sequence...")
    snapshot_result = SnapshotResult()
    snapshot_info_array = []
    all_failed = True
    is_inconsistent = False
    thaw_done_local = thaw_done
    unable_to_sleep = False
    all_snapshots_failed = False
    try:
        blobs = paras.blobs
        if blobs is not None:
            blob_index = 0
            for blob in blobs:
                blobUri = blob.split("?")[0]
                self.logger.log("index: " + str(blob_index) + " blobUri: " + str(blobUri))
                snapshot_info_array.append(Status.SnapshotInfoObj(False, blobUri, None))
                snapshotError, snapshot_info_indexer = self.snapshot_seq(blob, blob_index, paras.backup_metadata)
                if (snapshotError.errorcode != CommonVariables.success):
                    snapshot_result.errors.append(snapshotError)
                # update snapshot_info_array element properties from snapshot_info_indexer object
                self.get_snapshot_info(snapshot_info_indexer, snapshot_info_array[blob_index])
                if (snapshot_info_array[blob_index].isSuccessful == True):
                    all_failed = False
                blob_index = blob_index + 1
            all_snapshots_failed = all_failed
            self.logger.log("Setting all_snapshots_failed to " + str(all_snapshots_failed))
            thaw_result = None
            if g_fsfreeze_on and thaw_done_local == False:
                time_before_thaw = datetime.datetime.now()
                thaw_result, unable_to_sleep = freezer.thaw_safe()
                time_after_thaw = datetime.datetime.now()
                HandlerUtil.HandlerUtility.add_to_telemetery_data(
                    "ThawTime", str(time_after_thaw - time_before_thaw))
                thaw_done_local = True
                self.logger.log('T:S thaw result ' + str(thaw_result))
            # Thaw errors with no prior snapshot errors mean the backup state
            # is inconsistent: snapshots succeeded but the FS never thawed.
            if (thaw_result is not None and len(thaw_result.errors) > 0
                    and (snapshot_result is None or len(snapshot_result.errors) == 0)):
                snapshot_result.errors.append(thaw_result.errors)
                is_inconsistent = True
            return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
        else:
            self.logger.log("the blobs are None")
            # BUG FIX: this branch previously returned only 7 values (missing
            # all_snapshots_failed), so callers unpacking the documented
            # 8-tuple would raise ValueError when blobs was None.
            return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
    except Exception as e:
        errorMsg = " Unable to perform sequential snapshot with error: %s, stack trace: %s" % (
            str(e), traceback.format_exc())
        self.logger.log(errorMsg)
        exceptOccurred = True
        return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
def update_snapshotinfoarray(self, blob_snapshot_info_array):
    """Copy blob snapshot info entries into a fresh SnapshotInfoObj list.

    None entries are skipped; a None or empty input yields an empty list.
    """
    self.logger.log('updating snapshot info array from blob snapshot info')
    if not blob_snapshot_info_array:
        return []
    return [
        Status.SnapshotInfoObj(entry.isSuccessful, entry.snapshotUri, entry.errorMessage)
        for entry in blob_snapshot_info_array
        if entry is not None
    ]
def snapshotall_parallel(self, paras, freezer, thaw_done, g_fsfreeze_on):
    """Snapshot every blob in parallel worker processes, then thaw.

    One mp.Process per blob runs self.snapshot; results come back through
    multiprocessing queues. After all workers join, the filesystem is thawed
    (if fsfreeze is on and not already thawed) and worker output is drained.

    Parameters:
        paras: request object carrying .blobs and .backup_metadata
        freezer: object providing thaw_safe() to unfreeze filesystems
        thaw_done: True when the thaw has already been performed by a caller
        g_fsfreeze_on: whether fsfreeze is active for this backup

    Returns an 8-tuple:
        (snapshot_result, snapshot_info_array, all_failed, exceptOccurred,
         is_inconsistent, thaw_done_local, unable_to_sleep,
         all_snapshots_failed)
    """
    self.logger.log("doing snapshotall now in parallel...")
    snapshot_result = SnapshotResult()
    snapshot_info_array = []
    all_failed = True
    exceptOccurred = False
    is_inconsistent = False
    thaw_done_local = thaw_done
    unable_to_sleep = False
    all_snapshots_failed = False
    try:
        mp_jobs = []
        global_logger = mp.Queue()
        global_error_logger = mp.Queue()
        snapshot_result_error = mp.Queue()
        snapshot_info_indexer_queue = mp.Queue()
        time_before_snapshot_start = datetime.datetime.now()
        blobs = paras.blobs
        if blobs is not None:
            # initialize snapshot_info_array and one worker process per blob
            mp_jobs = []
            blob_index = 0
            for blob in blobs:
                blobUri = blob.split("?")[0]
                self.logger.log("index: " + str(blob_index) + " blobUri: " + str(blobUri))
                snapshot_info_array.append(Status.SnapshotInfoObj(False, blobUri, None))
                mp_jobs.append(
                    mp.Process(target=self.snapshot,
                               args=(blob, blob_index, paras.backup_metadata,
                                     snapshot_result_error,
                                     snapshot_info_indexer_queue,
                                     global_logger, global_error_logger)))
                blob_index = blob_index + 1
            for job in mp_jobs:
                job.start()
            time_after_snapshot_start = datetime.datetime.now()
            # NOTE(review): timeout is read here but never used in this
            # variant (an older variant compared it against the snapshot
            # start duration) — confirm whether the check was dropped
            # intentionally.
            timeout = self.get_value_from_configfile('timeout')
            if timeout == None:
                timeout = 60
            for job in mp_jobs:
                job.join()
            thaw_result = None
            if g_fsfreeze_on and thaw_done_local == False:
                time_before_thaw = datetime.datetime.now()
                thaw_result, unable_to_sleep = freezer.thaw_safe()
                time_after_thaw = datetime.datetime.now()
                HandlerUtil.HandlerUtility.add_to_telemetery_data(
                    "ThawTime", str(time_after_thaw - time_before_thaw))
                thaw_done_local = True
                self.logger.log('T:S thaw result ' + str(thaw_result))
            # A thaw failure with no snapshot errors leaves the backup in an
            # inconsistent state: bail out before draining worker queues.
            if (thaw_result is not None and len(thaw_result.errors) > 0
                    and (snapshot_result is None or len(snapshot_result.errors) == 0)):
                is_inconsistent = True
                snapshot_result.errors.append(thaw_result.errors)
                return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
            self.logger.log('end of snapshot process')
            logging = [global_logger.get() for job in mp_jobs]
            self.logger.log(str(logging))
            error_logging = [global_error_logger.get() for job in mp_jobs]
            self.logger.log(error_logging, False, 'Error')
            if not snapshot_result_error.empty():
                results = [snapshot_result_error.get() for job in mp_jobs]
                for result in results:
                    if (result.errorcode != CommonVariables.success):
                        snapshot_result.errors.append(result)
            if not snapshot_info_indexer_queue.empty():
                snapshot_info_indexers = [snapshot_info_indexer_queue.get() for job in mp_jobs]
                for snapshot_info_indexer in snapshot_info_indexers:
                    # update snapshot_info_array element properties from snapshot_info_indexer object
                    self.get_snapshot_info(snapshot_info_indexer,
                                           snapshot_info_array[snapshot_info_indexer.index])
                    if (snapshot_info_array[snapshot_info_indexer.index].isSuccessful == True):
                        all_failed = False
                    self.logger.log(
                        "index: " + str(snapshot_info_indexer.index) +
                        " blobSnapshotUri: " +
                        str(snapshot_info_array[snapshot_info_indexer.index].snapshotUri))
            all_snapshots_failed = all_failed
            self.logger.log("Setting all_snapshots_failed to " + str(all_snapshots_failed))
            return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
        else:
            self.logger.log("the blobs are None")
            # BUG FIX: this branch previously returned only 7 values (missing
            # all_snapshots_failed), so callers unpacking the documented
            # 8-tuple would raise ValueError when blobs was None.
            return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
    except Exception as e:
        errorMsg = " Unable to perform parallel snapshot with error: %s, stack trace: %s" % (
            str(e), traceback.format_exc())
        self.logger.log(errorMsg)
        exceptOccurred = True
        return snapshot_result, snapshot_info_array, all_failed, exceptOccurred, is_inconsistent, thaw_done_local, unable_to_sleep, all_snapshots_failed
def snapshotall_parallel(self, paras):
    """Snapshot every blob in parallel worker processes.

    One mp.Process per blob runs self.snapshot; results come back through
    multiprocessing queues. If starting all workers takes longer than the
    configured timeout, the result is flagged inconsistent.

    Parameters:
        paras: request object carrying .blobs and .backup_metadata

    Returns a 5-tuple:
        (snapshot_result, snapshot_info_array, all_failed, is_inconsistent,
         exceptOccurred)
    """
    self.logger.log("doing snapshotall now in parallel...")
    snapshot_result = SnapshotResult()
    snapshot_info_array = []
    all_failed = True
    is_inconsistent = False
    exceptOccurred = False
    try:
        mp_jobs = []
        global_logger = mp.Queue()
        global_error_logger = mp.Queue()
        snapshot_result_error = mp.Queue()
        snapshot_info_indexer_queue = mp.Queue()
        time_before_snapshot_start = datetime.datetime.now()
        blobs = paras.blobs
        if blobs is not None:
            # initialize snapshot_info_array and one worker process per blob
            mp_jobs = []
            blob_index = 0
            for blob in blobs:
                blobUri = blob.split("?")[0]
                self.logger.log("index: " + str(blob_index) + " blobUri: " + str(blobUri))
                snapshot_info_array.append(Status.SnapshotInfoObj(False, blobUri, None))
                mp_jobs.append(
                    mp.Process(target=self.snapshot,
                               args=(blob, blob_index, paras.backup_metadata,
                                     snapshot_result_error,
                                     snapshot_info_indexer_queue,
                                     global_logger, global_error_logger)))
                blob_index = blob_index + 1
            for job in mp_jobs:
                job.start()
            time_after_snapshot_start = datetime.datetime.now()
            timeout = self.get_value_from_configfile('timeout')
            if timeout == None:
                timeout = 60
            # BUG FIX: was int(timeout-1), which raises TypeError when the
            # config value is a numeric string (the subtraction runs before
            # the int conversion) and that error was silently swallowed by
            # the broad except below. Convert first, then subtract.
            if (time_after_snapshot_start - time_before_snapshot_start) > datetime.timedelta(seconds=int(timeout) - 1):
                is_inconsistent = True
            for job in mp_jobs:
                job.join()
            self.logger.log('end of snapshot process')
            logging = [global_logger.get() for job in mp_jobs]
            self.logger.log(str(logging))
            error_logging = [global_error_logger.get() for job in mp_jobs]
            self.logger.log(error_logging, False, 'Error')
            if not snapshot_result_error.empty():
                results = [snapshot_result_error.get() for job in mp_jobs]
                for result in results:
                    if (result.errorcode != CommonVariables.success):
                        snapshot_result.errors.append(result)
            if not snapshot_info_indexer_queue.empty():
                snapshot_info_indexers = [snapshot_info_indexer_queue.get() for job in mp_jobs]
                for snapshot_info_indexer in snapshot_info_indexers:
                    # update snapshot_info_array element properties from snapshot_info_indexer object
                    self.get_snapshot_info(snapshot_info_indexer,
                                           snapshot_info_array[snapshot_info_indexer.index])
                    if (snapshot_info_array[snapshot_info_indexer.index].isSuccessful == True):
                        all_failed = False
                    self.logger.log(
                        "index: " + str(snapshot_info_indexer.index) +
                        " blobSnapshotUri: " +
                        str(snapshot_info_array[snapshot_info_indexer.index].snapshotUri))
            return snapshot_result, snapshot_info_array, all_failed, is_inconsistent, exceptOccurred
        else:
            self.logger.log("the blobs are None")
            return snapshot_result, snapshot_info_array, all_failed, is_inconsistent, exceptOccurred
    except Exception as e:
        self.logger.log("Unable to perform parallel snapshot" + str(e))
        exceptOccurred = True
        return snapshot_result, snapshot_info_array, all_failed, is_inconsistent, exceptOccurred