def pushToCacheStream(self, s3Data, selectedFileOrFolder, topicName):
    """ Function to submit the fetched S3 record to the Kafka cache stream """
    from kafka.errors import KafkaError  # normally imported at module level
    producer = KafkaProducer(
        bootstrap_servers=['localhost:9092'],
        key_serializer=lambda x: x.encode('utf-8'),
        value_serializer=lambda x: dumps(x).encode('utf-8'))
    # Record preparation begins..
    data_to_be_placed_in_the_stream = {
        "content": self.convertBtyeToExactString(s3Data),
        "key": selectedFileOrFolder,
        "bucket": topicName,
    }
    # Record preparation ends...
    result = producer.send('cache',
                           key=self._insertCacheTask,
                           value=data_to_be_placed_in_the_stream)
    try:
        # Block until the broker acknowledges the record instead of
        # sleeping for a fixed interval and polling is_done.
        result.get(timeout=10)
        logging.info(
            "The record has been successfully pushed to the stream")
    except KafkaError:
        logging.warning("Pushing to the stream has failed")
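# A minimal consumer-side sketch for the 'cache' topic used above; the
# deserializers below mirror the producer configuration, but the whole
# snippet is an illustrative assumption, not part of this module:
#
#     from json import loads
#     from kafka import KafkaConsumer
#
#     consumer = KafkaConsumer(
#         'cache',
#         bootstrap_servers=['localhost:9092'],
#         key_deserializer=lambda x: x.decode('utf-8'),
#         value_deserializer=lambda x: loads(x.decode('utf-8')))
#     for message in consumer:
#         # Each value carries the "content", "key" and "bucket" fields
#         # prepared by pushToCacheStream.
#         print(message.key, message.value["bucket"])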
def _deleteTheCreatedSavepoint(self, filesToCreateSavepoint):
    """ Deletes the created savepoint on a background daemon thread """
    try:
        deleteSavepointThread = threading.Thread(
            target=self.__deleteSavepointInBackground,
            args=(filesToCreateSavepoint, ))
        deleteSavepointThread.daemon = True
        deleteSavepointThread.start()
    except Exception:
        logging.warning("Error: unable to delete the savepoint")
def rollbackForUploadOperation(self, topicName, filesPresentInTheSavepoint):
    """ Restores every file captured in the savepoint back to S3 """
    logging.info("Inside rollbackForUploadOperation")
    for eachFilePresentInTheSavepoint in filesPresentInTheSavepoint:
        uploadResult = self._otherApiCallsForDifferentServers.writeOrUpdateSavepointInS3(
            topicName, eachFilePresentInTheSavepoint["key"],
            eachFilePresentInTheSavepoint["data"]["content"])
        if not uploadResult:
            logging.warning('Error in rollback for upload operation %s',
                            eachFilePresentInTheSavepoint)
def uploadFilesToDesignatedFolder(self, owner, filesToBeUploaded, topicName,
                                  selectedFolder):
    """ Uploads files into the selected folder, creating a savepoint first
    so the operation can be rolled back if the upload fails """
    logging.info("Inside uploadFilesToDesignatedFolder")
    if selectedFolder is None:
        return self.uploadFilesToRootFolder(owner, filesToBeUploaded,
                                            topicName)
    # Snapshot the current contents of the folder so they can be restored.
    filesToCreateSavepointExtractedFromS3 = self._s3Connection.listObjectsForFolder(
        bucketName=topicName, selectedFolder=selectedFolder)
    filesToCreateSavepoint = FileStructureTransformer.FileStructureTransformer(
    ).transformationProcessPipeline(filesToCreateSavepointExtractedFromS3)
    savepointCreatedAndItsFiles = SavepointHandler.SavepointHandler(
    ).createSavepointForUploadOperation(topicName, owner,
                                        filesToCreateSavepoint)
    if not savepointCreatedAndItsFiles:
        logging.warning("Savepoint creation failed")
        return
    versionIds = self.uploadFiles(filesToBeUploaded, topicName)
    if False in versionIds:
        # At least one upload failed: restore the savepoint in the background.
        try:
            rollBackThread = threading.Thread(
                target=self.__rollBackSavepointForUploadOperationInBackground,
                args=(topicName, savepointCreatedAndItsFiles, ))
            rollBackThread.daemon = True
            rollBackThread.start()
        except Exception:
            logging.warning("Error: unable to roll back")
        logging.critical("Uploading to S3 failed")
        return
    accessRecordsInsertionResults = self.accessRecordCreationForEachUploadedFiles(
        owner, filesToBeUploaded)
    if False in accessRecordsInsertionResults:
        logging.warning("Access record creation failed")
        return
    cacheInsertionResults = self.putTheUploadedFilesToCache(
        filesToBeUploaded, topicName)
    if False not in cacheInsertionResults:
        logging.info("Cache insertion is successful")
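# __rollBackSavepointForUploadOperationInBackground is referenced above but
# not defined in this section; a plausible sketch, assuming it simply
# delegates to rollbackForUploadOperation so the restore runs off the
# request thread:
#
#     def __rollBackSavepointForUploadOperationInBackground(
#             self, topicName, filesPresentInTheSavepoint):
#         self.rollbackForUploadOperation(topicName,
#                                         filesPresentInTheSavepoint)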
def rollBackforDeleteOperation(self, topicName, filesPresentInTheSavepoint):
    """ Restores both the file contents and the user-access metadata for
    every file captured in the savepoint """
    logging.info("Inside rollBackforDeleteOperation")
    for eachBackupFile in filesPresentInTheSavepoint:
        uploadResult = self._otherApiCallsForDifferentServers.writeOrUpdateSavepointInS3(
            topicName, eachBackupFile["key"],
            eachBackupFile["data"]["content"])
        insertOrUpdateResult = self._fileMetaDataApi.writeOrUpdateUserAccessData(
            eachBackupFile["data"]["access"])
        # Flag the file if either restoration step failed.
        if not (uploadResult and insertOrUpdateResult):
            logging.warning('Error in rollback for delete operation %s',
                            eachBackupFile)
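# Taken together, the two rollback paths imply that each savepoint entry has
# the shape sketched below; the example values are hypothetical:
#
#     {
#         "key": "reports/2021/summary.pdf",   # S3 object key to restore
#         "data": {
#             "content": "...file body...",    # written back to S3
#             "access": {...},                 # user-access metadata record
#         },
#     }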
def uploadFileOrFolder(self):
    """ Entry point for an upload request: spawns a background thread that
    carries out the upload """
    logging.info("Inside uploadFileOrFolder")
    dataToBeUploaded = self.request["data"]
    owner = dataToBeUploaded["owner"]
    filesToBeUploaded = dataToBeUploaded["data"]
    selectedFolder = dataToBeUploaded["selectedFolder"]
    topicName = self.defaultTopicName
    try:
        uploadInitializationThread = threading.Thread(
            target=self._initiateUploading,
            args=(owner, filesToBeUploaded, topicName, selectedFolder, ))
        uploadInitializationThread.daemon = True
        uploadInitializationThread.start()
        return {"status": True}
    except Exception:
        logging.warning("Error: unable to start the upload thread")
        return {"status": False}
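# A sketch of the request payload uploadFileOrFolder expects, inferred from
# the field lookups above; the example values are hypothetical:
#
#     self.request = {
#         "data": {
#             "owner": "alice",                   # requesting user
#             "data": [...],                      # files to upload
#             "selectedFolder": "reports/2021/",  # or None for the root folder
#         }
#     }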
def deleteFiles(self, owner, selectedFiles, topicName):
    """ This function handles the task of deleting the files in S3 """
    logging.info("Inside deleteFiles")
    # Savepoint creation begins
    filesToBeDeleted = FileStructureTransformer.FileStructureTransformer(
    ).extractFileNamesForDeleteOperation(selectedFiles)
    folderToCreateSavepoint = FileStructureTransformer.FileStructureTransformer(
    ).extractFolderNameForSavepointCreationInDeleteOperation(
        filesToBeDeleted)
    filesToCreateSavepointExtractedFromS3 = self._s3Connection.listObjectsForFolder(
        bucketName=topicName, selectedFolder=folderToCreateSavepoint)
    filesToCreateSavepoint = FileStructureTransformer.FileStructureTransformer(
    ).transformationProcessPipeline(filesToCreateSavepointExtractedFromS3)
    savepointCreatedAndItsFiles = SavepointHandler.SavepointHandler(
    ).createSavepointForDeleteOperation(owner, filesToCreateSavepoint)
    if not savepointCreatedAndItsFiles:
        logging.warning("Error: unable to create savepoint")
        return {"status": False}
    recordsToBeDeleted = {"Objects": selectedFiles}
    s3DeletionResults = self._s3Connection.deleteObjects(
        bucketName=topicName, objects=recordsToBeDeleted)
    accessDataDeletionResults = FileMetaDataApi.FileMetaDataApi(
    ).removeUserAccessDetailsForDeletedFiles(owner, selectedFiles)
    if s3DeletionResults and accessDataDeletionResults:
        # Both deletions succeeded, so the savepoint is no longer needed.
        try:
            savepointCreatedAndItsFiles.clear()
        except Exception:
            logging.warning("Error: unable to delete the savepoint")
        return {"status": True}
    # Either deletion failed: restore the savepoint in the background.
    logging.info("Inside rollback for delete operation")
    try:
        rollBackThread = threading.Thread(
            target=self.__rollBackSavepointForDeleteOperationInBackground,
            args=(topicName, savepointCreatedAndItsFiles, ))
        rollBackThread.daemon = True
        rollBackThread.start()
    except Exception:
        logging.warning("Error: unable to roll back")
    return {"status": False}
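# deleteFiles wraps selectedFiles as {"Objects": selectedFiles}, which mirrors
# the shape boto3's delete_objects expects for its Delete parameter; assuming
# _s3Connection follows that convention, selectedFiles would look like:
#
#     selectedFiles = [
#         {"Key": "reports/2021/summary.pdf"},
#         {"Key": "reports/2021/raw.csv"},
#     ]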