Code example #1
    def pushTheDownloadedFileToCache(self, s3Data, selectedFileOrFolder,
                                     topicName):

        logging.info("Inside pushTheDownloadedFileToCache")

        ThreadServices.ThreadServices().pushToCacheStream(
            s3Data, selectedFileOrFolder, topicName)
Code example #2
def catch_all(path):
    transformed_request = request_preparation(request, path)
    logging.info("Transformed Request From Web %s", transformed_request)
    service = ServiceInterface.ServiceInterface(transformed_request)
    logging.info("Response  %s", service.result)
    if type(service.result) == bytes: return service.result
    return jsonify(service.result)
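
For context, catch_all reads like a Flask catch-all view. A minimal wiring
sketch follows; the route rules, HTTP method list, and app setup are
assumptions, not taken from the original:

# Sketch only: register catch_all for every path, including the root.
from flask import Flask

app = Flask(__name__)
app.add_url_rule('/', 'root', catch_all, defaults={'path': ''},
                 methods=['GET', 'POST', 'PUT', 'DELETE'])
app.add_url_rule('/<path:path>', 'catch_all', catch_all,
                 methods=['GET', 'POST', 'PUT', 'DELETE'])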
Code example #3
    def __pushFilesToTheCache(self, eachFileToBeUploaded, topicName):

        logging.info(" Thread: Inside __pushFilesToTheCache")

        if eachFileToBeUploaded["path"] == topicName:
            selectedFileOrFolder = eachFileToBeUploaded["name"]
        else:
            selectedFileOrFolder = eachFileToBeUploaded[
                "path"] + eachFileToBeUploaded["name"]

        try:
            thread = threading.Thread(target=self._pushFileToCache,
                                      args=(
                                          eachFileToBeUploaded["file"],
                                          selectedFileOrFolder,
                                          topicName,
                                      ))
            thread.daemon = True
            thread.start()
        except Exception:
            logging.critical(
                "Error in Pushing to the stream for Cache Insertion %s",
                selectedFileOrFolder)
            return False
        return True
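
Spawning one daemon thread per file is unbounded. A bounded sketch of the
same dispatch with the standard library, meant to replace the
threading.Thread block inside the method above (max_workers=4 is an
assumption):

# Sketch only: a pooled alternative to one daemon thread per file.
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=4)
executor.submit(self._pushFileToCache, eachFileToBeUploaded["file"],
                selectedFileOrFolder, topicName)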
Code example #4
    def pushToCacheStream(self, s3Data, selectedFileOrFolder, topicName):
        """ Function to submit the signup request to the kafka stream """

        producer = KafkaProducer(
            bootstrap_servers=['localhost:9092'],
            key_serializer=lambda x: x.encode('utf-8'),
            value_serializer=lambda x: dumps(x).encode('utf-8'))

        # Record preparation begins.

        data_to_be_placed_in_the_stream = {}
        data_to_be_placed_in_the_stream[
            "content"] = self.convertBtyeToExactString(s3Data)
        data_to_be_placed_in_the_stream["key"] = selectedFileOrFolder
        data_to_be_placed_in_the_stream["bucket"] = topicName

        # Record preparation ends.

        result = producer.send('cache',
                               key=self._insertCacheTask,
                               value=data_to_be_placed_in_the_stream)
        try:
            # Block until the broker acknowledges the record instead of
            # sleeping for a fixed interval; get() raises on failure.
            result.get(timeout=10)
            logging.info(
                "The record has been successfully pushed to the stream")
        except KafkaError:  # from kafka.errors
            logging.warning("Pushing to the stream has failed")
Code example #5
    def findAccessDetailsForSpecificFileorFolder(self, neededFileOrFolder):

        logging.info("Inside findAccessDetailsForSpecificFileorFolder")

        for eachFileorFolder in self.accessDetailsForFilesAndFolders:  # untested
            if eachFileorFolder["file"] == neededFileOrFolder:
                return eachFileorFolder
        # Make the "not found" case explicit rather than implicit.
        return None
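
The same lookup can be expressed with next(), which also makes the
not-found default explicit; a sketch that replaces the loop above with
unchanged behavior:

# Sketch only: replacement for the loop body inside the method above.
return next((each for each in self.accessDetailsForFilesAndFolders
             if each["file"] == neededFileOrFolder), None)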
Code example #6
    def displayCurrentFilesandFoldersForSelectedTopics(self):

        logging.info("Inside displayCurrentFilesandFoldersForSelectedTopics")

        username = self.request["param"].get(self.username)
        topicName = self.request["param"].get(self.topicName)
        if topicName is None:
            topicName = self.defaultTopicName
        return FileServerReadTaskHandlers.FileServerReadTaskHandlers(
        ).getLatestContents(username, topicName)
Code example #7
    def putTheUploadedFilesToCache(self, filesToBeUploaded, topicName):

        logging.info("Inside putTheUploadedFilesToCache")

        cacheInsertionResults = [
            self.__pushFilesToTheCache(eachFileToBeUploaded, topicName)
            for eachFileToBeUploaded in filesToBeUploaded
        ]
        return cacheInsertionResults
Code example #8
    def uploadFiles(self, filesToBeUploaded, topicName):

        logging.info("Inside uploadFiles")

        versionIds = [
            self._s3Connection.uploadObject(eachFileToBeUploaded, topicName)
            for eachFileToBeUploaded in filesToBeUploaded
        ]
        return versionIds
Code example #9
    def deleteSelectedFileOrFolders(self):
        logging.info("Inside deleteSelectedFileOrFolders")

        selectedFilesToBeDeleted = self.request["data"]
        owner = selectedFilesToBeDeleted["owner"]
        selectedFiles = selectedFilesToBeDeleted["Objects"]
        topicName = self.defaultTopicName
        return FileServerWriteTaskHandlers.FileServerWriteTaskHandlers(
        ).deleteFiles(owner, selectedFiles, topicName)
Code example #10
    def getContentForSelectedFile(self, topicName, fileName):
        logging.info("Inside getContentForSelectedFile")

        parameter = {}
        parameter["topicName"] = topicName
        parameter["key"] = fileName
        contentOfSelectedFile = requests.get(url=self._readServerUrl +
                                             "downloadSelectedFileOrFolders",
                                             params=parameter)
        return contentOfSelectedFile.text
Code example #11
    def removeUserAccessDetailsForDeletedFiles(self, owner, deletedFiles):
        logging.info("Inside removeUserAccessDetailsForDeletedFiles")

        deletedFilesOfTheOwner = {}
        deletedFilesOfTheOwner["owner"] = owner
        deletedFilesOfTheOwner["files"] = deletedFiles
        response = requests.post(url=self.__accessDataUrl +
                                 "removeUserAccessDetailsForDeletedFiles",
                                 json=deletedFilesOfTheOwner)
        return response.json()["status"]
Code example #12
    def transformationProcessPipeline(self, s3ResultToBeTransformed):

        logging.info("Inside transformationProcessPipeline")

        validInput = self._isInputValid(s3ResultToBeTransformed)
        if not validInput:
            return None
        else:
            filteredResult = self._filterValidS3Result(s3ResultToBeTransformed)
            return filteredResult
Code example #13
    def addUserAccessDetailsForFileorFolderInUserAccessManagementServer(
            self, accessRecordsToBeInserted):
        logging.info(
            "Inside addUserAccessDetailsForFileorFolderInUserAccessManagementServer"
        )

        response = requests.post(url=self.__accessDataUrl +
                                 "addUserAccessDetailForFile",
                                 json=accessRecordsToBeInserted)
        return response.json()["status"]
Code example #14
    def downloadSelectedFileOrFolders(self):

        logging.info("Inside downloadSelectedFileOrFolders")

        selectedFileOrFolder = self.request["param"].get(
            self.selectedFileOrFolder)
        topicName = self.request["param"].get(self.topicName)
        if topicName is None:
            topicName = self.defaultTopicName
        return FileServerReadTaskHandlers.FileServerReadTaskHandlers(
        ).getFileOrFolder(selectedFileOrFolder, topicName)
Code example #15
    def fetchUserAcessDataForSingleFileFromAccessManagementServer(
            self, selectedFile):
        logging.info(
            "Inside fetchUserAcessDataForSingleFileFromAccessManagementServer")

        parameter = {}
        parameter["file"] = selectedFile
        response = requests.get(url=self.__accessDataUrl +
                                "fetchUserAcessDataForSingleFileOrFolder",
                                params=parameter).json()
        return response
Code example #16
    def accessAssignmentToEachFileAndFolder(self, node, metadata):

        logging.info("Inside accessAssignmentToEachFileAndFolder")

        node["owner"] = metadata["owner"]
        for user in metadata["accessingUsers"]:
            if (user["name"] == self.userName):
                if "read" in user: node["read"] = user["read"]
                if "write" in user: node["write"] = user["write"]
                if "delete" in user: node["delete"] = user["delete"]
        return node
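
For reference, a sketch of the inputs this method expects; the field names
come from the code above, while the values are invented for illustration:

# Sketch only: illustrative inputs for accessAssignmentToEachFileAndFolder.
node = {"name": "report.txt", "children": []}
metadata = {
    "owner": "alice",
    "accessingUsers": [{"name": "bob", "read": True, "write": False}],
}
# After the call, node carries the owner plus whichever read/write/delete
# flags are granted to self.userName.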
Code example #17
    def rollbackForUploadOperation(self, topicName,
                                   filesPresentInTheSavepoint):

        logging.info("Inside rollbackForUploadOperation")

        for eachFilePresentInTheSavepoint in filesPresentInTheSavepoint:
            uploadResult = self._otherApiCallsForDifferentServers.writeOrUpdateSavepointInS3(
                topicName, eachFilePresentInTheSavepoint["key"],
                eachFilePresentInTheSavepoint["data"]["content"])
            if uploadResult is False:
                logging.warning('Error in Rollback for Upload Operation %s',
                                eachFilePresentInTheSavepoint)
Code example #18
    def deleteFiles(self, owner, selectedFiles, topicName):

        logging.info("Inside deleteFiles")
        """ This function handles the task of deleting the files in S3 """

        # Savepoint Creation Begins

        filesToBeDeleted = FileStructureTransformer.FileStructureTransformer(
        ).extractFileNamesForDeleteOperation(selectedFiles)
        folderToCreateSavepoint = FileStructureTransformer.FileStructureTransformer(
        ).extractFolderNameForSavepointCreationInDeleteOperation(
            filesToBeDeleted)

        filesToCreateSavepointExtractedFromS3 = self._s3Connection.listObjectsForFolder(
            bucketName=topicName, selectedFolder=folderToCreateSavepoint)

        filesToCreateSavepoint = FileStructureTransformer.FileStructureTransformer(
        ).transformationProcessPipeline(filesToCreateSavepointExtractedFromS3)
        savepointCreatedAndItsFiles = SavepointHandler.SavepointHandler(
        ).createSavepointForDeleteOperation(owner, filesToCreateSavepoint)
        if savepointCreatedAndItsFiles:

            recordsToBeDeleted = {}
            recordsToBeDeleted["Objects"] = selectedFiles
            s3DeletionResults = self._s3Connection.deleteObjects(
                bucketName=topicName, objects=recordsToBeDeleted)
            accessDataDeletionResults = FileMetaDataApi.FileMetaDataApi(
            ).removeUserAccessDetailsForDeletedFiles(owner, selectedFiles)

            if s3DeletionResults and accessDataDeletionResults:
                try:
                    savepointCreatedAndItsFiles.clear()
                except Exception:
                    logging.warning("Error: unable to delete Savepoint")
                return {"status": True}
            else:
                logging.info("Inside Rollback for delete operation")
                try:
                    rollBackThread = threading.Thread(
                        target=self.
                        __rollBackSavepointForDeleteOperationInBackground,
                        args=(
                            topicName,
                            savepointCreatedAndItsFiles,
                        ))
                    rollBackThread.daemon = True
                    rollBackThread.start()
                    return ({"status": False})
                except:
                    logging.warning("Error unable to Rollback")
        else:
            logging.warning("Error unable to Create Savepoint")
            return ({"status": False})
Code example #19
    def writeOrUpdateUserAccessData(self, accessRecord):
        logging.info("Inside writeOrUpdateUserAccessData")

        accessRecordsToBeInserted = {}
        accessRecordsToBeInserted["file"] = accessRecord["file"]
        accessRecordsToBeInserted["owner"] = accessRecord["owner"]
        accessRecordsToBeInserted["accessing_users"] = accessRecord[
            "accessingUsers"]
        response = requests.post(url=self.__accessDataUrl +
                                 "addUserAccessDetailForFile",
                                 json=accessRecordsToBeInserted)
        return response.json()["status"]
Code example #20
    def createSavepointForUploadOperation(self, topicName, owner,
                                          selectedFiles):

        logging.info("Inside createSavepointForUploadOperation")

        filesCreatedAtSavepointDuringUploadOperation = []
        for selectedFile in selectedFiles:
            file = self.__createSavepointDataFromS3ForEachFile(
                topicName, selectedFile)
            filesCreatedAtSavepointDuringUploadOperation.append(file)

        return filesCreatedAtSavepointDuringUploadOperation
Code example #21
    def getLatestContents(self, username, bucketName):

        logging.info("Inside getLatestContents")

        s3Connection = DataSourceFactory.DataSourceFactory().getS3Access()
        resultFromS3 = s3Connection.listObjects(bucketName)
        accessDetailsForFilesAndFolders = FileAccessMetaDataApi.FileAccessMetaDataApi(
        ).fetchUserAcessDataForFilesandFolders()
        fileStructureTransformer = FileStructureTransformer.FileStructureTransformer(
            username, bucketName, accessDetailsForFilesAndFolders)
        hierarchicalStructureForS3result = fileStructureTransformer.transformationProcessPipeline(
            resultFromS3)

        return hierarchicalStructureForS3result
Code example #22
    def uploadFilesToDesignatedFolder(self, owner, filesToBeUploaded,
                                      topicName, selectedFolder):

        logging.info("Inside uploadFilesToDesignatedFolder")

        if selectedFolder is None:
            return self.uploadFilesToRootFolder(owner, filesToBeUploaded,
                                                topicName)
        else:

            filesToCreateSavepointExtractedFromS3 = self._s3Connection.listObjectsForFolder(
                bucketName=topicName, selectedFolder=selectedFolder)
            filesToCreateSavepoint = FileStructureTransformer.FileStructureTransformer(
            ).transformationProcessPipeline(
                filesToCreateSavepointExtractedFromS3)
            savepointCreatedAndItsFiles = SavepointHandler.SavepointHandler(
            ).createSavepointForUploadOperation(topicName, owner,
                                                filesToCreateSavepoint)
            if savepointCreatedAndItsFiles:

                versionIds = self.uploadFiles(filesToBeUploaded, topicName)
                if False not in versionIds:

                    accessRecordsInsertionResults = self.accessRecordCreationForEachUploadedFiles(
                        owner, filesToBeUploaded)
                    if False not in accessRecordsInsertionResults:

                        cacheInsertionResults = self.putTheUploadedFilesToCache(
                            filesToBeUploaded, topicName)

                        if False not in cacheInsertionResults:
                            logging.info("Cache Insertion is successful")
                        else:
                            logging.warning("Cache Insertion failed")
                    else:
                        logging.warning("Access record insertion failed")
                else:

                    try:
                        rollBackThread = threading.Thread(
                            target=self.
                            __rollBackSavepointForUploadOperationInBackground,
                            args=(
                                topicName,
                                savepointCreatedAndItsFiles,
                            ))
                        rollBackThread.daemon = True
                        rollBackThread.start()
                    except Exception:
                        logging.warning("Error: unable to Rollback")

                    logging.critical("Uploading to S3 Failed")
            else:
                logging.warning("Savepoint Creation Failed")
Code example #23
    def dataStructureTransformerPipeline(self, filteredResult):
        """ Transform the filtered S3 result into a hierarchical tree. """

        logging.info("Inside dataStructureTransformerPipeline")

        transformedResult = {}
        transformedResult["name"] = filteredResult["bucketName"]
        transformedResult["children"] = []
        file_extension_regex = re.compile(
            r"([a-zA-Z0-9\s_\\.\-\(\):])+(\....)$")

        start_position = transformedResult
        previous_split = ""
        file_extension = re.compile(r"([a-zA-Z0-9\s_\\.\-\(\):])+(\..*)$")
        for i in filteredResult["bucketData"]:
            if file_extension_regex.search(
                    i["objectName"]) and "/" not in i["objectName"]:
                transformedResult["children"].append(self.leafAssignemnt(i))
            elif i["objectName"].endswith('/') or file_extension.search(
                    i["objectName"]):
                if file_extension.search(i["objectName"]):
                    splitted_root = list(
                        filter(None, i["objectName"].split("/")))
                    del splitted_root[-1]
                else:
                    splitted_root = list(
                        filter(None, i["objectName"].split("/")))
                if previous_split not in splitted_root:
                    start_position = transformedResult
                    previous_split = self.longestSubstringFinder(
                        previous_split, i["objectName"])

                for each_split in splitted_root:
                    start_position = self.pathFinder(start_position,
                                                     each_split)
                    if (start_position["name"]) == self.bucketName:
                        previous_split = ""
                if file_extension.search(i["objectName"]):
                    start_position["children"].append(self.leafAssignemnt(i))
                else:
                    start_position["children"].append(
                        self.branchAssignment(
                            i["objectName"],
                            i["objectName"].replace(previous_split, "",
                                                    1).replace("/",
                                                               "").strip()))
                    previous_split = i["objectName"]
        logging.debug("Transformed result %s", transformedResult)
        return transformedResult
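
A sketch of the shapes flowing through this pipeline; the keys match
filterValidS3Result (code example #30), the values are invented, and the
other per-object keys are trimmed:

# Sketch only: illustrative input and result for the tree builder.
filteredResult = {
    "bucketName": "topic",
    "bucketData": [{"objectName": "docs/"}, {"objectName": "docs/a.txt"}],
}
# dataStructureTransformerPipeline nests a.txt under the docs branch:
# {"name": "topic", "children": [{"name": "docs", "children": [...]}]}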
Code example #24
    def uploadFilesToRootFolder(self, owner, filesToBeUploaded, topicName):

        logging.info("Inside uploadFilesToRootFolder")

        versionIds = self.uploadFiles(filesToBeUploaded, topicName)
        if False not in versionIds:
            accessRecordsInsertionResults = self.accessRecordCreationForEachUploadedFiles(
                owner, filesToBeUploaded)
            if False not in accessRecordsInsertionResults:
                cacheInsertionResults = self.putTheUploadedFilesToCache(
                    filesToBeUploaded, topicName)
                if False not in cacheInsertionResults:
                    logging.info("Cache Insertion is successful")
                else:
                    logging.warning("Cache Insertion failed")
            else:
                logging.warning("Access record insertion failed")
        else:
            logging.critical("Uploading to S3 Failed")
Code example #25
    def createSavepointForDeleteOperation(self, owner, selectedFiles):

        logging.info("Inside createSavepointForDeleteOperation")

        filesCreatedAtSavepointDuringDeleteOperation = []
        for selectedFile in selectedFiles:

            file = self.__createSavepointDataFromS3ForEachFile(
                self.defaultTopicName, selectedFile)
            file["data"][
                "access"] = self._fileMetaDataApi.fetchUserAcessDataForSingleFileFromAccessManagementServer(
                    selectedFile)
            filesCreatedAtSavepointDuringDeleteOperation.append(file)

        return filesCreatedAtSavepointDuringDeleteOperation
Code example #26
    def transformationProcessPipeline(self, s3ResultToBeTransformed):

        logging.info("Inside transformationProcessPipeline")

        validInput = self.checkInput(s3ResultToBeTransformed)
        if not validInput:
            return self.transformInvalidInput(self.bucketName)
        else:

            # transformation begins

            filteredResult = self.filterValidS3Result(s3ResultToBeTransformed)
            transformedResult = self.dataStructureTransformerPipeline(
                filteredResult)
            return transformedResult
Code example #27
    def rollBackforDeleteOperation(self, topicName,
                                   filesPresentInTheSavepoint):

        logging.info("Inside rollBackforDeleteOperation")

        for eachBackupFile in filesPresentInTheSavepoint:

            uploadResult = self._otherApiCallsForDifferentServers.writeOrUpdateSavepointInS3(
                topicName, eachBackupFile["key"],
                eachBackupFile["data"]["content"])
            insertOrUpdateResult = self._fileMetaDataApi.writeOrUpdateUserAccessData(
                eachBackupFile["data"]["access"])
            # Warn when either the S3 re-upload or the access-data restore failed.
            if not (uploadResult and insertOrUpdateResult):
                logging.warning('Error in Rollback for Delete Operation %s',
                                eachBackupFile)
Code example #28
def _insertIntoCache(recordsToBeInserted):
    logging.info("Consumer: Inside insertIntoCache")

    redisConnection = DataSourceFactory.DataSourceFactory().getRedisAccess(
        role=_redisRole)

    keyToBeInserted = recordsToBeInserted[
        "bucket"] + '/' + recordsToBeInserted["key"]

    print("keyToBeInserted------->", keyToBeInserted)

    insertionResult = redisConnection.insertObject(
        keyToBeInserted, recordsToBeInserted["content"])
    # Assumption: insertObject wraps a Redis SET, which returns True (== 1)
    # on success, so log an error on any other result rather than on success.
    if insertionResult == 1:
        logging.info("Record inserted into the cache %s", keyToBeInserted)
    else:
        logging.error("Error in Cache Insertion %s", recordsToBeInserted)
Code example #29
    def accessRecordCreationForEachUploadedFiles(self, owner,
                                                 filesToBeUploaded):

        logging.info("Inside accessRecordCreationForEachUploadedFiles")

        accessRecordsToBeInserted = [
            self.__createAccessRecord(owner, eachFileToBeUploaded)
            for eachFileToBeUploaded in filesToBeUploaded
        ]

        accessRecordsInsertionResults = [
            FileMetaDataApi.FileMetaDataApi().
            addUserAccessDetailsForFileorFolderInUserAccessManagementServer(
                eachAccessRecordsToBeInserted)
            for eachAccessRecordsToBeInserted in accessRecordsToBeInserted
        ]
        return accessRecordsInsertionResults
Code example #30
    def filterValidS3Result(self, validS3Result):

        logging.info("Inside filterValidS3Result")

        filteredResult = {}
        filteredResult["bucketName"] = validS3Result["Name"]
        filteredResult["bucketData"] = []

        for eachContent in validS3Result["Contents"]:
            requiredContentInformation = {}
            requiredContentInformation["objectName"] = eachContent["Key"]
            requiredContentInformation["lastModified"] = str(
                eachContent["LastModified"])
            requiredContentInformation["size"] = eachContent["Size"]
            requiredContentInformation["owner"] = eachContent["Owner"][
                "DisplayName"]
            filteredResult["bucketData"].append(requiredContentInformation)
        return filteredResult
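
For reference, validS3Result here matches the shape of a boto3
list_objects response: Name at the top, plus Contents entries carrying
Key, LastModified, Size, and Owner.DisplayName. An illustrative input with
invented values:

# Sketch only: a minimal input in the boto3 list_objects response shape.
from datetime import datetime

validS3Result = {
    "Name": "my-topic",
    "Contents": [{
        "Key": "docs/a.txt",
        "LastModified": datetime(2020, 1, 1),
        "Size": 42,
        "Owner": {"DisplayName": "alice"},
    }],
}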