Example #1
	def async_file_upload(self, file_name):
		# Asynchronously uploads a file to InfiniDrive after it has been received and cached by the FTP server.
		print('Starting asynchronous upload of ' + str(file_name) + '.')

		# Get Drive service.
		drive_connect = drive_api.get_service()

		# Get directory ID.
		dir_id = drive_api.get_file_id_from_name(drive_connect, file_name)

		# Get a list of the fragments that currently make up the file. If this is a new upload, it should come back empty.
		orig_fragments = drive_api.get_files_list_from_folder(drive_connect, dir_id)

		# Set the chunk size for reading files to 10223999 bytes (about 9.75 MiB).
		read_chunk_sizes = 10223999

		# Doc number
		doc_num = 1

		# Used to keep track of the numbers for fragments that have failed uploads.
		failed_fragments = set()

		# Iterate through file in chunks.
		infile = open('ftp_upload_cache/' + str(file_name), 'rb')

		# Read an initial chunk from the file.
		file_bytes = infile.read(read_chunk_sizes)

		# Keep looping until no more data is read.
		while file_bytes:
			if doc_num <= len(orig_fragments):
				# A remote fragment is present, so update it.
				upload_handler.handle_update_fragment(drive_api, orig_fragments[doc_num-1], file_bytes, drive_connect, doc_num, self.debug_log)
			else:
				# Process the fragment and upload it to Google Drive.
				upload_handler.handle_upload_fragment(drive_api, file_bytes, drive_connect, dir_id, doc_num, failed_fragments, self.debug_log)

			# Increment doc_num for the next Word document and read the next chunk of data.
			doc_num = doc_num + 1
			file_bytes = infile.read(read_chunk_sizes)

			# Run garbage collection. Hopefully, this will prevent process terminations by the operating system on memory-limited devices such as the Raspberry Pi.
			gc.collect()

		infile.close()

		# If an update took place and the new file had fewer fragments than the previous file, delete any leftover fragments from the previous upload.
		doc_num = doc_num - 1
		while doc_num < len(orig_fragments):
			drive_api.delete_file_by_id(drive_connect, orig_fragments[doc_num]['id'])
			doc_num = doc_num + 1

		# Process fragment upload failures
		upload_handler.process_failed_fragments(drive_api, failed_fragments, dir_id, self.debug_log)

		# Delete the local cache of the file.
		remove('ftp_upload_cache/' + str(file_name))

		# Report completion of the asynchronous upload.
		print('Asynchronous upload of ' + str(file_name) + ' complete.')
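
For reference, the chunked-read loop in Example #1 can be reduced to the sketch below. The names `iter_chunks`, `upload_chunk`, and the cache file path are hypothetical and not part of InfiniDrive; they only illustrate the fixed 10223999-byte fragment size used above.

CHUNK_SIZE = 10223999  # bytes per fragment (about 9.75 MiB)

def upload_chunk(doc_num, chunk):
    # Placeholder for the handle_update_fragment / handle_upload_fragment branch above.
    print('fragment', doc_num, '-', len(chunk), 'bytes')

def iter_chunks(path, chunk_size=CHUNK_SIZE):
    # Yield successive fixed-size chunks from a binary file.
    with open(path, 'rb') as infile:
        while True:
            chunk = infile.read(chunk_size)
            if not chunk:
                break
            yield chunk

# 'ftp_upload_cache/example.bin' is a hypothetical cached file.
for doc_num, chunk in enumerate(iter_chunks('ftp_upload_cache/example.bin'), start=1):
    upload_chunk(doc_num, chunk)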
Example #2
    def update(self, file_name=None, file_path=None):
        # If no file name or file path is set, use the command line arguments.
        if file_name is None and file_path is None:
            file_name = sys.argv[2]
            file_path = sys.argv[3]

        # Get Drive service.
        driveConnect = drive_api.get_service()

        # Check if a remote file with the given name exists. If one does not, print an error message and return.
        if not drive_api.file_with_name_exists(driveConnect, file_name):
            print('Remote file with name ' + file_name + ' does not exist.')
            return

        # Get directory ID.
        dirId = drive_api.get_file_id_from_name(driveConnect, file_name)

        # Get a list of the fragments that currently make up the file. If this is a new upload, it should come back empty.
        orig_fragments = drive_api.get_files_list_from_folder(
            driveConnect, dirId)

        # Determine if upload is taking place from an HTTP or HTTPS URL.
        urlUpload = False
        if file_path[0:4].lower() == 'http':
            urlUpload = True
            urlUploadHandle = requests.get(file_path,
                                           stream=True,
                                           allow_redirects=True)

        fileSize = -1  # If the file is being uploaded from a web server and its size cannot be retrieved, this stays at -1.
        if urlUpload:
            try:
                fileSize = int(urlUploadHandle.headers.get('content-length'))
            except TypeError:
                pass
            if fileSize == -1:
                # If fileSize is set to -1, set totalFrags to "an unknown number of"
                totalFrags = 'an unknown number of'
        else:
            fileSize = os.stat(file_path).st_size

        if fileSize != -1:
            totalFrags = math.ceil(fileSize / 10223999)
        print('Upload started. Upload will be composed of ' + str(totalFrags) +
              ' fragments.\n')

        # Set the chunk size for reading files to 10223999 bytes (about 9.75 MiB).
        readChunkSizes = 10223999

        # Doc number
        docNum = 1

        # Used to keep track of the numbers for fragments that have failed uploads.
        failedFragmentsSet = set()

        # Progress bar
        if fileSize == -1:
            # The file size is unknown
            upBar = Spinner('Uploading... ')
        else:
            # The file size is known
            upBar = ShadyBar('Uploading...',
                             max=max(math.ceil(fileSize / 10223999),
                                     len(orig_fragments)))

        if urlUpload:
            # If the upload is taking place from a URL...
            # Iterate through remote file until no more data is read.
            for fileBytes in urlUploadHandle.iter_content(
                    chunk_size=readChunkSizes):
                # Advance progress bar
                upBar.next()

                if docNum <= len(orig_fragments):
                    # A remote fragment is present, so update it.
                    upload_handler.handle_update_fragment(
                        drive_api, orig_fragments[docNum - 1], fileBytes,
                        driveConnect, docNum, self.debug_log)
                else:
                    # Process the fragment and upload it to Google Drive.
                    upload_handler.handle_upload_fragment(
                        drive_api, fileBytes, driveConnect, dirId, docNum,
                        failedFragmentsSet, self.debug_log)

                # Increment docNum for next Word document.
                docNum = docNum + 1

                # Run garbage collection. Hopefully, this will prevent process terminations by the operating system on memory-limited devices such as the Raspberry Pi.
                gc.collect()
        else:
            # If the upload is taking place from a file path...
            # Get file byte size
            fileSize = os.path.getsize(file_path)

            # Iterate through file in chunks.
            infile = open(str(file_path), 'rb')

            # Read an initial chunk from the file.
            fileBytes = infile.read(readChunkSizes)

            # Keep looping until no more data is read.
            while fileBytes:
                # Advance progress bar
                upBar.next()

                if docNum <= len(orig_fragments):
                    # A remote fragment is present, so update it.
                    upload_handler.handle_update_fragment(
                        drive_api, orig_fragments[docNum - 1], fileBytes,
                        driveConnect, docNum, self.debug_log)
                else:
                    # Process the fragment and upload it to Google Drive.
                    upload_handler.handle_upload_fragment(
                        drive_api, fileBytes, driveConnect, dirId, docNum,
                        failedFragmentsSet, self.debug_log)

                # Increment docNum for next Word document and read next chunk of data.
                docNum = docNum + 1
                fileBytes = infile.read(readChunkSizes)

                # Run garbage collection. Hopefully, this will prevent process terminations by the operating system on memory-limited devices such as the Raspberry Pi.
                gc.collect()

            infile.close()

        # If an update took place and the new file had fewer fragments than the previous file, delete any leftover fragments from the previous upload.
        docNum = docNum - 1
        while docNum < len(orig_fragments):
            upBar.next()
            drive_api.delete_file_by_id(drive_api.get_service(),
                                        orig_fragments[docNum]['id'])
            docNum = docNum + 1

        # Process fragment upload failures
        upload_handler.process_failed_fragments(drive_api, failedFragmentsSet,
                                                dirId, self.debug_log)

        upBar.finish()

        # If the number of fragments to expect from a file upload is known, verify that the upload is not corrupted.
        if totalFrags != 'an unknown number of':
            print('Verifying upload.')
            foundFrags = len(
                drive_api.get_files_list_from_folder(drive_api.get_service(),
                                                     dirId))
            if totalFrags != foundFrags:
                self.debug_log.write(
                    "----------------------------------------\n")
                self.debug_log.write(
                    "InfiniDrive detected upload corruption.\n")
                self.debug_log.write("Expected Fragments: " + str(totalFrags) +
                                     "\n")
                self.debug_log.write("Actual Fragments  : " + str(foundFrags) +
                                     "\n")
                print(
                    'InfiniDrive has detected that your upload was corrupted. Please report this issue on the InfiniDrive GitHub issue tracker and upload your "log.txt" file.'
                )

        print('Upload complete!')
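
The verification step at the end of Example #2 relies on the expected fragment count, which is just the file size divided by the 10223999-byte chunk size, rounded up. A minimal sketch of that arithmetic (function name is illustrative only):

import math

CHUNK_SIZE = 10223999  # bytes per fragment

def expected_fragment_count(file_size):
    # Number of fragments a file of file_size bytes is split into.
    return math.ceil(file_size / CHUNK_SIZE)

# Example: a 25 MiB file (26214400 bytes) uploads as 3 fragments.
assert expected_fragment_count(26214400) == 3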
Example #3
	def RETR(self, cmd):
		# Downloads an InfiniDrive file
		# Extract the name of the file to download from the command
		filename = cmd[5:-2].lstrip('/')

		# Open the data socket.
		print('Downloading', filename)
		self.conn.send(b'150 Opening data connection.\r\n')
		self.start_datasock()

		if not drive_api.file_with_name_exists(self.drive_service, filename):
			# Check if the file exists. If it does not, close the socket and send an error.
			self.stop_datasock()
			self.conn.send(b'551 File does not exist.\r\n')
			return

		# Get a count of the number of fragments that make up the file.
		fragment_count = drive_api.get_fragment_count(self.drive_service, filename)

		# For indexing fragments.
		fragment_index = 1

		# Get the InfiniDrive file ID from its name
		file_id = drive_api.get_file_id_from_name(self.drive_service, filename)

		# If the client has requested a custom starting position, slice off irrelevant fragments and calculate the fragment byte offset.
		if self.rest:
			fragment_index = self.pos // 10223999 + 1
			self.frag_byte_offset = self.pos % 10223999

		# Asynchronously retrieve a list of all files. We do this so that we can reduce Drive API calls, but if we wait for the list,
		# the FTP client will likely time out before we can finish, so we will retrieve one fragment at a time at first while the
		# entire list is retrieved in the background here.
		files = list()
		threading.Thread(target=drive_api.get_files_list_from_folder_async, args=(drive_api.get_service(), file_id, files)).start()

		# For all fragments...
		while fragment_index <= fragment_count:
			# Get the fragment with the given index
			file = None
			if files == []:
				# The fragment list is not available yet, so retrieve one fragment.
				file = drive_api.get_files_with_name_from_folder(self.drive_service, file_id, str(fragment_index))[0]
			else:
				# The fragment list is available, so use it to locate the fragment.
				file = files[0][fragment_index - 1]

			# Get the RGB pixel values from the image as a list of tuples that we will break up and then convert to a bytestring.
			while True:
				try:
					pixelVals = list(Image.open(drive_api.get_image_bytes_from_doc(self.drive_service, file)).convert('RGB').getdata())
				except Exception as e:
					self.debug_log.write("----------------------------------------\n")
					self.debug_log.write("Fragment download failure\n")
					self.debug_log.write("Error:\n")
					self.debug_log.write(str(e) + "\n")
					continue
				pixelVals = [j for i in pixelVals for j in i]
				if len(pixelVals) == 10224000:
					break

			# If the downloaded values do not match the fragment hash, terminate download and report corruption.
			if hash_handler.is_download_invalid(file, bytearray(pixelVals)):
				self.stop_datasock()
				self.conn.send(b'551 File is corrupted.\r\n')
				return

			# Strip the null byte padding and "spacer byte" from pixelVals.
			pixelVals = array.array('B', pixelVals).tobytes().rstrip(b'\x00')[:-1]

			# If the client requested a custom starting position, slice off the start of the byte array using the calculated frag_byte_offset value.
			if self.rest:
				pixelVals = pixelVals[self.frag_byte_offset:]
				self.rest = False

			# Send the byte array to the client.
			self.datasock.send(pixelVals)

			# Increment fragment_index
			fragment_index += 1

		# File transfer is complete. Close the data socket and report completion.
		self.stop_datasock()
		self.conn.send(b'226 Transfer complete.\r\n')
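
The REST handling in Example #3 maps the client's requested byte position onto a starting fragment and a byte offset within that fragment. A standalone sketch of the same arithmetic (the function name is hypothetical):

CHUNK_SIZE = 10223999  # bytes per fragment

def resume_position(pos):
    # Map an absolute byte position to (starting fragment index, byte offset inside that fragment).
    return pos // CHUNK_SIZE + 1, pos % CHUNK_SIZE

# Example: byte 0 is in fragment 1, and byte 10223999 is the first byte of fragment 2.
assert resume_position(0) == (1, 0)
assert resume_position(10223999) == (2, 0)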
Example #4
    def download(self):
        # Save file name from command line arguments.
        file_name = str(sys.argv[2])

        # Check if the file exists. If it does not, print an error message and return.
        if not drive_api.file_with_name_exists(drive_api.get_service(),
                                               file_name):
            print('File with name "' + file_name + '" does not exist.')
            return

        # Get Drive service.
        drive_service = drive_api.get_service()

        # Get a count of the number of fragments that make up the file.
        fragment_count = drive_api.get_fragment_count(drive_service, file_name)

        # For indexing fragments.
        fragment_index = 1

        # Get the InfiniDrive file ID from its name
        file_id = drive_api.get_file_id_from_name(drive_service, file_name)

        # Asynchronously retrieve a list of all files. We do this so that we can reduce Drive API calls, but waiting for the full
        # list would delay the start of the download, so we retrieve one fragment at a time at first while the entire list is
        # retrieved in the background here.
        files = list()
        threading.Thread(target=drive_api.get_files_list_from_folder_async,
                         args=(drive_api.get_service(), file_id,
                               files)).start()

        # Open a file at the user-specified path to write the data to
        result = open(str(sys.argv[3]), "wb")

        # Download complete print flag
        showDownloadComplete = True

        # For all fragments...
        downBar = ShadyBar('Downloading...',
                           max=fragment_count)  # Progress bar
        while fragment_index <= fragment_count:
            downBar.next()

            # Get the fragment with the given index
            file = None
            if files == []:
                # The fragment list is not available yet, so retrieve one fragment.
                file = drive_api.get_files_with_name_from_folder(
                    drive_service, file_id, str(fragment_index))[0]
            else:
                # The fragment list is available, so use it to locate the fragment.
                file = files[0][fragment_index - 1]

            # Get the RGB pixel values from the image as a list of tuples that we will break up and then convert to a bytestring.
            while True:
                try:
                    pixelVals = list(
                        Image.open(
                            drive_api.get_image_bytes_from_doc(
                                drive_api.get_service(),
                                file)).convert('RGB').getdata())
                except Exception as e:
                    self.debug_log.write(
                        "----------------------------------------\n")
                    self.debug_log.write("Fragment download failure\n")
                    self.debug_log.write("Error:\n")
                    self.debug_log.write(str(e) + "\n")
                    continue
                pixelVals = [j for i in pixelVals for j in i]
                if len(pixelVals) == 10224000:
                    break

            # If the downloaded values do not match the fragment hash, terminate download and report corruption.
            if hash_handler.is_download_invalid(file, bytearray(pixelVals)):
                downBar.finish()
                print(
                    "\nError: InfiniDrive has detected that the file upload on Google Drive is corrupted and the download cannot complete.",
                    end="")
                showDownloadComplete = False
                break

            # Strip the null byte padding and "spacer byte" from pixelVals.
            pixelVals = array.array('B',
                                    pixelVals).tobytes().rstrip(b'\x00')[:-1]

            # Write the data stored in "pixelVals" to the output file.
            result.write(pixelVals)
            fragment_index += 1

            # Run garbage collection. Hopefully, this will prevent process terminations by the operating system on memory-limited devices such as the Raspberry Pi.
            gc.collect()

        result.close()
        downBar.finish()
        if showDownloadComplete:
            print('\nDownload complete!')
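
Both download paths (Examples #3 and #4) decode each fragment the same way: the fragment image yields 3,408,000 RGB pixels, which flatten to 10,224,000 bytes, and the trailing null padding plus the single "spacer byte" are then stripped to recover the original data. A sketch of that decoding step, assuming a list of (R, G, B) tuples as input (the function name is illustrative only):

import array

def fragment_bytes_from_pixels(pixel_tuples):
    # Flatten (R, G, B) tuples to raw bytes, then strip the null padding and the spacer byte.
    flat = [channel for pixel in pixel_tuples for channel in pixel]
    assert len(flat) == 10224000  # 3408000 pixels * 3 channels per fragment
    return array.array('B', flat).tobytes().rstrip(b'\x00')[:-1]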