Example no. 1
	def getChunkServerIPs(self):
		# Debug message for successful calling of function
		logging.debug('HEARTBEAT: Getting chunk server IPs')
		# If the hosts.txt file exists:
		if os.path.isfile(self.HOSTS):
			try:
				# Read from it and parse its contents into a list. Return the list.
				with open(self.HOSTS, "r") as file:
					cs = file.read().splitlines()
					# If there are any additional \n in the hosts file, this will
					# remove them from our list of chunkservers
					cs = filter(None, cs)
					return cs
			# If the hosts.txt file can not be read, alert the logger
			except IOError:
				logging.error('HEARTBEAT: Could not read from ' + self.HOSTS)
				listener.logError("Could not open or read from hosts file")
				exit(1)
		# If the hosts.txt file does not exist:
		else:
			# Something went terribly wrong, so alert the logger and alert
			# the listener. Without a list of chunkservers present in the system,
			# it becomes pointless to continue, so we can exit.
			logging.error("HEARTBEAT: " + self.HOSTS + " does not exist.")
			listener.filesMissing()
			exit(1)
Example no. 2
    def getChunkServerIPs(self):
        # Debug message for successful calling of function
        logging.debug('HEARTBEAT: Getting chunk server IPs')
        # If the hosts.txt file exists:
        if os.path.isfile(self.HOSTS):
            try:
                # Read from it and parse its contents into a list. Return the list.
                with open(self.HOSTS, "r") as file:
                    cs = file.read().splitlines()
                    # If there are any additional \n in the hosts file, this will
                    # remove them from our list of chunkservers
                    cs = filter(None, cs)
                    return cs
            # If the hosts.txt file can not be read, alert the logger
            except IOError:
                logging.error('HEARTBEAT: Could not read from ' + self.HOSTS)
                listener.logError("Could not open or read from hosts file")
                exit(1)
        # If the hosts.txt file does not exist:
        else:
            # Something went terribly wrong, so alert the logger and alert
            # the listener. Without a list of chunkservers present in the system,
            # it becomes pointless to continue, so we can exit.
            logging.error("HEARTBEAT: " + self.HOSTS + " does not exist.")
            listener.filesMissing()
            exit(1)
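
For reference, a minimal standalone sketch of the same hosts-file parsing pattern shown above. The module-level path hosts.txt and the empty-list fallback are assumptions for illustration; the method above reads self.HOSTS and exits on failure instead.

import os
import logging

HOSTS = "hosts.txt"  # hypothetical path; the method above uses self.HOSTS

def get_chunkserver_ips(hosts_path=HOSTS):
    # Return the non-empty lines of the hosts file as a list of chunkserver IPs.
    if not os.path.isfile(hosts_path):
        logging.error("%s does not exist.", hosts_path)
        return []  # assumption: fall back to an empty list instead of exiting
    try:
        with open(hosts_path, "r") as f:
            # splitlines() drops the newline characters; filtering out empty
            # strings removes any blank lines, as in the example above.
            return [line for line in f.read().splitlines() if line]
    except IOError:
        logging.error("Could not read from %s", hosts_path)
        return []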
Example no. 3
	def initialize(self):
		logging.debug('Initializing database')


		# Get a list of active hosts from the active hosts file
		try:
			with open(ACTIVEHOSTSFILE, 'r') as activeFile:
				activeHosts = activeFile.read().splitlines()

		# If unable to read the file, log the error and alert the listener
		except IOError:
			logging.error(ACTIVEHOSTSFILE + " was unable to be read.")
			listener.logError("DATABASE: Unable to read active hosts on initialize.")
			# Define activeHosts to be empty so initialization can continue.
			activeHosts = []


		# Populate the database by parsing the operations log
		self.readFromOpLog()

		for item in activeHosts:
			# Get/update the locations of all the chunks from the chunkservers
			self.interrogateChunkServer(item, 0)
		# Now that the database is set up, go through the used chunkhandles and
		# set the chunk handle counter to the next unused number
		self.updateChunkCounter()


		####### DEBUG MESSAGES TO MAKE SURE THINGS INITIALIZED AS EXPECTED #######
		# The database dictionary
		logging.debug(self.data)
		# The lookup dictionary
		logging.debug(self.lookup)
		# The list of files to delete
		logging.debug(self.toDelete)
		# The current chunk handle
		logging.debug(self.chunkHandle)
		# The location lookup dictionary
		logging.debug(self.locDict)
		##########################################################################

		

		# Logs and displays a critical warning that an insufficient number of chunkservers
		# are active, which would lead to poor replication strategies and poor performance
		if len(activeHosts) < 3:
			logging.critical("\nLESS THAN THREE CHUNKSERVERS ARE ACTIVE. OPERATIONS MAY BE LIMITED OR INACCESSIBLE.\n")

		logging.debug('Database initialized')
Example no. 4
    def initialize(self):
        logging.debug('Initializing database')

        # Get a list of active hosts from the active hosts file
        try:
            with open(ACTIVEHOSTSFILE, 'r') as activeFile:
                activeHosts = activeFile.read().splitlines()

        # If unable to read the file, log the error and alert the listener
        except IOError:
            logging.error(ACTIVEHOSTSFILE + " was unable to be read.")
            listener.logError(
                "DATABASE: Unable to read active hosts on initialize.")
            # Define activeHosts to be empty so initialization can continue.
            activeHosts = []

        # Populate the database by parsing the operations log
        self.readFromOpLog()

        for item in activeHosts:
            # Get/update the locations of all the chunks from the chunkservers
            self.interrogateChunkServer(item, 0)
        # Now that the database is set up, go through the used chunkhandles and
        # set the chunk handle counter to the next unused number
        self.updateChunkCounter()

        ####### DEBUG MESSAGES TO MAKE SURE THINGS INITIALIZED AS EXPECTED #######
        # The database dictionary
        logging.debug(self.data)
        # The lookup dictionary
        logging.debug(self.lookup)
        # The list of files to delete
        logging.debug(self.toDelete)
        # The current chunk handle
        logging.debug(self.chunkHandle)
        # The location lookup dictionary
        logging.debug(self.locDict)
        ##########################################################################

        # Logs and displays a critical warning that an insufficient number of chunkservers
        # are active, which would lead to poor replication strategies and poor performance
        if len(activeHosts) < 3:
            logging.critical(
                "\nLESS THAN THREE CHUNKSERVERS ARE ACTIVE. OPERATIONS MAY BE LIMITED OR INACCESSIBLE.\n"
            )

        logging.debug('Database initialized')
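
The comments above say updateChunkCounter() walks the used chunk handles and sets the counter to the next unused number. That method is not shown in these examples; a hedged sketch of the idea, assuming integer-valued handles stored as keys of the lookup dictionary, might look like this:

# Hypothetical sketch of the chunk-counter idea: resume numbering after the
# highest chunk handle already present in the lookup dictionary. Assumes
# handles are integer-like strings; the real updateChunkCounter() is not shown.
def next_chunk_handle(lookup):
    used = [int(handle) for handle in lookup if str(handle).isdigit()]
    return max(used) + 1 if used else 0

print(next_chunk_handle({"0": "a.txt", "1": "a.txt", "7": "b.txt"}))  # -> 8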
Example no. 5
import listener, time

# Test that logging information to the listener works when the listener
# module is imported by another program
for x in range(0, 10):
    listener.logError(x)
    print "Logged " + str(x)
    time.sleep(1)
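
The listener module itself is not shown in these examples, so the test above cannot run on its own. A minimal stand-in, purely an assumption about its behaviour, might simply append timestamped messages to a local file:

# listener.py -- hypothetical stand-in for the real listener module, which is
# not shown in these examples; this stub only appends messages to a local file.
import time

LOGFILE = "listener.log"  # assumed path

def logError(message):
    # Record an error message with a timestamp.
    with open(LOGFILE, "a") as f:
        f.write("%s ERROR %s\n" % (time.ctime(), str(message)))

def filesMissing():
    # Called when a required file (e.g. the hosts file) is missing.
    logError("A required file is missing")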
Example no. 6
	def readFromOpLog(self):
		logging.debug('Initialize readFromOpLog()')


		try:
			# Read the contents of the oplog into a list
			with open(OPLOG, 'r') as oplog:
				opLog = oplog.read().splitlines()

		# If the database is unable to read the opLog, log it, and alert the listener
		except IOError:
			logging.critical(OPLOG + " was unable to be read.")
			listener.logError("DATABASE: Unable to read oplog on initialize.")
			# If we are unable to read from the opLog, something went terribly wrong
			# and we no longer have a map from files -> chunks. Alert the fatal error
			# and exit, to minimize the risk of writing over existing chunks further 
			# down the line.
			logging.error("Database could not be built reliably. To maintain integrity of existing chunks, exiting database.")
			exit(1)


		logging.debug('Got contents of opLog')
		# For every entry in the opLog
		for line in opLog:
			# Separate the string by pipe into a list where the list should be formatted:
			# 		[<OPERATION>, <CHUNKHANDLE>, <FILENAME>]
			#		[ 	   0    ,       1      ,      2    ]
			lineData = line.split("|")

			# If the operation was to create a file, create a new file object and 
			# add it to the database dictionary
			if lineData[0] == 'CREATEFILE':
				# Create a new instance of the File object, taking its file name as
				# a parameter
				file = File(lineData[2])
				# Create a new entry in the database, where the file name is the key
				# and the file object is the value
				self.data[lineData[2]] = file
				logging.debug('CREATEFILE ==> new file, ' + str(lineData[2]) + ', added to database')

			# If the operation was to create a chunk, create a new chunk object and 
			# add it to the database
			elif lineData[0] == 'CREATECHUNK':
				# Create a new instance of the Chunk object
				chunk = Chunk()
				# In the file object associated with the file name the chunk belongs to,
				# add the newly created chunk to the chunk dictionary, where the chunk
				# object is the value and the chunk handle is the key
				self.data[lineData[2]].chunks[lineData[1]] = chunk
				# Update the lookup dictionary with the chunk/fileName pair
				self.lookup[lineData[1]] = lineData[2]
				logging.debug('CREATECHUNK ==> new chunk, ' + str(lineData[1]) + ', added to database')

			# If the operation was to delete a file, change the file object's delete attribute
			# to True, so the scrubber will recognize it as marked for deletion.
			elif lineData[0] == 'DELETE':
				# Flag the given file for deletion
				self.data[lineData[2]].delete = True
				# Add the file name to the list of files to be deleted
				self.toDelete.append(lineData[2])
				logging.debug('DELETE ==> ' + str(lineData[2]) + ' marked True for delete')

			# If the operation was to undelete a file, change the file object's delete attribute
			# back to False, so the scrubber will not delete it.
			elif lineData[0] == "UNDELETE":
				# Unflag the given file so it is no longer marked for deletion
				self.data[lineData[2]].delete = False
				# Remove the file name from the list of files to be deleted
				self.toDelete.remove(lineData[2])
				logging.debug('UNDELETE ==> ' + str(lineData[2]) + ' marked False for delete')

			# If the operation was to sanitize, that is, the chunks were actually deleted,
			# as opposed to marked for deletion, then remove the metadata for the file and
			# associated chunks from the database
			elif lineData[0] == "SANITIZED":
				self.sanitizeFile(lineData[2])
				logging.debug('SANITIZED ==> ' + str(lineData[2]) + ' cleansed from chunkservers')

		logging.debug('readFromOpLog() complete')
Example no. 7
import listener, time

# Test that logging information to the listener works when the listener
# module is imported by another program
for x in range(0, 10):
	listener.logError(x)
	print "Logged " + str(x)
	time.sleep(1)
Example no. 8
    def readFromOpLog(self):
        logging.debug('Initialize readFromOpLog()')

        try:
            # Read the contents of the oplog into a list
            with open(OPLOG, 'r') as oplog:
                opLog = oplog.read().splitlines()

        # If the database is unable to read the opLog, log it, and alert the listener
        except IOError:
            logging.critical(OPLOG + " was unable to be read.")
            listener.logError("DATABASE: Unable to read oplog on initialize.")
            # If we are unable to read from the opLog, something went terribly wrong
            # and we no longer have a map from files -> chunks. Alert the fatal error
            # and exit, to minimize the risk of writing over existing chunks further
            # down the line.
            logging.error(
                "Database could not be built reliably. To maintain integrity of existing chunks, exiting database."
            )
            exit(1)

        logging.debug('Got contents of opLog')
        # For every entry in the opLog
        for line in opLog:
            # Separate the string by pipe into a list where the list should be formatted:
            # 		[<OPERATION>, <CHUNKHANDLE>, <FILENAME>]
            #		[ 	   0    ,       1      ,      2    ]
            lineData = line.split("|")

            # If the operation was to create a file, create a new file object and
            # add it to the database dictionary
            if lineData[0] == 'CREATEFILE':
                # Create a new instance of the File object, taking its file name as
                # a parameter
                file = File(lineData[2])
                # Create a new entry in the database, where the file name is the key
                # and the file object is the value
                self.data[lineData[2]] = file
                logging.debug('CREATEFILE ==> new file, ' + str(lineData[2]) +
                              ', added to database')

            # If the operation was to create a chunk, create a new chunk object and
            # add it to the database
            elif lineData[0] == 'CREATECHUNK':
                # Create a new instance of the Chunk object
                chunk = Chunk()
                # In the file object associated with the file name the chunk belongs to,
                # add the newly created chunk to the chunk dictionary, where the chunk
                # object is the value and the chunk handle is the key
                self.data[lineData[2]].chunks[lineData[1]] = chunk
                # Update the lookup dictionary with the chunk/fileName pair
                self.lookup[lineData[1]] = lineData[2]
                logging.debug('CREATECHUNK ==> new chunk, ' +
                              str(lineData[1]) + ', added to database')

            # If the operation was to delete a file, change the file object's delete attribute
            # to True, so the scrubber will recognize it as marked for deletion.
            elif lineData[0] == 'DELETE':
                # Flag the given file for deletion
                self.data[lineData[2]].delete = True
                # Add the file name to the list of files to be deleted
                self.toDelete.append(lineData[2])
                logging.debug('DELETE ==> ' + str(lineData[2]) +
                              ' marked True for delete')

            # If the operation was to undelete a file, change the file object's delete attribute
            # back to False, so the scrubber will not delete it.
            elif lineData[0] == "UNDELETE":
                # Unflag the given file so it is no longer marked for deletion
                self.data[lineData[2]].delete = False
                # Remove the file name from the list of files to be deleted
                self.toDelete.remove(lineData[2])
                logging.debug('UNDELETE ==> ' + str(lineData[2]) +
                              ' marked False for delete')

            # If the operation was to sanitize, that is, the chunks were actually deleted,
            # as opposed to marked for deletion, then remove the metadata for the file and
            # associated chunks from the database
            elif lineData[0] == "SANITIZED":
                self.sanitizeFile(lineData[2])
                logging.debug('SANITIZED ==> ' + str(lineData[2]) +
                              ' cleansed from chunkservers')

        logging.debug('readFromOpLog() complete')
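
The pipe-delimited record format parsed above, <OPERATION>|<CHUNKHANDLE>|<FILENAME>, can be exercised in isolation. The sample lines below are made up for illustration, and leaving the chunk handle field empty for operations that do not use it is an assumption; only the field layout comes from readFromOpLog().

# Hypothetical op log records in the <OPERATION>|<CHUNKHANDLE>|<FILENAME> layout.
sample_oplog = [
    "CREATEFILE||movie.avi",
    "CREATECHUNK|17|movie.avi",
    "DELETE||movie.avi",
    "UNDELETE||movie.avi",
]

for line in sample_oplog:
    # Every record splits into exactly three fields, as in readFromOpLog().
    operation, chunk_handle, file_name = line.split("|")
    print("%-11s chunkHandle=%-3s fileName=%s" % (operation, chunk_handle, file_name))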