Пример #1
0
    def create_body(self, type_definitions, parameter):
        """Build a JSON request body for an OpenAPI-style *parameter*.

        If the parameter's schema declares ``type: array``, a one-element
        JSON list is produced from the item's ``$ref`` type; otherwise the
        schema's own ``$ref`` type is serialized directly.

        :param type_definitions: type catalogue passed through to Replicator.
        :param parameter: dict with a ``'schema'`` entry (``$ref`` or array).
        :return: JSON string for the request body.
        """
        schema = parameter['schema']
        if schema.get('type') == 'array':
            object_type = schema['items']['$ref']
            # NOTE(review): now forwards self.use_fuzzing like the scalar
            # branch below did — the original array branch omitted it,
            # which looked like an oversight; confirm against Replicator.
            item = Replicator(type_definitions, object_type,
                              self.use_fuzzing).as_dict()
            return json.dumps([item])

        object_type = schema['$ref']
        return Replicator(type_definitions, object_type,
                          self.use_fuzzing).as_json()
Пример #2
0
 def replace_url_parameter(self, type_definitions, url, name, object_type):
     """Substitute the ``{name}`` placeholder in *url* with a generated value.

     Returns *url* unchanged when either *name* or *object_type* is None.
     """
     if name is None or object_type is None:
         return url

     replicator = Replicator(type_definitions, object_type, self.use_fuzzing)
     placeholder = '{' + name + '}'
     return url.replace(placeholder,
                        str(replicator.create_init_value(object_type)))
Пример #3
0
    def start(self):
        """
        Start the HTTP server.

        If this node is the master, also create and start the replicator.
        """

        # Bring up the HTTP interface before anything else so the node is
        # reachable.
        self.httpinterface = HTTPInterface(self)
        self.httpinterface.start()

        # NOTE(review): report() is defined elsewhere — presumably announces
        # this node's state; confirm against the full class.
        self.report()

        # Only the master node runs a replicator.
        if self.ismaster:
            self.replicator = Replicator(self)
            self.replicator.start()
Пример #4
0
import logging
from replicator import Replicator

# Root logger; INFO by default (uncomment below for verbose diagnostics).
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# logger.setLevel(logging.DEBUG)

# NOTE(review): module-level instance — in Lambda this persists across warm
# invocations, so construction cost is paid once per container.
replicator = Replicator()


# This is the method that will be registered
# with Lambda and run on a schedule
def handler(event=None, context=None):
    """Lambda entry point.

    Replies to health-check events containing a ``'ping'`` key with a pong
    message; otherwise runs the replicator.

    :param event: Lambda event dict (``None`` is treated as empty).
    :param context: Lambda context object; unused, kept for the Lambda
        calling convention.
    :return: ``{'message': 'pong'}`` for ping events, otherwise ``None``.
    """
    # Fix: the original used mutable default arguments (event={}, context={}),
    # which are shared across calls; None-defaults avoid that pitfall.
    event = event if event is not None else {}
    if 'ping' in event:
        logger.info('pong')
        return {'message': 'pong'}

    replicator.run()


# If being called locally, just call handler
if __name__ == '__main__':
    import os
    import json
    import sys

    logging.basicConfig()
    event = {}
Пример #5
0
class Filesystem:
    """
    A node of the fsdfs distributed pseudo-filesystem.

    The constructor takes a configuration dictionary with keys such as:

    * host -- IP:Port of the new node (ie: localhost:4242)
    * datadir -- path of the folder where the files are to be stored
    * secret -- password for joining the distributed filesystem
    * master -- the IP:Port of the master Filesystem server. If it's the
      same as host, then this server is the master
    * maxstorage -- a number of bytes (or a string like "10G") that are
      available for fsdfs
    """

    def __init__(self, config):
        self.config = config

        self.host = self.config["host"]

        # Node start time, available for status reporting.
        self.startTime = time.time()

        # todo: sqlite default when stable
        self.filedb = loadFileDb(self.config.get("filedb", "memory"), self)

        # Known peer nodes.
        self.nodedb = {}

        # Master when master == host, or when the config explicitly says so.
        self.ismaster = (self.config["master"] == self.config["host"]) or (self.config["master"] is True)

        # Parse maxstorage: missing -> 10G default, int -> raw bytes,
        # "<N>G" string -> N gibibytes, anything else is an error.
        if not self.config.get("maxstorage", False):
            self.maxstorage = 10 * 1024 * 1024 * 1024  # default 10G
        elif isinstance(self.config["maxstorage"], int):
            self.maxstorage = self.config["maxstorage"]
        elif re.match("[0-9]+G", self.config["maxstorage"]):
            self.maxstorage = int(self.config["maxstorage"][0:-1]) * 1024 * 1024 * 1024
        else:
            # Py2/3-compatible raise (was the Py2-only `raise Exception, "..."`).
            raise Exception("Unknown maxstorage format")

    def getReplicationRules(self, filepath):
        """
        Return the replication rules for filepath.

        Currently a constant replication level of 3 for every file.
        """

        return {"n": 3}

    def getLocalFilePath(self, filepath):
        """
        Return the absolute local path of a file inside datadir.
        """

        return os.path.join(self.config["datadir"], filepath)

    def deleteFile(self, filepath):
        """
        Delete the local copy of a file and deregister it from the file
        database. Returns True when the file is absent or removed, False
        when removal failed.
        """

        destpath = self.getLocalFilePath(filepath)

        # Nothing to do if the file is not stored locally.
        if not os.path.isfile(destpath):
            return True

        try:
            os.unlink(destpath)
            self.filedb.removeFileFromNode(filepath, self.host)
            self.report()
            return True
        except Exception:
            # Best-effort removal. Was a bare `except:`; narrowed so that
            # SystemExit/KeyboardInterrupt are no longer swallowed.
            return False

    def nukeFile(self, filepath):
        """
        Mark a file as nuked so it gets deleted on all the nodes.

        Only the master may nuke; other nodes return False.
        """

        if not self.ismaster:
            return False
        else:
            # The nuked timestamp is picked up by the replication machinery.
            self.filedb.update(filepath, {"nuked": time.time()})

            return True

    def importFile(self, src, filepath, mode="copy"):
        """
        Add a file to the global filesystem.

        mode is one of "copy", "move" (src is a path) or "copyobj"
        (src is a file-like object).
        """

        destpath = self.getLocalFilePath(filepath)

        if not os.path.isdir(os.path.dirname(destpath)):
            os.makedirs(os.path.dirname(destpath))

        if mode == "copy":
            shutil.copy(src, destpath)
        elif mode == "move":
            shutil.move(src, destpath)
        elif mode == "copyobj":
            f = open(destpath, "wb")
            shutil.copyfileobj(src, f)
            f.close()

        size = os.stat(destpath).st_size

        # Register this node as a holder of the file and clear any nuke flag.
        self.filedb.update(
            filepath,
            {
                "nodes": set([self.host]).union(self.filedb.getNodes(filepath)),
                "t": int(time.time()),
                "size": size,
                "nuked": None,
            },
        )

        self.report()

    def getGlobalStatus(self):

        """
        Return the global status of the distributed filesystem.

        Non-master nodes proxy the request to the master.
        """

        if not self.ismaster:
            return self.nodeRPC(self.config["master"], "GLOBALSTATUS", parse=True)
        else:

            status = self.getStatus()

            minKns = [(self.filedb.getKn(f), f) for f in self.filedb.getMinKnAll(num=1)]

            # list() so the structure is JSON-serializable on Py3 as well.
            status["nodes"] = list(self.nodedb.keys())
            status["sizeGlobal"] = self.filedb.getSizeAll()
            status["countGlobal"] = self.filedb.getCountAll()
            status["minKnGlobal"] = minKns

            # pass thru JSON to have the same exact returns as if in remote fetch
            return json.loads(json.dumps(status))

    def start(self):
        """
        Start the HTTP server.
        If the server is master, also start the replicator.
        """

        self.httpinterface = HTTPInterface(self)
        self.httpinterface.start()

        self.report()

        if self.ismaster:
            self.replicator = Replicator(self)
            self.replicator.start()

    def stop(self):
        """
        Stop the filesystem: shut down the HTTP server and, on the master,
        the replicator.
        """

        # print "stopping %s" % self.host
        self.httpinterface.server.shutdown()

        self.httpinterface.server.server_close()

        # self.httpinterface.join()

        if self.ismaster:
            self.replicator.shutdown()

            self.replicator.join()

    def searchFile(self, file):
        """
        Return the nodes where a file is stored (asks the master).
        """

        nodes = self.nodeRPC(self.config["master"], "SEARCH", {"filepath": file}, parse=True)

        return nodes

    def nodeRPC(self, host, method, params=None, parse=False):
        """
        Inter-node communication method.

        Sends a signed JSON query to http://host/method; when parse is
        True the JSON response is decoded, otherwise the raw response
        object is returned.
        """

        # Fix: params used to default to a shared mutable {} that this
        # method mutates below; a None-default avoids cross-call leakage.
        if params is None:
            params = {}

        params["_time"] = int(time.time())

        query = json.dumps(params)

        # print "http://%s/%s" % (host,method)
        ret = urllib2.urlopen(
            "http://%s/%s" % (host, method), "h=" + self.hashQuery(query) + "&p=" + urllib.quote(query)
        )

        if parse:
            return json.loads(ret.read())
        else:
            return ret

    def hashQuery(self, query):
        """
        Return the authentication hash for a query, mixing in the shared
        secret.
        """

        # NOTE(review): sha1() over str works on Py2 only; Py3 would need
        # bytes — this module is otherwise Py2 (urllib2/urllib.quote).
        return sha1(sha1(query).hexdigest() + self.config["secret"]).hexdigest()

    def downloadFile(self, filepath):
        """
        Download a file from the global filesystem to the local server.

        Tries each node that holds the file until one transfer succeeds;
        returns True on success, False when no node could serve it.
        """

        hosts = self.searchFile(filepath)

        for host in hosts:
            try:
                remote = self.nodeRPC(host, "DOWNLOAD", {"filepath": filepath})
            except Exception as err:
                # Try the next node holding the file.
                print(err)
                continue

                # We don't need checksumming here... we're using TCP
            self.importFile(remote, filepath, mode="copyobj")

            remote.close()

            return True

        return False
Пример #6
0
 def create_form_parameter(self, type_definitions, object_type):
     """Generate the string form-value for *object_type*."""
     replicator = Replicator(type_definitions, object_type, self.use_fuzzing)
     init_value = replicator.create_init_value(object_type)
     return str(init_value)
Пример #7
0
    def restore_db_backup(self):
        '''
        Target:
            - restore a database's backup in PostgreSQL.
        '''
        # Clone the restoring template into the target database name.
        replicator = Replicator(self.connecter, self.new_dbname,
                                Default.RESTORING_TEMPLATE, self.logger)
        result = self.connecter.allow_db_conn(Default.RESTORING_TEMPLATE)
        if result:
            replicator.replicate_pg_db()
            self.connecter.disallow_db_conn(Default.RESTORING_TEMPLATE)
        else:
            self.logger.stop_exe(
                Messenger.ALLOW_DB_CONN_FAIL.format(
                    dbname=Default.RESTORING_TEMPLATE))

        # Regular expression which must match the backup's name
        regex = r'.*db_(.+)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$'
        regex = re.compile(regex)

        if re.match(regex, self.db_backup):
            # Store the parts of the backup's name (name, date, ext)
            parts = regex.search(self.db_backup).groups()
            # Store only the extension to know the type of file
            ext = parts[2]
        else:
            # NOTE(review): if stop_exe() ever returns instead of aborting,
            # `ext` is unbound below — confirm stop_exe terminates execution.
            self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

        message = Messenger.BEGINNING_DB_RESTORER.format(
            db_backup=self.db_backup, new_dbname=self.new_dbname)
        self.logger.highlight('info', message, 'white')
        self.logger.info(Messenger.WAIT_PLEASE)

        # Compressed backups are streamed through the matching decompressor
        # into pg_restore; plain dumps go to pg_restore directly.
        # NOTE(review): the command is interpolated into a shell string and
        # run with shell=True below — backup paths containing shell
        # metacharacters would break or inject; quote if inputs are untrusted.
        if ext == 'gz':
            command = 'gunzip -c {} -k | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        elif ext == 'bz2':
            command = 'bunzip2 -c {} -k | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        elif ext == 'zip':
            command = 'unzip -p {} | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        else:
            command = 'pg_restore -U {} -h {} -p {} -d {} {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, self.new_dbname, self.db_backup)

        try:
            start_time = DateTools.get_current_datetime()
            # Make the restauration of the database
            result = subprocess.call(command, shell=True)
            end_time = DateTools.get_current_datetime()
            # Get and show the process' duration
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            # Non-zero exit status means pg_restore failed.
            if result != 0:
                raise Exception()

            message = Messenger.RESTORE_DB_DONE.format(
                db_backup=self.db_backup,
                new_dbname=self.new_dbname,
                diff=diff)
            self.logger.highlight('info', message, 'green')

            self.logger.highlight('info',
                                  Messenger.RESTORER_DONE,
                                  'green',
                                  effect='bold')

        except Exception as e:
            self.logger.debug('Error en la función "restore_db_backup": '
                              '{}.'.format(str(e)))
            message = Messenger.RESTORE_DB_FAIL.format(
                db_backup=self.db_backup, new_dbname=self.new_dbname)
            self.logger.stop_exe(message)
Пример #8
0
    def restore_db_backup(self):
        '''
        Target:
            - restore a database's backup in PostgreSQL.
        '''
        # Clone the restoring template into the target database name.
        replicator = Replicator(self.connecter, self.new_dbname,
                                Default.RESTORING_TEMPLATE, self.logger)
        if self.connecter.allow_db_conn(Default.RESTORING_TEMPLATE):
            replicator.replicate_pg_db()
            self.connecter.disallow_db_conn(Default.RESTORING_TEMPLATE)
        else:
            self.logger.stop_exe(Messenger.ALLOW_DB_CONN_FAIL.format(
                dbname=Default.RESTORING_TEMPLATE))

        # The backup's file name must follow db_<name>_<timestamp>.<ext>
        pattern = re.compile(r'.*db_(.+)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$')
        backup_match = pattern.match(self.db_backup)
        if backup_match:
            # The extension decides how the backup is fed to pg_restore
            ext = backup_match.groups()[2]
        else:
            self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

        start_msg = Messenger.BEGINNING_DB_RESTORER.format(
            db_backup=self.db_backup, new_dbname=self.new_dbname)
        self.logger.highlight('info', start_msg, 'white')
        self.logger.info(Messenger.WAIT_PLEASE)

        # Compressed backups are streamed through a decompressor into
        # pg_restore; plain dumps are handed to pg_restore directly.
        decompressors = {
            'gz': 'gunzip -c {} -k',
            'bz2': 'bunzip2 -c {} -k',
            'zip': 'unzip -p {}',
        }
        restore_part = 'pg_restore -U {} -h {} -p {} -d {}'.format(
            self.connecter.user, self.connecter.server,
            self.connecter.port, self.new_dbname)
        if ext in decompressors:
            command = (decompressors[ext].format(self.db_backup) + ' | ' +
                       restore_part)
        else:
            command = restore_part + ' ' + self.db_backup

        try:
            start_time = DateTools.get_current_datetime()
            # Run the restoration itself
            result = subprocess.call(command, shell=True)
            end_time = DateTools.get_current_datetime()
            # Duration of the restore, shown in the success message
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            if result != 0:
                raise Exception()

            done_msg = Messenger.RESTORE_DB_DONE.format(
                db_backup=self.db_backup, new_dbname=self.new_dbname,
                diff=diff)
            self.logger.highlight('info', done_msg, 'green')

            self.logger.highlight('info', Messenger.RESTORER_DONE, 'green',
                                  effect='bold')

        except Exception as e:
            self.logger.debug('Error en la función "restore_db_backup": '
                              '{}.'.format(str(e)))
            fail_msg = Messenger.RESTORE_DB_FAIL.format(
                db_backup=self.db_backup, new_dbname=self.new_dbname)
            self.logger.stop_exe(fail_msg)