Example #1
    def _commit_puts(self, item_list=None):
        """
        Scan for .pending files and commit the found records by feeding them
        to merge_items().

        :param item_list: A list of items to commit in addition to .pending
        """
        if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
            return
        if item_list is None:
            item_list = []
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            self._preallocate()
            if not os.path.getsize(self.pending_file):
                if item_list:
                    self.merge_items(item_list)
                return
            with open(self.pending_file, 'r+b') as fp:
                for entry in fp.read().split(':'):
                    if entry:
                        try:
                            self._commit_puts_load(item_list, entry)
                        except Exception:
                            self.logger.exception(
                                _('Invalid pending entry %(file)s: %(entry)s'),
                                {'file': self.pending_file, 'entry': entry})
                if item_list:
                    self.merge_items(item_list)
                try:
                    os.ftruncate(fp.fileno(), 0)
                except OSError as err:
                    if err.errno != errno.ENOENT:
                        raise
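Every example on this page funnels writers through lock_parent_directory(). The real helper lives in swift.common.utils and differs in detail (for one, it sleeps cooperatively under eventlet); the following is only a minimal, self-contained sketch of the idea, assuming the semantics are "hold an exclusive flock on a .lock file in the parent directory, polling until a timeout":

import errno
import fcntl
import os
import time
from contextlib import contextmanager

class LockTimeout(Exception):
    pass

@contextmanager
def lock_parent_directory(filename, timeout=10):
    # Take an exclusive, non-blocking flock on <parent>/.lock and poll
    # until it is acquired or the timeout expires.
    lockpath = os.path.join(os.path.dirname(filename), '.lock')
    fd = os.open(lockpath, os.O_WRONLY | os.O_CREAT)
    try:
        deadline = time.time() + timeout
        while True:
            try:
                fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except OSError as err:
                if err.errno not in (errno.EACCES, errno.EAGAIN):
                    raise
                if time.time() >= deadline:
                    raise LockTimeout('%ss: %s' % (timeout, lockpath))
                time.sleep(0.01)
        yield True
    finally:
        os.close(fd)  # closing the descriptor drops the flock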
Example #2
    def reclaim(self, age_timestamp, sync_timestamp):
        """
        Delete reclaimable rows and metadata from the db.

        By default this method will delete rows from the db_contains_type table
        that are marked deleted and whose created_at timestamp is <
        age_timestamp, and deletes rows from incoming_sync and outgoing_sync
        where the updated_at timestamp is < sync_timestamp. In addition, this
        calls the :meth:`_reclaim_metadata` method.

        Subclasses may reclaim other items by overriding :meth:`_reclaim`.

        :param age_timestamp: max created_at timestamp of object rows to delete
        :param sync_timestamp: max updated_at timestamp of sync rows to delete
        """
        if not self._skip_commit_puts():
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                self._commit_puts()
        marker = ''
        finished = False
        while not finished:
            with self.get() as conn:
                marker = self._reclaim(conn, age_timestamp, marker)
                if not marker:
                    finished = True
                    self._reclaim_other_stuff(conn, age_timestamp,
                                              sync_timestamp)
                conn.commit()
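The while-loop above relies on a batching contract: _reclaim() removes at most one page of rows per call and returns a resume marker, or '' once nothing is left, so the broker can commit between batches instead of holding one long transaction. A hedged sketch of that contract against a toy table (the table name, columns, and page size are assumptions for illustration, not Swift's definitions):

import sqlite3

RECLAIM_PAGE_SIZE = 10  # deliberately tiny here to exercise the batching

def _reclaim(conn, age_timestamp, marker):
    # Delete one page of reclaimable rows; return '' when nothing remains
    # so the caller's loop can finish up and commit.
    batch = conn.execute('''
        SELECT name FROM object
        WHERE deleted = 1 AND created_at < ? AND name > ?
        ORDER BY name LIMIT ?
    ''', (age_timestamp, marker, RECLAIM_PAGE_SIZE)).fetchall()
    if not batch:
        return ''
    marker = batch[-1][0]
    conn.execute('''
        DELETE FROM object
        WHERE deleted = 1 AND created_at < ? AND name <= ?
    ''', (age_timestamp, marker))
    return marker

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE object (name TEXT, deleted INT, created_at TEXT)')
conn.executemany('INSERT INTO object VALUES (?, 1, ?)',
                 [('obj%03d' % i, '1') for i in range(25)])
marker = ''
while True:
    marker = _reclaim(conn, '2', marker)
    if not marker:
        break
assert conn.execute('SELECT COUNT(*) FROM object').fetchone()[0] == 0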
Example #3
    def reclaim(self, age_timestamp, sync_timestamp):
        """
        Delete rows from the db_contains_type table that are marked deleted
        and whose created_at timestamp is < age_timestamp.  Also deletes rows
        from incoming_sync and outgoing_sync where the updated_at timestamp is
        < sync_timestamp.

        In addition, this calls the DatabaseBroker's :func:`_reclaim` method.

        :param age_timestamp: max created_at timestamp of object rows to delete
        :param sync_timestamp: max updated_at timestamp of sync rows to delete
        """
        if self.db_file != ':memory:' and os.path.exists(self.pending_file):
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                self._commit_puts()
        with self.get() as conn:
            conn.execute('''
                DELETE FROM %s WHERE deleted = 1 AND %s < ?
            ''' % (self.db_contains_type, self.db_reclaim_timestamp),
                (age_timestamp,))
            try:
                conn.execute('''
                    DELETE FROM outgoing_sync WHERE updated_at < ?
                ''', (sync_timestamp,))
                conn.execute('''
                    DELETE FROM incoming_sync WHERE updated_at < ?
                ''', (sync_timestamp,))
            except sqlite3.OperationalError as err:
                # Old dbs didn't have updated_at in the _sync tables.
                if 'no such column: updated_at' not in str(err):
                    raise
            DatabaseBroker._reclaim(self, conn, age_timestamp)
            conn.commit()
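Note the two substitution mechanisms in the DELETE above: the table and column names (self.db_contains_type, self.db_reclaim_timestamp) are interpolated with % because they are trusted class attributes, while the timestamps are bound as ? parameters; SQLite placeholders can stand in for values but never for identifiers. A standalone illustration with a hypothetical table:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE object (name TEXT, deleted INT, created_at TEXT)')
conn.execute('INSERT INTO object VALUES (?, ?, ?)', ('o1', 1, '1'))
table, ts_col = 'object', 'created_at'  # trusted attributes, not user input
conn.execute('DELETE FROM %s WHERE deleted = 1 AND %s < ?' % (table, ts_col),
             ('2',))
assert conn.execute('SELECT COUNT(*) FROM object').fetchone()[0] == 0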
Example #4
    def _commit_puts(self, item_list=None):
        """
        Scan for .pending files and commit the found records by feeding them
        to merge_items().

        :param item_list: A list of items to commit in addition to .pending
        """
        if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
            return
        if item_list is None:
            item_list = []
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            self._preallocate()
            if not os.path.getsize(self.pending_file):
                if item_list:
                    self.merge_items(item_list)
                return
            with open(self.pending_file, 'r+b') as fp:
                for entry in fp.read().split(':'):
                    if entry:
                        try:
                            self._commit_puts_load(item_list, entry)
                        except Exception:
                            self.logger.exception(
                                _('Invalid pending entry %(file)s: %(entry)s'),
                                {'file': self.pending_file, 'entry': entry})
                if item_list:
                    self.merge_items(item_list)
                try:
                    os.ftruncate(fp.fileno(), 0)
                except OSError as err:
                    if err.errno != errno.ENOENT:
                        raise
Example #5
 def put_record(self, record):
     if self.db_file == ':memory:':
         self.merge_items([record])
         return
     if not os.path.exists(self.db_file):
         raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
     with lock_parent_directory(self.pending_file, self.pending_timeout):
         pending_size = 0
         try:
             pending_size = os.path.getsize(self.pending_file)
         except OSError as err:
             if err.errno != errno.ENOENT:
                 raise
         if pending_size > PENDING_CAP:
             self._commit_puts([record])
         else:
             with open(self.pending_file, 'a+b') as fp:
                 # Colons aren't used in base64 encoding; so they are our
                 # delimiter
                 fp.write(':')
                 fp.write(
                     pickle.dumps(
                         self.make_tuple_for_pickle(record),
                         protocol=PICKLE_PROTOCOL).encode('base64'))
                 fp.flush()
Example #6
 def start(self):
     """Start up the ring master"""
     self.logger.notice("Ring-Master starting up")
     self.logger.notice("-> Entering ring orchestration loop.")
     while True:
         try:
             self.pause_if_asked()
             if self.in_change_window():
                 for btype in sorted(self.builder_files.keys()):
                     with lock_parent_directory(self.builder_files[btype],
                                                self.lock_timeout):
                         ring_changed = self.orchestration_pass(btype)
                     if ring_changed:
                         sleep(self.recheck_after_change_interval)
                     else:
                         sleep(self.recheck_interval)
             else:
                 self.logger.debug('Not in change window')
                 sleep(60)
         except exceptions.LockTimeout:
             self.logger.exception('Orchestration LockTimeout Encountered')
         except Exception:
             self.logger.exception('Orchestration Error')
             sleep(60)
         sleep(1)
Example #7
 def _validate_file(self, filename):
     """Validate md5 of file"""
     if self._changed(filename):
         self.logger.debug("updating md5")
         with lock_parent_directory(self.swiftdir, self.lock_timeout):
             self.last_tstamp[filename] = stat(filename).st_mtime
             self.current_md5[filename] = get_md5sum(filename)
Example #8
 def _validate_file(self, filename):
     """Validate md5 of file"""
     if self._changed(filename):
         self.logger.debug("updating md5")
         with lock_parent_directory(self.swiftdir, self.lock_timeout):
             self.last_tstamp[filename] = stat(filename).st_mtime
             self.current_md5[filename] = get_md5sum(filename)
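get_md5sum() is referenced but never shown in these examples. A plausible chunked implementation under that assumed name (not the project's verbatim helper) streams the file so large ring and builder files are never read into memory whole:

import hashlib

def get_md5sum(filename, chunk_size=65536):
    # Hash the file in fixed-size chunks rather than one big read().
    md5 = hashlib.md5()
    with open(filename, 'rb') as fp:
        for chunk in iter(lambda: fp.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()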
Example #9
def main(arguments=None):
    global argv, backup_dir, builder, builder_file, ring_file
    if arguments:
        argv = arguments
    else:
        argv = sys_argv

    if len(argv) < 2:
        print "swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" % \
              globals()
        print Commands.default.__doc__.strip()
        print
        cmds = [c for c, f in Commands.__dict__.iteritems()
                if f.__doc__ and c[0] != '_' and c != 'default']
        cmds.sort()
        for cmd in cmds:
            print Commands.__dict__[cmd].__doc__.strip()
            print
        print parse_search_value.__doc__.strip()
        print
        for line in wrap(' '.join(cmds), 79, initial_indent='Quick list: ',
                         subsequent_indent='            '):
            print line
        print ('Exit codes: 0 = operation successful\n'
               '            1 = operation completed with warnings\n'
               '            2 = error')
        exit(EXIT_SUCCESS)

    builder_file, ring_file = parse_builder_ring_filename_args(argv)

    if exists(builder_file):
        builder = RingBuilder.load(builder_file)
    elif len(argv) < 3 or argv[2] not in ('create', 'write_builder'):
        print 'Ring Builder file does not exist: %s' % argv[1]
        exit(EXIT_ERROR)

    backup_dir = pathjoin(dirname(argv[1]), 'backups')
    try:
        mkdir(backup_dir)
    except OSError as err:
        if err.errno != EEXIST:
            raise

    if len(argv) == 2:
        command = "default"
    else:
        command = argv[2]
    if argv[0].endswith('-safe'):
        try:
            with lock_parent_directory(abspath(argv[1]), 15):
                Commands.__dict__.get(command, Commands.unknown.im_func)()
        except exceptions.LockTimeout:
            print "Ring/builder dir currently locked."
            exit(2)
    else:
        Commands.__dict__.get(command, Commands.unknown.im_func)()
Example #10
def main(arguments=None):
    global argv, backup_dir, builder, builder_file, ring_file
    if arguments:
        argv = arguments
    else:
        argv = sys_argv

    if len(argv) < 2:
        print "swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" % \
              globals()
        print Commands.default.__doc__.strip()
        print
        cmds = [c for c, f in Commands.__dict__.iteritems()
                if f.__doc__ and c[0] != '_' and c != 'default']
        cmds.sort()
        for cmd in cmds:
            print Commands.__dict__[cmd].__doc__.strip()
            print
        print parse_search_value.__doc__.strip()
        print
        for line in wrap(' '.join(cmds), 79, initial_indent='Quick list: ',
                         subsequent_indent='            '):
            print line
        print('Exit codes: 0 = operation successful\n'
              '            1 = operation completed with warnings\n'
              '            2 = error')
        exit(EXIT_SUCCESS)

    builder_file, ring_file = parse_builder_ring_filename_args(argv)

    if exists(builder_file):
        builder = RingBuilder.load(builder_file)
    elif len(argv) < 3 or argv[2] not in ('create', 'write_builder'):
        print 'Ring Builder file does not exist: %s' % argv[1]
        exit(EXIT_ERROR)

    backup_dir = pathjoin(dirname(argv[1]), 'backups')
    try:
        mkdir(backup_dir)
    except OSError as err:
        if err.errno != EEXIST:
            raise

    if len(argv) == 2:
        command = "default"
    else:
        command = argv[2]
    if argv[0].endswith('-safe'):
        try:
            with lock_parent_directory(abspath(argv[1]), 15):
                Commands.__dict__.get(command, Commands.unknown.im_func)()
        except exceptions.LockTimeout:
            print "Ring/builder dir currently locked."
            exit(2)
    else:
        Commands.__dict__.get(command, Commands.unknown.im_func)()
Example #11
 def delete_db(self, object_file):
     hash_dir = os.path.dirname(object_file)
     suf_dir = os.path.dirname(hash_dir)
     with lock_parent_directory(object_file):
         shutil.rmtree(hash_dir, True)
     try:
         os.rmdir(suf_dir)
     except OSError as err:
         if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
             self.logger.exception(_("ERROR while trying to clean up %s") % suf_dir)
Example #12
    def put_object(self,
                   name,
                   timestamp,
                   size,
                   content_type,
                   etag,
                   deleted=0,
                   storage_policy_index=0):
        """
        Creates an object in the DB with its metadata.

        :param name: object name to be created
        :param timestamp: timestamp of when the object was created
        :param size: object size
        :param content_type: object content-type
        :param etag: object etag
        :param deleted: if True, marks the object as deleted and sets the
                        deleted_at timestamp to timestamp
        :param storage_policy_index: the storage policy index for the object
        """
        record = {
            'name': name,
            'created_at': timestamp,
            'size': size,
            'content_type': content_type,
            'etag': etag,
            'deleted': deleted,
            'storage_policy_index': storage_policy_index
        }
        if self.db_file == ':memory:':
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        pending_size = 0
        try:
            pending_size = os.path.getsize(self.pending_file)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        if pending_size > PENDING_CAP:
            self._commit_puts([record])
        else:
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                with open(self.pending_file, 'a+b') as fp:
                    # Colons aren't used in base64 encoding; so they are our
                    # delimiter
                    fp.write(':')
                    fp.write(
                        pickle.dumps(
                            (name, timestamp, size, content_type, etag,
                             deleted, storage_policy_index),
                            protocol=PICKLE_PROTOCOL).encode('base64'))
                    fp.flush()
Example #13
 def delete_db(self, object_file):
     hash_dir = os.path.dirname(object_file)
     suf_dir = os.path.dirname(hash_dir)
     with lock_parent_directory(object_file):
         shutil.rmtree(hash_dir, True)
     try:
         os.rmdir(suf_dir)
     except OSError as err:
         if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
             self.logger.exception(
                 _('ERROR while trying to clean up %s') % suf_dir)
Example #14
 def test_ringmaster_validate_locked_dir(self):
     self._setup_builder_rings()
     rma = RingMasterApp({'swiftdir': self.testdir, 'log_path': self.test_log_path, 'locktimeout': "0.1"})
     for i in rma.current_md5:
         self.assertEquals(rma._changed(i), False)
     self._setup_builder_rings(count=5)
     for i in rma.current_md5:
         t = time.time() - 300
         os.utime(i, (t, t))
     with lock_parent_directory(self.testdir):
         for i in rma.current_md5:
             self.assertRaises(LockTimeout, rma._validate_file, i)
Example #15
 def _commit_puts_stale_ok(self):
     """
     Catch failures of _commit_puts() if broker is intended for
     reading of stats, and thus does not care for pending updates.
     """
     if self.db_file == ":memory:" or not os.path.exists(self.pending_file):
         return
     try:
         with lock_parent_directory(self.pending_file, self.pending_timeout):
             self._commit_puts()
     except LockTimeout:
         if not self.stale_reads_ok:
             raise
Example #16
File: db.py Project: mahak/swift
 def _commit_puts_stale_ok(self):
     """
     Catch failures of _commit_puts() if broker is intended for
     reading of stats, and thus does not care for pending updates.
     """
     if self._skip_commit_puts():
         return
     try:
         with lock_parent_directory(self.pending_file,
                                    self.pending_timeout):
             self._commit_puts()
     except (LockTimeout, sqlite3.OperationalError):
         if not self.stale_reads_ok:
             raise
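The pattern in Examples #15, #16, #18, and #21 trades freshness for availability: a broker opened only for stats reporting swallows LockTimeout and serves slightly stale data, while one that needs fresh data re-raises. The control flow reduced to a runnable toy (every name here is a stand-in for the broker's machinery):

class LockTimeout(Exception):
    pass

def commit_puts_stale_ok(commit, stale_reads_ok):
    try:
        commit()  # would run under lock_parent_directory()
    except LockTimeout:
        if not stale_reads_ok:
            raise  # readers that need fresh data still see the error

def busy_commit():
    raise LockTimeout()

commit_puts_stale_ok(busy_commit, stale_reads_ok=True)   # swallowed
try:
    commit_puts_stale_ok(busy_commit, stale_reads_ok=False)
except LockTimeout:
    pass                                                 # re-raised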
Example #17
 def delete_db(self, object_file):
     hash_dir = os.path.dirname(object_file)
     suf_dir = os.path.dirname(hash_dir)
     with lock_parent_directory(object_file):
         shutil.rmtree(hash_dir, True)
     try:
         os.rmdir(suf_dir)
     except OSError as err:
         if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
             self.logger.exception(
                 _('ERROR while trying to clean up %s') % suf_dir)
     self.stats['remove'] += 1
     device_name = self.extract_device(object_file)
     self.logger.increment('removes.' + device_name)
Example #18
 def _commit_puts_stale_ok(self):
     """
     Catch failures of _commit_puts() if broker is intended for
     reading of stats, and thus does not care for pending updates.
     """
     if self._skip_commit_puts():
         return
     try:
         with lock_parent_directory(self.pending_file,
                                    self.pending_timeout):
             self._commit_puts()
     except (LockTimeout, sqlite3.OperationalError):
         if not self.stale_reads_ok:
             raise
Example #19
 def delete_db(self, object_file):
     hash_dir = os.path.dirname(object_file)
     suf_dir = os.path.dirname(hash_dir)
     with lock_parent_directory(object_file):
         shutil.rmtree(hash_dir, True)
     try:
         os.rmdir(suf_dir)
     except OSError as err:
         if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
             self.logger.exception(
                 _('ERROR while trying to clean up %s') % suf_dir)
     self.stats['remove'] += 1
     device_name = self.extract_device(object_file)
     self.logger.increment('removes.' + device_name)
Example #20
    def put_container(self, name, put_timestamp, delete_timestamp,
                      object_count, bytes_used):
        """
        Create a container with the given attributes.

        :param name: name of the container to create
        :param put_timestamp: put_timestamp of the container to create
        :param delete_timestamp: delete_timestamp of the container to create
        :param object_count: number of objects in the container
        :param bytes_used: number of bytes used by the container
        """
        if delete_timestamp > put_timestamp and \
                object_count in (None, '', 0, '0'):
            deleted = 1
        else:
            deleted = 0
        record = {
            'name': name,
            'put_timestamp': put_timestamp,
            'delete_timestamp': delete_timestamp,
            'object_count': object_count,
            'bytes_used': bytes_used,
            'deleted': deleted
        }
        if self.db_file == ':memory:':
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        pending_size = 0
        try:
            pending_size = os.path.getsize(self.pending_file)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        if pending_size > PENDING_CAP:
            self._commit_puts([record])
        else:
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                with open(self.pending_file, 'a+b') as fp:
                    # Colons aren't used in base64 encoding; so they are our
                    # delimiter
                    fp.write(':')
                    fp.write(
                        pickle.dumps(
                            (name, put_timestamp, delete_timestamp,
                             object_count, bytes_used, deleted),
                            protocol=PICKLE_PROTOCOL).encode('base64'))
                    fp.flush()
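The deleted flag computed above is 1 only when the delete superseded the put and the container holds no objects; the mixed-type membership test tolerates counts arriving as None, empty string, int, or numeric string. The predicate in isolation:

def is_deleted(put_timestamp, delete_timestamp, object_count):
    # Same test as put_container(): delete newer than put, and empty.
    return (delete_timestamp > put_timestamp
            and object_count in (None, '', 0, '0'))

assert is_deleted('1', '2', 0)
assert is_deleted('1', '2', '0')
assert not is_deleted('2', '1', 0)   # the put is newer than the delete
assert not is_deleted('1', '2', 5)   # container still holds objects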
Example #21
 def _commit_puts_stale_ok(self):
     """
     Catch failures of _commit_puts() if broker is intended for
     reading of stats, and thus does not care for pending updates.
     """
     if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
         return
     try:
         with lock_parent_directory(self.pending_file,
                                    self.pending_timeout):
             self._commit_puts()
     except LockTimeout:
         if not self.stale_reads_ok:
             raise
Example #22
    def put_container(self, name, put_timestamp, delete_timestamp, object_count, bytes_used):
        """
        Create a container with the given attributes.

        :param name: name of the container to create
        :param put_timestamp: put_timestamp of the container to create
        :param delete_timestamp: delete_timestamp of the container to create
        :param object_count: number of objects in the container
        :param bytes_used: number of bytes used by the container
        """
        if delete_timestamp > put_timestamp and object_count in (None, "", 0, "0"):
            deleted = 1
        else:
            deleted = 0
        record = {
            "name": name,
            "put_timestamp": put_timestamp,
            "delete_timestamp": delete_timestamp,
            "object_count": object_count,
            "bytes_used": bytes_used,
            "deleted": deleted,
        }
        if self.db_file == ":memory:":
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        pending_size = 0
        try:
            pending_size = os.path.getsize(self.pending_file)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        if pending_size > PENDING_CAP:
            self._commit_puts([record])
        else:
            with lock_parent_directory(self.pending_file, self.pending_timeout):
                with open(self.pending_file, "a+b") as fp:
                    # Colons aren't used in base64 encoding; so they are our
                    # delimiter
                    fp.write(":")
                    fp.write(
                        pickle.dumps(
                            (name, put_timestamp, delete_timestamp, object_count, bytes_used, deleted),
                            protocol=PICKLE_PROTOCOL,
                        ).encode("base64")
                    )
                    fp.flush()
Example #23
    def put_container(self, name, put_timestamp, delete_timestamp,
                      object_count, bytes_used, storage_policy_index):
        """
        Create a container with the given attributes.

        :param name: name of the container to create
        :param put_timestamp: put_timestamp of the container to create
        :param delete_timestamp: delete_timestamp of the container to create
        :param object_count: number of objects in the container
        :param bytes_used: number of bytes used by the container
        :param storage_policy_index:  the storage policy for this container
        """
        if delete_timestamp > put_timestamp and \
                object_count in (None, '', 0, '0'):
            deleted = 1
        else:
            deleted = 0
        record = {'name': name, 'put_timestamp': put_timestamp,
                  'delete_timestamp': delete_timestamp,
                  'object_count': object_count,
                  'bytes_used': bytes_used,
                  'deleted': deleted,
                  'storage_policy_index': storage_policy_index}
        if self.db_file == ':memory:':
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        pending_size = 0
        try:
            pending_size = os.path.getsize(self.pending_file)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        if pending_size > PENDING_CAP:
            self._commit_puts([record])
        else:
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                with open(self.pending_file, 'a+b') as fp:
                    # Colons aren't used in base64 encoding; so they are our
                    # delimiter
                    fp.write(':')
                    fp.write(pickle.dumps(
                        (name, put_timestamp, delete_timestamp, object_count,
                         bytes_used, deleted, storage_policy_index),
                        protocol=PICKLE_PROTOCOL).encode('base64'))
                    fp.flush()
Example #24
    def put_object(self, name, timestamp, size, content_type, etag, deleted=0, storage_policy_index=0):
        """
        Creates an object in the DB with its metadata.

        :param name: object name to be created
        :param timestamp: timestamp of when the object was created
        :param size: object size
        :param content_type: object content-type
        :param etag: object etag
        :param deleted: if True, marks the object as deleted and sets the
                        deleted_at timestamp to timestamp
        :param storage_policy_index: the storage policy index for the object
        """
        record = {
            "name": name,
            "created_at": timestamp,
            "size": size,
            "content_type": content_type,
            "etag": etag,
            "deleted": deleted,
            "storage_policy_index": storage_policy_index,
        }
        if self.db_file == ":memory:":
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        pending_size = 0
        try:
            pending_size = os.path.getsize(self.pending_file)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        if pending_size > PENDING_CAP:
            self._commit_puts([record])
        else:
            with lock_parent_directory(self.pending_file, self.pending_timeout):
                with open(self.pending_file, "a+b") as fp:
                    # Colons aren't used in base64 encoding; so they are our
                    # delimiter
                    fp.write(":")
                    fp.write(
                        pickle.dumps(
                            (name, timestamp, size, content_type, etag, deleted, storage_policy_index),
                            protocol=PICKLE_PROTOCOL,
                        ).encode("base64")
                    )
                    fp.flush()
Example #25
    def dump_builder(self):
        """Write out new builder files

        :param builder: The builder to dump
        :param builder_file: The builder file to write to
        """
        with lock_parent_directory(self.builder_file, 15):
            bfile, bmd5 = self._make_backup()
            print "Backed up %s to %s (%s)" % (self.builder_file, bfile, bmd5)
            fd, tmppath = mkstemp(dir=dirname(self.builder_file),
                                  suffix='.tmp.builder')
            pickle.dump(self.builder.to_dict(), fdopen(fd, 'wb'), protocol=2)
            rename(tmppath, self.builder_file)
        print "Success. %s updated. (%s)" % (
            self.builder_file, self.get_md5sum(self.builder_file))
        print "Rebalance still required."
Example #26
    def dump_builder(self):
        """Write out new builder files

        :param builder: The builder to dump
        :param builder_file: The builder file to write to
        """
        with lock_parent_directory(self.builder_file, 15):
            bfile, bmd5 = self._make_backup()
            print "Backed up %s to %s (%s)" % (self.builder_file, bfile, bmd5)
            fd, tmppath = mkstemp(
                dir=dirname(self.builder_file), suffix='.tmp.builder')
            pickle.dump(self.builder.to_dict(), fdopen(fd, 'wb'), protocol=2)
            rename(tmppath, self.builder_file)
        print "Success. %s updated. (%s)" % (
            self.builder_file, self.get_md5sum(self.builder_file))
        print "Rebalance still required."
Example #27
 def test_ringmaster_validate_locked_dir(self):
     self._setup_builder_rings()
     rma = RingMasterApp({
         'swiftdir': self.testdir,
         'log_path': self.test_log_path,
         'locktimeout': "0.1"
     })
     for i in rma.current_md5:
         self.assertEquals(rma._changed(i), False)
     self._setup_builder_rings(count=5)
     for i in rma.current_md5:
         t = time.time() - 300
         os.utime(i, (t, t))
     with lock_parent_directory(self.testdir):
         for i in rma.current_md5:
             self.assertRaises(LockTimeout, rma._validate_file, i)
Example #28
    def put_container(self, name, put_timestamp, delete_timestamp, object_count, bytes_used):
        """
        Create a container with the given attributes.

        :param name: name of the container to create
        :param put_timestamp: put_timestamp of the container to create
        :param delete_timestamp: delete_timestamp of the container to create
        :param object_count: number of objects in the container
        :param bytes_used: number of bytes used by the container
        """
        if delete_timestamp > put_timestamp and object_count in (None, "", 0, "0"):
            deleted = 1
        else:
            deleted = 0
        record = {
            "name": name,
            "put_timestamp": put_timestamp,
            "delete_timestamp": delete_timestamp,
            "object_count": object_count,
            "bytes_used": bytes_used,
            "deleted": deleted,
        }
        if self.db_file == ":memory:":
            self.merge_items([record])
            return
        commit = False
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            with open(self.pending_file, "a+b") as fp:
                # Colons aren't used in base64 encoding; so they are our
                # delimiter
                fp.write(":")
                fp.write(
                    pickle.dumps(
                        (name, put_timestamp, delete_timestamp, object_count, bytes_used, deleted),
                        protocol=PICKLE_PROTOCOL,
                    ).encode("base64")
                )
                fp.flush()
                if fp.tell() > PENDING_CAP:
                    commit = True
        if commit:
            self._commit_puts()
Example #29
    def put_container(self, name, put_timestamp, delete_timestamp,
                      object_count, bytes_used):
        """
        Create a container with the given attributes.

        :param name: name of the container to create
        :param put_timestamp: put_timestamp of the container to create
        :param delete_timestamp: delete_timestamp of the container to create
        :param object_count: number of objects in the container
        :param bytes_used: number of bytes used by the container
        """
        if delete_timestamp > put_timestamp and \
                object_count in (None, '', 0, '0'):
            deleted = 1
        else:
            deleted = 0
        record = {
            'name': name,
            'put_timestamp': put_timestamp,
            'delete_timestamp': delete_timestamp,
            'object_count': object_count,
            'bytes_used': bytes_used,
            'deleted': deleted
        }
        if self.db_file == ':memory:':
            self.merge_items([record])
            return
        commit = False
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            with open(self.pending_file, 'a+b') as fp:
                # Colons aren't used in base64 encoding; so they are our
                # delimiter
                fp.write(':')
                fp.write(
                    pickle.dumps((name, put_timestamp, delete_timestamp,
                                  object_count, bytes_used, deleted),
                                 protocol=PICKLE_PROTOCOL).encode('base64'))
                fp.flush()
                if fp.tell() > PENDING_CAP:
                    commit = True
        if commit:
            self._commit_puts()
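Unlike Examples #12 and #20, which stat the pending file before appending, Examples #28 and #29 measure it with fp.tell() after the write, reusing the open handle instead of a separate os.path.getsize() call, and they defer _commit_puts() until the lock is released (with no extra record, since the record was already appended). A minimal demonstration that tell() reports the post-append offset:

import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
with open(path, 'a+b') as fp:
    fp.write(b':abc')
    fp.flush()
    # tell() already reflects the append; no second stat() is needed
    assert fp.tell() == 4 == os.path.getsize(path)
os.remove(path)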
Example #30
 def _commit_puts(self, item_list=None):
     """Handles commiting rows in .pending files."""
     if self.db_file == ":memory:" or not os.path.exists(self.pending_file):
         return
     if item_list is None:
         item_list = []
     with lock_parent_directory(self.pending_file, self.pending_timeout):
         self._preallocate()
         if not os.path.getsize(self.pending_file):
             if item_list:
                 self.merge_items(item_list)
             return
         with open(self.pending_file, "r+b") as fp:
             for entry in fp.read().split(":"):
                 if entry:
                     try:
                         (name, put_timestamp, delete_timestamp, object_count, bytes_used, deleted) = pickle.loads(
                             entry.decode("base64")
                         )
                         item_list.append(
                             {
                                 "name": name,
                                 "put_timestamp": put_timestamp,
                                 "delete_timestamp": delete_timestamp,
                                 "object_count": object_count,
                                 "bytes_used": bytes_used,
                                 "deleted": deleted,
                             }
                         )
                     except Exception:
                         self.logger.exception(
                             _("Invalid pending entry %(file)s: %(entry)s"),
                             {"file": self.pending_file, "entry": entry},
                         )
             if item_list:
                 self.merge_items(item_list)
             try:
                 os.ftruncate(fp.fileno(), 0)
             except OSError as err:
                 if err.errno != errno.ENOENT:
                     raise
Example #31
 def _commit_puts(self, item_list=None):
     """Handles commiting rows in .pending files."""
     if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
         return
     if item_list is None:
         item_list = []
     with lock_parent_directory(self.pending_file, self.pending_timeout):
         self._preallocate()
         if not os.path.getsize(self.pending_file):
             if item_list:
                 self.merge_items(item_list)
             return
         with open(self.pending_file, 'r+b') as fp:
             for entry in fp.read().split(':'):
                 if entry:
                     try:
                         (name, put_timestamp, delete_timestamp,
                                 object_count, bytes_used, deleted) = \
                             pickle.loads(entry.decode('base64'))
                         item_list.append({
                             'name': name,
                             'put_timestamp': put_timestamp,
                             'delete_timestamp': delete_timestamp,
                             'object_count': object_count,
                             'bytes_used': bytes_used,
                             'deleted': deleted
                         })
                     except Exception:
                         self.logger.exception(
                             _('Invalid pending entry %(file)s: %(entry)s'),
                             {
                                 'file': self.pending_file,
                                 'entry': entry
                             })
             if item_list:
                 self.merge_items(item_list)
             try:
                 os.ftruncate(fp.fileno(), 0)
              except OSError as err:
                 if err.errno != errno.ENOENT:
                     raise
Example #32
    def put_object(self, name, timestamp, size, content_type, etag,
                   deleted=0, metadata=None):
        """
        Creates an object in the DB with its metadata.

        :param name: object name to be created
        :param timestamp: timestamp of when the object was created
        :param size: object size
        :param content_type: object content-type
        :param etag: object etag
        :param deleted: if True, marks the object as deleted and sets the
                        deleted_at timestamp to timestamp
        """
        record = {'name': name, 'created_at': timestamp, 'size': size,
                  'content_type': content_type, 'etag': etag,
                  'metadata': metadata, 'deleted': deleted}
        if self.db_file == ':memory:':
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        pending_size = 0
        try:
            pending_size = os.path.getsize(self.pending_file)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        if pending_size > PENDING_CAP:
            self._commit_puts([record])
        else:
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                with open(self.pending_file, 'a+b') as fp:
                    # Colons aren't used in base64 encoding; so they are our
                    # delimiter
                    fp.write(':')
                    fp.write(pickle.dumps(
                        (name, timestamp, size, content_type, etag, deleted,
                         metadata),
                        protocol=PICKLE_PROTOCOL).encode('base64'))
                    fp.flush()
Example #33
    def put_record(self, record):
        """
        Put a record into the DB. If the DB has an associated pending file with
        space then the record is appended to that file and a commit to the DB
        is deferred. If the DB is in-memory or its pending file is full then
        the record will be committed immediately.

        :param record: a record to be added to the DB.
        :raises DatabaseConnectionError: if the DB file does not exist or if
            ``skip_commits`` is True.
        :raises LockTimeout: if a timeout occurs while waiting to take a lock
            to write to the pending file.
        """
        if self._db_file == ':memory:':
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        if self.skip_commits:
            raise DatabaseConnectionError(self.db_file, 'commits not accepted')
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            pending_size = 0
            try:
                pending_size = os.path.getsize(self.pending_file)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            if pending_size > PENDING_CAP:
                self._commit_puts([record])
            else:
                with open(self.pending_file, 'a+b') as fp:
                    # Colons aren't used in base64 encoding; so they are our
                    # delimiter
                    fp.write(b':')
                    fp.write(
                        base64.b64encode(
                            pickle.dumps(self.make_tuple_for_pickle(record),
                                         protocol=PICKLE_PROTOCOL)))
                    fp.flush()
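This Python 3 variant makes the pending-file framing explicit: each record is pickled, base64-encoded, and prefixed with b':', a byte base64 never emits, which is exactly what lets _commit_puts() split the file on colons. A self-contained round trip of that framing (the helper names are illustrative, not Swift's):

import base64
import pickle

PICKLE_PROTOCOL = 2  # assumed here; the examples import this constant

def encode_pending_entry(record_tuple):
    # One framed entry: ':' delimiter, then base64(pickle(record)).
    return b':' + base64.b64encode(
        pickle.dumps(record_tuple, protocol=PICKLE_PROTOCOL))

def decode_pending_file(raw):
    # Mirror of the _commit_puts() loop: split on ':', skip empty chunks.
    for entry in raw.split(b':'):
        if entry:
            yield pickle.loads(base64.b64decode(entry))

buf = encode_pending_entry(('obj', '12345.678', 0)) * 2  # two entries
assert list(decode_pending_file(buf)) == [('obj', '12345.678', 0)] * 2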
Example #34
File: db.py Project: mahak/swift
    def put_record(self, record):
        """
        Put a record into the DB. If the DB has an associated pending file with
        space then the record is appended to that file and a commit to the DB
        is deferred. If the DB is in-memory or its pending file is full then
        the record will be committed immediately.

        :param record: a record to be added to the DB.
        :raises DatabaseConnectionError: if the DB file does not exist or if
            ``skip_commits`` is True.
        :raises LockTimeout: if a timeout occurs while waiting to take a lock
            to write to the pending file.
        """
        if self._db_file == ':memory:':
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        if self.skip_commits:
            raise DatabaseConnectionError(self.db_file,
                                          'commits not accepted')
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            pending_size = 0
            try:
                pending_size = os.path.getsize(self.pending_file)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            if pending_size > PENDING_CAP:
                self._commit_puts([record])
            else:
                with open(self.pending_file, 'a+b') as fp:
                    # Colons aren't used in base64 encoding; so they are our
                    # delimiter
                    fp.write(b':')
                    fp.write(base64.b64encode(pickle.dumps(
                        self.make_tuple_for_pickle(record),
                        protocol=PICKLE_PROTOCOL)))
                    fp.flush()
Example #35
File: db.py Project: mahak/swift
    def reclaim(self, age_timestamp, sync_timestamp):
        """
        Delete reclaimable rows and metadata from the db.

        By default this method will delete rows from the db_contains_type table
        that are marked deleted and whose created_at timestamp is <
        age_timestamp, and deletes rows from incoming_sync and outgoing_sync
        where the updated_at timestamp is < sync_timestamp. In addition, this
        calls the :meth:`_reclaim_metadata` method.

        Subclasses may reclaim other items by overriding :meth:`_reclaim`.

        :param age_timestamp: max created_at timestamp of object rows to delete
        :param sync_timestamp: max updated_at timestamp of sync rows to delete
        """
        if not self._skip_commit_puts():
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                self._commit_puts()
        with self.get() as conn:
            self._reclaim(conn, age_timestamp, sync_timestamp)
            self._reclaim_metadata(conn, age_timestamp)
            conn.commit()
Example #36
 def put_record(self, record):
     if self.db_file == ':memory:':
         self.merge_items([record])
         return
     if not os.path.exists(self.db_file):
         raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
     with lock_parent_directory(self.pending_file, self.pending_timeout):
         pending_size = 0
         try:
             pending_size = os.path.getsize(self.pending_file)
         except OSError as err:
             if err.errno != errno.ENOENT:
                 raise
         if pending_size > PENDING_CAP:
             self._commit_puts([record])
         else:
             with open(self.pending_file, 'a+b') as fp:
                 # Colons aren't used in base64 encoding; so they are our
                 # delimiter
                 fp.write(':')
                 fp.write(pickle.dumps(
                     self.make_tuple_for_pickle(record),
                     protocol=PICKLE_PROTOCOL).encode('base64'))
                 fp.flush()
Example #37
    def initialize(self, put_timestamp=None, storage_policy_index=None):
        """
        Create the DB

        The storage_policy_index is passed through to the subclass's
        ``_initialize`` method.  It is ignored by ``AccountBroker``.

        :param put_timestamp: internalized timestamp of initial PUT request
        :param storage_policy_index: only required for containers
        """
        if self.db_file == ':memory:':
            tmp_db_file = None
            conn = get_db_connection(self.db_file, self.timeout)
        else:
            mkdirs(self.db_dir)
            fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
            os.close(fd)
            conn = sqlite3.connect(tmp_db_file, check_same_thread=False,
                                   factory=GreenDBConnection, timeout=0)
        # creating dbs implicitly does a lot of transactions, so we
        # pick fast, unsafe options here and do a big fsync at the end.
        with closing(conn.cursor()) as cur:
            cur.execute('PRAGMA synchronous = OFF')
            cur.execute('PRAGMA temp_store = MEMORY')
            cur.execute('PRAGMA journal_mode = MEMORY')
        conn.create_function('chexor', 3, chexor)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.executescript("""
            CREATE TABLE outgoing_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TABLE incoming_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
        """)
        if not put_timestamp:
            put_timestamp = Timestamp(0).internal
        self._initialize(conn, put_timestamp,
                         storage_policy_index=storage_policy_index)
        conn.commit()
        if tmp_db_file:
            conn.close()
            with open(tmp_db_file, 'r+b') as fp:
                os.fsync(fp.fileno())
            with lock_parent_directory(self.db_file, self.pending_timeout):
                if os.path.exists(self.db_file):
                    # It's as if there was a "condition" where different parts
                    # of the system were "racing" each other.
                    raise DatabaseAlreadyExists(self.db_file)
                renamer(tmp_db_file, self.db_file)
            self.conn = get_db_connection(self.db_file, self.timeout)
        else:
            self.conn = conn
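The triggers created above are what maintain the updated_at column that reclaim() later filters on: every INSERT or UPDATE on a _sync table restamps the affected row with the current unix time. A standalone check of one trigger's behavior:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript("""
    CREATE TABLE outgoing_sync (
        remote_id TEXT UNIQUE,
        sync_point INTEGER,
        updated_at TEXT DEFAULT 0
    );
    CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
    BEGIN
        UPDATE outgoing_sync
        SET updated_at = STRFTIME('%s', 'NOW')
        WHERE ROWID = new.ROWID;
    END;
""")
conn.execute('INSERT INTO outgoing_sync (remote_id, sync_point) '
             'VALUES (?, ?)', ('r1', 7))
(updated_at,) = conn.execute(
    'SELECT updated_at FROM outgoing_sync').fetchone()
assert int(updated_at) > 0  # the trigger stamped a unix-time string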
Example #38
class Replicator(Daemon):
    """
    Implements the logic for directing db replication.
    """

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf)
        self.root = conf.get('devices', '/srv/node')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
                              ('true', 't', '1', 'on', 'yes', 'y')
        self.port = int(conf.get('bind_port', self.default_port))
        concurrency = int(conf.get('concurrency', 8))
        self.cpool = GreenPool(size=concurrency)
        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.ring = ring.Ring(os.path.join(swift_dir, self.ring_file))
        self.per_diff = int(conf.get('per_diff', 1000))
        self.run_pause = int(conf.get('run_pause', 30))
        self.vm_test_mode = conf.get(
            'vm_test_mode', 'no').lower() in ('yes', 'true', 'on', '1')
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
        self._zero_stats()

    def _zero_stats(self):
        """Zero out the stats."""
        self.stats = {'attempted': 0, 'success': 0, 'failure': 0, 'ts_repl': 0,
                      'no_change': 0, 'hashmatch': 0, 'rsync': 0, 'diff': 0,
                      'remove': 0, 'empty': 0, 'remote_merge': 0,
                      'start': time.time()}

    def _report_stats(self):
        """Report the current stats to the logs."""
        self.logger.info(
            _('Attempted to replicate %(count)d dbs in %(time).5f seconds '
              '(%(rate).5f/s)'),
            {'count': self.stats['attempted'],
             'time': time.time() - self.stats['start'],
             'rate': self.stats['attempted'] /
                        (time.time() - self.stats['start'] + 0.0000001)})
        self.logger.info(_('Removed %(remove)d dbs') % self.stats)
        self.logger.info(_('%(success)s successes, %(failure)s failures')
            % self.stats)
        self.logger.info(' '.join(['%s:%s' % item for item in
             self.stats.items() if item[0] in
             ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl', 'empty')]))

    def _rsync_file(self, db_file, remote_file, whole_file=True):
        """
        Sync a single file using rsync. Used by _rsync_db to handle syncing.

        :param db_file: file to be synced
        :param remote_file: remote location to sync the DB file to
        :param whole_file: if True, uses rsync's --whole-file flag

        :returns: True if the sync was successful, False otherwise
        """
        popen_args = ['rsync', '--quiet', '--no-motd',
                      '--timeout=%s' % int(math.ceil(self.node_timeout)),
                      '--contimeout=%s' % int(math.ceil(self.conn_timeout))]
        if whole_file:
            popen_args.append('--whole-file')
        popen_args.extend([db_file, remote_file])
        proc = subprocess.Popen(popen_args)
        proc.communicate()
        if proc.returncode != 0:
            self.logger.error(_('ERROR rsync failed with %(code)s: %(args)s'),
                              {'code': proc.returncode, 'args': popen_args})
        return proc.returncode == 0

    def _rsync_db(self, broker, device, http, local_id,
            replicate_method='complete_rsync', replicate_timeout=None):
        """
        Sync a whole db using rsync.

        :param broker: DB broker object of DB to be synced
        :param device: device to sync to
        :param http: ReplConnection object
        :param local_id: unique ID of the local database replica
        :param replicate_method: remote operation to perform after rsync
        :param replicate_timeout: timeout to wait in seconds
        """
        if self.vm_test_mode:
            remote_file = '%s::%s%s/%s/tmp/%s' % (device['ip'],
                    self.server_type, device['port'], device['device'],
                    local_id)
        else:
            remote_file = '%s::%s/%s/tmp/%s' % (device['ip'],
                    self.server_type, device['device'], local_id)
        mtime = os.path.getmtime(broker.db_file)
        if not self._rsync_file(broker.db_file, remote_file):
            return False
        # perform block-level sync if the db was modified during the first sync
        if os.path.exists(broker.db_file + '-journal') or \
                    os.path.getmtime(broker.db_file) > mtime:
            # grab a lock so nobody else can modify it
            with broker.lock():
                if not self._rsync_file(broker.db_file, remote_file, False):
                    return False
        with Timeout(replicate_timeout or self.node_timeout):
            response = http.replicate(replicate_method, local_id)
        return response and response.status >= 200 and response.status < 300

    def _usync_db(self, point, broker, http, remote_id, local_id):
        """
        Sync a db by sending all records since the last sync.

        :param point: synchronization high water mark between the replicas
        :param broker: database broker object
        :param http: ReplConnection object for the remote server
        :param remote_id: database id for the remote replica
        :param local_id: database id for the local replica

        :returns: boolean indicating completion and success
        """
        self.stats['diff'] += 1
        self.logger.debug(_('Syncing chunks with %s'), http.host)
        sync_table = broker.get_syncs()
        objects = broker.get_items_since(point, self.per_diff)
        while len(objects):
            with Timeout(self.node_timeout):
                response = http.replicate('merge_items', objects, local_id)
            if not response or response.status >= 300 or response.status < 200:
                if response:
                    self.logger.error(_('ERROR Bad response %(status)s from '
                        '%(host)s'),
                        {'status': response.status, 'host': http.host})
                return False
            point = objects[-1]['ROWID']
            objects = broker.get_items_since(point, self.per_diff)
        with Timeout(self.node_timeout):
            response = http.replicate('merge_syncs', sync_table)
        if response and response.status >= 200 and response.status < 300:
            broker.merge_syncs([{'remote_id': remote_id,
                    'sync_point': point}], incoming=False)
            return True
        return False

    def _in_sync(self, rinfo, info, broker, local_sync):
        """
        Determine whether or not two replicas of a database are considered
        to be in sync.

        :param rinfo: remote database info
        :param info: local database info
        :param broker: database broker object
        :param local_sync: cached last sync point between replicas

        :returns: boolean indicating whether or not the replicas are in sync
        """
        if max(rinfo['point'], local_sync) >= info['max_row']:
            self.stats['no_change'] += 1
            return True
        if rinfo['hash'] == info['hash']:
            self.stats['hashmatch'] += 1
            broker.merge_syncs([{'remote_id': rinfo['id'],
                'sync_point': rinfo['point']}], incoming=False)
            return True

    def _http_connect(self, node, partition, db_file):
        """
        Make an http_connection using ReplConnection

        :param node: node dictionary from the ring
        :param partition: partition to send in the URL
        :param db_file: DB file

        :returns: ReplConnection object
        """
        return ReplConnection(node, partition,
                os.path.basename(db_file).split('.', 1)[0], self.logger)

    def _repl_to_node(self, node, broker, partition, info):
        """
        Replicate a database to a node.

        :param node: node dictionary from the ring to be replicated to
        :param broker: DB broker for the DB to be replicated
        :param partition: partition on the node to replicate to
        :param info: DB info as a dictionary of {'max_row', 'hash', 'id',
                     'created_at', 'put_timestamp', 'delete_timestamp',
                     'metadata'}

        :returns: True if successful, False otherwise
        """
        with ConnectionTimeout(self.conn_timeout):
            http = self._http_connect(node, partition, broker.db_file)
        if not http:
            self.logger.error(
                _('ERROR Unable to connect to remote server: %s'), node)
            return False
        with Timeout(self.node_timeout):
            response = http.replicate('sync', info['max_row'], info['hash'],
                info['id'], info['created_at'], info['put_timestamp'],
                info['delete_timestamp'], info['metadata'])
        if not response:
            return False
        elif response.status == HTTPNotFound.code:  # completely missing, rsync
            self.stats['rsync'] += 1
            return self._rsync_db(broker, node, http, info['id'])
        elif response.status == HTTPInsufficientStorage.code:
            raise DriveNotMounted()
        elif response.status >= 200 and response.status < 300:
            rinfo = simplejson.loads(response.data)
            local_sync = broker.get_sync(rinfo['id'], incoming=False)
            if self._in_sync(rinfo, info, broker, local_sync):
                return True
            # if the remote has fewer than half of the local rows, rsync
            # the whole db over and have the remote merge it.
            if rinfo['max_row'] / float(info['max_row']) < 0.5:
                self.stats['remote_merge'] += 1
                return self._rsync_db(broker, node, http, info['id'],
                        replicate_method='rsync_then_merge',
                        replicate_timeout=(info['count'] / 2000))
            # else send diffs over to the remote server
            return self._usync_db(max(rinfo['point'], local_sync),
                        broker, http, rinfo['id'], info['id'])
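
    # Worked check of the heuristic above (numbers are ours): with
    # rinfo['max_row'] == 400 and info['max_row'] == 1000,
    # 400 / 1000.0 == 0.4 < 0.5, so the whole DB is shipped via
    # rsync_then_merge instead of streaming hundreds of row diffs.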

    def _replicate_object(self, partition, object_file, node_id):
        """
        Replicate the db, choosing method based on whether or not it
        already exists on peers.

        :param partition: partition to be replicated to
        :param object_file: DB file name to be replicated
        :param node_id: node id of the node to be replicated to
        """
        self.logger.debug(_('Replicating db %s'), object_file)
        self.stats['attempted'] += 1
        try:
            broker = self.brokerclass(object_file, pending_timeout=30)
            broker.reclaim(time.time() - self.reclaim_age,
                           time.time() - (self.reclaim_age * 2))
            info = broker.get_replication_info()
        except Exception as e:
            if 'no such table' in str(e):
                self.logger.error(_('Quarantining DB %s'), object_file)
                quarantine_db(broker.db_file, broker.db_type)
            else:
                self.logger.exception(_('ERROR reading db %s'), object_file)
            self.stats['failure'] += 1
            return
        # The db is considered deleted if the delete_timestamp value is greater
        # than the put_timestamp, and there are no objects.
        delete_timestamp = 0
        try:
            delete_timestamp = float(info['delete_timestamp'])
        except ValueError:
            pass
        put_timestamp = 0
        try:
            put_timestamp = float(info['put_timestamp'])
        except ValueError:
            pass
        if delete_timestamp < (time.time() - self.reclaim_age) and \
                delete_timestamp > put_timestamp and \
                info['count'] in (None, '', 0, '0'):
            with lock_parent_directory(object_file):
                shutil.rmtree(os.path.dirname(object_file), True)
                self.stats['remove'] += 1
            return
        responses = []
        nodes = self.ring.get_part_nodes(int(partition))
        shouldbehere = bool([n for n in nodes if n['id'] == node_id])
        repl_nodes = [n for n in nodes if n['id'] != node_id]
        more_nodes = self.ring.get_more_nodes(int(partition))
        for node in repl_nodes:
            success = False
            try:
                success = self._repl_to_node(node, broker, partition, info)
            except DriveNotMounted:
                repl_nodes.append(next(more_nodes))
                self.logger.error(_('ERROR Remote drive not mounted %s'), node)
            except (Exception, TimeoutError):
                self.logger.exception(_('ERROR syncing %(file)s with node'
                        ' %(node)s'), {'file': object_file, 'node': node})
            self.stats['success' if success else 'failure'] += 1
            responses.append(success)
        if not shouldbehere and all(responses):
            # If the db shouldn't be on this node and has been successfully
            # synced to all of its peers, it can be removed.
            with lock_parent_directory(object_file):
                shutil.rmtree(os.path.dirname(object_file), True)
                self.stats['remove'] += 1
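
A minimal, self-contained sketch of the "deleted db" test used above; the
helper name and the float-fallback wrapper are ours, not Swift's:

import time

def looks_deleted(info, reclaim_age):
    def to_f(value):
        try:
            return float(value)
        except ValueError:
            return 0
    delete_ts = to_f(info['delete_timestamp'])
    put_ts = to_f(info['put_timestamp'])
    # deleted, past the reclaim window, and empty -> safe to remove on disk
    return (delete_ts < time.time() - reclaim_age and
            delete_ts > put_ts and
            info['count'] in (None, '', 0, '0'))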
Beispiel #39
0
    def delete_db(self, object_file):
        with lock_parent_directory(object_file):
            shutil.rmtree(os.path.dirname(object_file), True)
        self.stats['remove'] += 1
        device_name = self.extract_device(object_file)
        self.logger.increment('removes.' + device_name)
Beispiel #40
0
def main(arguments=None):
    global argv, backup_dir, builder, builder_file, ring_file
    if arguments:
        argv = arguments
    else:
        argv = sys_argv

    if len(argv) < 2:
        print("swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" %
              globals())
        print(Commands.default.__doc__.strip())
        print()
        cmds = [c for c, f in Commands.__dict__.items()
                if f.__doc__ and not c.startswith('_') and c != 'default']
        cmds.sort()
        for cmd in cmds:
            print(Commands.__dict__[cmd].__doc__.strip())
            print()
        print(parse_search_value.__doc__.strip())
        print()
        for line in wrap(' '.join(cmds), 79, initial_indent='Quick list: ',
                         subsequent_indent='            '):
            print(line)
        print('Exit codes: 0 = operation successful\n'
              '            1 = operation completed with warnings\n'
              '            2 = error')
        exit(EXIT_SUCCESS)

    builder_file, ring_file = parse_builder_ring_filename_args(argv)
    if builder_file != argv[1]:
        print('Note: using %s instead of %s as builder file' % (
              builder_file, argv[1]))

    try:
        builder = RingBuilder.load(builder_file)
    except exceptions.UnPicklingError as e:
        print(e)
        exit(EXIT_ERROR)
    except (exceptions.FileNotFoundError, exceptions.PermissionError) as e:
        if len(argv) < 3 or argv[2] not in ('create', 'write_builder'):
            print(e)
            exit(EXIT_ERROR)
    except Exception as e:
        print('Problem occurred while reading builder file: %s. %s' %
              (builder_file, e))
        exit(EXIT_ERROR)

    backup_dir = pathjoin(dirname(builder_file), 'backups')
    try:
        mkdir(backup_dir)
    except OSError as err:
        if err.errno != EEXIST:
            raise

    if len(argv) == 2:
        command = "default"
    else:
        command = argv[2]
    if argv[0].endswith('-safe'):
        try:
            with lock_parent_directory(abspath(builder_file), 15):
                Commands.__dict__.get(command, Commands.unknown.__func__)()
        except exceptions.LockTimeout:
            print("Ring/builder dir currently locked.")
            exit(2)
    else:
        Commands.__dict__.get(command, Commands.unknown.__func__)()
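
A quick usage sketch of the locking pattern behind the -safe mode above
(the path and timeout are illustrative):

from swift.common.utils import lock_parent_directory

with lock_parent_directory('/etc/swift/object.builder', 15):
    pass  # mutate and save the builder; concurrent -safe runs wait or time out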
Beispiel #41
0
def main(arguments=None):
    global argv, backup_dir, builder, builder_file, ring_file
    if arguments:
        argv = arguments
    else:
        argv = sys_argv

    if len(argv) < 2:
        print("swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" %
              globals())
        print(Commands.default.__doc__.strip())
        print()
        cmds = [
            c for c, f in Commands.__dict__.items()
            if f.__doc__ and c[0] != '_' and c != 'default'
        ]
        cmds.sort()
        for cmd in cmds:
            print(Commands.__dict__[cmd].__doc__.strip())
            print()
        print(parse_search_value.__doc__.strip())
        print()
        for line in wrap(' '.join(cmds),
                         79,
                         initial_indent='Quick list: ',
                         subsequent_indent='            '):
            print(line)
        print('Exit codes: 0 = operation successful\n'
              '            1 = operation completed with warnings\n'
              '            2 = error')
        exit(EXIT_SUCCESS)

    builder_file, ring_file = parse_builder_ring_filename_args(argv)
    if builder_file != argv[1]:
        print('Note: using %s instead of %s as builder file' %
              (builder_file, argv[1]))

    try:
        builder = RingBuilder.load(builder_file)
    except exceptions.UnPicklingError as e:
        print(e)
        exit(EXIT_ERROR)
    except (exceptions.FileNotFoundError, exceptions.PermissionError) as e:
        if len(argv) < 3 or argv[2] not in ('create', 'write_builder'):
            print(e)
            exit(EXIT_ERROR)
    except Exception as e:
        print('Problem occurred while reading builder file: %s. %s' %
              (builder_file, e))
        exit(EXIT_ERROR)

    backup_dir = pathjoin(dirname(builder_file), 'backups')
    try:
        mkdir(backup_dir)
    except OSError as err:
        if err.errno != EEXIST:
            raise

    if len(argv) == 2:
        command = "default"
    else:
        command = argv[2]
    if argv[0].endswith('-safe'):
        try:
            with lock_parent_directory(abspath(builder_file), 15):
                Commands.__dict__.get(command, Commands.unknown.__func__)()
        except exceptions.LockTimeout:
            print("Ring/builder dir currently locked.")
            exit(2)
    else:
        Commands.__dict__.get(command, Commands.unknown.__func__)()
Beispiel #42
0
def main(arguments=None):
    global argv, backup_dir, builder, builder_file, ring_file
    if arguments is not None:
        argv = arguments
    else:
        argv = sys_argv

    if len(argv) < 2:
        print("swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" %
              globals())
        print(Commands.default.__doc__.strip())
        print()
        cmds = [
            c for c in dir(Commands) if getattr(Commands, c).__doc__
            and not c.startswith('_') and c != 'default'
        ]
        cmds.sort()
        for cmd in cmds:
            print(getattr(Commands, cmd).__doc__.strip())
            print()
        print(parse_search_value.__doc__.strip())
        print()
        for line in wrap(' '.join(cmds),
                         79,
                         initial_indent='Quick list: ',
                         subsequent_indent='            '):
            print(line)
        print('Exit codes: 0 = operation successful\n'
              '            1 = operation completed with warnings\n'
              '            2 = error')
        exit(EXIT_SUCCESS)

    # 1. Parse the arguments into the (builder_file, ring_file) tuple;
    #    builder_file ends in .builder, ring_file ends in .ring.gz
    builder_file, ring_file = parse_builder_ring_filename_args(argv)
    if builder_file != argv[1]:
        print('Note: using %s instead of %s as builder file' %
              (builder_file, argv[1]))

    # 2. Load builder_file and create the RingBuilder instance
    try:
        builder = RingBuilder.load(builder_file)
    except exceptions.UnPicklingError as e:
        print(e)
        exit(EXIT_ERROR)
    except (exceptions.FileNotFoundError, exceptions.PermissionError) as e:
        if len(argv) < 3 or argv[2] not in ('create', 'write_builder'):
            print(e)
            exit(EXIT_ERROR)
    except Exception as e:
        print('Problem occurred while reading builder file: %s. %s' %
              (builder_file, e))
        exit(EXIT_ERROR)

    # 3. Create the backup directory
    backup_dir = pathjoin(dirname(builder_file), 'backups')
    try:
        mkdir(backup_dir)
    except OSError as err:
        if err.errno != EEXIST:
            raise

    if len(argv) == 2:
        command = "default"
    else:
        command = argv[2]
    # 4. Invoke the ring-handling method named by command
    if argv[0].endswith('-safe'):
        try:
            with lock_parent_directory(abspath(builder_file), 15):
                getattr(Commands, command, Commands.unknown)()
        except exceptions.LockTimeout:
            print("Ring/builder dir currently locked.")
            exit(2)
    else:
        getattr(Commands, command, Commands.unknown)()
Beispiel #44
0
    def initialize(self, put_timestamp=None):
        """
        Create the DB

        :param put_timestamp: timestamp of initial PUT request
        """
        if self.db_file == ":memory:":
            tmp_db_file = None
            conn = get_db_connection(self.db_file, self.timeout)
        else:
            mkdirs(self.db_dir)
            fd, tmp_db_file = mkstemp(suffix=".tmp", dir=self.db_dir)
            os.close(fd)
            conn = sqlite3.connect(tmp_db_file, check_same_thread=False, factory=GreenDBConnection, timeout=0)
        # creating dbs implicitly does a lot of transactions, so we
        # pick fast, unsafe options here and do a big fsync at the end.
        with closing(conn.cursor()) as cur:
            cur.execute("PRAGMA synchronous = OFF")
            cur.execute("PRAGMA temp_store = MEMORY")
            cur.execute("PRAGMA journal_mode = MEMORY")
        conn.create_function("chexor", 3, chexor)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.executescript(
            """
            CREATE TABLE outgoing_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TABLE incoming_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
        """
        )
        if not put_timestamp:
            put_timestamp = normalize_timestamp(0)
        self._initialize(conn, put_timestamp)
        conn.commit()
        if tmp_db_file:
            conn.close()
            with open(tmp_db_file, "r+b") as fp:
                os.fsync(fp.fileno())
            with lock_parent_directory(self.db_file, self.pending_timeout):
                if os.path.exists(self.db_file):
                    # It's as if there was a "condition" where different parts
                    # of the system were "racing" each other.
                    raise DatabaseAlreadyExists(self.db_file)
                renamer(tmp_db_file, self.db_file)
            self.conn = get_db_connection(self.db_file, self.timeout)
        else:
            self.conn = conn
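
The publish step above in isolation: build into a temp file, fsync it, then
rename it into place while holding the parent-directory lock (paths are
illustrative; note that Swift's renamer, unlike plain os.rename, also
creates missing parent directories):

import os
from tempfile import mkstemp

fd, tmp_db_file = mkstemp(suffix='.tmp', dir='/srv/dbs')
os.close(fd)
# ... write the database contents at tmp_db_file ...
with open(tmp_db_file, 'r+b') as fp:
    os.fsync(fp.fileno())  # make the file durable before publishing it
os.rename(tmp_db_file, '/srv/dbs/example.db')  # atomic on one filesystem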
Beispiel #45
0
    def initialize(self, put_timestamp=None):
        """
        Create the DB

        :param put_timestamp: timestamp of initial PUT request
        """
        if self.db_file == ':memory:':
            tmp_db_file = None
            conn = get_db_connection(self.db_file, self.timeout)
        else:
            mkdirs(self.db_dir)
            fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
            os.close(fd)
            conn = sqlite3.connect(tmp_db_file,
                                   check_same_thread=False,
                                   factory=GreenDBConnection,
                                   timeout=0)
        # creating dbs implicitly does a lot of transactions, so we
        # pick fast, unsafe options here and do a big fsync at the end.
        with closing(conn.cursor()) as cur:
            cur.execute('PRAGMA synchronous = OFF')
            cur.execute('PRAGMA temp_store = MEMORY')
            cur.execute('PRAGMA journal_mode = MEMORY')
        conn.create_function('chexor', 3, chexor)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.executescript("""
            CREATE TABLE outgoing_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TABLE incoming_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
        """)
        if not put_timestamp:
            put_timestamp = normalize_timestamp(0)
        self._initialize(conn, put_timestamp)
        conn.commit()
        if tmp_db_file:
            conn.close()
            with open(tmp_db_file, 'r+b') as fp:
                os.fsync(fp.fileno())
            with lock_parent_directory(self.db_file, self.pending_timeout):
                if os.path.exists(self.db_file):
                    # It's as if there was a "condition" where different parts
                    # of the system were "racing" each other.
                    raise DatabaseAlreadyExists(self.db_file)
                renamer(tmp_db_file, self.db_file)
            self.conn = get_db_connection(self.db_file, self.timeout)
        else:
            self.conn = conn
Beispiel #46
0
def main(arguments=None):
    global argv, backup_dir, builder, builder_file, ring_file
    if arguments is not None:
        argv = arguments
    else:
        argv = sys_argv

    if len(argv) < 2:
        print("swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" % globals())
        print(Commands.default.__doc__.strip())
        print()
        cmds = [c for c in dir(Commands) if getattr(Commands, c).__doc__ and not c.startswith("_") and c != "default"]
        cmds.sort()
        for cmd in cmds:
            print(getattr(Commands, cmd).__doc__.strip())
            print()
        print(parse_search_value.__doc__.strip())
        print()
        for line in wrap(" ".join(cmds), 79, initial_indent="Quick list: ", subsequent_indent="            "):
            print(line)
        print(
            "Exit codes: 0 = operation successful\n"
            "            1 = operation completed with warnings\n"
            "            2 = error"
        )
        exit(EXIT_SUCCESS)

    builder_file, ring_file = parse_builder_ring_filename_args(argv)
    if builder_file != argv[1]:
        print("Note: using %s instead of %s as builder file" % (builder_file, argv[1]))

    try:
        builder = RingBuilder.load(builder_file)
    except exceptions.UnPicklingError as e:
        print(e)
        exit(EXIT_ERROR)
    except (exceptions.FileNotFoundError, exceptions.PermissionError) as e:
        if len(argv) < 3 or argv[2] not in ("create", "write_builder"):
            print(e)
            exit(EXIT_ERROR)
    except Exception as e:
        print("Problem occurred while reading builder file: %s. %s" % (builder_file, e))
        exit(EXIT_ERROR)

    backup_dir = pathjoin(dirname(builder_file), "backups")
    try:
        mkdir(backup_dir)
    except OSError as err:
        if err.errno != EEXIST:
            raise

    if len(argv) == 2:
        command = "default"
    else:
        command = argv[2]
    if argv[0].endswith("-safe"):
        try:
            with lock_parent_directory(abspath(builder_file), 15):
                getattr(Commands, command, Commands.unknown)()
        except exceptions.LockTimeout:
            print("Ring/builder dir currently locked.")
            exit(2)
    else:
        getattr(Commands, command, Commands.unknown)()
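
For orientation, here is a minimal sketch of what a parent-directory lock
can look like; this is our illustration of the idea, not Swift's actual
implementation (which lives in swift.common.utils and takes a timeout):

import fcntl
import os
from contextlib import contextmanager

@contextmanager
def lock_parent_directory_sketch(filename):
    # Serialize writers by taking an exclusive flock on a lock file that
    # lives next to the target; real code would also honor a timeout.
    lock_path = os.path.join(os.path.dirname(filename), '.lock')
    fd = os.open(lock_path, os.O_CREAT | os.O_RDWR, 0o644)
    try:
        fcntl.flock(fd, fcntl.LOCK_EX)  # blocks until the lock is free
        yield
    finally:
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)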