Beispiel #1
0
    def clone(self, id=None, src=None, backup=None, size=None):
        """
        This runs a clone job outside of the storage api,
        which is useful for performance testing backup restores
        (Example: storage tools clone volume-clone
          --backup volume-backup --src volume-original)

        :param id: id for the new clone volume
        :param src: id of the volume the backup was originally taken from
        :param backup: id of the backup to restore
        :param size: size of the clone in gigs; defaults to the size of src
        """
        # Set basic Logging
        logging.basicConfig()
        # Get the lunr logger
        log = logger.get_logger()
        # Output Debug level info
        log.logger.setLevel(logging.DEBUG)
        # Load the local storage configuration
        conf = LunrConfig.from_storage_conf()
        # Init the volume helper
        volume = VolumeHelper(conf)

        # Attempt to figure out the original volume size
        # (stored in bytes; lvcreate is handed gigabytes)
        size = size or str(volume.get(src)['size'] / 1073741824)
        # Size is in gigs; append the unit only when it is missing.
        # BUG FIX: the old test `re.match('G', size)` anchors at the START
        # of the string, so an explicit size like '10G' never matched and
        # was suffixed again, producing an invalid '10GG'.
        if not size.endswith('G'):
            size = size + 'G'
        # Create a tag to apply to the lvm volume
        tag = encode_tag(source_volume_id=src, backup_id=backup)
        # Create the volume
        execute('lvcreate', volume.volume_group,
                name=id, size=size, addtag=tag)
        # Get info for the newly created volume
        new = volume.get(id)

        with self.timeit():
            print("Starting Backup")
            # Restore volume from the backup
            volume.clone(new, src, backup)
Beispiel #2
0
 def setUp(self):
     """Route the lunr logger's output into an in-memory stream.

     Installs a StreamHandler backed by a StringIO buffer and a
     LunrFormatter so tests can assert on the exact formatted text.
     """
     stream = StringIO()
     handler = logging.StreamHandler(stream)
     formatter = logger.LunrFormatter('%(message)s')
     handler.setFormatter(formatter)
     lunr_log = logger.get_logger()
     lunr_log.setLevel(logging.DEBUG)
     # Attach to the wrapped stdlib logger exposed via .logger
     lunr_log.logger.addHandler(handler)
     self.stream, self.handler = stream, handler
     self.formatter, self.logger = formatter, lunr_log
     super(TestLunrFormatter, self).setUp()
Beispiel #3
0
 def setUp(self):
     """Capture LunrFormatter output in ``self.stream`` for assertions."""
     self.stream = StringIO()
     self.logger = logger.get_logger()
     self.handler = logging.StreamHandler(self.stream)
     self.formatter = logger.LunrFormatter('%(message)s')
     self.handler.setFormatter(self.formatter)
     self.logger.setLevel(logging.DEBUG)
     # The lunr logger exposes its underlying stdlib logger via .logger;
     # attach the handler there so records reach the in-memory stream.
     self.logger.logger.addHandler(self.handler)
     super(TestLunrFormatter, self).setUp()
Beispiel #4
0
def filter_factory(global_conf, **local_conf):
    """Paste filter factory producing a transaction-logging middleware.

    Assigns/propagates an X-Request-Id header, writes a one-line access
    summary via the module logger, and (when ``echo`` is enabled) logs the
    full request and response bodies at the configured ``level``.
    """
    section = 'filter:trans-logger'
    conf = LunrConfig({section: local_conf})

    echo = conf.bool(section, 'echo', False)
    level = conf.option(section, 'level', 'DEBUG', cast=log_level)
    name = conf.string(section, 'name', '')

    # Echoed bodies may go to a named logger; the summary always goes to
    # the module-level logger.
    global_logger = logger
    if name:
        local_logger = logger.get_logger(name)
    else:
        local_logger = global_logger

    def trans_logger_filter(app):
        @wsgify
        def log_response(req):
            req.headers['X-Request-Id'] = req.headers.get(
                'x-request-id', 'lunr-%s' % uuid4())
            logger.local.request_id = req.headers['x-request-id']
            try:
                if echo:
                    local_logger.log(level, 'REQUEST:\n%s', req)
                resp = req.get_response(app)
                resp.headers['X-Request-Id'] = req.headers['x-request-id']
                if req.params:
                    request_str = '?'.join((req.path, urlencode(req.params)))
                else:
                    request_str = req.path
                global_logger.info(' '.join(
                    str(x) for x in (
                        # add more fields here
                        req.remote_addr or '-',
                        '"%s %s"' % (req.method, request_str),
                        resp.status_int,
                        resp.content_length,
                    )))
                if echo:
                    local_logger.log(level, 'RESPONSE:\n%s', resp)
                return resp
            finally:
                # BUG FIX: always clear the thread-local request id, even if
                # the wrapped app raised; previously an exception skipped the
                # reset and the stale id leaked into unrelated requests
                # handled later on the same thread.
                logger.local.request_id = None

        return log_response

    return trans_logger_filter
Beispiel #5
0
    def backup(self, id=None, src=None, timestamp=None):
        """
        This runs a backup job outside of the storage api,
        which is useful for performance testing backups

        :param id: id to assign to the new backup
        :param src: id of the volume to back up
        :param timestamp: backup timestamp; defaults to the current time
        """
        # Set basic Logging
        logging.basicConfig()
        # Get the lunr logger
        log = logger.get_logger()
        # Output Debug level info
        log.logger.setLevel(logging.DEBUG)
        # Load the local storage configuration
        conf = LunrConfig.from_storage_conf()
        # If no time provided, use current time
        timestamp = timestamp or time()
        # Init our helpers
        volume = VolumeHelper(conf)
        backup = BackupHelper(conf)

        # Track the snapshot explicitly instead of probing locals(), which
        # silently breaks if the variable is ever renamed.
        snapshot = None
        try:
            # Create the snapshot
            snapshot = volume.create_snapshot(src, id, timestamp)

            # For testing non-snapshot speeds
            #snapshot = volume.get(src)
            #snapshot['backup_id'] = id
            #snapshot['origin'] = src
            #snapshot['timestamp'] = 1338410885.0
            #del snapshot['volume']

            # BUG FIX: pprint() writes the dict itself and returns None, so
            # the old `print("...", pprint(snapshot))` also emitted a
            # spurious None; print the label and the dict separately.
            print("Created snap-shot: ")
            pprint(snapshot)

            with self.timeit(snapshot['size']):
                # Backup the snapshot
                print("Starting Backup")
                backup.save(snapshot, id)

        finally:
            # Delete the snapshot if it was created
            if snapshot is not None:
                self._remove_volume(snapshot['path'])
Beispiel #6
0
    def backup(self, id=None, src=None, timestamp=None):
        """
        This runs a backup job outside of the storage api,
        which is useful for performance testing backups
        """
        # Turn on root logging and raise the lunr logger to DEBUG
        logging.basicConfig()
        lunr_log = logger.get_logger()
        lunr_log.logger.setLevel(logging.DEBUG)

        # Build both helpers from the local storage configuration
        conf = LunrConfig.from_storage_conf()
        # Default the backup timestamp to "now"
        if not timestamp:
            timestamp = time()
        volume = VolumeHelper(conf)
        backup = BackupHelper(conf)

        try:
            # Snapshot the source volume
            snap = volume.create_snapshot(src, id, timestamp)

            # For testing non-snapshot speeds
            #snap = volume.get(src)
            #snap['backup_id'] = id
            #snap['origin'] = src
            #snap['timestamp'] = 1338410885.0
            #del snap['volume']

            print("Created snap-shot: ", pprint(snap))

            with self.timeit(snap['size']):
                # Backup the snapshot
                print("Starting Backup")
                backup.save(snap, id)

        finally:
            # Delete the snapshot if it was created
            if 'snap' in locals():
                self._remove_volume(snap['path'])
Beispiel #7
0
def filter_factory(global_conf, **local_conf):
    """Paste filter factory: wrap an app with transaction logging.

    Tags each request/response with an X-Request-Id header and logs a
    one-line access summary; optionally echoes full bodies when ``echo``
    is set in the filter's config section.
    """
    section = 'filter:trans-logger'
    conf = LunrConfig({section: local_conf})

    echo = conf.bool(section, 'echo', False)
    level = conf.option(section, 'level', 'DEBUG', cast=log_level)
    name = conf.string(section, 'name', '')

    # Echoed bodies may go to a named logger; the access summary always
    # goes through the module-level logger.
    global_logger = logger
    local_logger = logger.get_logger(name) if name else global_logger

    def _trans_logger_filter(app):
        @wsgify
        def _logged(req):
            req.headers['X-Request-Id'] = req.headers.get(
                'x-request-id', 'lunr-%s' % uuid4())
            logger.local.request_id = req.headers['x-request-id']
            if echo:
                local_logger.log(level, 'REQUEST:\n%s', req)
            resp = req.get_response(app)
            resp.headers['X-Request-Id'] = req.headers['x-request-id']
            if req.params:
                request_str = '?'.join((req.path, urlencode(req.params)))
            else:
                request_str = req.path
            fields = (
                # add more fields here
                req.remote_addr or '-',
                '"%s %s"' % (req.method, request_str),
                resp.status_int,
                resp.content_length,
            )
            global_logger.info(' '.join(str(x) for x in fields))
            if echo:
                local_logger.log(level, 'RESPONSE:\n%s', resp)
            logger.local.request_id = None
            return resp
        return _logged
    return _trans_logger_filter
Beispiel #8
0
    def clone(self, id=None, src=None, backup=None, size=None):
        """
        This runs a clone job outside of the storage api,
        which is useful for performance testing backup restores
        (Example: storage tools clone volume-clone
          --backup volume-backup --src volume-original)
        """
        # Enable root logging and crank the lunr logger up to DEBUG
        logging.basicConfig()
        lunr_log = logger.get_logger()
        lunr_log.logger.setLevel(logging.DEBUG)

        # Build a volume helper from the local storage configuration
        conf = LunrConfig.from_storage_conf()
        volume = VolumeHelper(conf)

        # Fall back to the source volume's size (bytes -> gigabytes)
        if not size:
            size = str(volume.get(src)['size'] / 1073741824)
        # Size is in gigs
        if not re.match('G', size):
            size += 'G'

        # Tag the new lvm volume with its lineage, then create it
        lvm_tag = encode_tag(source_volume_id=src, backup_id=backup)
        execute('lvcreate', volume.volume_group,
                name=id, size=size, addtag=lvm_tag)

        # Look up info for the volume we just created
        new = volume.get(id)
        with self.timeit():
            print("Starting Backup")
            # Restore volume from the backup
            volume.clone(new, src, backup)
Beispiel #9
0
# See the License for the specific language governing permissions and
# limitations under the License.

from lunr.storage.helper.utils import directio, ProcessError, execute
from timeit import default_timer as Timer
from struct import unpack_from
from lunr.common import logger
from tempfile import mkdtemp
from shutil import rmtree
from mmap import mmap

import time
import os
import re

log = logger.get_logger()


class ScrubError(RuntimeError):
    """Scrub-specific runtime error (not raised within this excerpt)."""
    pass


class Scrub(object):
    """Volume scrubbing helper configured from the [scrub] section."""

    def __init__(self, conf):
        """Read scrub behaviour flags from *conf*'s [scrub] section."""
        # Presumably a dry-run switch — confirm against the scrub methods
        self._display_only = conf.bool('scrub', 'display-only', False)
        self._display_exceptions = conf.bool(
            'scrub', 'display-exceptions', False)
        # Throttle speed is in MB/s (0 default)
        self._throttle_speed = conf.float('scrub', 'throttle_speed', 0)
        # Buffer reused by scrub operations; starts empty
        self.scrub_buf = ''
Beispiel #10
0
from json import loads
import re
import socket
from time import mktime, time
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError

from sqlalchemy.sql import func
from sqlalchemy.exc import OperationalError

from lunr.common.exc import HTTPClientError
from lunr.db.models import Backup, Volume
from lunr.orbit import CronJob
from lunr.common import logger

log = logger.get_logger('orbit.suspects')


class EmptyResponse():
    """Minimal response stand-in that only carries an HTTP status code.

    Exposes getcode() like a urllib2 response object — presumably used
    where such a response is expected on error paths.
    """

    def __init__(self, code):
        # Status code reported by getcode()
        self.code = code

    def getcode(self):
        """Return the stored HTTP status code."""
        return self.code


class Suspects(CronJob):
    def __init__(self, conf):
        # Initialize cron scheduling state on the CronJob base class.
        CronJob.__init__(self)
        # Look-back window and polling cadence as timedelta-style strings
        # (e.g. 'hours=1') — presumably parsed elsewhere; TODO confirm.
        self.span = conf.string('suspects', 'span', 'hours=1')
        self.interval = conf.string('suspects', 'interval', 'seconds=5')
Beispiel #11
0
from lunr.orbit.jobs.suspects import AuditSuspects, BackupSuspects,\
        RestoreSuspects, ScrubSuspects, PruneSuspects
from lunr.orbit.daemon import Daemon, DaemonError
from lunr.orbit.jobs.detach import Detach
from lunr.common.config import LunrConfig
from lunr.orbit import Cron, CronError
from argparse import ArgumentParser
from lunr.common import logger
from lunr import db
import signal
import time
import sys
import os
import re

log = logger.get_logger('orbit')


def main():
    parser = ArgumentParser(description="Orbit, lunr's maintiance droid")
    parser.add_argument('-c',
                        '--config',
                        action='store',
                        help="Provide a config file for orbit to use")
    parser.add_argument('-p',
                        '--pid',
                        action='store',
                        help="Specify the file name of the pid to use")
    parser.add_argument('-u',
                        '--user',
                        action='store',
Beispiel #12
0
import re
import socket
from time import mktime, time
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError

from sqlalchemy.sql import func
from sqlalchemy.exc import OperationalError

from lunr.common.exc import HTTPClientError
from lunr.db.models import Backup, Volume
from lunr.orbit import CronJob
from lunr.common import logger


log = logger.get_logger('orbit.suspects')


class EmptyResponse():
    """Minimal stand-in carrying only an HTTP status code via getcode()."""
    def __init__(self, code):
        # Status code reported by getcode()
        self.code = code

    def getcode(self):
        # Mirror the getcode() accessor of urllib2 response objects.
        return self.code


class Suspects(CronJob):

    def __init__(self, conf):
        # Initialize cron scheduling state on the CronJob base class.
        CronJob.__init__(self)
        # Look-back window as a timedelta-style string (e.g. 'hours=1');
        # presumably parsed elsewhere — TODO confirm.
        self.span = conf.string('suspects', 'span', 'hours=1')
Beispiel #13
0
# See the License for the specific language governing permissions and
# limitations under the License.

from lunr.storage.helper.utils import directio, ProcessError, execute
from timeit import default_timer as Timer
from struct import unpack_from
from lunr.common import logger
from tempfile import mkdtemp
from shutil import rmtree
from mmap import mmap

import time
import os
import re

log = logger.get_logger()


class ScrubError(RuntimeError):
    """Scrub-specific runtime error (not raised within this excerpt)."""
    pass


class Scrub(object):
    def __init__(self, conf):
        """Read scrub behaviour flags from *conf*'s [scrub] section."""
        # Presumably a dry-run switch — confirm against the scrub methods
        self._display_only = conf.bool('scrub', 'display-only', False)
        self._display_exceptions = conf.bool('scrub', 'display-exceptions',
                                             False)
        # Throttle speed is in MB/s
        self._throttle_speed = conf.float('scrub', 'throttle_speed', 0)
        # Buffer reused by scrub operations; starts empty
        self.scrub_buf = ''
Beispiel #14
0
def spawn(resource, job, *args, **kwargs):
    """
    Attempt to start job_name if not already running for those args.

    Double-forks so the job runs in an orphaned daemon process that takes
    ownership of `resource`; the calling process returns immediately.

    param resource: lock-able resource object the job process will own
    param job: job to run
    param args: args for job's run method
    keyword callback: callback function to pass to job
    keyword error_callback: error_callback function to pass to job
    keyword interruptible: if True, mark the resource owner interruptible
    keyword skip_fork: if True, run the job in-process (for testing)
    """
    callback = kwargs.pop('callback', None)
    error_callback = kwargs.pop('error_callback', None)
    interruptible = kwargs.pop('interruptible', False)

    # If we asked to skip fork for testing
    if kwargs.pop('skip_fork', False):
        return run(resource, job, callback, error_callback, args)

    # Fork Once to create a child
    pid = os.fork()
    if pid:
        # wait on the child to fork and exit to prevent zombie
        os.waitpid(pid, 0)
        # Our child now owns the resource, this avoids resource
        # file clean up when we the controller returns 200
        resource.owned = False
        return

    # Fork Twice to orphan the child
    pid = os.fork()
    if pid:
        # exit to orphan child, and release waiting parent
        os._exit(0)

    # Lock resource prior to read/write
    with resource:
        if interruptible:
            # Add the interruptible flag if process can be interrupted
            resource.acquire({'pid': os.getpid(), 'interruptible': True})
        else:
            # Re-assign the owner of the resource to us
            resource.acquire({'pid': os.getpid()})

    # NOTE: explict close of syslog handler to force reconnect and suppress
    # traceback when the next log message goes and finds it's sockets fd is
    # inexplictly no longer valid, this is obviously jank
    # Manually nuking the logging global lock is the best thing ever.
    logging._lock = threading.RLock()
    log = logger.get_logger()
    # get_logger() may return a wrapper exposing the stdlib logger via
    # .logger; fall back to the object itself otherwise.
    root = getattr(log, 'logger', log).root
    for handler in root.handlers:
        try:
            # Re-create log handlers RLocks incase we forked during a locked
            # write operation; Not doing this may result in a deadlock the
            # next time we write to a log handler
            handler.createLock()
            handler.close()
        except AttributeError:
            pass

    # Become Session leader
    os.setsid()
    # chdir root
    os.chdir('/')
    # Prevent GC close() race condition
    gc.collect()
    # close fd for api server's socket
    os.closerange(3, unix.getrlimit(unix.RLIMIT_NOFILE)[1])
    # Run the job and exit — assumes run() returns an int exit status
    os._exit(run(resource, job, callback, error_callback, args))
Beispiel #15
0
import re
import socket
from time import mktime, time
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError

from sqlalchemy.sql import func
from sqlalchemy.exc import OperationalError

from lunr.common.exc import HTTPClientError
from lunr.cinder import cinderclient
from lunr.db.models import Export
from lunr.orbit import CronJob
from lunr.common import logger

log = logger.get_logger('orbit.detach')


class Detach(CronJob):

    def __init__(self, conf, session):
        """Configure the detach cron job from the [detach]/[orbit] sections."""
        CronJob.__init__(self)
        self.conf = conf
        self._sess = session
        # Look-back window and polling cadence, parsed by self.parse from
        # timedelta-style strings ('hours=1', 'seconds=5')
        self.span = self.parse(conf.string('detach', 'span', 'hours=1'))
        self.interval = self.parse(
            conf.string('detach', 'interval', 'seconds=5'))
        # Orbit timeout (presumably seconds — TODO confirm units)
        self.timeout = conf.float('orbit', 'timeout', 120)

    def get(self, url, **kwargs):
        req = Request(url, urlencode(kwargs))
Beispiel #16
0
        RestoreSuspects, ScrubSuspects, PruneSuspects
from lunr.orbit.daemon import Daemon, DaemonError
from lunr.orbit.jobs.detach import Detach
from lunr.common.config import LunrConfig
from lunr.orbit import Cron, CronError
from argparse import ArgumentParser
from lunr.common import logger
from lunr import db
import signal
import time
import sys
import os
import re


log = logger.get_logger('orbit')


def main():
    """Parse command-line options for the orbit daemon.

    NOTE(review): this excerpt ends right after parse_args(); the rest of
    main() (command dispatch, daemon start/stop) is not visible here.
    """
    # NOTE(review): "maintiance" is a typo for "maintenance", but it is a
    # runtime help string so it is left untouched in this doc pass.
    parser = ArgumentParser(description="Orbit, lunr's maintiance droid")
    parser.add_argument('-c', '--config', action='store',
                        help="Provide a config file for orbit to use")
    parser.add_argument('-p', '--pid', action='store',
                        help="Specify the file name of the pid to use")
    parser.add_argument('-u', '--user', action='store',
                        help="Specify the user the daemon will run as")
    # Positional command; defaults to running in the foreground
    parser.add_argument(
        'command', nargs='?', default='foreground',
        help="(start|stop|status|foreground) defaults to foreground")
    options = parser.parse_args()
Beispiel #17
0
def spawn(resource, job, *args, **kwargs):
    """
    Attempt to start job_name if not already running for those args.

    Double-forks so the job runs in an orphaned daemon process that takes
    ownership of `resource`; the calling process returns immediately.

    param resource: lock-able resource object the job process will own
    param job: job to run
    param args: args for job's run method
    keyword callback: callback function to pass to job
    keyword error_callback: error_callback function to pass to job
    keyword interruptible: if True, mark the resource owner interruptible
    keyword skip_fork: if True, run the job in-process (for testing)
    """
    callback = kwargs.pop('callback', None)
    error_callback = kwargs.pop('error_callback', None)
    interruptible = kwargs.pop('interruptible', False)

    # If we asked to skip fork for testing
    if kwargs.pop('skip_fork', False):
        return run(resource, job, callback, error_callback, args)

    # Fork Once to create a child
    pid = os.fork()
    if pid:
        # wait on the child to fork and exit to prevent zombie
        os.waitpid(pid, 0)
        # Our child now owns the resource, this avoids resource
        # file clean up when we the controller returns 200
        resource.owned = False
        return

    # Fork Twice to orphan the child
    pid = os.fork()
    if pid:
        # exit to orphan child, and release waiting parent
        os._exit(0)

    # Lock resource prior to read/write
    with resource:
        if interruptible:
            # Add the interruptible flag if process can be interrupted
            resource.acquire({'pid': os.getpid(), 'interruptible': True})
        else:
            # Re-assign the owner of the resource to us
            resource.acquire({'pid': os.getpid()})

    # NOTE: explict close of syslog handler to force reconnect and suppress
    # traceback when the next log message goes and finds it's sockets fd is
    # inexplictly no longer valid, this is obviously jank
    # Manually nuking the logging global lock is the best thing ever.
    logging._lock = threading.RLock()
    log = logger.get_logger()
    # get_logger() may return a wrapper exposing the stdlib logger via
    # .logger; fall back to the object itself otherwise.
    root = getattr(log, 'logger', log).root
    for handler in root.handlers:
        try:
            # Re-create log handlers RLocks incase we forked during a locked
            # write operation; Not doing this may result in a deadlock the
            # next time we write to a log handler
            handler.createLock()
            handler.close()
        except AttributeError:
            pass

    # Become Session leader
    os.setsid()
    # chdir root
    os.chdir('/')
    # Prevent GC close() race condition
    gc.collect()
    # close fd for api server's socket
    os.closerange(3, unix.getrlimit(unix.RLIMIT_NOFILE)[1])
    # Run the job and exit — assumes run() returns an int exit status
    os._exit(run(resource, job, callback, error_callback, args))
Beispiel #18
0
import re
import socket
from time import mktime, time
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError

from sqlalchemy.sql import func
from sqlalchemy.exc import OperationalError

from lunr.common.exc import HTTPClientError
from lunr.cinder import cinderclient
from lunr.db.models import Export
from lunr.orbit import CronJob
from lunr.common import logger

log = logger.get_logger('orbit.detach')


class Detach(CronJob):
    def __init__(self, conf, session):
        # Initialize cron scheduling state on the CronJob base class.
        CronJob.__init__(self)
        # Polling cadence and look-back window, parsed by self.parse from
        # timedelta-style strings ('seconds=5', 'hours=1')
        self.interval = self.parse(
            conf.string('detach', 'interval', 'seconds=5'))
        self.span = self.parse(conf.string('detach', 'span', 'hours=1'))
        # Orbit timeout (presumably seconds — TODO confirm units)
        self.timeout = conf.float('orbit', 'timeout', 120)
        self._sess = session
        self.conf = conf

    def get(self, url, **kwargs):
        req = Request(url, urlencode(kwargs))
        req.get_method = lambda *args, **kwargs: 'GET'