Example 1
from datetime import timedelta  # BackupSimulator and parse_deltas come from the project

def main(argv):
    if "-h" in argv:
        print("./simulate.py [backup-timestamps]")
        return 1

    # Do some default simulation
    if not argv:
        s = BackupSimulator(parse_deltas("1d 7d 30d"))

        until = s.now + timedelta(days=17)
        while s.now <= until:
            s.go_by(timedelta(days=1))
            s.backup()

        for name in s.backups:
            print(name)

    # Simulate a backup with the timestamps given
    else:
        s = BackupSimulator(parse_deltas("1d 7d 30d"))
        s.add(list(argv))
        deleted, _ = s.expire()

        print("Deleted backups:")
        for name in deleted:
            print(name)

        print()
        print("Remaining backups:")
        for name in s.backups:
            print(name)
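For context, the snippet below sketches the contract this example assumes of parse_deltas: a space-separated spec such as '1d 7d 30d' becomes a sequence of datetime.timedelta values. The stand-in parser here is hypothetical; the project's real parse_deltas also handles other units and validation.

# Hypothetical stand-in, for illustration only -- the real parse_deltas
# also supports units other than days and validates its input.
from datetime import timedelta

def parse_deltas_sketch(spec):
    # '1d 7d 30d' -> [timedelta(days=1), timedelta(days=7), timedelta(days=30)]
    return [timedelta(days=int(token.rstrip('d'))) for token in spec.split()]

print(parse_deltas_sketch('1d 7d 30d'))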
Example 2
from datetime import timedelta  # BackupSimulator and parse_deltas come from the project

def main(argv):
    if '-h' in argv:
        print("./simulate.py [backup-timestamps]")
        return 1

    # Do some default simulation
    if not argv:
        s = BackupSimulator(parse_deltas('1d 7d 30d'))

        until = s.now + timedelta(days=17)
        while s.now <= until:
            s.go_by(timedelta(days=1))
            s.backup()

        for name in s.backups:
            print(name)

    # Simulate a backup with the timestamps given
    else:
        s = BackupSimulator(parse_deltas('1d 7d 30d'))
        s.add(list(argv))
        deleted, _ = s.expire()

        print("Deleted backups:")
        for name in deleted:
            print(name)

        print()
        print("Remaining backups:")
        for name in s.backups:
            print(name)
Example 3
    def __init__(self, deltas, expire_func=default_expire_func):
        # Accept either a raw delta spec string or a pre-parsed list.
        if isinstance(deltas, str):
            deltas = parse_deltas(deltas)
        self.deltas = deltas
        self.expire_func = expire_func
        self.now = datetime.now()       # requires: from datetime import datetime
        self.backups = OrderedDict()    # requires: from collections import OrderedDict
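Because of the isinstance check, the constructor accepts the deltas either raw or pre-parsed. A hypothetical usage sketch, assuming this is BackupSimulator's constructor as the surrounding examples suggest:

# Hypothetical usage -- both forms yield the same self.deltas, since a
# string argument is run through parse_deltas inside the constructor.
s1 = BackupSimulator('1d 7d 30d')                # raw string
s2 = BackupSimulator(parse_deltas('1d 7d 30d'))  # pre-parsed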
Example 4
    def read_volume(name):
        _log = _logger.new(volume_name=name)
        env_name = name.replace('-', '_').upper()
        deltas_str = os.environ.get('VOLUME_{}_DELTAS'.format(env_name))
        if not deltas_str:
            raise ConfigError(
                'A volume {} was defined, but VOLUME_{}_DELTAS is not '
                'set'.format(name, env_name))

        zone = os.environ.get('VOLUME_{}_ZONE'.format(env_name))
        if not zone:
            raise ConfigError(
                'A volume {} was defined, but VOLUME_{}_ZONE is not '
                'set'.format(name, env_name))

        _log = _log.bind(
            deltas_str=deltas_str,
            zone=zone,
        )

        rule = Rule(
            name=name,
            claim_name='',
            namespace='',
            deltas=parse_deltas(deltas_str),
            gce_disk=name,
            gce_disk_zone=zone,
        )

        _log.info(events.Rule.ADDED_FROM_CONFIG, rule=rule)

        return rule
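A hypothetical call, showing the environment variables read_volume expects. The variable names follow directly from the format strings above, while the volume name and zone are invented example values:

# Hypothetical setup -- 'my-data' and the zone are invented; dashes in
# the volume name become underscores in the environment variable names.
import os
os.environ['VOLUME_MY_DATA_DELTAS'] = '1d 7d 30d'
os.environ['VOLUME_MY_DATA_ZONE'] = 'europe-west1-b'
rule = read_volume('my-data')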
Example 5
def rule_from_pv(volume, use_claim_name=False):
    """Given a persistent volume object, create a backup role
    object. Can return None if this volume is not configured for
    backups, or is not suitable.

    `use_claim_name` - if the persistent volume is bound, and it's
    name is auto-generated, then prefer to use the name of the claim
    for the snapshot.
    """

    # TODO: Currently, K8s does not allow a PersistentVolumeClaim to
    # specify any annotations for the PersistentVolume a provisioner
    # would create. Indeed, this might never be possible. We might
    # want to follow the claimRef link and see if the claim specifies
    # any rules, and then use those.
    provider = volume.annotations.get('pv.kubernetes.io/provisioned-by')
    if provider != 'kubernetes.io/gce-pd':
        logger.debug('Volume {} not a GCE persistent disk', volume.name)
        return

    deltas_unparsed = volume.annotations.get('backup.kubernetes.io/deltas')
    if not deltas_unparsed:
        logger.debug('Volume {} does not define backup deltas (via {})',
                     volume.name, DELTA_ANNOTATION_KEY)
        return

    try:
        deltas = parse_deltas(deltas_unparsed)
    except ConfigError as e:
        logger.error(
            'Deltas defined by volume {} are not valid, error message was: {}',
            volume.name, e)
        return

    rule = Rule()
    rule.name = volume.name
    rule.namespace = volume.namespace
    rule.deltas = deltas
    rule.deltas_unparsed = deltas_unparsed
    rule.gce_disk = volume.obj['spec']['gcePersistentDisk']['pdName']

    # How can we know the zone? In theory, the storage class can
    # specify a zone; but if not specified there, K8s can choose a
    # random zone within the master region. So we really can't trust
    # that value anyway.
    # There is a label that gives a failure region, but labels aren't
    # really a trustworthy source for this.
    # Apparently, this is a thing in the Kubernetes source too, see:
    # getDiskByNameUnknownZone in pkg/cloudprovider/providers/gce/gce.go,
    # e.g. https://github.com/jsafrane/kubernetes/blob/2e26019629b5974b9a311a9f07b7eac8c1396875/pkg/cloudprovider/providers/gce/gce.go#L2455
    rule.gce_disk_zone = volume.labels.get(
        'failure-domain.beta.kubernetes.io/zone')

    claim_ref = volume.obj['spec'].get('claimRef')
    if use_claim_name and claim_ref:
        if volume.annotations.get(
                'kubernetes.io/createdby') == 'gce-pd-dynamic-provisioner':
            rule.claim_name = '{1}--{0}'.format(
                claim_ref['name'], claim_ref['namespace'])
    return rule
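For reference, the two annotations rule_from_pv checks before it produces a rule, written as the dict it reads. The keys are taken verbatim from the lookups above; the delta string is an example value:

# The annotation keys checked above; the deltas value is an example.
required_annotations = {
    'pv.kubernetes.io/provisioned-by': 'kubernetes.io/gce-pd',
    'backup.kubernetes.io/deltas': '1d 7d 30d',
}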
Example 6
    def job(self, deltas='1d 2d', name='test', **kwargs):
        """Make a job object."""
        opts = dict(target="$name-$date",
                    deltas=parse_deltas(deltas),
                    name=name,
                    sources=[self._tmpdir])
        opts.update(kwargs)
        return Job(**opts)
Example 7
    def job(self, deltas='1d 2d', name='test', **kwargs):
        """Make a job object."""
        opts = dict(
            target="$name-$date",
            deltas=parse_deltas(deltas),
            name=name,
            sources=[self._tmpdir])
        opts.update(kwargs)
        return Job(**opts)
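In a test this helper might be called as below; since opts.update(kwargs) runs last, any keyword overrides the defaults. The override values here are invented:

# Hypothetical test usage -- keyword arguments override the helper's
# defaults because opts.update(kwargs) is applied after they are set.
job = self.job(deltas='1d 7d', target='$name-custom-$date')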
Example 8
from datetime import timedelta  # BackupSimulator and parse_deltas come from the project

def main(argv):
    s = BackupSimulator(parse_deltas('1d 7d 30d'))

    until = s.now + timedelta(days=17)
    while s.now <= until:
        s.go_by(timedelta(days=1))
        s.backup()

    for name in s.backups:
        print(name)
Example 9
    def read_volume(name):
        env_name = name.replace('-', '_').upper()
        deltas = os.environ.get('VOLUME_{}_DELTAS'.format(env_name))
        if not deltas:
            raise ConfigError(
                'A volume {} was defined, but no deltas'.format(name))

        zone = os.environ.get('VOLUME_{}_ZONE'.format(env_name))
        if not zone:
            raise ConfigError(
                'A volume {} was defined, but no zone'.format(name))

        logger.info('Loading env-defined volume {} with deltas {}', name,
                    deltas)

        rule = Rule()
        rule.name = name
        rule.namespace = ''
        rule.deltas = parse_deltas(deltas)
        rule.deltas_unparsed = deltas
        rule.gce_disk = name
        rule.gce_disk_zone = zone
        return rule
Example 10
def load_config(text):
    """Load the config file and return a dict of jobs, with the local
    and global configurations merged.
    """
    config = yaml.safe_load(text)

    default_dateformat = config.pop('dateformat', None)
    default_deltas = parse_deltas(config.pop('deltas', None))
    default_target = require_placeholders(config.pop('target', None),
                                          ['name', 'date'], 'The global target')

    read_jobs = {}
    jobs_section = config.pop('jobs', None)
    if not jobs_section:
        raise ConfigError('config must define at least one job')
    for job_name, job_dict in jobs_section.items():
        job_dict = job_dict or {}
        # sources
        if 'sources' in job_dict and 'source' in job_dict:
            raise ConfigError(('%s: Use either the "source" or "sources" '+
                              'option, not both') % job_name)
        if 'source' in job_dict:
            sources = [job_dict.pop('source')]
        else:
            sources = job_dict.pop('sources', None)
        # aliases
        if 'aliases' in job_dict and 'alias' in job_dict:
            raise ConfigError(('%s: Use either the "alias" or "aliases" '+
                              'option, not both') % job_name)
        if 'alias' in job_dict:
            aliases = [job_dict.pop('alias')]
        else:
            aliases = job_dict.pop('aliases', None)
        # excludes
        if 'excludes' in job_dict and 'exclude' in job_dict:
            raise ConfigError(('%s: Use either the "excludes" or "exclude" '+
                              'option, not both') % job_name)
        if 'exclude' in job_dict:
            excludes = [job_dict.pop('exclude')]
        else:
            excludes = job_dict.pop('excludes', [])
        new_job = Job(**{
            'name': job_name,
            'sources': sources,
            'aliases': aliases,
            'excludes': excludes,
            'target': job_dict.pop('target', default_target),
            'force': job_dict.pop('force', False),
            'deltas': parse_deltas(job_dict.pop('deltas', None)) or default_deltas,
            'dateformat': job_dict.pop('dateformat', default_dateformat),
            'exec_before': job_dict.pop('exec_before', None),
            'exec_after': job_dict.pop('exec_after', None),
        })
        if not new_job.target:
            raise ConfigError('%s does not have a target name' % job_name)
        # Note: It's ok to define jobs without sources or deltas. Those
        # can only be used for selected commands, then.
        require_placeholders(new_job.target, ['date'], '%s: target' % job_name)
        if job_dict:
            raise ConfigError('%s has unsupported configuration values: %s' % (
                job_name, ", ".join(job_dict.keys())))

        read_jobs[job_name] = new_job

    # Return jobs, and all global keys not popped
    return read_jobs, config
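A minimal config that would pass load_config's checks, sketched as a call. The job name and paths are invented, and the global deltas and target apply to any job that does not override them:

# Hypothetical config -- 'documents' and the paths are invented values.
jobs, global_opts = load_config("""
deltas: 1d 7d 30d
target: /backups/$name-$date
jobs:
  documents:
    source: /home/user/documents
""")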