Example #1
    def list_auto_snapshot_sets(self, tag=None):
        """
        Returns a list of zfs filesystems and volumes tagged with
        the "com.sun:auto-snapshot" property set to "true", either
        set locally or inherited. Snapshots are excluded from the
        returned result.

        Keyword Arguments:
        tag:
            A string indicating one of the standard auto-snapshot schedule
            tags to check (e.g. "frequent" will map to the tag:
            com.sun:auto-snapshot:frequent). If specified as a zfs property
            on a zfs dataset, the property corresponding to the tag will
            override the wildcard property: "com.sun:auto-snapshot".
            Default value = None
        """
        # Get the auto-snap property in two passes: check the
        # label/schedule specific override first, then fall back to the
        # global value for datasets not already classified.

        included = []
        excluded = []

        cmd = [
            ZFSCMD, "list", "-H", "-t", "filesystem,volume", "-o",
            "name,com.sun:auto-snapshot", "-s", "name"
        ]
        if tag:
            overrideprop = "com.sun:auto-snapshot:" + tag
            scmd = [
                ZFSCMD, "list", "-H", "-t", "filesystem,volume", "-o",
                "name," + overrideprop, "-s", "name"
            ]
            outdata, errdata = util.run_command(scmd)
            for line in outdata.rstrip().split('\n'):
                line = line.split()
                if line[1] == "true":
                    included.append(line[0])
                elif line[1] == "false":
                    excluded.append(line[0])
        outdata, errdata = util.run_command(cmd)
        for line in outdata.rstrip().split('\n'):
            line = line.split()
            # Only set values that aren't already set. Don't override.
            if line[0] in included or line[0] in excluded:
                continue
            # Dataset is not listed in either list.
            if line[1] == "true":
                included.append(line[0])
        return included
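A minimal usage sketch (assuming the Datasets container class from this module, as instantiated in Example #30; the calls mirror how the plugin code below uses this method):

    datasets = zfs.Datasets()
    # Datasets tagged com.sun:auto-snapshot:frequent=true, falling back
    # to the wildcard com.sun:auto-snapshot property where unset.
    frequent = datasets.list_auto_snapshot_sets("frequent")
    # Wildcard property only.
    everything = datasets.list_auto_snapshot_sets()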
Example #2
def get_default_schedules():
    """
    Finds the default schedules that are enabled (online, offline or degraded)
    """
    # This is not the fastest method but it is the safest: we need to
    # ensure that default schedules are processed in the pre-defined
    # order so that the overlap between them is adhered to
    # correctly. monthly->weekly->daily->hourly->frequent. They have
    # to be processed first and they HAVE to be in the correct order.
    _defaultSchedules = []
    for s in factoryDefaultSchedules:
        instanceName = "%s:%s" % (BASESVC, s)
        cmd = [smf.SVCSCMD, "-H", "-o", "state", instanceName]
        _scheddetaillock.acquire()
        try:
            outdata, errdata = util.run_command(cmd)
        finally:
            _scheddetaillock.release()
        result = outdata.rstrip()
        # Note that the schedules, being dependent on the time-slider service
        # itself will typically be in an offline state when enabled. They will
        # transition to an "online" state once time-slider itself comes
        # "online" to satisfy it's dependency
        if result == "online" or result == "offline" or result == "degraded":
            instance = AutoSnap(s)
            try:
                _defaultSchedules.append(instance.get_schedule_details())
            except RuntimeError as message:
                raise RuntimeError("Error getting schedule details for " + \
                                    "default auto-snapshot SMF instance:" + \
                                    "\n\t" + instanceName + "\nDetails:\n" + \
                                    str(message))
    return _defaultSchedules
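The loop depends on factoryDefaultSchedules already being ordered the way the comment demands; a sketch of the constant this implies (its real definition lives elsewhere in the module, so treat the exact spelling as an assumption):

    # Assumed ordering, inferred from the comment above:
    factoryDefaultSchedules = ("monthly", "weekly", "daily", "hourly", "frequent")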
Example #3
def get_custom_schedules():
    """
    Finds custom schedules, i.e. those other than the factory default
    'monthly', 'weekly', 'hourly', 'daily' and 'frequent' schedules
    """
    _customSchedules = []
    cmd = [smf.SVCSCMD, "-H", "-o", "state,FMRI", BASESVC]
    _scheddetaillock.acquire()
    try:
        outdata, errdata = util.run_command(cmd)
    finally:
        _scheddetaillock.release()

    for line in outdata.rstrip().split('\n'):
        line = line.rstrip().split()
        state = line[0]
        fmri = line[1]
        fmri = fmri.rsplit(":", 1)
        label = fmri[1]
        if label not in factoryDefaultSchedules:
            # Note that the schedules, being dependent on the time-slider service
            # itself will typically be in an offline state when enabled. They will
            # transition to an "online" state once time-slider itself comes
            # "online" to satisfy it's dependency
            if state == "online" or state == "offline" or state == "degraded":
                instance = AutoSnap(label)
                try:
                    _customSchedules.append(instance.get_schedule_details())
                except RuntimeError as message:
                    raise RuntimeError("Error getting schedule details " + \
                                        "for custom auto-snapshot SMF " + \
                                        "instance:\n\t" + label + "\n" + \
                                        "Details:\n" + str(message))
    return _customSchedules
Example #4
 def get_creation_time(self):
     if self.__creationTime is None:
         cmd = [
             ZFSCMD, "get", "-H", "-p", "-o", "value", "creation", self.name
         ]
         outdata, errdata = util.run_command(cmd)
         self.__creationTime = int(outdata.rstrip())
     return self.__creationTime
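With "-p", zfs prints the creation property as raw seconds since the epoch, which is why a bare int() suffices here. A short illustrative use (the snap object and value are assumed):

    import datetime
    ctime = snap.get_creation_time()               # e.g. 1700000000
    print(datetime.datetime.fromtimestamp(ctime))  # local wall-clock time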
Example #5
 def get_mountpoint(self):
     if self.__mountpoint is None:
         cmd = [ZFSCMD, "get", "-H", "-o", "value", "mountpoint",
                self.name]
         outdata, errdata = util.run_command(cmd)
         result = outdata.rstrip()
         self.__mountpoint = result
     return self.__mountpoint
Example #6
def list_zpools():
    """Returns a list of all zpools on the system"""
    result = []
    cmd = [ZPOOLCMD, "list", "-H", "-o", "name"]
    outdata, errdata = util.run_command(cmd)
    for line in outdata.rstrip().split('\n'):
        result.append(line.rstrip())
    return result
Example #7
    def get_prop(self, propgroup, propname):
        cmd = [SVCPROPCMD, "-c", "-p",
               propgroup + '/' + propname,
               self.instanceName]
        outdata, errdata = util.run_command(cmd)
        result = outdata.rstrip()

        return result
Example #8
 def __get_health(self):
     """
     Returns pool health status: 'ONLINE', 'DEGRADED' or 'FAULTED'
     """
     cmd = [ZPOOLCMD, "list", "-H", "-o", "health", self.name]
     outdata, errdata = util.run_command(cmd)
     result = outdata.rstrip()
     return result
Example #9
 def is_mounted(self):
     cmd = [ZFSCMD, "get", "-H", "-o", "value", "mounted",
            self.name]
     outdata, errdata = util.run_command(cmd)
     return outdata.rstrip() == "yes"
Example #10
 def get_user_property(self, prop, local=False):
     if local:
         cmd = [
             ZFSCMD, "get", "-s", "local", "-H", "-o", "value", prop,
             self.name
         ]
     else:
         cmd = [ZFSCMD, "get", "-H", "-o", "value", prop, self.name]
     outdata, errdata = util.run_command(cmd)
     return outdata.rstrip()
Example #11
 def get_verbose(self):
     cmd = [SVCPROPCMD, "-c", "-p",
            DAEMONPROPGROUP + '/' + "verbose",
            self.instanceName]
     outdata, errdata = util.run_command(cmd)
     return outdata.rstrip() == "true"
Example #12
 def has_clones(self):
     """Returns True if the snapshot has any dependent clones"""
     cmd = [ZFSCMD, "list", "-H", "-o", "origin,name"]
     outdata, errdata = util.run_command(cmd)
     for line in outdata.rstrip().split('\n'):
         details = line.rstrip().split()
         if details[0] == self.name and details[1] != '-':
             return True
     return False
Example #13
 def find_dependency_errors(self):
     errors = []
     #FIXME - do this in one pass.
     for dep in self.svcdeps:
         cmd = [SVCSCMD, "-H", "-o", "state", dep]
         outdata, errdata = util.run_command(cmd)
         result = outdata.rstrip()
         if result != "online":
             errors.append("%s\t%s" % (result, dep))
     return errors
Example #14
 def get_referenced_size(self):
     """
     Returns how much unique storage space is used by this
     snapshot. The answer is in bytes.
     """
     cmd = [ZFSCMD, "get", "-H", "-p",
            "-o", "value", "referenced",
            self.name]
     outdata, errdata = util.run_command(cmd)
     return int(outdata.rstrip())
Example #15
 def list_children(self):
     cmd = [
         ZFSCMD, "list", "-H", "-r", "-t", "filesystem", "-o", "name",
         self.name
     ]
     outdata, errdata = util.run_command(cmd)
     result = []
     for line in outdata.rstrip().split('\n'):
         if line.rstrip() != self.name:
             result.append(line.rstrip())
     return result
Example #16
 def get_auto_snap(self, schedule=None):
     if schedule:
         # Consult the schedule specific override property, the same
         # pattern used by list_auto_snapshot_sets() above.
         prop = "com.sun:auto-snapshot:" + schedule
     else:
         prop = "com.sun:auto-snapshot"
     cmd = [ZFSCMD, "get", "-H", "-o", "value", prop, self.name]
     outdata, errdata = util.run_command(cmd)
     return outdata.rstrip() == "true"
Example #17
    def hold(self, tag):
        """
        Place a hold on the snapshot with the specified "tag" string.
        """
        # FIXME - fails if hold is already held
        # Be sure it genuinely exists before trying to place a hold
        if not self.exists():
            return

        cmd = [PFCMD, ZFSCMD, "hold", tag, self.name]
        outdata, errdata = util.run_command(cmd)
Example #18
 def list_children(self):
     """Returns a recursive list of child snapshots of this snapshot"""
     cmd = [
         ZFSCMD, "list", "-t", "snapshot", "-H", "-r", "-o", "name",
         self.fsname
     ]
     outdata, errdata = util.run_command(cmd)
     result = []
     for line in outdata.rstrip().split('\n'):
         # re.escape guards against regex metacharacters in the label.
         if re.search("@%s" % re.escape(self.snaplabel), line) and \
             line != self.name:
             result.append(line)
     return result
Example #19
    def list_children(self):

        # Note: if more dataset types ever come around they will
        # need to be added to the filesystem,volume args below.
        # Not for the foreseeable future though.
        cmd = [
            ZFSCMD, "list", "-H", "-r", "-t", "filesystem,volume", "-o",
            "name", self.name
        ]
        outdata, errdata = util.run_command(cmd)
        result = []
        for line in outdata.rstrip().split('\n'):
            if line.rstrip() != self.name:
                result.append(line.rstrip())
        return result
Example #20
    def holds(self):
        """
        Returns a list of user hold tags for this snapshot
        """
        cmd = [ZFSCMD, "holds", self.name]
        results = []
        outdata, errdata = util.run_command(cmd)

        for line in outdata.rstrip().split('\n'):
            if len(line) == 0:
                continue
            # The first line is a header with columns NAME TAG TIMESTAMP.
            # Filter that line out.
            line = line.split()
            if (line[0] != "NAME" and line[1] != "TAG"):
                results.append(line[1])
        return results
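For reference, a sketch of that parsing against representative "zfs holds" output (the dataset name, tag and timestamp below are made up):

    sample = ("NAME                    TAG       TIMESTAMP\n"
              "tank/home@daily-backup  mybackup  Fri Feb  2 12:00 2024\n")
    tags = [line.split()[1] for line in sample.rstrip().split('\n')
            if line.split()[0] != "NAME"]
    print(tags)  # ['mybackup']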
Example #21
    def destroy(self, deferred=True):
        """
        Permanently remove this snapshot from the filesystem
        Performs deferred destruction by default.
        """
        # Be sure it genuinely exists before trying to destroy it
        if not self.exists():
            return
        if deferred:
            cmd = [PFCMD, ZFSCMD, "destroy", "-d", self.name]
        else:
            cmd = [PFCMD, ZFSCMD, "destroy", self.name]

        outdata, errdata = util.run_command(cmd)
        # Clear the global snapshot cache so that a rescan will be
        # triggered on the next call to Datasets.list_snapshots()
        self.datasets.refresh_snapshots()
Example #22
 def list_held_snapshots(self):
     """
      Returns a list of snapshots that have a "userrefs"
      property value greater than 0. The result list is
      sorted in order of creation time, oldest first.
     """
     cmd = [
         ZFSCMD, "list", "-H", "-t", "snapshot", "-s", "creation", "-o",
         "userrefs,name"
     ]
     outdata, errdata = util.run_command(cmd)
     result = []
     for line in outdata.rstrip().split('\n'):
         details = line.split()
         if details[0] != "0":
             result.append(details[1])
     return result
Example #23
 def list_cloned_snapshots(self):
     """
     Returns a list of snapshots that have cloned filesystems
     dependent on them.
      Snapshots with cloned filesystems cannot be destroyed
      unless the dependent cloned filesystems are first destroyed.
     """
     cmd = [ZFSCMD, "list", "-H", "-o", "origin"]
     outdata, errdata = util.run_command(cmd)
     result = []
     for line in outdata.rstrip().split('\n'):
         details = line.rstrip()
         if details != "-" and details not in result:
             result.append(details)
     return result
Example #24
 def get_profiles(self):
     cmd = ["/usr/bin/profiles", self.name]
     profiles = []
     outdata, errdata = util.run_command(cmd)
     for line in outdata.split('\n'):
         # Skip blank lines and the "<name> :" header line.
         if not line.strip():
             continue
         if (self.name + " :") in line:
             continue
         profiles.append(line.strip())
     # Remove "All" because it's (seemingly) meaningless
     try:
         profiles.remove("All")
     except ValueError:
         pass
     return profiles
Example #25
    def execute(self, schedule, label):

        triggers = self.smfInst.get_trigger_list()
        # Proceed only if the plugin triggers on this schedule or on "all".
        if "all" not in triggers and schedule not in triggers:
            return

        # Skip if already running
        if self.is_running():
            util.debug("Plugin: %s is already running. Skipping execution" \
                       % (self.smfInst.instanceName), \
                       self.verbose)
            return
        # Skip if plugin FMRI has been disabled or placed into maintenance
        cmd = [smf.SVCSCMD, "-H", "-o", "state", self.smfInst.instanceName]
        outdata, errdata = util.run_command(cmd)
        state = outdata.strip()
        if state == "disabled" or state == "maintenance":
            util.debug("Plugin: %s is in %s state. Skipping execution" \
                       % (self.smfInst.instanceName, state), \
                       self.verbose)
            return

        cmd = self.smfInst.get_trigger_command()
        util.debug("Executing plugin command: %s" % str(cmd), self.verbose)
        svcFmri = "%s:%s" % (autosnapsmf.BASESVC, schedule)

        os.putenv("AUTOSNAP_FMRI", svcFmri)
        os.putenv("AUTOSNAP_LABEL", label)
        try:
            os.putenv("PLUGIN_FMRI", self.smfInst.instanceName)
            self._proc = subprocess.Popen(cmd,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE,
                                          close_fds=True,
                                          universal_newlines=True)
        except OSError as message:
            self._proc = None
            raise RuntimeError("%s subprocess error:\n %s" % \
                                (cmd, str(message)))
Example #26
    def execute(self, schedule, label):

        triggers = self.smfInst.get_trigger_list()
        # Proceed only if the plugin triggers on this schedule or on "all".
        if "all" not in triggers and schedule not in triggers:
            return

        # Skip if already running
        if self.is_running():
            util.debug("Plugin: %s is already running. Skipping execution" \
                       % (self.smfInst.instanceName), \
                       self.verbose)
            return
        # Skip if plugin FMRI has been disabled or placed into maintenance
        cmd = [smf.SVCSCMD, "-H", "-o", "state", self.smfInst.instanceName]
        outdata, errdata = util.run_command(cmd)
        state = outdata.strip()
        if state == "disabled" or state == "maintenance":
            util.debug("Plugin: %s is in %s state. Skipping execution" \
                       % (self.smfInst.instanceName, state), \
                       self.verbose)
            return

        cmd = self.smfInst.get_trigger_command()
        util.debug("Executing plugin command: %s" % str(cmd), self.verbose)
        svcFmri = "%s:%s" % (autosnapsmf.BASESVC, schedule)

        os.putenv("AUTOSNAP_FMRI", svcFmri)
        os.putenv("AUTOSNAP_LABEL", label)
        try:
            os.putenv("PLUGIN_FMRI", self.smfInst.instanceName) 
            self._proc = subprocess.Popen(cmd,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE,
                                          close_fds=True)
        except OSError as message:
            self._proc = None
            raise RuntimeError("%s subprocess error:\n %s" %
                               (cmd, str(message)))
Example #27
    def release(self, tag):
        """
        Release the hold on the snapshot with the specified "tag" string.
        """
        # FIXME raises exception if no hold exists.
        # Be sure it genuinely exists before trying to release it
        if not self.exists():
            return

        cmd = [PFCMD, ZFSCMD, "release", tag, self.name]

        outdata, errdata = util.run_command(cmd)
        # Releasing the snapshot might cause it to get automatically
        # deleted by zfs.
        # Clear the global snapshot cache so that a rescan will be
        # triggered on the next call to Datasets.list_snapshots()
        self.datasets.refresh_snapshots()
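A hedged sketch of the hold/release lifecycle these two methods support (the snapshot name is made up, and the tag format mirrors the propname built in the plugin main() functions below):

    snap = zfs.Snapshot("tank/home@zfs-auto-snap_daily-2024-02-02")  # made-up name
    tag = "org.opensolaris:time-slider-plugin:rsync"                 # assumed tag
    snap.hold(tag)      # keep the snapshot alive while it is backed up
    # ... back the snapshot up ...
    snap.release(tag)   # zfs may now destroy it once it expires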
Example #28
    def get_capacity(self):
        """
        Returns the percentage of total pool storage in use.
        Calculated based on the "used" and "available" properties
        of the pool's top-level filesystem because the values account
        for reservations and quotas of children in their calculations,
        giving a more practical indication of how much capacity is used
        up on the pool.
        """
        if self.health == "FAULTED":
            raise ZPoolFaultedError("Cannot determine capacity of zpool: %s "
                                    "because it is in a FAULTED state"
                                    % (self.name))

        cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", \
               "used,available", self.name]
        outdata, errdata = util.run_command(cmd)
        _used, _available = outdata.rstrip().split('\n')
        used = float(_used)
        available = float(_available)
        return 100.0 * used / (used + available)
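A worked example of the capacity calculation with made-up numbers:

    used, available = 750.0e9, 250.0e9   # bytes, as "zfs get -p" reports them
    capacity = 100.0 * used / (used + available)
    print(capacity)                      # 75.0, i.e. the pool is 75% full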
Example #29
    def create_snapshot(self, snaplabel, recursive=False):
        """
        Create a snapshot for the ReadWritable dataset using the supplied
        snapshot label.

        Keyword Arguments:
        snaplabel:
            A string to use as the snapshot label.
            The bit that comes after the "@" part of the snapshot
            name.
        recursive:
            Recursively snapshot children of this dataset.
            Default = False
        """
        cmd = [PFCMD, ZFSCMD, "snapshot"]
        if recursive:
            cmd.append("-r")
        cmd.append("%s@%s" % (self.name, snaplabel))
        outdata, errdata = util.run_command(cmd, False)
        if errdata:
            print(errdata)
        self.datasets.refresh_snapshots()
Example #30
def main(argv):

    # Check that the appropriate environment variables have been supplied
    # by time-slider
    #
    # The label used for the snapshot set just taken, i.e. the
    # component following the "@" in the snapshot name
    snaplabel = os.getenv("AUTOSNAP_LABEL")
    # The SMF fmri of the auto-snapshot instance corresponding to
    # the snapshot set just taken.
    snapfmri = os.getenv("AUTOSNAP_FMRI")
    # The SMF fmri of the time-slider plugin instance associated with
    # this command.
    pluginfmri = os.getenv("PLUGIN_FMRI")

    if pluginfmri is None:
        sys.stderr.write("No time-slider plugin SMF instance FMRI defined. " \
                         "This plugin does not support command line "
                         "execution. Exiting\n")
        sys.exit(-1)
    syslog.openlog(pluginfmri, 0, syslog.LOG_DAEMON)

    cmd = [smf.SVCPROPCMD, "-p", verboseprop, pluginfmri]
    outdata, errdata = util.run_command(cmd)
    verbose = (outdata.rstrip() == "true")

    if snaplabel is None:
        log_error(syslog.LOG_ERR,
                  "No snapshot label defined. Exiting")
        sys.exit(-1)
    if snapfmri is None:
        log_error(syslog.LOG_ERR,
                  "No auto-snapshot SMF instance FMRI defined. Exiting")
        sys.exit(-1)

    schedule = snapfmri.rsplit(':', 1)[1]
    plugininstance = pluginfmri.rsplit(':', 1)[1]

    # The user property/tag used when tagging and holding zfs datasets
    propname = "%s:%s" % (propbasename, plugininstance)

    # Identifying snapshots is a two-stage process.
    #
    # First: identify all snapshots matching the AUTOSNAP_LABEL
    # value passed in by the time-slider daemon.
    #
    # Second: we need to filter the results and ensure that the
    # filesystem/volume corresponding to each snapshot is actually
    # tagged with the property (com.sun:auto-snapshot<:schedule>)
    #
    # This is necessary to avoid confusion whereby a snapshot might
    # have been sent|received from one zpool to another on the same
    # system. The received snapshot will show up in the first pass
    # results but is not actually part of the auto-snapshot set
    # created by time-slider. It also avoids incorrectly placing
    # zfs holds on the imported snapshots.

    datasets = zfs.Datasets()
    candidates = datasets.list_snapshots(snaplabel)
    originsets = datasets.list_auto_snapshot_sets(schedule)
    snappeddatasets = []
    snapnames = [name for [name,ctime] in candidates \
                 if name.split('@',1)[0] in originsets]


    # Place a hold on the newly created snapshots so
    # they can be backed up without fear of being destroyed
    # before the backup gets a chance to complete.
    for snap in snapnames:
        snapshot = zfs.Snapshot(snap)
        holds = snapshot.holds()
        if propname not in holds:
            util.debug("Placing hold on %s" % (snap), verbose)
            snapshot.hold(propname)
        datasetname = snapshot.fsname
        # Insert datasetnames in alphabetically sorted order because
        # zfs receive falls over if it receives a child before the
        # parent if the "-F" option is not used.
        insort(snappeddatasets, datasetname)

    # Find out the receive command property value
    cmd = [smf.SVCPROPCMD, "-c", "-p", "receive/command", pluginfmri]
    outdata, errdata = util.run_command(cmd)
    # Strip out '\' characters inserted by svcprop
    recvcmd = outdata.strip().replace('\\', '').split()

    # Check to see if the receive command is accessible and executable
    try:
        statinfo = os.stat(recvcmd[0])
        other_x = (statinfo.st_mode & 0o1)
        if other_x == 0:
            log_error(syslog.LOG_ERR,
                      "Plugin: %s: Configured receive/command is not " \
                      "executable: %s" \
                      % (pluginfmri, outdata))
            maintenance(pluginfmri)
            sys.exit(-1)
    except OSError:
        log_error(syslog.LOG_ERR,
                  "Plugin: %s: Can not access the configured " \
                  "receive/command: %s" \
                  % (pluginfmri, outdata)) 
        maintenance(pluginfmri)   
        sys.exit(-1)

    for dataset in snappeddatasets:
        sendcmd = None
        prevsnapname = None
        ds = zfs.ReadableDataset(dataset)
        prevlabel = ds.get_user_property(propname)

        snapname = "%s@%s" % (ds.name, snaplabel)
        if prevlabel is None or prevlabel == '-' or len(prevlabel) == 0:
            # No previous backup - send a full replication stream
            sendcmd = [zfs.ZFSCMD, "send", snapname]
            util.debug("No previous backup registered for %s" % ds.name, verbose)
        else:
            # A record of a previous backup exists.
            # Check that it exists to enable send of an incremental stream.
            prevsnapname = "%s@%s" % (ds.name, prevlabel)
            util.debug("Previously sent snapshot: %s" % prevsnapname, verbose)
            prevsnap = zfs.Snapshot(prevsnapname)
            if prevsnap.exists():
                sendcmd = [zfs.ZFSCMD, "send", "-i", prevsnapname, snapname]
            else:
                # This should not happen under normal operation since we
                # place a hold on the snapshot until it gets sent. So
                # getting here suggests that something else released the
                # hold on the snapshot, allowing it to get destroyed
                # prematurely.
                log_error(syslog.LOG_ERR,
                          "Previously sent snapshot no longer exists: %s" \
                          % prevsnapname)
                maintenance(pluginfmri)
                sys.exit(-1)
        
        
        # Invoke the send and receive commands via pfexec(1) since
        # we are not using the role's shell to take care of that
        # for us.
        sendcmd.insert(0, smf.PFCMD)
        recvcmd.insert(0, smf.PFCMD)

        try:
            sendP = subprocess.Popen(sendcmd,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     close_fds=True)
            recvP = subprocess.Popen(recvcmd,
                                     stdin=sendP.stdout,
                                     stderr=subprocess.PIPE,
                                     close_fds=True)

            recvout,recverr = recvP.communicate()
            recverrno = recvP.wait()
            sendout,senderr = sendP.communicate()
            senderrno = sendP.wait()

            if senderrno != 0:
                raise RuntimeError("Send command: %s failed with exit code "
                                   "%d. Error message:\n%s"
                                   % (str(sendcmd), senderrno, senderr))
            if recverrno != 0:
                raise RuntimeError("Receive command %s failed with exit "
                                   "code %d. Error message:\n%s"
                                   % (str(recvcmd), recverrno, recverr))

            if prevsnapname is not None:
                snapshot = zfs.Snapshot(prevsnapname)
                util.debug("Releasing hold on previous snapshot: %s" \
                      % (prevsnapname),
                      verbose)
                snapshot.release(propname)
        except Exception as message:
            log_error(syslog.LOG_ERR,
                      "Error during snapshot send/receive operation: %s" \
                      % (message))

            maintenance(pluginfmri)
            sys.exit(-1)            

        # Finally, after success, make a record of the latest backup
        # and release the old snapshot.
        ds.set_user_property(propname, snaplabel)
        util.debug("Sending of \"%s\"snapshot streams completed" \
              % (snaplabel),
              verbose)
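For orientation, a sketch of the environment contract between time-sliderd and this plugin (the FMRI values are illustrative placeholders, not taken from the source): the daemon exports these variables before launching the trigger command, as Example #25 shows, and main() reads them back with os.getenv().

    import os
    os.environ["AUTOSNAP_LABEL"] = "zfs-auto-snap_daily-2024-02-02-12h00"  # made-up label
    os.environ["AUTOSNAP_FMRI"] = "svc:/.../auto-snapshot:daily"           # placeholder FMRI
    os.environ["PLUGIN_FMRI"] = "svc:/.../plugin:rsync"                    # placeholder FMRI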
Example #31
def main(argv):
    # Check that appropriate environment variables have been
    # provided by time-sliderd
    #
    # The label used for the snapshot set just taken, i.e. the
    # component following the "@" in the snapshot name
    snaplabel = os.getenv("AUTOSNAP_LABEL")
    # The SMF fmri of the auto-snapshot instance corresponding to
    # the snapshot set just taken.
    snapfmri = os.getenv("AUTOSNAP_FMRI")
    # The SMF fmri of the time-slider plugin instance associated with
    # this command.
    pluginfmri = os.getenv("PLUGIN_FMRI")

    if pluginfmri is None:
        sys.stderr.write("No time-slider plugin SMF instance FMRI defined. " \
                         "This plugin does not support command line "
                         "execution. Exiting\n")
        sys.exit(-1)
    syslog.openlog(pluginfmri, 0, syslog.LOG_DAEMON)

    cmd = [smf.SVCPROPCMD, "-p", verboseprop, pluginfmri]
    outdata, errdata = util.run_command(cmd)
    verbose = (outdata.rstrip() == "true")

    if snaplabel is None:
        log_error(syslog.LOG_ERR, "No snapshot label provided. Exiting")
        sys.exit(-1)
    if snapfmri is None:
        log_error(syslog.LOG_ERR,
                  "No auto-snapshot SMF instance FMRI provided. Exiting")
        sys.exit(-1)

    schedule = snapfmri.rsplit(':', 1)[1]
    plugininstance = pluginfmri.rsplit(':', 1)[1]

    # The user property/tag used when tagging and holding zfs datasets
    propname = "%s:%s" % (propbasename, plugininstance)

    # Identifying snapshots is a three-stage process.
    #
    # First: identify all snapshots matching the AUTOSNAP_LABEL
    # value passed in by the time-slider daemon.
    #
    # Second: Filter out snapshots of volumes, since rsync can only
    # back up filesystems.
    #
    # Third: we need to filter the results and ensure that the
    # filesystem corresponding to each snapshot is actually
    # tagged with the property (com.sun:auto-snapshot<:schedule>)
    #
    # This is necessary to avoid confusion whereby a snapshot might
    # have been sent|received from one zpool to another on the same
    # system. The received snapshot will show up in the first pass
    # results but is not actually part of the auto-snapshot set
    # created by time-slider. It also avoids incorrectly placing
    # zfs holds on the imported snapshots.

    datasets = zfs.Datasets()
    candidates = datasets.list_snapshots(snaplabel)
    autosnapsets = datasets.list_auto_snapshot_sets(schedule)
    autosnapfs = [name for [name,mount] in datasets.list_filesystems() \
                   if name in autosnapsets]
    snappeddatasets = []
    snapnames = [name for [name,ctime] in candidates \
                 if name.split('@',1)[0] in autosnapfs]

    # Mark the snapshots with a user property. Doing this instead of
    # placing a physical hold on the snapshot allows time-slider to
    # expire the snapshots naturally or destroy them if a zpool fills
    # up and triggers a remedial cleanup.
    # It also prevents the possibility of leaving snapshots lying around
    # indefinitely on the system if the plugin SMF instance becomes
    # disabled, or having to release a pile of held snapshots.
    # We set org.opensolaris:time-slider-plugin:<instance> to "pending"
    # to indicate that the snapshot still awaits an rsync backup.
    snapshots = []
    for snap in snapnames:
        snapshot = zfs.Snapshot(snap)
        fs = zfs.Filesystem(snapshot.fsname)
        if fs.get_user_property(rsyncsmf.RSYNCFSTAG) == "true":
            if fs.is_mounted():
                snapshot.set_user_property(propname, "pending")
                util.debug("Marking %s as pending rsync" % (snap), verbose)
            else:
                util.debug("Ignoring snapshot of unmounted fileystem: %s" \
                           % (snap), verbose)
Example #32
def main(argv):
    # Check that appropriate environment variables have been
    # provided by time-sliderd
    #
    # The label used for the snapshot set just taken, i.e. the
    # component following the "@" in the snapshot name
    snaplabel = os.getenv("AUTOSNAP_LABEL")
    # The SMF fmri of the auto-snapshot instance corresponding to
    # the snapshot set just taken.
    snapfmri = os.getenv("AUTOSNAP_FMRI")
    # The SMF fmri of the time-slider plugin instance associated with
    # this command.
    pluginfmri = os.getenv("PLUGIN_FMRI")

    if pluginfmri is None:
        sys.stderr.write(
            "No time-slider plugin SMF instance FMRI defined. "
            "This plugin does not support command line "
            "execution. Exiting\n"
        )
        sys.exit(-1)
    syslog.openlog(pluginfmri, 0, syslog.LOG_DAEMON)

    cmd = [smf.SVCPROPCMD, "-p", verboseprop, pluginfmri]
    outdata, errdata = util.run_command(cmd)
    verbose = (outdata.rstrip() == "true")

    if snaplabel is None:
        log_error(syslog.LOG_ERR, "No snapshot label provided. Exiting")
        sys.exit(-1)
    if snapfmri is None:
        log_error(syslog.LOG_ERR, "No auto-snapshot SMF instance FMRI provided. Exiting")
        sys.exit(-1)

    schedule = snapfmri.rsplit(":", 1)[1]
    plugininstance = pluginfmri.rsplit(":", 1)[1]

    # The user property/tag used when tagging and holding zfs datasets
    propname = "%s:%s" % (propbasename, plugininstance)

    # Identifying snapshots is a three-stage process.
    #
    # First: identify all snapshots matching the AUTOSNAP_LABEL
    # value passed in by the time-slider daemon.
    #
    # Second: Filter out snapshots of volumes, since rsync can only
    # back up filesystems.
    #
    # Third: we need to filter the results and ensure that the
    # filesystem corresponding to each snapshot is actually
    # tagged with the property (com.sun:auto-snapshot<:schedule>)
    #
    # This is necessary to avoid confusion whereby a snapshot might
    # have been sent|received from one zpool to another on the same
    # system. The received snapshot will show up in the first pass
    # results but is not actually part of the auto-snapshot set
    # created by time-slider. It also avoids incorrectly placing
    # zfs holds on the imported snapshots.

    datasets = zfs.Datasets()
    candidates = datasets.list_snapshots(snaplabel)
    autosnapsets = datasets.list_auto_snapshot_sets(schedule)
    autosnapfs = [name for [name, mount] in datasets.list_filesystems() if name in autosnapsets]
    snappeddatasets = []
    snapnames = [name for [name, ctime] in candidates if name.split("@", 1)[0] in autosnapfs]

    # Mark the snapshots with a user property. Doing this instead of
    # placing a physical hold on the snapshot allows time-slider to
    # expire the snapshots naturally or destroy them if a zpool fills
    # up and triggers a remedial cleanup.
    # It also prevents the possibility of leaving snapshots lying around
    # indefinitely on the system if the plugin SMF instance becomes
    # disabled, or having to release a pile of held snapshots.
    # We set org.opensolaris:time-slider-plugin:<instance> to "pending"
    # to indicate that the snapshot still awaits an rsync backup.
    snapshots = []
    for snap in snapnames:
        snapshot = zfs.Snapshot(snap)
        fs = zfs.Filesystem(snapshot.fsname)
        if fs.get_user_property(rsyncsmf.RSYNCFSTAG) == "true":
            if fs.is_mounted():
                snapshot.set_user_property(propname, "pending")
                util.debug("Marking %s as pending rsync" % (snap), verbose)
            else:
                util.debug("Ignoring snapshot of unmounted fileystem: %s" % (snap), verbose)