Example #1
    def __init__(self):
        threading.Thread.__init__(self)
        self.errors = []
        self.datasets = zfs.Datasets()
        self.snapshots = []
        self.rsynced_fs = []
        self.rsynced_backups = []
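
A note on the pattern above: because the class mixes in threading.Thread, the base-class initialiser must run before start() is called. Below is a minimal, self-contained sketch of the same worker-thread pattern; the zfs.Datasets() dependency is omitted and the snapshot name is a hypothetical stand-in, since the zfs module is specific to Time Slider.

import threading

class BackupWorker(threading.Thread):
    def __init__(self):
        # Initialise the Thread base class before any start()/join() calls.
        threading.Thread.__init__(self)
        self.errors = []
        self.snapshots = []

    def run(self):
        # Work performed on the worker thread once start() is called.
        self.snapshots.append("tank/home@example")  # hypothetical snapshot

worker = BackupWorker()
worker.start()
worker.join()
print(worker.snapshots)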
Example #2
    def __init__(self, bus):
        # Used to wake up the run() method prematurely in the event
        # of a SIGHUP/SMF refresh
        self._conditionLock = threading.Condition(threading.RLock())
        # Used when schedules are being rebuilt or examined.
        self._refreshLock = threading.Lock()
        # Indicates that cleanup is in progress when locked
        self._cleanupLock = threading.Lock()
        self._datasets = zfs.Datasets()
        # Indicates that schedules need to be rebuilt from scratch
        self._stale = True
        self._lastCleanupCheck = 0
        self._zpools = []
        self._poolstatus = {}
        self._destroyedsnaps = []

        # This is also checked during the refresh() method but we need
        # to know it sooner for instantiation of the PluginManager
        self._smf = timeslidersmf.TimeSliderSMF()
        try:
            self.verbose = self._smf.get_verbose()
        except RuntimeError as message:
            sys.stderr.write("Error determing whether debugging is enabled\n")
            self.verbose = False

        self._dbus = dbussvc.AutoSnap(bus,
                                      '/org/opensolaris/TimeSlider/autosnap',
                                      self)

        self._plugin = plugin.PluginManager(self.verbose)
        self.exitCode = smf.SMF_EXIT_OK
        self.refresh()

        # Seems we're up and running OK.
        # Signal our parent so we can daemonise
        os.kill(os.getppid(), signal.SIGUSR1)

        # SMF/svc.startd sends SIGHUP to force a
        # refresh of the daemon
        signal.signal(signal.SIGHUP, self._signalled)

        # Init done. Now initialise threading.
        threading.Thread.__init__(self)
        self.daemon = True
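
The interesting piece of this constructor is the Condition object: run() sleeps between schedule checks, and the SIGHUP handler notifies the condition so a refresh happens immediately instead of at the next wakeup. Here is a minimal sketch of that wakeup mechanism, with the SMF, D-Bus and ZFS machinery omitted and a hypothetical 60-second polling interval:

import signal
import threading

class Daemon(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.daemon = True
        # Guards self._stale and lets _signalled() wake run() early.
        self._conditionLock = threading.Condition(threading.RLock())
        self._stale = True
        # SMF/svc.startd sends SIGHUP to request a refresh; handlers
        # must be installed from the main thread.
        signal.signal(signal.SIGHUP, self._signalled)

    def _signalled(self, signum, frame):
        with self._conditionLock:
            self._stale = True
            self._conditionLock.notify()  # interrupt the wait() in run()

    def run(self):
        while True:
            with self._conditionLock:
                if self._stale:
                    self._stale = False
                    print("rebuilding schedules")  # placeholder for refresh()
                self._conditionLock.wait(60)  # hypothetical polling interval

if __name__ == "__main__":
    Daemon().start()
    signal.pause()  # main thread idles; SIGHUP wakes the worker early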
Example #3
    def __init__(self, snapshots=None):
        self.builder = Gtk.Builder()

        self.builder.set_translation_domain(GETTEXT_DOMAIN)

        self.builder.add_from_file("%s/../../ui/time-slider-delete.ui" \
                                  % (os.path.dirname(__file__)))

        self.backuptodelete = []
        self.shortcircuit = []
        maindialog = self.builder.get_object("time-slider-delete")
        self.pulsedialog = self.builder.get_object("pulsedialog")
        self.pulsedialog.set_transient_for(maindialog)
        self.datasets = zfs.Datasets()
        if snapshots:
            maindialog.hide()
            self.shortcircuit = snapshots
        else:
            GLib.idle_add(self.__init_scan)

        self.progressdialog = self.builder.get_object("deletingdialog")
        self.progressdialog.set_transient_for(maindialog)
        self.progressbar = self.builder.get_object("deletingprogress")
        # signal dictionary
        dic = {
            "on_closebutton_clicked": Gtk.main_quit,
            "on_window_delete_event": Gtk.main_quit,
            "on_snapshotmanager_delete_event": Gtk.main_quit,
            "on_fsfilterentry_changed": self.__on_filterentry_changed,
            "on_schedfilterentry_changed": self.__on_filterentry_changed,
            "on_typefiltercombo_changed": self.__on_filterentry_changed,
            "on_selectbutton_clicked": self.__on_selectbutton_clicked,
            "on_deselectbutton_clicked": self.__on_deselectbutton_clicked,
            "on_deletebutton_clicked": self.__on_deletebutton_clicked,
            "on_confirmcancel_clicked": self.__on_confirmcancel_clicked,
            "on_confirmdelete_clicked": self.__on_confirmdelete_clicked,
            "on_errordialog_response": self.__on_errordialog_response
        }
        self.builder.connect_signals(dic)
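
This constructor leans on two GTK idioms: Gtk.Builder loading widgets from a .ui definition, and connect_signals() mapping handler names declared in that file to Python callables. A stripped-down sketch of the same wiring, assuming a hypothetical example.ui whose widget ids and handler names match the strings below:

import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

builder = Gtk.Builder()
builder.add_from_file("example.ui")          # hypothetical UI file
window = builder.get_object("mainwindow")    # id declared in example.ui

# Handler names must match the signals wired up in the UI definition.
handlers = {
    "on_window_delete_event": Gtk.main_quit,
    "on_closebutton_clicked": Gtk.main_quit,
}
builder.connect_signals(handlers)

window.show_all()
Gtk.main()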
Example #4
def main(argv):

    # Check that the appropriate environment variables have been
    # supplied by time-slider
    #
    # The label used for the snapshot set just taken, i.e. the
    # component following the "@" in the snapshot name
    snaplabel = os.getenv("AUTOSNAP_LABEL")
    # The SMF fmri of the auto-snapshot instance corresponding to
    # the snapshot set just taken.
    snapfmri = os.getenv("AUTOSNAP_FMRI")
    # The SMF fmri of the time-slider plugin instance associated with
    # this command.
    pluginfmri = os.getenv("PLUGIN_FMRI")

    if pluginfmri is None:
        sys.stderr.write("No time-slider plugin SMF instance FMRI defined. " \
                         "This plugin does not support command line "
                         "execution. Exiting\n")
        sys.exit(-1)
    syslog.openlog(pluginfmri, 0, syslog.LOG_DAEMON)

    cmd = [smf.SVCPROPCMD, "-p", verboseprop, pluginfmri]
    outdata, errdata = util.run_command(cmd)
    verbose = (outdata.rstrip() == "true")

    if snaplabel is None:
        log_error(syslog.LOG_ERR,
                  "No snapshot label defined. Exiting")
        sys.exit(-1)
    if snapfmri is None:
        log_error(syslog.LOG_ERR,
                  "No auto-snapshot SMF instance FMRI defined. Exiting")
        sys.exit(-1)

    schedule = snapfmri.rsplit(':', 1)[1]
    plugininstance = pluginfmri.rsplit(':', 1)[1]

    # The user property/tag used when tagging and holding zfs datasets
    propname = "%s:%s" % (propbasename, plugininstance)

    # Identifying snapshots is a two-stage process.
    #
    # First: identify all snapshots matching the AUTOSNAP_LABEL
    # value passed in by the time-slider daemon.
    #
    # Second: we need to filter the results and ensure that the
    # filesystem/volume corresponding to each snapshot is actually
    # tagged with the property (com.sun:auto-snapshot<:schedule>)
    #
    # This is necessary to avoid confusion whereby a snapshot might
    # have been sent|received from one zpool to another on the same
    # system. The received snapshot will show up in the first pass
    # results but is not actually part of the auto-snapshot set
    # created by time-slider. It also avoids incorrectly placing
    # zfs holds on the imported snapshots.

    datasets = zfs.Datasets()
    candidates = datasets.list_snapshots(snaplabel)
    originsets = datasets.list_auto_snapshot_sets(schedule)
    snappeddatasets = []
    snapnames = [name for [name,ctime] in candidates \
                 if name.split('@',1)[0] in originsets]


    # Place a hold on the newly created snapshots so
    # they can be backed up without fear of being destroyed
    # before the backup gets a chance to complete.
    for snap in snapnames:
        snapshot = zfs.Snapshot(snap)
        # Skip snapshots that already carry our hold tag.
        if propname not in snapshot.holds():
            util.debug("Placing hold on %s" % (snap), verbose)
            snapshot.hold(propname)
        datasetname = snapshot.fsname
        # Insert datasetnames in alphabetically sorted order because
        # zfs receive falls over if it receives a child before the
        # parent if the "-F" option is not used.
        insort(snappeddatasets, datasetname)
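        # insort() (from the bisect module) keeps snappeddatasets sorted
        # as each name is inserted, so parent filesystems are processed
        # before their children during the receive loop below.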

    # Find out the receive command property value
    cmd = [smf.SVCPROPCMD, "-c", "-p", "receive/command", pluginfmri]
    outdata, errdata = util.run_command(cmd)
    # Strip out '\' characters inserted by svcprop
    recvcmd = outdata.strip().replace('\\', '').split()

    # Check to see if the receive command is accessible and executable
    try:
        statinfo = os.stat(recvcmd[0])
        other_x = (statinfo.st_mode & 0o1)
        if other_x == 0:
            log_error(syslog.LOG_ERR,
                      "Plugin: %s: Configured receive/command is not " \
                      "executable: %s" \
                      % (pluginfmri, outdata))
            maintenance(pluginfmri)
            sys.exit(-1)
    except OSError:
        log_error(syslog.LOG_ERR,
                  "Plugin: %s: Can not access the configured " \
                  "receive/command: %s" \
                  % (pluginfmri, outdata)) 
        maintenance(pluginfmri)   
        sys.exit(-1)

    for dataset in snappeddatasets:
        sendcmd = None
        prevsnapname = None
        ds = zfs.ReadableDataset(dataset)
        prevlabel = ds.get_user_property(propname)

        snapname = "%s@%s" % (ds.name, snaplabel)
        if prevlabel is None or prevlabel in ('-', ''):
            # No previous backup - send a full replication stream
            sendcmd = [zfs.ZFSCMD, "send", snapname]
            util.debug("No previous backup registered for %s" % ds.name, verbose)
        else:
            # A record of a previous backup exists.
            # Check that it exists to enable send of an incremental stream.
            prevsnapname = "%s@%s" % (ds.name, prevlabel)
            util.debug("Previously sent snapshot: %s" % prevsnapname, verbose)
            prevsnap = zfs.Snapshot(prevsnapname)
            if prevsnap.exists():
                sendcmd = [zfs.ZFSCMD, "send", "-i", prevsnapname, snapname]
            else:
                # This should not happen under normal operation since we
                # place a hold on the snapshot until it gets sent. So
                # getting here suggests that something else released the
                # hold on the snapshot, allowing it to get destroyed
                # prematurely.
                log_error(syslog.LOG_ERR,
                          "Previously sent snapshot no longer exists: %s" \
                          % prevsnapname)
                maintenance(pluginfmri)
                sys.exit(-1)
        
        
        # Invoke the send and receive commands via pfexec(1) since
        # we are not using the role's shell to take care of that
        # for us.
        sendcmd.insert(0, smf.PFCMD)
        recvcmd.insert(0, smf.PFCMD)

        try:
            sendP = subprocess.Popen(sendcmd,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     close_fds=True)
            recvP = subprocess.Popen(recvcmd,
                                     stdin=sendP.stdout,
                                     stderr=subprocess.PIPE,
                                     close_fds=True)

            recvout,recverr = recvP.communicate()
            recverrno = recvP.wait()
            sendout,senderr = sendP.communicate()
            senderrno = sendP.wait()

            if senderrno != 0:
                raise RuntimeError("Send command: %s failed with exit code "
                                   "%d. Error message:\n%s"
                                   % (str(sendcmd), senderrno, senderr))
            if recverrno != 0:
                raise RuntimeError("Receive command %s failed with exit "
                                   "code %d. Error message:\n%s"
                                   % (str(recvcmd), recverrno, recverr))

            if prevsnapname is not None:
                util.debug("Releasing hold on previous snapshot: %s" \
                           % (prevsnapname),
                           verbose)
                snapshot = zfs.Snapshot(prevsnapname)
                snapshot.release(propname)
        except Exception as message:
            log_error(syslog.LOG_ERR,
                      "Error during snapshot send/receive operation: %s" \
                      % (message))

            maintenance(pluginfmri)
            sys.exit(-1)            

        # Finally, after success, make a record of the latest backup
        # and release the old snapshot.
        ds.set_user_property(propname, snaplabel)
        util.debug("Sending of \"%s\"snapshot streams completed" \
              % (snaplabel),
              verbose)
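
The heart of this plugin is the zfs send | zfs receive pipeline built from two chained subprocess.Popen objects. A standalone sketch of that pipeline, with hypothetical snapshot and target names, checking both exit codes the way the plugin does:

import subprocess

sendcmd = ["zfs", "send", "tank/home@hourly-1"]   # hypothetical snapshot
recvcmd = ["zfs", "receive", "backup/home"]       # hypothetical target

sendP = subprocess.Popen(sendcmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, close_fds=True)
recvP = subprocess.Popen(recvcmd, stdin=sendP.stdout,
                         stderr=subprocess.PIPE, close_fds=True)

# Drain the receiver first; it reads the stream until send closes the pipe.
recvout, recverr = recvP.communicate()
sendout, senderr = sendP.communicate()

if sendP.returncode != 0:
    raise RuntimeError("send failed: %s" % senderr.decode())
if recvP.returncode != 0:
    raise RuntimeError("receive failed: %s" % recverr.decode())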
Example #5
def main(argv):
    # Check that appropriate environment variables have been
    # provided by time-sliderd
    #
    # The label used for the snapshot set just taken, i.e. the
    # component following the "@" in the snapshot name
    snaplabel = os.getenv("AUTOSNAP_LABEL")
    # The SMF fmri of the auto-snapshot instance corresponding to
    # the snapshot set just taken.
    snapfmri = os.getenv("AUTOSNAP_FMRI")
    # The SMF fmri of the time-slider plugin instance associated with
    # this command.
    pluginfmri = os.getenv("PLUGIN_FMRI")

    if pluginfmri is None:
        sys.stderr.write("No time-slider plugin SMF instance FMRI defined. " \
                         "This plugin does not support command line "
                         "execution. Exiting\n")
        sys.exit(-1)
    syslog.openlog(pluginfmri, 0, syslog.LOG_DAEMON)

    cmd = [smf.SVCPROPCMD, "-p", verboseprop, pluginfmri]
    outdata, errdata = util.run_command(cmd)
    verbose = (outdata.rstrip() == "true")

    if snaplabel is None:
        log_error(syslog.LOG_ERR, "No snapshot label provided. Exiting")
        sys.exit(-1)
    if snapfmri is None:
        log_error(syslog.LOG_ERR,
                  "No auto-snapshot SMF instance FMRI provided. Exiting")
        sys.exit(-1)

    schedule = snapfmri.rsplit(':', 1)[1]
    plugininstance = pluginfmri.rsplit(':', 1)[1]

    # The user property/tag used when tagging and holding zfs datasets
    propname = "%s:%s" % (propbasename, plugininstance)

    # Identifying snapshots is a three-stage process.
    #
    # First: identify all snapshots matching the AUTOSNAP_LABEL
    # value passed in by the time-slider daemon.
    #
    # Second: Filter out snapshots of volumes, since rsync can only
    # back up filesystems.
    #
    # Third: we need to filter the results and ensure that the
    # filesystem corresponding to each snapshot is actually
    # tagged with the property (com.sun:auto-snapshot<:schedule>)
    #
    # This is necessary to avoid confusion whereby a snapshot might
    # have been sent|received from one zpool to another on the same
    # system. The received snapshot will show up in the first pass
    # results but is not actually part of the auto-snapshot set
    # created by time-slider. It also avoids incorrectly placing
    # zfs holds on the imported snapshots.

    datasets = zfs.Datasets()
    candidates = datasets.list_snapshots(snaplabel)
    autosnapsets = datasets.list_auto_snapshot_sets(schedule)
    autosnapfs = [name for [name,mount] in datasets.list_filesystems() \
                   if name in autosnapsets]
    snappeddatasets = []
    snapnames = [name for [name,ctime] in candidates \
                 if name.split('@',1)[0] in autosnapfs]

    # Mark the snapshots with a user property. Doing this instead of
    # placing a physical hold on the snapshot allows time-slider to
    # expire the snapshots naturally or destroy them if a zpool fills
    # up and triggers a remedial cleanup.
    # It also prevents the possibility of leaving snapshots lying around
    # indefinitely on the system if the plugin SMF instance becomes
    # disabled, and avoids having to release a pile of held snapshots.
    # We set org.opensolaris:time-slider-plugin:<instance> to "pending"
    # to indicate that the snapshot still awaits an rsync backup.
    snapshots = []
    for snap in snapnames:
        snapshot = zfs.Snapshot(snap)
        fs = zfs.Filesystem(snapshot.fsname)
        if fs.get_user_property(rsyncsmf.RSYNCFSTAG) == "true":
            if fs.is_mounted():
                snapshot.set_user_property(propname, "pending")
                util.debug("Marking %s as pending rsync" % (snap), verbose)
            else:
                util.debug("Ignoring snapshot of unmounted fileystem: %s" \
                           % (snap), verbose)
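
Unlike the zfs-send plugin above, this rsync plugin tags snapshots with a user property rather than a hold, so time-sliderd can still expire or destroy them during remedial cleanup. A minimal sketch of that tagging step via the zfs CLI, with a hypothetical property name and snapshot; the set_user_property() method used above is assumed to do the equivalent:

import subprocess

def set_user_property(snapshot, prop, value):
    # ZFS user properties must contain a colon, which distinguishes
    # them from native properties.
    subprocess.run(["zfs", "set", "%s=%s" % (prop, value), snapshot],
                   check=True)

set_user_property("tank/home@rsync-1",                        # hypothetical
                  "org.opensolaris:time-slider-plugin:rsync",
                  "pending")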