Example #1
def cmd_backups(opts, args, overDict):
    if len(args) < 2 or args[1] == 'list':
        return call_xmlrpc_method_and_stop('backups_list')

    elif args[1] == 'idlist':
        return call_xmlrpc_method_and_stop('backups_id_list')

    elif args[1] == 'start' and len(args) >= 3:
        from lib import packetid
        if packetid.Valid(args[2]):
            return call_xmlrpc_method_and_stop('backup_start_id', args[2])
        if not os.path.exists(os.path.abspath(args[2])):
            print_text('path %s does not exist\n' % args[2])
            return 1
        return call_xmlrpc_method_and_stop('backup_start_path', args[2])

    elif args[1] == 'add' and len(args) >= 3:
        localpath = os.path.abspath(args[2])
        if not os.path.exists(localpath):
            print_text('path %s does not exist\n' % args[2])
            return 1
        from system import bpio
        if bpio.pathIsDir(localpath):
            m = 'backup_dir_add'
        else:
            m = 'backup_file_add'
        return call_xmlrpc_method_and_stop(m, localpath)
    
    elif args[1] == 'addtree' and len(args) >= 3:
        localpath = os.path.abspath(args[2])
        from system import bpio
        if not bpio.pathIsDir(localpath):
            print_text('folder %s does not exist\n' % args[2])
            return 1
        return call_xmlrpc_method_and_stop('backup_tree_add', localpath)

    elif args[1] == 'delete' and len(args) >= 3:
        from lib import packetid
        if args[2] == 'local':
            if len(args) < 4:
                return 2
            return call_xmlrpc_method_and_stop('backup_delete_local', args[3].replace('/','_'))
        if packetid.Valid(args[2]):
            return call_xmlrpc_method_and_stop('backup_delete_id', args[2].replace('/','_'))
        return call_xmlrpc_method_and_stop('backup_delete_path', os.path.abspath(args[2]))

    elif args[1] == 'update':
        return call_xmlrpc_method_and_stop('backups_update')
    
    return 2
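
A minimal call sketch for the dispatcher above, assuming the CLI forwards the raw argument vector (the path is hypothetical; `opts` and `overDict` are unused by this command):

# 'backups start <path-or-ID>' validates a local path before calling the node:
cmd_backups(None, ['backups', 'start', '/home/alice/documents'], {})
# -> call_xmlrpc_method_and_stop('backup_start_path', '/home/alice/documents')

# 'backups list' maps straight to the 'backups_list' XML-RPC method:
cmd_backups(None, ['backups', 'list'], {})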
Example #2
def _upload(params):
    path = params['path']
    if bpio.Linux() or bpio.Mac():
        path = '/' + (path.lstrip('/'))
    localPath = unicode(path)  # Python 2 text API; on Python 3 something like strng.to_text(path) would be needed
    if not bpio.pathExist(localPath):
        return {
            'result': {
                "success": False,
                "error": 'local path %s was not found' % path
            }
        }
    result = []
    pathID = backup_fs.ToID(localPath)
    if pathID is None:
        if bpio.pathIsDir(localPath):
            pathID, iter, iterID = backup_fs.AddDir(localPath, read_stats=True)
            result.append('new folder was added: %s' % localPath)
        else:
            pathID, iter, iterID = backup_fs.AddFile(localPath,
                                                     read_stats=True)
            result.append('new file was added: %s' % localPath)
    pathID = global_id.CanonicalID(pathID)
    backup_control.StartSingle(pathID=pathID, localPath=localPath)
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([
        ('pathID', pathID),
    ])
    result.append('backup started: %s' % pathID)
    return {
        'result': result,
    }
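
A hedged usage sketch for `_upload`, assuming the surrounding RPC layer passes a plain params dict (the path value is hypothetical):

response = _upload({'path': '/home/alice/photos'})
# success: {'result': ['new folder was added: ...', 'backup started: <pathID>']}
# missing path: {'result': {'success': False, 'error': 'local path ... was not found'}}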
Example #3
def backuptardir_thread(directorypath, arcname=None, recursive_subfolders=True, compress=None):
    """
    Makes tar archive of a single file inside a thread.
    Returns `BytesLoop` object instance which can be used to read produced data in parallel.
    """
    if not bpio.pathIsDir(directorypath):
        lg.err('folder %s not found' % directorypath)
        return None
    if arcname is None:
        arcname = os.path.basename(directorypath)
    p = BytesLoop()

    def _run():
        from storage import tar_file
        ret = tar_file.writetar(
            sourcepath=directorypath,
            arcname=arcname,
            subdirs=recursive_subfolders,
            compression=compress or 'none',
            encoding='utf-8',
            fileobj=p,
        )
        p.mark_finished()
        if _Debug:
            lg.out(_DebugLevel, 'backup_tar.backuptardir_thread writetar() finished')
        return ret

    reactor.callInThread(_run)  # @UndefinedVariable
    return p
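
A consumption sketch for the returned object, assuming `BytesLoop` exposes a file-like `read()` that returns an empty bytes string once `mark_finished()` has been called; the exact reader API is an assumption here:

p = backuptardir_thread('/home/alice/documents', compress='bz2')
if p is not None:
    while True:
        chunk = p.read(65536)  # drain the tar stream produced by the worker thread
        if not chunk:          # empty read assumed to signal the finished mark
            break
        handle(chunk)          # hypothetical consumer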
Example #4
def backuptardir(directorypath, arcname=None, recursive_subfolders=True, compress=None):
    """
    Returns file descriptor for process that makes tar archive.

    In other words executes a child process and create a Pipe to
    communicate with it.
    """
    if not bpio.pathIsDir(directorypath):
        lg.out(1, 'backup_tar.backuptardir ERROR %s not found' % directorypath)
        return None
    subdirs = 'subdirs'
    if not recursive_subfolders:
        subdirs = 'nosubdirs'
    if compress is None:
        compress = 'none'
    if arcname is None:
        arcname = strng.to_text(os.path.basename(directorypath))
    # lg.out(14, "backup_tar.backuptar %s %s compress=%s" % (directorypath, subdirs, compress))
    if bpio.Windows():
        if bpio.isFrozen():
            commandpath = "bppipe.exe"
            cmdargs = [commandpath, subdirs, compress, directorypath, arcname]
        else:
            commandpath = "bppipe.py"
            cmdargs = [sys.executable, commandpath, subdirs, compress, directorypath, arcname]
    else:
        commandpath = "bppipe.py"
        cmdargs = [sys.executable, commandpath, subdirs, compress, directorypath, arcname]
    if not os.path.isfile(commandpath):
        lg.out(1, 'backup_tar.backuptardir ERROR %s not found' % commandpath)
        return None
    # lg.out(14, "backup_tar.backuptar going to execute %s" % str(cmdargs))
    # p = child_process.run('bppipe', cmdargs[2:])
    p = child_process.pipe(cmdargs)
    return p
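
A wiring sketch mirroring the `main()` examples below, which hand the returned pipe object straight to a backup job (the path is hypothetical):

backupPipe = backuptardir('/home/alice/documents', compress='bz2')
if backupPipe is not None:
    backupPipe.make_nonblocking()  # done in the examples below before starting the job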
Example #5
def backuptar(directorypath, recursive_subfolders=True, compress=None):
    """
    Returns file descriptor for process that makes tar archive.

    In other words executes a child process and create a Pipe to
    communicate with it.
    """
    if not bpio.pathIsDir(directorypath):
        lg.out(1, 'backup_tar.backuptar ERROR %s not found' % directorypath)
        return None
    subdirs = 'subdirs'
    if not recursive_subfolders:
        subdirs = 'nosubdirs'
    if compress is None:
        compress = 'none'
    # lg.out(14, "backup_tar.backuptar %s %s compress=%s" % (directorypath, subdirs, compress))
    if bpio.Windows():
        if bpio.isFrozen():
            commandpath = "bppipe.exe"
            cmdargs = [commandpath, subdirs, compress, directorypath]
        else:
            commandpath = "bppipe.py"
            cmdargs = [sys.executable, commandpath, subdirs, compress, directorypath]
    else:
        commandpath = "bppipe.py"
        cmdargs = [sys.executable, commandpath, subdirs, compress, directorypath]
    if not os.path.isfile(commandpath):
        lg.out(1, 'backup_tar.backuptar ERROR %s not found' % commandpath)
        return None
    # lg.out(14, "backup_tar.backuptar going to execute %s" % str(cmdargs))
    # p = child_process.run('bppipe', cmdargs[2:])
    p = child_process.pipe(cmdargs)
    return p
Example #6
def main():
    sourcePath = sys.argv[1]
    backupID = sys.argv[2]
    lg.set_debug_level(24)
    compress_mode = 'none'  # 'gz'
    raid_worker.A('init')
    backupPath = backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptar(sourcePath, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath, compress=compress_mode)
    backupPipe.make_nonblocking()

    job = backup.backup(backupID, backupPipe, backup_done)
    reactor.callLater(1, job.automat, 'start')
    reactor.run()
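
Invocation sketch for this script (the file name and argument values are hypothetical):

# python make_backup.py <sourcePath> <backupID>
# tars <sourcePath>, feeds the stream into a backup job and runs the reactor
# until backup_done fires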
Example #7
def cmd_schedule(opts, args, overDict):
    if len(args) < 2:
        return 2
    from system import bpio
    backupDir = os.path.abspath(args[1])
    if not bpio.pathIsDir(backupDir):
        print_text('folder %s does not exist\n' % args[1])
        return 1
    if len(args) < 3:
        return call_xmlrpc_method_and_stop('getschedule', backupDir)
    from lib import schedule
    shed = schedule.from_compact_string(args[2])
    if shed is None:
        print_text(schedule.format()+'\n')
        return 0
    return call_xmlrpc_method_and_stop(
        'setschedule', backupDir, shed.type, shed.interval, shed.daytime, shed.details,)
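
A hedged call sketch (the folder path is hypothetical):

# with only a folder argument the current schedule is requested from the node:
cmd_schedule(None, ['schedule', '/home/alice/documents'], {})
# -> call_xmlrpc_method_and_stop('getschedule', '/home/alice/documents')
# an unparsable compact schedule string prints the schedule.format() help instead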
Example #8
def main():
    from system import bpio
    from . import backup_tar
    from . import backup_fs
    lg.set_debug_level(24)
    sourcePath = sys.argv[1]
    compress_mode = 'none'  # 'gz'
    backupID = sys.argv[2]
    raid_worker.A('init')
    backupPath = backup_fs.MakeLocalDir(settings.getLocalBackupsDir(),
                                        backupID)
    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptardir(sourcePath,
                                             compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath,
                                              compress=compress_mode)
    backupPipe.make_nonblocking()

    def _bk_done(bid, result):
        from crypt import signed
        customer, remotePath = packetid.SplitPacketID(bid)
        try:
            os.mkdir(
                os.path.join(settings.getLocalBackupsDir(), customer,
                             remotePath + '.out'))
        except:
            pass  # the .out folder may already exist
        for filename in os.listdir(
                os.path.join(settings.getLocalBackupsDir(), customer,
                             remotePath)):
            filepath = os.path.join(settings.getLocalBackupsDir(), customer,
                                    remotePath, filename)
            payld = bpio.ReadBinaryFile(filepath)
            newpacket = signed.Packet('Data', my_id.getLocalID(),
                                      my_id.getLocalID(), filename, payld,
                                      'http://megafaq.ru/cvps1010.xml')
            newfilepath = os.path.join(settings.getLocalBackupsDir(), customer,
                                       remotePath + '.out', filename)
            bpio.WriteBinaryFile(newfilepath, newpacket.Serialize())
        reactor.stop()  # @UndefinedVariable

    job = backup(backupID, backupPipe, _bk_done)
    reactor.callLater(1, job.automat, 'start')  # @UndefinedVariable
    reactor.run()  # @UndefinedVariable
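
A sketch of the resulting on-disk layout after `_bk_done` has run (directory names follow the code above; concrete values are hypothetical):

# <localBackupsDir>/<customer>/<remotePath>/      raw blocks written by the job
# <localBackupsDir>/<customer>/<remotePath>.out/  the same blocks, each wrapped in a
#                                                 signed 'Data' packet and serialized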
Example #9
def main():
    sourcePath = sys.argv[1]
    backupID = sys.argv[2]
    lg.set_debug_level(24)
    compress_mode = 'none'  # 'gz'
    raid_worker.A('init')
    backupPath = backup_fs.MakeLocalDir(settings.getLocalBackupsDir(),
                                        backupID)
    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptardir(sourcePath,
                                             compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath,
                                              compress=compress_mode)
    backupPipe.make_nonblocking()

    job = backup.backup(backupID, backupPipe, backup_done)
    reactor.callLater(1, job.automat, 'start')
    reactor.run()
Example #10
def _upload(params):
    path = params['path']
    if bpio.Linux() or bpio.Mac():
        path = '/' + (path.lstrip('/'))
    localPath = unicode(path)
    if not bpio.pathExist(localPath):
        return {'result': {"success": False, "error": 'local path %s was not found' % path}}
    result = []
    pathID = backup_fs.ToID(localPath)
    if pathID is None:
        if bpio.pathIsDir(localPath):
            pathID, iter, iterID = backup_fs.AddDir(localPath, True)
            result.append('new folder was added: %s' % localPath)
        else:
            pathID, iter, iterID = backup_fs.AddFile(localPath, True)
            result.append('new file was added: %s' % localPath)
    backup_control.StartSingle(pathID, localPath)
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([('pathID', pathID), ])
    result.append('backup started: %s' % pathID)
    return {'result': result, }
Example #11
def main():
    from system import bpio
    import backup_tar
    import backup_fs
    lg.set_debug_level(24)
    sourcePath = sys.argv[1]
    compress_mode = 'none'  # 'gz'
    backupID = sys.argv[2]
    raid_worker.A('init')
    backupPath = backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptar(sourcePath, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath, compress=compress_mode)
    backupPipe.make_nonblocking()

    def _bk_done(bid, result):
        from crypt import signed
        try:
            os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.out'))
        except:
            pass
        for filename in os.listdir(os.path.join(settings.getLocalBackupsDir(), bid)):
            filepath = os.path.join(settings.getLocalBackupsDir(), bid, filename)
            payld = str(bpio.ReadBinaryFile(filepath))
            newpacket = signed.Packet(
                'Data',
                my_id.getLocalID(),
                my_id.getLocalID(),
                filename,
                payld,
                'http://megafaq.ru/cvps1010.xml')
            newfilepath = os.path.join(settings.getLocalBackupsDir(), bid + '.out', filename)
            bpio.AtomicWriteFile(newfilepath, newpacket.Serialize())
        reactor.stop()
    job = backup(backupID, backupPipe, _bk_done)
    reactor.callLater(1, job.automat, 'start')
    reactor.run()
Example #12
def main():
    from system import bpio
    from storage import backup_tar

    bpio.init()
    settings.init()

    lg.set_debug_level(24)
    lg.life_begins()

    automat.LifeBegins(lg.when_life_begins())
    automat.OpenLogFile(settings.AutomatsLog())

    key.InitMyKey()
    my_id.init()

    sourcePath = sys.argv[1]
    compress_mode = 'none'  # 'gz'
    backupID = sys.argv[2]
    raid_worker.A('init')

    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptardir_thread(sourcePath,
                                                    compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile_thread(sourcePath,
                                                     compress=compress_mode)

    def _bk_done(bid, result):
        from crypt import signed
        customer, remotePath = packetid.SplitPacketID(bid)
        try:
            os.mkdir(
                os.path.join(settings.getLocalBackupsDir(), customer,
                             remotePath + '.out'))
        except:
            pass
        for filename in os.listdir(
                os.path.join(settings.getLocalBackupsDir(), customer,
                             remotePath)):
            filepath = os.path.join(settings.getLocalBackupsDir(), customer,
                                    remotePath, filename)
            payld = bpio.ReadBinaryFile(filepath)
            newpacket = signed.Packet('Data', my_id.getLocalID(),
                                      my_id.getLocalID(), filename, payld,
                                      'http://megafaq.ru/cvps1010.xml')
            newfilepath = os.path.join(settings.getLocalBackupsDir(), customer,
                                       remotePath + '.out', filename)
            bpio.WriteBinaryFile(newfilepath, newpacket.Serialize())

    def _bk_closed(*args, **kwargs):
        # job.automat('fail')
        # del job
        reactor.stop()  # @UndefinedVariable

    def _bk_start():
        job = backup(backupID, backupPipe, blockSize=16 * 1024 * 1024)
        job.finishCallback = _bk_done  # lambda bid, result: _bk_done(bid, result, job)
        job.addStateChangedCallback(_bk_closed, oldstate=None, newstate='DONE')
        reactor.callLater(1, job.automat, 'start')  # @UndefinedVariable

    reactor.callLater(0, _bk_start)  # @UndefinedVariable
    reactor.run()  # @UndefinedVariable
    settings.shutdown()
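
Note the design difference from the earlier scripts: this variant uses the thread-based `backuptardir_thread`/`backuptarfile_thread` helpers, so the tar stream is produced in-process through a `BytesLoop` object and no external `bppipe` executable needs to be located, whereas Examples #4 and #5 spawn a child process and communicate with it over a pipe.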
Example #13
    def run(self):
        """
        Runs a new ``Job`` from that ``Task``.
        """
        iter_and_path = backup_fs.WalkByID(self.remotePath, iterID=backup_fs.fsID(self.customerIDURL))
        if iter_and_path is None:
            lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.remotePath)
            # self.defer.callback('error', self.pathID)
            # self._on_job_failed(self.pathID)
            err = 'remote path "%s" not found in the catalog' % self.remotePath
            OnTaskFailed(self.pathID, err)
            return err
        itemInfo, sourcePath = iter_and_path
        if isinstance(itemInfo, dict):
            try:
                itemInfo = itemInfo[backup_fs.INFO_KEY]
            except:
                lg.exc()
                # self._on_job_failed(self.pathID)
                err = 'catalog item related to "%s" is broken' % self.remotePath
                OnTaskFailed(self.pathID, err)
                return err
        if not self.localPath:
            self.localPath = sourcePath
            lg.out(4, 'backup_control.Task.run local path was populated from the catalog: %s' % self.localPath)
        if self.localPath != sourcePath:
            lg.warn('local path differs from the catalog: %s != %s' % (self.localPath, sourcePath))
        if not bpio.pathExist(self.localPath):
            lg.warn('path does not exist: %s' % self.localPath)
            # self._on_job_failed(self.pathID)
            err = 'local path "%s" does not exist' % self.localPath
            OnTaskFailed(self.pathID, err)
            return err
#         if os.path.isfile(self.localPath) and self.localPath != sourcePath:
#             tmpfile.make(name, extension, prefix)
        dataID = misc.NewBackupID()
        if itemInfo.has_version(dataID):
            # oops - we already have the same version
            # let's append 1,2,3... to the end to make an absolutely unique version ID
            i = 1
            while itemInfo.has_version(dataID + str(i)):
                i += 1
            dataID += str(i)
        self.backupID = packetid.MakeBackupID(
            customer=self.fullCustomerID,
            path_id=self.remotePath,
            version=dataID,
        )
        if self.backupID in jobs():
            lg.warn('backup job %s already started' % self.backupID)
            return 'backup job %s already started' % self.backupID
        try:
            backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), self.backupID)
        except:
            lg.exc()
            lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
            # self.defer.callback('error', self.pathID)
            # self._on_job_failed(self.backupID)
            err = 'failed creating destination folder for "%s"' % self.backupID
            return OnTaskFailed(self.backupID, err)
        compress_mode = 'bz2'  # 'none' # 'gz'
        arcname = os.path.basename(sourcePath)
        if bpio.pathIsDir(self.localPath):
            backupPipe = backup_tar.backuptardir(self.localPath, arcname=arcname, compress=compress_mode)
        else:
            backupPipe = backup_tar.backuptarfile(self.localPath, arcname=arcname, compress=compress_mode)
        backupPipe.make_nonblocking()
        job = backup.backup(
            self.backupID,
            backupPipe,
            finishCallback=OnJobDone,
            blockResultCallback=OnBackupBlockReport,
            blockSize=settings.getBackupBlockSize(),
            sourcePath=self.localPath,
            keyID=self.keyID or itemInfo.key_id,
        )
        jobs()[self.backupID] = job
        itemInfo.add_version(dataID)
        if itemInfo.type == backup_fs.DIR:
            dirsize.ask(self.localPath, OnFoundFolderSize, (self.pathID, dataID))
        else:
            sz = os.path.getsize(self.localPath)
            jobs()[self.backupID].totalSize = sz
            itemInfo.set_size(sz)
            backup_fs.Calculate()
            Save()
        jobs()[self.backupID].automat('start')
        reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
        lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
            self.number, self.pathID, dataID, itemInfo.size, self.localPath))
        return None
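
The collision loop above guarantees a unique version ID; a worked illustration with a hypothetical timestamp-style ID:

# misc.NewBackupID()        -> 'F20240101120000AM'  (hypothetical value)
# version already exists    -> try 'F20240101120000AM1', then 'F20240101120000AM2', ...
# first free suffix is used -> dataID = 'F20240101120000AM1'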
Example #14
    def run(self):
        """
        Runs a new ``Job`` from that ``Task``.

        Called from ``RunTasks()`` method if it is possible to start a
        new task - the maximum number of simultaneously running ``Jobs``
        is limited.
        """
        import backup_tar
        import backup
        iter_and_path = backup_fs.WalkByID(self.pathID)
        if iter_and_path is None:
            lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.pathID)
            # self.defer.callback('error', self.pathID)
            return
        itemInfo, sourcePath = iter_and_path
        if isinstance(itemInfo, dict):
            try:
                itemInfo = itemInfo[backup_fs.INFO_KEY]
            except:
                lg.exc()
                return
        if self.localPath and self.localPath != sourcePath:
            lg.warn('local path was changed: %s -> %s' % (self.localPath, sourcePath))
        self.localPath = sourcePath
        if not bpio.pathExist(sourcePath):
            lg.warn('path does not exist: %s' % sourcePath)
            reactor.callLater(0, OnTaskFailed, self.pathID, 'path does not exist')
            return
        dataID = misc.NewBackupID()
        if itemInfo.has_version(dataID):
            # oops - we already have the same version
            # let's append 1,2,3... to the end to make an absolutely unique version ID
            i = 1
            while itemInfo.has_version(dataID + str(i)):
                i += 1
            dataID += str(i)
        backupID = self.pathID + '/' + dataID
        try:
            backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
        except:
            lg.exc()
            lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
            # self.defer.callback('error', self.pathID)
            return
        compress_mode = 'bz2'  # 'none' # 'gz'
        if bpio.pathIsDir(sourcePath):
            backupPipe = backup_tar.backuptar(sourcePath, compress=compress_mode)
        else:
            backupPipe = backup_tar.backuptarfile(sourcePath, compress=compress_mode)
        backupPipe.make_nonblocking()
        job = backup.backup(
            backupID, backupPipe,
            OnJobDone, OnBackupBlockReport,
            settings.getBackupBlockSize(),
            sourcePath)
        jobs()[backupID] = job
        itemInfo.add_version(dataID)
        if itemInfo.type in [backup_fs.PARENT, backup_fs.DIR]:
            dirsize.ask(sourcePath, OnFoundFolderSize, (self.pathID, dataID))
        else:
            jobs()[backupID].totalSize = os.path.getsize(sourcePath)
        jobs()[backupID].automat('start')
        reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
        lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
            self.number, self.pathID, dataID, itemInfo.size, sourcePath))