Example #1
def make_backup():
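    """Refresh the file list, ensure S3 credentials are cached, then
    compress the files into a tarball under /tmp, upload it, and report
    each stage ('compress', 'upload', 'complete') to the API."""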
    if not update_files() or not client_status.files:
        return False
    if not client_status.amazon:
        status, content = api.get_s3_access()
        if status == 200:
            client_status.amazon = content
            client_status.save()
        else:
            log.error('Getting S3 access failed')
            return False

    status, backup_id = api.set_backup_info('compress',
                                            time=time.time(),
                                            files='\n'.join(
                                                client_status.files))
    if status != 200:
        return False
    tmp = '/tmp/backup_%s.tar.gz' % datetime.utcnow().strftime('%Y.%m.%d_%H%M')
    backup.compress(tmp)
    kwargs = {'backup_id': backup_id}
    api.set_backup_info('upload', **kwargs)
    key, size = backup.upload(tmp)
    client_status.prev_backup = date.today().strftime('%Y.%m.%d')
    client_status.save()
    kwargs['time'] = time.time()
    kwargs['keyname'] = key
    kwargs['size'] = size
    api.set_backup_info('complete', **kwargs)
    return True
Example #2
def check_system_info():
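    """Send system info to the API only when it differs from the cached
    copy; cache the new value on a 200 response."""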
    info = get_system_info()
    if info == client_status.system_info:
        return True
    if api.update_system_info(info) == 200:
        client_status.system_info = info
        client_status.save()
        return True
    return False
Example #3
def update_files():
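    """Fetch the list of files to back up; 200 replaces the cached list
    and its hash, 304 means the cached list is still current."""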
    status, content = api.get_files()
    if status == 200:
        client_status.files_hash = sha(content).hexdigest()
        client_status.files = content.split('\n')
        client_status.save()
        return True
    elif status == 304:
        return True
    return False
Example #4
def update_schedule(on_update=None, on_404=False):
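    """Fetch the backup schedule; 'time' arrives as an 'HHMM' string and
    is stored as an (hour, minute) tuple. 304 keeps the cached schedule;
    a 404 returns the on_404 flag."""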
    status, content = api.get_schedule()
    if status == 200:
        content['time'] = (int(content['time'][:2]), int(content['time'][2:]))
        client_status.schedule = content
        client_status.save()
        if on_update:
            on_update()
        return True
    return {304: True, 404: on_404}.get(status, False)
Example #5
def check_update():
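    """Ask the API for a newer client version; 200 triggers an update
    from the returned url, 304 only refreshes the check timestamp."""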
    status, url = api.check_version()
    if status in (200, 304):
        client_status.last_ver_check = datetime.now()
        client_status.save()
        if status == 200:
            log.info('Start client update')
            update(url)
        return True
    return False
Example #6
def set_fs(depth=-1, step_time=2 * MIN, top='/', action='start', start=None):
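    """Upload the filesystem tree to the API one level at a time.
    Returns 1 when the walk finished, -1 if step_time ran out with
    levels remaining (upload_dirs records the resume point), and 0 on
    an API error."""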
    till = datetime.utcnow() + timedelta(seconds=step_time)
    for level, has_next in levelwalk(depth=depth, top=top, start=start):
        status = api.update_fs([level], action, has_next=has_next)
        depth -= 1
        if status == 200:
            if has_next:
                client_status.upload_dirs = [[p[:2] for p in level if p[1]],
                                             depth]
            else:
                client_status.upload_dirs = []
                client_status.last_fs_upload = datetime.utcnow()
            client_status.save()
        else:
            return 0
        if datetime.utcnow() > till and has_next:
            return -1
        action = 'append'
    return 1
Example #7
def run():
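    """Entry point: check for a client update (applying it and exiting
    if one is found), register the client with the API, then daemonize
    and hand control to the Observer loop."""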
    log.info('Checking updates')
    url = None
    if client_status.is_registered:
        status, url = api.check_version()
    else:
        status, content = api.get_version()
        if status == 200 and bitcalm.__version__ != content[0]:
            url = content[1]
    if status != 500:
        client_status.last_ver_check = datetime.now()
        client_status.save()
    if url:
        log.info('Start client update')
        update(url)
        exit()

    print 'Sending info about the client...'
    status, content = api.hi()
    print content
    if not client_status.is_registered:
        if status == 200:
            client_status.is_registered = True
            client_status.save()
        else:
            exit('Aborted')

    context = DaemonContext(pidfile=PIDLockFile(PIDFILE_PATH),
                            signal_map={signal.SIGTERM: on_stop},
                            stderr=open(CRASH_PATH, 'w'))
    context.files_preserve = map(
        lambda h: h.stream,
        filter(lambda h: isinstance(h, FileHandler), log.logger.handlers))
    print 'Starting daemon'
    with context:
        Observer(work)()
Example #8
def run():
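    """Entry point: register the client if needed, report a crash dump
    left by a previous run, then daemonize: upload a filesystem image,
    start pyinotify watches on filesystem changes, and run the main
    loop of scheduled actions (fs/log upload, restore, backup)."""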
    if not client_status.is_registered:
        print 'Sending info about the new client...'
        status, content = api.hi(platform.uname())
        print content
        if status == 200:
            client_status.is_registered = True
            client_status.save()
        else:
            exit('Aborted')

    if os.path.exists(CRASH_PATH):
        crash = os.stat(CRASH_PATH)
        if crash.st_size:
            with open(CRASH_PATH) as f:
                crash_info = f.read()
            status = api.report_crash(crash_info, crash.st_mtime)
            if status == 200:
                log.info('Crash reported')
                os.remove(CRASH_PATH)

    context = DaemonContext(pidfile=PIDLockFile(PIDFILE_PATH),
                            signal_map={signal.SIGTERM: on_stop},
                            stderr=open(CRASH_PATH, 'w'))
    context.files_preserve = map(
        lambda h: h.stream,
        filter(lambda h: isinstance(h, FileHandler), log.logger.handlers))
    print 'Starting daemon'
    with context:
        log.info('Daemon started')
        log.info('Build filesystem image')
        basepath = '/'
        root = FSNode(basepath, ignore=IGNORE_PATHS)
        root_d = root.as_dict()
        root_str = json.dumps(root_d)
        h = sha(root_str).hexdigest()
        if not client_status.fshash or client_status.fshash != h:
            status, content = api.set_fs(root_str)
            if status == 200:
                client_status.fshash = h
                client_status.save()
                log.info('Filesystem image updated')
            else:
                log.error('Filesystem image update failed')

        log.info('Create watch manager')
        wm = WatchManager()
        changelog = []
        global notifier
        notifier = ThreadedNotifier(wm, FSEvent(changelog=changelog))
        notifier.start()
        mask = IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO
        log.info('Start watching filesystem changes')
        for item in os.listdir(basepath):
            path = os.path.join(basepath, item)
            if item in IGNORE_PATHS or os.path.islink(path):
                continue
            wm.add_watch(path, mask, rec=True)

        actions = [
            Action(FS_UPLOAD_PERIOD, upload_fs, changelog),
            Action(LOG_UPLOAD_PERIOD, upload_log),
            Action(RESTORE_CHECK_PERIOD, restore)
        ]

        backup_action = lambda: Action(backup.get_next(), make_backup)

        def on_schedule_update(actions=actions):
            actions[-1] = backup_action()

        update_schedule_action = lambda: Action(SCHEDULE_UPDATE_PERIOD,
                                                update_schedule,
                                                on_update=on_schedule_update)

        if update_schedule() or client_status.schedule:
            actions.append(update_schedule_action())
            actions.append(backup_action())
        else:

            def on_schedule_download(actions=actions):
                actions[-1] = update_schedule_action()
                actions.append(backup_action())

            actions.append(
                Action(SCHEDULE_UPDATE_PERIOD,
                       update_schedule,
                       on_update=on_schedule_download,
                       on_404=True))

        log.info('Start main loop')
        while True:
            action = min(actions)
            log.info('Next action is %s' % action)
            time.sleep(action.time_left())
            action()
Example #9
def make_backup():
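    """Resumable backup: client_status.backup['status'] tracks the
    stage (0 prepared, 1 filesystem, 2 databases, 3 complete), so an
    interrupted backup continues where it stopped. Files go through
    BackupHandler; databases are dumped with dump_db and uploaded."""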
    schedule = backup.next_schedule()
    if not client_status.backup:
        status, backup_id = api.set_backup_info('prepare',
                                                time=time.time(),
                                                schedule=schedule.id)
        if status != 200:
            return False
        client_status.backup = {'backup_id': backup_id, 'status': 0}
        client_status.save()
    else:
        backup_id = client_status.backup['backup_id']
    bstatus = client_status.backup
    if schedule.files and bstatus['status'] < 2:
        schedule.clean_files()
        if bstatus['status'] == 0:
            status, content = api.set_backup_info(
                'filesystem',
                backup_id=backup_id,
                has_info=bool(client_status.backupdb.count()))
            if status == 200:
                bstatus['is_full'] = content['is_full']
                if bstatus['is_full']:
                    client_status.backupdb.clean()
                elif 'prev' in content:
                    backup.get_database(int(content['prev']))
            else:
                return False
            bstatus['status'] = 1
            client_status.save()
        if bstatus.get('items') is None:
            bstatus['items'] = {
                'dirs': filter(os.path.isdir, schedule.files),
                'files': filter(os.path.isfile, schedule.files)
            }
            client_status.save()

        files = iterfiles(files=bstatus['items']['files'],
                          dirs=bstatus['items']['dirs'])
        if not bstatus['is_full']:
            files = modified(files, client_status.backupdb)

        with backup.BackupHandler(backup_id) as handler:
            for filename in files:
                try:
                    info = os.stat(filename)
                except OSError:
                    continue
                size, is_compressed = handler.upload_file(filename)
                if size is None:
                    continue
                row = (filename, 1, info.st_mtime, info.st_size, info.st_mode,
                       info.st_uid, info.st_gid, int(is_compressed), backup_id)
                client_status.backupdb.add((row, ))
                client_status.save()
                if handler.files_count >= 100:
                    handler.upload_stats()

    if schedule.databases and bstatus['status'] < 3:
        if bstatus['status'] != 2:
            api.set_backup_info('database', backup_id=backup_id)
            bstatus['status'] = 2
            client_status.save()
        if not bstatus.get('databases'):
            bstatus['databases'] = []
            for host, dbnames in schedule.databases.iteritems():
                if ':' in host:
                    host, port = host.split(':')
                    port = int(port)
                else:
                    port = DEFAULT_DB_PORT
                for name in dbnames:
                    client_status.backup['databases'].append(
                        (host, port, name))
                client_status.save()
        db_creds = {}
        make_key = lambda h, p: '%s:%i' % (h, p)
        for db in itertools.chain(config.database, client_status.database):
            key = make_key(db['host'], db.get('port', DEFAULT_DB_PORT))
            db_creds[key] = (db['user'], db['passwd'])
        db_success = 0
        db_total = len(bstatus['databases'])
        with backup.BackupHandler(backup_id) as handler:
            while bstatus['databases']:
                host, port, name = bstatus['databases'].pop()
                try:
                    user, passwd = db_creds[make_key(host, port)]
                except KeyError:
                    log.error('There are no credentials for %s:%i' %
                              (host, port))
                    client_status.save()
                    continue
                ts = datetime.utcnow().strftime('%Y.%m.%d_%H%M')
                path = '/tmp/%s_%i_%s_%s.sql.gz' % (host, port, name, ts)
                if not dump_db(
                        name, host, user, path=path, passwd=passwd, port=port):
                    log.error('Dump of %s from %s:%i failed' %
                              (name, host, port))
                    client_status.save()
                    continue
                handler.upload_db(path)
                handler.upload_stats()
                client_status.save()
                db_success += 1
                os.remove(path)
        if db_success != db_total:
            log.error('%i of %i databases were backed up' %
                      (db_success, db_total))

    bstatus['status'] = 3
    client_status.save()
    api.set_backup_info('complete', backup_id=backup_id, time=time.time())
    client_status.backup = None
    backup.next_schedule().done()
    client_status.save()
    return True
Example #10
def check_changes():
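    """Poll the API for queued changes: uninstall or update requests,
    new S3 access, database credentials, backup schedules, restore
    tasks, log-tail and filesystem-upload requests. 304 means nothing
    changed."""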
    status, content = api.get_changes()
    if status == 200:
        if content.get('uninstall'):
            uninstall(verbose=False)
            stop()
        version = content.get('version')
        if version:
            ver, url = version
            if ver != bitcalm.__version__:
                actions.add(OneTimeAction(0, update, url))
                log.info('Planned update to %s' % ver)
                return True

        access = content.get('access')
        if access:
            client_status.amazon = access

        dbases = content.get('db')
        if dbases:
            client_status.database = dbases
            db_test = ((db, connection_error(**db)) for db in dbases)
            err_db = filter(lambda db: db[1], db_test)
            if err_db:
                for db in err_db:
                    dbases.remove(db[0])
                err_db = [(db['host'], db.get('port', 3306), err)
                          for db, err in err_db]
                api.report_db_errors(err_db)
            if dbases and not actions.has(check_db):
                actions.add(Action(DB_CHECK_PERIOD, check_db, start=0))

        schedules = content.get('schedules')
        if schedules:
            types = {
                'daily': DailySchedule,
                'weekly': WeeklySchedule,
                'monthly': MonthlySchedule
            }
            curr = {}
            for s in client_status.schedules:
                curr[s.id] = s
            for s in schedules:
                if 'db' in s:
                    db = pickle.loads(s['db'])
                    user_db = lambda db: db not in EXCLUDE_DB
                    for dbases in db.itervalues():
                        dbases[:] = filter(user_db, dbases)
                    s['db'] = db
                cs = curr.get(s['id'])
                if cs:
                    if isinstance(cs, types[s['type']]):
                        cs.update(**s)
                    else:
                        ns = types[s.pop('type')](**s)
                        ns.prev_backup = cs.prev_backup
                        ns.exclude = cs.exclude
                        client_status.schedules.remove(cs)
                        client_status.schedules.append(ns)
                else:
                    ns = types[s.pop('type')](**s)
                    client_status.schedules.append(ns)
            b = actions.get(make_backup)
            if b:
                b.next()
            else:
                actions.add(Action(backup.next_date, make_backup))
        client_status.save()
        tasks = content.get('restore')
        if tasks:
            actions.add(OneTimeAction(30, restore, tasks))
        if content.get('log_tail', False):
            actions.add(OneTimeAction(0, upload_log, entries=tail_log()))
        if content.get('send_fs', False):
            fs_action = actions.get(update_fs)
            if fs_action:
                fs_action.delay(period=0)
            else:
                actions.add(StepAction(FS_SET_PERIOD, update_fs, start=0))
        return True
    elif status == 304:
        return True
    return False