Example 1
0
def preupgrade(backup_volume):
    """Back up clinic data to an external volume before a server re-ghost.

    Creates a timestamped backup directory on ``backup_volume``, stops
    apache and couchdb, then copies localsettings.py, the couch database
    file, and a pg_dump of the postgres database into it.

    :param backup_volume: mount point of the (external) backup drive.
    :raises OSError: if the backup directory cannot be created.
    """
    # Token uniquely identifies this host + moment; postupgrade later
    # parses the hostname back out of the directory name.
    hostname = run('hostname').strip()
    timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
    backup_token = '%s_%s' % (hostname, timestamp)
    backup_dir = os.path.join(backup_volume, 'ugbk_%s' % backup_token)

    try:
        os.mkdir(backup_dir)
    except OSError:
        # Narrowed from a bare except: os.mkdir failures surface as OSError;
        # a bare except would also trap KeyboardInterrupt/SystemExit.
        _print("""\
ERROR making backup dir: backup drive is not mounted, mis-typed, read-only, or
backup dir already exists!""", True)
        raise

    # Quiesce services so the database files are consistent on disk.
    fab_bhoma.stop_apache()
    _stop_couchdb()

    _print('backing up configuration settings')
    paranoid_backup(os.path.join(APP_DIR, 'localsettings.py'), backup_dir)

    _print('backing up couch database')
    couch_db = get_django_setting('BHOMA_COUCH_DATABASE_NAME', COUCH_DB_DEFAULT)
    # Couch data file is owned by the couch user, so copy with sudo.
    paranoid_backup(os.path.join(COUCH_DATA_DIR, '%s.couch' % couch_db), os.path.join(backup_dir, 'db.couch'), sudo)

    _print('backing up postgres database')
    # Dump to /tmp first (postgres user can write there), then copy the
    # dump file into the backup directory.
    with postgres_op() as pgrun:
        pgrun('pg_dump {{db}} > /tmp/pgdump')
    paranoid_backup('/tmp/pgdump', os.path.join(backup_dir, 'postgres_db.pgsql'))

    _print("""
  clinic data has been backed up to %s
  make sure this directory is on an external drive that will not be deleted
  when this server is re-ghosted!!

  to restore clinic data on the upgraded server, run:
    fab clinic postupgrade:%s""" % (backup_dir, backup_dir))
Example 2
0
def _protected_update(inner_update):
    """
    Shared functionality around a protected update, which backs up
    and restores the directory if anything fails.

    ``inner_update`` is a zero-argument callable performing the actual
    update; apache/formplayer (and, on central, the server scripts) are
    stopped before it runs and restarted afterwards.  If the update
    aborts (fabric signals failure via SystemExit), the pre-update
    backup of ``env.root`` is restored before re-raising.
    """
    # Only these fabric environments are valid targets for an update.
    require('environment', provided_by=('central', 'dimagi', 'clinic', 'daemon'))
    # Snapshot the current deployment so we can roll back on failure.
    fab_os.create_directory(BACKUP_DIR)
    backup_dir = PATH_SEP.join((BACKUP_DIR, fab_os.timestamp_string()))
    fab_os.backup_directory(env.root, backup_dir)
    with cd(get_app_dir()):
        # Quiesce services before touching the code directory.
        fab_bhoma.stop_apache()
        fab_bhoma.stop_formplayer()
        if env.environment == "central":
            # stop a few more things only available/running on the central server
            fab_central.stop_central_server_scripts()
        try:
            inner_update()
            # Success path: bring services back up and optionally smoke-test.
            fab_bhoma.start_formplayer()
            fab_bhoma.start_apache()
            if env.test: 
                fab_bhoma.check_server()
        except SystemExit:
            # fabric's abort() raises SystemExit; roll back to the snapshot.
            print "caught abort from fabric!  restoring backup directory."
            with cd(TMP_DIR):
                fab_os.restore_directory(backup_dir, SOURCE_DIR)
            fab_bhoma.start_formplayer()
            fab_bhoma.start_apache()
            # if this fails we're kinda screwed, but at least 
            # we might know from email notifications
            if env.test:
                fab_bhoma.check_server()
            raise
        finally:
            # Central-only services are restarted whether or not the
            # update succeeded.
            if env.environment == "central":
                fab_central.load_zones()
                fab_central.start_central_server_scripts()
Example 3
0
def postupgrade(backup_dir):
    """Restore clinic data from a backup made by ``preupgrade``.

    Restores localsettings.py, the postgres and couch databases, and the
    system hostname, then re-indexes couch views.

    :param backup_dir: path to the ``ugbk_<hostname>_<timestamp>`` backup
        directory created by ``preupgrade``.
    """
    if not os.path.exists(backup_dir) or not os.path.isdir(backup_dir):
        _print('cannot find backup dir [%s]' % backup_dir, True)
        # Exit non-zero so callers/scripts can detect the failure.
        sys.exit(1)

    # Strip a trailing slash so os.path.split() below yields the final
    # path component (needed for hostname extraction).
    if backup_dir.endswith('/'):
        backup_dir = os.path.dirname(backup_dir)

    fab_bhoma.stop_apache()
    _stop_couchdb()

    _print('restoring configuration settings')
    paranoid_restore(os.path.join(backup_dir, 'localsettings.py'), APP_DIR)
    # it is possible localsettings needs to be updated with new required settings before we can proceed with the restore

    _print('restoring postgres database')
    # Restore the raw pg_dump file, then drop/recreate/load the database.
    paranoid_restore(os.path.join(backup_dir, 'postgres_db.pgsql'), '/tmp/pgdump')
    with postgres_op() as pgrun:
        pgrun('dropdb {{db}}')
        pgrun('createdb {{db}}')
        pgrun('psql {{db}} -f /tmp/pgdump')
    #run db migrations here?
    # couchdb must be running for syncdb, else couchdbkit complains
    _start_couchdb()
    with cd(APP_DIR):
        run('python manage.py syncdb')
    _stop_couchdb()

    _print('setting hostname')
    hostname = get_django_setting('SYSTEM_HOSTNAME')
    # BUGFIX: previously the setting was unconditionally overwritten by
    # the directory-name extraction; only fall back when it is missing.
    if not hostname:
        _print('no hostname in localsettings.py; attempting to extract from backup dir')
        try:
            # Directory names look like ugbk_<hostname>_<14-digit timestamp>.
            hostname = re.match('ugbk_(.+)_[0-9]{14}$', os.path.split(backup_dir)[1]).group(1)
        except AttributeError:
            # re.match returned None -- name did not match the pattern.
            hostname = None
    if not hostname:
        _print('unable to determine system hostname!', True)
        sys.exit(1)
    _print('using [%s] as hostname' % hostname)
    sudo('echo %s > /etc/hostname' % hostname)

    # don't think we really need to update /etc/hosts; punting...

    _print('restoring couch database')
    couch_db = get_django_setting('BHOMA_COUCH_DATABASE_NAME', COUCH_DB_DEFAULT)
    couch_data_file = os.path.join(COUCH_DATA_DIR, '%s.couch' % couch_db)
    paranoid_restore(os.path.join(backup_dir, 'db.couch'), couch_data_file, sudo)
    # Restore couch-user ownership so couchdb can open its data file.
    sudo('chown %s:%s "%s"' % (COUCH_USER, COUCH_USER, couch_data_file))

    _print('re-indexing couch views (may take a while...)')
    _start_couchdb()
    with cd(APP_DIR):
        fab_bhoma.reindex_views()

    _print("""
  the upgrade and restore is complete. next steps:

  0) verify no errors occurred during the restore; if they did, contact a data
     team member and don't deploy this server

  1) the current system time is %s. set the correct time
     if this is wrong

  2) reboot the server

  3) perform the post-upgrade testing and verification steps
""" % clock_str())