def install_hook():
    execd_preinstall()
    utils.configure_source()
    utils.install(*packages)
    update_config_block('DEFAULT',
                        public_port=cluster.determine_api_port(config["service-port"]))
    update_config_block('DEFAULT',
                        admin_port=cluster.determine_api_port(config["admin-port"]))
    set_admin_token(config['admin-token'])

    # set all backends to use sql/sqlite, in case the package defaults differ
    update_config_block('sql',
                        connection='sqlite:////var/lib/keystone/keystone.db')
    update_config_block('identity',
                        driver='keystone.identity.backends.sql.Identity')
    update_config_block('catalog',
                        driver='keystone.catalog.backends.sql.Catalog')
    update_config_block('token',
                        driver='keystone.token.backends.sql.Token')
    update_config_block('ec2',
                        driver='keystone.contrib.ec2.backends.sql.Ec2')

    utils.stop('keystone')
    execute("keystone-manage db_sync")
    utils.start('keystone')

    # ensure the SSH_USER user exists with the right permissions, for peer
    # relations that may be syncing data into /var/lib/keystone via SSH.
    unison.ensure_user(user=SSH_USER, group='keystone')
    execute("chmod -R g+wrx /var/lib/keystone/")

    time.sleep(5)
    ensure_initial_admin(config)
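
install_hook leans on an update_config_block helper defined elsewhere in the charm. For reference, here is a minimal sketch of what such a helper could look like, assuming keystone.conf is INI-style; the path and behavior are assumptions for illustration, not the charm's actual implementation:

import configparser  # the ConfigParser module on Python 2

KEYSTONE_CONF = '/etc/keystone/keystone.conf'  # assumed path

def update_config_block(block, **kwargs):
    # Set key=value pairs in one section of keystone.conf, creating the
    # section if it is missing. RawConfigParser avoids '%'-interpolation
    # errors in values such as SQL connection strings.
    parser = configparser.RawConfigParser()
    parser.read(KEYSTONE_CONF)
    if block != 'DEFAULT' and not parser.has_section(block):
        parser.add_section(block)
    for key, value in kwargs.items():
        parser.set(block, key, str(value))
    with open(KEYSTONE_CONF, 'w') as conf:
        parser.write(conf)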
Example #2
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=None,
                        rbd_pool_replicas=2):
    """
    To be called from the current cluster leader.
    Ensures the given pool and RBD image exist, that the image is mapped to
    a block device, and that the device is formatted and mounted at the
    given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being remounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.
    """
    system_services = system_services or []  # guard the None default

    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
        create_pool(service, pool, replicas=rbd_pool_replicas)

    if not rbd_exists(service, pool, rbd_img):
        utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
        map_block_storage(service, pool, rbd_img)

    # Make the filesystem.
    # TODO: What happens if, for whatever reason, this is run again and
    # the data is already in the RBD device and/or is mounted?
    # When it is mounted already, it will fail to make the fs.
    # XXX: This is really sketchy!  Need to at least add an fstab entry,
    #      otherwise this hook will blow away existing data if it's executed
    #      after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if utils.running(svc):
                utils.juju_log(
                    'INFO', 'Stopping service %s prior to migrating '
                    'data' % svc)
                utils.stop(svc)

        place_data_on_ceph(service, blk_device, mount_point, fstype)

        for svc in system_services:
            utils.start(svc)
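
One way to address the XXX note above would be to record the mount in /etc/fstab so it survives a reboot. A minimal sketch follows; the fstab layout is standard, but wiring this into the hook (and the helper name) is an assumption, not part of this charm:

def ensure_fstab_entry(blk_device, mount_point, fstype):
    # Append an /etc/fstab entry for the RBD-backed mount if one is not
    # already present. Sketch only: a real charm would also need rbdmap
    # or _netdev handling so the device is mapped before mount at boot.
    entry = '%s %s %s defaults,noauto 0 0\n' % (blk_device, mount_point, fstype)
    with open('/etc/fstab') as fstab:
        for line in fstab:
            fields = line.split()
            if (len(fields) >= 2 and not line.lstrip().startswith('#')
                    and fields[0] == blk_device and fields[1] == mount_point):
                return  # entry already present
    with open('/etc/fstab', 'a') as fstab:
        fstab.write(entry)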
Example #3
def do_molecfit(headers, spectra, wave=None, mode='HARPS', load_previous=False):
    """This function pipes a list of s1d spectra into molecfit and executes
    it. It first launches the molecfit GUI on the middle spectrum of the
    sequence, and then loops through the entire list, returning the
    transmission spectra of the Earth's atmosphere in the same order as the
    list provided. These can then be used to correct the s1d spectra or the
    e2ds spectra. Note that the s1d spectra are assumed to be in the
    barycentric frame in vacuum, but the output transmission spectrum is in
    the observer's frame, and e2ds files are in air wavelengths by default.

    If you have run do_molecfit before and want to reuse the output of the
    previous run for whatever reason, set the load_previous keyword to True.
    This will reload the list of transmission spectra created last time, if
    available.
    """

    import sys
    import os.path
    import lib.utils as ut
    import pickle

    if wave is None:  # guard the mutable-default fix in the signature
        wave = []

    molecfit_input_folder = '/Users/hoeijmakers/Molecfit/share/molecfit/spectra/cross_cor/'
    molecfit_prog_folder = '/Users/hoeijmakers/Molecfit/bin/'
    temp_specname = mode  # The name of the temporary file used (without extension).
    # The spectrum will be written as <temp_specname>.fits. A matching
    # <temp_specname>.par file must also exist, pointing molecfit at that
    # fits file:
    parname = temp_specname + '.par'

    #====== ||  START OF PROGRAM   ||======#
    N = len(headers)
    if N != len(spectra):
        print('ERROR in do_molecfit: Length of list of headers is not equal '
              'to length of list of spectra (%s, %s).' % (N, len(spectra)))
        sys.exit()

    # Test that the input and molecfit roots exist, that the molecfit root
    # contains the molecfit executables, and that the input root contains
    # the desired parfile (and, later, the fits file).
    molecfit_input_root = ut.path(molecfit_input_folder)
    molecfit_prog_root = ut.path(molecfit_prog_folder)
    if not os.path.isdir(molecfit_input_root):
        print('ERROR in do_molecfit: ' + molecfit_input_root +
              ' does not exist!')
        sys.exit()
    if not os.path.isdir(molecfit_prog_root):
        print('ERROR in do_molecfit: ' + molecfit_prog_root +
              ' does not exist!')
        sys.exit()
    if not os.path.isfile(molecfit_input_root + parname):
        print('ERROR in do_molecfit: ' + molecfit_input_root + parname +
              ' does not exist!')
        sys.exit()
    if not os.path.isfile(molecfit_prog_root + 'molecfit'):
        print('ERROR in do_molecfit: ' + molecfit_prog_root +
              'molecfit does not exist!')
        sys.exit()
    if not os.path.isfile(molecfit_prog_root + 'molecfit_gui'):
        print('ERROR in do_molecfit: ' + molecfit_prog_root +
              'molecfit_gui does not exist!')
        sys.exit()

    pickle_outpath = molecfit_input_root + 'previous_run_of_do_molecfit.pkl'

    if load_previous:
        if not os.path.isfile(pickle_outpath):
            print('WARNING in do_molecfit: Previously saved run is not '
                  'available. The user will have to re-fit; that run will '
                  'then be saved.')
            load_previous = False
        else:
            with open(pickle_outpath, 'rb') as pickle_in:
                list_of_wls, list_of_fxc, list_of_trans = pickle.load(pickle_in)

    if not load_previous:
        list_of_wls = []
        list_of_fxc = []
        list_of_trans = []

        # We initialize molecfit on the middle spectrum of the time series.
        middle_i = int(round(0.5 * N))
        write_file_to_molecfit(molecfit_input_root,
                               temp_specname + '.fits',
                               headers,
                               spectra,
                               middle_i,
                               mode=mode,
                               wave=wave)
        print(molecfit_input_root)
        print(temp_specname + '.fits')
        print(headers[middle_i])

        execute_molecfit(molecfit_prog_root,
                         molecfit_input_root + parname,
                         gui=True)
        wl, fx, trans = retrieve_output_molecfit(molecfit_input_root +
                                                 temp_specname)
        remove_output_molecfit(molecfit_input_root, temp_specname)

        for i in range(N):
            print('Fitting spectrum %s of %s' % (i + 1, N))
            t1 = ut.start()
            write_file_to_molecfit(molecfit_input_root,
                                   temp_specname + '.fits',
                                   headers,
                                   spectra,
                                   i,
                                   mode=mode,
                                   wave=wave)
            execute_molecfit(molecfit_prog_root,
                             molecfit_input_root + parname,
                             gui=False)
            wl, fx, trans = retrieve_output_molecfit(molecfit_input_root +
                                                     temp_specname)
            remove_output_molecfit(molecfit_input_root, temp_specname)
            list_of_wls.append(wl * 1000.0)  #Convert to nm.
            list_of_fxc.append(fx / trans)
            list_of_trans.append(trans)
            ut.end(t1)

        with open(pickle_outpath, 'wb') as f:
            pickle.dump((list_of_wls, list_of_fxc, list_of_trans), f)

    to_do_manually = check_fit_gui(list_of_wls, list_of_fxc, list_of_trans)
    if len(to_do_manually) > 0:
        print('The following spectra were selected to redo manually:')
        print(to_do_manually)
        # CHECK THAT THIS FUNCTIONALITY WORKS:
        for i in to_do_manually:
            write_file_to_molecfit(molecfit_input_root,
                                   temp_specname + '.fits',
                                   headers,
                                   spectra,
                                   int(i),
                                   mode=mode,
                                   wave=wave)
            execute_molecfit(molecfit_prog_root,
                             molecfit_input_root + parname,
                             gui=True)
            wl, fx, trans = retrieve_output_molecfit(molecfit_input_root +
                                                     temp_specname)
            list_of_wls[int(i)] = wl * 1000.0  # Convert to nm.
            list_of_fxc[int(i)] = fx / trans
            list_of_trans[int(i)] = trans
    return (list_of_wls, list_of_trans)
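
A hedged usage sketch: assuming headers and spectra are parallel lists of FITS headers and 1-D spectra loaded earlier in the pipeline (the read_s1d_list helper below is hypothetical, standing in for however those lists are built):

# Hypothetical call site; read_s1d_list is not part of this module.
headers, spectra = read_s1d_list('data/HARPS/night1/')
wls, trans = do_molecfit(headers, spectra, mode='HARPS', load_previous=True)
for wl, t in zip(wls, trans):
    print(wl.min(), wl.max(), t.mean())  # coverage (nm) and mean transmission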
Example #4
def do_openstack_upgrade(install_src, packages):
    '''Upgrade keystone packages from a given install source.'''

    config = config_get()
    old_vers = get_os_codename_package('keystone')
    new_vers = get_os_codename_install_source(install_src)

    utils.juju_log('INFO',
                   'Beginning Keystone upgrade: %s -> %s' %
                   (old_vers, new_vers))

    # Backup previous config.
    utils.juju_log('INFO', "Backing up contents of /etc/keystone.")
    stamp = time.strftime('%Y%m%d%H%M')
    cmd = 'tar -pcf /var/lib/juju/keystone-backup-%s.tar /etc/keystone' % stamp
    execute(cmd, die=True, echo=True)

    configure_installation_source(install_src)
    execute('apt-get update', die=True, echo=True)
    os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
    cmd = 'apt-get --option Dpkg::Options::=--force-confnew -y '\
          'install %s' % packages
    execute(cmd, echo=True, die=True)

    # we have new, fresh config files that need updating.
    # set the admin token, which is still stored in config.
    set_admin_token(config['admin-token'])

    # set the sql connection string if a shared-db relation is found.
    ids = utils.relation_ids('shared-db')

    if ids:
        for rid in ids:
            for unit in utils.relation_list(rid):
                utils.juju_log('INFO',
                               'Configuring new keystone.conf for '
                               'database access on existing database'
                               ' relation to %s' % unit)
                relation_data = utils.relation_get_dict(relation_id=rid,
                                                        remote_unit=unit)

                update_config_block('sql', connection="mysql://%s:%s@%s/%s" %
                                        (config["database-user"],
                                         relation_data["password"],
                                         relation_data["private-address"],
                                         config["database"]))

    utils.stop('keystone')
    if cluster.eligible_leader(CLUSTER_RES):
        utils.juju_log('INFO',
                       'Running database migrations for %s' % new_vers)
        execute('keystone-manage db_sync', echo=True, die=True)
    else:
        utils.juju_log('INFO',
                       'Not cluster leader; snoozing whilst'
                       ' leader upgrades DB')
        time.sleep(10)
    utils.start('keystone')
    time.sleep(5)
    utils.juju_log('INFO',
                   'Completed Keystone upgrade: '
                   '%s -> %s' % (old_vers, new_vers))
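
For reference, a minimal sketch of how a hook might drive this upgrade. The 'openstack-origin' config key and the openstack_upgrade_available helper are assumptions in the charm-helpers style, not code defined in this file:

def upgrade_charm():
    # Sketch: run do_openstack_upgrade when the configured install source
    # advertises a newer OpenStack release than the one installed.
    cfg = config_get()
    if openstack_upgrade_available('keystone'):  # assumed helper
        do_openstack_upgrade(cfg['openstack-origin'], ' '.join(packages))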