# Example 1 (0 votes)
def condense(config=None, vm_grouping_config=None, debug=False):
    """Build an in-place migration (condensation) schedule for a source cloud.

    Used when migration must happen without spare hardware: the source
    cloud's load is analyzed and rearranged so that as many hardware nodes
    as possible are freed, and CloudFerry filter files are generated for the
    resulting migration scenario.

    Steps:
     1. Collect flavors, VMs and nodes from the source cloud.
     2. Retrieve groups of connected VMs (see `get_groups`).
     3. Compute the migration schedule (`process.process`).
     4. Generate per-group filter files for CloudFerry (`create_filters`).

    :param config: path to CloudFerry configuration file (based on
        `configs/config.ini`)
    :param vm_grouping_config: path to grouping config file (based on
        `configs/groups.yaml`)
    :param debug: when `True`, enables DEBUG-level logging
    :raises RuntimeError: if grouping information cannot be read
    """
    if debug:
        utils.configure_logging("DEBUG")

    # Initialize configuration and verify the redis storage backend.
    cfglib.collector_configs_plugins()
    cfglib.init_config(config)
    data_storage.check_redis_config()

    LOG.info("Retrieving flavors, VMs and nodes from SRC cloud")
    flavors, vms, nodes = nova_collector.get_flavors_vms_and_nodes(cfglib.CONF)

    if cfglib.CONF.condense.keep_interim_data:
        condense_utils.store_condense_data(flavors, nodes, vms)

    LOG.info("Retrieving groups of VMs")

    # get_groups persists its result in the file named by group_file_path.
    get_groups(config, vm_grouping_config)
    groups = condense_utils.read_file(cfglib.CONF.migrate.group_file_path)
    if groups is None:
        error_msg = ("Grouping information is missing. Make sure you have "
                     "grouping file defined in config.")
        LOG.critical(error_msg)
        raise RuntimeError(error_msg)

    LOG.info("Generating migration schedule based on grouping rules")
    process.process(nodes=nodes, flavors=flavors, vms=vms, groups=groups)

    LOG.info("Starting generation of filter files for migration")
    create_filters(config)

    LOG.info("Migration schedule generated. You may now want to start "
             "evacuation job: 'fab evacuate'")

    LOG.info("Condensation process finished. Checkout filters file: %s.",
             DEFAULT_FILTERS_FILES)
# Example 2 (0 votes)
def condense(config=None, vm_grouping_config=None, debug=False):
    """Produce a condensation (in-place migration) plan for the source cloud.

    When there is no spare hardware, the admin wants to free as many nodes
    as possible. This task inspects the source cloud, groups connected VMs,
    rearranges the load, and emits CloudFerry filter files describing the
    migration order.

    :param config: path to CloudFerry configuration file (based on
        `configs/config.ini`)
    :param vm_grouping_config: path to grouping config file (based on
        `configs/groups.yaml`)
    :param debug: boolean; if `True`, switch logging to DEBUG level
    :raises RuntimeError: when no grouping file could be loaded
    """
    if debug:
        utils.configure_logging("DEBUG")
    cfglib.collector_configs_plugins()
    cfglib.init_config(config)
    data_storage.check_redis_config()

    LOG.info("Retrieving flavors, VMs and nodes from SRC cloud")
    flavors, vms, nodes = nova_collector.get_flavors_vms_and_nodes(cfglib.CONF)
    if cfglib.CONF.condense.keep_interim_data:
        condense_utils.store_condense_data(flavors, nodes, vms)

    LOG.info("Retrieving groups of VMs")
    # Results are written by get_groups into migrate.group_file_path.
    get_groups(config, vm_grouping_config)
    groups = condense_utils.read_file(cfglib.CONF.migrate.group_file_path)
    if groups is None:
        missing = ("Grouping information is missing. Make sure you have "
                   "grouping file defined in config.")
        LOG.critical(missing)
        raise RuntimeError(missing)

    LOG.info("Generating migration schedule based on grouping rules")
    process.process(nodes=nodes, flavors=flavors, vms=vms, groups=groups)

    LOG.info("Starting generation of filter files for migration")
    create_filters(config)

    LOG.info("Migration schedule generated. You may now want to start "
             "evacuation job: 'fab evacuate'")
    LOG.info("Condensation process finished. Checkout filters file: %s.",
             DEFAULT_FILTERS_FILES)
# Example 3 (0 votes)
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cfglib

from condensation import cloud
from condensation import utils as condense_utils
from cloudferrylib.utils import utils as utl
LOG = utl.get_log(__name__)
SOURCE = "source"
DESTINATION = "destination"


def process(nodes, flavors, vms, groups):
    """Entry point: build Cloud objects and run the condensation.

    Creates a source Cloud from the collected node/flavor/VM/group data and
    recursively migrates its load onto an (initially empty) destination
    Cloud, producing the condensation schedule.

    :param nodes: hardware node data read from the nodes file
    :param flavors: nova flavor data read from the flavors file
    :param vms: VM data read from the VMs file
    :param groups: grouping information for connected VMs
    """
    LOG.info("started creating schedule for node condensation")

    # Use the module-level SOURCE/DESTINATION constants instead of repeating
    # the string literals, so the cloud names stay consistent file-wide.
    source = cloud.Cloud.from_dicts(SOURCE, nodes, flavors, vms, groups)
    source.migrate_to(cloud.Cloud(DESTINATION))


if __name__ == "__main__":
    # Load the interim data files produced by an earlier collection run and
    # feed them to process() as keyword arguments.
    inputs = {
        name: condense_utils.read_file(
            getattr(cfglib.CONF.condense, name + "_file"))
        for name in ("nodes", "flavors", "vms", "groups")
    }
    process(**inputs)
# Example 4 (0 votes)
# See the License for the specific language governing permissions and
# limitations under the License.
import cfglib

from condensation import cloud
from condensation import utils as condense_utils
from cloudferrylib.utils import utils as utl
LOG = utl.get_log(__name__)
SOURCE = "source"
DESTINATION = "destination"


def process(nodes, flavors, vms, groups):
    """Entry point: build Cloud objects and run the condensation.

    Creates a source Cloud from the collected node/flavor/VM/group data and
    recursively migrates its load onto an (initially empty) destination
    Cloud, producing the condensation schedule.

    :param nodes: hardware node data read from the nodes file
    :param flavors: nova flavor data read from the flavors file
    :param vms: VM data read from the VMs file
    :param groups: grouping information for connected VMs
    """
    LOG.info("started creating schedule for node condensation")

    # Use the module-level SOURCE/DESTINATION constants instead of repeating
    # the string literals, so the cloud names stay consistent file-wide.
    source = cloud.Cloud.from_dicts(SOURCE, nodes, flavors, vms, groups)
    source.migrate_to(cloud.Cloud(DESTINATION))


if __name__ == "__main__":
    # Shorthand aliases keep the process() call readable.
    _conf = cfglib.CONF.condense
    _load = condense_utils.read_file
    process(nodes=_load(_conf.nodes_file),
            flavors=_load(_conf.flavors_file),
            vms=_load(_conf.vms_file),
            groups=_load(_conf.groups_file))