def main(profile, destination_dc, dry_run, host, cluster):
    """Migrate all VMs on HOST to CLUSTER"""

    click_log.basic_config()

    log_to_slack = True
    logging.task = 'Live Migrate HV to new POD'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    cs = CosmicSQL(server=profile, dry_run=dry_run)

    host = co.get_host(name=host)
    if not host:
        sys.exit(1)

    for vm in host.get_all_vms() + host.get_all_project_vms():
        live_migrate(co=co,
                     cs=cs,
                     cluster=cluster,
                     vm_name=vm['name'],
                     destination_dc=destination_dc,
                     add_affinity_group=None,
                     is_project_vm=None,
                     zwps_to_cwps=None,
                     log_to_slack=log_to_slack,
                     dry_run=dry_run)
def list_ha_workers(profile, hostname, name_filter, non_running, plain_display):
    """Lists HA workers"""

    cs = CosmicSQL(server=profile, dry_run=False)

    ha_workers = cs.list_ha_workers(hostname)
    if not ha_workers:
        return f"No HA workers found"

    table_headers = [
        "Domain",
        "VM",
        "Type",
        "VM state",
        "Created (-2H)",
        "HAworker step taken",
        "Step",
        "Hypervisor",
        "Mgt server"
    ]
    table_format = 'plain' if plain_display else 'pretty'
    table_data = []

    for (domain, vm_name, vm_type, state, created, taken, step, host, mgt_server, ha_state) in ha_workers:
        if not vm_name:
            continue
        if non_running and state == 'Running':
            continue
        if name_filter and name_filter not in vm_name:
            continue

        display_name = (vm_name[:28] + '..') if len(vm_name) >= 31 else vm_name
        if mgt_server:
            mgt_server = mgt_server.split('.')[0]
        host = host.split('.')[0]

        table_data.append([domain, display_name, vm_type, state, created, taken, step, host, mgt_server])

    return tabulate(table_data, headers=table_headers, tablefmt=table_format)
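For reference, a minimal self-contained sketch of the tabulate call used above; the sample rows are invented for illustration:

from tabulate import tabulate

# Invented sample rows illustrating the 'pretty' vs 'plain' formats selected
# by the plain_display flag above
headers = ["Domain", "VM", "VM state"]
rows = [["ROOT", "vm-app-01", "Running"],
        ["ROOT", "vm-db-01", "Stopped"]]

print(tabulate(rows, headers=headers, tablefmt='pretty'))
print(tabulate(rows, headers=headers, tablefmt='plain'))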
def main(profile, zwps_to_cwps, add_affinity_group, destination_dc, is_project_vm,
         skip_within_cluster, dry_run, vm, cluster):
    """Live migrate VM to CLUSTER"""

    click_log.basic_config()

    log_to_slack = True
    logging.task = 'Live Migrate VM'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    cs = CosmicSQL(server=profile, dry_run=dry_run)

    # Work around migration issue: first in the same pod to limit possible hiccup
    vm_instance = co.get_vm(name=vm, is_project_vm=is_project_vm)

    if not vm_instance:
        logging.error(f"Cannot migrate, VM '{vm}' not found!")
        sys.exit(1)

    if vm_instance['state'] != 'Running':
        logging.error(f"Cannot migrate, VM has state: '{vm_instance['state']}'")
        sys.exit(1)

    source_host = co.get_host(id=vm_instance['hostid'])
    source_cluster = co.get_cluster(id=source_host['clusterid'])
    if not skip_within_cluster:
        if not vm_instance.migrate_within_cluster(vm=vm_instance, source_cluster=source_cluster,
                                                  source_host=source_host, instancename=vm_instance['instancename']):
            logging.info(f"VM Migration failed at {datetime.now().strftime('%d-%m-%Y %H:%M:%S')}\n")
            sys.exit(1)

    if not live_migrate(co, cs, cluster, vm, destination_dc, add_affinity_group, is_project_vm, zwps_to_cwps,
                        log_to_slack, dry_run):
        logging.info(f"VM Migration failed at {datetime.now().strftime('%d-%m-%Y %H:%M:%S')}\n")
        sys.exit(1)
    logging.info(f"VM Migration completed at {datetime.now().strftime('%d-%m-%Y %H:%M:%S')}\n")
def main(profile, max_iops, zwps_to_cwps, is_project_vm, dry_run, vm,
         storage_pool):
    """Live migrate VM volumes to STORAGE_POOL"""

    click_log.basic_config()

    log_to_slack = True
    logging.task = 'Live Migrate VM Volumes'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    cs = CosmicSQL(server=profile, dry_run=dry_run)

    if not live_migrate_volumes(storage_pool, co, cs, dry_run, is_project_vm,
                                log_to_slack, max_iops, vm, zwps_to_cwps):
        sys.exit(1)
def who_has_this_ip(profile, all_databases, ip_address):
    if all_databases:
        databases = CosmicSQL.get_all_dbs_from_config()
        if not databases:
            raise RuntimeError("No databases found in configuration file")
    else:
        databases = [profile]

    table_headers = [
        "VM", "Network", "MAC address", "IPv4", "Netmask", "Mode", "State",
        "Created"
    ]
    table_data = []

    for database in databases:
        cs = CosmicSQL(server=database, dry_run=False)

        count = 0

        for (network_name, mac_address, ipv4_address, netmask, _, mode, state, created, vm_name) \
                in cs.get_ip_address_data(ip_address):
            count += 1
            table_data.append([
                vm_name, network_name, mac_address, ipv4_address, netmask,
                mode, state, created
            ])

        if count == 0:
            for (vm_name, ipv4_address, created, network_name,
                 state) in cs.get_ip_address_data_bridge(ip_address):
                count += 1
                table_data.append([
                    vm_name, network_name, '-', ipv4_address, '-', '-', state,
                    created
                ])

        if count == 0:
            for (vm_name, _, state, ipv4_address,
                 instance_id) in cs.get_ip_address_data_infra(ip_address):
                table_data.append([
                    f'{vm_name} ({instance_id})', '-', '-', ipv4_address, '-',
                    '-', state, '-'
                ])

    return tabulate(table_data, headers=table_headers, tablefmt='pretty')
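The assertions in the TestCosmicSQL class below show these lookups are implemented with SQL LIKE filters; a minimal sketch of such a helper, with the table and column names taken from those assertions (the real CosmicSQL query selects more columns and joins more tables):

def get_ip_address_data_sketch(connection, ip_address):
    # Sketch only: LIKE-based match as implied by the test assertions below;
    # everything beyond the user_ip_address table and public_ip_address
    # column is an assumption. Interpolating into SQL like this is only
    # tolerable in an internal ops tool fed by operators, never user input.
    cursor = connection.cursor()
    try:
        cursor.execute(
            f"SELECT public_ip_address, state FROM user_ip_address "
            f"WHERE public_ip_address LIKE '%{ip_address}%'")
        return cursor.fetchall()
    finally:
        cursor.close()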
def main(profile, is_project_vm, dry_run, vm, cluster, destination_dc, destination_so):
    """Offline migrate VM to CLUSTER"""

    click_log.basic_config()

    log_to_slack = True
    logging.task = 'Offline Migrate VM'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    cs = CosmicSQL(server=profile, dry_run=dry_run)

    target_cluster = co.get_cluster(name=cluster)
    if not target_cluster:
        sys.exit(1)

    vm = co.get_vm(name=vm, is_project_vm=is_project_vm)
    if not vm:
        sys.exit(1)

    if destination_dc and destination_dc not in DATACENTERS:
        logging.error(f"Unknown datacenter '{destination_dc}', should be one of {str(DATACENTERS)}")
        sys.exit(1)

    logging.instance_name = vm['instancename']
    logging.slack_value = vm['domain']
    logging.vm_name = vm['name']
    logging.zone_name = vm['zonename']

    try:
        # Pick a random CLUSTER scoped storage pool (no NVMe or ZONE-wide
        # pools); filtering first avoids looping forever when none exists
        cluster_scoped_pools = [pool for pool in target_cluster.get_storage_pools()
                                if pool['scope'] == 'CLUSTER']
        target_storage_pool = choice(cluster_scoped_pools)
    except IndexError:
        logging.error(f"No storage pools found for cluster '{target_cluster['name']}'")
        sys.exit(1)

    # Stop the VM for the offline migration and start it again afterwards
    need_to_stop = auto_start_vm = vm['state'] == 'Running'

    if destination_dc:
        for datacenter in DATACENTERS:
            if datacenter == destination_dc:
                continue

            if datacenter in vm['serviceofferingname']:
                new_offering = vm['serviceofferingname'].replace(datacenter, destination_dc)
                logging.info(
                    f"Replacing '{vm['serviceofferingname']}' with '{new_offering}'")
                cs.update_service_offering_of_vm(vm['instancename'], new_offering)
                vm = co.get_vm(name=vm['instancename'], is_project_vm=is_project_vm)
                break

    if destination_so:
        logging.info(
            f"Replacing '{vm['serviceofferingname']}' with '{destination_so}'")
        cs.update_service_offering_of_vm(vm['instancename'], destination_so)
        vm = co.get_vm(name=vm['instancename'], is_project_vm=is_project_vm)

    vm_service_offering = co.get_service_offering(id=vm['serviceofferingid'])
    if vm_service_offering:
        storage_tags = vm_service_offering.get('tags', '')

        if not storage_tags:
            logging.warning('VM service offering has no storage tags')
        else:
            if storage_tags not in target_storage_pool['tags']:
                logging.error(
                    f"Can't migrate '{vm['name']}': storage tags on the target cluster ({target_storage_pool['tags']}) do not contain the tags of the VM's service offering ({storage_tags})")
                sys.exit(1)

    if need_to_stop:
        if not vm.stop():
            sys.exit(1)

    volumes = vm.get_volumes()

    for volume in volumes:
        if volume['storage'] == target_storage_pool['name']:
            logging.warning(
                f"Volume '{volume['name']}' ({volume['id']}) already on cluster '{target_cluster['name']}', skipping...")
            continue

        source_storage_pool = co.get_storage_pool(name=volume['storage'])
        if not source_storage_pool:
            sys.exit(1)

        if source_storage_pool['scope'] == 'ZONE':
            logging.warning(f"Scope of volume '{volume['name']}' ({volume['id']}) is ZONE, skipping...")
            continue

        if not volume.migrate(target_storage_pool):
            sys.exit(1)

        with click_spinner.spinner():
            while True:
                volume.refresh()

                if volume['state'] == 'Ready':
                    break

                logging.warning(
                    f"Volume '{volume['name']}' ({volume['id']}) is in '{volume['state']}' state instead of 'Ready', sleeping...")
                time.sleep(60)

    if auto_start_vm:
        destination_host = target_cluster.find_migration_host(vm)
        if not destination_host:
            sys.exit(1)

        if not vm.start(destination_host):
            sys.exit(1)
    else:
        logging.info(f"Not starting VM '{vm['name']}' as it was not running", log_to_slack)
def main(profile, dry_run, ignore_volumes, zwps_to_cwps, skip_disk_offerings,
         only_project, source_cluster_name, destination_cluster_name):
    """Migrate offline volumes from SOURCE_CLUSTER to DESTINATION_CLUSTER"""

    click_log.basic_config()

    if source_cluster_name == destination_cluster_name:
        logging.error('Destination cluster cannot be the source cluster!')
        sys.exit(1)

    if dry_run:
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run)
    cs = CosmicSQL(server=profile, dry_run=dry_run)

    source_cluster = co.get_cluster(name=source_cluster_name)
    source_storage_pools = co.get_all_storage_pools(name=source_cluster_name)
    if not source_cluster and not source_storage_pools:
        logging.error(f"Source cluster not found:'{source_cluster_name}'!")
        sys.exit(1)

    destination_cluster = co.get_cluster(name=destination_cluster_name)
    if not destination_cluster:
        logging.error(
            f"Destination cluster not found: '{destination_cluster_name}'!")
        sys.exit(1)

    if source_cluster:
        try:
            source_storage_pools = source_cluster.get_storage_pools(
                scope='CLUSTER')
        except IndexError:
            logging.error(
                f"No storage pools found for cluster '{source_cluster['name']}'"
            )
            sys.exit(1)

    logging.info('Source storage pools found:')
    for source_storage_pool in source_storage_pools:
        logging.info(f" - '{source_storage_pool['name']}'")

    try:
        destination_storage_pools = destination_cluster.get_storage_pools(
            scope='CLUSTER')
    except IndexError:
        logging.error(
            f"No storage pools found for cluster '{destination_cluster['name']}'"
        )
        sys.exit(1)
    logging.info('Destination storage pools found:')
    for destination_storage_pool in destination_storage_pools:
        logging.info(f" - '{destination_storage_pool['name']}'")

    if ignore_volumes:
        ignore_volumes = ignore_volumes.replace(' ', '').split(',')
        logging.info(f"Ignoring volumes: {str(ignore_volumes)}")
    else:
        # Normalize to an empty list so the membership check below can't
        # fail when no volumes were passed
        ignore_volumes = []

    if skip_disk_offerings:
        skip_disk_offerings = skip_disk_offerings.replace(' ', '').split(',')
        logging.info(f"Skipping disk offerings: {str(skip_disk_offerings)}")

    for source_storage_pool in source_storage_pools:
        destination_storage_pool = choice(destination_storage_pools)
        volumes = source_storage_pool.get_volumes(only_project)

        for volume in volumes:
            if volume['id'] in ignore_volumes:
                continue

            if skip_disk_offerings and volume.get(
                    'diskofferingname') in skip_disk_offerings:
                logging.warning(
                    f"Volume '{volume['name']}' has offering '{volume['diskofferingname']}', skipping..."
                )
                continue

            if 'storage' not in volume:
                logging.warning(
                    f"No storage attribute found for volume '{volume['name']}' ({volume['id']}), skipping..."
                )
                continue

            if volume['storage'] == destination_storage_pool['name']:
                logging.warning(
                    f"Volume '{volume['name']}' ({volume['id']}) already on cluster '{destination_cluster['name']}', skipping..."
                )
                continue

            if volume['state'] != 'Ready':
                logging.warning(
                    f"Volume '{volume['name']}' ({volume['id']}) is in state '{volume['state']}', skipping..."
                )
                continue

            if 'vmstate' in volume and volume['vmstate'] != 'Stopped':
                logging.warning(
                    f"Volume '{volume['name']}' ({volume['id']}) is attached to {volume['vmstate']} VM '{volume['vmname']}', skipping..."
                )
                continue

            if zwps_to_cwps:
                if not dry_run:
                    logging.info(
                        f"Converting ZWPS volume '{volume['name']}' to CWPS before starting the migration"
                    )
                    if not cs.update_zwps_to_cwps('MCC_v1.CWPS',
                                                  volume_id=volume['id']):
                        logging.error(
                            f"Failed to apply CWPS disk offering to volume '{volume['name']}'"
                        )
                        return False
                else:
                    logging.info(
                        f"Would have changed the diskoffering for volume '{volume['name']}' to CWPS before starting the migration"
                    )

            if source_cluster:
                logging.info(
                    f"Volume '{volume['name']}' will be migrated from cluster '{source_cluster['name']}' to '{destination_cluster['name']}'"
                )
            else:
                logging.info(
                    f"Volume '{volume['name']}' will be migrated from storage pool '{source_storage_pool['name']}' to '{destination_cluster['name']}'"
                )

            if not volume.migrate(destination_storage_pool):
                continue
def main(dry_run, zwps_cluster, destination_cluster, virtual_machines,
         force_end_hour):
    """Empty ZWPS by migrating VMs and/or it's volumes to the destination cluster."""

    click_log.basic_config()

    if force_end_hour:
        try:
            force_end_hour = int(force_end_hour)
        except ValueError as e:
            logging.error(
                f"Specified time '{force_end_hour}' is not a valid integer: '{e}'"
            )
            sys.exit(1)
        if not 0 <= force_end_hour < 24:
            logging.error(
                f"Specified time '{force_end_hour}' should be an hour between 0 and 23"
            )
            sys.exit(1)

    profile = 'nl2'

    log_to_slack = True
    logging.task = 'Live Migrate VM Volumes'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    if not dry_run:
        cs = CosmicSQL(server=profile, dry_run=dry_run)
    else:
        cs = None

    zwps_storage_pools = []
    for storage_pool in co.get_all_storage_pools():
        if zwps_cluster.upper() in storage_pool['name']:
            zwps_storage_pools.append(storage_pool)

    logging.info('ZWPS storage pools found:')
    for zwps_storage_pool in zwps_storage_pools:
        logging.info(f" - '{zwps_storage_pool['name']}'")

    target_cluster = co.get_cluster(name=destination_cluster)
    if not target_cluster:
        logging.error(
            f"Destination cluster not found: '{destination_cluster}'!")
        sys.exit(1)

    try:
        destination_storage_pools = target_cluster.get_storage_pools(
            scope='CLUSTER')
    except IndexError:
        logging.error(
            f"No storage pools found for cluster '{target_cluster['name']}'")
        sys.exit(1)
    logging.info('Destination storage pools found:')
    for target_storage_pool in destination_storage_pools:
        logging.info(f" - '{target_storage_pool['name']}'")

    target_storage_pool = random.choice(destination_storage_pools)

    volumes = []
    for zwps_storage_pool in zwps_storage_pools:
        vols = co.get_all_volumes(list_all=True,
                                  storageid=zwps_storage_pool['id'])
        if vols:
            volumes += vols

    vm_ids = []
    logging.info('Volumes found:')
    for volume in volumes:
        for virtual_machine in virtual_machines:
            if re.search(virtual_machine, volume.get('vmname', ''), re.IGNORECASE):
                logging.info(
                    f" - '{volume['name']}' on VM '{volume['vmname']}'")
                if volume['virtualmachineid'] not in vm_ids:
                    vm_ids.append(volume['virtualmachineid'])

    vms = []
    for vm_id in vm_ids:
        vm = co.get_vm(id=vm_id)
        # A 'continue' inside a nested loop would not skip the append below,
        # so evaluate the affinity groups first
        if any('DedicatedGrp' in affinitygroup['name']
               for affinitygroup in vm['affinitygroup'] or []):
            logging.warning(
                f"Skipping VM '{vm['name']}' because of 'DedicatedGrp' affinity group"
            )
            continue
        vms.append(vm)

    logging.info('Virtual machines found:')
    for vm in vms:
        logging.info(f" - '{vm['name']}'")

    logging.info(
        f"Starting live migration of volumes and/or virtual machines from the ZWPS storage pools to cluster '{target_cluster['name']}'"
    )

    for vm in vms:
        """ Can we start a new migration? """
        if force_end_hour:
            now = datetime.datetime.now(pytz.timezone('CET'))
            if now.hour >= force_end_hour:
                logging.info(
                    f"Stopping migration batch. We are not starting new migrations after '{force_end_hour}':00",
                    log_to_slack=log_to_slack)
                sys.exit(0)

        source_host = co.get_host(id=vm['hostid'])
        source_cluster = co.get_cluster(zone='nl2',
                                        id=source_host['clusterid'])
        if source_cluster['name'] == target_cluster['name']:
            """ VM is already on the destination cluster, so we only need to migrate the volumes to this storage pool """
            logging.info(
                f"Starting live migration of volumes of VM '{vm['name']}' to storage pool '{target_storage_pool['name']}' ({target_storage_pool['id']})",
                log_to_slack=log_to_slack)
            live_migrate_volumes(target_storage_pool['name'], co, cs, dry_run,
                                 False, log_to_slack, 0, vm['name'], True)
        else:
            """ VM needs to be migrated live to the destination cluster, including volumes """
            live_migrate(co=co,
                         cs=cs,
                         cluster=target_cluster['name'],
                         vm_name=vm['name'],
                         destination_dc=None,
                         add_affinity_group=None,
                         is_project_vm=None,
                         zwps_to_cwps=True,
                         log_to_slack=log_to_slack,
                         dry_run=dry_run)
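The force_end_hour guard in the loop above is a handy pattern for long batch jobs; a self-contained sketch of the same check, with the timezone fixed to CET as in the code:

import datetime

import pytz


def past_cutoff(force_end_hour):
    # True once the CET wall-clock hour reaches the cutoff; the batch then
    # stops starting new migrations while in-flight ones finish
    now = datetime.datetime.now(pytz.timezone('CET'))
    return force_end_hour is not None and now.hour >= force_end_hour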
class TestCosmicSQL(TestCase):
    def setUp(self):
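        # Patch pymysql.connect for the whole test; addCleanup guarantees the
        # patcher is stopped even if a test fails. The code under test obtains
        # its cursor via connect().cursor(), hence the return_value chain.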
        pymysql_connect_patcher = patch('pymysql.connect')
        self.mock_connect = pymysql_connect_patcher.start()
        self.addCleanup(pymysql_connect_patcher.stop)
        self.mock_cursor = self.mock_connect.return_value.cursor.return_value

        self.cs = CosmicSQL(server='localhost',
                            password='******',
                            dry_run=False)

    @tempdir()
    def test_load_from_config(self, tmp):
        config = (b"[testmariadb]\n"
                  b"host = 10.0.0.1\n"
                  b"port = 3706\n"
                  b"user = test_user\n"
                  b"password = test_password\n")

        tmp.write('config', config)
        with patch('cosmicops.config.Path.cwd') as path_cwd_mock:
            path_cwd_mock.return_value = Path(tmp.path)
            cs = CosmicSQL(server='testmariadb')

        self.assertEqual('10.0.0.1', cs.server)
        self.assertEqual(3706, cs.port)
        self.assertEqual('test_user', cs.user)
        self.assertEqual('test_password', cs.password)

        with patch('pathlib.Path.cwd') as path_cwd_mock:
            path_cwd_mock.return_value = Path(tmp.path)
            self.assertRaises(RuntimeError, CosmicSQL, server='dummy')

    @tempdir()
    def test_load_from_config_with_defaults(self, tmp):
        config = (b"[testmariadb]\n" b"password = test_password\n")

        tmp.write('config', config)
        with patch('cosmicops.config.Path.cwd') as path_cwd_mock:
            path_cwd_mock.return_value = Path(tmp.path)
            cs = CosmicSQL(server='testmariadb')

        self.assertEqual('testmariadb', cs.server)
        self.assertEqual(3306, cs.port)
        self.assertEqual('cloud', cs.user)
        self.assertEqual('test_password', cs.password)

    @tempdir()
    def test_load_from_config_without_password(self, tmp):
        config = (b"[testmariadb]\n" b"host = 10.0.0.1\n")

        tmp.write('config', config)
        with patch('cosmicops.config.Path.cwd') as path_cwd_mock:
            path_cwd_mock.return_value = Path(tmp.path)
            self.assertRaises(configparser.NoOptionError,
                              CosmicSQL,
                              server='testmariadb')

    @tempdir()
    def test_get_all_dbs_from_config(self, tmp):
        config = (b"[dummy]\n"
                  b"foo = bar\n"
                  b"[db1]\n"
                  b"host = db1\n"
                  b"[db2]\n"
                  b"host = db2\n"
                  b"[db3]\n"
                  b"host = db3\n")

        tmp.write('config', config)
        with patch('cosmicops.config.Path.cwd') as path_cwd_mock:
            path_cwd_mock.return_value = Path(tmp.path)
            self.assertListEqual(['db1', 'db2', 'db3'],
                                 CosmicSQL.get_all_dbs_from_config())

    def test_connect_failure(self):
        self.mock_connect.side_effect = pymysql.Error('Mock connection error')
        self.assertRaises(pymysql.Error,
                          CosmicSQL,
                          server='localhost',
                          password='******')

    def test_kill_jobs_of_instance(self):
        self.assertTrue(self.cs.kill_jobs_of_instance('1'))

        self.mock_cursor.execute.assert_has_calls([
            call('DELETE FROM `async_job` WHERE `instance_id` = %s', ('1', )),
            call('DELETE FROM `vm_work_job` WHERE `vm_instance_id` = %s',
                 ('1', )),
            call('DELETE FROM `sync_queue` WHERE `sync_objid` = %s', ('1', ))
        ])
        self.assertEqual(3, self.mock_connect.return_value.commit.call_count)

    def test_kill_jobs_of_instance_dry_run(self):
        self.cs = CosmicSQL(server='localhost',
                            password='******',
                            dry_run=True)

        self.assertTrue(self.cs.kill_jobs_of_instance('1'))

        self.mock_cursor.execute.assert_has_calls([
            call('DELETE FROM `async_job` WHERE `instance_id` = %s', ('1', )),
            call('DELETE FROM `vm_work_job` WHERE `vm_instance_id` = %s',
                 ('1', )),
            call('DELETE FROM `sync_queue` WHERE `sync_objid` = %s', ('1', ))
        ])
        self.mock_connect.return_value.commit.assert_not_called()

    def test_kill_jobs_of_instance_query_failure(self):
        self.mock_cursor.execute.side_effect = pymysql.Error(
            'Mock query error')

        self.assertFalse(self.cs.kill_jobs_of_instance('i-1-VM'))

    def test_list_ha_workers(self):
        self.assertIsNotNone(self.cs.list_ha_workers())

        self.mock_cursor.execute.assert_called_with(ANY)
        self.mock_cursor.fetchall.assert_called()

    def test_list_ha_workers_with_hostname(self):
        self.assertIsNotNone(self.cs.list_ha_workers('host1'))

        self.mock_cursor.execute.assert_called_with(ANY)

    def test_list_ha_workers_query_failure(self):
        self.mock_cursor.execute.side_effect = pymysql.Error(
            'Mock query error')

        self.assertRaises(pymysql.Error, self.cs.list_ha_workers)
        self.mock_cursor.close.assert_called_once()

    def test_get_ip_address_data(self):
        self.assertIsNotNone(self.cs.get_ip_address_data('192.168.1.1'))

        self.assertIn("public_ip_address LIKE '%192.168.1.1%'",
                      self.mock_cursor.execute.call_args[0][0])
        self.assertIn("ip4_address LIKE '%192.168.1.1%'",
                      self.mock_cursor.execute.call_args[0][0])

    def test_get_ip_address_data_bridge(self):
        self.assertIsNotNone(self.cs.get_ip_address_data_bridge('192.168.1.1'))

        self.assertIn("user_ip_address.public_ip_address LIKE '%192.168.1.1%'",
                      self.mock_cursor.execute.call_args[0][0])

    def test_get_ip_address_data_infra(self):
        self.assertIsNotNone(self.cs.get_ip_address_data_infra('192.168.1.1'))

        self.assertIn("nics.ip4_address LIKE '%192.168.1.1%'",
                      self.mock_cursor.execute.call_args[0][0])

    def test_get_mac_address_data(self):
        self.assertIsNotNone(self.cs.get_mac_address_data('aa:bb:cc:dd:ee:ff'))

        self.assertIn("mac_address LIKE '%aa:bb:cc:dd:ee:ff%'",
                      self.mock_cursor.execute.call_args[0][0])

    def test_get_instance_id_from_name(self):
        self.assertIsNotNone(self.cs.get_instance_id_from_name('instance'))

        self.assertIn("instance_name = 'instance'",
                      self.mock_cursor.execute.call_args[0][0])

    def test_get_disk_offering_id_from_name(self):
        self.assertIsNotNone(
            self.cs.get_disk_offering_id_from_name('disk_offering'))

        self.assertIn("name = 'disk_offering'",
                      self.mock_cursor.execute.call_args[0][0])

    def test_get_service_offering_id_from_name(self):
        self.assertIsNotNone(
            self.cs.get_service_offering_id_from_name('service_offering'))

        self.assertIn("name = 'service_offering'",
                      self.mock_cursor.execute.call_args[0][0])

    def test_get_affinity_group_id_from_name(self):
        self.assertIsNotNone(
            self.cs.get_affinity_group_id_from_name('affinity_group'))

        self.assertIn("name = 'affinity_group'",
                      self.mock_cursor.execute.call_args[0][0])

    def test_update_zwps_to_cwps(self):
        self.cs.get_instance_id_from_name = Mock(return_value='instance_id')
        self.cs.get_disk_offering_id_from_name = Mock(
            return_value='disk_offering_id')

        self.assertTrue(
            self.cs.update_zwps_to_cwps('instance_name', 'disk_offering_name'))
        self.assertIn("disk_offering_id=",
                      self.mock_cursor.execute.call_args[0][0])
        self.assertIn("instance_id=", self.mock_cursor.execute.call_args[0][0])
        self.assertEqual(('disk_offering_id', 'instance_id'),
                         self.mock_cursor.execute.call_args[0][1])

    def test_update_service_offering_of_vm(self):
        self.cs.get_instance_id_from_name = Mock(return_value='instance_id')
        self.cs.get_service_offering_id_from_name = Mock(
            return_value='service_offering_id')

        self.assertTrue(
            self.cs.update_service_offering_of_vm('instance_name',
                                                  'service_offering_name'))
        self.assertIn("service_offering_id=",
                      self.mock_cursor.execute.call_args[0][0])
        self.assertIn("id=", self.mock_cursor.execute.call_args[0][0])
        self.assertEqual(('service_offering_id', 'instance_id'),
                         self.mock_cursor.execute.call_args[0][1])

    def test_get_volume_size(self):
        self.assertIsNotNone(self.cs.get_volume_size('path1'))

        self.assertIn("path = 'path1'",
                      self.mock_cursor.execute.call_args[0][0])

    def test_update_volume_size(self):
        self.cs.get_instance_id_from_name = Mock(return_value='instance_id')

        self.assertTrue(
            self.cs.update_volume_size('instance_name', 'path', 4321))
        self.assertIn("size=", self.mock_cursor.execute.call_args[0][0])
        self.assertIn("instance_id=", self.mock_cursor.execute.call_args[0][0])
        self.assertIn("path=", self.mock_cursor.execute.call_args[0][0])
        self.assertEqual((4321, 'path', 'instance_id'),
                         self.mock_cursor.execute.call_args[0][1])

    def test_add_vm_to_affinity_group(self):
        self.cs.get_instance_id_from_name = Mock(return_value='instance_id')
        self.cs.get_affinity_group_id_from_name = Mock(
            return_value='affinity_group_id')

        self.assertTrue(
            self.cs.add_vm_to_affinity_group('instance_name',
                                             'affinity_group_name'))
        self.assertIn("(instance_id, affinity_group_id)",
                      self.mock_cursor.execute.call_args[0][0])
        self.assertEqual(('instance_id', 'affinity_group_id'),
                         self.mock_cursor.execute.call_args[0][1])
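These tests need no database since pymysql.connect is patched before CosmicSQL connects. A minimal entry point for running them directly (in the real project they are more likely run via python -m unittest or pytest):

if __name__ == '__main__':
    import unittest
    unittest.main()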
def kill_jobs(profile, dry_run, instance_id):
    cs = CosmicSQL(server=profile, dry_run=dry_run)

    cs.kill_jobs_of_instance(instance_id)
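A minimal sketch of the same operation without the CLI wrapper. The import path is inferred from the patch targets above ('cosmicops.config'), and the profile name and instance id are placeholders:

from cosmicops import CosmicSQL  # exact import path assumed

# With dry_run=True the DELETE statements are executed on the cursor but
# never committed, matching the dry-run test assertions above.
cs = CosmicSQL(server='exampledb', dry_run=True)
cs.kill_jobs_of_instance('42')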