Code example #1

def create_heat_snapshot(context):
    filters = {'deleted': False}
    instances = Clients(context).nova().servers.list(search_opts=filters)
    snapshot = []

    for instance in instances:
        LOG.debug(_("Creating heat snapshot, iterating instance %s ."),
                  instance.id)
        instance_id = instance.id
        metadata = Clients(context).nova().servers.get(instance_id).metadata
        if 'dr_state' in metadata and metadata['dr_state'] == "ready":
            snap = instance_snapshot(context, instance)
            snapshot.append(snap)

    return snapshot
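
A minimal usage sketch (the ctx variable and the driver loop are illustrative, not part of the project code); it feeds each snapshot record into generate_instance_and_net_section, shown in code example #10:

# hypothetical driver code, assuming an admin RequestContext `ctx`
snaps = create_heat_snapshot(ctx)
out = {}
for snap in snaps:
    generate_instance_and_net_section(ctx, snap, out)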
Code example #2
    def __init__(self, context, is_unit_test=False):

        super(HeatFlameTemplate, self).__init__(context)
        self.dr_image_snapshot_ids = []
        self.dr_protected_server_names = []
        self.dr_volumes = []  # each has : volume name only !
        self.dr_replicated_volumes = []
        self.dr_networks = []
        self.credentials = {}
        self.dr_keynames = []
        self.context = context
        self.clients = Clients(context)
        LOG.debug("heat_flame_template  . initializing ")

        if is_unit_test is True:
            password = '******'
        else:
            importutils.import_module('keystoneclient.middleware.auth_token')
            password = cfg.CONF.keystone_authtoken.admin_password

        self.credentials = {
            'user': self.context.username,
            'password': password,
            'project': self.context.tenant,
            'auth_url': self.context.auth_url,
            'auth_token': self.context.auth_token
        }

        LOG.debug("heat_flame_template  credentials: user: %s, password : %s,"
                  "project: %s, auth_url %s , auth_token %s" %
                  (self.credentials['user'], self.credentials['password'],
                   self.credentials['project'], self.credentials['auth_url'],
                   self.credentials['auth_token']))
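
A hedged construction sketch; ctx is assumed to be a RequestContext-like object exposing the username, tenant, auth_url, and auth_token attributes read by the credentials dict above:

# passing is_unit_test=True avoids reading the keystone admin
# password from cfg.CONF
generator = HeatFlameTemplate(ctx, is_unit_test=True)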
Code example #3

def remote_copy_ssh_key(context, key):

    LOG.debug("key name = %s" % key)

    ssh_key_name = key.id
    # id is actually the key name; see novaclient/v1_1/keypairs.py, def id(self)
    LOG.debug("key name by dot = %s" % ssh_key_name)

    key_data = key.public_key
    LOG.debug("key data = %s." % key_data)

    if ssh_key_name is not None:
        try:
            dr_remote_nova_client = Clients(context).remote_nova()

            LOG.debug('.... connected to remote Nova....')

            if not is_remote_existing_keyname(dr_remote_nova_client,
                                              ssh_key_name):
                LOG.debug('....copying ssh key name: "%s"' % ssh_key_name)

                dr_remote_nova_client.keypairs.create(ssh_key_name, key_data)
                LOG.debug('.... public key copied to remote Nova')

        except Exception:
            ssh_key_name = None
            LOG.error('failed to write public key data to the remote site')
    return ssh_key_name
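
A usage sketch, assuming the keypair object is fetched via novaclient exactly as instance_snapshot (code example #11) does:

key = Clients(context).nova().keypairs.get(instance.key_name)
ssh_key_name = remote_copy_ssh_key(context, key)
if ssh_key_name is None:
    # the copy failed; the generated template should omit key_name
    LOG.warning('keypair was not replicated to the remote site')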
Code example #4
    def __init__(self, context):
        self.clients = Clients(context)
        self.cntx = context
        self._instances = []
        self._volumes = []
        self._replicated_volumes = []
        self._keypairs = []
Code example #5
    def __init__(self, context):
        self.clients = Clients(context)
        self._snap_id = None
        self._image_id = None
        self._name = None
        self._resource_id = None
        self.data_block_size_bytes = CONF.backup_snapshot_object_size
Code example #6

    def _imagecopy(self, context, instance, container_name, action_excution):

        backup_rec = {}
        action_excution.set_status(context, 'uploading to swift')
        swift_conn = Clients(context).swift()
        headers = {'X-Container-Meta-dr_state': 'processing'}
        image = self.clients.glance().images.get(self._image_id)

        # take the checksum as unique id
        global_container_image_id = image._info['checksum']
        image_response = image.data()
        # buffer the whole image in memory before chunking it out to swift
        image_response_data = StringIO.StringIO()
        for chunk in image_response:
            image_response_data.write(chunk)
        image_response_data.seek(0, os.SEEK_SET)

        chunks = 0
        while True:
            data = image_response_data.read(self.data_block_size_bytes)
            data_offset = image_response_data.tell()
            LOG.debug("uploading image offset %s chunks %s" %
                      (data_offset, chunks))
            if data == '':
                break
            try:
                swift_conn.put_object(container_name,
                                      global_container_image_id + "_" +
                                      str(chunks),
                                      data,
                                      content_length=len(data))
                chunks += 1
            except socket.error as err:
                dr_state = 'DR image backup failed'
                action_excution.set_status(context, dr_state)
                raise exception.SwiftConnectionFailed(reason=str(err))

        dr_state = 'Protected'

        backup_rec["metadata"] = instance.metadata
        backup_rec["image_id"] = global_container_image_id
        backup_rec["instance_name"] = self._name
        backup_rec["meta"] = image.to_dict()
        backup_rec["chunks"] = chunks

        action_excution.set_status(context, dr_state)
        return dr_state, backup_rec
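
The loop above stores each image as Swift objects named <checksum>_<n>, with the chunk count recorded in backup_rec; a hedged restore sketch that reassembles them (swift_conn.get_object is the same swiftclient call used in code example #8):

# hypothetical reassembly of a backed-up image from its swift chunks
image_data = StringIO.StringIO()
for i in range(backup_rec["chunks"]):
    obj_name = backup_rec["image_id"] + "_" + str(i)
    _meta, chunk = swift_conn.get_object(container_name, obj_name)
    image_data.write(chunk)
image_data.seek(0, os.SEEK_SET)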
Code example #7

def instance_volumes_list(context, instance):
    LOG.debug("instance_volumes_list %s %s." % (instance, instance.id))
    bdms = Clients(context).nova().volumes.get_server_volumes(instance.id)

    if not bdms:
        LOG.debug(_("Instance %s has no attached volumes."), instance.id)

    mountpoints = []

    for bdm in bdms:
        LOG.debug("bdm %s %s." % (bdm, dir(bdm)))
        volume_id = bdm.volumeId
        assigned_mountpoint = bdm.device
        if volume_id is not None:
            mountpoints.append(
                translate_attachment_detail_view(volume_id, instance.id,
                                                 assigned_mountpoint))

    return mountpoints
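
The exact entry shape is defined by translate_attachment_detail_view (not shown here); judging by how the callers index the result, each entry looks roughly like:

# illustrative only
# {'volumeId': '<volume uuid>', 'serverId': '<instance uuid>',
#  'device': '/dev/vdb'}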
Code example #8
    def failover(context, container_name):
        swift_conn = Clients(context).swift()
        LOG.debug("failover container name %s" % (container_name))
        # TODO(Oshrit): load dynamically
        template_generator = heat_flame_template.HeatFlameTemplate(context)

        # Loop over metadata file, load actions
        swift_meta, actions_metadata = swift_conn.get_object(
            container_name, "metadata")
        actions = jsonutils.loads(actions_metadata)
        policy_status = False
        try:
            for recover_action in actions["actions"]:
                action_resource = recover_action["resource"]
                db_action = recover_action["action"]
                action_extra_data = recover_action["execution_data"]

                # Instantiate action class
                action_obj = action.load_action_driver(db_action["class_name"],
                                                       context=context)

                if action_obj.is_using_global_container():
                    action_container_name =\
                        get_global_container_name(context.tenant, "")
                else:
                    action_container_name = container_name

                # Invoke recover method on action class passing resource id
                policy_status = action_obj.failover(context,
                                                    action_resource["id"],
                                                    action_extra_data,
                                                    action_container_name)

                # Invoke failover for each resource-action
                action_obj.generate_template(context, template_generator)

        except Exception as e:
            policy_status = False
            LOG.debug(e)
            LOG.error("resource %s could not be recovered using action %s. "
                      "Verify that the resource is a valid resource" %
                      (db_action["id"], db_action["name"]))
Code example #9

def instance_snapshot_reverse_volume(context, instance):
    LOG.debug("In snapshot of instance %s." % instance)
    try:
        image = Clients(context).glance().images.get(instance.image['id'])
        flavor = Clients(context).nova().flavors.get(instance.flavor['id'])
        volumes = instance_volumes_list(context, instance)
        networks = Clients(context).remote_neutron().\
            list_networks()['networks']
        # ===================================================================
        LOG.debug("In snapshot, remote networks %s %s." %
                  (networks, dir(networks)))

        # guard against unbound locals if the remote site lacks the
        # expected 'private'/'public' networks
        remote_private_net_id = None
        remote_private_net_subnet_id = None
        remote_public_net_id = None
        for net in networks:
            LOG.debug("In snapshot, network %s %s." % (net, dir(net)))
            if net['name'] == 'private':
                remote_private_net_id = net['id']
                remote_private_net_subnet_id = net['subnets'][0]
            if net['name'] == 'public':
                remote_public_net_id = net['id']

        LOG.debug("In remote private network id = %s." %
                  (remote_private_net_id))

        volume_mapping = []
        for v in volumes:
            LOG.debug("In snapshot, instance's volume %s %s." % (v, dir(v)))
            c_client = Clients(context).cinder()
            vol = c_client.volumes.get(v['volumeId'])
            if vol.metadata.get('dr_state') == "ready":
                # check whether the volume belongs to a consistency group
                if vol.metadata.get('consist_grp_name'):
                    volume_mapping.append({
                        'id': vol.metadata['dr_backup_id'],
                        'device': v['device'],
                        'consist_grp': vol.metadata['consist_grp_name']
                    })
                else:
                    volume_mapping.append({
                        'id': vol.metadata['dr_backup_id'],
                        'device': v['device']
                    })

        # ===================================================================
        # network_mapping = []
        # for net in networks:
        #  network = self.clients.nova().networks.get(net['id'])
        #  LOG.debug("In snapshot, instance's networks details %s." % network)
        # ===================================================================

        return {
            'inst': instance,
            'img': image,
            'flavor': flavor,
            'volumes': volume_mapping,
            'networks': instance.addresses,
            'remote_private_net_id': remote_private_net_id,
            'remote_private_net_subnet_id': remote_private_net_subnet_id,
            'remote_public_net_id': remote_public_net_id
        }
    except Exception as e:
        LOG.error("Failed to snapshot instance: %s" % e)
        LOG.error(traceback.format_exc())
        LOG.error(sys.exc_info()[0])
        return None
Code example #10

def generate_instance_and_net_section(context, snap, out):
    instance = snap['inst']
    image = snap['img']
    flavor = snap['flavor']
    ssh_key_name = snap['key_name']

    instance_name = instance.name
    image_name = image.name

    flavor_id = flavor.id
    fl = Clients(context).nova().flavors.get(flavor_id)
    flavor_name = fl.name

    instance_has_floating = False
    LOG.debug("instance networks %s" % snap['networks'])
    for net in snap['networks'].values():
        for address in net:
            LOG.debug("instance networks loop %s %s %s." %
                      (address, type(address), dir(address)))
            if address['OS-EXT-IPS:type'] == 'floating':
                instance_has_floating = True

    port_name = "%s_port" % instance_name

    if "resources" not in out or out["resources"] is None:
        out["resources"] = {}

    if ssh_key_name is not None:
        inst_properties = {
            "image": image_name,
            "flavor": flavor_name,
            "key_name": ssh_key_name,
            "networks": [{
                "port": {
                    "get_resource": port_name
                }
            }],
        }
    else:
        inst_properties = {
            "image": image_name,
            "flavor": flavor_name,
            "networks": [{
                "port": {
                    "get_resource": port_name
                }
            }],
        }

    out["resources"][instance_name] = {
        "type": "OS::Nova::Server",
        "properties": inst_properties
    }

    fixed_ips = [{'subnet_id': snap['remote_private_net_subnet_id']}]
    out["resources"][port_name] = {
        "type": "OS::Neutron::Port",
        "properties": {
            "network_id": snap['remote_private_net_id'],
            "fixed_ips": fixed_ips,
        }
    }

    if instance_has_floating:
        floating_ip_name = "%s_floating_ip" % instance_name
        out["resources"][floating_ip_name] = {
            "type": "OS::Neutron::FloatingIP",
            "properties": {
                "floating_network_id": snap['remote_public_net_id'],
                "port_id": {
                    "get_resource": port_name
                },
            }
        }

    LOG.debug("instance and net info = %s" % (out))
Code example #11

def instance_snapshot(context, instance):

    LOG.debug("In snapshot of instance %s." % instance)
    try:
        image = Clients(context).glance().images.get(instance.image['id'])
        flavor = Clients(context).nova().flavors.get(instance.flavor['id'])

        key = Clients(context).nova().keypairs.get(instance.key_name)
        # returned object is keypair class
        LOG.debug("key_name type = %s" % type(key))

        ssh_key_name = remote_copy_ssh_key(context, key)
        LOG.debug("ssh key name (before write to snap)  = %s" % (ssh_key_name))

        volumes = instance_volumes_list(context, instance)
        networks = Clients(context).remote_neutron().\
            list_networks()['networks']
        # ===================================================================
        LOG.debug("In snapshot, remote networks %s %s." %
                  (networks, dir(networks)))

        # guard against unbound locals if the remote site lacks the
        # expected 'private'/'public' networks
        remote_private_net_id = None
        remote_private_net_subnet_id = None
        remote_public_net_id = None
        for net in networks:
            LOG.debug("In snapshot, network %s %s." % (net, dir(net)))
            if net['name'] == 'private':
                remote_private_net_id = net['id']
                remote_private_net_subnet_id = net['subnets'][0]
            if net['name'] == 'public':
                remote_public_net_id = net['id']

        LOG.debug("In remote private network id = %s." %
                  (remote_private_net_id))

        volume_mapping = []
        for v in volumes:
            LOG.debug("In snapshot, instance's volume %s %s." % (v, dir(v)))
            c_client = Clients(context).cinder()
            # cinder_client.cinderclient(context)
            vol = c_client.volumes.get(v['volumeId'])
            if vol.metadata.get('dr_state') == "ready":
                volume_mapping.append({
                    'id': v['volumeId'],
                    'device': v['device'],
                    'dr_backup_id': vol.metadata['dr_backup_id']
                })

        # ===================================================================
        # network_mapping = []
        # for net in networks:
        #  network = self.clients.nova().networks.get(net['id'])
        #  LOG.debug("In snapshot, instance's networks details %s ."
        #            % network)
        # ===================================================================

        return {
            'inst': instance,
            'img': image,
            'flavor': flavor,
            'key_name': ssh_key_name,
            'volumes': volume_mapping,
            'networks': instance.addresses,
            'remote_private_net_id': remote_private_net_id,
            'remote_private_net_subnet_id': remote_private_net_subnet_id,
            'remote_public_net_id': remote_public_net_id
        }
    except Exception as e:
        LOG.error("Failed to snapshot instance: %s" % e)
        LOG.error(traceback.format_exc())
        LOG.error(sys.exc_info()[0])
        return None
Code example #12

    def __init__(self, context):
        self.clients = Clients(context)
        self._name = None
        self._id = None
        self._backup_id = None
        self._resource_id = None
Code example #13
    def protect(self, cnxt):
        # Define container_name; Swift creates the container if it does not
        # exist
        container_name = get_policy_execution_container_name(
            cnxt.tenant, self.name)
        swift_conn = Clients(cnxt).swift()
        # container = swift.swift_create_container(request, name)
        headers = {'X-Container-Meta-dr_state': 'processing'}
        swift_conn.put_container(container_name, headers)
        LOG.debug("put_container %s " % (container_name))

        # Create a global container for the tenant if it does not exist.
        # NOTE: the method is referenced, not called, so this condition is
        # always truthy; it likely should be invoked on an action instance.
        if action.Action.is_using_global_container:
            swift_conn.put_container(
                get_global_container_name(cnxt.tenant, self.name),
                {'X-Container-Meta-dr_state': 'ready'})

        # create workload_policy_execution instance
        protect_execution = {
            'workload_policy_id': self.id,
            'status': 'Creating',
        }
        workload_action_excution =\
            api.workload_policy_excution_create(cnxt, protect_execution)
        # TODO: load dynamically
        template_generator = heat_flame_template.HeatFlameTemplate(cnxt)

        try:
            if self.consistent_protect:
                self.pre_protect(cnxt, workload_action_excution)

            # iterate on actions
            protect_statuses = []
            metadata = {}
            metadata["workload_policy_name"] = self.name
            policy_status = True
            LOG.debug("workload_policy protect, policy_status = %s" %
                      (policy_status))
            metadata["actions"] = []
            for db_action in self.actions:
                try:
                    # instantiate action class
                    action_obj = action.load(db_action, context=cnxt)

                    if action_obj.is_using_global_container():
                        action_container_name =\
                            get_global_container_name(cnxt.tenant, self.name)
                    else:
                        action_container_name = container_name

                    # invoke protect method on action class passing resource id
                    status, protect_data =\
                        action_obj.protect(cnxt, workload_action_excution.id,
                                           db_action.resource.id,
                                           action_container_name)

                    LOG.debug("workload_policy protect, status = %s" %
                              (status))
                    policy_status = policy_status and (status == "Protected")
                    LOG.debug("workload_policy protect, policy_status = %s" %
                              (policy_status))

                    action_obj.generate_template(cnxt, template_generator)

                    # save the result of the invocation in relation
                    # to the workload_policy_execution
                    protect_statuses.append({
                        "action id": db_action.action.id,
                        "action_name": db_action.action.name,
                        "resource_id": db_action.resource.id,
                        "resource_name": db_action.resource.name,
                        "status": status
                    })

                    db_action.execution_data = protect_data
                    metadata["actions"].append(db_action)
                    LOG.debug("action metadata %s " % (metadata))
                except Exception as e:
                    policy_status = False
                    exc = sys.exc_info()
                    LOG.error(traceback.format_exception(*exc))
                    LOG.error("resource %s could not be protected using "
                              "action %s. Verify that the resource is a "
                              "valid resource" %
                              (db_action.resource.id, db_action.action.name))
        except Exception as e:
            policy_status = False
            LOG.debug(e)
            LOG.error("Workload could not be protected")
Code example #14
                # Invoke recover method on action class passing resource id
                # (statement reconstructed from code example #8; this snippet
                # starts mid-loop)
                policy_status = action_obj.failover(context,
                                                    action_resource["id"],
                                                    action_extra_data,
                                                    action_container_name)

                # Invoke failover for each resource-action
                action_obj.generate_template(context, template_generator)

        except Exception as e:
            policy_status = False
            LOG.debug(e)
            LOG.error("resource %s could not be recovered using action %s. "
                      "Verify that the resource is a valid resource" %
                      (db_action["id"], db_action["name"]))

        if not policy_status:
            return policy_status

        keypairs = Clients(context).nova().keypairs.list()
        for keypair in keypairs:
            template_generator.add_keypair(KeyPairResource(keypair.name))

        # TODO(Oshrit): no need to run HEAT if one of the
        # resources failed to restore
        swift_meta, template = swift_conn.get_object(container_name,
                                                     "template.yaml")
        LOG.debug("template.yaml = %s " % template)
        adjusted_template =\
            template_generator.process_recover_template(template)

        LOG.debug("adjusted template = %s " % adjusted_template)

        stack_name = container_name
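
The snippet is cut off here; presumably the adjusted template is then handed to Heat on the remote site. A hedged continuation, where the Clients(context).heat() accessor is an assumption:

# hypothetical: launch the recovery stack from the adjusted template
heat_client = Clients(context).heat()
heat_client.stacks.create(stack_name=stack_name,
                          template=adjusted_template)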
Code example #15
    def _snapshot(self, context, instance, container_name, action_excution):
        # metadata = instance.metadata
        n_client = self.clients.nova()
        snapshot_name = instance.name + "_snapshot"
        snapshot_metadata = instance.metadata

        # create_image returns the id of the newly created snapshot image
        instance_snapshot = instance.create_image(snapshot_name,
                                                  instance.metadata)
        self._snap_id = instance_snapshot
        action_excution.set_status(context, 'taking snapshot')
        local_snapshot = n_client.images.get(instance_snapshot)
        LOG.debug("checking instance snapshot %s %s " %
                  (local_snapshot.status, local_snapshot.progress))
        while local_snapshot.status == "SAVING":
            greenthread.sleep(1)
            local_snapshot = n_client.images.get(instance_snapshot)
        backup_rec = {}
        if local_snapshot.status == "ACTIVE":
            action_excution.set_status(context, 'uploading to swift')

            swift_conn = Clients(context).swift()
            headers = {'X-Container-Meta-dr_state': 'processing'}
            image = self.clients.glance().images.get(instance_snapshot)

            image_response = image.data()
            image_response_data = StringIO.StringIO()
            for chunk in image_response:
                image_response_data.write(chunk)
            image_response_data.seek(0, os.SEEK_SET)

            chunks = 0
            while True:
                data = image_response_data.read(self.data_block_size_bytes)
                data_offset = image_response_data.tell()
                LOG.debug("uploading offset %s chunks %s" %
                          (data_offset, chunks))
                if data == '':
                    break
                try:
                    swift_conn.put_object(container_name,
                                          instance_snapshot + "_" +
                                          str(chunks),
                                          data,
                                          content_length=len(data))
                    chunks += 1
                except socket.error as err:
                    raise exception.SwiftConnectionFailed(reason=str(err))

            dr_state = 'Protected'

            backup_rec["metadata"] = instance.metadata
            backup_rec["snap_id"] = self._snap_id
            backup_rec["instance_name"] = self._name
            backup_rec["meta"] = image.to_dict()
            backup_rec["chunks"] = chunks

            self._cleanup(context, n_client, self._snap_id)
        else:
            dr_state = 'DR clone backup failed'

        action_excution.set_status(context, dr_state)

        return dr_state, backup_rec
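
_cleanup is called above but not included in the snippet; a minimal sketch of what it plausibly does (the body is an assumption), deleting the temporary local snapshot once its chunks are in Swift:

    def _cleanup(self, context, n_client, snap_id):
        # hypothetical: remove the temporary local snapshot image
        try:
            n_client.images.delete(snap_id)
        except Exception as e:
            LOG.error("failed to delete temporary snapshot %s: %s" %
                      (snap_id, e))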