Example #1
    @contextlib.contextmanager
    def _action_recorder(self, action, expected_exceptions=tuple()):
        '''Return a context manager to record the progress of an action.

        Upon entering the context manager, the state is set to IN_PROGRESS.
        Upon exiting, the state will be set to COMPLETE if no exception was
        raised, or FAILED otherwise. Non-exit exceptions will be translated
        to ResourceFailure exceptions.

        Expected exceptions are re-raised, with the Resource left in the
        IN_PROGRESS state.
        '''
        try:
            self.state_set(action, self.IN_PROGRESS)
            yield
        except expected_exceptions as ex:
            with excutils.save_and_reraise_exception():
                LOG.debug('%s', six.text_type(ex))
        except Exception as ex:
            LOG.info('%(action)s: %(info)s', {"action": action,
                                              "info": six.text_type(self)},
                     exc_info=True)
            failure = exception.ResourceFailure(ex, self, action)
            self.state_set(action, self.FAILED, six.text_type(failure))
            raise failure
        except:  # noqa
            with excutils.save_and_reraise_exception():
                try:
                    self.state_set(action, self.FAILED, '%s aborted' % action)
                except Exception:
                    LOG.exception(_('Error marking resource as failed'))
        else:
            self.state_set(action, self.COMPLETE)
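Every example in this corpus leans on oslo's excutils.save_and_reraise_exception. As a reading aid, here is a minimal sketch of its semantics (an illustrative reimplementation under assumed oslo behavior, not the oslo source): the exception active on entry is captured and re-raised when the block exits cleanly, unless reraise is set to False inside the block; if the block itself raises, the captured exception is logged and dropped in favor of the new one.

    import logging
    import sys

    import six

    LOG = logging.getLogger(__name__)


    class save_and_reraise_exception(object):
        """Sketch: capture the active exception; re-raise it on exit
        unless ``reraise`` was set to False inside the block."""

        def __init__(self, reraise=True):
            self.reraise = reraise

        def __enter__(self):
            # Meaningful only inside an ``except`` block, where
            # sys.exc_info() still refers to the active exception.
            self.exc_info = sys.exc_info()
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type is not None:
                # The cleanup code in the block raised a new exception:
                # log the captured one and let the new one propagate.
                LOG.error('Original exception being dropped: %s',
                          self.exc_info[1])
                return False
            if self.reraise:
                six.reraise(*self.exc_info)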
Example #2
    def _create_ha_network(self, context, tenant_id):
        admin_ctx = context.elevated()

        args = {'network':
                {'name': constants.HA_NETWORK_NAME % tenant_id,
                 'tenant_id': '',
                 'shared': False,
                 'admin_state_up': True,
                 'status': constants.NET_STATUS_ACTIVE}}
        network = self._core_plugin.create_network(admin_ctx, args)
        try:
            ha_network = self._create_ha_network_tenant_binding(admin_ctx,
                                                                tenant_id,
                                                                network['id'])
        except Exception:
            with excutils.save_and_reraise_exception():
                self._core_plugin.delete_network(admin_ctx, network['id'])

        try:
            self._create_ha_subnet(admin_ctx, network['id'], tenant_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._core_plugin.delete_network(admin_ctx, network['id'])

        return ha_network
Example #3
 def set_data(self, data, size=None):
     payload = format_image_notification(self.image)
     self.notifier.info('image.prepare', payload)
     try:
         self.image.set_data(data, size)
     except glance_store.StorageFull as e:
         msg = (_("Image storage media is full: %s") %
                utils.exception_to_str(e))
         self.notifier.error('image.upload', msg)
         raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
     except glance_store.StorageWriteDenied as e:
         msg = (_("Insufficient permissions on image storage media: %s")
                % utils.exception_to_str(e))
         self.notifier.error('image.upload', msg)
         raise webob.exc.HTTPServiceUnavailable(explanation=msg)
     except ValueError as e:
         msg = (_("Cannot save data for image %(image_id)s: %(error)s") %
                {'image_id': self.image.image_id,
                 'error': utils.exception_to_str(e)})
         self.notifier.error('image.upload', msg)
         raise webob.exc.HTTPBadRequest(
             explanation=utils.exception_to_str(e))
     except exception.Duplicate as e:
         msg = (_("Unable to upload duplicate image data for image"
                  "%(image_id)s: %(error)s") %
                {'image_id': self.image.image_id,
                 'error': utils.exception_to_str(e)})
         self.notifier.error('image.upload', msg)
         raise webob.exc.HTTPConflict(explanation=msg)
     except exception.Forbidden as e:
         msg = (_("Not allowed to upload image data for image %(image_id)s:"
                  " %(error)s") % {'image_id': self.image.image_id,
                                   'error': utils.exception_to_str(e)})
         self.notifier.error('image.upload', msg)
         raise webob.exc.HTTPForbidden(explanation=msg)
     except exception.NotFound as e:
         msg = (_("Image %(image_id)s could not be found after upload."
                  " The image may have been deleted during the upload:"
                  " %(error)s") % {'image_id': self.image.image_id,
                                   'error': utils.exception_to_str(e)})
         self.notifier.error('image.upload', msg)
         raise webob.exc.HTTPNotFound(explanation=utils.exception_to_str(e))
     except webob.exc.HTTPError as e:
         with excutils.save_and_reraise_exception():
             msg = (_("Failed to upload image data for image %(image_id)s"
                      " due to HTTP error: %(error)s") %
                    {'image_id': self.image.image_id,
                     'error': utils.exception_to_str(e)})
             self.notifier.error('image.upload', msg)
     except Exception as e:
         with excutils.save_and_reraise_exception():
             msg = (_("Failed to upload image data for image %(image_id)s "
                      "due to internal error: %(error)s") %
                    {'image_id': self.image.image_id,
                     'error': utils.exception_to_str(e)})
             self.notifier.error('image.upload', msg)
     else:
         payload = format_image_notification(self.image)
         self.notifier.info('image.upload', payload)
         self.notifier.info('image.activate', payload)
Example #4
    def create_router(self, host, username, password, rbridge_id, router_id):
        """create vrf and associate vrf."""
        router_id = router_id[0:11]
        vrf_name = template.OS_VRF_NAME.format(id=router_id)
        rd = router_id + ":" + router_id
        try:
            mgr = self.connect(host, username, password)
            self.create_vrf(mgr, rbridge_id, vrf_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("NETCONF error"))
                self.close_session()
        try:
            # For NOS 5.0.0
            self.configure_rd_for_vrf(mgr, rbridge_id, vrf_name, rd)
            self.configure_address_family_for_vrf(mgr, rbridge_id, vrf_name)
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                try:
                    # This is done because on 4.0.0 the rd doesn't accept
                    # alpha characters or hyphens
                    rd = "".join(i for i in router_id if i in "0123456789")
                    rd = rd[:4] + ":" + rd[:4]
                    self.configure_rd_for_vrf(mgr, rbridge_id, vrf_name, rd)
                    self.configure_address_family_for_vrf_v1(mgr,
                                                             rbridge_id,
                                                             vrf_name)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_LE("NETCONF error"))
                        self.close_session()

                ctxt.reraise = False
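The nested pattern above is the fallback idiom: if the retry against the older firmware succeeds, ctxt.reraise = False swallows the original NETCONF error; if the retry raises as well, the new error propagates instead. A stripped-down sketch, where primary_operation and fallback_operation are hypothetical placeholders:

    try:
        primary_operation()
    except Exception:
        with excutils.save_and_reraise_exception() as ctxt:
            # If this raises too, the new exception propagates and the
            # original one is logged and dropped.
            fallback_operation()
            # Reached only when the fallback succeeded: suppress the
            # original exception.
            ctxt.reraise = False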
Example #5
    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance."""
        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                six.reraise(*exc_info)
        params = {'device_id': instance['uuid']}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                   " any ports") % instance['uuid'])
            self.raise_not_found(msg)

        found_security_group = False
        for port in ports:
            try:
                port.get('security_groups', []).remove(security_group_id)
            except ValueError:
                # When removing a security group from an instance, the
                # security group should be present on every port, since that
                # is how it is added through the nova api. If this port does
                # not have it, skip to the next one; a 404 is raised later
                # only if the security group is not found on any of the
                # ports on the instance.
                continue

            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_LI("Adding security group %(security_group_id)s to "
                             "port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
                found_security_group = True
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))
        if not found_security_group:
            msg = (_("Security group %(security_group_name)s not associated "
                     "with the instance %(instance)s") %
                   {'security_group_name': security_group_name,
                    'instance': instance['uuid']})
            self.raise_not_found(msg)
Example #6
    def create_vip(self, context, edge_id, vip):
        app_profile = self._convert_app_profile(
            vip['name'], (vip.get('session_persistence') or {}),
            vip.get('protocol'))
        try:
            header, response = self.vcns.create_app_profile(
                edge_id, app_profile)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to create app profile on edge: %s"),
                              edge_id)
        objuri = header['location']
        app_profileid = objuri[objuri.rfind("/") + 1:]

        vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid)
        try:
            header, response = self.vcns.create_vip(
                edge_id, vip_new)
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to create vip on vshield edge: %s"),
                              edge_id)
                self.vcns.delete_app_profile(edge_id, app_profileid)
        objuri = header['location']
        vip_vseid = objuri[objuri.rfind("/") + 1:]

        # Add the vip mapping
        map_info = {
            "vip_id": vip['id'],
            "vip_vseid": vip_vseid,
            "edge_id": edge_id,
            "app_profileid": app_profileid
        }
        vcns_db.add_vcns_edge_vip_binding(context.session, map_info)
Example #7
    def set_driver(self, resource):
        """Set the driver for a neutron resource.

        :param resource: Neutron resource in dict format. Expected keys:
                        { 'id': <value>
                          'hosting_device': { 'id': <value>, }
                          'router_type': {'cfg_agent_driver': <value>,  }
                        }
        :return driver : driver object
        """
        try:
            resource_id = resource['id']
            hosting_device = resource['hosting_device']
            hd_id = hosting_device['id']
            if hd_id in self._hosting_device_routing_drivers_binding:
                driver = self._hosting_device_routing_drivers_binding[hd_id]
                self._drivers[resource_id] = driver
            else:
                driver_class = resource['router_type']['cfg_agent_driver']
                driver = importutils.import_object(driver_class,
                                                   **hosting_device)
                self._hosting_device_routing_drivers_binding[hd_id] = driver
                self._drivers[resource_id] = driver
            return driver
        except ImportError:
            with excutils.save_and_reraise_exception(reraise=False):
                LOG.exception(_LE("Error loading cfg agent driver %(driver)s "
                                "for hosting device template "
                                "%(t_name)s(%(t_id)s)"),
                              {'driver': driver_class, 't_id': hd_id,
                               't_name': resource['name']})
                raise cfg_exceptions.DriverNotExist(driver=driver_class)
        except KeyError as e:
            with excutils.save_and_reraise_exception(reraise=False):
                raise cfg_exceptions.DriverNotSetForMissingParameter(e)
Example #8
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Required IPC directory does not exist at"
                                " %s"), ipc_dir)
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s"), ipc_dir)
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Example #9
 def set_driver(self, device_param):
     d_id = device_param['id']
     if d_id in self._drivers:
         return
     else:
         try:
             driver_name = device_param['device_template'].get(
                 'device_driver')
             if driver_name:
                 name_list = [DRIVERS_PATH, driver_name, '.',
                              driver_name, '.', driver_name.capitalize()]
                 driver_class_name = "".join(name_list)
                 driver_class = importutils.import_object(
                     driver_class_name, **device_param)
                 self._drivers[d_id] = driver_class
                 LOG.info("Imported driver %(driver_class)s for device "
                          "%(d_id)s",
                          {'driver_class': driver_class,
                           'd_id': d_id})
             else:
                 LOG.warning("Device %(device)s does not specify a "
                             "device_driver.",
                             {'device': d_id})
         except ImportError:
             with excutils.save_and_reraise_exception(reraise=False):
                 LOG.exception(_LE("Error loading cfg agent driver "
                                   "%(driver)s for hosting device "
                                   "%(d_id)s"),
                               {'driver': driver_class_name,
                                'd_id': d_id})
                 raise svmagt_exception.DriverNotExist(
                     driver=driver_class_name)
         except KeyError as e:
             with excutils.save_and_reraise_exception(reraise=False):
                 raise svmagt_exception.DriverNotSetForMissingParameter(p=e)
Example #10
    def check_swift_availability(self, cluster_info):
        plugin_config = cluster_info['plugin_config']
        # Make unique name of Swift container during Swift testing
        swift_container_name = 'Swift-test-' + str(uuid.uuid4())[:8]
        extra_script_parameters = {
            'OS_TENANT_NAME': self.common_config.OS_TENANT_NAME,
            'OS_USERNAME': self.common_config.OS_USERNAME,
            'OS_PASSWORD': self.common_config.OS_PASSWORD,
            'HADOOP_USER': plugin_config.HADOOP_USER,
            'SWIFT_CONTAINER_NAME': swift_container_name
        }
        namenode_ip = cluster_info['node_info']['namenode_ip']
        self.open_ssh_connection(namenode_ip, plugin_config.SSH_USERNAME)
        try:
            self.transfer_helper_script_to_node(
                'swift_test_script.sh', parameter_list=extra_script_parameters
            )
        except Exception as e:
            with excutils.save_and_reraise_exception():
                print(str(e))
        swift = self.connect_to_swift()
        swift.put_container(swift_container_name)
        try:
            self.execute_command('./script.sh')
        except Exception as e:
            with excutils.save_and_reraise_exception():
                print(str(e))
        finally:
            self.delete_swift_container(swift, swift_container_name)
        self.close_ssh_connection()
Example #11
def add_neutron_nsx_port_mapping(session, neutron_id,
                                 nsx_switch_id, nsx_port_id):
    session.begin(subtransactions=True)
    try:
        mapping = models.NeutronNsxPortMapping(
            neutron_id, nsx_switch_id, nsx_port_id)
        session.add(mapping)
        session.commit()
    except db_exc.DBDuplicateEntry:
        with excutils.save_and_reraise_exception() as ctxt:
            session.rollback()
            # do not complain if the same exact mapping is being added,
            # otherwise re-raise because even though it is possible for the
            # same neutron port to map to different back-end ports over time,
            # this should not occur whilst a mapping already exists
            current = get_nsx_switch_and_port_id(session, neutron_id)
            if current[1] == nsx_port_id:
                LOG.debug(_("Port mapping for %s already available"),
                          neutron_id)
                ctxt.reraise = False
    except db_exc.DBError:
        with excutils.save_and_reraise_exception():
            # rollback for any other db error
            session.rollback()
    return mapping
Example #12
    def create_replica(self, local_lun_info, replica_model):
        """Create remote LUN and replication pair.

        Purpose:
            1. create remote lun
            2. create replication pair
            3. enable replication pair
        """
        LOG.debug(('Create replication, local lun info: %(info)s, '
                   'replication model: %(model)s.'),
                  {'info': local_lun_info, 'model': replica_model})

        local_lun_id = local_lun_info['ID']
        self.wait_volume_online(self.local_client, local_lun_info)

        # step1, create remote lun
        rmt_lun_info = self.create_rmt_lun(local_lun_info)
        rmt_lun_id = rmt_lun_info['ID']

        # step2, get remote device info
        rmt_dev_id, rmt_dev_name = self.get_rmt_dev_info()
        if not rmt_lun_id or not rmt_dev_name:
            self._delete_rmt_lun(rmt_lun_id)
            msg = _('Get remote device info failed.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # step3, create replication pair
        try:
            pair_info = self.local_op.create(local_lun_id,
                                             rmt_lun_id, rmt_dev_id,
                                             rmt_dev_name, replica_model)
            pair_id = pair_info['ID']
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Create pair failed. Error: %s.'), err)
                self._delete_rmt_lun(rmt_lun_id)

        # step4, start sync manually. If replication type is sync,
        # then wait for sync complete.
        wait_complete = (replica_model == constants.REPLICA_SYNC_MODEL)
        try:
            self.local_driver.sync(pair_id, wait_complete)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Start synchronization failed. Error: %s.'), err)
                self._delete_pair(pair_id)
                self._delete_rmt_lun(rmt_lun_id)

        model_update = {}
        driver_data = {'pair_id': pair_id,
                       'rmt_lun_id': rmt_lun_id}
        model_update['replication_driver_data'] = to_string(driver_data)
        model_update['replication_status'] = 'available'
        LOG.debug('Create replication, return info: %s.', model_update)
        return model_update
Example #13
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""

        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                six.reraise(*exc_info)
        params = {'device_id': instance['uuid']}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                   " any ports") % instance['uuid'])
            self.raise_not_found(msg)

        for port in ports:
            if not self._has_security_group_requirements(port):
                LOG.warning(_LW("Cannot add security group %(name)s to "
                                "%(instance)s since the port %(port_id)s "
                                "does not meet security requirements"),
                            {'name': security_group_name,
                             'instance': instance['uuid'],
                             'port_id': port['id']})
                raise exception.SecurityGroupCannotBeApplied()
            if 'security_groups' not in port:
                port['security_groups'] = []
            port['security_groups'].append(security_group_id)
            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_LI("Adding security group %(security_group_id)s to "
                             "port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))
Example #14
def deploy(address, port, iqn, lun, image_path,
           root_mb, swap_mb, ephemeral_mb, ephemeral_format, node_uuid,
           preserve_ephemeral=False, configdrive=None):
    """All-in-one function to deploy a node.

    :param address: The iSCSI IP address.
    :param port: The iSCSI port number.
    :param iqn: The iSCSI qualified name.
    :param lun: The iSCSI logical unit number.
    :param image_path: Path for the instance's disk image.
    :param root_mb: Size of the root partition in megabytes.
    :param swap_mb: Size of the swap partition in megabytes.
    :param ephemeral_mb: Size of the ephemeral partition in megabytes. If 0,
        no ephemeral partition will be created.
    :param ephemeral_format: The type of file system to format the ephemeral
        partition.
    :param node_uuid: node's uuid. Used for logging.
    :param preserve_ephemeral: If True, no filesystem is written to the
        ephemeral block device, preserving whatever content it had (if the
        partition table has not changed).
    :param configdrive: Optional. Base64 encoded Gzipped configdrive content
                        or configdrive HTTP URL.
    :returns: the UUID of the root partition.
    """
    dev = get_dev(address, port, iqn, lun)
    image_mb = get_image_mb(image_path)
    if image_mb > root_mb:
        root_mb = image_mb
    discovery(address, port)
    login_iscsi(address, port, iqn)
    try:
        root_uuid = work_on_disk(dev, root_mb, swap_mb, ephemeral_mb,
                                 ephemeral_format, image_path, node_uuid,
                                 preserve_ephemeral=preserve_ephemeral,
                                 configdrive=configdrive)
    except processutils.ProcessExecutionError as err:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Deploy to address %s failed."), address)
            LOG.error(_LE("Command: %s"), err.cmd)
            LOG.error(_LE("StdOut: %r"), err.stdout)
            LOG.error(_LE("StdErr: %r"), err.stderr)
    except exception.InstanceDeployFailure as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Deploy to address %s failed."), address)
            LOG.error(e)
    finally:
        logout_iscsi(address, port, iqn)
        delete_iscsi(address, port, iqn)

    return root_uuid
Example #15
def destroy_disk_metadata(dev, node_uuid):
    """Destroy metadata structures on node's disk.

       Ensure that node's disk appears to be blank without zeroing the entire
       drive. To do this we will zero:
       - the first 18KiB to clear MBR / GPT data
       - the last 18KiB to clear GPT and other metadata like: LVM, veritas,
         MDADM, DMRAID, ...
    """
    # NOTE(NobodyCam): This is needed to work around bug:
    # https://bugs.launchpad.net/ironic/+bug/1317647
    LOG.debug("Start destroy disk metadata for node %(node)s.",
              {'node': node_uuid})
    try:
        utils.execute('dd', 'if=/dev/zero', 'of=%s' % dev,
                      'bs=512', 'count=36', run_as_root=True,
                      check_exit_code=[0])
    except processutils.ProcessExecutionError as err:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Failed to erase beginning of disk for node "
                          "%(node)s. Command: %(command)s. Error: %(error)s."),
                      {'node': node_uuid,
                       'command': err.cmd,
                       'error': err.stderr})

    # now wipe the end of the disk.
    # get end of disk seek value
    try:
        block_sz = get_dev_block_size(dev)
    except processutils.ProcessExecutionError as err:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Failed to get disk block count for node %(node)s. "
                          "Command: %(command)s. Error: %(error)s."),
                      {'node': node_uuid,
                       'command': err.cmd,
                       'error': err.stderr})
    else:
        seek_value = block_sz - 36
        try:
            utils.execute('dd', 'if=/dev/zero', 'of=%s' % dev,
                          'bs=512', 'count=36', 'seek=%d' % seek_value,
                          run_as_root=True, check_exit_code=[0])
        except processutils.ProcessExecutionError as err:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to erase the end of the disk on node "
                              "%(node)s. Command: %(command)s. "
                              "Error: %(error)s."),
                          {'node': node_uuid,
                           'command': err.cmd,
                           'error': err.stderr})
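The sector arithmetic behind the docstring's 18KiB figure:

    # 36 sectors x 512 bytes/sector = 18432 bytes = 18 KiB, so
    # 'bs=512 count=36' zeroes exactly the first 18 KiB of the device,
    # and seek_value = block_sz - 36 positions dd 18 KiB before the end
    # for the final wipe.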
Example #16
 def delete(self, id):
     """Deletes a listener from a load balancer."""
     session = db_api.get_session()
     db_listener = self.repositories.listener.get(session, id=id)
     if not db_listener:
         LOG.info(_LI("Listener %s not found.") % id)
         raise exceptions.NotFound(
             resource=data_models.Listener._name(), id=id)
     # Verify load balancer is in a mutable status.  If so, it can be
     # assumed that the listener is also in a mutable status, because a
     # load balancer will only be ACTIVE when all of its listeners are
     # ACTIVE.
     if not self.repositories.test_and_set_lb_and_listener_prov_status(
             session, self.load_balancer_id, id, constants.PENDING_UPDATE,
             constants.PENDING_DELETE):
         lb_repo = self.repositories.load_balancer
         db_lb = lb_repo.get(session, id=self.load_balancer_id)
         raise exceptions.ImmutableObject(resource=db_lb._name(),
                                          id=self.load_balancer_id)
     db_listener = self.repositories.listener.get(session, id=id)
     try:
         LOG.info(_LI("Sending Deletion of Listener %s to handler") %
                  db_listener.id)
         self.handler.delete(db_listener)
     except Exception:
         with excutils.save_and_reraise_exception(reraise=False):
             self.repositories.listener.update(
                 session, db_listener.id,
                 provisioning_status=constants.ERROR)
     db_listener = self.repositories.listener.get(
         session, id=db_listener.id)
     return self._convert_db_to_type(db_listener,
                                     listener_types.ListenerResponse)
Example #17
    def sync_single_resource(self, operation, object_type, obj_id,
                             context, attr_filter_create, attr_filter_update):
        """Sync over a single resource from Neutron to OpenDaylight.

        Handle syncing a single operation over to OpenDaylight, and correctly
        filter attributes out which are not required for the requisite
        operation (create or update) being handled.
        """
        dbcontext = context._plugin_context
        if operation == 'create':
            urlpath = object_type
            method = 'post'
        else:
            urlpath = object_type + '/' + obj_id
            method = 'put'

        try:
            obj_getter = getattr(context._plugin, 'get_%s' % object_type[:-1])
            resource = obj_getter(dbcontext, obj_id)
        except not_found_exception_map[object_type]:
            LOG.debug(_('%(object_type)s not found (%(obj_id)s)'),
                      {'object_type': object_type.capitalize(),
                      'obj_id': obj_id})
        else:
            if operation == 'create':
                attr_filter_create(self, resource, context, dbcontext)
            elif operation == 'update':
                attr_filter_update(self, resource, context, dbcontext)
            try:
                # 400 errors are returned if an object exists, which we ignore.
                self.client.sendjson(method, urlpath,
                                     {object_type[:-1]: resource}, [400])
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.out_of_sync = True
Example #18
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        LOG.info(_("Spawning new instance"), instance=instance)

        instance_name = instance['name']
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        if self._volumeops.ebs_root_in_block_devices(block_device_info):
            root_vhd_path = None
        else:
            root_vhd_path = self._create_root_vhd(context, instance)

        eph_vhd_path = self.create_ephemeral_vhd(instance)

        try:
            self.create_instance(instance, network_info, block_device_info,
                                 root_vhd_path, eph_vhd_path)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(instance,
                                                             injected_files,
                                                             admin_password)
                self.attach_config_drive(instance, configdrive_path)

            self.power_on(instance)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
Example #19
 def _live_migrate(self, context, instance, scheduler_hint,
                   block_migration, disk_over_commit):
     destination = scheduler_hint.get("host")
     try:
         live_migrate.execute(context, instance, destination,
                              block_migration, disk_over_commit)
     except (exception.NoValidHost,
             exception.ComputeServiceUnavailable,
             exception.InvalidHypervisorType,
             exception.InvalidCPUInfo,
             exception.UnableToMigrateToSelf,
             exception.DestinationHypervisorTooOld,
             exception.InvalidLocalStorage,
             exception.InvalidSharedStorage,
             exception.HypervisorUnavailable,
             exception.InstanceNotRunning,
             exception.MigrationPreCheckError) as ex:
         with excutils.save_and_reraise_exception():
             # TODO(johngarbutt) - eventually need instance actions here
             request_spec = {'instance_properties': {
                 'uuid': instance['uuid'], },
             }
             scheduler_utils.set_vm_state_and_notify(context,
                     'compute_task', 'migrate_server',
                     dict(vm_state=instance['vm_state'],
                          task_state=None,
                          expected_task_state=task_states.MIGRATING,),
                     ex, request_spec, self.db)
     except Exception as ex:
         LOG.error(_('Migration of instance %(instance_id)s to host'
                    ' %(dest)s unexpectedly failed.'),
                    {'instance_id': instance['uuid'], 'dest': destination},
                    exc_info=True)
         raise exception.MigrationError(reason=ex)
Example #20
 def _status_edge(self, task):
     edge_id = task.userdata['edge_id']
     try:
         response = self.vcns.get_edge_deploy_status(edge_id)[1]
         task.userdata['retries'] = 0
         system_status = response.get('systemStatus', None)
         if system_status is None:
             status = constants.TaskStatus.PENDING
         elif system_status == 'good':
             status = constants.TaskStatus.COMPLETED
         else:
             status = constants.TaskStatus.ERROR
     except exceptions.VcnsApiException:
         with excutils.save_and_reraise_exception():
             LOG.exception(_LE("VCNS: Edge %s status query failed."),
                           edge_id)
     except Exception:
         retries = task.userdata.get('retries', 0) + 1
         if retries < 3:
             task.userdata['retries'] = retries
             LOG.exception(_LE("VCNS: Unable to retrieve edge %(edge_id)s "
                               "status. Retry %(retries)d."),
                           {'edge_id': edge_id,
                            'retries': retries})
             status = constants.TaskStatus.PENDING
         else:
             LOG.exception(_LE("VCNS: Unable to retrieve edge %s status. "
                              "Abort."), edge_id)
             status = constants.TaskStatus.ERROR
     LOG.debug("VCNS: Edge %s status", edge_id)
     return status
Example #21
 def _get_edges(self):
     try:
         return self.vcns.get_edges()[1]
     except exceptions.VcnsApiException as e:
         with excutils.save_and_reraise_exception():
             LOG.exception(_LE("VCNS: Failed to get edges:\n%s"),
                           e.response)
Example #22
 def get_bridge_name_for_port_name(self, port_name):
     try:
         return self.run_vsctl(['port-to-br', port_name], check_error=True)
     except RuntimeError as e:
         with excutils.save_and_reraise_exception() as ctxt:
             if 'Exit code: 1\n' in str(e):
                 ctxt.reraise = False
Example #23
    def sync_resources(self, resource_name, collection_name, resources,
                       context, dbcontext, attr_filter):
        """Sync objects from Neutron over to OpenDaylight.

        This will handle syncing networks, subnets, and ports from Neutron to
        OpenDaylight. It also filters out the requisite items which are not
        valid for create API operations.
        """
        to_be_synced = []
        for resource in resources:
            try:
                urlpath = collection_name + '/' + resource['id']
                self.client.sendjson('get', urlpath, None)
            except requests.exceptions.HTTPError as e:
                with excutils.save_and_reraise_exception() as ctx:
                    if e.response.status_code == 404:
                        attr_filter(resource, context, dbcontext)
                        to_be_synced.append(resource)
                        ctx.reraise = False

        key = resource_name if len(to_be_synced) == 1 else collection_name

        # 400 errors are returned if an object exists, which we ignore.
        self.client.sendjson('post', collection_name,
                             {key: to_be_synced}, [400])
Example #24
 def get_nat_config(self, edge_id):
     try:
         return self.vcns.get_nat_config(edge_id)[1]
     except exceptions.VcnsApiException as e:
         with excutils.save_and_reraise_exception():
             LOG.exception(_LE("VCNS: Failed to get nat config:\n%s"),
                           e.response)
Example #25
    def _add_params_to_script_and_transfer_to_node(self, cluster_info,
                                                   node_group,
                                                   node_with_volumes=False):
        plugin_config = cluster_info['plugin_config']
        hadoop_log_directory = plugin_config.HADOOP_LOG_DIRECTORY
        if node_with_volumes:
            hadoop_log_directory = (
                plugin_config.HADOOP_LOG_DIRECTORY_ON_VOLUME)
        extra_script_parameters = {
            'HADOOP_EXAMPLES_JAR_PATH': plugin_config.HADOOP_EXAMPLES_JAR_PATH,
            'HADOOP_LOG_DIRECTORY': hadoop_log_directory,
            'HADOOP_USER': plugin_config.HADOOP_USER,
            'NODE_COUNT': cluster_info['node_info']['node_count']
        }
        for instance in node_group['instances']:
            try:
                self.open_ssh_connection(
                    instance['management_ip'], plugin_config.SSH_USERNAME)
                self.transfer_helper_script_to_node(
                    'map_reduce_test_script.sh', extra_script_parameters
                )
                self.close_ssh_connection()
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    print(str(e))
Example #26
def _sign_csr(csr_text, ca_folder):
    with utils.tempdir() as tmpdir:
        inbound = os.path.join(tmpdir, "inbound.csr")
        outbound = os.path.join(tmpdir, "outbound.csr")

        try:
            with open(inbound, "w") as csrfile:
                csrfile.write(csr_text)
        except IOError:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to write inbound.csr"))

        LOG.debug("Flags path: %s", ca_folder)
        start = os.getcwd()

        # Change working dir to CA
        fileutils.ensure_tree(ca_folder)
        os.chdir(ca_folder)
        utils.execute("openssl", "ca", "-batch", "-out", outbound, "-config", "./openssl.cnf", "-infiles", inbound)
        out, _err = utils.execute("openssl", "x509", "-in", outbound, "-serial", "-noout")
        serial = string.strip(out.rpartition("=")[2])
        os.chdir(start)

        with open(outbound, "r") as crtfile:
            return (serial, crtfile.read())
Example #27
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        vol_size = volume.size
        snap_size = snapshot.volume_size

        self._clone_volume(snapshot.name, volume.name, snapshot.volume_id)
        share = self._get_volume_location(snapshot.volume_id)
        volume['provider_location'] = share
        path = self.local_path(volume)
        run_as_root = self._execute_as_root

        if self._discover_file_till_timeout(path):
            self._set_rw_permissions(path)
            if vol_size != snap_size:
                try:
                    self.extend_volume(volume, vol_size)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.error(
                            _LE("Resizing %s failed. Cleaning volume."),
                            volume.name)
                        self._execute('rm', path, run_as_root=run_as_root)
        else:
            raise exception.CinderException(
                _("NFS file %s not discovered.") % volume['name'])

        return {'provider_location': volume['provider_location']}
Example #28
    def _create_volume(self, eseries_pool_label, eseries_volume_label,
                       size_gb):
        """Creates volume with given label and size."""

        target_pool = None

        pools = self._client.list_storage_pools()
        for pool in pools:
            if pool["label"] == eseries_pool_label:
                target_pool = pool
                break

        if not target_pool:
            msg = _("Pools %s does not exist")
            raise exception.NetAppDriverException(msg % eseries_pool_label)

        try:
            vol = self._client.create_volume(target_pool['volumeGroupRef'],
                                             eseries_volume_label, size_gb)
            LOG.info(_LI("Created volume with "
                         "label %s."), eseries_volume_label)
        except exception.NetAppDriverException as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error creating volume. Msg - %s."),
                          six.text_type(e))

        return vol
Example #29
    def get_method(self, request, action, content_type, body):
        """Look up the action-specific method and its extensions."""

        # Look up the method
        try:
            if not self.controller:
                meth = getattr(self, action)
            else:
                meth = getattr(self.controller, action)
        except AttributeError as e:
            with excutils.save_and_reraise_exception() as ctxt:
                if (not self.wsgi_actions or action not in ['action',
                                                            'create',
                                                            'delete',
                                                            'update']):
                    LOG.exception(_LE('Get method error.'))
                else:
                    ctxt.reraise = False
        else:
            return meth, self.wsgi_extensions.get(action, [])

        if action == 'action':
            # OK, it's an action; figure out which action...
            mtype = _MEDIA_TYPE_MAP.get(content_type)
            action_name = self.action_peek[mtype](body)
            LOG.debug("Action body: %s", body)
        else:
            action_name = action

        # Look up the action method
        return (self.wsgi_actions[action_name],
                self.wsgi_action_extensions.get(action_name, []))
Example #30
def mkfs(fs, path, label=None):
    """Format a file or block device

    :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4',
               'btrfs', etc.)
    :param path: Path to file or block device to format
    :param label: Volume label to use
    """
    if fs == 'swap':
        args = ['mkswap']
    else:
        args = ['mkfs', '-t', fs]
    # add -F to force no interactive execute on non-block device.
    if fs in ('ext3', 'ext4'):
        args.extend(['-F'])
    if label:
        if fs in ('msdos', 'vfat'):
            label_opt = '-n'
        else:
            label_opt = '-L'
        args.extend([label_opt, label])
    args.append(path)
    try:
        execute(*args, run_as_root=True, use_standard_locale=True)
    except processutils.ProcessExecutionError as e:
        with excutils.save_and_reraise_exception() as ctx:
            if os.strerror(errno.ENOENT) in e.stderr:
                ctx.reraise = False
                LOG.exception(_LE('Failed to make file system. '
                                  'File system %s is not supported.'), fs)
                raise exception.FileSystemNotSupported(fs=fs)
            else:
                LOG.exception(_LE('Failed to create a file system '
                                  'in %(path)s. Error: %(error)s'),
                              {'path': path, 'error': e})
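For instance, a hypothetical pair of calls (device paths are illustrative):

    mkfs('ext4', '/dev/loop0', label='scratch')  # mkfs -t ext4 -F -L scratch /dev/loop0
    mkfs('swap', '/dev/loop1')                   # mkswap /dev/loop1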
Example #31
    def _fill_group(self, hgs, port, host_grp_name, wwns):
        added_hostgroup = False
        LOG.debug('Create host group (hgs: %(hgs)s port: %(port)s '
                  'name: %(name)s wwns: %(wwns)s)',
                  {'hgs': hgs,
                   'port': port,
                   'name': host_grp_name,
                   'wwns': wwns})
        gid = self._get_hgname_gid(port, host_grp_name)
        if gid is None:
            for retry_cnt in basic_lib.DEFAULT_TRY_RANGE:
                try:
                    gid = self._get_unused_gid(port)
                    self._add_hostgroup(port, gid, host_grp_name)
                    added_hostgroup = True
                except exception.HBSDNotFound:
                    gid = None
                    msg = basic_lib.set_msg(312, resource='GID')
                    LOG.warning(msg)
                    continue
                else:
                    LOG.debug('Completed adding host target '
                              '(port: %(port)s gid: %(gid)d)',
                              {'port': port,
                               'gid': gid})
                    break
            else:
                msg = basic_lib.output_err(641)
                raise exception.HBSDError(message=msg)

        try:
            if wwns:
                self._add_wwn(hgs, port, gid, wwns)
            else:
                hgs.append({
                    'port': port,
                    'gid': gid,
                    'initiator_wwn': None,
                    'detected': True
                })
        except Exception:
            with excutils.save_and_reraise_exception():
                if added_hostgroup:
                    self._delete_hostgroup(port, gid, host_grp_name)
Example #32
    def _associate_mac_to_net(self, context, network_id, interface_mac, op):
        network = brocade_db.get_network(context, network_id)
        vlan_id = network['vlan']

        # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx
        mac = self.mac_reformat_62to34(interface_mac)
        try:
            self._driver.associate_mac_to_network(self._switch['address'],
                                                  self._switch['username'],
                                                  self._switch['password'],
                                                  vlan_id,
                                                  mac)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(
                    _LE("Brocade NOS driver: failed to associate mac %s"),
                    interface_mac)
Example #33
    def _create_ha_interfaces(self, context, router, ha_network):
        admin_ctx = context.elevated()

        num_agents = self.get_number_of_agents_for_scheduling(context)

        port_ids = []
        try:
            for index in range(num_agents):
                binding = self.add_ha_port(admin_ctx, router.id,
                                           ha_network.network['id'],
                                           router.tenant_id)
                port_ids.append(binding.port_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                for port_id in port_ids:
                    self._core_plugin.delete_port(admin_ctx, port_id,
                                                  l3_port_check=False)
Example #34
    def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
        utils.check_ssh_injection(cmd_list)
        command = ' '.join(cmd_list)

        if not self.sshpool:
            password = self.configuration.san_password
            privatekey = self.configuration.san_private_key
            min_size = self.configuration.ssh_min_pool_conn
            max_size = self.configuration.ssh_max_pool_conn
            self.sshpool = ssh_utils.SSHPool(
                self.configuration.san_ip,
                self.configuration.san_ssh_port,
                self.configuration.ssh_conn_timeout,
                self.configuration.san_login,
                password=password,
                privatekey=privatekey,
                min_size=min_size,
                max_size=max_size)
        last_exception = None
        try:
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        return processutils.ssh_execute(
                            ssh, command, check_exit_code=check_exit_code)
                    except Exception as e:
                        LOG.error(e)
                        last_exception = e
                        greenthread.sleep(random.randint(20, 500) / 100.0)
                try:
                    raise processutils.ProcessExecutionError(
                        exit_code=last_exception.exit_code,
                        stdout=last_exception.stdout,
                        stderr=last_exception.stderr,
                        cmd=last_exception.cmd)
                except AttributeError:
                    raise processutils.ProcessExecutionError(
                        exit_code=-1,
                        stdout="",
                        stderr="Error running SSH command",
                        cmd=command)

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error running SSH command: %s") % command)
Example #35
    def _live_migrate(self, context, instance, scheduler_hint, block_migration,
                      disk_over_commit):
        destination = scheduler_hint.get("host")

        def _set_vm_state(context,
                          instance,
                          ex,
                          vm_state=None,
                          task_state=None):
            request_spec = {
                'instance_properties': {
                    'uuid': instance['uuid'],
                },
            }
            scheduler_utils.set_vm_state_and_notify(
                context, instance.uuid, 'compute_task', 'migrate_server',
                dict(
                    vm_state=vm_state,
                    task_state=task_state,
                    expected_task_state=task_states.MIGRATING,
                ), ex, request_spec, self.db)

        try:
            live_migrate.execute(context, instance, destination,
                                 block_migration, disk_over_commit)
        except (exception.NoValidHost, exception.ComputeServiceUnavailable,
                exception.InvalidHypervisorType, exception.InvalidCPUInfo,
                exception.UnableToMigrateToSelf,
                exception.DestinationHypervisorTooOld,
                exception.InvalidLocalStorage, exception.InvalidSharedStorage,
                exception.HypervisorUnavailable, exception.InstanceNotRunning,
                exception.MigrationPreCheckError,
                exception.LiveMigrationWithOldNovaNotSafe) as ex:
            with excutils.save_and_reraise_exception():
                # TODO(johngarbutt) - eventually need instance actions here
                _set_vm_state(context, instance, ex, instance['vm_state'])
        except Exception as ex:
            LOG.error(_LE('Migration of instance %(instance_id)s to host'
                          ' %(dest)s unexpectedly failed.'), {
                              'instance_id': instance['uuid'],
                              'dest': destination
                          },
                      exc_info=True)
            _set_vm_state(context, instance, ex, vm_states.ERROR,
                          instance['task_state'])
            raise exception.MigrationError(reason=six.text_type(ex))
Example #36
 def setup(self, mount=True):
     self.imgdir = tempfile.mkdtemp(prefix="openstack-vfs-localfs")
     try:
         if self.imgfmt == "raw":
             LOG.debug("Using LoopMount")
             mnt = loop.LoopMount(self.imgfile, self.imgdir, self.partition)
         else:
             LOG.debug("Using NbdMount")
             mnt = nbd.NbdMount(self.imgfile, self.imgdir, self.partition)
         if mount:
             if not mnt.do_mount():
                 raise exception.NovaException(mnt.error)
         self.mount = mnt
     except Exception as e:
         with excutils.save_and_reraise_exception():
             LOG.debug("Failed to mount image %(ex)s)", {'ex': e})
             self.teardown()
Example #37
 def do_request(self, method, action, body=None):
     max_attempts = config.OFC.api_max_attempts
     for i in range(max_attempts, 0, -1):
         try:
             return self.do_single_request(method, action, body)
         except nexc.OFCServiceUnavailable as e:
             with excutils.save_and_reraise_exception() as ctxt:
                 try:
                     wait_time = int(e.retry_after)
                 except (ValueError, TypeError):
                     wait_time = None
                 if i > 1 and wait_time:
                     LOG.info(_LI("Waiting for %s seconds due to "
                                  "OFC Service_Unavailable."), wait_time)
                     time.sleep(wait_time)
                     ctxt.reraise = False
                     continue
Example #38
    def _migrate_disk_files(self, instance_name, disk_files, dest):
        # TODO(mikal): it would be nice if this method took a full instance,
        # because it could then be passed to the log messages below.
        same_host = False
        if dest in self._hostutils.get_local_ips():
            same_host = True
            LOG.debug("Migration target is the source host")
        else:
            LOG.debug("Migration target host: %s", dest)

        instance_path = self._pathutils.get_instance_dir(instance_name)
        revert_path = self._pathutils.get_instance_migr_revert_dir(
            instance_name, remove_dir=True)
        dest_path = None

        try:
            if same_host:
                # Since source and target are the same, we copy the files to
                # a temporary location before moving them into place
                dest_path = '%s_tmp' % instance_path
                if self._pathutils.exists(dest_path):
                    self._pathutils.rmtree(dest_path)
                self._pathutils.makedirs(dest_path)
            else:
                dest_path = self._pathutils.get_instance_dir(instance_name,
                                                             dest,
                                                             remove_dir=True)
            for disk_file in disk_files:
                # Skip the config drive as the instance is already configured
                if os.path.basename(disk_file).lower() != 'configdrive.vhd':
                    LOG.debug(
                        'Copying disk "%(disk_file)s" to '
                        '"%(dest_path)s"', {
                            'disk_file': disk_file,
                            'dest_path': dest_path
                        })
                    self._pathutils.copy(disk_file, dest_path)

            self._pathutils.rename(instance_path, revert_path)

            if same_host:
                self._pathutils.rename(dest_path, instance_path)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._cleanup_failed_disk_migration(instance_path, revert_path,
                                                    dest_path)
Example #39
0
def update_instance_cache_with_nw_info(impl, context, instance,
                                       nw_info=None, update_cells=True):
    try:
        if not isinstance(nw_info, network_model.NetworkInfo):
            nw_info = None
        if nw_info is None:
            nw_info = impl._get_instance_nw_info(context, instance)
        LOG.debug('Updating cache with info: %s', nw_info)
        # NOTE(comstud): The save() method actually handles updating or
        # creating the instance.  We don't need to retrieve the object
        # from the DB first.
        ic = objects.InstanceInfoCache.new(context, instance['uuid'])
        ic.network_info = nw_info
        ic.save(update_cells=update_cells)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_('Failed storing info cache'), instance=instance)
Example #40
0
        def edp_test():
            pig_job_data = self.edp_info.read_pig_example_script()
            pig_lib_data = self.edp_info.read_pig_example_jar()
            mapreduce_jar_data = self.edp_info.read_mapreduce_example_jar()
            # This is a modified version of WordCount that takes swift configs
            java_lib_data = self.edp_info.read_java_example_lib()

            try:
                self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                                 job_data_list=[{
                                     'pig': pig_job_data
                                 }],
                                 lib_data_list=[{
                                     'jar': pig_lib_data
                                 }],
                                 swift_binaries=True,
                                 hdfs_local_output=True)
                self.edp_testing(
                    job_type=utils_edp.JOB_TYPE_MAPREDUCE,
                    job_data_list=[],
                    lib_data_list=[{
                        'jar': mapreduce_jar_data
                    }],
                    configs=self.edp_info.mapreduce_example_configs(),
                    swift_binaries=True,
                    hdfs_local_output=True)
                self.edp_testing(
                    job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
                    job_data_list=[],
                    lib_data_list=[],
                    configs=self.edp_info.mapreduce_streaming_configs())
                self.edp_testing(job_type=utils_edp.JOB_TYPE_JAVA,
                                 job_data_list=[],
                                 lib_data_list=[{
                                     'jar': java_lib_data
                                 }],
                                 configs=self.edp_info.java_example_configs(),
                                 pass_input_output_args=True)

            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(cluster_info['cluster_id'],
                                        cluster_template_id,
                                        node_group_template_id_list)
                    message = 'Failure during EDP testing: '
                    self.print_error_log(message, e)
Example #41
0
    def create_network(self, host, username, password, net_id):
        """Creates a new virtual network."""

        name = template.OS_PORT_PROFILE_NAME.format(id=net_id)
        try:
            mgr = self.connect(host, username, password)
            self.create_vlan_interface(mgr, net_id)
            self.create_port_profile(mgr, name)
            self.create_vlan_profile_for_port_profile(mgr, name)
            self.configure_l2_mode_for_vlan_profile(mgr, name)
            self.configure_trunk_mode_for_vlan_profile(mgr, name)
            self.configure_allowed_vlans_for_vlan_profile(mgr, name, net_id)
            self.activate_port_profile(mgr, name)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("NETCONF error: %s"), ex)
                self.close_session()
Example #42
0
 def _delete_db_pool(self, context, id):
     # proxy the call until plugin inherits from DBPlugin
     # rely on uuid uniqueness:
     try:
         with context.session.begin(subtransactions=True):
             self.service_type_manager.del_resource_associations(
                 context, [id])
             super(LoadBalancerPlugin, self).delete_pool(context, id)
     except Exception:
         # That should not happen; if it does, something has gone
         # wrong. Log the error and mark the pool as ERROR.
         LOG.error(
             _LE('Failed to delete pool %s, putting it in ERROR '
                 'state'), id)
         with excutils.save_and_reraise_exception():
             self.update_status(context, ldb.Pool, id, constants.ERROR)
Example #43
0
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None and self._spawn_method is not None:
            # Spawn a worker to complete the task
            # The linked callback below will be called whenever:
            #   - background task finished with no errors.
            #   - background task has crashed with exception.
            #   - callback was added after the background task has
            #     finished or crashed. This holds because eventlet
            #     currently does not schedule the new thread until the
            #     current thread blocks for some reason.
            # All of the above are asserted in tests such that we'll
            # catch if eventlet ever changes this behavior.
            thread = None
            try:
                thread = self._spawn_method(*self._spawn_args,
                                            **self._spawn_kwargs)

                # NOTE(comstud): Trying to use a lambda here causes
                # the callback to not occur for some reason. This
                # also makes it easier to test.
                thread.link(self._thread_release_resources)
                # Don't unlock! The unlock will occur when the
                # thread finishes.
                return
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    try:
                        # Execute the on_error hook if set
                        if self._on_error_method:
                            self._on_error_method(e, *self._on_error_args,
                                                  **self._on_error_kwargs)
                    except Exception:
                        LOG.warning(
                            _LW("Task's on_error hook failed to "
                                "call %(method)s on node %(node)s"), {
                                    'method': self._on_error_method.__name__,
                                    'node': self.node.uuid
                                })

                    if thread is not None:
                        # This means the link() failed for some
                        # reason. Nuke the thread.
                        thread.cancel()
                    self.release_resources()
        self.release_resources()
Example #44
0
    def cluster_scaling(self, cluster_info, change_list):
        scale_body = {'add_node_groups': [], 'resize_node_groups': []}
        for change in change_list:
            if change['operation'] == 'resize':
                node_group_name = change['info'][0]
                node_group_size = change['info'][1]
                self._add_new_field_to_scale_body_while_ng_resizing(
                    scale_body, node_group_name, node_group_size)
                self._change_node_info_while_ng_resizing(
                    node_group_name, node_group_size, cluster_info)
            if change['operation'] == 'add':
                node_group_name = change['info'][0]
                node_group_size = change['info'][1]
                node_group_id = change['info'][2]
                self._add_new_field_to_scale_body_while_ng_adding(
                    scale_body, node_group_id, node_group_size,
                    node_group_name)
                self._change_node_info_while_ng_adding(node_group_id,
                                                       node_group_size,
                                                       cluster_info)
        self.sahara.clusters.scale(cluster_info['cluster_id'], scale_body)
        self.poll_cluster_state(cluster_info['cluster_id'])
        new_node_ip_list = self.get_cluster_node_ip_list_with_node_processes(
            cluster_info['cluster_id'])
        try:
            new_node_info = self.get_node_info(new_node_ip_list,
                                               cluster_info['plugin_config'])

        except Exception as e:
            with excutils.save_and_reraise_exception():
                print('\nFailure during check of node process deployment '
                      'on cluster node: ' + str(e))
        expected_node_info = cluster_info['node_info']
        self.assertEqual(
            expected_node_info, new_node_info,
            'Failure during node info comparison.\n'
            'Expected node info after cluster scaling: %s.\n'
            'Actual node info after cluster scaling: %s.' %
            (expected_node_info, new_node_info))
        return {
            'cluster_id': cluster_info['cluster_id'],
            'node_ip_list': new_node_ip_list,
            'node_info': new_node_info,
            'plugin_config': cluster_info['plugin_config']
        }
Example #45
0
    def create_port(self, context, port):

        self._ensure_default_security_group_on_port(context, port)

        sgids = self._get_security_groups_on_port(context, port)

        network = {}

        network_id = port['port']['network_id']

        with context.session.begin(subtransactions=True):

            # Invoke the Neutron API to create the port
            neutron_port = super(OneConvergencePluginV2,
                                 self).create_port(context, port)

            self._process_portbindings_create_and_update(context,
                                                         port['port'],
                                                         neutron_port)

            self._process_port_create_security_group(context, neutron_port,
                                                     sgids)
            if port['port']['device_owner'] in ('network:router_gateway',
                                                'network:floatingip'):
                # for l3 requests, tenant_id will be None/''
                network = self._get_network(context, network_id)

                tenant_id = network['tenant_id']
            else:
                tenant_id = port['port']['tenant_id']

        port_id = neutron_port['id']

        try:
            self.nvsdlib.create_port(tenant_id, neutron_port)
        except nvsdexception.NVSDAPIException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Deleting newly created "
                              "neutron port %s"), port_id)
                super(OneConvergencePluginV2, self).delete_port(context,
                                                                port_id)

        self.notify_security_groups_member_updated(context, neutron_port)

        return neutron_port
Example #46
0
    def attach(self, context, instance, volume_api, virt_driver,
               do_check_attach=True, do_driver_attach=False):
        volume = volume_api.get(context, self.volume_id)
        if do_check_attach:
            volume_api.check_attach(context, volume, instance=instance)

        volume_id = volume['id']
        context = context.elevated()

        connector = virt_driver.get_volume_connector(instance)
        connection_info = volume_api.initialize_connection(context,
                                                           volume_id,
                                                           connector)
        if 'serial' not in connection_info:
            connection_info['serial'] = self.volume_id
        self._preserve_multipath_id(connection_info)

        # If do_driver_attach is False, we will attach a volume to an instance
        # at boot time. So actual attach is done by instance creation code.
        if do_driver_attach:
            encryption = encryptors.get_encryption_metadata(
                context, volume_api, volume_id, connection_info)

            try:
                virt_driver.attach_volume(
                        context, connection_info, instance,
                        self['mount_device'], disk_bus=self['disk_bus'],
                        device_type=self['device_type'], encryption=encryption)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Driver failed to attach volume "
                                      "%(volume_id)s at %(mountpoint)s"),
                                  {'volume_id': volume_id,
                                   'mountpoint': self['mount_device']},
                                  context=context, instance=instance)
                    volume_api.terminate_connection(context, volume_id,
                                                    connector)
        self['connection_info'] = connection_info

        mode = 'rw'
        if 'data' in connection_info:
            mode = connection_info['data'].get('access_mode', 'rw')
        if volume['attach_status'] == "detached":
            volume_api.attach(context, volume_id, instance.uuid,
                              self['mount_device'], mode=mode)
Example #47
0
    def _generate_configdrive(self,
                              instance,
                              node,
                              network_info,
                              extra_md=None,
                              files=None):
        """Generate a config drive.

        :param instance: The instance object.
        :param node: The node object.
        :param network_info: Instance network information.
        :param extra_md: Optional, extra metadata to be added to the
                         configdrive.
        :param files: Optional, a list of paths to files to be added to
                      the configdrive.

        """
        if not extra_md:
            extra_md = {}

        i_meta = instance_metadata.InstanceMetadata(instance,
                                                    content=files,
                                                    extra_md=extra_md,
                                                    network_info=network_info)

        with tempfile.NamedTemporaryFile() as uncompressed:
            try:
                with configdrive.ConfigDriveBuilder(instance_md=i_meta) as cdb:
                    cdb.make_drive(uncompressed.name)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Creating config drive failed with "
                                  "error: %s"),
                              e,
                              instance=instance)

            with tempfile.NamedTemporaryFile() as compressed:
                # compress config drive
                with gzip.GzipFile(fileobj=compressed, mode='wb') as gzipped:
                    uncompressed.seek(0)
                    shutil.copyfileobj(uncompressed, gzipped)

                # base64 encode config drive
                compressed.seek(0)
                return base64.b64encode(compressed.read())
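The compress-and-encode step in the example above is independent of Nova and easy to check in isolation. A minimal sketch with purely illustrative names: gzip the payload, base64-encode it for transport, and verify the roundtrip.

import base64
import gzip


def pack(data):
    # gzip the payload, then base64 it so it can travel as text.
    return base64.b64encode(gzip.compress(data))


def unpack(blob):
    return gzip.decompress(base64.b64decode(blob))


assert unpack(pack(b'config drive contents')) == b'config drive contents'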
Example #48
0
    def terminate_connection(self, volume, connector, force=False, **kwargs):
        # Get our volume name
        volume_name = volume.get('id')
        LOG.debug('Terminate connection: %s', volume_name)
        with self._client.open_connection() as api:
            try:
                ssn = api.find_sc(self.configuration.dell_sc_ssn)
                wwpns = connector.get('wwpns')
                for wwn in wwpns:
                    scserver = api.find_server(ssn,
                                               wwn)
                    if scserver is not None:
                        break

                # Find the volume on the storage center.
                scvolume = api.find_volume(ssn,
                                           volume_name)
                # Get our target map so we can return it to free up a zone.
                lun, targets, init_targ_map = api.find_wwns(scvolume,
                                                            scserver)
                # If we have a server and a volume, let's unmap them.
                if (scserver is not None and
                        scvolume is not None and
                        api.unmap_volume(scvolume, scserver) is True):
                    LOG.debug('Connection terminated')
                else:
                    raise exception.VolumeBackendAPIException(
                        _('Terminate connection failed'))

                # basic return info...
                info = {'driver_volume_type': 'fibre_channel',
                        'data': {}}

                # If the server has no more volumes mapped, return the
                # target map so that the zone can be freed up.
                if api.get_volume_count(scserver) == 0:
                    info['data'] = {'target_wwn': targets,
                                    'initiator_target_map': init_targ_map}
                return info

            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Failed to terminate connection'))
        raise exception.VolumeBackendAPIException(
            _('Terminate connection unable to connect to backend.'))
Example #49
0
    def add_ha_port(self, context, router_id, network_id, tenant_id):
        port = self._core_plugin.create_port(context, {
            'port':
            {'tenant_id': '',
             'network_id': network_id,
             'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
             'mac_address': attributes.ATTR_NOT_SPECIFIED,
             'admin_state_up': True,
             'device_id': router_id,
             'device_owner': constants.DEVICE_OWNER_ROUTER_HA_INTF,
             'name': constants.HA_PORT_NAME % tenant_id}})

        try:
            return self._create_ha_port_binding(context, port['id'], router_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._core_plugin.delete_port(context, port['id'],
                                              l3_port_check=False)
Example #50
0
    def _do_detach(self, volume, vg):
        devname = self._device_name(volume)
        volname = self._get_volname(volume)
        cmd = 'echo ' + devname + ' > /sys/class/srb/detach'
        try:
            putils.execute('/bin/sh', '-c', cmd,
                           root_helper='sudo', run_as_root=True)
        except putils.ProcessExecutionError:
            with excutils.save_and_reraise_exception(reraise=True):
                try:
                    with patched(lvm.LVM, 'activate_lv', self._activate_lv):
                        vg.activate_lv(volname)

                    self._do_deactivate(volume, vg)
                except putils.ProcessExecutionError:
                    LOG.warning(_LW('All attempts to recover from the failed '
                                    'detach of %(volume)s failed.'),
                                {'volume': volname})
Example #51
0
    def _get_secret(self, ctxt, secret_ref):
        """Creates the URL required for accessing a secret's metadata.

        :param ctxt: contains information of the user and the environment for
                     the request (cinder/context.py)
        :param secret_ref: URL to access the secret

        :return: the secret's metadata
        :throws Exception: if there is an error retrieving the data
        """

        barbican_client = self._get_barbican_client(ctxt)

        try:
            return barbican_client.secrets.get(secret_ref)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error getting secret metadata: %s"), (e))
Example #52
0
    def create_pool(self, context, edge_id, pool, members):
        pool_new = self._convert_lb_pool(context, edge_id, pool, members)
        try:
            header = self.vcns.create_pool(edge_id, pool_new)[0]
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create pool"))

        objuri = header['location']
        pool_vseid = objuri[objuri.rfind("/") + 1:]

        # update the pool mapping table
        map_info = {
            "pool_id": pool['id'],
            "pool_vseid": pool_vseid,
            "edge_id": edge_id
        }
        vcns_db.add_vcns_edge_pool_binding(context.session, map_info)
Example #53
0
    def delete_volume(self, volume):
        deleted = False
        # we use id as our name as it's unique
        volume_name = volume.get('id')
        with self._client.open_connection() as api:
            try:
                ssn = api.find_sc(self.configuration.dell_sc_ssn)
                if ssn is not None:
                    deleted = api.delete_volume(ssn, volume_name)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Failed to delete volume %s'), volume_name)

        # If there was an error we will have raised an exception. If the
        # volume failed to delete, it is because the conditions to
        # delete a volume were not met.
        if deleted is False:
            raise exception.VolumeIsBusy(volume_name=volume_name)
Example #54
0
    def do_setup(self, context):
        """Disable cli confirmation and tune output format."""
        try:
            disabled_cli_features = ('confirmation', 'paging', 'events',
                                     'formatoutput')
            for feature in disabled_cli_features:
                self._eql_execute('cli-settings', feature, 'off')

            for line in self._eql_execute('grpparams', 'show'):
                if line.startswith('Group-Ipaddress:'):
                    out_tup = line.rstrip().partition(' ')
                    self._group_ip = out_tup[-1]

            LOG.info(_LI('EQL-driver: Setup is complete, group IP is "%s".'),
                     self._group_ip)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to setup the Dell EqualLogic driver.'))
Example #55
0
 def create_volume(self, volume):
     """Create a volume."""
     try:
         cmd = [
             'volume', 'create', volume['name'],
             "%sG" % (volume['size'])
         ]
         if self.configuration.eqlx_pool != 'default':
             cmd.append('pool')
             cmd.append(self.configuration.eqlx_pool)
         if self.configuration.san_thin_provision:
             cmd.append('thin-provision')
         out = self._eql_execute(*cmd)
         self.add_multihost_access(volume)
         return self._get_volume_data(out)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.error(_LE('Failed to create volume "%s".'), volume['name'])
Example #56
0
    def _run_ssh(self, cmd_list, attempts=1):
        utils.check_ssh_injection(cmd_list)
        command = ' '.join(cmd_list)

        if not self.sshpool:
            password = self.configuration.san_password
            privatekey = self.configuration.san_private_key
            min_size = self.configuration.ssh_min_pool_conn
            max_size = self.configuration.ssh_max_pool_conn
            self.sshpool = ssh_utils.SSHPool(
                self.configuration.san_ip,
                self.configuration.san_ssh_port,
                self.configuration.ssh_conn_timeout,
                self.configuration.san_login,
                password=password,
                privatekey=privatekey,
                min_size=min_size,
                max_size=max_size)
        try:
            total_attempts = attempts
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        LOG.info(_LI('EQL-driver: executing "%s".'), command)
                        return self._ssh_execute(
                            ssh,
                            command,
                            timeout=self.configuration.eqlx_cli_timeout)
                    except processutils.ProcessExecutionError:
                        raise
                    except Exception as e:
                        LOG.exception(e)
                        greenthread.sleep(random.randint(20, 500) / 100.0)
                msg = (_("SSH Command failed after '%(total_attempts)r' "
                         "attempts : '%(command)s'") % {
                             'total_attempts': total_attempts,
                             'command': command
                         })
                raise exception.VolumeBackendAPIException(data=msg)

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Error running SSH command: "%s".'), command)
Example #57
0
    def get_boot_device(self, task):
        """Get the current boot device for a node.

        Returns the current boot device of the node.

        :param task: a task from TaskManager.
        :raises: DracClientError on an error from pywsman library.
        :returns: a dictionary containing:

            :boot_device: the boot device, one of
                :mod:`ironic.common.boot_devices` or None if it is unknown.
            :persistent: Whether the boot device will persist to all
                future boots or not, None if it is unknown.

        """
        client = drac_common.get_wsman_client(task.node)
        boot_mode = _get_next_boot_mode(task.node)

        persistent = boot_mode['is_next'] == PERSISTENT
        instance_id = boot_mode['instance_id']

        options = pywsman.ClientOptions()
        filter_query = ('select * from DCIM_BootSourceSetting where '
                        'PendingAssignedSequence=0 and '
                        'BootSourceType="%s"' % instance_id)
        try:
            doc = client.wsman_enumerate(resource_uris.DCIM_BootSourceSetting,
                                         options,
                                         filter_query=filter_query)
        except exception.DracClientError as exc:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE('DRAC driver failed to get the current boot '
                        'device for node %(node_uuid)s. '
                        'Reason: %(error)s.'), {
                            'node_uuid': task.node.uuid,
                            'error': exc
                        })

        instance_id = drac_common.find_xml(
            doc, 'InstanceID', resource_uris.DCIM_BootSourceSetting).text
        boot_device = next((key for (key, value) in _BOOT_DEVICES_MAP.items()
                            if value in instance_id), None)
        return {'boot_device': boot_device, 'persistent': persistent}
Example #58
0
    def delete_share(self, context, share_id):
        """Delete a share."""
        context = context.elevated()
        share_ref = self.db.share_get(context, share_id)
        share_server = self._get_share_server(context, share_ref)

        if context.project_id != share_ref['project_id']:
            project_id = share_ref['project_id']
        else:
            project_id = context.project_id
        rules = self.db.share_access_get_all_for_share(context, share_id)
        try:
            for access_ref in rules:
                self._deny_access(context, access_ref, share_ref, share_server)
            self.driver.delete_share(context,
                                     share_ref,
                                     share_server=share_server)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.share_update(context, share_id,
                                     {'status': 'error_deleting'})
        try:
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          shares=-1,
                                          gigabytes=-share_ref['size'])
        except Exception:
            reservations = None
            LOG.exception(_LE("Failed to update usages deleting share"))

        self.db.share_delete(context, share_id)
        LOG.info(_LI("Share %s: deleted successfully."), share_ref['name'])

        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

        if CONF.delete_share_server_with_last_share:
            share_server = self._get_share_server(context, share_ref)
            if share_server and not share_server.shares:
                LOG.debug(
                    "Scheduled deletion of share-server "
                    "with id '%s' automatically by "
                    "deletion of last share.", share_server['id'])
                self.delete_share_server(context, share_server)
Example #59
0
def _set_power_state(node, target_state):
    """Turns the server power on/off or do a reboot.

    :param node: an ironic node object.
    :param target_state: target state of the node.
    :raises: DracClientError if the client received unexpected response.
    :raises: InvalidParameterValue if an invalid power state was specified.
    """

    client = drac_common.get_wsman_client(node)
    options = pywsman.ClientOptions()
    options.add_selector('CreationClassName', 'DCIM_ComputerSystem')
    options.add_selector('Name', 'srv:system')
    options.add_property('RequestedState', REVERSE_POWER_STATES[target_state])

    try:
        root = client.wsman_invoke(resource_uris.DCIM_ComputerSystem, options,
                                   'RequestStateChange')
    except exception.DracClientError as exc:
        with excutils.save_and_reraise_exception():
            LOG.error(
                _LE('DRAC driver failed to set power state for node '
                    '%(node_uuid)s to %(target_power_state)s. '
                    'Reason: %(error)s.'), {
                        'node_uuid': node.uuid,
                        'target_power_state': target_state,
                        'error': exc
                    })

    return_value = drac_common.find_xml(root, 'ReturnValue',
                                        resource_uris.DCIM_ComputerSystem).text
    if return_value != drac_common.RET_SUCCESS:
        message = drac_common.find_xml(root, 'Message',
                                       resource_uris.DCIM_ComputerSystem).text
        LOG.error(
            _LE('DRAC driver failed to set power state for node '
                '%(node_uuid)s to %(target_power_state)s. '
                'Reason: %(error)s.'), {
                    'node_uuid': node.uuid,
                    'target_power_state': target_state,
                    'error': message
                })
        raise exception.DracOperationError(operation='set_power_state',
                                           error=message)
Example #60
0
    def add_router_interface(self, context, router_id, interface_info):
        """Add a subnet of a network to an existing router."""

        new_router = super(AristaL3ServicePlugin, self).add_router_interface(
            context, router_id, interface_info)

        # Get network info for the subnet that is being added to the router.
        # Check if the interface information is by port-id or subnet-id
        add_by_port, add_by_sub = self._validate_interface_info(interface_info)
        if add_by_sub:
            subnet = self.get_subnet(context, interface_info['subnet_id'])
        elif add_by_port:
            port = self.get_port(context, interface_info['port_id'])
            subnet_id = port['fixed_ips'][0]['subnet_id']
            subnet = self.get_subnet(context, subnet_id)
        network_id = subnet['network_id']

        # To create SVIs in the Arista HW, the segmentation ID is
        # required for this network.
        ml2_db = NetworkContext(self, context, {'id': network_id})
        seg_id = ml2_db.network_segments[0]['segmentation_id']

        # Package all the info needed for Hw programming
        router = super(AristaL3ServicePlugin, self).get_router(context,
                                                               router_id)
        router_info = copy.deepcopy(new_router)
        router_info['seg_id'] = seg_id
        router_info['name'] = router['name']
        router_info['cidr'] = subnet['cidr']
        router_info['gip'] = subnet['gateway_ip']
        router_info['ip_version'] = subnet['ip_version']

        try:
            self.driver.add_router_interface(context, router_info)
            return new_router
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error Adding subnet %(subnet)s to "
                              "router %(router_id)s on Arista HW"),
                          {'subnet': subnet, 'router_id': router_id})
                super(AristaL3ServicePlugin, self).remove_router_interface(
                                                    context,
                                                    router_id,
                                                    interface_info)
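Most of these examples share one shape: create a resource, attempt a follow-up step, and on failure undo the partial create inside save_and_reraise_exception() so the original exception and traceback still propagate. A generic sketch of that pattern, assuming oslo.utils; create_thing, bind_thing, and delete_thing are hypothetical.

from oslo_utils import excutils


def create_thing(ctx):
    # Hypothetical first step that allocates a resource.
    return {'id': 'thing-1'}


def bind_thing(ctx, thing):
    # Hypothetical follow-up step that may fail.
    raise RuntimeError('binding failed')


def delete_thing(ctx, thing):
    # Hypothetical rollback of the first step.
    pass


def create_with_rollback(ctx):
    thing = create_thing(ctx)
    try:
        return bind_thing(ctx, thing)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Undo the partial create; the saved exception is
            # re-raised when the context manager exits.
            delete_thing(ctx, thing)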