class EventletApiRequest(request.ApiRequest):
    '''Eventlet-based ApiRequest class.

    This class will form the basis for eventlet-based ApiRequest classes.
    Each request runs in its own green thread drawn from a class-wide pool,
    and carries a small wrap-around integer id for log correlation.
    '''

    # Maximum number of green threads present in the system at one time.
    API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE

    # Pool of green threads. One green thread is allocated per incoming
    # request. Incoming requests will block when the pool is empty.
    API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE)

    # A unique id is assigned to each incoming request. When the current
    # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0.
    MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID

    # The request id for the next incoming request.
    CURRENT_REQUEST_ID = 0

    def __init__(self, client_obj, url, method="GET", body=None,
                 headers=None,
                 request_timeout=request.DEFAULT_REQUEST_TIMEOUT,
                 retries=request.DEFAULT_RETRIES,
                 auto_login=True,
                 redirects=request.DEFAULT_REDIRECTS,
                 http_timeout=request.DEFAULT_HTTP_TIMEOUT, client_conn=None):
        '''Constructor.

        :param client_obj: API client instance this request belongs to
        :param url: request path
        :param method: HTTP verb, defaults to "GET"
        :param body: optional request body
        :param headers: optional dict of HTTP headers; a User-Agent entry
                        is added when absent
        :param request_timeout: overall deadline in seconds for the whole
                                request (including retries); falsy disables
                                the deadline (see _run)
        :param retries: number of retry attempts (see _handle_request)
        :param auto_login: whether re-authentication may happen
                           automatically (consumed by subclasses/base class)
        :param http_timeout: timeout for a single HTTP exchange
        :param client_conn: optional pre-established connection to use
        '''
        self._api_client = client_obj
        self._url = url
        self._method = method
        self._body = body
        self._headers = headers or {}
        self._request_timeout = request_timeout
        self._retries = retries
        self._auto_login = auto_login
        self._redirects = redirects
        self._http_timeout = http_timeout
        self._client_conn = client_conn
        self._abort = False

        # Holds the last error seen by _handle_request; None on success.
        self._request_error = None

        if "User-Agent" not in self._headers:
            self._headers["User-Agent"] = USER_AGENT

        self._green_thread = None
        # Retrieve and store this instance's unique request id.
        self._request_id = EventletApiRequest.CURRENT_REQUEST_ID
        # Update the class variable that tracks request id.
        # Request IDs wrap around at MAXIMUM_REQUEST_ID
        next_request_id = self._request_id + 1
        next_request_id %= self.MAXIMUM_REQUEST_ID
        EventletApiRequest.CURRENT_REQUEST_ID = next_request_id

    @classmethod
    def _spawn(cls, func, *args, **kwargs):
        '''Allocate a green thread from the class pool.

        Blocks when API_REQUEST_POOL is exhausted until a slot frees up.
        '''
        return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs)

    def spawn(self, func, *args, **kwargs):
        '''Spawn a new green thread with the supplied function and args.'''
        return self.__class__._spawn(func, *args, **kwargs)

    @classmethod
    def joinall(cls):
        '''Wait for all outstanding requests to complete.'''
        return cls.API_REQUEST_POOL.waitall()

    def join(self):
        '''Wait for instance green thread to complete.

        Returns the green thread's result, or an Exception instance when
        start() was never called.
        # NOTE(review): the Exception is *returned*, not raised -- callers
        # appear to inspect the return value; confirm before changing.
        '''
        if self._green_thread is not None:
            return self._green_thread.wait()
        return Exception(_('Joining an invalid green thread'))

    def start(self):
        '''Start request processing.'''
        self._green_thread = self.spawn(self._run)

    def copy(self):
        '''Return a copy of this request instance.

        # NOTE(review): _client_conn and _abort are not carried over, so the
        # copy starts with a fresh connection and un-aborted state --
        # presumably intentional; confirm.
        '''
        return EventletApiRequest(
            self._api_client, self._url, self._method, self._body,
            self._headers, self._request_timeout, self._retries,
            self._auto_login, self._redirects, self._http_timeout)

    def _run(self):
        '''Method executed within green thread.

        When a request timeout is configured the whole request (including
        retries) runs under an eventlet Timeout; on expiry the code after
        the with-block records the timeout error and returns None.
        '''
        if self._request_timeout:
            # No timeout exception escapes the with block.
            with eventlet.timeout.Timeout(self._request_timeout, False):
                return self._handle_request()

            # Reached only when the Timeout fired: the return above was
            # interrupted and the swallowed timeout lands us here.
            LOG.info(_('[%d] Request timeout.'), self._rid())
            self._request_error = Exception(_('Request timeout'))
            return None
        else:
            return self._handle_request()

    def _handle_request(self):
        '''First level request handling.

        Issues the request up to retries+1 times. 401/403 responses are
        retried immediately (presumably after re-login by _issue_request --
        confirm); 503 responses are retried after a 0.5s pause. Any other
        status, or exhausted retries, ends the loop. On a non-HTTPResponse
        result the error is stored in _request_error and the loop retries.
        '''
        attempt = 0
        timeout = 0
        response = None
        while response is None and attempt <= self._retries:
            eventlet.greenthread.sleep(timeout)
            attempt += 1

            req = self._issue_request()
            # automatically raises any exceptions returned.
            if isinstance(req, httplib.HTTPResponse):
                timeout = 0
                if attempt <= self._retries and not self._abort:
                    if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN):
                        continue
                    elif req.status == httplib.SERVICE_UNAVAILABLE:
                        timeout = 0.5
                        continue
                    # else fall through to return the error code

                LOG.debug(_("[%(rid)d] Completed request '%(method)s %(url)s'"
                            ": %(status)s"),
                          {'rid': self._rid(), 'method': self._method,
                           'url': self._url, 'status': req.status})
                self._request_error = None
                response = req
            else:
                LOG.info(_('[%(rid)d] Error while handling request: %(req)s'),
                         {'rid': self._rid(), 'req': req})
                self._request_error = req
                response = None
        return response
class Client(object):
    """
    Client for the Cisco Nexus1000V Neutron Plugin.

    This client implements functions to communicate with
    Cisco Nexus1000V VSM.

    For every Neutron object, Cisco Nexus1000V Neutron Plugin
    creates a corresponding object on the controller (Cisco
    Nexus1000V VSM).

    CONCEPTS:

    Following are few concepts used in Nexus1000V VSM:

    port-profiles:
    Policy profiles correspond to port profiles on Nexus1000V VSM.
    Port profiles are the primary mechanism by which network policy is
    defined and applied to switch interfaces in a Nexus 1000V system.

    network-segment:
    Each network-segment represents a broadcast domain.

    network-segment-pool:
    A network-segment-pool contains one or more network-segments.

    logical-network:
    A logical-network contains one or more network-segment-pools.

    bridge-domain:
    A bridge-domain is created when the network-segment is of type VXLAN.
    Each VXLAN <--> VLAN combination can be thought of as a bridge domain.

    ip-pool:
    Each ip-pool represents a subnet on the Nexus1000V VSM.


    WORK FLOW:

    For every network profile a corresponding logical-network and
    a network-segment-pool, under this logical-network, will be created.

    For every network created from a given network profile, a
    network-segment will be added to the network-segment-pool corresponding
    to that network profile.

    A port is created on a network and associated with a policy-profile.
    Hence for every unique combination of a network and a policy-profile, a
    unique vm-network will be created and a reference to the port will be
    added. If the same combination of network and policy-profile is used by
    another port, the references to that port will be added to the same
    vm-network.


    """

    # Define paths for the URI where the client connects for HTTP requests.
    port_profiles_path = "/virtual-port-profile"
    ports_path = "/kvm/vm-network/%s/ports"
    port_path = "/kvm/vm-network/%s/ports/%s"
    network_segment_path = "/network-segment/%s"
    network_segments_path = "/network-segment"
    network_segment_pool_path = "/network-segment-pool/%s"
    network_segment_pools_path = "/network-segment-pool"
    ip_pool_path = "/ip-pool-template/%s"
    ip_pools_path = "/ip-pool-template"
    vm_networks_path = "/kvm/vm-network"
    vm_network_path = "/kvm/vm-network/%s"
    bridge_domains_path = "/kvm/bridge-domain"
    bridge_domain_path = "/kvm/bridge-domain/%s"
    logical_network_path = "/logical-network/%s"
    md5_path = "/kvm/config-md5-hashes"
    sync_notification_path = "/sync-notification"

    pool = eventlet.GreenPool(cfg.CONF.ml2_cisco_n1kv.http_pool_size)

    def __init__(self, **kwargs):
        """Initialize a new client for the plugin.

        Reads connection settings (credentials, VSM addresses, timeouts)
        from the ml2_cisco_n1kv configuration section and validates them.

        :raises cfg.RequiredOptError: if username, password or vsm_ips is
                                      not configured
        :raises cfg.Error: if a configured VSM address is neither a valid
                           IPv4 nor IPv6 address
        """
        self.format = 'json'

        # Extract configuration parameters from the configuration file.
        self.username = cfg.CONF.ml2_cisco_n1kv.username
        self.password = cfg.CONF.ml2_cisco_n1kv.password
        self.vsm_ips = config.get_vsm_hosts()
        # %s is filled per-request with the (possibly bracketed) VSM IP.
        self.action_prefix = 'http://%s/api/n1k'
        self.timeout = cfg.CONF.ml2_cisco_n1kv.http_timeout
        self.max_vsm_retries = cfg.CONF.ml2_cisco_n1kv.max_vsm_retries
        required_opts = ('vsm_ips', 'username', 'password')
        # Validate whether required options are configured
        for opt in required_opts:
            if not getattr(self, opt):
                raise cfg.RequiredOptError(opt, 'ml2_cisco_n1kv')
        # Validate the configured VSM IP addresses.
        # Note: both IPv4 and IPv6 addresses are accepted here (IPv6 URLs
        # are bracketed in _do_request).
        for vsm_ip in self.vsm_ips:
            if not (netutils.is_valid_ipv4(vsm_ip)
                    or netutils.is_valid_ipv6(vsm_ip)):
                raise cfg.Error(
                    _("Cisco Nexus1000V ML2 driver config: "
                      "Invalid format for VSM IP address: %s") % vsm_ip)

    def send_sync_notification(self, msg, vsm_ip):
        """Send a start/end/no-change sync notification to the VSM.

        :param msg: message string: start, end or no-change
        :param vsm_ip: string representing the IP address of the VSM
        :return: VSM response (as produced by _do_request), so callers can
                 inspect the outcome like with every other wrapper method
        """
        body = {'status': msg}
        # Return the result for consistency with the other REST wrappers;
        # previously the response was silently discarded.
        return self._post(self.sync_notification_path, body=body,
                          vsm_ip=vsm_ip)

    def list_port_profiles(self, vsm_ip=None):
        """Retrieve every policy profile defined on the VSM.

        :param vsm_ip: string representing the IP address of the VSM
        :returns: JSON string
        """
        # Policy profiles are exposed by the VSM as virtual port profiles.
        uri = self.port_profiles_path
        return self._get(uri, vsm_ip=vsm_ip)

    def list_network_profiles(self, vsm_ip=None):
        """Retrieve all network profiles known to the VSM.

        :param vsm_ip: string representing the IP address of the VSM
        :return: JSON string
        """
        # Network profiles map to network segment pools on the VSM.
        uri = self.network_segment_pools_path
        return self._get(uri, vsm_ip=vsm_ip)

    def list_networks(self, vsm_ip=None):
        """Retrieve all networks (segments) from the VSM.

        :param vsm_ip: string representing the IP address of the VSM
        :return: JSON string
        """
        uri = self.network_segments_path
        return self._get(uri, vsm_ip=vsm_ip)

    def list_subnets(self, vsm_ip=None):
        """Retrieve all subnets (ip-pool templates) from the VSM.

        :param vsm_ip: string representing the IP address of the VSM
        :return: JSON string
        """
        uri = self.ip_pools_path
        return self._get(uri, vsm_ip=vsm_ip)

    def list_vmnetworks(self, vsm_ip=None):
        """Retrieve all VM networks from the VSM.

        :param vsm_ip: string representing the IP address of the VSM
        :return: JSON string
        """
        uri = self.vm_networks_path
        return self._get(uri, vsm_ip=vsm_ip)

    def list_md5_hashes(self, vsm_ip=None):
        """Retrieve the VSM's per-resource MD5 hashes.

        The VSM reports MD5 hashes for network profiles, networks, subnets
        and ports, plus one consolidated hash over all of them; these are
        used to detect configuration drift.

        :param vsm_ip: string representing the IP address of the VSM
        :return: JSON string
        """
        uri = self.md5_path
        return self._get(uri, vsm_ip=vsm_ip)

    def list_bridge_domains(self, vsm_ip=None):
        """Retrieve every bridge domain configured on the VSM.

        :param vsm_ip: string representing the IP address of the VSM
        :return: JSON string
        """
        uri = self.bridge_domains_path
        return self._get(uri, vsm_ip=vsm_ip)

    def show_network(self, network_id, vsm_ip=None):
        """Retrieve details (e.g. segment type) for one network segment.

        :param network_id: UUID of the network whose details are needed
        :param vsm_ip: string representing the IP address of the VSM
        :return: JSON string
        """
        uri = self.network_segment_path % network_id
        return self._get(uri, vsm_ip=vsm_ip)

    def _create_logical_network(self, network_profile, vsm_ip=None):
        """Create a logical network on the VSM.

        :param network_profile: network profile dict
        :param vsm_ip: string representing the IP address of the VSM
        """
        # The logical network is named after the profile id plus a fixed
        # suffix so it can be located again on delete.
        name = network_profile['id'] + n1kv_const.LOGICAL_NETWORK_SUFFIX
        payload = {'description': network_profile['name']}
        return self._post(self.logical_network_path % name,
                          body=payload,
                          vsm_ip=vsm_ip)

    def delete_logical_network(self, logical_network_name, vsm_ip=None):
        """Remove the named logical network from the VSM.

        :param logical_network_name: string representing name of the logical
                                     network
        :param vsm_ip: string representing the IP address of the VSM
        """
        uri = self.logical_network_path % logical_network_name
        return self._delete(uri, vsm_ip=vsm_ip)

    def create_network_segment_pool(self, network_profile, vsm_ip=None):
        """Create a network segment pool on the VSM.

        A backing logical network is created first; the pool then
        references it by name.

        :param network_profile: network profile dict
        :param vsm_ip: string representing the IP address of the VSM
        """
        self._create_logical_network(network_profile, vsm_ip=vsm_ip)
        logical_network_name = (network_profile['id'] +
                                n1kv_const.LOGICAL_NETWORK_SUFFIX)
        payload = {
            'name': network_profile['name'],
            'description': network_profile['name'],
            'id': network_profile['id'],
            'logicalNetwork': logical_network_name
        }
        uri = self.network_segment_pool_path % network_profile['id']
        return self._post(uri, body=payload, vsm_ip=vsm_ip)

    def delete_network_segment_pool(self,
                                    network_segment_pool_id,
                                    vsm_ip=None):
        """Remove a network segment pool from the VSM.

        :param network_segment_pool_id: UUID representing the network
                                        segment pool
        :param vsm_ip: string representing the IP address of the VSM
        """
        uri = self.network_segment_pool_path % network_segment_pool_id
        return self._delete(uri, vsm_ip=vsm_ip)

    def create_network_segment(self, network, network_profile, vsm_ip=None):
        """Create a network segment on the VSM.

        For VXLAN networks a bridge domain is created first; if the
        subsequent segment creation fails, the bridge domain is removed
        before the exception is re-raised to the caller.

        :param network: network dict
        :param network_profile: network profile object
        :param vsm_ip: string representing the IP address of the VSM
        :raises n1kv_exc.VSMError: re-raised after cleanup on VSM error
        :raises n1kv_exc.VSMConnectionFailed: re-raised after cleanup when
                                              the VSM is unreachable
        """
        body = {
            'publishName': network['id'],
            'description': network['name'],
            'id': network['id'],
            'tenantId': network['tenant_id'],
            'mode': 'access',
            'segmentType': network_profile['segment_type'],
            'networkSegmentPool': network_profile['id']
        }
        # Override tenantId if network is shared
        if network['shared']:
            body['tenantId'] = '0'
        if network[providernet.NETWORK_TYPE] == p_const.TYPE_VLAN:
            body['vlan'] = network[providernet.SEGMENTATION_ID]
        elif network[providernet.NETWORK_TYPE] == p_const.TYPE_VXLAN:
            # Create a bridge domain on VSM
            bd_name = network['id'] + n1kv_const.BRIDGE_DOMAIN_SUFFIX
            self.create_bridge_domain(network, network_profile, vsm_ip=vsm_ip)
            body['bridgeDomain'] = bd_name
        try:
            return self._post(self.network_segment_path % network['id'],
                              body=body,
                              vsm_ip=vsm_ip)
        except (n1kv_exc.VSMError, n1kv_exc.VSMConnectionFailed):
            with excutils.save_and_reraise_exception():
                # Clean up the bridge domain from the VSM for VXLAN networks.
                # Reraise the exception so that caller method executes further
                # clean up.
                # bd_name is bound here: this branch is only entered for
                # VXLAN networks, which set it above.
                if network[providernet.NETWORK_TYPE] == p_const.TYPE_VXLAN:
                    self.delete_bridge_domain(bd_name, vsm_ip=vsm_ip)

    def update_network_segment(self, updated_network):
        """Push updated name/tenant information for a segment to the VSM.

        :param updated_network: updated network dict
        """
        # Shared networks live under the reserved tenant id '0'.
        tenant_id = ('0' if updated_network['shared']
                     else updated_network['tenant_id'])
        payload = {
            'description': updated_network['name'],
            'tenantId': tenant_id
        }
        return self._post(self.network_segment_path % updated_network['id'],
                          body=payload)

    def delete_network_segment(self,
                               network_segment_id,
                               network_type,
                               vsm_ip=None):
        """Remove a network segment (and its bridge domain, for VXLAN).

        :param network_segment_id: UUID representing the network segment
        :param network_type: type of network to be deleted
        :param vsm_ip: string representing the IP address of the VSM
        """
        # VXLAN segments own a bridge domain which must be removed first.
        if network_type == p_const.TYPE_VXLAN:
            self.delete_bridge_domain(
                network_segment_id + n1kv_const.BRIDGE_DOMAIN_SUFFIX,
                vsm_ip=vsm_ip)
        return self._delete(self.network_segment_path % network_segment_id,
                            vsm_ip=vsm_ip)

    def create_bridge_domain(self, network, net_prof, vsm_ip=None):
        """Create a bridge domain on the VSM.

        :param network: network dict
        :param net_prof: network profile dict
        :param vsm_ip: string representing the IP address of the VSM
        """
        # Enhanced VXLAN mode uses unicast; anything else falls back to
        # native (multicast) VXLAN, which additionally needs a group IP.
        enhanced = net_prof['sub_type'] == n1kv_const.CLI_VXLAN_MODE_ENHANCED
        subtype = (n1kv_const.MODE_UNICAST if enhanced
                   else n1kv_const.MODE_NATIVE_VXLAN)
        payload = {
            'name': network['id'] + n1kv_const.BRIDGE_DOMAIN_SUFFIX,
            'segmentId': network[providernet.SEGMENTATION_ID],
            'subType': subtype,
            'tenantId': network['tenant_id']
        }
        if subtype == n1kv_const.MODE_NATIVE_VXLAN:
            # The first address of the "start-end" range is the group IP.
            start_ip, end_ip = net_prof['multicast_ip_range'].split('-', 1)
            payload['groupIp'] = start_ip
        return self._post(self.bridge_domains_path, body=payload,
                          vsm_ip=vsm_ip)

    def delete_bridge_domain(self, name, vsm_ip=None):
        """Remove the named bridge domain from the VSM.

        :param name: name of the bridge domain to be deleted
        :param vsm_ip: string representing the IP address of the VSM
        """
        uri = self.bridge_domain_path % name
        return self._delete(uri, vsm_ip=vsm_ip)

    def create_ip_pool(self, subnet, vsm_ip=None):
        """Create a subnet (ip-pool) on the VSM.

        :param subnet: subnet dict
        :param vsm_ip: string representing the IP address of the VSM
        :raises n_exc.InvalidInput: if the subnet CIDR cannot be parsed
        """
        # Derive netmask and network address from the CIDR when present.
        cidr = subnet['cidr']
        if cidr:
            try:
                net = netaddr.IPNetwork(cidr)
            except (ValueError, netaddr.AddrFormatError):
                raise n_exc.InvalidInput(
                    error_message=_("Invalid input for CIDR"))
            netmask = str(net.netmask)
            network_address = str(net.network)
        else:
            netmask = network_address = ""

        # Only the first allocation pool (if any) is pushed to the VSM.
        pools = subnet['allocation_pools']
        if pools:
            address_range_start = pools[0]['start']
            address_range_end = pools[0]['end']
        else:
            address_range_start = address_range_end = None

        payload = {
            'addressRangeStart': address_range_start,
            'addressRangeEnd': address_range_end,
            'ipAddressSubnet': netmask,
            'description': subnet['name'],
            'gateway': subnet['gateway_ip'],
            'dhcp': subnet['enable_dhcp'],
            'dnsServersList': subnet['dns_nameservers'],
            'networkAddress': network_address,
            'netSegmentName': subnet['network_id'],
            'id': subnet['id'],
            'tenantId': subnet['tenant_id']
        }
        return self._post(self.ip_pool_path % subnet['id'],
                          body=payload,
                          vsm_ip=vsm_ip)

    def update_ip_pool(self, subnet):
        """Push updated subnet attributes to the corresponding ip-pool.

        :param subnet: subnet dictionary
        """
        payload = {
            'description': subnet['name'],
            'dhcp': subnet['enable_dhcp'],
            'dnsServersList': subnet['dns_nameservers']
        }
        uri = self.ip_pool_path % subnet['id']
        return self._post(uri, body=payload)

    def delete_ip_pool(self, subnet_id, vsm_ip=None):
        """Remove the subnet's ip-pool from the VSM.

        :param subnet_id: UUID representing the subnet
        :param vsm_ip: string representing the IP address of the VSM
        """
        uri = self.ip_pool_path % subnet_id
        return self._delete(uri, vsm_ip=vsm_ip)

    def create_n1kv_port(self,
                         port,
                         vmnetwork_name,
                         policy_profile,
                         vsm_ip=None):
        """Create a port inside a VM network on the VSM.

        :param port: port dict
        :param vmnetwork_name: name of the VM network
        :param policy_profile: policy profile object
        :param vsm_ip: string representing the IP address of the VSM
        """
        payload = {
            'name': vmnetwork_name,
            'networkSegmentId': port['network_id'],
            'networkSegment': port['network_id'],
            'portProfile': policy_profile.name,
            'portProfileId': policy_profile.id,
            'tenantId': port['tenant_id'],
            'portId': port['id'],
            'macAddress': port['mac_address'],
            'portType': port['device_owner'],
        }
        # Only ports with fixed IPs carry address/subnet information.
        fixed_ips = port.get('fixed_ips')
        if fixed_ips:
            payload['ipAddress'] = fixed_ips[0]['ip_address']
            payload['subnetId'] = fixed_ips[0]['subnet_id']
        return self._post(self.vm_networks_path, body=payload, vsm_ip=vsm_ip)

    def delete_n1kv_port(self, vmnetwork_name, port_id, vsm_ip=None):
        """Remove a port from its VM network on the VSM.

        :param vmnetwork_name: name of the VM network which imports this port
        :param port_id: UUID of the port
        :param vsm_ip: string representing the IP address of the VSM
        """
        uri = self.port_path % (vmnetwork_name, port_id)
        return self._delete(uri, vsm_ip=vsm_ip)

    def _do_request(self,
                    method,
                    action,
                    body=None,
                    headers=None,
                    vsm_ip=None):
        """Perform the HTTP request.

        The response is in either JSON format or plain text. A GET method will
        invoke a JSON response while a PUT/POST/DELETE returns message from the
        VSM in plain text format.
        Exception is raised when VSM replies with an INTERNAL SERVER ERROR HTTP
        status code (500) i.e. an error has occurred on the VSM or SERVICE
        UNAVAILABLE (503) i.e. VSM is not reachable.

        :param method: type of the HTTP request. POST, GET, PUT or DELETE
        :param action: path to which the client makes request
        :param body: dict for arguments which are sent as part of the request
        :param headers: header for the HTTP request
        :param vsm_ip: vsm_ip for the HTTP request. If not provided then
                       request will be sent to all VSMs.
        :returns: JSON or plain text in HTTP response
        :raises n1kv_exc.VSMConnectionFailed: when a VSM stays unreachable
                after max_vsm_retries attempts
        :raises n1kv_exc.VSMError: when a VSM returns a non-200 status
        """

        action = self.action_prefix + action
        if body:
            body = jsonutils.dumps(body)
            LOG.debug("req: %s", body)
        hosts = []
        if vsm_ip:
            hosts.append(vsm_ip)
        else:
            hosts = self.vsm_ips
        if not headers:
            headers = self._get_auth_header()
            headers['Content-Type'] = headers['Accept'] = "application/json"
        for vsm_ip in hosts:
            if netutils.is_valid_ipv6(vsm_ip):
                # Enclose IPv6 address in [] in the URL
                vsm_action = action % ("[%s]" % vsm_ip)
            else:
                # IPv4 address
                vsm_action = action % vsm_ip
            for attempt in range(self.max_vsm_retries + 1):
                try:
                    # Pass arguments to the logger (lazy %-formatting) so the
                    # message is only rendered when debug logging is enabled.
                    LOG.debug("[VSM %(vsm)s attempt %(id)s]: Connecting..",
                              {"vsm": vsm_ip, "id": attempt})
                    resp = self.pool.spawn(requests.request,
                                           method,
                                           url=vsm_action,
                                           data=body,
                                           headers=headers,
                                           timeout=self.timeout).wait()
                    break
                except Exception as e:
                    LOG.debug("[VSM %(vsm)s attempt %(id)s]: Conn timeout.",
                              {"vsm": vsm_ip, "id": attempt})
                    if attempt == self.max_vsm_retries:
                        LOG.error(_LE("VSM %s, Conn failed."), vsm_ip)
                        raise n1kv_exc.VSMConnectionFailed(reason=e)
            if resp.status_code != requests.codes.OK:
                LOG.error(_LE("VSM %(vsm)s, Got error: %(err)s"), {
                    "vsm": vsm_ip,
                    "err": resp.text
                })
                raise n1kv_exc.VSMError(reason=resp.text)
        if 'application/json' in resp.headers['content-type']:
            try:
                return resp.json()
            except ValueError:
                # VSM advertised JSON but sent an unparseable payload.
                return {}
        elif 'text/plain' in resp.headers['content-type']:
            LOG.info(_LI("VSM: %s"), resp.text)

    def _delete(self, action, body=None, headers=None, vsm_ip=None):
        """Issue a DELETE request to the VSM(s)."""
        return self._do_request("DELETE", action, body=body,
                                headers=headers, vsm_ip=vsm_ip)

    def _get(self, action, vsm_ip=None, body=None, headers=None):
        """Issue a GET request to the VSM(s).

        :param action: path to which the client makes request
        :param vsm_ip: optional VSM IP; defaults to None (all VSMs), making
                       the signature consistent with _delete/_post/_put --
                       existing positional and keyword callers still work
        """
        return self._do_request("GET",
                                action,
                                body=body,
                                headers=headers,
                                vsm_ip=vsm_ip)

    def _post(self, action, body=None, headers=None, vsm_ip=None):
        """Issue a POST request to the VSM(s)."""
        return self._do_request("POST", action, body=body,
                                headers=headers, vsm_ip=vsm_ip)

    def _put(self, action, body=None, headers=None, vsm_ip=None):
        """Issue a PUT request to the VSM(s)."""
        return self._do_request("PUT", action, body=body,
                                headers=headers, vsm_ip=vsm_ip)

    def _get_auth_header(self):
        """Retrieve header with auth info for the VSM.

        :return: authorization header dict with a Basic credentials token
        """
        # base64.encodestring is deprecated (removed in Python 3.9), and its
        # bytes result would render as "b'...'" when %-interpolated into the
        # header string on Python 3. b64encode plus an explicit ASCII decode
        # produces a clean text token on both Python 2 and 3 (b64encode also
        # never inserts newlines, unlike encodestring).
        auth = base64.b64encode(
            six.b("%s:%s" % (self.username, self.password))).decode('ascii')
        return {"Authorization": "Basic %s" % auth}
Example #3
0
    print("fetching %s" % url)
    try:
        r = requests.head(url, allow_redirects=True, timeout=timeout)
    except requests.exceptions.RequestException:
        res = timeout + 1
    else:
        if r.status_code == 200:
            res = time.time() - now
    client.zadd(key, timeout - res, url)


if __name__ == "__main__":
    # monkey_patch must run before the network libraries are imported so
    # their sockets become cooperative.
    import eventlet
    eventlet.monkey_patch()
    import redis
    import requests

    # fetch() reads these module-level names, so keep them as globals.
    key = "pypimirrors"
    client = redis.StrictRedis()
    client.delete(key)
    pool = eventlet.GreenPool(20)

    # Probe mirrors b..g, skipping the ones excluded by the filter below.
    for code in range(ord("b"), ord("g") + 1):
        letter = chr(code)
        if letter in "bde":
            continue
        url = "http://%s.pypi.python.org/packages/" % letter
        pool.spawn_n(fetch, url)
    pool.waitall()
    # Report the ten best-ranked mirrors with their scores.
    for entry in client.zrange(key, 0, 10, withscores=True):
        print(entry)
Example #4
0
 def __init__(self):
     # Create a timestamp helper and a pool of up to 20 green threads for
     # concurrent work.
     # NOTE(review): `ts` and `et` (presumably eventlet) come from module
     # imports not visible here -- confirm.
     self.ts = ts()
     self.pool = et.GreenPool(20)
Example #5
0
    def _process_loop(self):
        """Continuously dispatch resource updates onto a green-thread pool."""
        LOG.debug("Starting _process_loop")

        workers = eventlet.GreenPool(size=8)
        # spawn_n blocks when the pool is exhausted, so at most 8 update
        # handlers run concurrently.
        while True:
            workers.spawn_n(self._process_resource_update)
Example #6
0
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import datetime
from heat.openstack.common import log as logging
from heat.openstack.common import timeutils
from heat.engine import timestamp
from heat.db import api as db_api
from heat.engine import parser
from heat.common import context as ctxtlib
import eventlet

logger = logging.getLogger('heat.engine.watchrule')
greenpool = eventlet.GreenPool()


class WatchRule(object):
    WATCH_STATES = (ALARM, NORMAL, NODATA) = ('ALARM', 'NORMAL', 'NODATA')

    ACTION_MAP = {
        ALARM: 'AlarmActions',
        NORMAL: 'OKActions',
        NODATA: 'InsufficientDataActions'
    }

    created_at = timestamp.Timestamp(db_api.watch_rule_get, 'created_at')
    updated_at = timestamp.Timestamp(db_api.watch_rule_get, 'updated_at')

    def __init__(self,
Example #7
0
 def __init__(self, is_not_light=False):
     """Initialize daemon state; nothing is started at construction time."""
     super(DFDaemon, self).__init__()
     self.is_not_light = is_not_light
     self.is_daemonize = False
     self.thread = None
     self.pool = eventlet.GreenPool()
Example #8
0
    def process_service(self, device_ids=None, removed_devices_info=None):
        """Process routing service updates across hosting devices.

        Collects updated/added/removed routers (or everything when a full
        sync is pending), groups them per hosting device, and dispatches
        per-device processing onto a green-thread pool. Any failure flips
        the fullsync flag so the next pass resynchronizes from scratch.

        :param device_ids: ids of newly discovered devices to sync
        :param removed_devices_info: dict describing removed devices
        """
        try:
            LOG.debug("Routing service processing started")
            resources = {}
            routers = []
            removed_routers = []
            all_routers_flag = False
            if self.fullsync:
                LOG.debug("FullSync flag is on. Starting fullsync")
                # Setting all_routers_flag and clear the global full_sync flag
                all_routers_flag = True
                self.fullsync = False
                self.updated_routers.clear()
                self.removed_routers.clear()
                self.sync_devices.clear()
                routers = self._fetch_router_info(all_routers=True)
            else:
                if self.updated_routers:
                    router_ids = list(self.updated_routers)
                    LOG.debug("Updated routers:%s", router_ids)
                    self.updated_routers.clear()
                    routers = self._fetch_router_info(router_ids=router_ids)
                if device_ids:
                    LOG.debug("Adding new devices:%s", device_ids)
                    self.sync_devices = set(device_ids) | self.sync_devices
                if self.sync_devices:
                    sync_devices_list = list(self.sync_devices)
                    LOG.debug("Fetching routers on:%s", sync_devices_list)
                    routers.extend(self._fetch_router_info(
                        device_ids=sync_devices_list))
                    self.sync_devices.clear()
                if removed_devices_info:
                    if removed_devices_info.get('deconfigure'):
                        ids = self._get_router_ids_from_removed_devices_info(
                            removed_devices_info)
                        self.removed_routers = self.removed_routers | set(ids)
                if self.removed_routers:
                    removed_routers_ids = list(self.removed_routers)
                    LOG.debug("Removed routers:%s", removed_routers_ids)
                    for r in removed_routers_ids:
                        if r in self.router_info:
                            removed_routers.append(self.router_info[r].router)

            # Sort on hosting device
            if routers:
                resources['routers'] = routers
            if removed_routers:
                resources['removed_routers'] = removed_routers
            hosting_devices = self._sort_resources_per_hosting_device(
                resources)

            # Dispatch process_services() for each hosting device
            pool = eventlet.GreenPool()
            for device_id, resources in hosting_devices.items():
                routers = resources.get('routers')
                removed_routers = resources.get('removed_routers')
                pool.spawn_n(self._process_routers, routers, removed_routers,
                             device_id, all_routers=all_routers_flag)
            pool.waitall()
            if removed_devices_info:
                for hd_id in removed_devices_info['hosting_data']:
                    self._drivermgr.remove_driver_for_hosting_device(hd_id)
            LOG.debug("Routing service processing successfully completed")
        except Exception:
            # Any unexpected failure schedules a full resync on the next run.
            LOG.exception(_LE("Failed processing routers"))
            self.fullsync = True
Example #9
0
from abc import ABCMeta, abstractmethod
import uuid

import eventlet

from .message import (Down, Monitor, Unmonitor, Cancel, Kill, Fork,
                      ForkWithMonitor, ForkResponse)
from .mailbox import Mailbox, AckableMailbox, LocalMailbox, Receiver

# Process-wide registry of actors — presumably keyed by actor id; it is
# populated elsewhere in this module (TODO confirm against the Actor class).
_actor_map = {}

# Shared green-thread pool on which actor work is spawned; the very large
# size makes spawning effectively unbounded.
_actor_pool = eventlet.GreenPool(size=1000000)


class ActorBase(Receiver, metaclass=ABCMeta):
    """Abstract base for every actor implementation.

    Concrete subclasses must supply the symmetric ``encode``/``decode``
    pair so that an actor can be converted to and rebuilt from a plain
    parameter representation.
    """

    @abstractmethod
    def encode(self):
        """Return an encoded (serializable) representation of this actor."""

    @staticmethod
    @abstractmethod
    def decode(params):
        """Rebuild an actor from *params* produced by :meth:`encode`."""


class Actor(ActorBase):
    __slots__ = [
        '_ack', '_inbox', '_outbox', '_callback', '_greenlet', '_observers'
    ]

    def __init__(self, callback, mailbox=None):
Example #10
0
 def test_update_while_iterating_agents(self):
     """List agents concurrently with add/delete churn, then compare results."""
     workers = eventlet.GreenPool(2)
     for task in (self._list_agents, self._add_and_delete_agents):
         workers.spawn(task)
     workers.waitall()
     self.assertEqual(self.names_ref, self.names_read)
Example #11
0
 def _start_process_queue(self):
     """Spawn the green threads that drain the ip_conntrack work queue."""
     LOG.debug("Starting ip_conntrack _process_queue_worker() threads")
     worker_pool = eventlet.GreenPool(size=WORKERS)
     for _ in range(WORKERS):
         worker_pool.spawn_n(self._process_queue_worker)
Example #12
0
def multiobject_download(
    urlList,
    downloadDirectory,
    log,
    timeStamp=True,
    timeout=180,
    concurrentDownloads=10,
    resetFilename=False,
    credentials=False,
    longTime=False,
    indexFilenames=False
):
    """
    *get multiple url documents and place them in specified download directory/directories*

    **Key Arguments:**
      - ``urlList`` -- list of document urls
      - ``downloadDirectory`` -- directory(ies) to download the documents to - can be one directory path or a list of paths the same length as urlList
      - ``log`` -- the logger
      - ``timeStamp`` -- append a timestamp to the name of the URL (ensure unique filenames)
      - ``longTime`` -- use a longer timestamp when appending to the filename (greater uniqueness)
      - ``timeout`` -- the timeout limit for downloads (secs)
      - ``concurrentDownloads`` -- the number of concurrent downloads allowed at any one time
      - ``resetFilename`` -- name(s) to reset the downloaded filename(s) to (indexed per-url when downloadDirectory is a single path)
      - ``credentials`` -- basic http credentials { 'username' : "...", 'password' : "..." }
      - ``indexFilenames`` -- prepend filenames with index (where url appears in urllist)

    **Return:**
      - list of timestamped documents (same order as the input urlList;
        ``None`` entries mark failed downloads)
    """
    ## > IMPORTS ##
    import sys
    import base64
    import socket
    # BUGFIX: urllib.request was used below but never imported (NameError)
    import urllib.request
    import eventlet
    from fundamentals.download import _fetch, _dump_files_to_local_drive, append_now_datestamp_to_filename, extract_filename_from_url

    ## >SETTINGS ##
    # TIMEOUT IN SECONDS -- applies to every socket the downloads open
    timeout = float(timeout)
    socket.setdefaulttimeout(timeout)

    def _build_request(url):
        # Build a urllib request for *url*, attaching a basic-auth header
        # when credentials were supplied (factored out of both branches).
        req = urllib.request.Request(url)
        if credentials:
            log.debug('adding the credentials')
            auth = '%s:%s' % (credentials["username"], credentials["password"])
            # BUGFIX: base64.encodestring was removed in Python 3.9 and
            # needed manual newline stripping; b64encode is newline-free.
            token = base64.b64encode(auth.encode("utf-8")).decode("ascii")
            req.add_header("Authorization", "Basic %s" % token)
        return req

    ###########################################################
    # >ACTION(S)                                              #
    ###########################################################
    # BUILD THE 2D ARRAY FOR MULTI_THREADED DOWNLOADS
    thisArray = []      # [url, localFilepath] pairs, in urlList order
    bodies = []
    localUrls = []
    theseUrls = []
    requestList = []

    totalCount = len(urlList)

    # IF ONLY ONE DOWNLOAD DIRECTORY
    if isinstance(downloadDirectory, ("".__class__, u"".__class__)):
        for i, url in enumerate(urlList):
            # EXTRACT THE FILENAME FROM THE URL
            if resetFilename and len(resetFilename):
                # NOTE(review): indexed access -- in this branch resetFilename
                # is expected to be a sequence of names, one per url
                filename = resetFilename[i]
            else:
                filename = extract_filename_from_url(log, url)
                if indexFilenames:
                    filename = """%(i)03d_%(filename)s""" % locals()

            if not filename:
                # Fall back to a timestamp-only filename
                from datetime import datetime
                now = datetime.now()
                filename = now.strftime("%Y%m%dt%H%M%S%f")

            if(timeStamp):
                # APPEND TIMESTAMP TO THE FILENAME
                filename = append_now_datestamp_to_filename(
                    log, filename, longTime=longTime)
            # GENERATE THE LOCAL FILE URL
            localFilepath = downloadDirectory + "/" + filename
            thisArray.extend([[url, localFilepath]])

            # GENERATE THE REQUESTS
            requestList.append(_build_request(url))

    elif isinstance(downloadDirectory, list):

        for u, d in zip(urlList, downloadDirectory):
            # EXTRACT THE FILENAME FROM THE URL
            if resetFilename:
                filename = resetFilename
            else:
                # BUGFIX: previously called extract_filename_from_url(log, url)
                # but `url` is not defined in this branch; the loop variable
                # is `u`
                filename = extract_filename_from_url(log, u)

            if not filename:
                continue

            if(timeStamp):
                # APPEND TIMESTAMP TO THE FILENAME
                filename = append_now_datestamp_to_filename(
                    log, filename)
            # GENERATE THE LOCAL FILE URL
            localFilepath = d + "/" + filename
            thisArray.extend([[u, localFilepath]])
            log.debug(" about to download %s" % (u,))

            # GENERATE THE REQUESTS
            requestList.append(_build_request(u))

    pool = eventlet.GreenPool(concurrentDownloads)
    i = 0
    try:

        log.debug(
            "starting mutli-threaded download batch - %s concurrent downloads" %
            (concurrentDownloads,))
        log.debug('len(requestList): %s' % (len(requestList),))
        # imap preserves input order, so index i lines up with thisArray
        for url, body in pool.imap(_fetch, requestList):
            urlNum = i + 1
            if urlNum > 1:
                # CURSOR UP ONE LINE AND CLEAR LINE
                sys.stdout.write("\x1b[1A\x1b[2K")
            percent = (float(urlNum) / float(totalCount)) * 100.
            print(
                "  %(urlNum)s / %(totalCount)s (%(percent)1.1f%%) URLs downloaded" % locals())

            if(body):
                bodies.extend([body])
                theseUrls.extend([thisArray[i][1]])
            else:
                # Failed download: keep the lists aligned with urlList order
                theseUrls.extend([None])
                bodies.extend([None])

            # DUMP THE FILES FROM MEMORY EVERY CONCURRENT DOWNLOAD CYCLE
            if i % concurrentDownloads == 0:
                _dump_files_to_local_drive(bodies, theseUrls, log)
                localUrls.extend(theseUrls)
                # RESET THE TMP ARRAYS
                bodies = []
                theseUrls = []
            i += 1
    except Exception as e:
        log.error(
            "something went wrong with the mutli-threaded download : " + str(e) + "\n")

    # DUMP REMAINING FILES TO THE LOCAL DRIVE
    _dump_files_to_local_drive(bodies, theseUrls, log)
    localUrls.extend(theseUrls)

    return localUrls
Example #13
0
def main(argv):
    """Run the sync-vs-async Splunk search benchmark selected on the CLI."""
    global urllib2
    usage = "async.py <sync | async>"

    # Parse the command line args.
    opts = parse(argv, {}, ".splunkrc")

    # The first positional argument must be "sync" or "async".
    allowed_args = ["sync", "async"]
    if len(opts.args) == 0 or opts.args[0] not in allowed_args:
        error("Must supply either of: %s" % allowed_args, 2)

    is_async = opts.args[0] == "async"

    # Python imports are scoped to the function, so we rebind the
    # module-level `urllib2` name explicitly: eventlet's green version when
    # async, the stdlib one otherwise. A real application would import just
    # one of these.
    if is_async:
        urllib2 = __import__('eventlet.green', globals(), locals(),
                             ['urllib2'], -1).urllib2
    else:
        urllib2 = __import__("urllib2", globals(), locals(), [], -1)

    # Create the service instance using our custom HTTP request handler.
    service = client.Service(handler=request, **opts.kwargs)
    service.login()

    # Start of the "benchmark" window.
    oldtime = datetime.datetime.now()

    def do_search(query):
        # Create a blocking search job. Under eventlet the coroutine yields
        # while waiting; in the sync case the whole thread blocks.
        job = service.jobs.create(query, exec_mode="blocking")

        # Fetch the results, then cancel the job.
        results = job.results()
        job.cancel()

        return results

    # Many identical queries make the parallelism advantage visible.
    queries = ['search * | head 100'] * 22

    # Execute all queries, either fanned out over a green pool or serially.
    if is_async:
        import eventlet

        # Create an `eventlet` pool of workers and farm out the queries;
        # the results themselves are not interesting here.
        pool = eventlet.GreenPool(16)
        for _ in pool.imap(do_search, queries):
            pass
    else:
        for query in queries:
            do_search(query)

    # End of the benchmark window; report the elapsed time.
    newtime = datetime.datetime.now()
    print("Elapsed Time: %s" % (newtime - oldtime))
Example #14
0
class Client(object):
    """
    Client for the Cisco Nexus1000V Neutron Plugin.

    This client implements functions to communicate with
    Cisco Nexus1000V VSM.

    For every Neutron objects, Cisco Nexus1000V Neutron Plugin
    creates a corresponding object in the controller (Cisco
    Nexus1000V VSM).

    CONCEPTS:

    Following are few concepts used in Nexus1000V VSM:

    port-profiles:
    Policy profiles correspond to port profiles on Nexus1000V VSM.
    Port profiles are the primary mechanism by which network policy is
    defined and applied to switch interfaces in a Nexus 1000V system.

    network-segment:
    Each network-segment represents a broadcast domain.

    network-segment-pool:
    A network-segment-pool contains one or more network-segments.

    logical-network:
    A logical-network contains one or more network-segment-pools.

    bridge-domain:
    A bridge-domain is created when the network-segment is of type VXLAN.
    Each VXLAN <--> VLAN combination can be thought of as a bridge domain.

    ip-pool:
    Each ip-pool represents a subnet on the Nexus1000V VSM.

    vm-network:
    vm-network refers to a network-segment and policy-profile.
    It maintains a list of ports that uses the network-segment and
    policy-profile this vm-network refers to.

    events:
    Events correspond to commands that are logged on Nexus1000V VSM.
    Events are used to poll for a certain resource on Nexus1000V VSM.
    Event type of port_profile: Return all updates/create/deletes
    of port profiles from the VSM.
    Event type of port_profile_update: Return only updates regarding
    policy-profiles.
    Event type of port_profile_delete: Return only deleted policy profiles.


    WORK FLOW:

    For every network profile a corresponding logical-network and
    a network-segment-pool, under this logical-network, will be created.

    For every network created from a given network profile, a
    network-segment will be added to the network-segment-pool corresponding
    to that network profile.

    A port is created on a network and associated with a policy-profile.
    Hence for every unique combination of a network and a policy-profile, a
    unique vm-network will be created and a reference to the port will be
    added. If the same combination of network and policy-profile is used by
    another port, the references to that port will be added to the same
    vm-network.


    """

    # Define paths for the URI where the client connects for HTTP requests.
    port_profiles_path = "/virtual-port-profile"
    network_segment_path = "/network-segment/%s"
    network_segment_pool_path = "/network-segment-pool/%s"
    ip_pool_path = "/ip-pool-template/%s"
    ports_path = "/kvm/vm-network/%s/ports"
    port_path = "/kvm/vm-network/%s/ports/%s"
    vm_networks_path = "/kvm/vm-network"
    vm_network_path = "/kvm/vm-network/%s"
    bridge_domains_path = "/kvm/bridge-domain"
    bridge_domain_path = "/kvm/bridge-domain/%s"
    logical_network_path = "/logical-network/%s"
    events_path = "/kvm/events"
    clusters_path = "/cluster"
    encap_profiles_path = "/encapsulation-profile"
    encap_profile_path = "/encapsulation-profile/%s"

    # Class-level green pool shared by all client instances for HTTP calls.
    pool = eventlet.GreenPool(c_conf.CISCO_N1K.http_pool_size)

    def __init__(self, **kwargs):
        """Initialize a new client for the plugin."""
        self.format = 'json'
        self.hosts = self._get_vsm_hosts()
        self.action_prefix = 'http://%s/api/n1k' % self.hosts[0]
        self.timeout = c_conf.CISCO_N1K.http_timeout

    def list_port_profiles(self):
        """
        Fetch all policy profiles from the VSM.

        :returns: JSON string
        """
        return self._get(self.port_profiles_path)

    def create_bridge_domain(self, network, overlay_subtype):
        """
        Create a bridge domain on VSM.

        :param network: network dict
        :param overlay_subtype: string representing subtype of overlay network
        """
        body = {
            'name': network['id'] + c_const.BRIDGE_DOMAIN_SUFFIX,
            'segmentId': network[providernet.SEGMENTATION_ID],
            'subType': overlay_subtype,
            'tenantId': network['tenant_id']
        }
        if overlay_subtype == c_const.NETWORK_SUBTYPE_NATIVE_VXLAN:
            body['groupIp'] = network[n1kv.MULTICAST_IP]
        return self._post(self.bridge_domains_path, body=body)

    def delete_bridge_domain(self, name):
        """
        Delete a bridge domain on VSM.

        :param name: name of the bridge domain to be deleted
        """
        return self._delete(self.bridge_domain_path % name)

    def create_network_segment(self, network, network_profile):
        """
        Create a network segment on the VSM.

        :param network: network dict
        :param network_profile: network profile dict
        """
        body = {
            'publishName': network['id'],
            'description': network['name'],
            'id': network['id'],
            'tenantId': network['tenant_id'],
            'networkSegmentPool': network_profile['id'],
        }
        if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VLAN:
            body['vlan'] = network[providernet.SEGMENTATION_ID]
        elif network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY:
            body['bridgeDomain'] = (network['id'] +
                                    c_const.BRIDGE_DOMAIN_SUFFIX)
        if network_profile['segment_type'] == c_const.NETWORK_TYPE_TRUNK:
            body['mode'] = c_const.NETWORK_TYPE_TRUNK
            body['segmentType'] = network_profile['sub_type']
            if network_profile['sub_type'] == c_const.NETWORK_TYPE_VLAN:
                body['addSegments'] = network['add_segment_list']
                body['delSegments'] = network['del_segment_list']
            else:
                body['encapProfile'] = (network['id'] +
                                        c_const.ENCAPSULATION_PROFILE_SUFFIX)
        else:
            body['mode'] = 'access'
            body['segmentType'] = network_profile['segment_type']
        return self._post(self.network_segment_path % network['id'], body=body)

    def update_network_segment(self, network_segment_id, body):
        """
        Update a network segment on the VSM.

        Network segment on VSM can be updated to associate it with an ip-pool
        or update its description and segment id.

        :param network_segment_id: UUID representing the network segment
        :param body: dict of arguments to be updated
        """
        return self._post(self.network_segment_path % network_segment_id,
                          body=body)

    def delete_network_segment(self, network_segment_id):
        """
        Delete a network segment on the VSM.

        :param network_segment_id: UUID representing the network segment
        """
        return self._delete(self.network_segment_path % network_segment_id)

    def create_logical_network(self, network_profile, tenant_id):
        """
        Create a logical network on the VSM.

        :param network_profile: network profile dict
        :param tenant_id: UUID representing the tenant
        """
        LOG.debug(_("Logical network"))
        body = {'description': network_profile['name'], 'tenantId': tenant_id}
        logical_network_name = (network_profile['id'] +
                                c_const.LOGICAL_NETWORK_SUFFIX)
        return self._post(self.logical_network_path % logical_network_name,
                          body=body)

    def delete_logical_network(self, logical_network_name):
        """
        Delete a logical network on VSM.

        :param logical_network_name: string representing name of the logical
                                     network
        """
        return self._delete(self.logical_network_path % logical_network_name)

    def create_network_segment_pool(self, network_profile, tenant_id):
        """
        Create a network segment pool on the VSM.

        :param network_profile: network profile dict
        :param tenant_id: UUID representing the tenant
        """
        LOG.debug(_("network_segment_pool"))
        logical_network_name = (network_profile['id'] +
                                c_const.LOGICAL_NETWORK_SUFFIX)
        body = {
            'name': network_profile['name'],
            'description': network_profile['name'],
            'id': network_profile['id'],
            'logicalNetwork': logical_network_name,
            'tenantId': tenant_id
        }
        if network_profile['segment_type'] == c_const.NETWORK_TYPE_OVERLAY:
            body['subType'] = network_profile['sub_type']
        return self._post(self.network_segment_pool_path %
                          network_profile['id'],
                          body=body)

    def update_network_segment_pool(self, network_profile):
        """
        Update a network segment pool on the VSM.

        :param network_profile: network profile dict
        """
        body = {
            'name': network_profile['name'],
            'description': network_profile['name']
        }
        return self._post(self.network_segment_pool_path %
                          network_profile['id'],
                          body=body)

    def delete_network_segment_pool(self, network_segment_pool_id):
        """
        Delete a network segment pool on the VSM.

        :param network_segment_pool_id: UUID representing the network
                                        segment pool
        """
        return self._delete(self.network_segment_pool_path %
                            network_segment_pool_id)

    def create_ip_pool(self, subnet):
        """
        Create an ip-pool on the VSM.

        :param subnet: subnet dict
        """
        if subnet['cidr']:
            try:
                ip = netaddr.IPNetwork(subnet['cidr'])
                netmask = str(ip.netmask)
                network_address = str(ip.network)
            except (ValueError, netaddr.AddrFormatError):
                msg = _("Invalid input for CIDR")
                raise n_exc.InvalidInput(error_message=msg)
        else:
            netmask = network_address = ""

        if subnet['allocation_pools']:
            address_range_start = subnet['allocation_pools'][0]['start']
            address_range_end = subnet['allocation_pools'][0]['end']
        else:
            address_range_start = None
            address_range_end = None

        body = {
            'addressRangeStart': address_range_start,
            'addressRangeEnd': address_range_end,
            'ipAddressSubnet': netmask,
            'description': subnet['name'],
            'gateway': subnet['gateway_ip'],
            'dhcp': subnet['enable_dhcp'],
            'dnsServersList': subnet['dns_nameservers'],
            'networkAddress': network_address,
            'netSegmentName': subnet['network_id'],
            'id': subnet['id'],
            'tenantId': subnet['tenant_id']
        }
        return self._post(self.ip_pool_path % subnet['id'], body=body)

    def update_ip_pool(self, subnet):
        """
        Update an ip-pool on the VSM.

        :param subnet: subnet dictionary
        """
        body = {
            'description': subnet['name'],
            'dhcp': subnet['enable_dhcp'],
            'dnsServersList': subnet['dns_nameservers']
        }
        return self._post(self.ip_pool_path % subnet['id'], body=body)

    def delete_ip_pool(self, subnet_id):
        """
        Delete an ip-pool on the VSM.

        :param subnet_id: UUID representing the subnet
        """
        return self._delete(self.ip_pool_path % subnet_id)

    def create_vm_network(self, port, vm_network_name, policy_profile):
        """
        Create a VM network on the VSM.

        :param port: port dict
        :param vm_network_name: name of the VM network
        :param policy_profile: policy profile dict
        """
        body = {
            'name': vm_network_name,
            'networkSegmentId': port['network_id'],
            'networkSegment': port['network_id'],
            'portProfile': policy_profile['name'],
            'portProfileId': policy_profile['id'],
            'tenantId': port['tenant_id'],
            'portId': port['id'],
            'macAddress': port['mac_address'],
        }
        if port.get('fixed_ips'):
            body['ipAddress'] = port['fixed_ips'][0]['ip_address']
            body['subnetId'] = port['fixed_ips'][0]['subnet_id']
        return self._post(self.vm_networks_path, body=body)

    def delete_vm_network(self, vm_network_name):
        """
        Delete a VM network on the VSM.

        :param vm_network_name: name of the VM network
        """
        return self._delete(self.vm_network_path % vm_network_name)

    def create_n1kv_port(self, port, vm_network_name):
        """
        Create a port on the VSM.

        :param port: port dict
        :param vm_network_name: name of the VM network which imports this port
        """
        body = {'id': port['id'], 'macAddress': port['mac_address']}
        if port.get('fixed_ips'):
            body['ipAddress'] = port['fixed_ips'][0]['ip_address']
            body['subnetId'] = port['fixed_ips'][0]['subnet_id']
        return self._post(self.ports_path % vm_network_name, body=body)

    def update_n1kv_port(self, vm_network_name, port_id, body):
        """
        Update a port on the VSM.

        Update the mac address associated with the port

        :param vm_network_name: name of the VM network which imports this port
        :param port_id: UUID of the port
        :param body: dict of the arguments to be updated
        """
        return self._post(self.port_path % (vm_network_name, port_id),
                          body=body)

    def delete_n1kv_port(self, vm_network_name, port_id):
        """
        Delete a port on the VSM.

        :param vm_network_name: name of the VM network which imports this port
        :param port_id: UUID of the port
        """
        return self._delete(self.port_path % (vm_network_name, port_id))

    def _do_request(self, method, action, body=None, headers=None):
        """
        Perform the HTTP request.

        The response is in either JSON format or plain text. A GET method will
        invoke a JSON response while a PUT/POST/DELETE returns message from the
        VSM in plain text format.
        Exception is raised when VSM replies with an INTERNAL SERVER ERROR HTTP
        status code (500) i.e. an error has occurred on the VSM or SERVICE
        UNAVAILABLE (503) i.e. VSM is not reachable.

        :param method: type of the HTTP request. POST, GET, PUT or DELETE
        :param action: path to which the client makes request
        :param body: dict for arguments which are sent as part of the request
        :param headers: header for the HTTP request
        :returns: JSON or plain text in HTTP response
        """
        action = self.action_prefix + action
        if not headers and self.hosts:
            headers = self._get_auth_header(self.hosts[0])
        # BUGFIX: when no headers were supplied and no VSM hosts are
        # configured, `headers` stayed None and the assignments below raised
        # TypeError instead of a meaningful VSM error.
        if headers is None:
            headers = {}
        headers['Content-Type'] = self._set_content_type('json')
        headers['Accept'] = self._set_content_type('json')
        if body:
            body = jsonutils.dumps(body, indent=2)
            LOG.debug(_("req: %s"), body)
        try:
            # Run the blocking HTTP call on the shared green pool and wait
            # for its result so callers keep a synchronous interface.
            resp = self.pool.spawn(requests.request,
                                   method,
                                   url=action,
                                   data=body,
                                   headers=headers,
                                   timeout=self.timeout).wait()
        except Exception as e:
            raise c_exc.VSMConnectionFailed(reason=e)
        LOG.debug(_("status_code %s"), resp.status_code)
        if resp.status_code == requests.codes.OK:
            if 'application/json' in resp.headers['content-type']:
                try:
                    return resp.json()
                except ValueError:
                    return {}
            elif 'text/plain' in resp.headers['content-type']:
                LOG.debug(_("VSM: %s"), resp.text)
        else:
            raise c_exc.VSMError(reason=resp.text)

    def _set_content_type(self, format=None):
        """
        Set the mime-type to either 'xml' or 'json'.

        :param format: format to be set.
        :return: mime-type string
        """
        if not format:
            format = self.format
        return "application/%s" % format

    def _delete(self, action, body=None, headers=None):
        return self._do_request("DELETE", action, body=body, headers=headers)

    def _get(self, action, body=None, headers=None):
        return self._do_request("GET", action, body=body, headers=headers)

    def _post(self, action, body=None, headers=None):
        return self._do_request("POST", action, body=body, headers=headers)

    def _put(self, action, body=None, headers=None):
        return self._do_request("PUT", action, body=body, headers=headers)

    def _get_vsm_hosts(self):
        """
        Retrieve a list of VSM ip addresses.

        :return: list of host ip addresses
        """
        return [
            cr[c_const.CREDENTIAL_NAME]
            for cr in network_db_v2.get_all_n1kv_credentials()
        ]

    def _get_auth_header(self, host_ip):
        """
        Retrieve header with auth info for the VSM.

        :param host_ip: IP address of the VSM
        :return: authorization header dict
        """
        username = c_cred.Store.get_username(host_ip)
        password = c_cred.Store.get_password(host_ip)
        auth = base64.encodestring("%s:%s" % (username, password)).rstrip()
        header = {"Authorization": "Basic %s" % auth}
        return header

    def get_clusters(self):
        """Fetches a list of all vxlan gateway clusters."""
        return self._get(self.clusters_path)

    def create_encapsulation_profile(self, encap):
        """
        Create an encapsulation profile on VSM.

        :param encap: encapsulation dict
        """
        body = {
            'name': encap['name'],
            'addMappings': encap['add_segment_list'],
            'delMappings': encap['del_segment_list']
        }
        return self._post(self.encap_profiles_path, body=body)

    def update_encapsulation_profile(self, context, profile_name, body):
        """
        Adds a vlan to bridge-domain mapping to an encapsulation profile.

        :param profile_name: Name of the encapsulation profile
        :param body: mapping dictionary
        """
        return self._post(self.encap_profile_path % profile_name, body=body)

    def delete_encapsulation_profile(self, name):
        """
        Delete an encapsulation profile on VSM.

        :param name: name of the encapsulation profile to be deleted
        """
        return self._delete(self.encap_profile_path % name)
Example #15
0
import eventlet


def handle(fd):
    """Echo every non-EOF line read from *fd* back to it."""
    print("client connected")
    # readline() returns "" at EOF, which is the iter() sentinel here
    for line in iter(fd.readline, ""):
        fd.write(line)
        fd.flush()
        print("echoed", line, end=" ")
    print("client disconnected")


if __name__ == "__main__":
    # Listen on all interfaces; each accepted client is served in its own
    # green thread so a slow client cannot block new connections.
    server = eventlet.listen(("0.0.0.0", 6000))
    pool = eventlet.GreenPool(10000)
    while True:
        try:
            new_sock, address = server.accept()
            print("accepted", address)
            # makefile("rw") wraps the socket in a file-like object that
            # handle() reads from and writes to
            pool.spawn_n(handle, new_sock.makefile("rw"))
        except (SystemExit, KeyboardInterrupt):
            break
conn.close()


def run(filename_list):
    conn = httplib.HTTPSConnection(CONNECTION_ENDPOINT)
    for filename in filename_list:
        start = time.time()
        with open(filename, 'rb') as f:
            conn.request('PUT',
                         CONTAINER_PATH + '/' + filename,
                         body=f,
                         headers=SEND_HEADERS)
        resp = conn.getresponse()
        resp.read()
        print '%s uploaded in %.4f seconds' % (filename, (time.time() - start))
        sys.stdout.flush()
    conn.close()


data_list = [
    'test_data/%s' % x for x in os.listdir('test_data') if x.endswith('.dat')
]
len_data_list = len(data_list)
concurrency = min(len_data_list, 20)
print 'uploading %d files with a concurrency of %d' % (len_data_list,
                                                       concurrency)
pool = eventlet.GreenPool(size=concurrency)
for i in xrange(len_data_list / concurrency):
    pool.spawn(run, data_list[concurrency * i:concurrency * (i + 1)])
pool.waitall()
Example #17
0
    def big_object_get(self,
                       bucket_name,
                       object_name,
                       file_path=None,
                       thread_size=4,
                       part_size=32 * 1024 * 124):
        """Download a large object with concurrent HTTP Range requests.

        :param bucket_name: bucket holding the object
        :param object_name: key of the object to fetch
        :param file_path: local destination path (defaults to object_name)
        :param thread_size: number of concurrent green threads
        :param part_size: byte length fetched per Range request
        :raises: exceptions raised via exceptions.from_response on any
            HTTP status >= 400
        """
        if not file_path:
            file_path = object_name

        if os.path.exists(file_path): os.remove(file_path)

        # Get the total length of the big object
        resource = '/%s/%s' % (bucket_name, object_name)
        conn = self.conn.get_request_conn('GET', resource)
        response = conn.getresponse()
        if response.status >= 400:
            raise exceptions.from_response(response.status,
                                           body=response.read())
        content_length = int(dict(response.getheaders())['content-length'])

        # Pre-create the destination so every worker can open it for
        # in-place writes. The original 'awb' append mode was broken: on
        # POSIX, append-mode writes always land at end-of-file regardless
        # of seek(), so concurrent pieces would interleave at the tail.
        with open(file_path, 'wb'):
            pass

        # Inner Method, Download a part_size piece of data and write file.
        @utils.retries(5)
        def get_piece(start_pos, end_pos):
            if end_pos >= content_length:
                bytes_str = 'bytes=%s-' % start_pos
            else:
                bytes_str = 'bytes=%s-%s' % (start_pos, end_pos - 1)
            headers = {'Range': bytes_str}
            cli = self.new_httpclient()
            cli_conn = cli.get_request_conn('GET', resource, headers)
            resp = cli_conn.getresponse()
            if resp.status >= 400:
                raise exceptions.from_response(resp.status, resp.read())

            # Receive io_buffer_size bytes at a time, writing at this
            # piece's own offset ('rb+' allows positioned writes).
            with open(file_path, 'rb+') as fd:
                fd.seek(start_pos)
                start = 0
                while start < (end_pos - start_pos):
                    data = resp.read(self.io_buffer_size)
                    if len(data) <= 0:
                        break
                    fd.write(data)
                    start += self.io_buffer_size
            # close socket promptly instead of waiting for GC
            resp.close()

        # Fan the ranged GETs out across a bounded green pool.
        green_pool = eventlet.GreenPool(thread_size)
        start_pos = 0
        end_pos = part_size
        while start_pos < content_length:
            green_pool.spawn(get_piece, start_pos, end_pos)
            start_pos = end_pos
            end_pos += part_size

        # Wait all green finished
        green_pool.waitall()
        local_size = os.path.getsize(file_path)
        # Message fixed: the assertion fires exactly when the sizes DO
        # differ (the original said "is not different from").
        error_msg = ('Size of local object(%s bytes) is different from'
                     ' the size of remote jss object(%s bytes)'
                     % (local_size, content_length))
        assert local_size == content_length, error_msg
Example #18
0
 def create_pool(self):
     """Return a fresh GreenPool sized by this worker's thread budget."""
     pool_size = self.threads
     return eventlet.GreenPool(size=pool_size)
Example #19
0
    def __init__(self,
                 name,
                 app,
                 host=None,
                 port=None,
                 pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol,
                 backlog=128):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to server the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :param protocol: eventlet HTTP protocol class for connections.
        :param backlog: Maximum number of queued connections (>= 1).
        :returns: None

        """
        # Allow operators to customize http requests max header line size.
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        self.name = name
        self.app = app
        self._host = host or "0.0.0.0"
        self._port = port or 0
        self._server = None
        self._socket = None
        self._protocol = protocol
        self.pool_size = pool_size or self.default_pool_size
        self._pool = eventlet.GreenPool(self.pool_size)
        self._logger = logging.getLogger("eventlet.wsgi.server")
        self._wsgi_logger = logging.WritableLogger(self._logger)

        if backlog < 1:
            # Message fixed to match the check: only values below 1 are
            # rejected, so a backlog "more than 0" is acceptable.
            raise exception.InvalidInput(
                reason='The backlog must be more than 0')

        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0], bind_addr[1],
                                      socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            # Fall back to IPv4 when address resolution fails.
            family = socket.AF_INET

        cert_file = CONF.ssl_cert_file
        key_file = CONF.ssl_key_file
        ca_file = CONF.ssl_ca_file
        # Either a cert or a key option implies the operator wants SSL;
        # the combination is validated below.
        self._use_ssl = cert_file or key_file

        if cert_file and not os.path.exists(cert_file):
            raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

        if ca_file and not os.path.exists(ca_file):
            raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

        if key_file and not os.path.exists(key_file):
            raise RuntimeError(_("Unable to find key_file : %s") % key_file)

        if self._use_ssl and (not cert_file or not key_file):
            raise RuntimeError(
                _("When running server in SSL mode, you "
                  "must specify both a cert_file and "
                  "key_file option value in your "
                  "configuration file."))

        # Retry binding for up to 30 seconds: the port may still be held
        # by a previous instance in TIME_WAIT / shutdown.
        retry_until = time.time() + 30
        while not self._socket and time.time() < retry_until:
            try:
                self._socket = eventlet.listen(bind_addr,
                                               backlog=backlog,
                                               family=family)
            except socket.error as err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)

        if not self._socket:
            raise RuntimeError(
                _("Could not bind to %(host)s:%(port)s "
                  "after trying for 30 seconds") % {
                      'host': host,
                      'port': port
                  })

        # Record the address actually bound (port may have been 0).
        (self._host, self._port) = self._socket.getsockname()[0:2]
        LOG.info(
            _("%(name)s listening on %(_host)s:%(_port)s") % self.__dict__)
Example #20
0
    def __init__(self, host, conf=None):
        """Initialize the L3 agent.

        :param host: hostname this agent runs on; passed to the L3 plugin
            RPC proxy and the manager base class
        :param conf: optional config object; falls back to the global
            cfg.CONF when not provided
        """
        if conf:
            self.conf = conf
        else:
            self.conf = cfg.CONF
        # Per-router state; presumably keyed by router id — confirm against
        # the sync code that populates it.
        self.router_info = {}

        self._check_config_params()

        self.process_monitor = external_process.ProcessMonitor(
            config=self.conf,
            resource_type='router')

        self.driver = common_utils.load_interface_driver(self.conf)

        self._context = n_context.get_admin_context_without_session()
        self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)
        # Force a full resync on first sync cycle after startup.
        self.fullsync = True
        self.sync_routers_chunk_size = SYNC_ROUTERS_MAX_CHUNK_SIZE

        # Get the HA router count from Neutron Server
        # This is the first place where we contact neutron-server on startup
        # so retry in case its not ready to respond.
        # NOTE(review): retries immediately with no backoff between
        # MessagingTimeout attempts; assumes 'self.context' is a property
        # exposing the _context set above — confirm in the base class.
        while True:
            try:
                self.ha_router_count = int(
                    self.plugin_rpc.get_host_ha_router_count(self.context))
            except oslo_messaging.MessagingTimeout as e:
                LOG.warning('l3-agent cannot contact neutron server '
                            'to retrieve HA router count. '
                            'Check connectivity to neutron server. '
                            'Retrying... '
                            'Detailed message: %(msg)s.', {'msg': e})
                continue
            break
        LOG.info("Agent HA routers count %s", self.ha_router_count)

        self.init_extension_manager(self.plugin_rpc)

        # Metadata proxy is optional and driven by configuration.
        self.metadata_driver = None
        if self.conf.enable_metadata_proxy:
            self.metadata_driver = metadata_driver.MetadataDriver(self)

        self.namespaces_manager = namespace_manager.NamespaceManager(
            self.conf,
            self.driver,
            self.metadata_driver)

        # L3 agent router processing green pool
        self._pool_size = ROUTER_PROCESS_GREENLET_MIN
        self._pool = eventlet.GreenPool(size=self._pool_size)
        self._queue = queue.ResourceProcessingQueue()
        super(L3NATAgent, self).__init__(host=self.conf.host)

        self.target_ex_net_id = None
        self.use_ipv6 = ipv6_utils.is_enabled_and_bind_by_default()

        # Prefix delegation helper; receives callbacks for prefix updates
        # and for queueing router updates.
        self.pd = pd.PrefixDelegation(self.context, self.process_monitor,
                                      self.driver,
                                      self.plugin_rpc.process_prefix_update,
                                      self.create_pd_router_update,
                                      self.conf)

        # Consume network updates to trigger router resync
        consumers = [[topics.NETWORK, topics.UPDATE]]
        agent_rpc.create_consumers([self], topics.AGENT, consumers)

        self._check_ha_router_process_status()
Example #21
0
 def __init__(self, threads=1000):
     """Create the worker with a green-thread pool of *threads* slots."""
     pool_size = threads
     self.pool = eventlet.GreenPool(pool_size)
Example #22
0
class GenericApiRequest(request.ApiRequest):
    '''A non Eventlet-based ApiRequest class.

    Requests are executed synchronously in the calling thread: join() is a
    no-op and start() blocks until the request completes. (Docstring fixed:
    the original was copy-pasted from the eventlet-based variant.)
    '''

    # Maximum number of green threads present in the system at one time.
    API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE

    # Pool of green threads. One green thread is allocated per incoming
    # request. Incoming requests will block when the pool is empty.
    API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE)

    # A unique id is assigned to each incoming request. When the current
    # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0.
    MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID

    # The request id for the next incoming request.
    CURRENT_REQUEST_ID = 0

    def __init__(self,
                 client_obj,
                 url,
                 method="GET",
                 body=None,
                 headers=None,
                 request_id=CURRENT_REQUEST_ID,
                 retries=DEFAULT_RETRIES,
                 auto_login=True,
                 redirects=DEFAULT_REDIRECTS,
                 http_timeout=DEFAULT_HTTP_TIMEOUT,
                 client_conn=None):
        '''Constructor.

        :param client_obj: API client issuing this request
        :param url: target URL
        :param method: HTTP method, defaults to GET
        :param body: optional request body
        :param headers: optional dict of HTTP headers
        :param request_id: id recorded for this request
        :param retries: maximum retry attempts
        :param auto_login: whether to re-authenticate automatically
        :param redirects: maximum redirects to follow
        :param http_timeout: per-attempt HTTP timeout in seconds
        :param client_conn: optional pre-established connection
        '''
        self._api_client = client_obj
        self._url = url
        self._method = method
        # NOTE(review): a falsy body (e.g. {} or '') is coerced to None —
        # confirm no caller relies on sending an empty body.
        self._body = body if body else None
        self._headers = headers or {}
        # Overall request budget: one http_timeout per allowed retry.
        self._request_timeout = http_timeout * retries
        self._retries = retries
        self._auto_login = auto_login
        self._redirects = redirects
        self._http_timeout = http_timeout
        self._client_conn = client_conn
        self._abort = False
        self._request_error = None

        if "User-Agent" not in self._headers:
            self._headers["User-Agent"] = DEFAULT_USER_AGENT

        # Retrieve and store this instance's unique request id.
        self._request_id = request_id

    def join(self):
        '''Wait for instance green thread to complete.

        No-op here: the request already ran synchronously in start().
        '''
        pass

    def start(self):
        '''Start request processing.'''
        return self._handle_request()

    def _handle_request(self):
        '''First level request handling.

        Issues the request, retrying on auth/availability errors up to
        self._retries times; returns the final HTTPResponse, or None when
        every attempt produced an error object.
        '''
        attempt = 0
        timeout = 0
        badstatus = 0
        response = None
        while response is None and attempt <= self._retries:
            attempt += 1
            try:
                req = self._issue_request()
            except (httplib.BadStatusLine, socket.error):
                if badstatus <= DEFAULT_RETRIES:
                    badstatus += 1
                    attempt -= 1
                    LOG.error("request {method} {url} {body} error".format(
                        method=self._method, url=self._url, body=self._body))
                    continue
                # Connection-level retries exhausted: re-raise. The
                # original fell out of the handler here and then hit a
                # NameError on the unbound 'req' below.
                raise
            # automatically raises any exceptions returned.
            if isinstance(req, httplib.HTTPResponse):
                # NOTE(review): 'timeout' is assigned but never consumed
                # (there is no sleep between retries) — confirm whether a
                # backoff was intended here.
                timeout = 0
                if attempt <= self._retries and not self._abort:
                    # currently there is a bug in fortios, it return 401 and
                    # 400 when a cookie is invalid, the change is to tolerant
                    # the bug to handle return 400 situation.
                    # when fortios fix the bug, here should use
                    # 'req.status in (401, 403)' instead
                    if req.status in (400, 401, 403):
                        continue
                    elif req.status == 503:
                        timeout = 0.5
                        continue
                    # else fall through to return the error code

                LOG.info(
                    "[%(rid)d] Completed request '%(method)s %(url)s'"
                    ": %(status)s", {
                        'rid': self._rid(),
                        'method': self._method,
                        'url': self._url,
                        'status': req.status
                    })
                self._request_error = None
                response = req
            else:
                LOG.info(
                    _LI('[%(rid)d] Error while handling request: '
                        '%(req)s'), {
                            'rid': self._rid(),
                            'req': req
                        })
                self._request_error = req
                response = None
        return response
Example #23
0
    def __init__(self,
                 name,
                 app,
                 host='0.0.0.0',
                 port=0,
                 pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol,
                 backlog=128,
                 use_ssl=False,
                 max_url_len=None):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to server the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :param backlog: Maximum number of queued connections.
        :param max_url_len: Maximum length of permitted URLs.
        :returns: None
        :raises: masakari.exception.InvalidInput
        """
        # Allow operators to customize http requests max header line size.
        eventlet.wsgi.MAX_HEADER_LINE = CONF.wsgi.max_header_line
        self.name = name
        self.app = app
        self._server = None
        self._protocol = protocol
        self.pool_size = pool_size or self.default_pool_size
        self._pool = eventlet.GreenPool(self.pool_size)
        self._logger = logging.getLogger("masakari.%s.wsgi.server" % self.name)
        self._use_ssl = use_ssl
        self._max_url_len = max_url_len

        # NOTE(review): a configured timeout of 0 becomes None here —
        # presumably meaning "no timeout"; confirm against the eventlet
        # wsgi server's handling of client_socket_timeout.
        self.client_socket_timeout = CONF.wsgi.client_socket_timeout or None

        if backlog < 1:
            raise exception.InvalidInput(
                reason=_('The backlog must be more than 0'))

        bind_addr = (host, port)
        # Resolve the bind address to pick the right socket family;
        # fall back to IPv4 when resolution fails.
        try:
            info = socket.getaddrinfo(bind_addr[0], bind_addr[1],
                                      socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            family = socket.AF_INET

        try:
            self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
        except EnvironmentError:
            LOG.error("Could not bind to %(host)s:%(port)d", {
                'host': host,
                'port': port
            })
            raise

        # Record the address actually bound (port may have been 0).
        (self.host, self.port) = self._socket.getsockname()[0:2]
        LOG.info("%(name)s listening on %(host)s:%(port)d", {
            'name': self.name,
            'host': self.host,
            'port': self.port
        })
Example #24
0
 def __init__(self, port, handler=None):
     """Set up listener state: port, optional handler callback, a large
     green pool, and a weak-valued map of live connections."""
     Publisher.__init__(self)
     self.handler = handler
     self.port = port
     self.pool = eventlet.GreenPool(10000)
     self.connections = weakref.WeakValueDictionary()
Example #25
0
        rec = sock.recv(1024)
        if not rec:
            break

        if rec.decode() in model:
            data = eval(rec.decode().replace('.', '_'))()
            logger.debug('execute function %s' %
                         rec.decode().replace('.', '_'))
            if isinstance(data, int) or isinstance(data, float):
                sock.send(str(data).encode())
            elif isinstance(data, str):
                sock.sendall(data.encode())
        else:
            sock.sendall(b'no')


# Bind the agent socket and size the client green pool. The finally clause
# logs the listen address whether or not binding succeeded (preserved from
# the original). The original 'except Exception as msg: raise msg' handlers
# were no-ops that (under Python 2 semantics) discarded the traceback, so
# they are removed; exceptions now propagate unchanged.
try:
    server = eventlet.listen((agent_conf.listen, agent_conf.agent_port))
    pool = eventlet.GreenPool(agent_conf.maxclient)
finally:
    logger.info("listen %s:%s" % (agent_conf.listen, agent_conf.agent_port))

# Accept clients forever; each connection is handled in its own green thread.
while True:
    sock, address = server.accept()
    pool.spawn_n(handle, sock)
    logger.info(address)