Example #1
def cloud(request, mocked_driver):
    """instantiates clouds"""
    name = request.param

    def __connect_and_check(cloud):
        cloud._connect()
        assert cloud.connected

    if name == cloud_names[0]:
        dums = dummy.DummyCloud(name + "-test", None)
        __connect_and_check(dums)
        assert isinstance(dums.driver, get_driver(Provider.DUMMY))
        return dums
    elif name == cloud_names[1]:
        aws_ec2 = ec2.EC2Cloud(cloud_name, config_parser(name))
        aws_ec2.set_context("")
        __connect_and_check(aws_ec2)
        mocked_driver.mock_add_spec(EC2NodeDriver, True)
        assert isinstance(aws_ec2.driver, get_driver(Provider.EC2_US_WEST_OREGON))
        aws_ec2.driver = mocked_driver
        return aws_ec2
    elif name == cloud_names[2]:
        nebula = opennebula.OpenNebulaCloud(cloud_name, config_parser(name))
        nebula.set_context("")
        __connect_and_check(nebula)
        mocked_driver.mock_add_spec(OpenNebulaNodeDriver, True)
        assert isinstance(nebula.driver, get_driver(Provider.OPENNEBULA))
        # lets us iterate over the nodes
        mocked_driver.create_node.side_effect = __generate_nodes([3, 5])
        nebula.driver = mocked_driver
        return nebula
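
The fixture above reads `request.param`, so it is evidently registered as a parametrized pytest fixture. A minimal sketch of that registration and a consuming test, assuming a module-level `cloud_names` list (the real values live in the original test module):

import pytest

cloud_names = ["dummy", "ec2", "opennebula"]  # placeholder names


@pytest.fixture(params=cloud_names)
def cloud(request, mocked_driver):
    ...  # body as shown in the example above


def test_cloud_connects(cloud):
    # pytest runs this once per entry in cloud_names
    assert cloud.connected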
Example #2
 def test_error_is_thrown_on_accessing_old_constant(self):
     for provider in DEPRECATED_RACKSPACE_PROVIDERS:
         try:
             get_driver(provider)
         except Exception:
             e = sys.exc_info()[1]
             self.assertTrue(str(e).find('has been removed') != -1)
         else:
             self.fail('Exception was not thrown')
Example #3
 def __init__(self, config):
     super(VSphereConnector, self).__init__(config)
     self.type = "vsphere"
     self.logger = logging.getLogger("hyclops.connector." + self.type)
     try:
         self.driver = get_driver("vsphere")
     except AttributeError:
         set_driver("vsphere", 'hyclops.libcloud_driver.vsphere', 'VSphereNodeDriver')
         self.driver = get_driver("vsphere")
Example #4
def _get_driver(provider):
	""" Get the driver for the given provider """
	_provider_exists(provider)
	if provider == 'ec2_us_west':
		driver = get_driver(Provider.EC2_US_WEST)
	elif provider == 'ec2_us_east':
		driver = get_driver(Provider.EC2_US_EAST)
	elif provider == 'rackspace':
		driver = get_driver(Provider.RACKSPACE)
	return driver
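
The if/elif chain above has to grow with every new region. A table-driven sketch of the same mapping, assuming the `_provider_exists` helper and the libcloud `Provider` constants used in the example:

from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

# Map of supported provider names to libcloud Provider constants.
_PROVIDERS = {
    'ec2_us_west': Provider.EC2_US_WEST,
    'ec2_us_east': Provider.EC2_US_EAST,
    'rackspace': Provider.RACKSPACE,
}


def _get_driver(provider):
    """ Get the driver for the given provider """
    _provider_exists(provider)  # assumed to validate the name or raise, as above
    return get_driver(_PROVIDERS[provider])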
Example #5
    def connect(self, label, project):
        """
        establishes a connection to the eucalyptus cloud,
        i.e. initializes the needed components to conduct subsequent
        queries.
        """

        # from old eucalyptus_libcloud
        # path = os.environ['HOME'] + "/" + \
        # self.credentials.location.replace("/eucarc", "")
        # os.environ['CA_CERTS_PATH'] = path
        #         libcloud.security.CA_CERTS_PATH.append(self.credential['EUCALYPTUS_CERT'])

        self.label = label
        self.project = project

        # copied from deprecated code
        # if project is None:
        #    self.activate_project("fg82")
        # else:
        #    self.activate_project(project)

        print("Loading", self.label, self.project)
        Driver = get_driver(Provider.EUCALYPTUS)

        self.config = cm_config()

        cred = self.config.get(self.label, expand=True)

        euca_id = cred['EC2_ACCESS_KEY']
        euca_key = cred['EC2_SECRET_KEY']
        ec2_url = cred['EC2_URL']

        result = urlparse.urlparse(ec2_url)
        is_secure = (result.scheme == 'https')
        if ":" in result.netloc:
            host_port_tuple = result.netloc.split(':')
            host = host_port_tuple[0]
            port = int(host_port_tuple[1])
        else:
            host = result.netloc
            port = None

        path = result.path

        self.credential = self.config.get(self.label, expand=True)
        pprint(self.credential)

        # libcloud.security.CA_CERTS_PATH.append(self.credential['EUCALYPTUS_CERT'])
        # libcloud.security.VERIFY_SSL_CERT = False

        Driver = get_driver(Provider.EUCALYPTUS)
        self.cloud = Driver(
            key=euca_id, secret=euca_key, secure=False, host=host, path=path, port=port)
Example #6
 def myConn(self, region):
   Driver = None
   if region == 'us-west-1':
     Driver = get_driver(Provider.EC2_US_WEST)
   elif region == 'us-west-2':
     Driver = get_driver(Provider.EC2_US_WEST_OREGON)
   elif region == 'eu-west-1':
     Driver = get_driver(Provider.EC2_EU_WEST)
   else:
     Driver = get_driver(Provider.EC2_US_EAST)
   
   return Driver(self.AWS_ACCESS_KEY, self.AWS_SECRET_KEY)
Example #7
    def _connect(self):
        if self.ec2_region == "ec2.us-east-1.amazonaws.com":
            EC2Driver = get_driver(Provider.EC2_US_EAST)
        elif self.ec2_region == "ec2.us-west-2.amazonaws.com":
            EC2Driver = get_driver(Provider.EC2_US_WEST_OREGON)
        elif self.ec2_region == "ec2.eu-west-1.amazonaws.com":
            EC2Driver = get_driver(Provider.EC2_EU_WEST)
        else:
            raise Exception('Unknown EC2 region: %s' % self.ec2_region)

        self.driver = EC2Driver(self.user,
                                self.passwd)
        self.connected = True
Example #8
    def __chooseDestination(self, manifest, config=None, task_opts=None):
        """creates a destination object according to the config"""
        destination = DEST_LOCAL
        #choose destination
        if config:
            destination = config.get_destination_type()

        if destination == DEST_TEST_VHD:
            import LocalVhdDestination
            return LocalVhdDestination.LocalVhdDestination()
        elif destination == DEST_CLOUD:
            cloud_name = config.get_cloud_name()
            cls = get_driver(cloud_name)
            cloud_user = config.get_user_name()
            cloud_secret_key = config.get_secret_name()
            driver = cls(cloud_user, cloud_secret_key)
            return ImagingCloudVolumeDestintaion.ImagingCloudVolumeDestintaion(driver, RawDiskImage.RawDiskImage(), manifest['import']['volume-size'] * _GIG_)
        elif destination == DEST_LOCAL_PATH:
            path = config.get_local_path()      
            if manifest.local():
                path = manifest.extension.path
            return LocalPathDestination.LocalPathDestination(path , RawDiskImage.RawDiskImage())
            #TODO: GENERATE LOCAL DESTINATION

        return None
Example #9
def main():
    from pprint import pprint

    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver

    cls = get_driver(Provider.FEDERATION)
Example #10
  def __init__( self, user, secret, endpointConfig, imageConfig ):
    """
    Constructor whose behaviour depends on the passed parameters
    
    :Parameters:
      **user** - `string`
        username that will be used on the authentication
      **secret** - `string`
        password used on the authentication
      If secret is None then user actually is:
      **proxyPath** - `string`
        path to the valid X509 proxy 
      **endpointConfig** - `dict`
        dictionary with the endpoint configuration ( WMS.Utilities.Configuration.NovaConfiguration )
      **imageConfig** - `dict`
        dictionary with the image configuration ( WMS.Utilities.Configuration.ImageConfiguration )
    
    """
    # logger
    self.log = gLogger.getSubLogger( self.__class__.__name__ )
    
    self.endpointConfig = endpointConfig
    self.imageConfig    = imageConfig
 
    # Variables needed to contact the service  
    ex_force_auth_url       = endpointConfig.get( 'ex_force_auth_url', None )
    ex_force_service_region = endpointConfig.get( 'ex_force_service_region', None ) 
    ex_force_auth_version   = endpointConfig.get( 'ex_force_auth_version', None )
    ex_tenant_name          = endpointConfig.get( 'ex_tenant_name', None )
    
    # we force SSL cacert, if defined
    ex_force_ca_cert        = endpointConfig.get( 'ex_force_ca_cert', None )
    if ex_force_ca_cert is not None:
      security.CA_CERTS_PATH = [ ex_force_ca_cert ]

    # get openstack driver
    openstack_driver = get_driver( Provider.OPENSTACK )
    
    if secret == None:
      # with VOMS (from Alvaro Lopez trunk https://github.com/alvarolopez/libcloud/blob/trunk):
      proxyPath=user
      username = password = None

      self.__driver = openstack_driver( username, password,
                                     ex_force_auth_url = ex_force_auth_url,
                                     ex_force_service_region = ex_force_service_region,
                                     ex_force_auth_version = ex_force_auth_version,
                                     ex_tenant_name = ex_tenant_name,
                                     ex_voms_proxy = proxyPath
                                    )
    else:
      # with user password
      username = user
      password = secret
      self.__driver = openstack_driver( username, password,
                                     ex_force_auth_url = ex_force_auth_url,
                                     ex_force_service_region = ex_force_service_region,
                                     ex_force_auth_version = ex_force_auth_version,
                                     ex_tenant_name = ex_tenant_name
                                    )
Example #11
def get_rackspace_driver(rackspace):
    """
    Get a libcloud Rackspace driver given some credentials and other
    configuration.
    """
    rackspace = get_driver(Provider.RACKSPACE)(rackspace["username"], rackspace["key"], region=rackspace["region"])
    return rackspace
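
A short usage sketch for the helper above, assuming the credentials arrive as a plain dict with the three keys it reads:

rackspace_config = {
    "username": "myuser",
    "key": "my-api-key",
    "region": "iad",
}

driver = get_rackspace_driver(rackspace_config)
nodes = driver.list_nodes()

Note that the function reuses the name `rackspace` for both the credentials dict and the resulting driver; it works, but a distinct name for the return value would read more clearly.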
Example #12
	def initialize(self):
		print 'Initializing AWS driver.'

		from libcloud.compute.providers import get_driver
		AWS = get_driver(Provider.EC2_EU_WEST)

		self.driver = AWS (self.get_property('ACCESS_ID'), self.get_property('SECRET_ID'))
Example #13
File: ec2.py Project: a13m/fedimg
    def upload(self, raw):
        """ Takes a raw image file and registers it as an AMI in each
        EC2 region. """
        # TODO: Check here to confirm that image is proper format (RAW)?
        # TODO: Make sure that once we create an AMI, we copy it to other
        # regions via region-to-region copy rather than remake the AMI
        # in each region (might just be copying image though).
        for ami in self.amis:
            cls = get_driver(ami['prov'])
            driver = cls(fedimg.AWS_ACCESS_ID, fedimg.AWS_SECRET_KEY)

            # select the desired node attributes
            sizes = driver.list_sizes()
            size_id = 't1.micro'  # The smallest one for now.
            # check to make sure we have access to that size node
            size = [s for s in sizes if s.id == size_id][0]
            image = NodeImage(id=ami['ami'], name=None, driver=driver)

            # create node
            # must be EBS-backed for AMI registration to work
            name = 'fedimg AMI builder'  # TODO: will add raw image title
            node = driver.create_node(name=name, image=image, size=size,
                                      ex_iamprofile=fedimg.AWS_IAM_PROFILE,
                                      ex_ebs_optimized=True)

            # create a volume for the uploaded image to be written to
            vol_name = 'fedimg AMI volume'  # TODO; will add raw image title
            # TODO: might need to provide availability zone in the below call
            vol = driver.create_volume(10, vol_name)  # new 10 GB volume

            # Attach the new volume to the node
            # TODO: Check to see if it's faster to have the second volume
            # in the block device mappings when the instance is spun up.
            driver.attach_volume(node, vol, device='/dev/sdb')
Example #14
def _thread_create_node(name):
    Driver = get_driver(Provider.RACKSPACE)
    conn = Driver('username', 'api key')
    image = conn.list_images()[0]
    size = conn.list_sizes()[0]
    node = conn.create_node(name=name, image=image, size=size)
    return node
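
The leading underscore and the name suggest this helper is meant to run in worker threads, one per node. A minimal sketch of how it could be driven, assuming the function above is importable and the hard-coded credentials are valid:

import threading

threads = []
for i in range(3):
    t = threading.Thread(target=_thread_create_node,
                         args=('test-node-%d' % i,))
    threads.append(t)
    t.start()

# Wait for all of the create_node calls to finish.
for t in threads:
    t.join()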
Example #15
def rackspace_provisioner(username, key, region, keyname):
    """
    Create a LibCloudProvisioner for provisioning nodes on rackspace.

    :param bytes username: The user to connect to rackspace with.
    :param bytes key: The API key associated with the user.
    :param bytes region: The rackspace region in which to launch the instance.
    :param bytes keyname: The name of an existing ssh public key configured in
       rackspace. The provision step assumes the corresponding private key is
       available from an agent.
    """
    # Import these here, so that this can be imported without
    # installing libcloud.
    from libcloud.compute.providers import get_driver, Provider
    monkeypatch()
    driver = get_driver(Provider.RACKSPACE)(
        key=username,
        secret=key,
        region=region)

    provisioner = LibcloudProvisioner(
        driver=driver,
        keyname=keyname,
        image_names=IMAGE_NAMES,
        create_node_arguments=lambda **kwargs: {
            "ex_config_drive": "true",
        },
        provision=provision_rackspace,
        default_size="performance1-8",
        get_default_user=get_default_username,
    )

    return provisioner
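
A short usage sketch for the factory above; per the docstring, the credentials are passed as bytes and the key pair must already exist in Rackspace (placeholder values):

provisioner = rackspace_provisioner(
    username=b'myuser',
    key=b'my-api-key',
    region=b'iad',
    keyname=b'my-existing-keypair',
)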
Example #16
def destroy_volume(name):
    driver = get_driver()
    volume = get_volume(name)
    # check to see if this is a valid volume
    if volume.state != "notfound":
        logger.info("Destroying volume %s", name)
        driver.destroy_volume(volume)
Example #17
def create_servers(size_id, image_id, num, event_id):
    username, apikey, region = get_creds()

    Driver = get_driver(Provider.RACKSPACE)
    driver = Driver(username, apikey, region=region)

    size = list(filter(lambda size: size.id == size_id, driver.list_sizes()))[0]
    image = list(filter(lambda img: img.id == image_id, driver.list_images()))[0]

    for i in range(num):
        node = driver.create_node(
            name=datetime.strftime(datetime.now(), "gmas-%Y%m%d%H%M%S"),
            image=image, size=size)
        password = node.extra["password"]

        _, ip = driver.wait_until_running([node])[0]
        server = Server(ip[0], password, event_id)
        server.available = True
        db.session.add(server)
        db.session.commit()

        event = Event.query.filter(Event.id.is_(event_id)).scalar()
        if not event:
            continue
        event.image_name = image.name
        event.size_name = size.name
        db.session.add(event)
        db.session.commit()
    return i
Example #18
    def upload(self, raw_url):
        """ Takes a URL to a .raw.xz file and registers it as an image
        in each Rackspace region. """

        cls = get_driver(Provider.RACKSPACE)
        driver = cls(fedimg.RACKSPACE_USER, fedimg.RACKSPACE_API_KEY,
                     region=self.regions[0])
Example #19
 def initialize(self):
     OpenStack = get_driver(Provider.OPENSTACK)
     self.__conn=OpenStack(self.access_id,self.secret_key,
                           ex_force_auth_url='http://8.21.28.222:5000/v2.0/tokens',
                           ex_force_auth_version='2.0_password',
                           ex_tenant_name='facebook853050102',
                           ex_tenant_id='3af07bd87fdd4845bdbe93ca49a0a255')
Example #20
File: GCE.py Project: lxhiguera/im
    def get_driver(self, auth_data):
        """
        Get the driver from the auth data

        Arguments:
            - auth(Authentication): parsed authentication tokens.
        
        Returns: a :py:class:`libcloud.compute.base.NodeDriver` or None in case of error
        """
        if self.driver:
            return self.driver
        else:
            auth = auth_data.getAuthInfo(self.type)
            
            if auth and 'username' in auth[0] and 'password' in auth[0] and 'project' in auth[0]:
                cls = get_driver(Provider.GCE)
                # Patch to solve some client problems with \\n 
                auth[0]['password'] = auth[0]['password'].replace('\\n','\n')         
                lines = len(auth[0]['password'].replace(" ","").split())
                if lines < 2:
                    raise Exception("The certificate provided to the GCE plugin has an incorrect format. Check that it has more than one line.")

                driver = cls(auth[0]['username'], auth[0]['password'], project=auth[0]['project']) 

                self.driver = driver
                return driver
            else:
                self.logger.error("No correct auth data has been specified to GCE: username, password and project")
                self.logger.debug(auth)
                raise Exception("No correct auth data has been specified to GCE: username, password and project")
Example #21
    def __init__(self, plan_file):
        try:
            with open(plan_file) as plan:
                self.config = yaml.safe_load(plan.read())

            self.deployment = DeploymentTool(plan_file, manage=True)

        except IndexError:
            raise Exception("You need to specify a deployment plan.")

        except IOError:
            raise Exception("Unable to read specified deployment plan.")

        except Exception as exc:
            print "Management exception: %s" % exc

        else:
            self.driver = get_driver(Provider.RACKSPACE)
            self.conn = self.driver(RACKSPACE_USER, RACKSPACE_APIKEY,
                                    region=self.config.get('region',
                                                           DEFAULT_REGION))
            self.servers = []
            self.nodes = self.conn.list_nodes()
            for depl in self.deployment.deployments:
                for node in self.nodes:
                    if depl.name == node.name:
                        self.servers.append(node)
Example #22
    def test_driver_instantiation(self):
        urls = [
            'http://api.exoscale.ch/compute1',  # http, default port
            'https://api.exoscale.ch/compute2',  # https, default port
            'http://api.exoscale.ch:8888/compute3',  # http, custom port
            'https://api.exoscale.ch:8787/compute4',  # https, custom port
            'https://api.test.com/compute/endpoint'  # https, default port
        ]

        expected_values = [
            {'host': 'api.exoscale.ch', 'port': 80, 'path': '/compute1'},
            {'host': 'api.exoscale.ch', 'port': 443, 'path': '/compute2'},
            {'host': 'api.exoscale.ch', 'port': 8888, 'path': '/compute3'},
            {'host': 'api.exoscale.ch', 'port': 8787, 'path': '/compute4'},
            {'host': 'api.test.com', 'port': 443, 'path': '/compute/endpoint'}
        ]

        cls = get_driver(Provider.CLOUDSTACK)

        for url, expected in zip(urls, expected_values):
            driver = cls('key', 'secret', url=url)

            self.assertEqual(driver.host, expected['host'])
            self.assertEqual(driver.path, expected['path'])
            self.assertEqual(driver.connection.port, expected['port'])
Example #23
def get_cloud_driver(ctx):
    auth_config = _get_auth_from_context(ctx)
    api_key = auth_config['API_KEY']
    api_secret_key = auth_config['API_SECRET_KEY']
    driver = get_driver(Provider.EXOSCALE)
    libcloud.security.VERIFY_SSL_CERT = False
    return driver(api_key, api_secret_key)
Example #24
def main():
    aws_key = os.environ.get('AWS_KEY')
    aws_secret = os.environ.get('AWS_SECRET')

    if not (aws_key and aws_secret):
        print 'AWS_KEY and AWS_SECRET must both be set in the environment.'
        exit(1)

    # Set up EC2 driver.
    cls = get_driver(Provider.EC2_AP_SOUTHEAST2)
    driver = cls(aws_key, aws_secret)

    # Get desired size and the AMI image to base the instance on.
    size = [x for x in driver.list_sizes() if 'micro' in x.id][0]
    image = [x for x in driver.list_images() if x.id == XILINX_AMI][0]

    # Here: set up SSH pairs (or load a key from EC2), create deployment, etc...

    # Create instance from the found AMI.
    node = driver.create_node(name='xilinx_ec2', size=size, image=image)
    try:
        nodes = driver.wait_until_running([node])
        for running_node, ip_addr in nodes:
            node_created(running_node, ip_addr)
    except:
        raise
    finally:
        # Terminate the instance.
        node.destroy()
Example #25
def authenticateGCE():

	ComputeEngine = get_driver(Provider.GCE)
	# Datacenter is set to 'us-central1-a' as an example, but can be set to any
	# zone, like 'us-central1-b' or 'europe-west1-a'
	driver = ComputeEngine(datacenter='europe-west1-b', project='theone-agens')
	return driver
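
As written, the call above passes only `datacenter` and `project`; the GCE driver in the other examples on this page also takes a service-account email and key file as its first two arguments. A sketch with placeholder credentials:

from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

ComputeEngine = get_driver(Provider.GCE)
# 'service-account-email' and 'private-key-filename' are placeholders.
driver = ComputeEngine('service-account-email', 'private-key-filename',
                       datacenter='europe-west1-b', project='theone-agens')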
Example #26
def _openstack_region(provider, service_name, region, cred):
    username, password, auth_version, keystone_url, tenant_name = _openstack_cred(cred)
    url = urlparse.urlparse(keystone_url)
    service_type = 'compute'

    cls = get_driver(provider)
    driver = cls(
        username,
        password,
        ex_force_auth_url=url.geturl(),
        ex_tenant_name=tenant_name,
        ex_force_auth_version=auth_version,
        ex_force_service_region=region,
        ex_force_service_type=service_type,
        ex_force_service_name=service_name,
    )
    cloud_nodes = _libcloud_list_nodes(driver)
    timestamp = int(time.time())
    nodes = list()
    for cloud_node in cloud_nodes:
        if cloud_node.state != NodeState.RUNNING:
            continue
        node = {
            'instance_id': cloud_node.id,
            'instance_type': cloud_node.extra['flavorId'],
            'os': None
        }
        nodes.append(node)
    return {
        'region': region,
        'timestamp': timestamp,
        'nodes': nodes
    } if nodes else dict()
Example #27
 def test_driver(self):
     token = self.mocks['m_auth_token'].return_value
     self.mocks['m_auth_token'].return_value.__enter__.return_value = token
     token.value = None
     obj = cloud.get_provider('my_provider')
     assert isinstance(obj.driver, get_driver('openstack'))
     assert obj._auth_token.value is None
Example #28
def get_driver_by_provider_name(provider_name):
    """
    Get a driver by provider name
    If the provider is unknown, will raise an exception.

    :param drivers: Dictionary containing valid providers.

    :param provider: object that contain supported provider
    :type providers: :class:`libcloud.compute.types.Provider`

    :param    provider_name:   String with a provider name (required)
    :type     provider_name:   ``str``

    :return: :class:`NodeDriver`

    """
    provider_name = provider_name.upper()

    if provider_name in ('RACKSPACE_NOVA_DFW', 'RACKSPACE_NOVA_BETA',
                         'RACKSPACE_NOVA_ORD', 'RACKSPACE_NOVA_LON'):
        provider_name = 'RACKSPACE'
    elif provider_name == 'RACKSPACE_UK':
        provider_name = 'RACKSPACE_FIRST_GEN'
    # otherwise there is no name conflict to resolve

    provider = getattr(Provider, provider_name, None)

    try:
        Driver = get_driver(provider)
    except AttributeError:
        raise ProviderNotSupportedError(provider=provider_name)
    return Driver
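
A short usage sketch for the helper above; the lookup is case-insensitive because the name is upper-cased first:

# Deprecated Rackspace Nova names collapse to the current RACKSPACE provider.
Driver = get_driver_by_provider_name('rackspace_nova_dfw')
conn = Driver('username', 'api key')

# Unknown names end up raising ProviderNotSupportedError.
try:
    get_driver_by_provider_name('no_such_cloud')
except ProviderNotSupportedError:
    pass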
Example #29
    def __init__(self, creds):
        """
        @param  creds: Credentials

        """
        super(EC2_EU_WEST_Driver, self).__init__(creds)
        self.driver = get_driver(Provider.EC2_EU_WEST)
Example #30
def get_conn():
    '''
    Return a conn object for the passed VM data
    '''
    vm_ = get_configured_provider()
    driver = get_driver(Provider.DIMENSIONDATA)

    region = config.get_cloud_config_value(
         'region', vm_, __opts__
    )

    user_id = config.get_cloud_config_value(
        'user_id', vm_, __opts__
    )
    key = config.get_cloud_config_value(
        'key', vm_, __opts__
    )

    if key is not None:
        log.debug('DimensionData authenticating using password')

    return driver(
        user_id,
        key,
        region=region
    )
Example #31
 def create(self):
     lgr.debug('creating exoscale cloudstack connector')
     api_key = self.config['authentication']['api_key']
     api_secret_key = self.config['authentication']['api_secret_key']
     cls = get_driver(Provider.EXOSCALE)
     return cls(api_key, api_secret_key)
Example #32
__author__ = 'syedaali'
'''
This is a sample program that shows you how to connection to GCP using
Apache libcloud.
'''

from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

Driver = get_driver(Provider.GCE)

#replace email with service account email
#replace private-key-filename with private key filename
#replace region with GCP region
#replace project with GCP project name
gce = Driver('email',
             'private-key-filename',
             datacenter='region',
             project='project')

sizes = gce.list_sizes()
images = gce.list_images()

print images
Example #33
from pprint import pprint

from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

apikey = 'your api key'
secretkey = 'your secret key'

Driver = get_driver(Provider.IKOULA)
driver = Driver(key=apikey, secret=secretkey)

# This returns a list of CloudStackNetwork objects
nets = driver.ex_list_networks()

# List the images/templates available
# This returns a list of NodeImage objects
images = driver.list_images()

# List the instance types
# This returns a list of NodeSize objects
sizes = driver.list_sizes()

# Create the node
# This returns a Node object
node = driver.create_node(name='libcloud',
                          image=images[0],
                          size=sizes[0],
                          networks=[nets[0]])

# The node has a private IP in the guest network used
# No public IPs and no rules
Example #34
        i = i + 1
        files[i % number].write('>' + name + '\n' + seq + '\n')

    for f in files:
        f.close()

    return filenames


SIZE = 'c1.xlarge'
job = 'lab_meeting_demo'
number = 8
processors = 8
input_file = 'input.fasta'

main_Driver = get_driver(Provider.EC2)
main_conn = main_Driver(credentials.EC2_ACCESS_ID, credentials.EC2_SECRET_KEY)

my_image = [
    i for i in main_conn.list_images() if i.id == credentials.WORKER_AMI
][0]
my_size = [i for i in main_conn.list_sizes() if i.id == SIZE][0]

print('destroying old nodes...')
destroy_worker_nodes(main_conn)
print('done destroying old nodes')

filenames = split_fasta(input_file, number)

for i in range(number):
    print('starting thread ' + str(i))
Example #35
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

# First we need to instantiate the desired libcloud driver.
cls = get_driver(Provider.ONEANDONE)

token = 'your_token'
# Then pass in your security token
drv = cls(key=token)
Example #36
import os

from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import MultiStepDeployment
from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment

# Path to the public key you would like to install
KEY_PATH = os.path.expanduser('~/.ssh/id_rsa.pub')

# Shell script to run on the remote server
SCRIPT = '''#!/usr/bin/env bash
apt-get -y update && apt-get -y install puppet
'''

RACKSPACE_USER = '******'
RACKSPACE_KEY = 'your key'

Driver = get_driver(Provider.RACKSPACE)
conn = Driver(RACKSPACE_USER, RACKSPACE_KEY)

with open(KEY_PATH) as fp:
    content = fp.read()

# Note: This key will be added to the authorized keys for the root user
# (/root/.ssh/authorized_keys)
step_1 = SSHKeyDeployment(content)

# A simple script to install puppet post boot, can be much more complicated.
step_2 = ScriptDeployment(SCRIPT)

msd = MultiStepDeployment([step_1, step_2])

images = conn.list_images()
Example #37
    def upload(self):
        """ Registers the image in each EC2 region. """

        log.info('EC2 upload process started')

        # Get a starting utility AMI in some region to use as an origin
        ami = self.util_amis[0]  # Select the starting AMI to begin
        self.destination = 'EC2 ({region})'.format(region=ami['region'])

        fedimg.messenger.message('image.upload', self.build_name,
                                 self.destination, 'started')

        try:
            # Connect to the region through the appropriate libcloud driver
            cls = get_driver(ami['prov'])
            driver = cls(fedimg.AWS_ACCESS_ID, fedimg.AWS_SECRET_KEY)

            # select the desired node attributes
            sizes = driver.list_sizes()
            reg_size_id = 'm1.large'

            # check to make sure we have access to that size node
            # TODO: Add try/except if for some reason the size isn't
            # available?
            size = [s for s in sizes if s.id == reg_size_id][0]
            base_image = NodeImage(id=ami['ami'], name=None, driver=driver)

            # Name the utility node
            name = 'Fedimg AMI builder'

            # Block device mapping for the utility node
            # (Requires this second volume to write the image to for
            # future registration.)
            mappings = [{
                'VirtualName': None,  # cannot specify with Ebs
                'Ebs': {
                    'VolumeSize': fedimg.AWS_UTIL_VOL_SIZE,
                    'VolumeType': 'standard',
                    'DeleteOnTermination': 'false'
                },
                'DeviceName': '/dev/sdb'
            }]

            # Read in the SSH key
            with open(fedimg.AWS_PUBKEYPATH, 'rb') as f:
                key_content = f.read()

            # Add key to authorized keys for root user
            step_1 = SSHKeyDeployment(key_content)

            # Add script for deployment
            # Device becomes /dev/xvdb on instance
            script = "touch test"  # this isn't so important for the util inst.
            step_2 = ScriptDeployment(script)

            # Create deployment object (will set up SSH key and run script)
            msd = MultiStepDeployment([step_1, step_2])

            log.info('Deploying utility instance')

            while True:
                try:
                    self.util_node = driver.deploy_node(
                        name=name,
                        image=base_image,
                        size=size,
                        ssh_username=fedimg.AWS_UTIL_USER,
                        ssh_alternate_usernames=[''],
                        ssh_key=fedimg.AWS_KEYPATH,
                        deploy=msd,
                        kernel_id=ami['aki'],
                        ex_metadata={'build': self.build_name},
                        ex_keyname=fedimg.AWS_KEYNAME,
                        ex_security_groups=['ssh'],
                        ex_ebs_optimized=True,
                        ex_blockdevicemappings=mappings)

                except KeyPairDoesNotExistError:
                    # The keypair is missing from the current region.
                    # Let's install it and try again.
                    log.exception('Adding missing keypair to region')
                    driver.ex_import_keypair(fedimg.AWS_KEYNAME,
                                             fedimg.AWS_PUBKEYPATH)
                    continue

                except Exception as e:
                    # We might have an invalid security group, aka the 'ssh'
                    # security group doesn't exist in the current region. The
                    # reason this is caught here is that the related
                    # exception that reports `InvalidGroup.NotFound` is, for
                    # some reason, a base exception.
                    if 'InvalidGroup.NotFound' in e.message:
                        log.exception('Adding missing security '
                                      'group to region')
                        # Create the ssh security group
                        driver.ex_create_security_group('ssh', 'ssh only')
                        driver.ex_authorize_security_group(
                            'ssh', '22', '22', '0.0.0.0/0')
                        continue
                    else:
                        raise
                break

            # Wait until the utility node has SSH running
            while not ssh_connection_works(fedimg.AWS_UTIL_USER,
                                           self.util_node.public_ips[0],
                                           fedimg.AWS_KEYPATH):
                sleep(10)

            log.info('Utility node started with SSH running')

            # Connect to the utility node via SSH
            client = paramiko.SSHClient()
            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            client.connect(self.util_node.public_ips[0],
                           username=fedimg.AWS_UTIL_USER,
                           key_filename=fedimg.AWS_KEYPATH)

            # Curl the .raw.xz file down from the web, decompressing it
            # and writing it to the secondary volume defined earlier by
            # the block device mapping.
            # curl with -L option, so we follow redirects
            cmd = "sudo sh -c 'curl -L {0} | xzcat > /dev/xvdb'".format(
                self.raw_url)
            chan = client.get_transport().open_session()
            chan.get_pty()  # Request a pseudo-term to get around requiretty

            log.info('Executing utility script')

            # Run the above command and wait for its exit status
            chan.exec_command(cmd)
            status = chan.recv_exit_status()
            if status != 0:
                # There was a problem with the SSH command
                log.error('Problem writing volume with utility instance')
                raise EC2UtilityException("Problem writing image to"
                                          " utility instance volume."
                                          " Command exited with"
                                          " status {0}.\n"
                                          "command: {1}".format(status, cmd))
            client.close()

            # Get volume name that image was written to
            vol_id = [
                x['ebs']['volume_id']
                for x in self.util_node.extra['block_device_mapping']
                if x['device_name'] == '/dev/sdb'
            ][0]

            log.info('Destroying utility node')

            # Terminate the utility instance
            driver.destroy_node(self.util_node)

            # Wait for utility node to be terminated
            while ssh_connection_works(fedimg.AWS_UTIL_USER,
                                       self.util_node.public_ips[0],
                                       fedimg.AWS_KEYPATH):
                sleep(10)

            # Wait a little longer since loss of SSH connectivity doesn't mean
            # that the node's destroyed
            # TODO: Check instance state rather than this lame sleep thing
            sleep(45)

            # Take a snapshot of the volume the image was written to
            self.util_volume = [
                v for v in driver.list_volumes() if v.id == vol_id
            ][0]
            snap_name = 'fedimg-snap-{0}'.format(self.build_name)

            log.info('Taking a snapshot of the written volume')

            self.snapshot = driver.create_volume_snapshot(self.util_volume,
                                                          name=snap_name)
            snap_id = str(self.snapshot.id)

            while self.snapshot.extra['state'] != 'completed':
                # Re-obtain snapshot object to get updates on its state
                self.snapshot = [
                    s for s in driver.list_snapshots() if s.id == snap_id
                ][0]
                sleep(10)

            log.info('Snapshot taken')

            # Delete the volume now that we've got the snapshot
            driver.destroy_volume(self.util_volume)
            # make sure Fedimg knows that the vol is gone
            self.util_volume = None

            log.info('Destroyed volume')

            # Actually register image
            log.info('Registering image as an AMI')

            if self.virt_type == 'paravirtual':
                image_name = "{0}-{1}-PV-0".format(self.build_name,
                                                   ami['region'])
                test_size_id = 'm1.medium'
                # test_amis will include AKIs of the appropriate arch
                registration_aki = [
                    a['aki'] for a in self.test_amis
                    if a['region'] == ami['region']
                ][0]
                reg_root_device_name = '/dev/sda'
            else:  # HVM
                image_name = "{0}-{1}-HVM-0".format(self.build_name,
                                                    ami['region'])
                test_size_id = 'm3.medium'
                # Can't supply a kernel image with HVM
                registration_aki = None
                reg_root_device_name = '/dev/sda1'

            # For this block device mapping, we have our volume be
            # based on the snapshot's ID
            mapping = [{
                'DeviceName': reg_root_device_name,
                'Ebs': {
                    'SnapshotId': snap_id,
                    'VolumeSize': fedimg.AWS_TEST_VOL_SIZE,
                    'VolumeType': 'standard',
                    'DeleteOnTermination': 'true'
                }
            }]

            # Avoid duplicate image name by incrementing the number at the
            # end of the image name if there is already an AMI with that name.
            # TODO: This process could be written nicer.
            while True:
                try:
                    if self.dup_count > 0:
                        # Remove trailing '-0' or '-1' or '-2' or...
                        image_name = '-'.join(image_name.split('-')[:-1])
                        # Re-add trailing dup number with new count
                        image_name += '-{0}'.format(self.dup_count)
                    # Try to register with that name
                    self.images.append(
                        driver.ex_register_image(
                            image_name,
                            description=self.image_desc,
                            root_device_name=reg_root_device_name,
                            block_device_mapping=mapping,
                            virtualization_type=self.virt_type,
                            kernel_id=registration_aki,
                            architecture=self.image_arch))
                except Exception as e:
                    # Check if the problem was a duplicate name
                    if 'InvalidAMIName.Duplicate' in e.message:
                        # Keep trying until an unused name is found
                        self.dup_count += 1
                        continue
                    else:
                        raise
                break

            log.info('Completed image registration')

            # Emit success fedmsg
            for image in self.images:
                fedimg.messenger.message('image.upload',
                                         self.build_name,
                                         self.destination,
                                         'completed',
                                         extra={'id': image.id})

            # Now, we'll spin up a node of the AMI to test:

            # Add script for deployment
            # Device becomes /dev/xvdb on instance
            script = "touch test"
            step_2 = ScriptDeployment(script)

            # Create deployment object
            msd = MultiStepDeployment([step_1, step_2])

            log.info('Deploying test node')

            # Pick a name for the test instance
            name = 'Fedimg AMI tester'

            # Select the appropriate size for the instance
            size = [s for s in sizes if s.id == test_size_id][0]

            # Actually deploy the test instance
            self.test_node = driver.deploy_node(
                # TODO: Test all images
                name=name,
                image=self.images[0],
                size=size,
                ssh_username=fedimg.AWS_TEST_USER,
                ssh_alternate_usernames=['root'],
                ssh_key=fedimg.AWS_KEYPATH,
                deploy=msd,
                kernel_id=registration_aki,
                ex_metadata={'build': self.build_name},
                ex_keyname=fedimg.AWS_KEYNAME,
                ex_security_groups=['ssh'],
            )

            # Wait until the test node has SSH running
            while not ssh_connection_works(fedimg.AWS_TEST_USER,
                                           self.test_node.public_ips[0],
                                           fedimg.AWS_KEYPATH):
                sleep(10)

            log.info('Starting AMI tests')

            # Alert the fedmsg bus that an image test has started
            fedimg.messenger.message('image.test', self.build_name,
                                     self.destination, 'started')

            client = paramiko.SSHClient()
            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            client.connect(self.test_node.public_ips[0],
                           username=fedimg.AWS_TEST_USER,
                           key_filename=fedimg.AWS_KEYPATH)

            # Run /bin/true on the test instance as a simple "does it
            # work" test
            cmd = "/bin/true"
            chan = client.get_transport().open_session()
            chan.get_pty()  # Request a pseudo-term to get around requiretty

            log.info('Running AMI test script')

            chan.exec_command(cmd)

            # Again, wait for the test command's exit status
            if chan.recv_exit_status() != 0:
                # There was a problem with the SSH command
                log.error('Problem testing new AMI')
                raise EC2AMITestException("Tests on AMI failed.")

            client.close()

            log.info('AMI test completed')
            fedimg.messenger.message(
                'image.test',
                self.build_name,
                self.destination,
                'completed',
                # TODO: Update this line when
                # we test all images
                extra={'id': self.images[0].id})

            # Let this EC2Service know that the AMI test passed, so
            # it knows how to proceed.
            self.test_success = True

            log.info('Destroying test node')

            # Destroy the test node
            driver.destroy_node(self.test_node)

            # Make AMIs public
            for image in self.images:
                driver.ex_modify_image_attribute(
                    image, {'LaunchPermission.Add.1.Group': 'all'})

        except EC2UtilityException as e:
            fedimg.messenger.message('image.upload', self.build_name,
                                     self.destination, 'failed')
            log.exception("Failure")
            if fedimg.CLEAN_UP_ON_FAILURE:
                self._clean_up(driver,
                               delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
            return 1

        except EC2AMITestException as e:
            fedimg.messenger.message('image.test', self.build_name,
                                     self.destination, 'failed')
            log.exception("Failure")
            if fedimg.CLEAN_UP_ON_FAILURE:
                self._clean_up(driver,
                               delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
            return 1

        except DeploymentException as e:
            fedimg.messenger.message('image.upload', self.build_name,
                                     self.destination, 'failed')
            log.exception("Problem deploying node: {0}".format(e.value))
            if fedimg.CLEAN_UP_ON_FAILURE:
                self._clean_up(driver,
                               delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
            return 1

        except Exception as e:
            # Just give a general failure message.
            fedimg.messenger.message('image.upload', self.build_name,
                                     self.destination, 'failed')
            log.exception("Unexpected exception")
            if fedimg.CLEAN_UP_ON_FAILURE:
                self._clean_up(driver,
                               delete_images=fedimg.DELETE_IMAGES_ON_FAILURE)
            return 1

        else:
            self._clean_up(driver)

        if self.test_success:
            # Copy the AMI to every other region if tests passed
            copied_images = list()  # completed image copies (ami: image)

            # Use the AMI list as a way to cycle through the regions
            for ami in self.test_amis[1:]:  # we don't need the origin region

                # Choose an appropriate destination name for the copy
                alt_dest = 'EC2 ({region})'.format(region=ami['region'])

                fedimg.messenger.message('image.upload', self.build_name,
                                         alt_dest, 'started')

                # Connect to the libcloud EC2 driver for the region we
                # want to copy into
                alt_cls = get_driver(ami['prov'])
                alt_driver = alt_cls(fedimg.AWS_ACCESS_ID,
                                     fedimg.AWS_SECRET_KEY)

                # Construct the full name for the image copy
                if self.virt_type == 'paravirtual':
                    image_name = "{0}-{1}-PV-0".format(self.build_name,
                                                       ami['region'])
                else:  # HVM
                    image_name = "{0}-{1}-HVM-0".format(
                        self.build_name, ami['region'])

                log.info('AMI copy to {0} started'.format(ami['region']))

                # Avoid duplicate image name by incrementing the number at the
                # end of the image name if there is already an AMI with
                # that name.
                # TODO: Again, this could be written better
                while True:
                    try:
                        if self.dup_count > 0:
                            # Remove trailing '-0' or '-1' or '-2' or...
                            image_name = '-'.join(image_name.split('-')[:-1])
                            # Re-add trailing dup number with new count
                            image_name += '-{0}'.format(self.dup_count)

                        # Actually run the image copy from the origin region
                        # to the current region.
                        for image in self.images:
                            image_copy = alt_driver.copy_image(
                                image,
                                self.test_amis[0]['region'],
                                name=image_name,
                                description=self.image_desc)
                            # Add the image copy to a list so we can work with
                            # it later.
                            copied_images.append(image_copy)

                            log.info('AMI {0} copied to AMI {1}'.format(
                                image, image_name))

                    except Exception as e:
                        # Check if the problem was a duplicate name
                        if 'InvalidAMIName.Duplicate' in e.message:
                            # Keep trying until an unused name is found.
                            # This probably won't trigger, since it seems
                            # like EC2 doesn't mind duplicate AMI names
                            # when they are being copied, only registered.
                            # Strange, but apparently true.
                            self.dup_count += 1
                            continue
                        else:
                            # TODO: Catch a more specific exception
                            log.exception('Image copy to {0} failed'.format(
                                ami['region']))
                            fedimg.messenger.message('image.upload',
                                                     self.build_name, alt_dest,
                                                     'failed')
                    break

            # Now cycle through and make all of the copied AMIs public
            # once the copy process has completed. Again, use the test
            # AMI list as a way to have region and arch data:

            # We don't need the origin region, since the AMI was made there:
            self.test_amis = self.test_amis[1:]

            for image in copied_images:
                ami = self.test_amis[copied_images.index(image)]
                alt_cls = get_driver(ami['prov'])
                alt_driver = alt_cls(fedimg.AWS_ACCESS_ID,
                                     fedimg.AWS_SECRET_KEY)

                # Get an appropriate name for the region in question
                alt_dest = 'EC2 ({region})'.format(region=ami['region'])

                # Need to wait until the copy finishes in order to make
                # the AMI public.
                while True:
                    try:
                        # Make the image public
                        alt_driver.ex_modify_image_attribute(
                            image, {'LaunchPermission.Add.1.Group': 'all'})
                    except Exception as e:
                        if 'InvalidAMIID.Unavailable' in e.message:
                            # The copy isn't done, so wait 20 seconds
                            # and try again.
                            sleep(20)
                            continue
                    break

                log.info('Made {0} public'.format(image.id))

                fedimg.messenger.message('image.upload',
                                         self.build_name,
                                         alt_dest,
                                         'completed',
                                         extra={'id': image.id})

            return 0
Example #38
 def _init(cls):
     for name, value in cls._ENUMS.iteritems():
         m = cls.Maaper(get_driver(name))
         for k, v in value.iteritems():
             setattr(m, k, v)
         setattr(cls, name, m)
Example #39
from pprint import pprint
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.nttcis import NttCisFirewallAddress
import libcloud.security

# Get nttcis driver
libcloud.security.VERIFY_SSL_CERT = True
cls = get_driver(Provider.NTTCIS)
driver = cls('myusername', 'mypassword', region='eu')

domain_name = 'sdk_test_1'
domains = driver.ex_list_network_domains(location='EU6')
net_domain = [d for d in domains if d.name == domain_name]
source_firewall_address = NttCisFirewallAddress(any_ip='ANY')
dest_firewall_address = NttCisFirewallAddress(ip_address='10.2.0.0',
                                              ip_prefix_size='16',
                                              port_begin='8000',
                                              port_end='8080')

rule = driver.ex_create_firewall_rule(net_domain[0],
                                      'sdk_test_firewall_rule_2',
                                      'ACCEPT_DECISIVELY', 'IPV4', 'TCP',
                                      source_firewall_address,
                                      dest_firewall_address, 'LAST')
pprint(rule)
Example #40
# to a magical number, which is 280 (4 minutes). This is 1 minute less than the
# timeouts for production settings that should allow enough time to handle the
# exception and return a response
socket.setdefaulttimeout(280)

# FIXME
# At the time this example was written, https://nova-api.trystack.org:5443
# was using a certificate issued by a Certificate Authority (CA) which is
# not included in the default Ubuntu certificates bundle (ca-certificates).
# Note: Code like this poses a security risk (MITM attack) and that's the
# reason why you should never use it for anything else besides testing. You
# have been warned.
# signed cert installed : https://projects.engineering.redhat.com/browse/CID-2407
# libcloud.security.VERIFY_SSL_CERT = False

OpenStack = get_driver(Provider.OPENSTACK)


class InvalidHostName(Exception):
    pass


class NodeErrorState(Exception):
    pass


class GetIPError(Exception):
    pass


class ResourceNotFound(Exception):
Example #41
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import json
import time
"""
Address of the bootstrap node
with driver V0
"""
Snooze = get_driver(Provider.SNOOZE)
driver = Snooze("127.0.0.1", "5000")

resp = driver.get_and_set_groupleader()
print "group leader address %s : %s" % (resp.get("address"), resp.get("port"))
"""
We create a first VM using the template fashion
"""
n1 = driver.create_node(
    libvirt_template=
    "/home/msimonin/Images-VM/Snooze-images/vmtemplates/debian1.xml",
    tx=12800,
    rx=12800)
driver.shutdown(n1)
time.sleep(2)
driver.list_nodes()
Example #42
 def _connect(self):
     host = dnat(self.cloud.owner, self.cloud.host)
     return get_driver(Provider.VSPHERE)(host=host,
                                         username=self.cloud.username,
                                         password=self.cloud.password)
Example #43
 def _connect(self):
     return get_driver(Provider.ONAPP)(key=self.cloud.username,
                                       secret=self.cloud.apikey,
                                       host=self.cloud.host)
Example #44
def get_gce_driver():
    driver = get_driver(Provider.GCE)(*args, **kwargs)
    return driver
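
`args` and `kwargs` are not defined in this snippet; presumably they hold the GCE credentials at module level, along the lines of the other GCE examples on this page (placeholder values):

args = ('service-account-email', 'private-key-filename')
kwargs = {'datacenter': 'us-central1-a', 'project': 'my-project'}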
Example #45
 def _connect(self):
     return get_driver(Provider.PACKET)(self.cloud.apikey,
                                        project=self.cloud.project_id)
Example #46
 def _connect(self):
     return get_driver(Provider.EC2)(self.cloud.apikey,
                                     self.cloud.apisecret,
                                     region=self.cloud.region)
Example #47
 def _connect(self):
     return get_driver(Provider.HOSTVIRTUAL)(self.cloud.apikey)
Example #48
 def _connect(self):
     return get_driver(Provider.VULTR)(self.cloud.apikey)
Example #49
    def get_gce_driver(self):
        """Determine the GCE authorization settings and return a
        libcloud driver.
        """
        gce_ini_default_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "gce.ini")
        gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)

        # Create a ConfigParser.
        # This provides empty defaults to each key, so that environment
        # variable configuration (as opposed to INI configuration) is able
        # to work.
        config = ConfigParser.SafeConfigParser(defaults={
            'gce_service_account_email_address': '',
            'gce_service_account_pem_file_path': '',
            'gce_project_id': '',
            'libcloud_secrets': '',
        })
        if 'gce' not in config.sections():
            config.add_section('gce')
        config.read(gce_ini_path)

        # Attempt to get GCE params from a configuration file, if one
        # exists.
        secrets_path = config.get('gce', 'libcloud_secrets')
        secrets_found = False
        try:
            import secrets
            args = list(getattr(secrets, 'GCE_PARAMS', []))
            kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
            secrets_found = True
        except:
            pass

        if not secrets_found and secrets_path:
            if not secrets_path.endswith('secrets.py'):
                err = "Must specify libcloud secrets file as "
                err += "/absolute/path/to/secrets.py"
                print(err)
                sys.exit(1)
            sys.path.append(os.path.dirname(secrets_path))
            try:
                import secrets
                args = list(getattr(secrets, 'GCE_PARAMS', []))
                kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
                secrets_found = True
            except:
                pass
        if not secrets_found:
            args = [
                config.get('gce','gce_service_account_email_address'),
                config.get('gce','gce_service_account_pem_file_path')
            ]
            kwargs = {'project': config.get('gce', 'gce_project_id')}

        # If the appropriate environment variables are set, they override
        # other configuration; process those into our args and kwargs.
        args[0] = os.environ.get('GCE_EMAIL', args[0])
        args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
        kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])

        # Retrieve and return the GCE driver.
        gce = get_driver(Provider.GCE)(*args, **kwargs)
        gce.connection.user_agent_append(
            '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
        )
        return gce
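
The code above reads its defaults from a gce.ini next to the script (or from GCE_INI_PATH) before applying the GCE_* environment variable overrides. A minimal sketch of that file with placeholder values, matching the keys the ConfigParser expects:

[gce]
gce_service_account_email_address = service-account-email
gce_service_account_pem_file_path = /path/to/key.pem
gce_project_id = my-project
libcloud_secrets =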
Example #50
config = ConfigDict("cloudmesh.yaml")
credential = config['cloudmesh']['clouds'][cloud]['credentials']
clouddefault = config['cloudmesh']['clouds'][cloud]['default']
# pprint(dict(credential))

auth_url = credential["EC2_URL"]

data = re.match(r'^http[s]?://(.+):([0-9]+)/([a-zA-Z/]*)', auth_url,
                re.M | re.I)
host, port, path = data.group(1), data.group(2), data.group(3)
print("host: " + host)
print("port: " + port)
print("path: " + path)

extra_args = {'path': path}
cls = get_driver(Provider.EC2_US_EAST)
driver = cls(credential['EC2_ACCESS_KEY'],
             credential['EC2_SECRET_KEY'],
             host=host,
             port=port,
             **extra_args)

print("DRIVER", driver)

pprint(dict(credential))

# list VMs
nodes = driver.list_nodes()
pprint(nodes)

# THIS FUNCTION TAKES TIME TO LOAD 40K+ IMAGES
Example #51
 def _connect(self):
     return get_driver(Provider.AZURE_ARM)(self.cloud.tenant_id,
                                           self.cloud.subscription_id,
                                           self.cloud.key,
                                           self.cloud.secret)
Example #52
 def _connect(self):
     return get_driver(Provider.GCE)(self.cloud.email,
                                     self.cloud.private_key,
                                     project=self.cloud.project_id)
Example #53
 def _connect(self):
     return get_driver(Provider.NEPHOSCALE)(self.cloud.username,
                                            self.cloud.password)
Example #54
 def _connect(self):
     tmp_cert_file = tempfile.NamedTemporaryFile(delete=False)
     tmp_cert_file.write(self.cloud.certificate)
     tmp_cert_file.close()
     return get_driver(Provider.AZURE)(self.cloud.subscription_id,
                                       tmp_cert_file.name)
Example #55
 def _connect(self):
     return get_driver(Provider.LINODE)(self.cloud.apikey)
Example #56
 def _connect(self):
     return get_driver(Provider.SOFTLAYER)(self.cloud.username,
                                           self.cloud.apikey)
Example #57
class DoCmds(CommonCloudFunctions):
    @trace
    def __init__(self, pid, osci, expid=None):
        CommonCloudFunctions.__init__(self, pid, osci)
        self.pid = pid
        self.osci = osci
        self.access_url = False
        self.ft_supported = False
        self.lock = False
        self.expid = expid
        self.locations = False
        self.sizes = False
        self.images = False
        self.cache_mutex = threading.Lock()

    @trace
    def get_description(self):
        return "DigitalOcean"

    @trace
    def connect(self, access_token):
        # libcloud is totally not thread-safe. bastards.
        cbdebug("Checking libcloud connection...")
        try:
            getattr(catalogs, "digitalocean")
        except Exception, e:
            cbdebug("Initializing thread local connection.")
            catalogs.digitalocean = False

        self.cache_mutex.acquire()
        try:
            _status = 100

            if not catalogs.digitalocean:
                cbdebug("Connecting to DigitalOcean...")
                driver = get_driver(Provider.DIGITAL_OCEAN)
                _status = 110
                catalogs.digitalocean = driver(access_token, api_version='v2')
            else:
                cbdebug("DigitalOcean Already connected.")

            cbdebug(
                "Caching DigitalOcean locations, sizes, and images. If stale, then restart..."
            )
            if not self.locations:
                cbdebug("Caching DigitalOcean Locations...", True)
                self.locations = catalogs.digitalocean.list_locations()
            if not self.sizes:
                cbdebug("Caching DigitalOcean Sizes...", True)
                self.sizes = catalogs.digitalocean.list_sizes()
            if not self.images:
                cbdebug(
                    "Caching DigitalOcean Images (can take a minute or so)...",
                    True)
                self.images = catalogs.digitalocean.list_images()
            assert (self.images)
            assert (self.sizes)
            assert (self.locations)
            cbdebug("Done caching.")

            _status = 0

        except Exception, e:
            _msg = "Error connecting DigitalOcean: " + str(e)
            cbdebug(_msg, True)
            _status = 23
Example #58
 def _connect(self):
     return get_driver(Provider.DIGITAL_OCEAN)(self.cloud.token)
Example #59
from pprint import pprint

from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

# Import the deployment specific modules
from libcloud.compute.deployment import ScriptDeployment
from libcloud.compute.deployment import MultiStepDeployment

cls = get_driver(Provider.EXOSCALE)
driver = cls('api key', 'api secret key')

image = driver.list_images()[0]
size = driver.list_sizes()[0]

# Define the scripts that you want to run during deployment
script = ScriptDeployment('/bin/date')
msd = MultiStepDeployment([script])

node = driver.deploy_node(name='test',
                          image=image,
                          size=size,
                          ssh_key='~/.ssh/id_rsa_test',
                          ex_keyname='test-keypair',
                          deploy=msd)

# The stdout of the deployment can be checked on the `script` object
pprint(script.stdout)
Example #60
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

cls = get_driver(Provider.CLOUDSIGMA)
driver = cls('username', 'password', region='zrh', api_version='2.0')

tags = driver.ex_list_tags()
tag = [tag for tag in tags if tag.name == 'database-server'][0]

nodes = driver.list_nodes(ex_tag=tag)
policy = driver.ex_list_firewall_policies()[0]

for node in nodes:
    driver.ex_attach_firewall_policy(policy=policy, node=node)