def process_settings():
    """Load (or interactively create) ~/.blitz.cfg and build Rackspace UK drivers.

    Returns:
        tuple: (compute driver class, connected load-balancer driver,
                connected compute driver instance).
    """
    config_file = path.expanduser('~/.blitz.cfg')
    config = ConfigParser.SafeConfigParser()
    config.read(config_file)
    # Make sure both sections exist before any get/set calls.
    if not config.has_section('general'):
        config.add_section('general')
    if not config.has_section('rackspace'):
        config.add_section('rackspace')
    try:
        username = config.get('rackspace', 'username')
        api_key = config.get('rackspace', 'api_key')
    except ConfigParser.NoOptionError:
        # First run: prompt for credentials and persist them.
        # NOTE(review): the original source was redacted here ("******");
        # the two prompts below are the obvious reconstruction.
        username = raw_input("Rackspace Username: ")
        api_key = raw_input("Rackspace API Key: ")
        config.set('rackspace', 'username', username)
        config.set('rackspace', 'api_key', api_key)
        # Use a context manager so the handle is closed (the original
        # passed an anonymous open() and leaked the descriptor).
        with open(config_file, 'w') as fh:
            config.write(fh)
    compute_driver = get_compute_driver(Compute_Provider.RACKSPACE_UK)
    lb_driver = get_lb_driver(LB_Provider.RACKSPACE_UK)(username, api_key)
    established_compute_conn = compute_driver(username, api_key)
    # TODO - replace. Libcloud hardcodes 'ord' into the URL for single
    # region support. The below is a nasty hack to overcome this.
    lb_driver.connection.lb_url = lb_driver.connection.lb_url.replace(
        "ord.", "")
    return (compute_driver, lb_driver, established_compute_conn)
def process_settings():
    """Read ~/.blitz.cfg (prompting for missing Rackspace credentials) and
    return Rackspace UK driver objects.

    Returns:
        tuple: (compute driver class, connected load-balancer driver,
                connected compute driver instance).
    """
    config_file = path.expanduser('~/.blitz.cfg')
    config = ConfigParser.SafeConfigParser()
    config.read(config_file)
    # Ensure both sections exist before reading/writing options.
    if not config.has_section('general'):
        config.add_section('general')
    if not config.has_section('rackspace'):
        config.add_section('rackspace')
    try:
        username = config.get('rackspace', 'username')
        api_key = config.get('rackspace', 'api_key')
    except ConfigParser.NoOptionError:
        # Credentials missing: ask the user and save them for next time.
        # NOTE(review): the original prompts were redacted ("******");
        # reconstructed the two obvious raw_input calls.
        username = raw_input("Rackspace Username: ")
        api_key = raw_input("Rackspace API Key: ")
        config.set('rackspace', 'username', username)
        config.set('rackspace', 'api_key', api_key)
        # Close the file deterministically (original leaked the handle).
        with open(config_file, 'w') as fh:
            config.write(fh)
    compute_driver = get_compute_driver(Compute_Provider.RACKSPACE_UK)
    lb_driver = get_lb_driver(LB_Provider.RACKSPACE_UK)(username, api_key)
    established_compute_conn = compute_driver(username, api_key)
    # TODO - replace. Libcloud hardcodes 'ord' into the URL for single
    # region support. The below is a nasty hack to overcome this.
    lb_driver.connection.lb_url = lb_driver.connection.lb_url.replace("ord.", "")
    return (compute_driver, lb_driver, established_compute_conn)
def os_compute_conn(self):
    """Create and return an OpenStack compute driver connection.

    Uses Keystone v2 password authentication against the configured
    endpoint.
    """
    # NOTE(review): auth_username, auth_password, auth_url, project_name
    # and region_name are resolved from an enclosing/module scope —
    # confirm they are module-level settings.
    driver_cls = get_compute_driver(ComputeProvider.OPENSTACK)
    return driver_cls(
        auth_username,
        auth_password,
        ex_force_auth_url=auth_url,
        ex_force_auth_version='2.0_password',
        ex_tenant_name=project_name,
        ex_force_service_region=region_name,
    )
def __init__(self, cloud_config, region):
    """Open an EC2 compute session for the given region.

    cloud_config must provide 'AWS_ACCESS_KEY_ID' and
    'AWS_SECRET_ACCESS_KEY'.
    """
    driver_cls = get_compute_driver(ComputeProvider.EC2)
    self.compute_session = driver_cls(
        cloud_config['AWS_ACCESS_KEY_ID'],
        cloud_config['AWS_SECRET_ACCESS_KEY'],
        region=region,
    )
def __init__(self):
    """ OpenStack global connection constructor """
    # Default Keystone region used by this deployment.
    self.region = 'RegionOne'
    # Driver *classes* (not yet connected instances) for compute and
    # Swift object storage.
    self.OpenStackCompute = get_compute_driver(ComputeProvider.OPENSTACK)
    self.OpenStackStorage = get_storage_driver(StorageProvider.OPENSTACK_SWIFT)
    # Point libcloud's TLS verification at the bundled CA certificates.
    # NOTE(review): this is a relative path — confirm the working
    # directory contains ca-bundle.crt at runtime.
    libcloud.security.CA_CERTS_PATH = ['ca-bundle.crt']
def __init__(self, cloud_config, region):
    """Create an EC2 compute session in `region`.

    Credentials are read from cloud_config under the keys
    'AWS_ACCESS_KEY_ID' and 'AWS_SECRET_ACCESS_KEY'.
    """
    access_id = cloud_config['AWS_ACCESS_KEY_ID']
    secret = cloud_config['AWS_SECRET_ACCESS_KEY']
    ec2_cls = get_compute_driver(ComputeProvider.EC2)
    self.compute_session = ec2_cls(access_id, secret, region=region)
def get_compute_driver(self, region):
    """ Loads a compute driver from Apache Libcloud.

    Args:
        region: Dimension Data region identifier.

    Returns:
        A connected Dimension Data node driver instance.
    """
    driver = get_compute_driver(ComputeProvider.DIMENSIONDATA)
    # Pass region by keyword: the libcloud driver's positional signature
    # is (key, secret, secure, host, port, ...), so a third *positional*
    # argument is interpreted as `secure`, not `region`.
    return driver(key=self.get_user_name(),
                  secret=self.get_user_password(),
                  region=region)
def get_compute_driver(self, region):
    """ Loads a compute driver from Apache Libcloud """
    # Instantiate the Dimension Data driver with this account's
    # credentials, connected to the requested region.
    cls = get_compute_driver(ComputeProvider.DIMENSIONDATA)
    return cls(
        key=self.get_user_name(),
        secret=self.get_user_password(),
        region=region,
    )
def get_driver(self):
    """Instantiate the configured compute driver and return the connection."""
    # Resolve the driver class named in the config, then connect with the
    # configured identity/credential pair.
    driver_name = self.config["compute_driver"]
    region = self.config["region"]
    driver_cls = get_compute_driver(driver_name)
    conn = driver_cls(self.config["identity"],
                      self.config["credential"],
                      region=region)
    log.info("Created a %s compute driver in the %s region.",
             driver_name, region)
    return conn
def get_compute_driver(self, region=None, host=None):
    """ Loads a compute driver from Apache Libcloud """
    cls = get_compute_driver(ComputeProvider.DIMENSIONDATA)
    conn = cls(key=self.get_user_name(),
               secret=self.get_user_password(),
               region=region)
    # Point the connection at a custom API endpoint when one is given.
    if host is not None:
        conn.connection.host = host
    return conn
def __init__(self, cloud_config, region):
    """Open an OpenStack compute session via Keystone v2 password auth.

    cloud_config must provide 'AUTH_URL', 'USERNAME', 'PASSWORD' and
    'TENANT_NAME'.
    """
    openstack_cls = get_compute_driver(ComputeProvider.OPENSTACK)
    self.compute_session = openstack_cls(
        cloud_config['USERNAME'],
        cloud_config['PASSWORD'],
        ex_force_auth_url=cloud_config['AUTH_URL'],
        ex_tenant_name=cloud_config['TENANT_NAME'],
        ex_force_auth_version="2.0_password",
        ex_force_service_region=region,
    )
    # Lazily-populated cache of flavor/size lookups.
    self._size_cache = {}
def get_compute_driver(self, region=None, host=None):
    """ Loads a compute driver from Apache Libcloud """
    # Connect the Dimension Data driver with this account's credentials.
    dd_cls = get_compute_driver(ComputeProvider.DIMENSIONDATA)
    driver_conn = dd_cls(
        key=self.get_user_name(),
        secret=self.get_user_password(),
        region=region,
    )
    # Optional endpoint override for private / test deployments.
    if host is not None:
        driver_conn.connection.host = host
    return driver_conn
def __init__(self, cloud_config, region):
    """Create an OpenStack compute session for `region`.

    Reads 'AUTH_URL', 'USERNAME', 'PASSWORD' and 'TENANT_NAME' from
    cloud_config and authenticates with Keystone v2 password auth.
    """
    auth_url = cloud_config['AUTH_URL']
    user = cloud_config['USERNAME']
    password = cloud_config['PASSWORD']
    tenant = cloud_config['TENANT_NAME']
    cls = get_compute_driver(ComputeProvider.OPENSTACK)
    self.compute_session = cls(user, password,
                               ex_force_auth_url=auth_url,
                               ex_tenant_name=tenant,
                               ex_force_auth_version="2.0_password",
                               ex_force_service_region=region)
    # Cache for size lookups, filled on demand.
    self._size_cache = {}
def init(self):
    # One-time controller initialization: read the 'euca' config section,
    # optionally trust a site-specific Eucalyptus CA certificate, and open
    # an EC2-compatible connection to the configured endpoint.
    if not self._initialized:
        BaseController.init(self)
        config = self.configObj.config['euca']
        self.euca_config = config
        self.config = self.configObj.config
        # Append the deployment's CA cert so TLS verification succeeds
        # against the private cloud endpoint.
        if 'eucalyptus_cert_file_path' in config:
            libcloud.security.CA_CERTS_PATH.append(config["eucalyptus_cert_file_path"])
        self.driver = get_compute_driver(ComputeProvider.EUCALYPTUS)
        ec2_url = config['ec2_url']
        # Split the configured endpoint URL into host and port for libcloud.
        url = urlparse.urlparse(ec2_url)
        self.conn = self.driver(config['ec2_access_key'], config['ec2_secret_key'],
                                host=url.hostname, port=url.port)
        self._initialized = True
        # NOTE(review): nesting reconstructed from a collapsed source line;
        # cluster.init() is assumed to belong inside the one-time branch —
        # confirm against the original file.
        self.cluster.init()
def set_drivers(self):
    """ Sets compute drivers from Apache Libcloud.

    Builds one connected Dimension Data driver per configured region and
    stores the {region: driver} mapping on self.engines (also returned).
    """
    cls = get_compute_driver(ComputeProvider.DIMENSIONDATA)
    self.engines = {
        region: cls(key=self.get_user_name(),
                    secret=self.get_user_password(),
                    region=region,
                    host=None)
        for region in self.get_regions()
    }
    return self.engines
def _get_driver(self, subscription_id, key_file):
    """Return an Azure compute driver for the given subscription.

    Authentication uses the management certificate in `key_file`.
    """
    azure_cls = get_compute_driver(ComputeProvider.AZURE)
    return azure_cls(subscription_id=subscription_id, key_file=key_file)
def create_node(name, config, deploy_name=None, deployment=None):
    """Create a compute node with a given name and configuration information.

    Exactly one of deploy_name (config key naming a script file) or
    deployment (a libcloud ScriptDeployment) must be supplied.

    Returns the deployed Node object.

    Raises:
        Exception: if neither/both of deploy_name and deployment are given.
        StopIteration: if the configured size or image cannot be found.
    """
    # Some providers require extra settings when configuring their driver.
    # Look in the config for any starting with 'ex_' and include them.
    extra = dict((x[0], x[1]) for x in config.items() if x[0][:3] == "ex_")

    # Get the compute driver we want to connect to, then pass in credentials.
    ComputeDriver = get_compute_driver(config["compute_driver"])
    compute = ComputeDriver(config["identity"], config["credential"],
                            region=config["region"], **extra)
    log.info("Created a %s compute driver in the %s region.",
             config["compute_driver"], config["region"])

    sec_group = None
    if config["provider"] == "amazon":
        # Create a security group for our nodes on Amazon.
        # Rackspace and HP do not use security groups.
        sec_group = "sxsw"
        # NOTE(review): ex_list_security_groups may return names or group
        # objects depending on driver version — confirm this membership test.
        sec_groups = compute.ex_list_security_groups()
        if sec_group not in sec_groups:
            compute.ex_create_security_group(sec_group,
                                             "Sec Group for SXSW Workshop")
            log.info("Created %s security group.", sec_group)
            # Let SSH (port 22) and web (port 80) traffic through.
            compute.ex_authorize_security_group(sec_group, 22, 22, "0.0.0.0/0")
            compute.ex_authorize_security_group(sec_group, 80, 80, "0.0.0.0/0")
            log.info("Authorized %s on ports 22 and 80.", sec_group)

    # Pair our SSH public key with the provider so we can communicate
    # with our deployed compute nodes. Use a context manager so the file
    # handle is closed (the original anonymous open() leaked it).
    with open(config["public_key"], "r") as pub:
        pub_key = pub.read()
    key_name = os.path.split(config["private_key"])[-1]
    keys = [key.name for key in compute.list_key_pairs()]
    if key_name not in keys:
        # If this key isn't already paired, import the key by choosing a name
        # and passing in the contents of the public key.
        key = compute.import_key_pair_from_string(key_name, pub_key)
        log.info("Paired %s key with provider.", key)
    else:
        log.info("Already had %s key paired.", key_name)

    # Once the node is built, it'll be a bare image. Run the configured
    # bootstrap script using libcloud's ScriptDeployment to run the system
    # updates and install Flask.
    if deploy_name and deployment is None:
        deployment = ScriptFileDeployment(config[deploy_name])
        log.info("Created ScriptFileDeployment with %s file.", deploy_name)
    elif deployment and deploy_name is None:
        assert isinstance(deployment, ScriptDeployment)
        log.info("ScriptDeployment was passed in.")
    else:
        raise Exception("Unable to determine deployment.")

    # Find the size and image we want to use when creating our node.
    # next() stops at the first match instead of materializing a filtered
    # list (the original filter(...)[0] is also Python-2-only).
    size_name = config["size"]
    size = next(s for s in compute.list_sizes() if s.id == size_name)
    img_name = config["image"]
    image = next(i for i in compute.list_images() if i.name == img_name)
    log.info("Deploying node with size=%s, image=%s", size, image)

    # Deploy our node. This calls create_node but waits for the creation to
    # complete, and then it uses paramiko to SSH into the node and run
    # the commands specified by the `deploy` argument. In order to do this,
    # the paramiko SSH client must know the private key, specified in
    # `ssh_key`. `ex_keyname` is the public key we paired up above.
    node = compute.deploy_node(name=name, image=image, size=size,
                               deploy=deployment,
                               ssh_key=config["private_key"],
                               ssh_username=config["ssh_user"],
                               ex_keyname=key_name,
                               ex_securitygroup=sec_group,
                               timeout=50000)
    log.info("Node deployed: %s", node)
    return node
# --- Load AWS credentials from a euca-style credentials file -----------
# (script fragment: cred_file, keyname, AWSAccessKey and AWSSecretKey are
# defined earlier in this script)
f = open(cred_file, "r")
for line in f:
    # Lines look like: AccessKeyId='...'; keep the first value seen for
    # each key. NOTE(review): a non-matching line makes m None and the
    # .group() call below raise AttributeError — confirm the file format.
    m = re.match(r"^(\w+)='(\w+)'$", line)
    if m.group(1) == 'AccessKeyId' and AWSAccessKey is None:
        AWSAccessKey = m.group(2)
    elif m.group(1) == 'SecretAccessKey' and AWSSecretKey is None:
        AWSSecretKey = m.group(2)
f.close()
if AWSAccessKey is None or AWSSecretKey is None:
    print "Failed to read AWS credentials from " + cred_file
    sys.exit(-1)

# Amazon EC2: connect and pre-load inventory for the status line below.
ec2_driver = get_compute_driver(ComputeProvider.EC2)
ec2_compute = ec2_driver(AWSAccessKey, AWSSecretKey, secure=False)
ec2_images = ec2_compute.list_images()
ec2_nodes = ec2_compute.list_nodes()
ec2_sizes = ec2_compute.list_sizes()
ec2_locations = ec2_compute.list_locations()
print "[EC2] loaded %d images, %d nodes, %d sizes, %d locations" % (len(ec2_images), len(ec2_nodes), len(ec2_sizes), len(ec2_locations))
# Best-effort keypair registration; a duplicate-key error is expected and
# deliberately ignored.
try:
    ec2_compute.ex_import_keypair(keyname, '/Users/tklauser/.ssh/id_rsa.pub')
    ec2_compute.ex_describe_keypairs(keyname)
except:
    pass # if the keypair already exists, just leave it
# Example: enable and run a Dimension Data backup job for one node.
import time
from pprint import pprint
from libcloud.backup.types import BackupTargetJobStatusType
from libcloud.backup.types import Provider as BackupProvider
from libcloud.backup.providers import get_driver as get_backup_driver
from libcloud.compute.providers import get_driver as get_compute_driver
from libcloud.compute.types import Provider as ComputeProvider

# Authenticated Dimension Data backup and compute connections.
backup_driver = get_backup_driver(
    BackupProvider.DIMENSIONDATA)('username', 'api key')
compute_driver = get_compute_driver(
    ComputeProvider.DIMENSIONDATA)('username', 'api key')

nodes = compute_driver.list_nodes()

# Backup the first node in the pool
selected_node = nodes[0]

print('Enabling backup for node')
new_target = backup_driver.create_target_from_node(selected_node)

print('Starting backup of node')
job = backup_driver.create_target_job(new_target)

print('Waiting for job to complete')
# Poll until the job leaves the RUNNING state.
while True:
    if job.status != BackupTargetJobStatusType.RUNNING:
        break
    else:
        # (source truncated here: the else branch presumably sleeps and
        # re-fetches `job` before looping)
# Example: enable and run a Dimension Data backup job for one node
# (black-formatted variant).
import time
from pprint import pprint
from libcloud.backup.types import BackupTargetJobStatusType
from libcloud.backup.types import Provider as BackupProvider
from libcloud.backup.providers import get_driver as get_backup_driver
from libcloud.compute.providers import get_driver as get_compute_driver
from libcloud.compute.types import Provider as ComputeProvider

# Authenticated Dimension Data backup and compute connections.
backup_driver = get_backup_driver(BackupProvider.DIMENSIONDATA)("username", "api key")
compute_driver = get_compute_driver(ComputeProvider.DIMENSIONDATA)(
    "username", "api key"
)

nodes = compute_driver.list_nodes()

# Backup the first node in the pool
selected_node = nodes[0]

print("Enabling backup for node")
new_target = backup_driver.create_target_from_node(selected_node)

print("Starting backup of node")
job = backup_driver.create_target_job(new_target)

print("Waiting for job to complete")
# Poll until the job leaves the RUNNING state.
while True:
    if job.status != BackupTargetJobStatusType.RUNNING:
        break
    else:
        # (source truncated here: the else branch presumably sleeps and
        # re-fetches `job` before looping)
# Example: enable and run a Dimension Data backup job for one node
# (single-quoted variant).
import time
from pprint import pprint
from libcloud.backup.types import BackupTargetJobStatusType
from libcloud.backup.types import Provider as BackupProvider
from libcloud.backup.providers import get_driver as get_backup_driver
from libcloud.compute.providers import get_driver as get_compute_driver
from libcloud.compute.types import Provider as ComputeProvider

# Authenticated Dimension Data backup and compute connections.
backup_driver = get_backup_driver(BackupProvider.DIMENSIONDATA)('username', 'api key')
compute_driver = get_compute_driver(ComputeProvider.DIMENSIONDATA)('username', 'api key')

nodes = compute_driver.list_nodes()

# Backup the first node in the pool
selected_node = nodes[0]

print('Enabling backup for node')
new_target = backup_driver.create_target_from_node(selected_node)

print('Starting backup of node')
job = backup_driver.create_target_job(new_target)

print('Waiting for job to complete')
# Poll until the job leaves the RUNNING state.
while True:
    if job.status != BackupTargetJobStatusType.RUNNING:
        break
    else:
        # (source truncated here: the else branch presumably sleeps and
        # re-fetches `job` before looping)
def fp_ec2_create_vms(n_nodes, cred_file=CRED_FILE, keyname=KEYNAME,
                      ec2_image_id=EC2_IMAGE_ID, ec2_size_id=EC2_SIZE_ID,
                      ec2_location_id=EC2_LOCATION_ID,
                      ec2_userdata_file=EC2_USERDATA_FILE,
                      pubkey_file=PUBKEY_FILE):
    # Start n_nodes EC2 instances with the given image/size/location and
    # user-data file, wait for each to come up, and return the Node list.
    # Returns None on a configuration error (missing credentials or
    # user-data file); exits the process if image/size/location lookups fail.
    # set to None to load values from cred_file
    AWSAccessKey = None
    AWSSecretKey = None
    # Parse euca-style credentials lines of the form Key='value'.
    f = open(cred_file, "r")
    for line in f:
        m = re.match(r"^(\w+)='(\w+)'$", line)
        if m.group(1) == 'AccessKeyId' and AWSAccessKey is None:
            AWSAccessKey = m.group(2)
        elif m.group(1) == 'SecretAccessKey' and AWSSecretKey is None:
            AWSSecretKey = m.group(2)
    f.close()
    if AWSAccessKey is None or AWSSecretKey is None:
        print "Failed to read AWS credentials from " + cred_file
        return None
    if not os.path.exists(ec2_userdata_file):
        print "Userdata file %s not found" % ec2_userdata_file
        return None
    # Amazon EC2: connect (plain HTTP) and pre-load inventory.
    ec2_driver = get_compute_driver(ComputeProvider.EC2)
    ec2_compute = ec2_driver(AWSAccessKey, AWSSecretKey, secure=False)
    ec2_images = ec2_compute.list_images()
    ec2_nodes = ec2_compute.list_nodes()
    ec2_sizes = ec2_compute.list_sizes()
    ec2_locations = ec2_compute.list_locations()
    print "[EC2] loaded %d images, %d nodes, %d sizes, %d locations" % (len(ec2_images), len(ec2_nodes), len(ec2_sizes), len(ec2_locations))
    # Best-effort keypair registration; duplicate-key errors are ignored.
    try:
        ec2_compute.ex_import_keypair(keyname, pubkey_file)
        ec2_compute.ex_describe_keypairs(keyname)
    except:
        pass # if the keypair already exists, just leave it
    # Select NodeImage to start
    ec2_image = None
    for img in ec2_images:
        if img.id == ec2_image_id:
            ec2_image = img
            break
    if ec2_image is None:
        print "[EC2] Couldn't find image with ID " + ec2_image_id
        sys.exit(-1)
    # Select the instance size (flavor).
    ec2_size = None
    for sz in ec2_sizes:
        if sz.id == ec2_size_id:
            ec2_size = sz
            break
    if ec2_size is None:
        print "[EC2] Couldn't find size with ID " + ec2_size_id
        sys.exit(-1)
    # Select the availability location.
    ec2_location = None
    for loc in ec2_locations:
        if loc.id == ec2_location_id:
            ec2_location = loc
            break
    if ec2_location is None:
        print "[EC2] Couldn't find location"# + ec2_location_name
        sys.exit(-1)
    ips = []
    nodes = []
    # User-data script passed to each instance at boot.
    ud = open(ec2_userdata_file, 'r').read()
    print "[EC2] Starting %d nodes..." % n_nodes
    for i in range(0, n_nodes):
        ec2_node = ec2_compute.create_node(name='lsci2012', image=ec2_image,
                                           size=ec2_size, location=ec2_location,
                                           ex_keyname=keyname,
                                           ex_securitygroup='lsci2012',
                                           ex_userdata=ud)
        # wait until nodes are running
        print "[EC2] Waiting for node %d to become available..." % i
        ec2_running = False
        while not ec2_running:
            # Re-list and pick the first node with our keypair that is
            # running, has a public IP, and hasn't been claimed yet.
            # NOTE(review): state == 0 appears to be NodeState.RUNNING in
            # this libcloud version — confirm.
            for node in ec2_compute.list_nodes():
                if node.extra['keyname'] == keyname:
                    if node.state == 0 and len(node.public_ips) > 0 and not node.public_ips[0] in ips:
                        ec2_node = node
                        ips.append(ec2_node.public_ips[0])
                        nodes.append(node)
                        ec2_running = True
                        break
            if not ec2_running:
                time.sleep(2)
        print "[EC2] Node {0} with IP {1} running".format(i, ec2_node.public_ips[0])
    return nodes
# Example: create a DNS zone and records for existing Rackspace nodes.
from pprint import pprint
from libcloud.compute.providers import get_driver as get_compute_driver
from libcloud.compute.types import Provider as ComputeProvider
from libcloud.dns.providers import get_driver as get_dns_driver
from libcloud.dns.types import Provider as DNSProvider, RecordType

CREDENTIALS_RACKSPACE = ('username', 'api key')
CREDENTIALS_ZERIGO = ('email', 'api key')

Cls = get_compute_driver(ComputeProvider.RACKSPACE)
compute_driver = Cls(*CREDENTIALS_RACKSPACE)

Cls = get_dns_driver(DNSProvider.ZERIGO)
dns_driver = Cls(*CREDENTIALS_ZERIGO)

# Retrieve all the nodes
nodes = compute_driver.list_nodes()

# Create a new zone
zone = dns_driver.create_zone(domain='mydomain2.com')

created = []
for node in nodes:
    name = node.name
    # NOTE(review): node.public_ip is the legacy attribute; modern libcloud
    # exposes node.public_ips (a list) — verify against the pinned version.
    ips = node.public_ip
    # Skip nodes without a public address.
    if not ips:
        continue
    # (source truncated here: record creation for each node follows)
# Example: create a DNS zone and records for existing Rackspace nodes
# (modernized variant using node.public_ips).
from pprint import pprint
from libcloud.compute.providers import get_driver as get_compute_driver
from libcloud.compute.types import Provider as ComputeProvider
from libcloud.dns.providers import get_driver as get_dns_driver
from libcloud.dns.types import Provider as DNSProvider
from libcloud.dns.types import RecordType

CREDENTIALS_RACKSPACE = ("username", "api key")
CREDENTIALS_ZERIGO = ("email", "api key")

cls = get_compute_driver(ComputeProvider.RACKSPACE)
compute_driver = cls(*CREDENTIALS_RACKSPACE)

cls = get_dns_driver(DNSProvider.ZERIGO)
dns_driver = cls(*CREDENTIALS_ZERIGO)

# Retrieve all the nodes
nodes = compute_driver.list_nodes()

# Create a new zone
zone = dns_driver.create_zone(domain="mydomain2.com")

created = []
for node in nodes:
    name = node.name
    # First public IP, or None when the node has no public address.
    ip = node.public_ips[0] if node.public_ips else None
    if not ip:
        # (source truncated here: presumably `continue`, then record
        # creation for each node)