def ec2secgroup_openport(cloudname, port):
    """Open the given port on the 'default' EC2 security group of the named cloud."""
    cloud = None
    portnum = 0
    try:
        cloud = openstack(cloudname)
        cloud.get_token()
    except Exception:
        print("Cloud name or credential wrong! Please check your cloudmesh.yaml file")
        return
    try:
        portnum = int(port)
    except ValueError:
        print("Invalid port number")
        return
    mygroup = Ec2SecurityGroup("default")
    groupid = cloud.find_security_groupid_by_name(mygroup.name)
    # print groupid
    rule = Ec2SecurityGroup.Rule(portnum, portnum)
    cloud.add_security_group_rules(groupid, [rule])
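# Minimal usage sketch (an assumption, not part of the original module): the
# cloud label "india" is illustrative only and must match an OpenStack cloud
# defined in the user's cloudmesh.yaml.
if __name__ == "__main__":
    # Opens TCP port 8080 on the 'default' security group of the named cloud.
    # ec2secgroup_openport prints an error and returns early if the
    # credentials are wrong or the port is not an integer.
    ec2secgroup_openport("india", "8080")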
def test_20_create_secgroup(self):
    """test security group"""
    # print "defining a group"
    # mygroup = Ec2SecurityGroup("testSecGroupCM")
    mygroup = Ec2SecurityGroup("default")
    # print "defining a security rule"
    # rule1 = Ec2SecurityGroup.Rule(8088, 8088)
    # rule2 = Ec2SecurityGroup.Rule(9090, 9099, "UDP")
    # rules = [rule1, rule2]
    # print self.cloud.create_security_group(mygroup, rules)
    # mygroup.set_rules(rules)
    # print self.cloud.create_security_group(mygroup)
    groupid = self.cloud.find_security_groupid_by_name(mygroup.name)
    # print groupid
    assert groupid is not None
    rule3 = Ec2SecurityGroup.Rule(5000, 5000)
    # rule3 = Ec2SecurityGroup.Rule(22, 22)
    rule4 = Ec2SecurityGroup.Rule(-1, -1, 'ICMP')
    print(self.cloud.add_security_group_rules(groupid, [rule3, rule4]))
    groupid = self.cloud.find_security_groupid_by_name(
        "dummy_name_not_exist")
    print(groupid)
    assert groupid is None
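# Hedged sketch (an assumption, not part of the original test suite): it
# illustrates how the Ec2SecurityGroup.Rule objects used above are built.
# The third argument appears to be the protocol and is assumed to default
# to TCP when omitted.
ssh_rule = Ec2SecurityGroup.Rule(22, 22)               # single TCP port
udp_range = Ec2SecurityGroup.Rule(9090, 9099, "UDP")   # UDP port range
icmp_all = Ec2SecurityGroup.Rule(-1, -1, "ICMP")       # -1/-1 covers all ICMP types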
class cm_mongo:

    clouds = {}
    client = None
    db_clouds = None

    mongo_host = 'localhost'
    mongo_port = 27017
    mongo_db_name = "cloudmesh"
    mongo_collection = "cloudmesh"

    ssh_ec2_rule = Ec2SecurityGroup.Rule(22, 22)

    config = None

    cm_user = None
    userinfo = None
    userid = None

    activated = False

    def __init__(self, collection="cloudmesh"):
        """initializes the cloudmesh mongo db. The name of the collection is passed."""
        # DEBUG
        try:
            _args = locals()
            del (_args['self'])
            log.debug("[{0}()] called with [{1}]".format(
                sys._getframe().f_code.co_name,
                str(_args)))
        except:
            pass

        defaults_collection = 'defaults'
        passwd_collection = 'password'
        user_collection = "user"
        self.userdb_passwd = get_mongo_db(passwd_collection)
        self.db_defaults = get_mongo_db(defaults_collection)
        self.db_user = get_mongo_db(user_collection)
        self.db_clouds = get_mongo_db(collection)
        self.config = cm_config()

    def cloud_provider(self, kind):
        '''
        returns the cloud provider class based on the kind
        :param kind: the kind is openstack, eucalyptus, or azure
                     (< is not yet supported)
        '''
        provider = None
        if kind == 'openstack':
            provider = openstack
        elif kind == 'eucalyptus':
            provider = eucalyptus
        elif kind == 'azure':
            provider = azure
        elif kind == 'aws':
            provider = aws
        elif kind == 'ec2':
            provider = ec2
        return provider

    #
    # BUG NO USER IS INVOLVED
    #
    def get_credential(self, cm_user_id, cloud):
        # DEBUG
        try:
            _args = locals()
            if 'self' in _args:
                del (_args['self'])
            log.debug("[{0}()] called with [{1}]".format(
                sys._getframe().f_code.co_name,
                str(_args)))
        except:
            pass

        try:
            password = cm_config_server().get(
                "cloudmesh.server.mongo.collections.password.key")
            safe_credential = (self.userdb_passwd.find_one({
                "cm_user_id": cm_user_id,
                "cloud": cloud
            }))["credential"]
            # print "SK", safe_credential
            # decrypt every credential value with the server-side key
            for cred in safe_credential:
                t = safe_credential[cred]
                n = decrypt(t, password)
                safe_credential[cred] = n
            return safe_credential
        except:
            print(traceback.format_exc())
            return None

    def get_cloud_info(self, cm_user_id, cloudname):
        # DEBUG
        try:
            _args = locals()
            if 'self' in _args:
                del (_args['self'])
            log.debug("[{0}()] called with [{1}]".format(
                sys._getframe().f_code.co_name,
                str(_args)))
        except:
            pass

        cloud_config = self.config.cloud(cloudname)
        # drop credentials from the static config; they are merged back in
        # from the per-user password store below
        if cloud_config['cm_type'] in ['openstack']:
            del cloud_config['credentials']['OS_USERNAME']
            del cloud_config['credentials']['OS_PASSWORD']
            del cloud_config['credentials']['OS_TENANT_NAME']
        elif cloud_config['cm_type'] in ['ec2']:
            del cloud_config['credentials']['EC2_ACCESS_KEY']
            del cloud_config['credentials']['EC2_SECRET_KEY']
        elif cloud_config['cm_type'] in ['aws']:
            if 'EC2_ACCESS_KEY' in cloud_config['credentials']:
                del cloud_config['credentials']['EC2_ACCESS_KEY']
                del cloud_config['credentials']['EC2_SECRET_KEY']
        elif cloud_config['cm_type'] in ['azure']:
            del cloud_config['credentials']['subscriptionid']

        credential = self.get_credential(cm_user_id, cloudname)
        # print "C", credential
        for key in credential:
            if key not in cloud_config['credentials']:
                cloud_config['credentials'][key] = credential[key]

        #
        # THIS SEEMS TO BE A BUG???? sos = sierra openstack, why only sierra?
        # if (cloud_config['cm_type'] in [ 'openstack' ]) and (cloud_config['cm_label'] in ['sos', 'ios_havana']):
        cloud_config['credentials'][
            'OS_TENANT_NAME'] = self.active_project(cm_user_id)
        return cloud_config

    def get_cloud(self, cm_user_id, cloud_name, force=False):
        # DEBUG
        try:
            _args = locals()
            if 'self' in _args:
                del (_args['self'])
            log.debug("[{0}()] called with [{1}]".format(
                sys._getframe().f_code.co_name,
                str(_args)))
        except:
            pass

        cloud = None
        # do we recreate a cloud instance?
        # recreate only when user/tenant is changed for a certain cloud
        recreate = False

        cloud_info = self.get_cloud_info(cm_user_id, cloud_name)
        # credential = self.config.cloud(cloud_name)
        cm_type = cloud_info['cm_type']
        cm_type_version = cloud_info['cm_type_version']
        cm_service_url_type = 'publicURL'
        if 'cm_service_url_type' in cloud_info:
            cm_service_url_type = cloud_info['cm_service_url_type']
        credentials = cloud_info['credentials']
        # print "D", credentials

        # we can force an update
        if force:
            recreate = True
        # new user
        elif cm_user_id not in self.clouds:
            recreate = True
        # new cloud for that user
        elif cloud_name not in self.clouds[cm_user_id]:
            recreate = True
        # manager pointer does not exist
        elif 'manager' not in self.clouds[cm_user_id][cloud_name]:
            recreate = True
        # manager object ref is None
        elif not self.clouds[cm_user_id][cloud_name]['manager']:
            recreate = True
        # for openstack, we check if tenant_name was recently changed
        elif 'OS_TENANT_NAME' in credentials and \
                hasattr(self.clouds[cm_user_id][cloud_name]['manager'], 'user_token') and \
                'access' in self.clouds[cm_user_id][cloud_name]['manager'].user_token and \
                self.clouds[cm_user_id][cloud_name]['manager'].user_token['access']['token']['tenant'][
                    'name'] != credentials['OS_TENANT_NAME']:
            recreate = True
        # in most cases we return the existing object ref
        else:
            return self.clouds[cm_user_id][cloud_name]['manager']

        # in case a new object needs to be created
        if recreate:
            try:
                if cm_user_id not in self.clouds:
                    self.clouds[cm_user_id] = {}
                if cm_type in ['openstack', 'eucalyptus', 'azure', 'ec2', 'aws']:
                    self.clouds[cm_user_id][cloud_name] = {
                        'name': cloud_name,
                        'cm_type': cm_type,
                        'cm_type_version': cm_type_version}
                    provider = self.cloud_provider(cm_type)
                    if cm_type in ['openstack']:
                        cloud = provider(cloud_name, credentials,
                                         service_url_type=cm_service_url_type)
                    else:
                        cloud = provider(cloud_name, credentials)
                    log.debug("Created new cloud instance for cloud name: %s, type: %s"
                              % (cloud_name, cm_type))
                    if cm_service_url_type == 'internalURL':
                        log.debug("The cloud is working in INTERNAL mode")
                    if cm_type in ['openstack', 'ec2']:
                        if cm_type in ['openstack']:
                            log.debug("\tfor tenant: %s" %
                                      credentials['OS_TENANT_NAME'])
                        if not cloud.auth():
                            cloud = None
                            log.error("Authentication Failed, cloud is not activated")

                    self.clouds[cm_user_id][cloud_name].update({'manager': cloud})
                    if cloud is not None:
                        self.refresh(cm_user_id, [cloud_name], ['servers'])
                        # make sure the 'default' security group of an
                        # openstack cloud allows ssh (port 22)
                        if cm_type in ['openstack']:
                            secgroups = cloud.list_security_groups()['security_groups']
                            for secgroup in secgroups:
                                if secgroup['name'] == 'default':
                                    foundsshrule = False
                                    for rule in secgroup['rules']:
                                        existRule = Ec2SecurityGroup.Rule(
                                            rule['from_port'], rule['to_port'])
                                        if existRule == self.ssh_ec2_rule:
                                            foundsshrule = True
                                            log.debug(
                                                "Ec2 security group rule allowing ssh "
                                                "exists for cloud: %s, type: %s, tenant: %s"
                                                % (cloud_name, cm_type,
                                                   credentials['OS_TENANT_NAME']))
                                    if not foundsshrule:
                                        iddefault = cloud.find_security_groupid_by_name(
                                            'default')
                                        cloud.add_security_group_rules(
                                            iddefault, [self.ssh_ec2_rule])
                                        log.debug(
                                            "Added Ec2 security group rule to allow ssh "
                                            "for cloud: %s, type: %s, tenant: %s"
                                            % (cloud_name, cm_type,
                                               credentials['OS_TENANT_NAME']))
            except Exception as e:
                cloud = None
                log.error("Cannot activate cloud {0} for {1}\n{2}".format(
                    cloud_name, cm_user_id, e))
                print(traceback.format_exc())
        return cloud
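# Hedged usage sketch (an assumption, not part of cm_mongo): it assumes a
# running local mongo instance, a populated cloudmesh.yaml, and that "gregor"
# and "india" are a valid user id and OpenStack cloud label; all of these
# names are placeholders.
mongo = cm_mongo()
manager = mongo.get_cloud("gregor", "india")
if manager is None:
    print("cloud could not be activated")
else:
    # For OpenStack clouds, get_cloud has already added ssh_ec2_rule to the
    # 'default' security group if it was missing, so port 22 should be open.
    print(manager.list_security_groups()['security_groups'])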