def execute(self, name, command, cwd=None):
    """
    Run a shell command inside the named vagrant VM via ``vagrant ssh``.

    :param name: name of the vagrant VM
    :param command: shell command to run inside the VM
    :param cwd: recorded in the argument dict for interface compatibility;
                the working directory actually used is derived from the
                configured vagrant path
    :return: result of Shell.execute
    """
    arg = dotdict()
    arg.cwd = cwd
    arg.command = command
    arg.name = name
    config = Config()
    cloud = "vagrant"  # TODO: must come through parameter or set cloud
    arg.path = config.data["cloudmesh"]["cloud"][cloud]["default"]["path"]
    arg.directory = os.path.expanduser("{path}/{name}".format(**arg))
    # BUG FIX: the original rebound ``arg`` to the string
    # "ssh {} -c {}".format(...) here, so the subsequent ``arg.directory``
    # access raised AttributeError. The string was unused and is removed,
    # as is the unused ``vms = self.to_dict(self.nodes())`` lookup.
    result = Shell.execute("vagrant",
                           ["ssh", name, "-c", command],
                           cwd=arg.directory)
    return result
def __init__(self, name):
    """
    Read the Google Cloud defaults and credentials for the named provider
    from cloudmesh.yaml and set the OAuth scopes needed for Google
    Compute Engine.

    :param name: name of cloud provider in cloudmesh.yaml file under
                 cloudmesh.volume
    """
    self.cloud = name
    self.cm = CmDatabase()
    configuration = Config()
    self.default = configuration[f"cloudmesh.volume.{name}.default"]
    self.credentials = configuration[f"cloudmesh.volume.{name}.credentials"]
    # scopes granting compute plus general cloud-platform access
    self.compute_scopes = [
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
        'https://www.googleapis.com/auth/compute.readonly',
    ]
def check_yaml(self):
    """
    Sanity-check the user's cloudmesh.yaml: flag legacy chameleon
    credentials, compare the file's version against the production
    version, and run yamllint over the file, reporting only relevant
    findings.
    """
    config = Config()
    errors = False
    # legacy chameleon entries stored OS_PASSWORD directly; its presence
    # marks an outdated cloudmesh.yaml
    if "chameleon" in config["cloudmesh.cloud"]:
        credentials = config["cloudmesh.cloud.chameleon.credentials"]
        if "OS_PASSWORD" in credentials:
            Console.error(
                "You are using an old version of the cloudmesh.yaml file")
        else:
            Console.ok("your yaml file seems ok")
    location = config.location
    print(f"Location of cloudmesh.yaml: {location}")
    print(
        "we run a simple yamllint test. Not all issues reported need to be corrected."
    )
    current = config["cloudmesh.version"]
    # NOTE(review): ``latest`` is not defined in this method; it is
    # presumably a module-level value holding the production version --
    # confirm it is in scope before this runs.
    if latest == current:
        Console.ok(f"OK. you have cloudmesh.yaml version {current}")
    else:
        Console.warning(
            f"Your cloudmesh.yaml file version is not up to date.")
        Console.warning(f"Production version: {latest}")
        Console.warning(f"Your version: {current}")
        Console.warning(f"Please carefully check your cloudmesh.yaml")
    yamlfile = Path(f"{location}/cloudmesh.yaml")
    if sys.platform in ["win32"]:
        # on windows only the exit code is captured; output goes straight
        # to the console (``r`` is otherwise unused)
        r = os.system((f"yamllint {yamlfile}"))
    else:
        result = Shell.run(f"yamllint {yamlfile}")
        for line in result.splitlines():
            line = line.strip()
            # yamllint complaints that are irrelevant for cloudmesh.yaml
            if "missing document start" in line:
                continue
            if "line too long" in line:
                continue
            if "error" in line:
                errors = True
                Console.error(line)
            else:
                print(line)
    if errors:
        Console.error("We found issues in your yaml file")
    else:
        Console.ok(f"OK. Your yaml file {yamlfile} seems valid")
def get_client(self, service='emr'):
    """
    Connects to AWS and creates a boto3 client for the requested service,
    using the EC2 credentials and region stored in cloudmesh.yaml under
    cloudmesh.cloud.aws.credentials.

    :param service: The service to create a client for. Either S3 or EMR.
    :return: boto3.client
    """
    configs = Config()
    key_id = configs['cloudmesh.cloud.aws.credentials.EC2_ACCESS_ID']
    access_key = configs['cloudmesh.cloud.aws.credentials.EC2_SECRET_KEY']
    region = configs['cloudmesh.cloud.aws.credentials.region']
    client = boto3.client(service,
                          region_name=region,
                          aws_access_key_id=key_id,
                          aws_secret_access_key=access_key)
    return client
def __init__(self):
    """
    Borg-style initializer: every instance shares ``__shared_state``.
    On first use, load the mongo section of cloudmesh.yaml, reject
    default passwords, and ensure the mongo data and log directories
    exist.

    :raises Exception: if MONGO_PASSWORD is still a default value
    """
    self.__dict__ = self.__shared_state
    if "data" not in self.__dict__:
        self.config = Config()
        self.data = dotdict(self.config.data["cloudmesh"]["data"]["mongo"])
        self.expanduser()
        if self.data.MONGO_PASSWORD in ["TBD", "admin"]:
            Console.error("MongoDB password must not be the default")
            raise Exception("password error")
        # BUG FIX: exist_ok avoids the check-then-create race of the
        # original ``if not os.path.exists(path): os.makedirs(path)``
        for path in (self.data["MONGO_PATH"], self.data["MONGO_LOG"]):
            os.makedirs(path, exist_ok=True)
def __init__(self, service=None, config="~/.cloudmesh/cloudmesh.yaml"):
    """
    Initialize the storage provider: create an S3 client from the
    credentials supplied by the superclass, an AWS provider for
    transfers, and record the configured local staging directory.

    :param service: name of the storage service in cloudmesh.yaml
    :param config: path of the cloudmesh.yaml file
    """
    super().__init__(service=service, config=config)
    self.config = Config()
    # BUG FIX: the original also built an unused ``credential`` dict with
    # the same three values passed below; it was dead code and is removed.
    self.s3_client = boto3.client(
        's3',
        aws_access_key_id=self.credentials['access_key_id'],
        aws_secret_access_key=self.credentials['secret_access_key'],
        region_name=self.credentials['region'])
    self.aws_provider = AWSProv(service='aws')
    self.local_dir = self.config["cloudmesh"]["storage"]["local"]["dir"]
def __init__(self, name=None):
    """
    Initialize provider, create boto3 ec2 client, get the default dict.

    :param name: name of cloud
    """
    self.cloud = name
    configuration = Config()
    self.default = configuration[f"cloudmesh.volume.{self.cloud}.default"]
    self.cred = configuration[f'cloudmesh.volume.{self.cloud}.credentials']
    # the ec2 client is bound to the region configured in the defaults
    self.client = boto3.client(
        'ec2',
        region_name=self.default['region_name'],
        aws_access_key_id=self.cred['EC2_ACCESS_ID'],
        aws_secret_access_key=self.cred['EC2_SECRET_KEY'])
    self.cm = CmDatabase()
def copy(self, source=None, target=None, source_file_dir=None,
         target_fil_dir=None):
    """
    Copy a file between google storage, the local disk, and aws.

    :param source: "local" (upload to google) or "google"
    :param target: when source is "google": "aws" or "local"
    :param source_file_dir: source file path / blob name
    :param target_fil_dir: target file path / blob name
    :return: the status reported by the underlying transfer, or None
    :raises NotImplementedError: for unsupported source values
    """
    print((source_file_dir, target_fil_dir))
    status = None
    if source == "local":
        print("Copying file from Local to Google")
        source_path = self.getLocalPath(source_file_dir)
        # normalize windows separators for the blob name
        target_fil_dir = target_fil_dir.replace("\\", "/")
        status = self.uploadfile(source_path, target_fil_dir)
        Console.ok(
            f"File copied from {source_file_dir} to {target_fil_dir}")
    elif source == "google":
        if target == "aws":
            pprint(
                f"Copying from Google {source_file_dir} to AWS {target_fil_dir}"
            )
            # stage the blob locally, then push it to aws
            target_local = self.getLocalPath(
                self.local_dir) + "/" + source_file_dir
            self.download_blob(source_file_dir, target_local)
            target = AWS_Provider(service="aws")
            # BUG FIX: an unused Config() instance was removed here
            status = target.put(target_local, target_fil_dir, True)
            if status is None:
                return Console.error(
                    f" Cannot Copy {source_file_dir} to {target_fil_dir}")
            else:
                Console.ok(
                    f"File copied from {source_file_dir} to {target_fil_dir}"
                )
        elif target == "local":  # To fix bucket, file
            target_fil_dir = self.getLocalPath(target_fil_dir)
            status = self.download_blob(source_file_dir, target_fil_dir)
            Console.ok(
                f"File copied from {source_file_dir} to {target_fil_dir}")
    else:
        # BUG FIX: the original evaluated ``NotImplementedError`` as a
        # bare expression without raising it, silently returning None
        raise NotImplementedError
    return status
def __init__(self, cloudname='aws',
             configuration="~/.cloudmesh/cloudmesh.yaml"):
    """
    Load the AWS credentials for the given cloud from cloudmesh.yaml and
    warn when any required value is still the TBD placeholder.

    :param cloudname: provider name under cloudmesh.cloud (only "aws"
                      is supported)
    :param configuration: path of the cloudmesh.yaml file
    :raises ValueError: when the provider is not aws
    """
    self.cloudname = cloudname
    self.configuration = configuration
    self.yaml_content = Config(configuration)
    prefix = f"cloudmesh.cloud.{self.cloudname}"
    self.kind = self.yaml_content[f"{prefix}.cm.kind"]
    banner(f"Working on {self.kind} cloud service.")
    # guard clause: anything other than aws is rejected immediately
    if self.cloudname != "aws":
        Console.error(f"\nProvider {self.cloudname} not supported")
        raise ValueError(f"Provider {self.cloudname} not supported")
    self.ACCESS_KEY = self.yaml_content[f"{prefix}.credentials.EC2_ACCESS_ID"]
    self.SECRET_KEY = self.yaml_content[f"{prefix}.credentials.EC2_SECRET_KEY"]
    self.REGION_ID = self.yaml_content[f"{prefix}.credentials.region"]
    if 'TBD' in (self.ACCESS_KEY, self.SECRET_KEY, self.REGION_ID):
        Console.error("Critical details missing from .yaml file. TBD not allowed. Please check.")
def __init__(self, organization="cloudmesh-community"):
    """
    Connect to github with the credentials from cloudmesh.yaml and look
    up the organization and its TA team.

    :param organization: must be "cloudmesh-community"
    :raises ValueError: when credentials are missing from the yaml file
                        or the organization is not cloudmesh-community
    """
    config = Config()
    try:
        client = Github(config["cloudmesh.github.user"],
                        config["cloudmesh.github.password"])
    except KeyError:
        Console.error("Make sure the cloudmesh.yaml file defines both \n"
                      " cloudmesh.github.user\n"
                      " cloudmesh.github.password")
        raise ValueError("cloudmesh.github")
    if organization != "cloudmesh-community":
        raise ValueError(
            "currently we support only organization cloudmesh-community")
    self.org = client.get_organization(organization)
    # 2631498 is the fixed id of the TA team in this organization
    self.ta_team = self.org.get_team(2631498)
def test_git(self):
    """
    Fetch the github ssh keys of the configured user, print them, and
    assert at least one key exists.
    """
    HEADING()
    config = Config()
    username = config["cloudmesh.profile.github"]
    # NOTE(review): the original source was garbled here (a secret
    # scrubber replaced several tokens with ******). The three lines
    # below are a best-effort reconstruction -- confirm against the
    # version-control history.
    print("Username:", username)
    keys = SSHkey().get_from_git(username)
    print(Printer.flatwrite(keys,
                            sort_keys=["name"],
                            order=["name", "fingerprint"],
                            header=["Name", "Fingerprint"]))
    assert len(keys) > 0
def __init__(self, service='compute'):
    """
    Create a Google BigQuery client from the service-account file and
    project id configured in cloudmesh.yaml.

    :param service: kept for interface compatibility; a bigquery client
                    is always built regardless of its value
    """
    VERBOSE("initialize google big query manager")
    self.config = Config()
    # BUG FIX: the original initialized self.service_account_file and
    # self.project_id to None and printed self.service_account_file
    # *before* assigning it (always printing None); the redundant
    # placeholders and the two debug prints were removed.
    self.service_account_file = self.config[
        'cloudmesh.cloud.google.credentials.path_to_json_file']
    self.credentials = service_account.Credentials.from_service_account_file(
        self.service_account_file)
    self.project_id = self.config[
        'cloudmesh.cloud.google.credentials.project']
    self.client = bigquery.Client(credentials=self.credentials,
                                  project=self.project_id)
def __init__(self, debug):
    """
    Initializes the virtualcluster class

    :param debug: switch the debug information on and off
    """
    # the workspace yaml lives next to this module
    here = os.path.dirname(os.path.realpath(__file__))
    self.workspace = os.path.join(here, "vcluster_workspace/now.yaml")
    workspace_dir = os.path.dirname(self.workspace)
    if not os.path.exists(workspace_dir):
        os.makedirs(workspace_dir)
    self.cm_config = Config()
    self.vcluster_config = GenericConfig(self.workspace)
    self.debug = debug
    self.all_pids = []
    self.virt_cluster = {}
    self.runtime_config = {}
    self.job_metadata = {}
def copy(self, source=None, target=None, source_file_dir=None,
         target_fil_dir=None):
    """
    Copy a file between aws storage, the local disk, and google storage.

    :param source: "local" or "aws"
    :param target: "aws", "google" or "local"
    :param source_file_dir: source file path / object name
    :param target_fil_dir: target file path / object name
    :return: the status reported by the underlying transfer, or None
    """
    # BUG FIX: the original concatenated the arguments with ``+`` which
    # raises TypeError when any of them is None (all default to None);
    # printing them as separate arguments is None-safe
    print(source, target, source_file_dir, target_fil_dir)
    status = None
    if source == "local" and target == "aws":
        print(source_file_dir)
        source_path = self.getLocalPath(source_file_dir)
        # normalize windows separators for the object name
        target_fil_dir = target_fil_dir.replace("\\", "/")
        status = self.aws_provider.put(source_path, target_fil_dir, True)
    elif source == "aws":
        if target == "google":
            # stage the object locally, then upload it to google
            target_local = self.getLocalPath(
                self.local_dir) + "/" + source_file_dir
            print("source_file=" + source_file_dir + " Target= " +
                  target_local)
            status = self.aws_provider.get(source_file_dir, target_local,
                                           recursive=True)
            if status is not None:
                google_provider = Google_Provider(service="google")
                # BUG FIX: an unused Config() instance was removed here
                status = google_provider.uploadfile(target_local,
                                                    target_fil_dir)
            else:
                Console.error("File cannot be downloaded from AWS")
        elif target == "local":
            target_fil_dir = self.getLocalPath(target_fil_dir)
            status = self.aws_provider.get(source_file_dir, target_fil_dir,
                                           True)
    if status is None:
        return Console.error(
            f"{source_file_dir} is not copied to {target_fil_dir}")
    else:
        Console.ok(f"File copied from {source} to {target}")
    return status
def create_name(): """ Gregor suggests to use from cloudmesh.management.configuration.Name import Name as VolumeName config = Config() n = VolumeName( user=config["cloudmesh.profile.username"], kind="volume", path=f"{config.location}/volume.yaml", schema="{user}-volume-{counter}" counter=1) n.incr() counter = n.get() :return: """ # please edit ~ /.cloudmesh / volume.yaml as following: # counter: 1 # kind: volume config = Config() directory = f"{config.location}" volume_yaml = path_expand(f"{directory}/volume.yaml") try: with open(volume_yaml) as file: dic = yaml.load(file, Loader=yaml.FullLoader) counter = dic["counter"] user = dic["user"] created_name = f"{user}-{cloud}-{counter}" dic["counter"] += 1 with open(volume_yaml, 'w') as file: documents = yaml.dump(dic, file) except: Console.error("the volume.yaml file does not exist." "You need to implement " "the case so it gets autogenerated") raise NotImplementedError return created_name
def setup_class(self):
    """Runs once per class"""
    # Create random Bucket Name (must be lowercase)
    suffix = str(random.randint(10000, 100000))
    self.storage_bucket = "cloudmeshtest" + suffix
    self.storage_lifespan = random.randint(1, 365)
    # Load config values from cloudmesh.yaml. The misspelled attribute
    # name ``credentails`` is kept, since other tests may reference it.
    self.config = Config()
    self.credentails = self.config["cloudmesh"]["storage"]["aws"][
        "credentials"]
    creds = self.credentails
    # Create client connection
    self.s3_client = boto3.client(
        's3',
        aws_access_key_id=creds["api_access_id"],
        aws_secret_access_key=creds["api_secret_key"],
        region_name=creds["region"])
def list_all():
    """
    List the services registered in cloudmesh.yaml that belong to one of
    the known service categories, together with the kinds (provider
    names) configured under each.

    :return: list of dicts of the form {"service": name, "kind": [names]}
    """
    config = Config()
    config_service = config["cloudmesh"]
    # BUG FIX: the original tested ``item in "cloud storage volume data
    # cluster"`` -- a *substring* check, so e.g. "age", "loud" or "data c"
    # would wrongly match. Exact membership in a set is used instead.
    categories = {"cloud", "storage", "volume", "data", "cluster"}
    service_list = []
    for item in config_service:
        if item in categories:
            service_list.append({
                "service": item,
                "kind": list(config_service[item]),
            })
    return service_list
def json_to_yaml(name, filename="~/.cloudmesh/google.json"): """ given a json file downloaded from google, copies the content into the cloudmesh yaml file, while overwriting or creating a new storage provider :param filename: :return: """ # creates cloud,esh.storgae.{name} path = path_expand(filename) with open(path, "r") as file: d = json.load(file) config = Config() # # BUG START FROM THE sample # element = { "cm": { "name": name, "active": 'true', "heading": "GCP", "host": "https://console.cloud.google.com/storage", "kind": "google", "version": "TBD", "service": "storage" }, "default": { "directory": "cloudmesh_gcp", "Location_type": "Region", "Location": "us - east1", "Default_storage_class": "Standard", "Access_control": "Uniform", "Encryption": "Google-managed", "Link_URL": "https://console.cloud.google.com/storage/browser/cloudmesh_gcp", "Link_for_gsutil": "gs://cloudmesh_gcp" }, "credentials": d } config["cloudmesh"]["storage"][name] = element config.save() pprint(config["cloudmesh"]["storage"][name])
def __init__(self, service=None, config="~/.cloudmesh/cloudmesh.yaml"):
    """
    Dispatch constructor: select and instantiate the concrete storage
    provider implementation named by cloudmesh.storage.{service}.cm.kind.

    :param service: name of the storage service in cloudmesh.yaml
    :param config: path of the cloudmesh.yaml file (default fixed from
                   the original "~/.cloudmesh/.cloudmesh.yaml", which is
                   inconsistent with every other provider in this file)
    :raises NotImplementedError: when the kind is unknown
    """
    super(Provider, self).__init__(service=service, config=config)
    self.config = Config()
    # BUG FIX: the original read ``config[f"..."]`` where ``config`` is
    # the yaml *path string*, not the Config object -- indexing a str
    # with a str key raises TypeError. self.config is used instead.
    self.kind = self.config[f"cloudmesh.storage.{service}.cm.kind"]
    self.cloud = service
    self.service = service
    Console.msg("FOUND Kind", self.kind)
    # each branch imports lazily so only the selected provider's
    # dependencies are loaded
    if self.kind in ["awsS3"]:
        from cloudmesh.storage.provider.awss3 import \
            Provider as AwsStorageProvider
        self.p = AwsStorageProvider(service=service, config=config)
    elif self.kind in ["parallelawsS3"]:
        from cloudmesh.storage.provider.parallelawss3 import \
            Provider as ParallelAwsStorageProvider
        self.p = ParallelAwsStorageProvider(service=service, config=config)
    elif self.kind in ["box"]:
        from cloudmesh.storage.provider.box import \
            Provider as BoxStorageProvider
        self.p = BoxStorageProvider(service=service, config=config)
    elif self.kind in ["gcpbucket"]:
        from cloudmesh.google.storage.Provider import \
            Provider as GCPStorageProvider
        self.p = GCPStorageProvider(service=service, config=config)
    elif self.kind in ["gdrive"]:
        from cloudmesh.storage.provider.gdrive import \
            Provider as GdriveStorageProvider
        self.p = GdriveStorageProvider(service=service, config=config)
    elif self.kind in ["azureblob"]:
        from cloudmesh.storage.provider.azureblob import \
            Provider as AzureblobStorageProvider
        self.p = AzureblobStorageProvider(service=service, config=config)
    elif self.kind in ["oracle"]:
        from cloudmesh.oracle.storage.Provider import \
            Provider as OracleStorageProvider
        self.p = OracleStorageProvider(service=service, config=config)
    else:
        raise NotImplementedError
def __init__(self, name=None, configuration="~/.cloudmesh/cloudmesh.yaml"): conf = Config(configuration)["cloudmesh"] # self.user = conf["profile"] #self.spec = conf["cloud"][name] self.cloud = name #cred = self.spec["credentials"] #deft = self.spec["default"] #self.cloudtype = self.spec["cm"]["kind"] super().__init__(name, conf) print(self.cloud) #print(self.cloudtype) # # BUG: the test must be self.kind and not self.cloud # if self.cloud == "multipass": from cloudmesh.volume.multipass.Provider import \ Provider as MulitpassProvider self.provider = MulitpassProvider(self.cloud)
def __init__(self, service=None, config="~/.cloudmesh/cloudmesh.yaml"):
    """
    Build a Google Drive v3 service client from the gdrive credentials
    stored in cloudmesh.yaml (scopes, secret file, application name),
    generating key/flag files and authorizing an http client.

    :param service: name of the storage service in cloudmesh.yaml
    :param config: path of the cloudmesh.yaml file
    """
    super().__init__(service=service, config=config)
    self.config = Config()
    self.storage_credentials = self.config.credentials("storage", "gdrive")
    # NOTE(review): the message says "smaller than 1000" but the check
    # still accepts exactly 1000 -- confirm which is intended. Also,
    # sys.exit in a constructor kills the whole process; consider raising.
    if self.storage_credentials['maxfiles'] > 1000:
        Console.error("Page size must be smaller than 1000")
        sys.exit(1)
    self.limitFiles = self.storage_credentials['maxfiles']
    self.scopes = self.storage_credentials['scopes']
    self.clientSecretFile = path_expand(
        self.storage_credentials['location_secret'])
    self.applicationName = self.storage_credentials['application_name']
    # materialize the key/flag files needed by the google auth flow
    self.generate_key_json()
    self.flags = self.generate_flags_json()
    self.credentials = self.get_credentials()
    self.http = self.credentials.authorize(httplib2.Http())
    self.driveService = discovery.build('drive', 'v3', http=self.http)
    self.cloud = service
    self.service = service
    # fields requested from the drive API for every file listing
    self.fields = "nextPageToken, files(id, name, mimeType, parents,size,modifiedTime,createdTime)"
def setup(self):
    """
    Prepare the test fixture: user names, two generated vm names, the
    provider under test, and a security-group definition.
    """
    banner("setup", c="-")
    self.user = Config()["cloudmesh.profile.user"]
    self.clouduser = '******'
    generator = Name(schema=f"{self.user}-vm", counter=1)
    self.name_generator = generator
    # current name, then advance the counter for the "new" name
    self.name = str(generator)
    generator.incr()
    self.new_name = str(generator)
    self.p = Provider(name=CLOUD)
    self.secgroupname = "CM4TestSecGroup"
    self.secgrouprule = {"ip_protocol": "tcp",
                         "from_port": 8080,
                         "to_port": 8088,
                         "ip_range": "129.79.0.0/16"}
    self.testnode = None
def __init__(self, cloud):
    """
    Initialize the provider for the yaml file

    :param cloud: name of the cloud (kept for interface compatibility;
                  the azure section of the configuration is always used)
    """
    config = Config()
    cred = config.get("cloud.azure.credentials")
    self.defaults = config.get("cloud.azure.default")
    self.resource_group = self.defaults["resource_group"]
    # BUG FIX: the original assigned the *tenant* id
    # (cred["AZURE_TENANT_ID"]) to subscription_id; the subscription id
    # comes from AZURE_SUBSCRIPTION_ID, as used for the driver below.
    self.subscription_id = cred["AZURE_SUBSCRIPTION_ID"]
    cls = get_driver(LibCloudProvider.AZURE_ARM)
    self.api_version = {"api-version": "2018-08-01"}
    self.provider = cls(
        tenant_id=cred["AZURE_TENANT_ID"],
        subscription_id=cred["AZURE_SUBSCRIPTION_ID"],
        key=cred["AZURE_APPLICATION_ID"],
        secret=cred["AZURE_SECRET_KEY"],
        region=cred["AZURE_REGION"]
    )
def remove(service, name):
    """
    Remove the entry ``name`` from the cloudmesh.{service} section of
    cloudmesh.yaml and save the file.

    :param service: service category, e.g. "cloud", "storage", "volume"
    :param name: name of the registered entry to remove
    :return: the removed configuration entry, or None if nothing was
             removed
    """
    removed_item = None
    try:
        # Update the google cloud section of cloudmesh.yaml config file.
        config = Config()
        section = config["cloudmesh"][service]
        if name not in section:
            Console.warning(
                f"{name} is not registered for cloudmesh.{service}")
        else:
            removed_item = section.pop(name, None)
            config.save()
            Console.ok(f"Removed {name} from {service} service.")
    except Exception as se:
        Console.error(f"Error removing {service}-{name} :: {se}")
    return removed_item
def add(self, name, source):
    """
    key add [NAME] [--source=FILENAME]
    key add [NAME] [--source=git]
    key add [NAME] [--source=ssh]
    """
    if source == "git":
        # fetch all public keys of the configured github user
        config = Config()
        username = config["cloudmesh.profile.github"]
        return SSHkey().get_from_git(username)
    if source == "ssh":
        return [SSHkey(name=name)]
    # source is filename -- not yet supported
    raise NotImplementedError
def __init__(self, cloud, path):
    # noinspection SpellCheckingInspection
    """
    Initialize self.cm, self.default, self.credentials, self.group,
    self.experiment

    :param cloud: name of provider
    :param path: "~/.cloudmesh/cloudmesh.yaml"
    """
    try:
        # noinspection SpellCheckingInspection
        configuration = Config(config_path=path)["cloudmesh"]
        provider = configuration["cloud"][cloud]
        self.cm = provider["cm"]
        self.default = provider["default"]
        self.credentials = provider["credentials"]
        defaults = configuration["default"]
        self.group = defaults["group"]
        self.experiment = defaults["experiment"]
    except Exception as e:
        # best-effort: a missing section leaves attributes unset
        print(e)
def test_predict(ip, cloud):
    """
    Send 30 predict requests to the deployed EigenfacesSVM service and
    assert each returns HTTP 200, timing the loop with Benchmark.

    :param ip: public ip of the server running the service
    :param cloud: "aws", "azure" or "google" -- selects the remote user
                  whose home directory holds the uploaded images
    :raises ValueError: for an unknown cloud (the original silently fell
                        through, leaving ``user``/``home`` unbound and
                        crashing later with NameError)
    """
    url = f"http://{ip}:8080/cloudmesh/EigenfacesSVM/predict"
    config = Config()
    if cloud == "aws":
        user = config["cloudmesh.cloud.aws.default.username"]
    elif cloud == "azure":
        user = config["cloudmesh.cloud.azure.default.AZURE_VM_USER"]
    elif cloud == "google":
        user = config["cloudmesh.profile.user"]
    else:
        raise ValueError(f"unsupported cloud: {cloud}")
    home = f"/home/{user}"
    Benchmark.Start()
    for i in range(30):
        payload = {
            'image_file_paths':
                f'{home}/.cloudmesh/upload-file/example_image{cloud}{i}.jpg'
        }
        r = requests.get(url, params=payload)
        assert r.status_code == 200
    Benchmark.Stop()
def _get_specification(self, cloud=None, name=None, port=None, image=None,
                       **kwargs):
    # Assemble a dotdict describing a VM: merge the explicit arguments
    # with the defaults of the selected cloud from cloudmesh.yaml and
    # derive the vagrant directory and Vagrantfile paths.
    arg = dotdict(kwargs)
    arg.port = port
    config = Config()
    # NOTE(review): prints self.config (set elsewhere), not the local
    # ``config`` -- presumably a debug leftover.
    pprint(self.config)
    if cloud is None:
        #
        # TOD read default cloud
        #
        cloud = "vagrant"  # TODO must come through parameter or set cloud
    print("CCC", cloud)
    spec = config.data["cloudmesh"]["cloud"][cloud]
    pprint(spec)
    default = spec["default"]
    # NOTE(review): self.default is the provider-level default, distinct
    # from the cloud-level ``default`` above -- debug print.
    pprint(self.default)
    if name is not None:
        arg.name = name
    else:
        # TODO get new name
        pass
    if image is not None:
        arg.image = image
    else:
        # fall back to the image configured for this cloud
        arg.image = default["image"]
        pass
    arg.path = default["path"]
    arg.directory = os.path.expanduser("{path}/{name}".format(**arg))
    arg.vagrantfile = "{directory}/Vagrantfile".format(**arg)
    return arg
def load(self):
    """
    Populate this key dict from the public ssh key file named in the
    cloudmesh profile: read the key material, parse type/key/comment,
    compute the fingerprint, and register the entry via _update_dict.
    """
    self["profile"] = Config()["cloudmesh"]["profile"]
    self["path"] = path_expand(self["profile"]["publickey"])
    self["uri"] = 'file://{path}'.format(path=self["path"])
    # BUG FIX: the original used a bare open() without closing the file;
    # a context manager guarantees the handle is released
    with open(Path(self["path"]), "r") as f:
        self['public_key'] = f.read().rstrip()
    (self['type'],
     self['key'],
     self['comment']) = SSHkey._parse(self['public_key'])
    self['fingerprint'] = SSHkey._fingerprint(self['public_key'])
    # "id_rsa.pub" -> "rsa", etc.
    self["name"] = basename(self["path"]).replace(".pub", "").replace("id_", "")
    # BUG FIX: removed the no-op ``self['comment'] = self['comment']``
    self['source'] = 'ssh'
    self['location'] = {
        'public': self['path'],
        'private': self['path'].replace(".pub", "")
    }
    # NOTE(review): rebinding the local name ``self`` has no effect
    # outside this method; the call is kept for its registration side
    # effect.
    self = self._update_dict(self['name'], self)
def create(self, name=None, **kwargs):
    """
    Create a volume, filling in every option that is missing or None
    with the defaults from cloudmesh.volume.{cloud}.default in
    cloudmesh.yaml.

    :param name: volume name
    :param kwargs: must contain "cloud"; other provider options
                   (e.g. size, iops, encrypted) override the defaults
    :return: the created volume record, post-processed by update_dict
    """
    cloud = kwargs['cloud']
    config = Config()
    default = config[f"cloudmesh.volume.{cloud}.default"]
    # BUG FIX: ``kwargs[key] == None`` replaced with an ``is None``
    # identity check; the missing-key and explicit-None branches did the
    # same thing and are merged via dict.get
    for key, value in default.items():
        if kwargs.get(key) is None:
            kwargs[key] = value
    result = self._create(name=name, **kwargs)
    result = self.update_dict(result)
    return result