Example #1
    def do_pi(self, args, arguments):
        """
        ::

          Usage:
            pi led reset [NAMES]
            pi led (red|green) VALUE
            pi led (red|green) VALUE NAMES [--user=USER]
            pi led list NAMES [--user=USER]
            pi led blink (red|green) NAMES [--user=USER] [--rate=SECONDS]
            pi led sequence (red|green) NAMES [--user=USER] [--rate=SECONDS]
            pi temp NAMES [--rate=RATE] [--user=USER] [--output=FORMAT]
            pi free NAMES [--rate=RATE] [--user=USER] [--output=FORMAT]
            pi load NAMES [--rate=RATE] [--user=USER] [--output=FORMAT]
            pi spark setup --master=MASTER --workers=WORKERS
            pi spark start --master=MASTER --workers=WORKERS
            pi spark stop --master=MASTER --workers=WORKERS
            pi spark test --master=MASTER --workers=WORKERS
            pi spark check [--master=MASTER] [--workers=WORKERS]
            pi k3 install [--master=MASTER] [--workers=WORKERS] [--step=COMMAND]
            pi k3 join --master=MASTER --workers=WORKERS
            pi k3 uninstall [--master=MASTER] [--workers=WORKERS]
            pi k3 delete [--master=MASTER] [--workers=WORKERS]
            pi k3 test [--master=MASTER] [--workers=WORKERS]
            pi k3 view
            pi script list SERVICE [--details]
            pi script list SERVICE NAMES
            pi script list
            pi wifi SSID [PASSWORD] [--dryrun]

          This command does some useful things.

          Arguments:
              FILE   a file name

          Options:
              -f      specify the file


          Description:

            This command switches the LEDs of the specified PIs on and off. If
            the hostname is omitted, it is assumed that the code is executed on
            a PI and its LEDs are set. To list the LED status of the PIs you
            can use the list command.

            Examples:

                cms pi led list  "red,red[01-03]"

                    lists the LED status of the given hosts

                cms pi led red off  "red,red[01-03]"

                    switches off the led of the given PIs

                cms pi led red on  "red,red[01-03]"

                    switches on the led of the given PIs

                cms pi led red blink  "red,red[01-03]"

                    switches on and off the led of the given PIs

                cms pi led red sequence  "red,red[01-03]"

                    goes in sequential order and switches on and off the led of
                    the given PIs

        """

        map_parameters(arguments, 'details', 'master', 'workers', 'output',
                       'user', 'rate')

        arguments.output = arguments.output or 'table'

        if arguments.free:

            free = Free()
            free.execute(arguments)

        elif arguments.temp:

            temp = Temperature()
            temp.execute(arguments)

        elif arguments.load:

            load = Load()
            load.execute(arguments)

        elif arguments.led:

            led = LED()
            led.execute(arguments)

        elif arguments.spark:

            from cloudmesh.cluster.spark.spark import Spark
            spark = Spark()
            spark.execute(arguments)

        elif arguments.k3:
            from cloudmesh.cluster.k3.k3 import K3
            k3 = K3()
            k3.execute(arguments)

        elif arguments.script:

            script = Script()
            script.execute(arguments)

        elif arguments.wifi:

            wifi = Wifi()

            if not wifi.is_root():
                Console.error("You are not running with sudo")
                return ""

            if arguments.PASSWORD is None:
                arguments.PASSWORD = getpass(
                    f"Wifi Password for {arguments.SSID}: ")

            wifi.set(arguments.SSID,
                     arguments.PASSWORD,
                     dryrun=arguments["--dryrun"])

        return ""
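
# A hedged sketch (not part of cloudmesh): do_pi above follows the docopt
# pattern, in which the Usage block of the docstring is parsed into a
# dictionary of command words, positionals, and options; cloudmesh's
# map_parameters then exposes options such as --user under plain keys like
# arguments.user. The usage text below is a shortened, illustrative excerpt.
from docopt import docopt

usage = """
Usage:
  pi led (red|green) VALUE NAMES [--user=USER]
  pi temp NAMES [--rate=RATE]
"""

args = docopt(usage, argv=["led", "red", "on", "red[01-03]", "--user=pi"])
# args is a plain dict, e.g. {'led': True, 'red': True, 'VALUE': 'on',
#                             'NAMES': 'red[01-03]', '--user': 'pi', ...}
if args["led"]:
    color = "red" if args["red"] else "green"
    print(f"would set the {color} LED to {args['VALUE']} on {args['NAMES']}")
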
    def install(self, sudo=True):
        """
        Check whether MongoDB is installed in the configured mongo location.
        If MongoDB is not installed, this method helps to install it.
        """
        if self.dryrun:
            print(self.mongo_path)
        # pprint(self.data)

        mode = self.data['MODE']

        Console.msg(f"Installing mongo in  mode: {mode}")

        if mode == 'docker':
            Console.ok(
                "Installing mongoDB in a docker container cloudmesh-mongo")

            from cloudmesh.mongo.MongoDocker import MongoDocker
            mongo = MongoDocker()
            mongo.kill()
            mongo.install(clean=True, pull=True)

            Console.ok("Shutting mongoDB down")
            Console.msg("")
            Console.ok("Start the mongodb service with")
            Console.msg("")
            Console.msg("   cms admin mongo create")
            Console.msg("   cms admin mongo start")
            Console.msg("")

            return ""

        if not self.data["MONGO_AUTOINSTALL"]:
            Console.error("Mongo auto install is off")
            print("You can set it with")
            print()
            Console.ok(
                "    cms config set cloudmesh.data.mongo.MONGO_AUTOINSTALL=True"
            )
            print()
            if self.machine == 'darwin':
                print("To install it with brew you need to set also")
                print()
                Console.ok(
                    "    cms config set cloudmesh.data.mongo.MONGO_BREWINSTALL=True"
                )
                print()

            return ""

        #
        # the path test may be wrong as we need to test for mongo and mongod
        #
        # print ('OOO', os.path.isdir(path), self.data["MONGO_AUTOINSTALL"] )
        if self.force or (not os.path.isdir(self.mongo_home)
                          and self.data["MONGO_AUTOINSTALL"]):
            print(f"MongoDB is not installed in {self.mongo_home}")
            #
            # ask if you like to install and give info where it is being installed
            #
            # use cloudmesh yes no question see cloudmesh 3
            #
            # print(f"Auto-install the MongoDB into {mongo_path}")

            self.local = self.data["LOCAL"]
            if self.machine == 'linux':
                self.linux()
            elif self.machine == 'darwin':
                self.darwin()
            elif self.machine == 'win32':  # Replaced windows with win32
                self.windows()
            else:
                print("platform not found", platform)
        elif os.path.isdir(self.mongo_home):
            Console.error(f"Folder {self.mongo_home} already exists")
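
# A minimal sketch of the check hinted at in the comment in install() above
# ("we need to test for mongo and mongod"): instead of only testing whether
# mongo_home exists, verify that both the mongo shell and the mongod daemon
# are on the PATH. Standard library only; not part of cloudmesh.
import shutil

def mongo_binaries_installed():
    """Return True if both the mongo client and the mongod server are found."""
    return all(shutil.which(binary) is not None for binary in ("mongo", "mongod"))

print(mongo_binaries_installed())
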
Example #3
    def do_open(self, args, arguments):
        """
        ::

            Usage:
                open chameleon baremetal tacc
                open chameleon baremetal uc
                open chameleon vm
                open chameleon openstack
                open FILENAME
                open doc local
                open doc
                open git REPO
                open account aws [NAME]


            Arguments:

                FILENAME  the file to open in the cwd if . is
                          specified. If the file is in the cwd
                          you must specify it with ./FILENAME

                          If the FILENAME is doc, the documentation from the
                          Web is opened.

            Description:

                Opens the given URL in a browser window.

                open chameleon baremetal tacc
                   starts horizon for baremetal for chameleon cloud at TACC

                open chameleon baremetal uc
                    starts horizon for baremetal for chameleon cloud at UC

                open chameleon vm
                    starts horizon for virtual machines

                open account aws [NAME]
                    opens the aws users web page, if the NAME is users or is
                    omitted, it goes to the page that allows you to create a user

        """

        # pprint(arguments)
        filename = arguments.FILENAME

        if arguments.git and arguments.REPO:

            try:
                from cloudmesh_installer.install.installer import Git
                filename = Git.url(arguments.REPO)
            except ModuleNotFoundError:
                Console.error("To run this command you need to install "
                              "cloudmesh-installer.\n"
                              "       Install it with\n\n"
                              "       pip install cloudmesh-installer -U\n")
                return ""

        elif arguments.aws and arguments.account:
            name = arguments.NAME or "users"

            if name == "users":
                filename = f"https://console.aws.amazon.com/iam/home#/users"
            else:
                filename = "https://console.aws.amazon.com/iam/home#/users" \
                           f"/{name}?section=security_credentials"
        elif arguments.baremetal and arguments.tacc:
            filename = "https://chi.tacc.chameleoncloud.org"
        elif arguments.baremetal and arguments.uc:
            filename = "https://chi.uc.chameleoncloud.org"
        elif arguments.chameleon and (arguments.vm or arguments.openstack):
            filename = "https://openstack.tacc.chameleoncloud.org"

        elif arguments.doc and arguments.local:
            filename = "./docs/index.html"

        elif filename == "doc":
            filename = "https://cloudmesh.github.io/cloudmesh-manual/"

        if not (filename.startswith("file:") or filename.startswith("http")):

            if not filename.startswith(".") and not filename.startswith("/"):
                filename = "./" + filename

            filename = path_expand(filename)

            if os.path.isfile(Path(filename)):
                filename = "file://" + filename
            else:
                Console.error("can not find the file {0}".format(filename))
                return ""

        Console.ok("open {0}".format(filename))

        try:
            webbrowser.open("%s" % filename)
        except Exception as e:
            Console.error(
                "can not open browser with file {0}".format(filename))
        return ""
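
# A self-contained restatement of the filename handling in do_open above:
# URLs are passed through, relative paths are resolved, and existing files
# are rewritten as file:// URLs before handing them to the default browser.
# This is a sketch, not the cloudmesh implementation.
import webbrowser
from pathlib import Path

def open_target(name: str) -> bool:
    if not (name.startswith("file:") or name.startswith("http")):
        candidate = Path(name).expanduser().resolve()
        if not candidate.is_file():
            print(f"can not find the file {name}")
            return False
        name = candidate.as_uri()   # e.g. file:///home/user/docs/index.html
    return webbrowser.open(name)

# open_target("./docs/index.html")
# open_target("https://cloudmesh.github.io/cloudmesh-manual/")
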
    def do_config(self, args, arguments):
        """
        ::

           Usage:
             config  -h | --help
             config cat [less]
             config check
             config encrypt [SOURCE] [--keep]
             config decrypt [SOURCE]
             config edit [ATTRIBUTE]
             config set ATTRIBUTE=VALUE
             config get ATTRIBUTE [--output=OUTPUT]
             config value ATTRIBUTE
             config ssh keygen
             config ssh verify
             config ssh check
             config ssh pem
             config cloud verify NAME [KIND]
             config cloud edit [NAME] [KIND]
             config cloud list NAME [KIND] [--secrets]


           Arguments:
             SOURCE           the file to be encrypted or decrypted.
                              ".enc" is added to or removed from the filename
                              depending on whether you encrypt or decrypt
             ATTRIBUTE=VALUE  sets the attribute with . notation in the
                              configuration file.
             ATTRIBUTE        reads the attribute from the container and sets it
                              in the configuration file
                              If the attribute is a password, a * is written
                              instead of each character

           Options:
              --name=KEYNAME     The name of a key
              --output=OUTPUT    The output format [default: yaml]
              --secrets          Print the secrets. Use carefully.

           Description:

             config check
                checks if the ssh key ~/.ssh/id_rsa has a password. Verifies it
                through entering the passphrase

             Key generation

                Keys must be generated with

                    ssh-keygen -t rsa -m pem
                    openssl rsa -in ~/.ssh/id_rsa -out ~/.ssh/id_rsa.pem

                or
                    cms config ssh keygen

                Key validity can be checked with

                    cms config check

                The key password can be verified with

                    cms config verify


                ssh-add

                cms config encrypt ~/.cloudmesh/cloudmesh.yaml
                cms config decrypt ~/.cloudmesh/cloudmesh.yaml


                config set ATTRIBUTE=VALUE

                    config set profile.name=Gregor

                In case the ATTRIBUTE is the name of a cloud defined under
                cloudmesh.cloud, the value will be written into the credentials
                attributes for that cloud. This way you can save a lot of
                typing. An example is

                    cms config set aws.AWS_TEST=Gregor

                which would write the AWS_TEST attribute in the credentials
                of the cloud aws. This can naturally be used to set for
                example username and password.

        """
        # d = Config()                #~/.cloudmesh/cloudmesh.yaml
        # d = Config(encryted=True)   # ~/.cloudmesh/cloudmesh.yaml.enc

        map_parameters(arguments, "keep", "secrets", "output")

        source = arguments.SOURCE or path_expand("~/.cloudmesh/cloudmesh.yaml")
        destination = source + ".enc"

        if arguments.cloud and arguments.edit and arguments.NAME is None:
            path = path_expand("~/.cloudmesh/cloudmesh.yaml")
            print(path)
            Shell.edit(path)
            return ""

        cloud = arguments.NAME
        kind = arguments.KIND
        if kind is None:
            kind = "cloud"

        configuration = Config()

        if arguments.cloud and arguments.verify:
            service = configuration[f"cloudmesh.{kind}.{cloud}"]

            result = {"cloudmesh": {"cloud": {cloud: service}}}

            action = "verify"
            banner(
                f"{action} cloudmesh.{kind}.{cloud} in ~/.cloudmesh/cloudmesh.yaml"
            )

            print(yaml.dump(result))

            flat = flatten(service, sep=".")

            for attribute in flat:
                if "TBD" in str(flat[attribute]):
                    Console.error(
                        f"~/.cloudmesh.yaml: Attribute cloudmesh.{cloud}.{attribute} contains TBD"
                    )

        elif arguments.cloud and arguments.list:
            service = configuration[f"cloudmesh.{kind}.{cloud}"]
            result = {"cloudmesh": {"cloud": {cloud: service}}}

            action = "list"
            banner(
                f"{action} cloudmesh.{kind}.{cloud} in ~/.cloudmesh/cloudmesh.yaml"
            )

            lines = yaml.dump(result).split("\n")
            secrets = not arguments.secrets
            result = Config.cat_lines(lines, mask_secrets=secrets)
            print(result)

        elif arguments.cloud and arguments.edit:

            #
            # there is a duplicated code in config.py for this
            #
            action = "edit"
            banner(
                f"{action} cloudmesh.{kind}.{cloud}.credentials in ~/.cloudmesh/cloudmesh.yaml"
            )

            credentials = configuration[
                f"cloudmesh.{kind}.{cloud}.credentials"]

            print(yaml.dump(credentials))

            for attribute in credentials:
                if "TBD" in credentials[str(attribute)]:
                    value = credentials[attribute]
                    result = input(f"Please enter {attribute}[{value}]: ")
                    credentials[attribute] = result

            # configuration[f"cloudmesh.{kind}.{cloud}.credentials"] = credentials

            print(
                yaml.dump(
                    configuration[f"cloudmesh.{kind}.{cloud}.credentials"]))

        elif arguments["edit"] and arguments["ATTRIBUTE"]:

            attribute = arguments.ATTRIBUTE

            config = Config()

            config.edit(attribute)

            config.save()

            return ""

        elif arguments.cat:

            content = Config.cat()

            import shutil
            columns, rows = shutil.get_terminal_size(fallback=(80, 24))

            lines = content.split("\n")

            counter = 1
            for line in lines:
                if arguments.less:
                    if counter % (rows - 2) == 0:
                        x = input().split("\n")[0].strip()
                        if x != '' and x in 'qQxX':
                            return ""
                print(line)
                counter += 1

            return ""

        elif arguments.check and not arguments.ssh:

            Config.check()

        elif arguments.encrypt:

            e = EncryptFile(source, destination)

            e.encrypt()
            Console.ok(f"{source} --> {destination}")
            if not arguments.keep:
                os.remove(source)

            Console.ok("file encrypted")

            return ""

        elif arguments.decrypt:

            if ".enc" not in source:
                destination = source
                source = source + ".enc"
            else:
                destination = source.replace(".enc", "")

            if not os.path.exists(source):
                Console.error(f"encrypted file {source} does not exist")
                sys.exit(1)

            if os.path.exists(destination):
                Console.error(
                    f"decrypted file {destination} does already exist")
                sys.exit(1)

            e = EncryptFile(source, destination)

            e.decrypt(source)
            Console.ok(f"{source} --> {destination}")

            Console.ok("file decrypted")
            return ""

        elif arguments.ssh and arguments.verify:

            e = EncryptFile(source, destination)

            e.pem_verify()

        elif arguments.ssh and arguments.check:

            e = EncryptFile(source, destination)

            key = path_expand("~/.ssh/id_rsa")
            r = e.check_key(key)
            if r:
                Console.ok(f"Key {key} is valid")
            # does not work as it does not change it to pem format
            # e.check_passphrase()

        elif arguments.ssh and arguments.pem:

            e = EncryptFile(source, destination)

            r = e.pem_create()

        elif arguments.set:

            config = Config()
            clouds = config["cloudmesh.cloud"].keys()

            line = arguments["ATTRIBUTE=VALUE"]
            attribute, value = line.split("=", 1)

            cloud, field = attribute.split(".", 1)

            if cloud in clouds:
                attribute = f"cloudmesh.cloud.{cloud}.credentials.{field}"

            elif not attribute.startswith("cloudmesh."):
                attribute = f"cloudmesh.{attribute}"

            config[attribute] = value
            config.save()

        elif arguments.value:

            config = Config()

            attribute = arguments.ATTRIBUTE
            if not attribute.startswith("cloudmesh."):
                attribute = f"cloudmesh.{attribute}"

            try:
                value = config[attribute]
                if isinstance(value, dict):
                    Console.error("the variable is a dict")
                else:
                    print(f"{value}")

            except Exception as e:
                print(e)
                return ""

        elif arguments.get:

            print()

            config = Config()
            clouds = config["cloudmesh.cloud"].keys()

            attribute = arguments.ATTRIBUTE

            try:
                cloud, field = attribute.split(".", 1)
                field = f".{field}"
            except:
                cloud = attribute
                field = ""

            if cloud in clouds:
                attribute = f"cloudmesh.cloud.{cloud}{field}"
            elif not attribute.startswith("cloudmesh."):
                attribute = f"cloudmesh.{attribute}"

            try:
                value = config[attribute]
                if type(value) == dict:
                    print(Printer.write(value, output=arguments.output))
                else:
                    print(f"{attribute}={value}")

            except Exception as e:
                print(e)
                return ""

        elif arguments.ssh and arguments.keygen:

            e = EncryptFile(source, destination)

            e.ssh_keygen()

        return ""
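
# A plain-Python restatement of the attribute expansion performed by
# "config set" above: an attribute that starts with a configured cloud name is
# routed into that cloud's credentials section, everything else is prefixed
# with "cloudmesh.". The cloud tuple here is illustrative; the real code reads
# the cloud names from cloudmesh.cloud in the configuration file.
def expand_attribute(line, clouds=("aws", "azure", "google")):
    attribute, value = line.split("=", 1)
    cloud, _, field = attribute.partition(".")
    if cloud in clouds and field:
        attribute = f"cloudmesh.cloud.{cloud}.credentials.{field}"
    elif not attribute.startswith("cloudmesh."):
        attribute = f"cloudmesh.{attribute}"
    return attribute, value

print(expand_attribute("aws.AWS_TEST=Gregor"))
# -> ('cloudmesh.cloud.aws.credentials.AWS_TEST', 'Gregor')
print(expand_attribute("profile.name=Gregor"))
# -> ('cloudmesh.profile.name', 'Gregor')
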
Example #5
    def _create(self, **kwargs):
        """
        Create a volume.

        :param name (string): name of volume
        :param region (string): availability-zone
        :param encrypted (boolean): True|False
        :param size (integer): size of volume. Minimum size for st1 and sc1 is 500 GB.
        :param volume_type (string): type of volume. This can be gp2 for General
                                     Purpose SSD, io1 for Provisioned IOPS SSD
                                     (not implemented), st1 for Throughput
                                     Optimized HDD, sc1 for Cold HDD,
                                     or standard for Magnetic volumes.
        :param snapshot (string): snapshot id
        :return: dict
        """
        if kwargs['volume_type'] == 'io1':
            raise NotImplementedError
        if kwargs['volume_type'] in ['sc1', 'st1']:
            if int(kwargs['size']) < 500:
                Console.error("minimum volume size for sc1 and st1 is 500 GB")
        if kwargs['snapshot'] != "None":
            r = self.client.create_volume(
                AvailabilityZone=kwargs['region'],
                Encrypted=kwargs['encrypted'],
                Size=int(kwargs['size']),
                SnapshotId=kwargs['snapshot'],
                VolumeType=kwargs['volume_type'],
                TagSpecifications=[
                    {
                        'ResourceType': 'volume',
                        'Tags': [
                            {
                                'Key': "Name",
                                'Value': kwargs['NAME']
                            },
                        ]
                    },
                ],
            )
        else:
            r = self.client.create_volume(
                AvailabilityZone=kwargs['region'],
                Encrypted=kwargs['encrypted'],
                Size=int(kwargs['size']),
                VolumeType=kwargs['volume_type'],
                TagSpecifications=[
                    {
                        'ResourceType': 'volume',
                        'Tags': [
                            {
                                'Key': "Name",
                                'Value': kwargs['NAME']
                            },
                        ]
                    },
                ],
            )
        r = [r]
        result = {'Volumes': r}
        result['Volumes'][0]['AttachedToVm'] = []
        return result
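
# Hedged refactoring sketch for _create() above: build the boto3 keyword
# arguments once and add SnapshotId only when a snapshot is requested, which
# avoids duplicating the create_volume call. Parameter names follow the boto3
# EC2 client API; error handling and the cloudmesh result wrapping are omitted.
import boto3

def create_volume(client, *, name, region, encrypted, size,
                  volume_type, snapshot=None):
    params = dict(
        AvailabilityZone=region,
        Encrypted=encrypted,
        Size=int(size),
        VolumeType=volume_type,
        TagSpecifications=[{
            "ResourceType": "volume",
            "Tags": [{"Key": "Name", "Value": name}],
        }],
    )
    if snapshot and snapshot != "None":
        params["SnapshotId"] = snapshot
    return client.create_volume(**params)

# client = boto3.client("ec2")
# create_volume(client, name="vol1", region="us-east-1a", encrypted=False,
#               size=500, volume_type="sc1")
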
    def do_storage(self, args, arguments):
        """
        ::

          Usage:
                storage [--storage=SERVICE] put FILENAME SOURCEDIR
                storage [--storage=SERVICE] get FILENAME DESTDIR
                storage [--storage=SERVICE] delete file FILENAME
                storage [--storage=SERVICE] list file DIRNAME
                storage [--storage=SERVICE] info FILENAME
                storage [--storage=SERVICE] create dir DIRNAME
                storage [--storage=SERVICE] list dir
                storage [--storage=SERVICE] delete dir DIRNAME


          This command does some useful things.

          Arguments:
              FILENAME   a BLOB name
              SOURCEDIR  local path for the FILENAME to be uploaded
              DESTDIR    local path for the FILENAME to be downloaded

          Options:
              --storage=SERVICE  specify the cloud service name like aws or azure or box or google

          Description:
                commands used to upload, download, list files on different cloud storage services.

                storage put [options..]
                    Uploads the file specified in the filename to specified cloud from the SOURCEDIR.

                storage get [options..]
                    Downloads the file specified in the filename from the specified cloud to the DESTDIR.

                storage delete file [options..]
                    Deletes the file specified in the filename from the specified cloud.

                storage list file [options..]
                    lists all the files from the container name specified on the specified cloud.

                storage info [options..]
                    returns the properties of the filename specified on the specified cloud.

                storage create dir [options..]
                    creates a folder with the directory name specified on the specified cloud.

                storage list dir [options..]
                    lists all the folders on the specified cloud.

                storage delete dir [options..]
                    deletes all the files in the directory specified on the specified cloud.


          Example:
            set storage=azureblob
            storage put FILENAME SOURCEDIR

            is the same as 

            storage --storage=azureblob put FILENAME SOURCEDIR


        """
        # arguments.CONTAINER = arguments["--container"]
        arguments.SERVICE = arguments["--storage"]
        pprint(arguments)

        m = Manager()

        service = None

        try:
            service = arguments["--storage"][0]
        except Exception as e:
            try:
                v = Variables()
                service = v['storage']
            except Exception as e:
                service = None

        if service is None:
            Console.error("storage service not defined")

        if arguments['get']:
            if arguments.SERVICE is None:
                variables = Variables()
                arguments.SERVICE = variables['storage']
            m.get(arguments.SERVICE, arguments.FILENAME, arguments.DESTDIR)

        elif arguments['put']:
            if arguments.SERVICE is None:
                variables = Variables()
                arguments.SERVICE = variables['storage']
            m.put(arguments.SERVICE, arguments.FILENAME, arguments.SOURCEDIR)

        elif arguments['delete'] and arguments['file']:
            if arguments.SERVICE is None:
                variables = Variables()
                arguments.SERVICE = variables['storage']
            m.delete(arguments.SERVICE, arguments.FILENAME)

        elif arguments['list'] and arguments['file']:
            if arguments.SERVICE is None:
                variables = Variables()
                arguments.SERVICE = variables['storage']
            m.listfiles(arguments.SERVICE, arguments.DIRNAME)

        elif arguments['info']:
            if arguments.SERVICE is None:
                variables = Variables()
                arguments.SERVICE = variables['storage']
            m.info(arguments.SERVICE, arguments.FILENAME)

        elif arguments['create'] and arguments['dir']:
            if arguments.SERVICE is None:
                variables = Variables()
                arguments.SERVICE = variables['storage']
            m.createdir(arguments.SERVICE, arguments.DIRNAME)

        elif arguments['list'] and arguments['dir']:
            if arguments.SERVICE is None:
                variables = Variables()
                arguments.SERVICE = variables['storage']
            m.listdir(arguments.SERVICE)

        elif arguments['delete'] and arguments['dir']:
            if arguments.SERVICE is None:
                variables = Variables()
                arguments.SERVICE = variables['storage']
            m.deletedir(arguments.SERVICE, arguments.DIRNAME)
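
# The branches above repeat the same fallback for the service name. A small
# helper (a sketch, not part of cloudmesh) states the intent: prefer the
# --storage option and otherwise use the "storage" variable stored with
# "cms set storage=...". A plain dict stands in for cloudmesh's Variables.
def resolve_service(explicit, variables):
    service = explicit or variables.get("storage")
    if service is None:
        raise ValueError("storage service not defined")
    return service

print(resolve_service(None, {"storage": "azureblob"}))   # -> azureblob
print(resolve_service("aws", {}))                        # -> aws
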
Example #7
    def execute(cls,
                cmd,
                arguments="",
                shell=False,
                cwd=None,
                traceflag=True,
                witherror=True):
        """Run Shell command

        :param witherror: if set to False the error will not be printed
        :param traceflag: if set to true the trace is printed in case of an error
        :param cwd: the current working directory in which the command is supposed to be executed.
        :param shell: if set to true the subprocess is called as part of a shell
        :param cmd: command to run
        :param arguments: additional arguments appended to the command (string, list, or tuple)
        :return:
        """
        # print "--------------"
        result = None
        terminal = cls.terminal_type()
        # print cls.command
        os_command = [cmd]
        if terminal in ['linux', 'windows']:
            os_command = [cmd]
        elif 'cygwin' in terminal:
            if not cls.command_exists(cmd):
                print("ERROR: the command could not be found", cmd)
                return
            else:
                os_command = [cls.command[cls.operating_system()][cmd]]

        if isinstance(arguments, list):
            os_command = os_command + arguments
        elif isinstance(arguments, tuple):
            os_command = os_command + list(arguments)
        elif isinstance(arguments, str):
            os_command = os_command + arguments.split()
        else:
            print("ERROR: Wrong parameter type", type(arguments))

        if cwd is None:
            cwd = os.getcwd()
        try:
            if shell:
                result = subprocess.check_output(os_command,
                                                 stderr=subprocess.STDOUT,
                                                 shell=True,
                                                 cwd=cwd)
            else:
                result = subprocess.check_output(
                    os_command,
                    # shell=True,
                    stderr=subprocess.STDOUT,
                    cwd=cwd)
        except Exception:
            if witherror:
                Console.error("problem executing subprocess",
                              traceflag=traceflag)
        if result is not None:
            result = result.strip().decode()
        return result
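
# Usage sketch for execute() above (assumed to be the classmethod on the
# cloudmesh Shell class), followed by the underlying pattern restated with the
# standard library only.
#
#   out = Shell.execute("ls", arguments="-l", cwd="/tmp")
#
import subprocess
import sys

def run(cmd, arguments=""):
    try:
        raw = subprocess.check_output([cmd] + arguments.split(),
                                      stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print("problem executing subprocess:", e.output.decode())
        return None
    return raw.strip().decode()

print(run(sys.executable, "--version"))
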
Example #8
    def do_storage(self, args, arguments):
        """
        ::

           Usage:
             storage create dir DIRECTORY [--storage=SERVICE] [--parallel=N]
             storage get SOURCE DESTINATION [--recursive] [--storage=SERVICE] [--parallel=N]
             storage put SOURCE DESTINATION [--recursive] [--storage=SERVICE] [--parallel=N]
             storage list [SOURCE] [--recursive] [--parallel=N] [--output=OUTPUT] [--dryrun]
             storage delete SOURCE [--parallel=N] [--dryrun]
             storage search  DIRECTORY FILENAME [--recursive] [--storage=SERVICE] [--parallel=N] [--output=OUTPUT]
             storage sync SOURCE DESTINATION [--name=NAME] [--async] [--storage=SERVICE]
             storage sync status [--name=NAME] [--storage=SERVICE]
             storage config list [--output=OUTPUT]
             storage [--parallel=N] copy SOURCE DESTINATION [--recursive]
             storage copy --source=SOURCE:SOURCE_FILE_DIR --target=TARGET:TARGET_FILE_DIR

           This command does some useful things.

           Arguments:
             SOURCE        SOURCE can be a directory or file
             DESTINATION   DESTINATION can be a directory or file
             DIRECTORY     DIRECTORY refers to a folder on the cloud service
             SOURCE:SOURCE_FILE_DIR   source provider name: file or directory name
             TARGET:TARGET_FILE_DIR   destination provider name: file or directory name

           Options:
             --storage=SERVICE  specify the cloud service name like aws or
                                azure or box or google

           Description:
             commands used to upload, download, list files on different
             cloud storage services.

             storage put [options..]
               Uploads the file specified in the filename to specified
               cloud from the SOURCEDIR.

             storage get [options..]
               Downloads the file specified in the filename from the
               specified cloud to the DESTDIR.

             storage delete [options..]
                Deletes the file specified in the filename from the
                specified cloud.

             storage list [options..]
               lists all the files from the container name specified on
               the specified cloud.

             storage create dir [options..]
               creates a folder with the directory name specified on the
               specified cloud.

             storage search [options..]
               searches for the source in all the folders on the specified
               cloud.

             sync SOURCE DESTINATION
               puts the content of source to the destination.
                If --recursive is specified this is done recursively from
                   the source
                If --async is specified, this is done asynchronously
                If a name is specified, the process can also be monitored
                   with the status command by name.
                 If the name is not specified, all data is monitored.

             sync status
               The status for the asynchronous sync can be seen with this
               command

             config list
               Lists the configured storage services in the yaml file

             storage copy SOURCE DESTINATION
               Copies files from source storage to destination storage.
               The syntax of SOURCE and DESTINATION is:
               SOURCE - awss3:source.txt
               DESTINATION - azure:target.txt

           Description of the copy command:

                This command enables copying files between different cloud
                service providers, as well as listing and deleting them. It
                accepts `aws`, `google` and `local` as the SOURCE and TARGET
                provider.

                cms storage copy --source=SERVICE:SOURCE --target=DEST:TARGET

                    Command copies files or directories from Source provider to
                    Target Provider.

                cms storage slist --source=SERVICE:SOURCE
                    Command lists all the files present in the SOURCE
                    provider's given SOURCE_FILE_DIR location. This command
                    accepts `aws` or `google` as the SOURCE provider.

                cms storage sdelete --source=SERVICE:SOURCE
                    Command deletes the file or directory from the SOURCE
                    provider's SOURCE_FILE_DIR location

            Examples:
                cms storage_service copy --source=local:test1.txt --target=aws:uploadtest1.txt
                cms storage_service list --source=google:test
                cms storage_service delete --source=aws:uploadtest1.txt


           Example:
              set storage=azureblob
              storage put SOURCE DESTINATION --recursive

              is the same as
              storage --storage=azureblob put SOURCE DESTINATION --recursive

              storage copy azure:source.txt oracle:target.txt

        """
        # arguments.CONTAINER = arguments["--container"]

        VERBOSE(arguments)
        map_parameters(arguments, "dryrun", "recursive", "storage", "source",
                       "target")

        source = arguments.source
        target = arguments.target
        variables = Variables()

        VERBOSE(arguments)

        arguments.storage = Parameter.expand(arguments.storage)

        if arguments["get"]:
            provider = Provider(arguments.storage[0])

            result = provider.get(arguments.SOURCE, arguments.DESTINATION,
                                  arguments.recursive)

        elif arguments.put:
            provider = Provider(arguments.storage[0])

            result = provider.put(arguments.SOURCE, arguments.DESTINATION,
                                  arguments.recursive)

        elif arguments.create and arguments.dir:
            provider = Provider(arguments.storage[0])

            result = provider.create_dir(arguments.DIRECTORY)

        elif arguments.list:
            """
            storage list SOURCE [--parallel=N]
            """
            sources = arguments.SOURCE or variables["storage"] or 'local:.'
            sources = Parameter.expand(sources)

            deletes = []
            for source in sources:
                storage, entry = Parameter.separate(source)

                storage = storage or "local"
                deletes.append((storage, entry))

            _sources = ', '.join(sources)

            for delete in deletes:
                service, entry = delete
                if arguments.dryrun:
                    print(f"Dryrun: list {service}:{entry}")
                else:
                    provider = Provider(service=service)
                    provider.list(name=entry)

            return ""

        elif arguments.delete:
            """
            storage delete SOURCE [--parallel=N]
            """
            sources = arguments.SOURCE or variables["storage"] or 'local:.'
            sources = Parameter.expand(sources)

            deletes = []
            for source in sources:
                storage, entry = Parameter.separate(source)

                storage = storage or "local"
                deletes.append((storage, entry))

            _sources = ', '.join(sources)

            answer = yn_choice(f"Would you like to delete {_sources}?",
                               default="no")

            if answer:

                for delete in deletes:
                    service, entry = delete
                    if arguments.dryrun:
                        print(f"Dryrun: delete {service}:{entry}")
                    else:
                        provider = Provider(service=service)
                        provider.delete(name=entry)

            else:
                Console.error("Deletion canceled")

            return ""

        elif arguments.search:

            for storage in arguments.storage:
                provider = Provider(storage)

                provider.search(arguments.DIRECTORY, arguments.FILENAME,
                                arguments.recursive)

        elif arguments.sync:
            # TODO: implement
            raise NotImplementedError

        elif arguments.copy and arguments.SOURCE:  # positional form: copy SOURCE DESTINATION
            VERBOSE(f"COPY: Executing Copy command from {arguments.SOURCE} to "
                    f"{arguments.DESTINATION} providers")
            print(f"DEBUG storage.py: INITIALIZE with {arguments.storage[0]} "
                  "provider.")

            provider = Provider(arguments.storage[0])

            result = provider.copy(arguments.SOURCE, arguments.DESTINATION,
                                   arguments.recursive)

        elif arguments.copy:
            scloud, sbucket = source.split(":", 1) or None
            tcloud, tbucket = target.split(":", 1) or None
            # print(scloud + " " + tcloud + " " + sbucket + " " + tbucket)

            if scloud == "aws" or scloud == "google":
                provider = Provider(service=scloud)
                provider.copy(scloud, tcloud, sbucket, tbucket)
            elif (scloud == "local"
                  and tcloud == "aws") or (scloud == "local"
                                           and tcloud == "google"):
                provider = Provider(service=tcloud)
                provider.copy(scloud, tcloud, sbucket, tbucket)
            else:
                print("Not Implemented")

        return ""
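
# A self-contained sketch of how the list and delete branches above interpret
# their SOURCE argument: "aws:bucket/data.txt" separates into a service and an
# entry, and a bare path falls back to the local provider. In cloudmesh this
# is handled by Parameter.separate and Parameter.expand.
def separate(source, default_service="local"):
    service, sep, entry = source.partition(":")
    if not sep:                      # no ":" present -> treat as a local path
        return default_service, source
    return service or default_service, entry

print(separate("aws:bucket/data.txt"))   # -> ('aws', 'bucket/data.txt')
print(separate("./downloads"))           # -> ('local', './downloads')
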
    def do_openapi3(self, args, arguments):
        """
        ::

          Usage:
              openapi3 generate FUNCTION [YAML]
                                         --baseurl=BASEURL
                                         --filename=FILENAME
                                         --yamldirectory=DIRECTORY
                                         [--verbose]
              openapi3 server start YAML [NAME]
                              [--directory=DIRECTORY]
                              [--port=PORT]
                              [--server=SERVER]
                              [--verbose]
                              [--debug]
                              [--fg]
                              [--os]
              openapi3 server stop NAME
              openapi3 server list [NAME] [--output=OUTPUT]
              openapi3 server ps [NAME] [--output=OUTPUT]
              openapi3 register add NAME ENDPOINT
              openapi3 register filename NAME
              openapi3 register delete NAME
              openapi3 register list [NAME] [--output=OUTPUT]
              openapi3 tbd
              openapi3 tbd merge [SERVICES...] [--dir=DIR] [--verbose]
              openapi3 tbd list [--dir=DIR]
              openapi3 tbd description [SERVICES...] [--dir=DIR]
              openapi3 tbd md FILE [--indent=INDENT]
              openapi3 tbd codegen [SERVICES...] [--srcdir=SRCDIR]
                              [--destdir=DESTDIR]

          Arguments:
              DIR   The directory of the specifications
              FILE  The specification

          Options:
              --debug                use the server in debug mode
              --verbose              specifies to run in debug mode [default: False]
              --port=PORT            the port for the server [default: 8080]
              --directory=DIRECTORY  the directory in which the server is run
              --server=SERVER        the server [default: flask]
              --output=OUTPUT        the output format: table, csv, yaml, json [default: table]
              --srcdir=SRCDIR   The directory of the specifications
              --destdir=DESTDIR  The directory where the generated code should be put

          Description:
            This command does some useful things.


        """

        map_parameters(arguments, 'fg', 'os', 'output', 'verbose', 'port',
                       'directory', 'yamldirectory', 'baseurl', 'filename',
                       'name')
        arguments.debug = arguments.verbose

        # VERBOSE(arguments)

        if arguments.generate:

            try:
                function = arguments.FUNCTION
                yamlfile = arguments.YAML
                baseurl = path_expand(arguments.baseurl)
                filename = arguments.filename.strip().split(".")[0]
                yamldirectory = path_expand(arguments.yamldirectory)

                sys.path.append(baseurl)

                module_name = pathlib.Path(f"{filename}").stem

                imported_module = import_module(module_name)

                func_obj = getattr(imported_module, function)

                setattr(sys.modules[module_name], function, func_obj)

                # get dataclasses defined in module
                dataclass_list = []
                for attr_name in dir(imported_module):
                    #
                    # BUG: module is highlighted in pycharm
                    #
                    attr = getattr(imported_module, attr_name)
                    if is_dataclass(attr):
                        dataclass_list.append(attr)

                openAPI = generator.Generator()

                # BUG: this is windows specific and must be done differently
                # check if os.path.dirname, os.path.basename does this

                if sys.platform == 'win32':
                    baseurl_short = baseurl.split("\\")[-1]
                else:
                    baseurl_short = baseurl.split("/")[-1]
                openAPI.generate_openapi(func_obj, baseurl_short,
                                         yamldirectory, yamlfile,
                                         dataclass_list)
            except Exception as e:
                Console.error("Failed to generate openapi yaml")
                print(e)

        elif arguments.server and arguments.start and arguments.os:

            try:
                s = Server(spec=path_expand(arguments.YAML),
                           directory=path_expand(arguments.directory)
                           if arguments.directory else arguments.directory,
                           port=arguments.port,
                           server=arguments.wsgi,
                           debug=arguments.debug,
                           name=arguments.NAME)

                pid = s.run_os()

                VERBOSE(arguments, label="Server parameters")

                print(f"Run PID: {pid}")

            except FileNotFoundError:

                Console.error("specification file not found")

            except Exception as e:

                print(e)

        elif arguments.server and arguments.list:

            try:
                result = Server.list(self, name=arguments.NAME)

                # BUG: order= not yet defined

                print(Printer.list(result))

            except ConnectionError:
                Console.error("Server not running")

        elif arguments.server and arguments.ps:

            try:
                print()
                Console.info("Running Cloudmesh OpenAPI Servers")
                print()
                result = Server.ps(name=arguments.NAME)

                print(Printer.list(result, order=["name", "pid", "spec"]))

                print()
            except ConnectionError:
                Console.error("Server not running")

        elif arguments.server and arguments.stop and arguments.os:

            try:
                Server.stop(self, name=arguments.NAME)
            except ConnectionError:
                Console.error("Server not running")

        elif arguments.register and arguments.add:

            registry = Registry()
            result = registry.add(name=arguments.NAME,
                                  url=arguments.BASEURL,
                                  pid=arguments.PID)

            registry.Print(data=result, output=arguments.output)

        elif arguments.register and arguments.delete:

            registry = Registry()
            result = registry.delete(name=arguments.NAME)
            if result == 0:
                Console.error("Entry could not be found")
            else:
                Console.ok("Ok. Entry deleted")

        elif arguments.register and arguments.list:

            registry = Registry()
            result = registry.list(name=arguments.NAME)

            registry.Print(data=result, output=arguments.output)

        elif arguments.register and arguments.filename:

            registry = Registry()
            result = [registry.add_form_file(arguments.filename)]

            registry.Print(data=result, output=arguments.output)

        elif arguments.server and arguments.start:

            # VERBOSE(arguments)

            try:
                s = Server(name=arguments.NAME,
                           spec=arguments.YAML,
                           directory=arguments.directory,
                           port=arguments.port,
                           server=arguments.wsgi,
                           debug=arguments.debug)

                print("spec: ", path_expand(arguments.YAML))
                pid = s.start(name=arguments.NAME,
                              spec=path_expand(arguments.YAML),
                              foreground=arguments.fg)

                print(f"Run PID: {pid}")

            except FileNotFoundError:

                Console.error("specification file not found")

            except Exception as e:
                print(e)

        elif arguments.server and arguments.stop:

            try:
                print()
                Console.info("Stopping Cloudmesh OpenAPI Server")
                print()

                Server.stop(name=arguments.NAME)

                print()
            except ConnectionError:
                Console.error("Server not running")
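
# A standalone sketch of the reflection used by "openapi3 generate" above:
# make the target directory importable, load the module by file name, look up
# the function to expose, and collect the dataclasses defined in the module.
# os.path.basename is the portable alternative to the platform-specific
# baseurl split flagged as a BUG in the code above.
import os
import sys
import pathlib
from importlib import import_module
from dataclasses import is_dataclass

def load_function(baseurl, filename, function):
    sys.path.append(baseurl)                    # make the module importable
    module_name = pathlib.Path(filename).stem   # "analyze.py" -> "analyze"
    module = import_module(module_name)
    func = getattr(module, function)
    dataclasses = [getattr(module, name) for name in dir(module)
                   if is_dataclass(getattr(module, name))]
    baseurl_short = os.path.basename(baseurl.rstrip("/\\"))
    return func, dataclasses, baseurl_short
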
Example #10
    def do_admin(self, args, arguments):
        """
        ::

          Usage:
            admin mongo install [--brew] [--download=PATH] [--nosudo] [--docker] [--dryrun] [--force]
            admin mongo create
            admin mongo status
            admin mongo stats
            admin mongo version
            admin mongo start
            admin mongo stop
            admin mongo backup FILENAME
            admin mongo load FILENAME
            admin mongo security
            admin mongo password PASSWORD
            admin mongo list [--output=OUTPUT]
            admin mongo ssh
            admin mongo mode [MODE]
            admin status
            admin system info

          The admin command performs some administrative functions, such as
          installing packages, software and services. It also is used to
          start services and configure them.

          Arguments:
            FILENAME  the filename for backups

          Options:
            -f      specify the file

          Description:

            Mongo DB

              MongoDB is managed through a number of commands.

              The configuration is read from ~/.cloudmesh/cloudmesh.yaml

              First, you need to create a MongoDB database with

                cms admin mongo create

              Second, you need to start it with

                 cms admin mongo start

              Now you can interact with it to find out the status, the stats,
              and the database listing with the commands

                 cms admin mongo status
                 cms admin mongo stats
                 cms admin mongo list

              To stop it from running use the command

                 cms admin mongo stop

              System information about your machine can be returned by

                 cms admin system info

              This can be very useful in case you are filing an issue or bug.

              The command

                cms admin mongo ssh

              is only supported for docker and allows logging in to the
              running container for debugging. This function may be disabled
              in the future.


            admin mongo mode native
               switches configuration file to use native mode

            admin mongo mode running
                switches the configuration to use running mode

        """

        map_parameters(arguments,
                       "output",
                       "nosudo",
                       "docker",
                       "dryrun",
                       "force")
        arguments.output = arguments.output or "table"

        VERBOSE(arguments)
        # arguments.PATH = arguments['--download'] or None
        result = None

        if arguments.mongo:

            if arguments.install and arguments.docker:

                installer = MongoInstaller(dryrun=arguments.dryrun,
                                           force=arguments.force)
                r = installer.docker()
                return r

            elif arguments.install:

                print("MongoDB install")
                print(79 * "=")
                # print(arguments.force)
                installer = MongoInstaller(dryrun=arguments.dryrun,
                                           force=arguments.force)

                sudo = not arguments.nosudo
                # if 'linux' in platform.lower() :
                #     print("SUDO:", sudo)
                # r = installer.install(sudo=sudo)
                r = installer.install()
                return r

            elif arguments.status:

                mongo = MongoDBController()
                state = mongo.status()

                if "error" in state["status"]:
                    Console.error(state["message"])
                    print(Printer.attribute(state))
                else:
                    data = dotdict()

                    for pid in state['output']:
                        entry = state['output'][pid]
                        data["pid"] = state['output'][pid]
                        data["command"] = state['output'][pid][
                            'command'].strip()

                    print(Printer.dict(data, order=["pid", "command"]))
                    Console.ok(str(data.pid['pid']) + " " + state["message"])

            elif arguments.version:
                print("MongoDB Version")
                print(79 * "=")
                mongo = MongoDBController()
                r = mongo.version()
                print(r)

            elif arguments.security:

                mongo = MongoDBController()
                mongo.set_auth()
                print()

            elif arguments.create:

                print("MongoDB create")
                MongoDBController().create()

            elif arguments.ssh:

                print("MongoDB ssh")
                MongoDBController().ssh()

            elif arguments.start:

                print("MongoDB start")
                MongoDBController().start(security=True)

            elif arguments.stop:

                print("MongoDB stop")
                MongoDBController().stop()

            elif arguments.backup:

                print("MongoDB backup")
                MongoDBController().dump(arguments.get('FILENAME'))

            elif arguments.load:

                print("MongoDB load")
                MongoDBController().restore(arguments.get('FILENAME'))

            elif arguments.stats:

                mongo = MongoDBController()
                r = mongo.stats()

                if len(r) > 0:
                    print(Printer.attribute(r))
                    Console.ok("ok")
                else:
                    Console.ok("Is your MongoDB server running?")

            elif arguments.list:

                mongo = MongoDBController()

                r = mongo.list()

                if len(r) > 0:
                    if arguments.output == 'table':
                        print(Printer.dict(r, order=["name",
                                                     "sizeOnDisk",
                                                     "empty",
                                                     "collections"],
                                           output=arguments.output),
                              )
                    else:
                        print(Printer.write(r, output=arguments.output))
                    Console.ok("ok")
                else:
                    Console.ok("Is your MongoDB server running?")

            elif arguments.mode:

                if arguments.MODE:

                    if arguments.MODE not in ["native", "running", "docker"]:
                        Console.error("The mode is not supported")
                        return ""
                    config = Config()
                    config["cloudmesh.data.mongo.MODE"] = arguments.MODE
                    config.save()

                else:
                    config = Config()
                    mode = config["cloudmesh.data.mongo.MODE"]
                    print(mode)
                    return ""


        elif arguments.status:

            # config = Config()
            # data = config["cloudmesh.data.mongo"]

            print("Rest Service status")

            print("MongoDB status")

            try:
                mongo = MongoDBController()
                mongo.login()
                if mongo.status()['status'] == 'ok':
                    Console.ok("Mongo is running")
            except Exception as e:
                Console.error("Mongo is not running")
                print(e)

        elif arguments.system:

            s = OperatingSystem.get()
            print(Printer.attribute(s))

        return result
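
# A minimal sketch of the dot-notation lookup that the Config class performs
# for the "admin mongo mode" branch above: walk a nested mapping along a
# dotted attribute such as cloudmesh.data.mongo.MODE, which mirrors
#   config["cloudmesh.data.mongo.MODE"]
# in the real code. Not the cloudmesh implementation.
def get_dotted(data, attribute):
    node = data
    for key in attribute.split("."):
        node = node[key]
    return node

settings = {"cloudmesh": {"data": {"mongo": {"MODE": "native"}}}}
print(get_dotted(settings, "cloudmesh.data.mongo.MODE"))   # -> native
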
Example #11
    def do_storage(self, args, arguments):
        """
        ::

           Usage:
             storage run
             storage monitor [--storage=SERVICES] [--status=all | --status=STATUS] [--output=output] [--clear]
             storage create dir DIRECTORY [--storage=SERVICE] [--parallel=N]
             storage get SOURCE DESTINATION [--recursive] [--storage=SERVICE] [--parallel=N]
             storage put SOURCE DESTINATION [--recursive] [--storage=SERVICE] [--parallel=N]
             storage list [SOURCE] [--recursive] [--parallel=N] [--output=OUTPUT] [--dryrun]
             storage delete SOURCE [--parallel=N] [--dryrun]
             storage search  DIRECTORY FILENAME [--recursive] [--storage=SERVICE] [--parallel=N] [--output=OUTPUT]
             storage sync SOURCE DESTINATION [--name=NAME] [--async] [--storage=SERVICE]
             storage sync status [--name=NAME] [--storage=SERVICE]
             storage config list [--output=OUTPUT]
             storage copy --source=SOURCE:SOURCE_FILE_DIR --target=TARGET:TARGET_FILE_DIR

           This command does some useful things.

           Arguments:
             SOURCE        SOURCE can be a directory or file
             DESTINATION   DESTINATION can be a directory or file
             DIRECTORY     DIRECTORY refers to a folder on the cloud service
             SOURCE:SOURCE_FILE_DIR source provider name: file or directory name
             TARGET:TARGET_FILE_DIR destination provider name: file or directory name

           Options:
             --storage=SERVICE  specify the cloud service name like aws or
                                azure or box or google

           Description:
             commands used to upload, download, list files on different
             cloud storage services.

             storage run
                Execute the actions in database that are in waiting status.

           > storage monitor [--storage=SERVICE]
           >                 [--status=all | --status=STATUS]
           >                 [--output=output]
           >                 [--clear]
                Monitor the actions in database and refresh every 5 seconds.

           > storage put SOURCE DESTINATION [--recursive] [--storage=SERVICE]
           >                               [--parallel=N]
               Uploads the file or directory specified as SOURCE to the
               specified cloud at DESTINATION.

           > storage get SOURCE DESTINATION [--recursive] [--storage=SERVICE]
           >                               [--parallel=N]
               Downloads the file or directory specified as SOURCE from the
               specified cloud to DESTINATION.

             storage delete SOURCE [--parallel=N] [--dryrun]
                Deletes the file specified in the filename from the
                specified cloud.

           > storage list [SOURCE] [--recursive] [--parallel=N]
           >             [--output=OUTPUT] [--dryrun]
               lists all the files from the container name specified on
               the specified cloud.

             storage create dir DIRECTORY [--storage=SERVICE] [--parallel=N]
               creates a folder with the directory name specified on the
               specified cloud.

           > storage search DIRECTORY FILENAME [--recursive]
           >                                  [--storage=SERVICE]
           >                                  [--parallel=N]
           >                                  [--output=OUTPUT]
               searches for the source in all the folders on the specified
               cloud.

             sync SOURCE DESTINATION
               puts the content of source to the destination.
                If --recursive is specified this is done recursively from
                   the source
                If --async is specified, this is done asynchronously
                If a name is specified, the process can also be monitored
                   with the status command by name.
                 If the name is not specified all data is monitored.

             sync status
               The status for the asynchronous sync can be seen with this
               command

             config list
               Lists the configured storage services in the yaml file

             storage copy SOURCE DESTINATION
               Copies files from source storage to destination storage.
               The syntax of SOURCE and DESTINATION is:
               SOURCE - awss3:source.txt
               DESTINATION - azure:target.txt

           Description of the copy command:

                This command enables copying files between different cloud
                service providers, as well as listing and deleting them. It
                accepts `aws`, `google` and `local` as the SOURCE and TARGET
                provider.

                cms storage copy --source=SERVICE:SOURCE --target=DEST:TARGET

                    Command copies files or directories from Source provider to
                    Target Provider.

                cms storage slist --source=SERVICE:SOURCE
                    Command lists all the files present in the SOURCE provider's
                    given SOURCE_FILE_DIR location. This command accepts
                    `aws` or `google` as the SOURCE provider.

                cms storage sdelete --source=SERVICE:SOURCE
                    Command deletes the file or directory from the SOURCE
                    provider's SOURCE_FILE_DIR location

            Examples:
                cms storage_service copy --source=local:test1.txt --target=aws:uploadtest1.txt
                cms storage_service list --source=google:test
                cms storage_service delete --source=aws:uploadtest1.txt

                cms storage put test_file1.txt aws_test_file1.txt
                cms storage put ./recur_dir recur_dir_aws/ --recursive
                cms storage put ./recur_dir recur_dir_aws/

                cms storage get aws_test_file1.txt aws_file1.txt
                cms storage get recur_dir_aws from_aws_dir
                cms storage get recur_dir_aws from_aws_dir --recursive

                cms storage list
                cms storage list --recursive
                cms storage list aws:recur_dir_aws --recursive

                cms storage delete aws:aws_test_file1.txt

                cms storage search recur_dir_aws recur_file1.txt

           Example:
              set storage=aws
              storage put SOURCE DESTINATION --recursive

              is the same as
              storage --storage=aws put SOURCE DESTINATION --recursive

              storage copy aws:source.txt oracle:target.txt

        """
        # arguments.CONTAINER = arguments["--container"]

        VERBOSE(arguments)
        map_parameters(arguments, "dryrun", "recursive", "storage", "source",
                       "target", "parallel")

        source = arguments.source
        target = arguments.target
        variables = Variables()

        parallelism = arguments.parallel or 1

        arguments.storage = Parameter.expand(arguments.storage
                                             or variables['storage'])

        if arguments.monitor:
            provider = Provider(arguments.storage[0], parallelism=parallelism)
            status = arguments['--status'] or "all"
            output = arguments['--output'] or "table"
            result = provider.monitor(status=status, output=output)
        elif arguments.run:
            provider = Provider(arguments.storage[0], parallelism=parallelism)
            result = provider.run()
        elif arguments['get']:
            provider = Provider(arguments.storage[0], parallelism=parallelism)

            result = provider.get(arguments.SOURCE, arguments.DESTINATION,
                                  arguments.recursive)
            # result = provider.run()

        elif arguments.put:
            provider = Provider(arguments.storage[0], parallelism=parallelism)

            result = provider.put(arguments.SOURCE, arguments.DESTINATION,
                                  arguments.recursive)

        elif arguments.create and arguments.dir:
            provider = Provider(arguments.storage[0], parallelism=parallelism)

            result = provider.create_dir(arguments.DIRECTORY)

        elif arguments.list:
            """
            storage list SOURCE [--parallel=N]
            """
            if variables['storage']:
                default_source = f"{variables['storage']}:/"
            else:
                default_source = "local:/"
            sources = arguments.SOURCE or default_source
            sources = Parameter.expand(sources)

            deletes = []
            for source in sources:
                storage, entry = Parameter.separate(source)

                storage = storage or source or "local"
                deletes.append((storage, entry))

            _sources = ', '.join(sources)

            for delete in deletes:
                service, entry = delete
                if arguments.dryrun:
                    print(f"Dryrun: list {service}:{entry}")
                else:
                    provider = Provider(service=service,
                                        parallelism=parallelism)
                    provider.list(name=entry, recursive=arguments.recursive)

            return ""

        elif arguments.delete:
            """
            storage delete SOURCE [--parallel=N]
            """
            if variables['storage']:
                default_source = f"{variables['storage']}:/"
            else:
                default_source = "local:/"
            sources = arguments.SOURCE or default_source
            sources = Parameter.expand(sources)

            deletes = []
            for source in sources:
                storage, entry = Parameter.separate(source)

                storage = storage or source or "local"
                deletes.append((storage, entry))

            _sources = ', '.join(sources)

            answer = yn_choice(f"Would you like to delete {_sources}?",
                               default="no")

            if answer:

                for delete in deletes:
                    service, entry = delete
                    if arguments.dryrun:
                        print(f"Dryrun: delete {service}:{entry}")
                    else:
                        provider = Provider(service=service,
                                            parallelism=parallelism)
                        provider.delete(name=entry)

            else:
                Console.error("Deletion canceled")

            return ""

        elif arguments.search:

            for storage in arguments.storage:
                provider = Provider(storage, parallelism=parallelism)

                provider.search(arguments.DIRECTORY, arguments.FILENAME,
                                arguments.recursive)

        elif arguments.rsync:
            # TODO: implement
            raise NotImplementedError

        elif arguments.copy:
            scloud, sfileDir = source.split(":", 1)
            tcloud, tfileDir = target.split(":", 1)
            print(
                f" Copying from Source {scloud} : {sfileDir} to Target  {tcloud} : {tfileDir}"
            )

            cloudName = ["aws", "google"]
            if scloud in cloudName:
                provider = Provider(service=scloud, parallelism=parallelism)
                provider.copyFiles(scloud, sfileDir, tcloud, tfileDir)
            else:
                print("Not Implemented")

        return ""
from cloudmesh.common.util import path_expand
from cloudmesh.common.Benchmark import Benchmark
from cloudmesh.management.configuration.name import Name
from cloudmesh.common.util import HEADING
from cloudmesh.common.console import Console
from cloudmesh.configuration.Config import Config
from cloudmesh.common.debug import VERBOSE
import os
import sys

Benchmark.debug()

config = Config()
username = config["cloudmesh.profile.user"]

if username == 'TBD':
    Console.error("please set cloudmesh.profile.user in ~/.cloudmesh.yaml")
    sys.exit()

path = path_expand(f"{config.location}/name.yaml")
data = {
    'counter': 1,
    'path': path,
    'kind': "vm",
    'schema': "{experiment}-{group}-{user}-{kind}-{counter}",
    'experiment': 'exp',
    'group': 'group',
    'user': '******'
}

try:
    os.remove(path)
except OSError:
    pass
    def update(self, _entries, progress=True):
        MongoDBController().start_if_not_running()
        if type(_entries) == dict:
            entries = [_entries]
        else:
            entries = _entries

        if progress:
            bar = Bar('Cloudmesh Database Update', max=len(entries))

        result = []
        for entry in entries:
            if progress:
                bar.next()
            if 'cm' not in entry:
                print("UPDATE ERROR")
                VERBOSE(entry)
                raise ValueError("The cm attribute is not in the entry")
            entry['cm']['collection'] = "{cloud}-{kind}".format(**entry["cm"])

            # noinspection PyUnusedLocal
            try:
                self.col = self.db[entry['cm']['collection']]

                old_entry = self.col.find_one({
                    "cm.kind": entry["cm"]["kind"],
                    "cm.cloud": entry["cm"]["cloud"],
                    "cm.name": entry["cm"]["name"]
                })

                if old_entry is not None:

                    cm = dict(old_entry['cm'])

                    cm.update(entry['cm'])
                    cm['modified'] = str(datetime.utcnow())

                    # entry['cm']['created'] = cm['created']
                    entry['cm'] = cm

                    post = self.col.replace_one(
                        {
                            "cm.kind": entry['cm']["kind"],
                            "cm.cloud": entry['cm']["cloud"],
                            "cm.name": entry['cm']["name"]
                        },
                        entry,
                        upsert=True)

                else:
                    entry['cm']['created'] = entry['cm']['modified'] = str(
                        datetime.utcnow())
                    self.col.insert_one(entry)

            except Exception as e:
                Console.error(
                    "uploading document\n{entry}\n-------\n\n".format(
                        entry=str(entry)))
                pass
            result.append(entry)

        if progress:
            bar.finish()

        return result
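
The update method above keys every document on the (cm.kind, cm.cloud, cm.name) triple and upserts it into a per-cloud collection. A minimal sketch of the same replace_one pattern with plain pymongo; the connection string and collection name are placeholders:

from datetime import datetime
from pymongo import MongoClient

col = MongoClient("mongodb://localhost:27017")["cloudmesh"]["chameleon-vm"]

entry = {"cm": {"kind": "vm", "cloud": "chameleon", "name": "sample1"}}
entry["cm"]["modified"] = str(datetime.utcnow())

# replace the document that carries the same cm triple, or insert it if missing
col.replace_one({"cm.kind": entry["cm"]["kind"],
                 "cm.cloud": entry["cm"]["cloud"],
                 "cm.name": entry["cm"]["name"]},
                entry,
                upsert=True)
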
Example #14
    def do_test(self, args, arguments):
        """
        ::

          Usage:
                test

          This command is intended to check if your windows set up is
          correctly done.

          Bugs:
              This program is supposed to be implemented. It is at this
              time just a template

          Description:

          Checks we do

             1. are you running python 3.8.1
             2. are you having the newest version of pip
             3. is cl installed
             4. is nmake installed
             5. is the username without spaces
             6. are you running in a venv
             7. is the default mongo port used
             8. do you have docker installed
             9. do you have vbox installed
            10. how much memory do you have
            11. do you have free diskspace

          Checks that are missing or still need to be implemented

            12. is hyperv switched on or off
            13. are containers running
            14. .... other things that can help us debug your environment

        """

        tester = CloudmeshTest()

        #
        # Python setup
        #
        tester.check_venv()
        tester.check_python()
        tester.check_command("pip --version", test="20.0.2")

        #
        # command tool setup
        #

        if platform.system() == "Windows":

            if not tester.check_windows():
                Console.error(" THIS VERSION OF WINDOWS IS NOT SUPPORTED.")
                return ""

            tester.which("cl")
            tester.which("nmake")
            tester.which("git")
            tester.which("ssh")
            tester.which("ssh-keygen")
            tester.which("docker")
            tester.which("yamllint")

        else:

            tester.check_command("git --version", test="git version")
            tester.check_command("ssh", test="usage", show=False)
            tester.check_command("ssh-keygen --help", test="usage", show=False)
            tester.check_command("docker --version", test="Docker version")
            tester.check_command("VirtualBox --help",
                                 test="Oracle VM VirtualBox VM Selector",
                                 show=False)
            tester.check_command("yamllint",
                                 test="usage: yamllint",
                                 show=False)

        tester.is_user_name_valid()
        tester.check_mongo()

        tester.usage()
        tester.check_yaml()
        return ""
Example #15
    def ssh(self, vm=None, command=None):
        def key_selector(keys):
            """
           This is a helper method for ssh key selection
           THIS IS JUST A SAFETY MEASURE, PLEASE DON'T MIND IT
            :param keys:
            :return:
            """
            tmp_keys = keys[:]
            # indices = range(1,len(tmp_keys)+1)
            for key_idx, key in enumerate(keys):
                key['idx'] = key_idx + 1
            print(
                Printer.flatwrite(
                    tmp_keys,
                    sort_keys=["idx"],
                    order=['idx', 'KeyName', 'KeyFingerprint'],
                    header=['Index', 'Key Name', "Key Fingerprint"],
                    output="table",
                    humanize=None))
            # Console.msg("Please select one of the AWS key indices from the table above: ")
            picked = 0
            while picked < 1 or picked > len(keys):
                try:
                    picked = int(
                        input(
                            "Please select one of the AWS key indices from the table above: "
                        ))
                except ValueError:
                    pass
            return keys[picked - 1]

        cm = CmDatabase()
        ip = vm['public_ips']

        try:
            key_name = vm['KeyName']
            keys = cm.find_all_by_name(name=key_name, kind="key")
            for k in keys:
                if 'location' in k.keys():
                    if 'private' in k['location'].keys():
                        key = k['location']['private']
                        break

        except (KeyError, IndexError):
            aws_keys = cm.find(kind='key', cloud='aws')
            if len(aws_keys) == 0:
                Console.error(
                    f"Could not find a key for the AWS instance '{vm['name']}'"
                )
                Console.error(
                    f"Use `cms help key` to learn how to add and upload a key for AWS"
                )
                return
            aws_key = key_selector(aws_keys)
            for sshkey in cm.find_all_by_name(name=aws_key['KeyName'],
                                              kind="key"):
                if "location" in sshkey.keys():
                    key = sshkey['location']['private']
                    break
        user = "******"  # needs to be set on creation.

        if command is None:
            command = ""

        if user is None:
            location = ip
        else:
            location = user + '@' + ip
        cmd = "ssh " \
              "-o StrictHostKeyChecking=no " \
              "-o UserKnownHostsFile=/dev/null " \
              f"-i {key} {location} {command}"
        cmd = cmd.strip()
        print(cmd)
        # VERBOSE(cmd)

        if command == "":
            if platform.lower() == 'win32':

                class disable_file_system_redirection:
                    _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
                    _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection

                    def __enter__(self):
                        self.old_value = ctypes.c_long()
                        self.success = self._disable(
                            ctypes.byref(self.old_value))

                    def __exit__(self, type, value, traceback):
                        if self.success:
                            self._revert(self.old_value)

                with disable_file_system_redirection():
                    os.system(cmd)
            else:
                os.system(cmd)

        else:
            if platform.lower() == 'win32':

                class disable_file_system_redirection:
                    _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
                    _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection

                    def __enter__(self):
                        self.old_value = ctypes.c_long()
                        self.success = self._disable(
                            ctypes.byref(self.old_value))

                    def __exit__(self, type, value, traceback):
                        if self.success:
                            self._revert(self.old_value)

                with disable_file_system_redirection():
                    ssh = subprocess.Popen(cmd,
                                           shell=True,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
            else:
                ssh = subprocess.Popen(cmd,
                                       shell=True,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            result = ssh.stdout.read().decode("utf-8")
            if not result:
                error = ssh.stderr.readlines()
                print("ERROR: %s" % error)
            else:
                return result
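
The ssh method above assembles a plain ssh invocation that disables host key checking and points -i at the private key found in the database. A small sketch of just that command construction, with placeholder values for key, user, and ip:

key = "~/.ssh/id_rsa"      # placeholder: the method looks the key path up in the database
user = "ubuntu"            # placeholder: set on vm creation
ip = "203.0.113.10"        # placeholder public ip
command = "uname -a"

cmd = ("ssh "
       "-o StrictHostKeyChecking=no "
       "-o UserKnownHostsFile=/dev/null "
       f"-i {key} {user}@{ip} {command}").strip()
print(cmd)
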
    def copy(self,
             source=None,
             source_obj=None,
             target=None,
             target_obj=None,
             recursive=True):
        """
        Copy objects from source to target storage
        :param source: source CSP - awss3/azure/local
        :param source_obj: It can be file or folder
        :param target: target CSP - awss3/azure/local
        :param target_obj: It can be file or folder
        :param recursive: enlist directories/sub-directories
        :return: dictionary enlisting copied objects
        """
        banner(f"CALLING AZURE BLOB STORAGE PROVIDER'S GET METHOD FOR "
               f"{source.upper()} TO {target.upper()} COPY")

        if target_obj is None:
            target_obj = source_obj

        target_obj = target_obj.replace("\\", "/")
        source_obj = source_obj.replace("\\", "/")

        if target == "local":

            result = self.storage_provider.get(source=source_obj,
                                               destination=target_obj,
                                               recursive=recursive)
        elif target == "azure":
            source_obj = str(Path(source_obj).expanduser()).replace("\\", "/")

            if source == "awss3":
                source_provider = StorageAwss3Provider(service='awss3')
                config = Config(config_path="~/.cloudmesh/cloudmesh.yaml")

                spec = config["cloudmesh.storage"]
                local_target = spec["local"]["default"]["directory"]
                local_target = local_target.replace("\\", "/")

                result = source_provider.get(source=source_obj,
                                             destination=local_target,
                                             recursive=recursive)
                print("Fetched from s3 to local:\n")
                # pprint(result)
                # TODO: return error if get fails, no put required

                source_obj = Path(Path(local_target).expanduser() / source_obj)

            result = self.storage_provider.put(source=source_obj,
                                               destination=target_obj,
                                               recursive=recursive)
        else:
            raise NotImplementedError

        if result is None:
            return Console.error(f"Object {source_obj} couldn't be copied "
                                 f"from {source} to {target}. Please check.")
        else:
            Console.ok(
                f"Copied {source_obj} from {source} to {target}\nTarget "
                f"object name is {target_obj} ")
            pprint(result)
            return self.print_table(result,
                                    status="Copied",
                                    source=source,
                                    target=target)
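
When the copy target is azure and the source is awss3, the method above first stages the object in the configured local directory and normalizes path separators before the put. A small sketch of that normalization and staging-path construction; the local directory shown is a placeholder for cloudmesh.storage.local.default.directory:

from pathlib import Path

local_target = "~/.cloudmesh/storage"   # placeholder for the configured local directory
source_obj = "data\\report.txt"         # a Windows-style object name

# normalize separators and build the staged path, as the copy method does
source_obj = source_obj.replace("\\", "/")
staged = Path(Path(local_target).expanduser() / source_obj)
print(staged)
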
Example #17
    def create(self,
               name=None,
               image=None,
               size=None,
               location=None,
               timeout=360,
               key=None,
               secgroup=None,
               ip=None,
               user=None,
               public=None,
               group=None,
               metadata=None,
               **kwargs):
        """
        creates a named node

        :param name: the name of the node
        :param image: the image used
        :param size: the size of the image
        :param timeout: a timeout in seconds that is invoked in case the image
                        does not boot. The default is set to 360 seconds.
        :param kwargs: additional arguments passed along at time of boot

        :return: the list with the modified dicts
        """
        """
        create one node
        """
        if not ip and public:
            ip = self.find_available_public_ip()
        elif ip is not None:
            entry = self.list_public_ips(ip=ip, available=True)
            if len(entry) == 0:
                Console.error("ip not available")
                return None

        banner("Create Server")
        Console.msg(f"    Name:     {name}")
        Console.msg(f"    User:     {user}")
        Console.msg(f"    IP:       {ip}")
        Console.msg(f"    Image:    {image}")
        Console.msg(f"    Size:     {size}")
        Console.msg(f"    Public:   {public}")
        Console.msg(f"    Key:      {key}")
        Console.msg(f"    Location: {location}")
        Console.msg(f"    Timeout:  {timeout}")
        Console.msg(f"    Secgroup: {secgroup}")
        Console.msg(f"    Group:    {group}")
        # Console.msg(f"    Groups:   {groups}")
        Console.msg("")

        if secgroup is None:
            secgroup = 'default'

        if key is None:
            raise ValueError("Key must be set. Use cms set key=<key name>")

        #
        # BUG: the tags seem incomplete
        #
        if metadata is None:
            metadata = []
        metadata = [{
            'Key': 'cm.image',
            'Value': image
        }, {
            'Key': 'cm.name',
            'Value': name
        }, {
            'Key': 'cm.flavor',
            'Value': size
        }, {
            'Key': 'cm.user',
            'Value': self.user
        }, {
            'Key': 'cm.kind',
            'Value': "vm"
        }, {
            'Key': 'cm.status',
            'Value': "BOOTING"
        }, {
            'Key': 'Name',
            'Value': name
        }]
        # VERBOSE(metadata)
        new_ec2_instance = self.ec2_resource.create_instances(
            ImageId=image,
            InstanceType=size,
            MaxCount=1,
            MinCount=1,
            SecurityGroups=[secgroup],
            KeyName=key,
            TagSpecifications=[{
                'ResourceType': 'instance',
                'Tags': metadata
            }])
        # VERBOSE(new_ec2_instance)
        new_ec2_instance = new_ec2_instance[0]
        waiter = self.ec2_client.get_waiter('instance_exists')

        waiter.wait(Filters=[{
            'Name': 'instance-id',
            'Values': [new_ec2_instance.instance_id]
        }],
                    WaiterConfig={
                        'Delay': 20,
                        'MaxAttempts': int(timeout / 20)
                    })
        print()
        Console.ok("Instance created...")
        print()
        # if IP provided, Attach it to new instance
        if ip:
            self.attach_public_ip(name, ip)
        # x = self.ec2_client.describe_instances(InstanceIds=[new_ec2_instance.instance_id])
        # VERBOSE(x)
        data = self.info(name=name)

        # VERBOSE(data)
        data['name'] = name
        data['kind'] = 'aws'
        data['status'] = new_ec2_instance.state['Name']
        data['created'] = new_ec2_instance.launch_time.strftime(
            "%m/%d/%Y, %H:%M:%S") if new_ec2_instance.launch_time else ''
        data['updated'] = new_ec2_instance.launch_time.strftime(
            "%m/%d/%Y, %H:%M:%S") if new_ec2_instance.launch_time else ''
        data['name'] = new_ec2_instance.tags[0][
            'Value'] if new_ec2_instance.tags else ''
        data['instance_id'] = new_ec2_instance.id
        data['image'] = new_ec2_instance.image_id
        data['key_name'] = key
        Console.msg("Waiting for the Public IP address assignment ...")
        while True:
            try:
                public_ip = self.ec2_client.describe_instances(
                    InstanceIds=[new_ec2_instance.id])['Reservations'][0][
                    'Instances'][0]['PublicIpAddress']
                break
            except KeyError:
                time.sleep(0.5)
        data['public_ips'] = public_ip
        data['private_ips'] = new_ec2_instance.private_ip_address

        Console.msg(f"    Public IP:   {data['public_ips']}")
        Console.msg(f"    Private IP:  {data['private_ips']}")

        output = self.update_dict(data, kind="vm")[0]
        return output
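
The create method above blocks on a boto3 'instance_exists' waiter until the new instance shows up or the timeout is exhausted. A minimal sketch of that waiter pattern; the instance id is a placeholder and credentials/region come from the usual boto3 configuration:

import boto3

ec2_client = boto3.client("ec2")
instance_id = "i-0123456789abcdef0"   # placeholder instance id
timeout = 360

# poll every 20 seconds until the instance exists, for at most timeout/20 attempts
waiter = ec2_client.get_waiter("instance_exists")
waiter.wait(Filters=[{"Name": "instance-id", "Values": [instance_id]}],
            WaiterConfig={"Delay": 20, "MaxAttempts": int(timeout / 20)})
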
    def __init__(self, name=None, configuration="~/.cloudmesh/cloudmesh.yaml"):
        """
        Initializes the provider. The default parameters are read from the
        configuration file that is defined in yaml format.

        :param name: The name of the provider as defined in the yaml file
        :param configuration: The location of the yaml configuration file
        """

        conf = Config(configuration)["cloudmesh"]

        self.user = Config()["cloudmesh"]["profile"]["user"]

        self.spec = conf["cloud"][name]
        self.cloud = name

        cred = self.spec["credentials"]
        self.default = self.spec["default"]
        self.cloudtype = self.spec["cm"]["kind"]
        super().__init__(name, conf)

        VERBOSE(cred, verbose=10)

        if self.cloudtype != 'azure':
            Console.error("This class is meant for azure cloud")

        # ServicePrincipalCredentials related Variables to configure in
        # cloudmesh.yaml file

        # AZURE_APPLICATION_ID = '<Application ID from Azure Active Directory
        # App Registration Process>'

        # AZURE_SECRET_KEY = '<Secret Key from Application configured in
        # Azure>'

        # AZURE_TENANT_ID = '<Directory ID from Azure Active Directory
        # section>'

        credentials = ServicePrincipalCredentials(
            client_id=cred['AZURE_APPLICATION_ID'],
            secret=cred['AZURE_SECRET_KEY'],
            tenant=cred['AZURE_TENANT_ID']
        )

        subscription = cred['AZURE_SUBSCRIPTION_ID']

        # Management Clients
        self.resource_client = ResourceManagementClient(
            credentials, subscription)
        self.compute_client = ComputeManagementClient(
            credentials, subscription)
        self.network_client = NetworkManagementClient(
            credentials, subscription)

        # VMs abbreviation
        self.vms = self.compute_client.virtual_machines
        self.imgs = self.compute_client.virtual_machine_images

        # Azure Resource Group
        self.GROUP_NAME = self.default["resource_group"]

        # Azure Datacenter Region
        self.LOCATION = cred["AZURE_REGION"]

        # NetworkManagementClient related Variables
        self.VNET_NAME = self.default["network"]
        self.SUBNET_NAME = self.default["subnet"]
        self.IP_CONFIG_NAME = self.default["AZURE_VM_IP_CONFIG"]
        self.NIC_NAME = self.default["AZURE_VM_NIC"]

        # Azure VM Storage details
        self.OS_DISK_NAME = self.default["AZURE_VM_DISK_NAME"]
        self.USERNAME = self.default["AZURE_VM_USER"]
        self.PASSWORD = self.default["AZURE_VM_PASSWORD"]
        self.VM_NAME = self.default["AZURE_VM_NAME"]

        # Create or Update Resource group
        self.get_resource_group()
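
The provider __init__ above wires ServicePrincipalCredentials into the Azure management clients. A compressed sketch of that wiring with placeholder credentials; in the provider these values come from cloudmesh.cloud.<name>.credentials in ~/.cloudmesh/cloudmesh.yaml:

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient

credentials = ServicePrincipalCredentials(
    client_id="<AZURE_APPLICATION_ID>",   # placeholders from the app registration
    secret="<AZURE_SECRET_KEY>",
    tenant="<AZURE_TENANT_ID>")

compute_client = ComputeManagementClient(credentials, "<AZURE_SUBSCRIPTION_ID>")

# same abbreviation as in the provider: handle to the virtual machine operations
vms = compute_client.virtual_machines
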
Example #19
    def do_vm(self, args, arguments):
        """
        ::

            Usage:
                vm ping [NAMES] [--cloud=CLOUDS] [--count=N]
                vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME]
                vm status [NAMES] [--cloud=CLOUDS] [--output=OUTPUT]
                vm console [NAME] [--force]
                vm log [NAME] [--force]
                vm stop [NAMES]  [--dryrun]
                vm start [NAMES] [--dryrun]
                vm terminate [NAMES] [--cloud=CLOUD] [--dryrun]
                vm delete [NAMES] [--cloud=CLOUD] [--dryrun]
                vm refresh [--cloud=CLOUDS]
                vm list [NAMES]
                        [--cloud=CLOUDS]
                        [--output=OUTPUT]
                        [--refresh]
                vm boot [--n=COUNT]
                        [--name=VMNAMES]
                        [--cloud=CLOUD]
                        [--username=USERNAME]
                        [--image=IMAGE]
                        [--flavor=FLAVOR]
                        [--public]
                        [--secgroup=SECGROUPs]
                        [--group=GROUPs]
                        [--key=KEY]
                        [--dryrun]
                        [-v]
                vm meta list [NAME]
                vm meta set [NAME] KEY=VALUE...
                vm meta delete [NAME] KEY...
                vm script [--name=NAMES]
                          [--username=USERNAME]
                          [--key=KEY]
                          [--dryrun]
                          [--dir=DESTINATION]
                          SCRIPT
                vm ip assign [NAMES]
                          [--cloud=CLOUD]
                vm ip show [NAMES]
                           [--group=GROUP]
                           [--cloud=CLOUD]
                           [--output=OUTPUT]
                           [--refresh]
                vm ip inventory [NAMES]
                vm ssh [NAMES]
                       [--username=USER]
                       [--quiet]
                       [--ip=IP]
                       [--key=KEY]
                       [--command=COMMAND]
                vm put SOURCE DESTINATION [NAMES]
                vm get SOURCE DESTINATION [NAMES]
                vm rename [OLDNAMES] [NEWNAMES] [--force] [--dryrun]
                vm wait [--cloud=CLOUD] [--interval=INTERVAL] [--timeout=TIMEOUT]
                vm info [--cloud=CLOUD]
                        [--output=OUTPUT]
                vm username USERNAME [NAMES] [--cloud=CLOUD]
                vm resize [NAMES] [--size=SIZE]

            Arguments:
                OUTPUT         the output format
                COMMAND        positional arguments, the commands you want to
                               execute on the server (e.g. ls -a), separated by ';'.
                               You will get the execution result returned instead
                               of logging in to the server. Note that typing --
                               before the commands is suggested.
                NAME           server name. By default it is set to the name of last vm from database.
                NAMES          server name. By default it is set to the name of last vm from database.
                KEYPAIR_NAME   Name of the vm keypair to be used to create VM. Note this is
                               not a path to key.
                NEWNAMES       New names of the VM while renaming.
                OLDNAMES       Old names of the VM while renaming.

            Options:
                -v             verbose, prints the dict at the end
                --output=OUTPUT   the output format
                -H --modify-knownhosts  Do not modify ~/.ssh/known_hosts file
                                      when ssh'ing into a machine
                --username=USERNAME   the username to login into the vm. If not
                                      specified it will be guessed
                                      from the image name and the cloud
                --ip=IP          give the public ip of the server
                --cloud=CLOUD    give a cloud to work on, if not given, selected
                                 or default cloud will be used
                --count=COUNT    give the number of servers to start
                --detail         for table, a brief version
                                 is used as default, use this flag to print
                                 detailed table
                --flavor=FLAVOR  give the name or id of the flavor
                --group=GROUP          give the group name of server
                --secgroup=SECGROUP    security group name for the server
                --image=IMAGE    give the name or id of the image
                --key=KEY        specify a key to use, input a string which
                                 is the full path to the private key file
                --keypair_name=KEYPAIR_NAME   Name of the vm keypair to
                                              be used to create VM.
                                              Note this is not a path to key.
                --user=USER      give the user name of the server that you want
                                 to use to login
                --name=NAME      give the name of the virtual machine
                --force          rename/ delete vms without user's confirmation
                --command=COMMAND
                                 specify the commands to be executed


            Description:
                commands used to boot, start or delete servers of a cloud

                vm default [options...]
                    Displays default parameters that are set for vm boot either
                    on the default cloud or the specified cloud.

                vm boot [options...]
                    Boots servers on a cloud; the user may specify flavor,
                    image, etc., otherwise default values will be used. See how
                    to set default values of a cloud: cloud help

                vm start [options...]
                    Starts a suspended or stopped vm instance.

                vm stop [options...]
                    Stops a vm instance.

                vm delete [options...]

                    Delete servers of a cloud, user may delete a server by its
                    name or id, delete servers of a group or servers of a cloud,
                    give prefix and/or range to find servers by their names.
                    Or user may specify more options to narrow the search

                vm floating_ip_assign [options...]
                    assign a public ip to a VM of a cloud

                vm ip show [options...]
                    show the ips of VMs

                vm ssh [options...]
                    login to a server or execute commands on it

                vm list [options...]
                    same as command "list vm", please refer to it

                vm status [options...]
                    Retrieves status of last VM booted on cloud and displays it.

                vm refresh [--cloud=CLOUDS]
                    this command refreshes the data for virtual machines,
                    images and flavors for the specified clouds.

                vm ping [NAMES] [--cloud=CLOUDS] [--count=N] [--processors=PROCESSORS]
                     pings the specified virtual machines, while using at most N pings.
                     The ping is executed in parallel.
                     If names are specified the ping is restricted to the given names in
                     parameter format. If clouds are specified, names that are not in
                     these clouds are ignored. If the name is set in the variables
                     this name is used.

                cms vm ssh --command=\"uname -a\"

                      executes the uname command on the last booted vm

                vm script [--name=NAMES]
                          [--username=USERNAME]
                          [--key=KEY]
                          [--dryrun]
                          [--dir=DESTINATION]
                          [--shell=SHELL]
                          SCRIPT

                   The script command copies a shell script to the specified vms
                   into the DESTINATION directory and then executes it. With
                   SHELL you can set the shell for executing the command;
                   this could even be a python interpreter. Examples for
                   SHELL are /bin/sh, /usr/bin/env python

                vm put SOURCE DESTINATION [NAMES]

                    puts the file defined by SOURCE into the DESTINATION folder
                    on the specified machines. If the file exists it is
                    overwritten, so be careful.

                vm get SOURCE DESTINATION [NAMES]

                    gets the file defined by SOURCE into the DESTINATION folder
                    on the specified machines. The SOURCE is on the remote
                    machine. If one machine is specified, the SOURCE is the same
                    name as on the remote machine. If multiple machines are
                    specified, the name of the machine will be a prefix to the
                    filename. If the filenames exist, they will be overwritten,
                    so be careful.

            Tip:
                give the VM name, but in a hostlist style, which is very
                convenient when you need a range of VMs e.g. sample[1-3]
                => ['sample1', 'sample2', 'sample3']
                sample[1-3,18] => ['sample1', 'sample2', 'sample3', 'sample18']

            Quoting commands:
                cm vm login gregor-004 --command=\"uname -a\"

            Limitations:

                Azure: rename is not supported
        """

        map_parameters(arguments,
                       'active',
                       'cloud',
                       'command',
                       'dryrun',
                       'flavor',
                       'force',
                       'group',
                       'image',
                       'interval',
                       'timeout',
                       'ip',
                       'key',
                       'modify-knownhosts',
                       'n',
                       'name',
                       'public',
                       'quiet',
                       'secgroup',
                       'size',
                       'username',
                       'output',
                       'count',
                       'refresh')

        variables = Variables()
        database = CmDatabase()

        arguments.output = Parameter.find("output",
                                          arguments,
                                          variables,
                                          "table")

        arguments.refresh = Parameter.find_bool("refresh",
                                                arguments,
                                                variables)

        if (arguments.meta and arguments.list):

            name = arguments.NAME
            if arguments.NAME is None:
                name = variables['vm']
                if name is None:
                    Console.error("No vm specified")

            cloud = "chameleon"
            # cloud = Parameter.find(arguments, variables)
            print(f"vm metadata for {name} on {cloud}")

            provider = Provider(name=cloud)
            r = provider.get_server_metadata(name)
            print(r)

        elif arguments.meta and arguments.set:

            metadata = {}
            pairs = arguments['KEY=VALUE']
            for pair in pairs:
                key, value = pair.split("=", 1)
                metadata[key] = value

            name = arguments.NAME
            if arguments.NAME is None:
                name = variables['vm']
                if name is None:
                    Console.error("No vm specified")

            cloud = "chameleon"
            # cloud = Parameter.find(arguments, variables)
            print(f"cloud {cloud} {name}")

            provider = Provider(name=cloud)
            provider.set_server_metadata(name, **metadata)
            r = provider.get_server_metadata(name)

            pprint(r)

        elif arguments.meta and arguments.delete:

            metadata = {}
            keys = arguments['KEY']

            name = arguments.NAME
            if arguments.NAME is None:
                name = variables['vm']
                if name is None:
                    Console.error("No vm specified")

            cloud = "chameleon"
            # cloud = Parameter.find(arguments, variables)
            print(f"cloud {cloud} {name}")

            provider = Provider(name=cloud)

            for key in keys:
                provider.delete_server_metadata(name, key)

            r = provider.get_server_metadata(name)

            pprint(r)


        elif arguments.list and arguments.refresh:

            names = []

            clouds, names = Arguments.get_cloud_and_names("list",
                                                          arguments,
                                                          variables)

            for cloud in clouds:
                print(f"cloud {cloud}")
                provider = Provider(name=cloud)
                vms = provider.list()

                provider.Print(vms, output=arguments.output, kind="vm")

        elif arguments.list:

            names = []

            clouds, names = Arguments.get_cloud_and_names("list",
                                                          arguments,
                                                          variables)

            try:

                for cloud in clouds:
                    print(f"List {cloud}")

                    p = Provider(cloud)

                    kind = p.kind

                    collection = "{cloud}-vm".format(cloud=cloud,
                                                     kind=p.kind)
                    db = CmDatabase()
                    vms = db.find(collection=collection)

                    p.Print(vms, output=arguments.output, kind="vm")

            except Exception as e:

                VERBOSE(e)

            return ""


        elif arguments.ping:
            raise NotImplementedError

            """
            vm ping [NAMES] [--cloud=CLOUDS] [--count=N]
            """
            if arguments.NAMES:
                variables['vm'] = arguments.NAMES
            if arguments['--cloud']:
                variables['cloud'] = arguments['--cloud']
            clouds, names = Arguments.get_cloud_and_names("status",
                                                          arguments,
                                                          variables)

            count = arguments.count
            if arguments.count:
                count = int(count)
            else:
                count = 1

            ips = set()

            for cloud in clouds:
                params = {}
                # gets public ips from database
                cursor = database.db[f'{cloud}-vm']
                for name in names:
                    for node in cursor.find({'name': name}):
                        ips.update(set(node['public_ips']))
                ips = list(ips)
                pprint(ips)

            for ip in ips:
                Shell.ping(host=ip, count=count)

        elif arguments.check:

            raise NotImplementedError
            """
            vm check [NAMES] [--cloud=CLOUDS] [--username=USERNAME]
            """
            """
            
            THIS IS ALL WRONG AS PROVIDER DEPENDENT !!!
            
            if arguments.NAMES:
                variables['vm'] = arguments.NAMES
            if arguments['--cloud']:
                variables['cloud'] = arguments['--cloud']
            clouds, names = Arguments.get_cloud_and_names("status", arguments, variables)

            for cloud in clouds:
                provider = Provider(cloud)
                params = {}

                params['key'] = \
                    provider.p.spec["credentials"]['EC2_PRIVATE_KEY_FILE_PATH'] + \
                    provider.p.spec["credentials"]['EC2_PRIVATE_KEY_FILE_NAME']

                params['username'] = arguments['--username']  # or get from db

                processors = arguments['--processors']
                if processors:
                    params['processors'] = int(processors[0])

                # gets public ips from database
                public_ips = []
                cursor = database.db['{cloud}-vm']
                for name in names:
                    for node in cursor.find({'name': name}):
                        public_ips.append(node['public_ips'])
                public_ips = [y for x in public_ips for y in x]

                Host.check(hosts=public_ips, **params)
            """

        elif arguments.status:
            if arguments.NAMES:
                variables['vm'] = arguments.NAMES
            if arguments['--cloud']:
                variables['cloud'] = arguments['--cloud']
            clouds, names = Arguments.get_cloud_and_names("status", arguments,
                                                          variables)

            # gets status from database
            for cloud in clouds:
                provider = Provider(cloud)
                status = []
                cursor = database.db[f'{cloud}-vm']
                print(cloud)
                for name in names:
                    for node in cursor.find({'name': name}):
                        status.append(node)

                provider.Print(status, output=arguments.output, kind="status")
                return ""


        elif arguments.start:
            # TODO: not tested
            if arguments.NAMES:
                names = variables['vm'] = arguments.NAMES

            if arguments['--cloud']:
                variables['cloud'] = arguments['--cloud']
            clouds, names = Arguments.get_cloud_and_names("stop", arguments,
                                                          variables)

            cloud = clouds[0]
            print(cloud)
            print(names)

            for name in names:

                provider = Provider(cloud)

                if arguments['--dryrun']:
                    print(f"start node {name}")
                else:
                    vms = provider.start(names=name, cloud=cloud)

                    provider.Print(vms, output=arguments.output, kind="vm")

            return ""

        elif arguments.stop:
            # TODO: not tested

            if arguments.NAMES:
                variables['vm'] = arguments.NAMES
            if arguments['--cloud']:
                variables['cloud'] = arguments['--cloud']
            clouds, names = Arguments.get_cloud_and_names("stop", arguments,
                                                          variables)

            for cloud in clouds:
                params = {}
                provider = Provider(cloud)

                if arguments['--dryrun']:
                    Console.ok(f"Dryrun stop: "
                               f"        {cloud}\n"
                               f"        {names}"
                               f"        {provider}")
                else:
                    for name in names:
                        vms = provider.stop(name)

                    provider.Print(vms, output=arguments.output, kind="vm")


        elif arguments.terminate:
            # TODO: not tested

            if arguments.NAMES:
                variables['vm'] = arguments.NAMES
            if arguments['--cloud']:
                variables['cloud'] = arguments['--cloud']
            clouds, names = Arguments.get_cloud_and_names("stop", arguments,
                                                          variables)

            for cloud in clouds:
                params = {}
                provider = Provider(cloud)

                if arguments['--dryrun']:
                    Console.ok(f"Dryrun terminate: "
                               f"        {cloud}\n"
                               f"        {names}"
                               f"        {provider}")
                else:
                    for name in names:
                        vms = provider.destroy(name)

                    provider.Print(vms, output=arguments.output, kind="vm")


        elif arguments.delete:

            if arguments.NAMES:
                variables['vm'] = arguments.NAMES
            if arguments['--cloud']:
                variables['cloud'] = arguments['--cloud']
            clouds, names = Arguments.get_cloud_and_names("stop", arguments,
                                                          variables)

            if names is not None:
                pass
            elif clouds is not None:
                for cloud in clouds:
                    provider = Provider(cloud)
                    vms = provider.list()
                    for vm in vms:
                        r = provider.destroy(name=vm)
                return ""
            else:
                return ""

            for cloud in clouds:
                provider = Provider(cloud)
                vms = provider.list()
                for vm in vms:
                    name = vm["cm"]["name"]
                    if name in names:
                        r = provider.destroy(name=name)



        # TODO: username, secgroup
        elif arguments.boot:
            # not everything works

            """
                vm boot 
                        [--name=VMNAMES]
                        [--cloud=CLOUD]
                        [--username=USERNAME]
                        [--image=IMAGE]
                        [--flavor=FLAVOR]
                        [--public]
                        [--secgroup=SECGROUP]
                        [--key=KEY]
                        [--group=GROUP]
                        [--dryrun]
            """
            # for name in names:
            #    node = p.create(name=name, size=flavor, image=image)

            # VERBOSE(arguments)
            parameters = dotdict()

            names = Parameter.expand(arguments.name)

            cloud = Parameter.find("cloud",
                                   arguments,
                                   variables.dict())
            defaults = Config()[f"cloudmesh.cloud.{cloud}.default"]
            groups = Parameter.find("group",
                                    arguments,
                                    variables.dict(),
                                    {"group": "default"})

            parameters = dotdict()

            parameters.names = arguments.name
            parameters.group = groups
            for attribute in ["image", "username", "flavor", "key", "secgroup"]:
                parameters[attribute] = Parameter.find(attribute,
                                                       arguments,
                                                       variables.dict(),
                                                       defaults)

            if arguments.username is None:
                parameters.user = Image.guess_username(parameters.image)

            provider = Provider(name=cloud)

            parameters.secgroup = arguments.secgroup or "default"

            # pprint(parameters)

            if arguments['--dryrun']:
                Console.ok(f"Dryrun stop: \n"
                           f"        cloud={cloud}\n"
                           f"        names={names}\n"
                           f"        provide={provider}")
                for attribute in parameters:
                    value = parameters[attribute]
                    Console.ok(f"        {attribute}={value}")




            else:

                # pprint (parameters)
                if not arguments.n:
                    count = 1
                else:
                    count = int(arguments.n)

                for i in range(0, count):
                    if names is None:
                        n = Name()
                        n.incr()
                        parameters.names = str(n)

                    # parameters.progress = len(parameters.names) < 2

                    vms = provider.create(**parameters)
                    variables['vm'] = str(n)
                    if arguments["-v"]:
                        banner("Details")
                        pprint(vms)

                # provider.Print(arguments.output, "vm", vms)



        elif arguments.info:
            """
            vm info [--cloud=CLOUD] [--output=OUTPUT]
            """
            print("info for the vm")

            cloud, names = Arguments.get_cloud_and_names("info", arguments,
                                                         variables)

            raise NotImplementedError

        elif arguments.rename:
            raise NotImplementedError
            # Not tested
            print("rename the vm")

            v = Variables()
            cloud = v["cloud"]

            p = Provider(cloud)

            try:
                oldnames = Parameter.expand(arguments["OLDNAMES"])
                newnames = Parameter.expand(arguments["NEWNAMES"])
                force = arguments["--force"]

                if oldnames is None or newnames is None:
                    Console.error("Wrong VMs specified for rename",
                                  traceflag=False)
                elif len(oldnames) != len(newnames):
                    Console.error("The number of VMs to be renamed is wrong",
                                  traceflag=False)
                else:
                    print(oldnames)
                    print(newnames)
                    for i in range(0, len(oldnames)):
                        oldname = oldnames[i]
                        newname = newnames[i]
                        if arguments["--dryrun"]:
                            Console.ok(
                                "Rename {} to {}".format(oldname, newname))
                        else:
                            print(f"rename {oldname} -> {newname}")

                            p.rename(source=oldname, destination=newname)

                    msg = "info. OK."
                    Console.ok(msg)
            except Exception as e:
                Error.traceback(e)
                Console.error("Problem renaming instances", traceflag=True)

        elif arguments["ip"] and arguments["show"]:
            raise NotImplementedError

            print("show the ips")
            """
            vm ip show [NAMES]
                   [--group=GROUP]
                   [--cloud=CLOUD]
                   [--output=OUTPUT]
                   [--refresh]

            """

        elif arguments["ip"] and arguments["assign"]:
            raise NotImplementedError
            """
            vm ip assign [NAMES] [--cloud=CLOUD]
            """
            print("assign the public ip")

        elif arguments["ip"] and arguments["inventory"]:
            raise NotImplementedError

            """
            vm ip inventory [NAMES]

            """
            print("list ips that could be assigned")

        elif arguments.default:
            raise NotImplementedError

            print("sets defaults for the vm")

        elif arguments.script:
            raise NotImplementedError
            clouds, names = Arguments.get_cloud_and_names("run", arguments,
                                                          variables)
            username = arguments['--username']
            script = arguments.SCRIPT

            for cloud in clouds:
                provider = Provider(cloud)

                name_ips = {}
                cursor = database.db['{}-node'.format(cloud)]
                for name in names:
                    for node in cursor.find({'name': name}):
                        name_ips[name] = node['public_ips']

                if arguments['--dryrun']:
                    print("run script {} on vms: {}".format(script, names))
                else:
                    provider.ssh(name_ips, username=username, script=script)

        elif arguments.username:
            raise NotImplementedError

            """
            vm username USERNAME [NAMES] [--cloud=CLOUD]
            """
            print("sets the username for the vm")

        elif arguments.resize:
            raise NotImplementedError
            """
            vm resize [NAMES] [--size=SIZE]
            """
            pass

        elif arguments.ssh:

            """
            vm ssh [NAMES] [--username=USER]
                 [--quiet]
                 [--ip=IP]
                 [--key=KEY]
                 [--command=COMMAND]
            """

            # VERBOSE(arguments)
            clouds, names, command = Arguments.get_commands("ssh",
                                                            arguments,
                                                            variables)

            # print (clouds)
            # print(names)
            # print (command)

            if arguments.command is None and len(names) > 1:
                Console.error("Interactive shell can only be done on one vm")
                return ""
            elif arguments.command is None and len(names) == 1:
                name = names[0]
                cloud = clouds[0]
                cm = CmDatabase()
                try:
                    vm = cm.find_name(name, "vm")[0]
                except IndexError:
                    Console.error(f"could not find vm {name}")
                    return ""
                # VERBOSE(vm)
                cloud = vm["cm"]["cloud"]
                provider = Provider(name=cloud)
                provider.ssh(vm=vm)
                return ""
            else:
                # command on all vms

                if clouds is None or names is None or command is None:
                    return ""
                else:
                    for cloud in clouds:
                        p = Provider(cloud)
                        for name in names:
                            cm = CmDatabase()
                            try:
                                vm = cm.find_name(name, "vm")[0]
                            except IndexError:
                                Console.error(f"could not find vm {name}")
                                continue
                            r = p.ssh(vm=vm, command=command)
                            print(r)
            return ""

        elif arguments.console:

            # why is this not vm
            clouds, names, command = Arguments.get_commands("ssh",
                                                            arguments,
                                                            variables)

            print(clouds)
            print(names)
            print(command)

            for cloud in clouds:
                p = Provider(cloud)
                for name in names:
                    cm = CmDatabase()
                    try:
                        vm = cm.find_name(name, "vm")[0]
                    except IndexError:
                        Console.error(f"could not find vm {name}")
                        continue
                    r = p.console(vm=vm)
                    print(r)

            return ""

        elif arguments.log:

            # why is this not vm
            clouds, names, command = Arguments.get_commands("ssh",
                                                            arguments,
                                                            variables)

            print(clouds)
            print(names)
            print(command)

            for cloud in clouds:
                p = Provider(cloud)
                for name in names:
                    cm = CmDatabase()
                    try:
                        vm = cm.find_name(name, "vm")[0]
                    except IndexError:
                        Console.error(f"could not find vm {name}")
                        continue
                    r = p.log(vm=vm)
                    print(r)

            return ""

        elif arguments.wait:
            """
            vm wait [--cloud=CLOUD] [--interval=INTERVAL] [--timeout=TIMEOUT]
            """

            # why is this not vm
            clouds, names, command = Arguments.get_commands("ssh",
                                                            arguments,
                                                            variables)

            # print (clouds)
            # print (names)
            # print (command)

            for cloud in clouds:
                p = Provider(cloud)
                for name in names:
                    cm = CmDatabase()
                    try:
                        vm = cm.find_name(name, "vm")[0]
                    except IndexError:
                        Console.error(f"could not find vm {name}")
                        continue
                    r = p.wait(vm=vm, interval=arguments.interval,
                               timeout=arguments.timeout)
                    if r:
                        Console.ok("Instance available for SSH")
                    else:
                        Console.error(
                            f"Instance unavailable after timeout of {arguments.timeout}")
                    # print(r)

            return ""
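
The ssh, console, log, and wait branches above all follow the same pattern: expand a parameterized name list, look each VM up in the local CmDatabase, and call a provider method per VM. The following is a minimal, stand-alone sketch of the hostlist-style name expansion these branches rely on; it approximates bracket-range notation such as 'node[01-03]' and is not the cloudmesh Parameter.expand implementation.

import re

def expand_names(parameter: str):
    """Approximate hostlist expansion: 'node,node[01-03]' ->
    ['node', 'node01', 'node02', 'node03']. Illustrative only."""
    result = []
    for part in parameter.split(","):
        match = re.fullmatch(r"(.*)\[(\d+)-(\d+)\](.*)", part)
        if match:
            prefix, start, end, suffix = match.groups()
            width = len(start)                      # keep zero padding
            for i in range(int(start), int(end) + 1):
                result.append(f"{prefix}{i:0{width}d}{suffix}")
        else:
            result.append(part)
    return result

# expand_names("node,node[01-03]")
# -> ['node', 'node01', 'node02', 'node03']
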
Beispiel #20
0
    def do_queue(self, args, arguments):
        """
        ::

          Usage:
            queue create --name=NAME --policy=POLICY --cluster=CLUSTER
                [--charge=CHARGE]
                [--unit=UNIT]
            queue activate [--name=NAME]
            queue deactivate [--name=NAME]
            queue set unit
            queue connection_test --job=JOB
            queue cluster list [--cluster=CLUSTERS] [--depth=DEPTH]
            queue cluster remove [--cluster=CLUSTERS]
            queue cluster set [--cluster=CLUSTERS] PARAMETER=VALUE

          Arguments:
              FILE   a file name
              INPUT_TYPE  tbd

          Options:
              -f      specify the file
              --depth=DEPTH   [default: 1]
              --format=FORMAT    [default: table]

          Description:

            This command creates a queue that is associated with a cloud.

            We assume that a number of experiments are conducted, possibly
            running the script multiple times. Each experiment saves the
            batch script in its own folder.

            The output of the script can be saved in a destination folder. A
            virtual directory is used to coordinate all saved files.

            Because of this virtual directory, the files can be located across
            multiple different data or file services.

            Authentication to the batch systems is done via the underlying HPC
            center authentication. We assume that the user has an account that
            allows submission on these systems.

            (SSH, 2 factor, XSEDE-account) TBD.

          Examples:

             LOTS OF DOCUMENTATION MISSING HERE

                [--companion-file=COMPANION_FILE]
                [--outfile-name=OUTPUT_FILE_NAME]
                [--suffix=SUFFIX] [--overwrite]




        """

        #
        # create slurm manager so it can be used in all commands
        #
        queue = Queue()  # debug=arguments["--debug"])

        # arguments["--cloud"] = "test"
        # arguments["NAME"] = "fix"

        # map_parameters(arguments,
        #                "cloud",
        #                "name",
        #                "cluster",
        #                "script",
        #                "type",
        #                "destination",
        #                "source",
        #                "format")

        # if not arguments.create

        #    find cluster name from Variables()
        #    if no cluster is defined look it up in yaml in batch default:
        #    if not defined there fail

        #    clusters = Parameter.expand(arguments.cluster)
        #    names = Parameter.expand(arguments.name)
        #    this will return arrays of clusters and job names; all cluster
        #    and job commands will be executed on them
        #    see the vm command
        #
        #    if "active: False" is set in the yaml file for a cluster, that
        #    cluster is not used and is skipped.

        VERBOSE.print(arguments, verbose=9)
        implemented_policies = ['FIFO', 'FILO']
        variables = Variables()

        # docopt for some reason does not expose all of the arguments in dot
        # notation; that is why the --option form is used here.
        if   arguments.create and \
             arguments['--name'] and \
             arguments['--cluster'] and \
             arguments['--policy']:

            queue_name = arguments['--name']
            cluster_name = arguments['--cluster']
            policy = arguments['--policy']
            if policy.upper() not in implemented_policies:
                Console.error("Policy {policy} not defined, currently "
                              "implemented policies are {policies} ".format(
                                  policy=policy.upper(),
                                  policies=implemented_policies))
                return
            charge = arguments['--charge']
            unit = arguments['--unit']
            queue.create(queue_name, cluster_name, policy, charge, unit)
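
The create branch above only checks that the requested policy is FIFO or FILO before handing off to Queue.create. As a hedged illustration of what those two policies mean for job ordering, here is a toy model built on a deque; it is not the cloudmesh Queue class.

from collections import deque

class SimplePolicyQueue:
    """Toy model of FIFO vs. FILO job ordering; illustrative only."""

    def __init__(self, policy="FIFO"):
        if policy.upper() not in ("FIFO", "FILO"):
            raise ValueError(f"Policy {policy} not implemented")
        self.policy = policy.upper()
        self.jobs = deque()

    def add(self, job):
        self.jobs.append(job)

    def next(self):
        # FIFO takes the oldest job, FILO the most recently added one
        return self.jobs.popleft() if self.policy == "FIFO" else self.jobs.pop()

# q = SimplePolicyQueue("FILO"); q.add("job1"); q.add("job2"); q.next() -> "job2"
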
Beispiel #21
0
    def register(self, file=None):
        Console.error("Not implemented")
        raise NotImplementedError

    def start(self, security=True):
        """
        start the MongoDB server
        """
        mode = self.data['MODE']

        if mode == 'docker':
            from cloudmesh.mongo.MongoDocker import MongoDocker
            mongo = MongoDocker()
            mongo.start(auth=security)
            # mongo.wait()
            # mongo.ps()
            return

        auth = ""
        if security:
            auth = "--auth"
        mongo_host = self.data['MONGO_HOST']
        if platform.lower() == 'win32':
            try:
                # command = 'where mongo'
                # proc = subprocess.Popen(command, shell=True,
                #                        stdin=subprocess.PIPE,
                #                        stdout=subprocess.PIPE)
                # out, err = proc.communicate()

                # print ("MMM", command)
                # print ("O", out)
                # print ("E", err)

                # if out == b'':
                #    Console.error("mongo command not found")
                #    sys.exit()
                mongo_runner = f"\"{self.mongo_home}\\bin\\mongod\" {auth} " \
                               f"--bind_ip {mongo_host}" \
                               f" --dbpath \"{self.mongo_path}\" --logpath \"{self.mongo_log}\\mongod.log\""
                print(mongo_runner)
                if not os.path.isfile(f'{self.mongo_path}/invisible.vbs'):
                    with open(f'{self.mongo_path}/invisible.vbs', 'w') as f:
                        f.write(
                            'CreateObject("Wscript.Shell").Run """" & WScript.Arguments(0) & """", 0, False'
                        )
                if not os.path.isfile(f'{self.mongo_path}/mongo_starter.bat'):
                    with open(f'{self.mongo_path}/mongo_starter.bat',
                              'w') as f:
                        f.write(mongo_runner)
                script = f'wscript.exe \"{self.mongo_path}/invisible.vbs\" \"{self.mongo_path}/mongo_starter.bat\"'
                print(script)
                p = subprocess.Popen(script,
                                     shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                result = "mongod child process should be started successfully."
            except Exception as e:
                result = "Mongo in windows could not be started: \n\n" + str(e)
        else:
            try:
                script = f"mongod {auth} --bind_ip {mongo_host}" \
                         f" --dbpath {self.mongo_path} --logpath {self.mongo_log}/mongod.log --fork"
                result = Script.run(script)

            except Exception as e:
                result = "Mongo could not be started." + str(e)

        if "successfully" in result:
            print(Console.ok(result))
        else:
            print(Console.error(result))
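
The start method above assembles a mongod command line and launches it detached, using a VBScript wrapper on Windows and --fork elsewhere. Below is a minimal, hedged sketch of the same detach idea using only the standard library; the command string in the usage note is a placeholder, not the configuration the class reads from self.data.

import subprocess
import sys

def start_detached(command: str) -> subprocess.Popen:
    """Launch a command without blocking the caller; sketch only."""
    if sys.platform == "win32":
        # DETACHED_PROCESS + CREATE_NEW_PROCESS_GROUP keeps the child
        # alive independently of this console on Windows.
        flags = subprocess.DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
        return subprocess.Popen(command, creationflags=flags, shell=True)
    # On POSIX, start_new_session detaches the child from our session.
    return subprocess.Popen(command, shell=True, start_new_session=True)

# start_detached('mongod --auth --bind_ip 127.0.0.1 '
#                '--dbpath /tmp/mongo --logpath /tmp/mongo/mongod.log')
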
Beispiel #23
0
    def get_run(self, specification):
        """
        function to download a file or directory;
        gets the source from the service
        :param specification: dict with source, destination, and recursive flag
        :return: dict
        """

        source = specification['source']
        destination = specification['destination']
        recursive = specification['recursive']
        trimmed_source = massage_path(source)
        trimed_dest = massage_path(destination)

        self.s3_resource, self.s3_client = self.get_s3_resource_client()

        file_obj = ''

        try:
            file_obj = self.s3_client.get_object(Bucket=self.container_name,
                                                 Key=trimmed_source)
        except botocore.exceptions.ClientError as e:
            # object not found
            Console.error(e)
        files_downloaded = []

        is_target_file = os.path.isfile(trimed_dest)
        is_target_dir = os.path.isdir(trimed_dest)
        '''
        print('is_target_file')
        print(is_target_file)
        print('is_target_dir')
        print(is_target_dir)
        '''

        if file_obj:
            try:
                if is_target_dir:
                    '''
                    print('target is directory...')
                    print('trimmed_destination : '+ trimmed_destination)
                    print(trimmed_source)
                    '''
                    os_path_trim_source = os.path.basename(trimmed_source)
                    blob = self.s3_resource.Bucket(
                        self.container_name).download_file(
                            trimmed_source,
                            f"{trimed_dest}/{os_path_trim_source}")
                else:
                    blob = self.s3_resource.Bucket(
                        self.container_name).download_file(
                            trimmed_source, trimed_dest)

                # make head call since file download does not return
                # obj dict to extract meta data
                metadata = self.s3_client.head_object(
                    Bucket=self.container_name, Key=trimmed_source)
                files_downloaded.append(
                    extract_file_dict(trimmed_source, metadata))

                self.storage_dict['message'] = 'Source downloaded'
            except FileNotFoundError as e:
                self.storage_dict['message'] = 'Destination not found'
                Console.error(e)

        else:
            # Search for a directory
            all_objs = list(
                self.s3_resource.Bucket(
                    self.container_name).objects.filter(Prefix=trimmed_source))

            total_all_objs = len(all_objs)

            if total_all_objs == 0:
                self.storage_dict['message'] = 'Source Not Found'

            elif total_all_objs > 0 and recursive is False:
                for obj in all_objs:
                    if os.path.basename(obj.key) != self.dir_marker_file_name:
                        if massage_path(
                            obj.key.replace(trimmed_source, '')).count('/') \
                            == 0:
                            try:
                                blob = self.s3_resource.Bucket(
                                    self.container_name
                                ).download_file(
                                    obj.key,
                                    f"{trimed_dest}/{os.path.basename(obj.key)}"
                                )

                                # make head call since file download does not
                                # return obj dict to extract meta data
                                metadata = self.s3_client.head_object(
                                    Bucket=self.container_name, Key=obj.key)
                                files_downloaded.append(
                                    extract_file_dict(obj.key, metadata))

                                self.storage_dict[
                                    'message'] = 'Source downloaded'
                                # files_downloaded.append(obj.key)
                            except FileNotFoundError as e:
                                self.storage_dict[
                                    'message'] = 'Destination not found'
                                Console.error(e)

            elif total_all_objs > 0 and recursive is True:
                files_downloaded = []
                for obj in all_objs:
                    if os.path.basename(obj.key) != \
                        self.dir_marker_file_name \
                        and obj.key[-1] != '/':
                        if massage_path(obj.key.replace(trimmed_source, '')) \
                            .count('/') == 0:
                            try:
                                blob = self.s3_resource.Bucket(
                                    self.container_name
                                ).download_file(
                                    obj.key,
                                    f"{trimed_dest}/{os.path.basename(obj.key)}"
                                )

                                # make head call since file download does
                                # not return obj dict to extract meta data
                                metadata = self.s3_client.head_object(
                                    Bucket=self.container_name, Key=obj.key)
                                files_downloaded.append(
                                    extract_file_dict(obj.key, metadata))

                            except FileNotFoundError as e:
                                Console.error(e)
                        else:

                            folder_path = massage_path(
                                obj.key.replace(trimmed_source, '').replace(
                                    os.path.basename(obj.key), ''))
                            dest_path = f"{trimed_dest}/{folder_path}"
                            try:
                                os.makedirs(dest_path, 0o777)
                                Console.msg()
                            except FileExistsError as e:
                                os.chmod(dest_path, stat.S_IRWXO)
                                Console.error(e)

                            try:
                                blob = self.s3_resource.Bucket(
                                    self.container_name
                                ).download_file(
                                    # obj.key, trimmedDestination + '/'
                                    # + os.path.basename(obj.key))
                                    obj.key,
                                    f"{dest_path}/{os.path.basename(obj.key)}"
                                )

                                # make head call since file download
                                # does not return obj dict to extract meta data
                                metadata = self.s3_client.head_object(
                                    Bucket=self.container_name, Key=obj.key)
                                files_downloaded.append(
                                    extract_file_dict(obj.key, metadata))

                            except FileNotFoundError as e:
                                Console.error(e)

        specification['status'] = 'completed'

        return specification
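
get_run above walks an S3 prefix, recreates intermediate folders locally, and downloads each object. The following is a compact, hedged sketch of that core pattern with plain boto3; the bucket name and paths are placeholders, and the marker-file handling and metadata collection of the original are omitted.

import os
import boto3

def download_prefix(bucket_name: str, prefix: str, destination: str):
    """Mirror every object under an S3 prefix into a local directory.
    Sketch of the recursive branch above; error handling omitted."""
    bucket = boto3.resource("s3").Bucket(bucket_name)
    downloaded = []
    for obj in bucket.objects.filter(Prefix=prefix):
        if obj.key.endswith("/"):          # skip folder marker objects
            continue
        relative = obj.key[len(prefix):].lstrip("/")
        target = os.path.join(destination, relative)
        os.makedirs(os.path.dirname(target) or ".", exist_ok=True)
        bucket.download_file(obj.key, target)
        downloaded.append(obj.key)
    return downloaded

# download_prefix("my-bucket", "data/run1", "/tmp/run1")
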
Beispiel #24
0
    def do_openapi(self, args, arguments):
        """
        ::

          Usage:
              openapi generate [FUNCTION] --filename=FILENAME
                                         [--serverurl=SERVERURL]
                                         [--yamlfile=YAML]
                                         [--import_class]
                                         [--all_functions]
                                         [--enable_upload]
                                         [--verbose]
              openapi server start YAML [NAME]
                              [--directory=DIRECTORY]
                              [--port=PORT]
                              [--server=SERVER]
                              [--host=HOST]
                              [--verbose]
                              [--debug]
                              [--fg]
                              [--os]
              openapi server stop NAME
              openapi server list [NAME] [--output=OUTPUT]
              openapi server ps [NAME] [--output=OUTPUT]
              openapi register add NAME ENDPOINT
              openapi register filename NAME
              openapi register delete NAME
              openapi register list [NAME] [--output=OUTPUT]
              openapi TODO merge [SERVICES...] [--dir=DIR] [--verbose]
              openapi TODO doc FILE --format=(txt|md)[--indent=INDENT]
              openapi TODO doc [SERVICES...] [--dir=DIR]
              openapi sklearn FUNCTION MODELTAG
              openapi sklearnreadfile FUNCTION MODELTAG
              openapi sklearn upload --filename=FILENAME

          Arguments:
              FUNCTION  The name for the function or class
              MODELTAG  The arbitrary name chosen by the user under which the trained Sklearn model is stored as a Pickle object
              FILENAME  Path to python file containing the function or class
              SERVERURL OpenAPI server URL Default: https://localhost:8080/cloudmesh
              YAML      Path to yaml file that will contain OpenAPI spec. Default: FILENAME with .py replaced by .yaml
              DIR       The directory of the specifications
              FILE      The specification

          Options:
              --import_class         FUNCTION is a required class name instead of an optional function name
              --all_functions        Generate OpenAPI spec for all functions in FILENAME
              --debug                Use the server in debug mode
              --verbose              Specifies to run in debug mode
                                     [default: False]
              --port=PORT            The port for the server [default: 8080]
              --directory=DIRECTORY  The directory in which the server is run
              --server=SERVER        The server [default: flask]
              --output=OUTPUT        The outputformat, table, csv, yaml, json
                                     [default: table]
              --srcdir=SRCDIR        The directory of the specifications
              --destdir=DESTDIR      The directory where the generated code
                                     is placed

          Description:
            This command does some useful things.

            openapi TODO doc FILE --format=(txt|md|rst) [--indent=INDENT]
                Sometimes it is useful to generate the openapi documentation
                in another format. We provide functionality to generate the
                documentation from the yaml file in a different format.

            openapi TODO doc --format=(txt|md|rst) [SERVICES...]
                Creates a short documentation from services registered in the
                registry.

            openapi TODO merge [SERVICES...] [--dir=DIR] [--verbose]
                Merges two service specifications into a single service
                TODO: do we have a prototype of this?


            openapi sklearn sklearn.linear_model.LogisticRegression
                Generates the .py file for the model given to the generator

            openapi sklearnreadfile sklearn.linear_model.LogisticRegression
                Generates the .py file for the model given to the generator,
                with support for reading files

            openapi generate [FUNCTION] --filename=FILENAME
                                         [--serverurl=SERVERURL]
                                         [--yamlfile=YAML]
                                         [--import_class]
                                         [--all_functions]
                                         [--enable_upload]
                                         [--verbose]
                Generates an OpenAPI specification for FUNCTION in FILENAME and
                writes the result to YAML. Use --import_class to import a class
                with its associated class methods, or use --all_functions to 
                import all functions in FILENAME. These options ignore functions
                whose names start with '_'. Use --enable_upload to add file
                upload functionality to a copy of your python file and the
                resulting yaml file.

            openapi server start YAML [NAME]
                              [--directory=DIRECTORY]
                              [--port=PORT]
                              [--server=SERVER]
                              [--host=HOST]
                              [--verbose]
                              [--debug]
                              [--fg]
                              [--os]
                starts an openapi web service using YAML as a specification
                TODO: directory is hard coded as None, and in server.py it
                  defaults to the directory where the yaml file lives. Can
                  we just remove this argument?

            openapi server stop NAME
                stops the openapi service with the given name
                TODO: from where does this command have to be started?

            openapi server list [NAME] [--output=OUTPUT]
                Provides a list of all OpenAPI services in the registry

            openapi server ps [NAME] [--output=OUTPUT]
                list the running openapi service

            openapi register add NAME ENDPOINT
                Openapi comes with a service registry in which we can register
                openapi services.

            openapi register filename NAME
                In case you have a yaml file, the openapi service can also be
                registered from that yaml file

            openapi register delete NAME
                Deletes the named service from the registry

            openapi register list [NAME] [--output=OUTPUT]
                Provides a list of all registered OpenAPI services


        """
        #print(arguments)
        map_parameters(arguments, 'fg', 'os', 'output', 'verbose', 'port',
                       'directory', 'yamlfile', 'serverurl', 'name',
                       'import_class', 'all_functions', 'enable_upload',
                       'host')
        arguments.debug = arguments.verbose

        #VERBOSE(arguments)
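        # A hedged usage sketch (comments only, not executed): the generate
        # branch below expects a python file whose top-level functions carry
        # type hints, for example a hypothetical mymodule.py containing
        #
        #     def add(a: int, b: int) -> int:
        #         return a + b
        #
        # which could then be turned into a spec and served with
        #
        #     cms openapi generate add --filename=./mymodule.py
        #     cms openapi server start ./mymodule.yaml
        #
        # where mymodule.yaml is the default yaml name derived from FILENAME.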

        if arguments.generate:
            if arguments.import_class and arguments.all_functions:
                Console.error(
                    'Cannot generate openapi with both --import_class and --all_functions'
                )
                return ""
            if arguments.import_class and not arguments.function:
                Console.error(
                    'FUNCTION parameter (class name) is required when using --import_class'
                )
                return ""
            try:
                p = Parameter(arguments)
                p.Print()
                filename = p.filename  # ./dir/myfile.py
                yamlfile = p.yamlfile  # ./dir/myfile.yaml
                directory = p.yamldirectory  # ./dir
                function = p.function  # myfunction
                serverurl = p.serverurl  # http://localhost:8080/cloudmesh/
                module_name = p.module_name  # myfile

                enable_upload = arguments.enable_upload
                # append the upload function to the end of a copy of the file if not already done
                if enable_upload:
                    uploadPython = textwrap.dedent("""
                        from cloudmesh.openapi.registry.fileoperation import FileOperation
                        
                        def upload() -> str:
                            filename=FileOperation().file_upload()
                            return filename
                        
                        #### upload functionality added
                        """)
                    upload_added = False
                    for line in open(filename):
                        if '#### upload functionality added' in line:
                            upload_added = True
                    if not upload_added:
                        filename_upload = filename.replace(
                            '.py', '_upload-enabled.py')
                        copyfile(filename, filename_upload)
                        Console.info(f'copied {filename} to {filename_upload}')
                        filename = filename_upload
                        module_name = module_name + '_upload-enabled'
                        with open(filename, 'a') as f:
                            f.write('\n')
                            f.write(uploadPython)
                        Console.info(
                            f'added upload functionality to {filename}')

                # Parameter() takes care of putting the filename in the path
                imported_module = import_module(module_name)
                dataclass_list = []
                for attr_name in dir(imported_module):
                    attr = getattr(imported_module, attr_name)
                    if is_dataclass(attr):
                        dataclass_list.append(attr)
                # not currently supporting multiple functions or all functions
                # could do comma-separated function/class names

                if enable_upload:
                    upload_obj = getattr(imported_module, 'upload')
                    setattr(sys.modules[module_name], 'upload', upload_obj)

                if arguments.import_class:
                    class_obj = getattr(imported_module, function)
                    # do we maybe need to do this here?
                    # setattr(sys.modules[module_name], function, class_obj)
                    class_description = class_obj.__doc__.strip().split(
                        "\n")[0]
                    func_objects = {}
                    for attr_name in dir(class_obj):
                        attr = getattr(class_obj, attr_name)
                        if isinstance(
                                attr,
                                types.MethodType) and attr_name[0] != '_':
                            # are we sure this is right?
                            # would probably create a valid openapi yaml, but not technically accurate
                            # module.function may work but it should be module.Class.function
                            setattr(sys.modules[module_name], attr_name, attr)
                            func_objects[attr_name] = attr
                        elif is_dataclass(attr):
                            dataclass_list.append(attr)
                    openAPI = generator.Generator()
                    Console.info('Generating openapi for class: ' +
                                 class_obj.__name__)
                    openAPI.generate_openapi_class(
                        class_name=class_obj.__name__,
                        class_description=class_description,
                        filename=filename,
                        func_objects=func_objects,
                        serverurl=serverurl,
                        outdir=directory,
                        yamlfile=yamlfile,
                        dataclass_list=dataclass_list,
                        all_function=False,
                        enable_upload=enable_upload,
                        write=True)
                elif arguments.all_functions:
                    func_objects = {}
                    for attr_name in dir(imported_module):
                        if type(
                                getattr(imported_module, attr_name)
                        ).__name__ == 'function' and attr_name[0] != '_':
                            func_obj = getattr(imported_module, attr_name)
                            setattr(sys.modules[module_name], attr_name,
                                    func_obj)
                            func_objects[attr_name] = func_obj
                    openAPI = generator.Generator()
                    Console.info(
                        'Generating openapi for all functions in file: ' +
                        filename)
                    openAPI.generate_openapi_class(
                        class_name=module_name,
                        class_description="No description provided",
                        filename=filename,
                        func_objects=func_objects,
                        serverurl=serverurl,
                        outdir=directory,
                        yamlfile=yamlfile,
                        dataclass_list=dataclass_list,
                        all_function=True,
                        enable_upload=enable_upload,
                        write=True)

                else:
                    func_obj = getattr(imported_module, function)
                    setattr(sys.modules[module_name], function, func_obj)
                    openAPI = generator.Generator()
                    Console.info('Generating openapi for function: ' +
                                 func_obj.__name__)
                    openAPI.generate_openapi(f=func_obj,
                                             filename=filename,
                                             serverurl=serverurl,
                                             outdir=directory,
                                             yamlfile=yamlfile,
                                             dataclass_list=dataclass_list,
                                             enable_upload=enable_upload,
                                             write=True)

            except Exception as e:
                Console.error("Failed to generate openapi yaml")
                print(e)

        elif arguments.server and arguments.start and arguments.os:

            try:
                s = Server(name=arguments.NAME,
                           spec=path_expand(arguments.YAML),
                           directory=path_expand(arguments.directory)
                           or arguments.directory,
                           port=arguments.port,
                           server=arguments.wsgi,
                           debug=arguments.debug)

                pid = s.run_os()

                VERBOSE(arguments, label="Server parameters")

                print(f"Run PID: {pid}")

            except FileNotFoundError:

                Console.error("specification file not found")

            except Exception as e:

                print(e)

        elif arguments.server and arguments.list:

            try:
                result = Server.list(name=arguments.NAME)

                # BUG: order= not yet defined

                print(Printer.list(result))

            except ConnectionError:
                Console.error("Server not running")

        elif arguments.server and arguments.ps:

            try:
                print()
                Console.info("Running Cloudmesh OpenAPI Servers")
                print()
                result = Server.ps(name=arguments.NAME)
                print(Printer.list(result, order=["name", "pid", "spec"]))

                print()
            except ConnectionError:
                Console.error("Server not running")

        elif arguments.register and arguments.add:

            registry = Registry()
            result = registry.add(name=arguments.NAME,
                                  url=arguments.BASEURL,
                                  pid=arguments.PID)

            registry.Print(data=result, output=arguments.output)

        elif arguments.register and arguments.delete:

            registry = Registry()
            result = registry.delete(name=arguments.NAME)
            if result == 0:
                Console.error("Entry could not be found")
            else:
                Console.ok("Ok. Entry deleted")

        elif arguments.register and arguments.list:

            registry = Registry()
            result = registry.list(name=arguments.NAME)

            registry.Print(data=result, output=arguments.output)

        elif arguments.register and arguments['filename']:

            registry = Registry()
            result = [registry.add_form_file(arguments['filename'])]

            registry.Print(data=result, output=arguments.output)

        elif arguments.server and arguments.start:

            # VERBOSE(arguments)

            try:
                s = Server(
                    name=arguments.NAME,
                    spec=path_expand(arguments.YAML),
                    directory=None,
                    #directory=path_expand(
                    #    arguments.directory) or arguments.directory,
                    port=arguments.port,
                    host=arguments.host,
                    server=arguments.wsgi,
                    debug=arguments.debug)

                pid = s.start(name=arguments.NAME,
                              spec=path_expand(arguments.YAML),
                              foreground=arguments.fg)

                print(f"Run PID: {pid}")

            except FileNotFoundError:

                Console.error("specification file not found")

            except Exception as e:
                print(e)

        elif arguments.server and arguments.stop:

            try:
                print()
                Console.info("Stopping Cloudmesh OpenAPI Server")
                print()

                Server.stop(name=arguments.NAME)

                print()
            except ConnectionError:
                Console.error("Server not running")

        elif arguments.sklearn and not arguments.upload:

            try:
                Sklearngenerator(input_sklibrary=arguments.FUNCTION,
                                 model_tag=arguments.MODELTAG)
            except Exception as e:
                print(e)

        elif arguments.sklearnreadfile and not arguments.upload:

            try:
                SklearngeneratorFile(input_sklibrary=arguments.FUNCTION,
                                     model_tag=arguments.MODELTAG)
            except Exception as e:
                print(e)

        #TODO: implement this?
        elif arguments.sklearn and arguments.upload:

            try:
                openAPI = generator.Generator()
                openAPI.fileput()

            except Exception as e:
                print(e)
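
The generate branch above collects callables by walking dir() of the imported module and filtering on type names and leading underscores. Below is a hedged, stand-alone sketch of that collection step using inspect; it mirrors the --all_functions filtering but is not the cloudmesh generator code.

import inspect
from importlib import import_module

def collect_public_functions(module_name: str) -> dict:
    """Return {name: function} for all public top-level functions of a
    module, similar to the --all_functions filtering above. Sketch only."""
    module = import_module(module_name)
    functions = {}
    for name, obj in inspect.getmembers(module, inspect.isfunction):
        # skip private helpers and functions imported from other modules
        if not name.startswith("_") and obj.__module__ == module.__name__:
            functions[name] = obj
    return functions

# collect_public_functions("json")  # e.g. dump, dumps, load, loads, ...
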