Ejemplo n.º 1
0
def execute4(cmd, print_error=True, **kwargs):
    """Run a shell command, teeing its output to a temp logfile.

    Returns {"output": <lines or parsed json>, "status": True|None}.
    When kwargs["exit_error"] is truthy and the command fails, prints the
    output and exits with the command's exit code.
    """

    logger = ElasticDevLogger("execute4")
    output_to_json = kwargs.get("output_to_json", True)

    # temp files capture the command's exit code and combined output
    exit_file = "/tmp/{}".format(id_generator(10,
                                              chars=string.ascii_lowercase))
    logfile = "/tmp/{}".format(id_generator(10, chars=string.ascii_lowercase))

    # tee stdout+stderr into the logfile and propagate the command's own
    # exit code (not tee's) via the exit_file
    cmd = '({} 2>&1 ; echo $? > {}) | tee -a {}; exit `cat {}`'.format(
        cmd, exit_file, logfile, exit_file)

    exitcode = os.system(cmd)

    status = None
    if exitcode == 0: status = True

    # bugfix: close the logfile handle instead of leaking it
    with open(logfile, "r") as _log:
        output = _log.readlines()

    if output_to_json and not isinstance(output, dict):
        try:
            # bugfix: readlines() returns a list - join it back into a
            # single string before attempting the json conversion
            output = json.loads("".join(output))
        except (ValueError, TypeError):
            logger.warn("Could not convert output to json")

    results = {"output": output, "status": status}

    exit_error = kwargs.get("exit_error")

    if exit_error and not status:
        print(output)
        exit(exitcode)

    return results
Ejemplo n.º 2
0
    def __init__(self, **kwargs):
        """Read the AWS region and credentials out of the environment."""

        self.classname = 'EC2_connections'
        self.logger = ElasticDevLogger(self.classname)
        self.logger.debug("Instantiating %s" % self.classname)

        # these must be exported; a missing one raises KeyError (fail fast)
        env = os.environ
        self.aws_default_region = env["AWS_DEFAULT_REGION"]
        self.aws_access_key_id = env["AWS_ACCESS_KEY_ID"]
        self.aws_secret_access_key = env["AWS_SECRET_ACCESS_KEY"]
Ejemplo n.º 3
0
    def __init__(self, **kwargs):
        """Initialize base helper state and the tag-key whitelist."""

        ResourceCmdHelper.__init__(self)
        self.classname = 'AwsCli'
        self.logger = ElasticDevLogger(self.classname)
        self.logger.debug("Instantiating %s" % self.classname)

        # lazily-populated file-config and scratch-dir handles
        self.file_config = None
        self.file_config_loc = None
        self.tempdir = None

        # input keys propagated onto created resources as tags
        self.resource_tags_keys = [
            "tags",
            "name",
            "schedule_id",
            "job_instance_id",
            "job_id",
        ]
Ejemplo n.º 4
0
    def __init__(self, **kwargs):
        """Set up the execution/share directory layout.

        Directory naming:
          - exec_base_dir: base run directory (e.g. /tmp/ondisktmp/abc123)
          - app_dir: relative app subdirectory (e.g. var/tmp/ansible)
          - exec_dir: exec_base_dir + app_dir
          - share_dir: directory shared with the execution container
          - run_share_dir: share_dir + stateful_id (e.g. /var/tmp/share/ABC123)
        """

        self.classname = 'ResourceCmdHelper'
        self.logger = ElasticDevLogger(self.classname)
        self.logger.debug("Instantiating %s" % self.classname)

        self.cwd = os.getcwd()

        # EXEC_BASE_DIR falls back to the current directory
        self.exec_base_dir = os.environ.get("EXEC_BASE_DIR") or os.getcwd()

        # environment variables the caller requires to be present
        self.must_exists = kwargs.get("must_exists", [])

        self._set_stateful_params(**kwargs)
        self._set_app_params(**kwargs)
        self._set_exec_dir(**kwargs)

        # template_dir/resources_dir default relative to exec_dir; an
        # inheriting class may override them
        self.template_dir = None
        self.resources_dir = None

        if getattr(self, "exec_dir", None):
            self.template_dir = "{}/_ed_templates".format(self.exec_dir)
            # ref 34532045732
            self.resources_dir = os.path.join(self.exec_dir, "ed_resources")

        self._set_docker_settings(**kwargs)
        self._set_destroy_env_vars(**kwargs)
        self._set_os_env_prefix(**kwargs)
        self._get_docker_env_filepath()

        self.output = []
Ejemplo n.º 5
0
class OnDiskTmpDir(object):
    """Manage a randomly-named temporary directory under a base directory."""

    def __init__(self, **kwargs):
        """
        Keyword args:
            tmpdir: base temp directory (default "/tmp")
            subdir: subdirectory under tmpdir (default "ondisktmp")
            init: when True (default), generate the random dir immediately
            createdir: forwarded to set_dir
        """

        self.tmpdir = kwargs.get("tmpdir") or "/tmp"
        self.subdir = kwargs.get("subdir", "ondisktmp")

        if self.subdir:
            self.basedir = "{}/{}".format(self.tmpdir, self.subdir)
        else:
            self.basedir = self.tmpdir

        self.classname = "OnDiskTmpDir"

        # the log directory is a fixed location (not under basedir)
        mkdir("/tmp/ondisktmpdir/log")

        self.logger = ElasticDevLogger(self.classname)

        # bugfix: always define fqn_dir/dir so get()/delete() raise a clear
        # error instead of AttributeError when init=False
        self.fqn_dir = None
        self.dir = None

        if kwargs.get("init", True): self.set_dir(**kwargs)

    def set_dir(self, **kwargs):
        """Generate (and optionally create) the random directory path."""

        createdir = kwargs.get("createdir", True)

        self.fqn_dir, self.dir = generate_random_path(self.basedir,
                                                      folder_depth=1,
                                                      folder_length=16,
                                                      createdir=createdir,
                                                      string_only=True)

        return self.fqn_dir

    def get(self, **kwargs):
        """Return the fully-qualified temp dir, raising when not yet set."""

        if not self.fqn_dir:
            msg = "fqn_dir has not been set"
            raise Exception(msg)

        self.logger.debug('Returning fqn_dir "{}"'.format(self.fqn_dir))

        return self.fqn_dir

    def delete(self, **kwargs):
        """Recursively remove the temp dir."""

        self.logger.debug('Deleting fqn_dir "{}"'.format(self.fqn_dir))

        return rm_rf(self.fqn_dir)
Ejemplo n.º 6
0
    def __init__(self, **kwargs):
        """Initialize base helper state plus gcloud/docker defaults."""

        ResourceCmdHelper.__init__(self)
        self.classname = 'GcloudCli'
        self.logger = ElasticDevLogger(self.classname)
        self.logger.debug("Instantiating %s" % self.classname)

        # lazily-populated config and scratch-dir handles
        self.file_config = None
        self.file_config_loc = None
        self.tempdir = None

        # input keys propagated onto created resources as tags
        self.resource_tags_keys = [
            "tags",
            "name",
            "schedule_id",
            "job_instance_id",
            "job_id",
        ]

        # per-run stateful directory under the shared volume
        self.share_dir = os.environ.get("SHARE_DIR", "/var/tmp/share")
        self.stateful_dir = os.path.join(self.share_dir, id_generator(8))
        self.docker_image = "google/cloud-sdk"
        self.output = []
Ejemplo n.º 7
0
    def __init__(self, **kwargs):
        """Resolve the base directory and optionally create the temp dir."""

        self.tmpdir = kwargs.get("tmpdir") or "/tmp"
        self.subdir = kwargs.get("subdir", "ondisktmp")

        # basedir is tmpdir/subdir unless subdir is explicitly empty
        if self.subdir:
            self.basedir = "{}/{}".format(self.tmpdir, self.subdir)
        else:
            self.basedir = self.tmpdir

        self.classname = "OnDiskTmpDir"

        # the log directory is a fixed location (not under basedir)
        mkdir("/tmp/ondisktmpdir/log")

        self.logger = ElasticDevLogger(self.classname)

        if kwargs.get("init", True):
            self.set_dir(**kwargs)
Ejemplo n.º 8
0
class EC2_connections(object):
    """
    assumes the boto library is installed

    general ec2 connections and attributes for:

    servers
    securitygroups
    ssh keys

    """

    def __init__(self, **kwargs):
        """Read the AWS region and credentials out of the environment."""

        self.classname = 'EC2_connections'
        self.logger = ElasticDevLogger(self.classname)
        self.logger.debug("Instantiating %s" % self.classname)

        # these must be exported; a missing one raises KeyError (fail fast)
        env = os.environ
        self.aws_default_region = env["AWS_DEFAULT_REGION"]
        self.aws_access_key_id = env["AWS_ACCESS_KEY_ID"]
        self.aws_secret_access_key = env["AWS_SECRET_ACCESS_KEY"]

    def _set_conn(self):
        '''establish and cache an EC2 connection for the configured region'''

        self.conn = boto.ec2.connect_to_region(
            self.aws_default_region,
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key)

        return self.conn

    def _regions_list(self):
        '''return the available EC2 regions'''

        return boto.ec2.regions()
Ejemplo n.º 9
0
def get_hash(data):
    '''Return the md5 hex digest of a data object, or False on failure.'''

    logger = ElasticDevLogger("get_hash")

    try:
        # bugfix: hashlib requires bytes - encode text transparently so
        # both str and bytes inputs hash in-process instead of always
        # falling back to the shellout for str input
        if isinstance(data, str):
            _bytes = data.encode("utf-8")
        else:
            _bytes = data
        calculated_hash = hashlib.md5(_bytes).hexdigest()
    except Exception:
        logger.debug("Falling back to shellout md5sum for hash")
        calculated_hash = shellout_hash(data)

    if not calculated_hash:
        logger.error("Could not calculate hash for %s" % data)
        return False

    return calculated_hash
Ejemplo n.º 10
0
def convert_str2json(_object, exit_error=None):
    """Convert a string to a dict/list.

    Dicts and lists pass through unchanged.  Tries json.loads first,
    then a python eval.  Returns False when conversion fails, or exits
    with code 13 when exit_error is set.
    """

    # already structured - nothing to do
    if isinstance(_object, dict): return _object
    if isinstance(_object, list): return _object

    logger = ElasticDevLogger("convert_str2json")

    try:
        return json.loads(_object)
    except (ValueError, TypeError):
        # fall through and try a python eval instead
        pass

    try:
        # SECURITY: eval on arbitrary input is dangerous - this helper
        # must only be used on trusted, internally-generated strings
        return eval(_object)
    except Exception:
        if exit_error: exit(13)
        return False
Ejemplo n.º 11
0
class AwsCli(ResourceCmdHelper):
    """Helper for shelling out to the aws cli (tags, env parsing, region)."""

    def __init__(self, **kwargs):

        ResourceCmdHelper.__init__(self)
        self.classname = 'AwsCli'
        self.logger = ElasticDevLogger(self.classname)
        self.logger.debug("Instantiating %s" % self.classname)

        self.file_config = None
        self.file_config_loc = None
        self.tempdir = None

        # input keys propagated onto created resources as tags
        self.resource_tags_keys = [
            "tags", "name", "schedule_id", "job_instance_id", "job_id"
        ]

    def get_tags(self):
        """Return the flat list of tag values for this resource."""

        tags = [self.aws_default_region, self.product, self.provider]

        for key_eval in self.resource_tags_keys:
            if not self.inputargs.get(key_eval): continue
            tags.append(self.inputargs[key_eval])

        return tags

    def set_ondisktmp(self):
        """Allocate an on-disk temporary directory."""
        self.tempdir = OnDiskTmpDir()

    def write_file_config(self):
        """Persist self.file_config as json to file_config_loc."""

        with open(self.file_config_loc, 'w') as _file:
            _file.write(json.dumps(self.file_config, indent=4))

    def parse_set_env_vars(self, env_vars, upper_case=True):
        """Populate self.inputargs from the listed environment variables.

        Keys are stored lower-cased; the environment variable itself is
        looked up upper-cased.
        """

        self.inputargs = {}

        for env_var in env_vars:
            # note: the original special-cased aws_default_region, but
            # both branches performed the identical assignment
            if not os.environ.get(env_var.upper()): continue
            self.inputargs[env_var] = os.environ[env_var.upper()]

    def get_resource_tags(self, **kwargs):
        """Build the aws cli tag specification string.

        Format: [{Key=Name,Value=<name>},{Key=<k>,Value=<v>},...]

        bugfix: the original emitted a leading comma ("[,{...") whenever
        "name" was absent but other tag keys were present.
        """

        name = kwargs.get("name")
        if not name: name = self.inputargs.get("name")

        entries = []

        if name:
            entries.append("{" + "Key={},Value={}".format("Name", name) + "}")

        for key_eval in self.resource_tags_keys:
            if not self.inputargs.get(key_eval): continue
            entries.append("{" + "Key={},Value={}".format(
                key_eval, self.inputargs[key_eval]) + "}")

        return "[" + ",".join(entries) + "]"

    def get_cmd_region(self, cmd):
        """Append the --region flag to an aws cli command."""
        return "{} --region {}".format(cmd, self.aws_default_region)

    def get_region(self):
        """Resolve aws_default_region from inputargs (default us-east-1)."""

        self.aws_default_region = self.inputargs.get("aws_default_region")

        if not self.aws_default_region or self.aws_default_region == "None":
            self.aws_default_region = "us-east-1"

        self.logger.debug('Region set to "{}"'.format(self.aws_default_region))
Ejemplo n.º 12
0
class GcloudCli(ResourceCmdHelper):
    """Helper for running gcloud either directly or through docker."""

    def __init__(self, **kwargs):

        ResourceCmdHelper.__init__(self)
        self.classname = 'GcloudCli'
        self.logger = ElasticDevLogger(self.classname)
        self.logger.debug("Instantiating %s" % self.classname)

        self.file_config = None
        self.file_config_loc = None
        self.tempdir = None

        # input keys propagated onto created resources as tags
        self.resource_tags_keys = [
            "tags", "name", "schedule_id", "job_instance_id", "job_id"
        ]

        # per-run stateful directory under the shared volume
        self.share_dir = os.environ.get("SHARE_DIR", "/var/tmp/share")
        self.stateful_dir = os.path.join(self.share_dir, id_generator(8))
        self.docker_image = "google/cloud-sdk"
        self.output = []

    def get_tags(self):
        """Return the flat list of tag values for this resource."""

        tags = [self.gcloud_region, self.product, self.provider]

        for key_eval in self.resource_tags_keys:
            if not self.inputargs.get(key_eval): continue
            tags.append(self.inputargs[key_eval])

        return tags

    def set_ondisktmp(self):
        """Allocate an on-disk temporary directory."""
        self.tempdir = OnDiskTmpDir()

    def write_file_config(self):
        """Persist self.file_config as json to file_config_loc."""

        with open(self.file_config_loc, 'w') as _file:
            _file.write(json.dumps(self.file_config, indent=4))

    def parse_set_env_vars(self, env_vars, upper_case=True):
        """Populate self.inputargs from the listed environment variables.

        Keys are stored lower-cased; the environment variable itself is
        looked up upper-cased.
        """

        self.inputargs = {}

        for env_var in env_vars:
            # note: the original special-cased gcloud_region, but both
            # branches performed the identical assignment
            if not os.environ.get(env_var.upper()): continue
            self.inputargs[env_var] = os.environ[env_var.upper()]

    def get_resource_tags(self, **kwargs):
        """Build the cli tag specification string.

        bugfix: the original emitted a leading comma ("[,{...") whenever
        "name" was absent but other tag keys were present.
        """

        name = kwargs.get("name")
        if not name: name = self.inputargs.get("name")

        entries = []

        if name:
            entries.append("{" + "Key={},Value={}".format("Name", name) + "}")

        for key_eval in self.resource_tags_keys:
            if not self.inputargs.get(key_eval): continue
            entries.append("{" + "Key={},Value={}".format(
                key_eval, self.inputargs[key_eval]) + "}")

        return "[" + ",".join(entries) + "]"

    def get_region(self):
        """Resolve gcloud_region from inputargs (default us-west1)."""

        self.gcloud_region = self.inputargs.get("gcloud_region")

        if not self.gcloud_region or self.gcloud_region == "None":
            self.gcloud_region = "us-west1"

        self.logger.debug('Region set to "{}"'.format(self.gcloud_region))

    #################################################################################################################
    # non docker execution

    def _get_init_credentials_cmds(self):
        """Return the gcloud auth/project setup commands."""

        self.set_required()
        cmds = [
            "gcloud auth activate-service-account --key-file={}".format(
                self.google_application_credentials)
        ]
        cmds.append("gcloud config set project {}".format(self.gcloud_project))

        return cmds

    def set_credentials(self):
        """Run the credential-activation commands, recording their output."""

        cmds = self._get_init_credentials_cmds()

        for cmd in cmds:
            results = self.execute(cmd, output_to_json=None, exit_error=True)
            output = results.get("output")
            if output: self.logger.debug(output)

            self.add_output(cmd=cmd, remove_empty=True, **results)

    #################################################################################################################
    # docker execution

    def cleanup_docker_run(self):
        """Remove the helper container, creds file and temp dir (best effort)."""

        if hasattr(self,
                   "gcloud_container_name") and self.gcloud_container_name:
            # bugfix: pass the command as a string - the original wrapped
            # it in a one-element list, unlike every other execute() call
            cmd = "docker rm -fv {} 2>&1 > /dev/null".format(
                self.gcloud_container_name)
            self.execute(cmd, exit_error=False, output_to_json=None)

        if hasattr(self, "filename") and self.filename:
            os.system("rm -rf {}".format(self.filename))

        if hasattr(self, "tempdir") and self.tempdir:
            self.tempdir.delete()

    def init_docker_run(self):
        """Pull the gcloud image and activate credentials in a container.

        Returns True on success, False as soon as any setup command fails.
        """

        self.gcloud_container_name = id_generator(8)

        cmds = [
            "docker pull {}:latest 2>&1 > /dev/null".format(self.docker_image)
        ]
        # remove any stale gcloud containers from previous runs
        cmds.append(
            'for i in `docker ps -a|grep gcloud| cut -d " " -f 1`; do echo $i; docker rm -fv $i; done'
        )

        cmds.append(
            "docker run -v {}:{} --name {} {} gcloud auth activate-service-account --key-file {} || exit 4"
            .format(self.google_application_credentials,
                    self.google_application_credentials,
                    self.gcloud_container_name, self.docker_image,
                    self.google_application_credentials))

        cmds.append(
            "docker run --rm --volumes-from {} {} gcloud config set project {}"
            .format(self.gcloud_container_name, self.docker_image,
                    self.gcloud_project))

        for cmd in cmds:

            results = self.execute(cmd, output_to_json=None, exit_error=False)
            status = results.get("status")
            output = results.get("output")
            if output: self.logger.debug(output)

            self.add_output(cmd=cmd, remove_empty=True, **results)

            if not status: return False

        return True

    def write_cloud_creds(self):
        """Assemble a service-account json file from GCLOUD_* env variables.

        Returns the path written, or None when any required variable is
        missing.
        """

        project_id = os.environ.get("GCLOUD_PROJECT")
        private_key_id = os.environ.get("GCLOUD_PRIVATE_KEY_ID")
        private_key = os.environ.get("GCLOUD_PRIVATE_KEY")
        client_id = os.environ.get("GCLOUD_CLIENT_ID")
        client_email = os.environ.get("GCLOUD_CLIENT_EMAIL")
        client_x509_cert_url = os.environ.get("GCLOUD_CLIENT_X509_CERT_URL")

        if not project_id:
            self.logger.debug(
                "GCLOUD_PROJECT is required for write credentials")
            return

        if not private_key_id:
            self.logger.debug(
                "GCLOUD_PRIVATE_KEY_ID is required for write credentials")
            return

        if not private_key:
            self.logger.debug(
                "GCLOUD_PRIVATE_KEY is required for write credentials")
            return

        if not client_id:
            self.logger.debug(
                "GCLOUD_CLIENT_ID is required for write credentials")
            return

        if not client_email:
            self.logger.debug(
                "GCLOUD_CLIENT_EMAIL is required for write credentials")
            return

        if not client_x509_cert_url:
            self.logger.debug(
                "GCLOUD_CLIENT_X509_CERT_URL is required for write credentials"
            )
            return

        if not hasattr(self, "tempdir") or not self.tempdir:
            self.set_ondisktmp()

        self.google_application_credentials = os.path.join(
            self.stateful_dir, ".creds", "gcloud.json")

        creds_dir = os.path.dirname(self.google_application_credentials)

        auth_uri = os.environ.get("GCLOUD_AUTH_URI",
                                  "https://accounts.google.com/o/oauth2/auth")
        token_uri = os.environ.get("GCLOUD_TOKEN_URI",
                                   "https://oauth2.googleapis.com/token")
        auth_provider = os.environ.get(
            "GCLOUD_AUTH_PROVIDER",
            "https://www.googleapis.com/oauth2/v1/certs")

        values = {
            "type": "service_account",
            "auth_uri": auth_uri,
            "token_uri": token_uri,
            "auth_provider_x509_cert_url": auth_provider,
            "project_id": project_id,
            "private_key_id": private_key_id,
            "private_key": private_key,
            "client_email": client_email,
            "client_id": client_id,
            "client_x509_cert_url": client_x509_cert_url,
        }

        # collapse doubled backslashes that env-var transport introduces
        # into the private key material
        json_object = json.dumps(values, indent=2).replace('\\\\', '\\')

        if not os.path.exists(creds_dir):
            os.system("mkdir -p {}".format(creds_dir))

        self.logger.debug("gcloud directory {} ...".format(
            self.google_application_credentials))

        with open(self.google_application_credentials, "w") as outfile:
            outfile.write(json_object)

        return self.google_application_credentials

    def set_required(self):
        """Resolve the credentials file and project, exiting(4) if missing."""

        self.google_application_credentials = self.write_cloud_creds()

        if not self.google_application_credentials:
            self.google_application_credentials = os.environ.get(
                "GOOGLE_APPLICATION_CREDENTIALS")

        if not self.google_application_credentials:
            self.logger.error(
                'cannot find environmental variables "GOOGLE_APPLICATION_CREDENTIALS"'
            )
            exit(4)

        self.gcloud_project = os.environ.get("GCLOUD_PROJECT")

        if not self.gcloud_project:
            self.logger.error(
                'cannot find environmental variables "GCLOUD_PROJECT"')
            exit(4)

        return True
Ejemplo n.º 13
0
class ResourceCmdHelper(object):
    def __init__(self, **kwargs):
        """Set up the execution/share directory layout.

        Directory naming:
          - exec_base_dir: base run directory (e.g. /tmp/ondisktmp/abc123)
          - app_dir: relative app subdirectory (e.g. var/tmp/ansible)
          - exec_dir: exec_base_dir + app_dir
          - share_dir: directory shared with the execution container
          - run_share_dir: share_dir + stateful_id (e.g. /var/tmp/share/ABC123)
        """

        self.classname = 'ResourceCmdHelper'
        self.logger = ElasticDevLogger(self.classname)
        self.logger.debug("Instantiating %s" % self.classname)

        self.cwd = os.getcwd()

        # EXEC_BASE_DIR falls back to the current directory
        self.exec_base_dir = os.environ.get("EXEC_BASE_DIR") or os.getcwd()

        # environment variables the caller requires to be present
        self.must_exists = kwargs.get("must_exists", [])

        self._set_stateful_params(**kwargs)
        self._set_app_params(**kwargs)
        self._set_exec_dir(**kwargs)

        # template_dir/resources_dir default relative to exec_dir; an
        # inheriting class may override them
        self.template_dir = None
        self.resources_dir = None

        if getattr(self, "exec_dir", None):
            self.template_dir = "{}/_ed_templates".format(self.exec_dir)
            # ref 34532045732
            self.resources_dir = os.path.join(self.exec_dir, "ed_resources")

        self._set_docker_settings(**kwargs)
        self._set_destroy_env_vars(**kwargs)
        self._set_os_env_prefix(**kwargs)
        self._get_docker_env_filepath()

        self.output = []

    def _set_os_env_prefix(self, **kwargs):

        self.os_env_prefix = kwargs.get("os_env_prefix")
        if self.os_env_prefix: return

        if not self.app_name: return

        if self.app_name == "terraform":
            self.os_env_prefix = "TF_VAR"
        elif self.app_name == "ansible":
            self.os_env_prefix = "ANS_VAR"

    def _get_template_vars(self, **kwargs):

        # if the app_template_vars is provided, we use it, otherwise, we
        # assume it is the <APP_NAME>_TEMPLATE_VARS
        _template_vars = kwargs.get("app_template_vars")
        if not _template_vars:
            _template_vars = "{}_TEMPLATE_VARS".format(self.app_name)

        if not os.environ.get(_template_vars.upper()):
            _template_vars = "ED_TEMPLATE_VARS"

        if os.environ.get(_template_vars.upper()):
            return [
                _var.strip()
                for _var in os.environ.get(_template_vars.upper()).split(",")
            ]

        if not self.app_name: return
        if not self.os_env_prefix: return

        # get template_vars e.g. "ANS_VAR_<var>"
        _template_vars = []
        for _var in os.environ.keys():
            if self.os_env_prefix not in _var: continue
            _template_vars.append(_var)

        if not _template_vars:
            self.logger.warn(
                "ED_TEMPLATE_VARS and <APP> template vars not set/given")

        return _template_vars

    def _set_destroy_env_vars(self, **kwargs):

        try:
            self.destroy_env_vars = eval(os.environ.get("DESTROY_ENV_VARS"))
        except:
            self.destroy_env_vars = None

        self.destroy_execgroup = os.environ.get("DESTROY_EXECGROUP")

    def _set_docker_settings(self, **kwargs):

        if "USE_DOCKER" in os.environ:
            self.use_docker = os.environ.get("USE_DOCKER")
            if self.use_docker in ["None", "null", None, "none"]:
                self.use_docker = None
        else:
            self.use_docker = True

        if not self.app_name: return

        self.docker_image = os.environ.get(
            "DOCKER_EXEC_ENV", "elasticdev/{}-run-env".format(self.app_name))

    def _create_dir(self, dir_path):

        if os.path.exists(dir_path): return

        cmd = "mkdir -p {}".format(dir_path)

        self.execute(cmd, output_to_json=False, exit_error=True)

    def _set_stateful_params(self, **kwargs):

        self.share_dir = os.environ.get("SHARE_DIR", "/var/tmp/share")
        self.stateful_id = os.environ.get("STATEFUL_ID")
        #self.stateful_dir = os.environ.get("STATEFUL_DIR")
        self.run_share_dir = None

        self.postscript_path = None
        self.postscript = None
        self.creds_dir = None

        if not self.stateful_id and 'stateful_id' in self.must_exists:
            raise MissingEnvironmentVariable(
                "{} does not exist".format("STATEFUL_ID"))

        if not self.stateful_id: return

        self.run_share_dir = os.path.join(self.share_dir, self.stateful_id)
        self.creds_dir = os.path.join(self.run_share_dir, ".creds")
        self._create_dir(self.run_share_dir)

        return

        #if not self.stateful_id: self.stateful_id = id_generator(20)

        #if self.stateful_id:
        #    self.run_share_dir = os.path.join(self.share_dir,self.stateful_id)
        #else:
        #    self.run_share_dir = self.share_dir

        # This can be overwritten - either you run from the share directory
        # or the exec_base_dir + app/app_dir
        # ref 453646
        #self.exec_dir = os.path.join(self.share_dir,self.stateful_id)

    def _set_app_params(self, **kwargs):

        self.shelloutconfig = None
        self.app_dir = kwargs.get("app_dir")

        self.app_name = kwargs.get("app_name")
        if not self.app_name: return

        # below app_name must be defined
        # set app_dir
        if not self.app_dir:
            self.app_dir = os.environ.get(
                "{}_DIR".format(self.app_name.upper()),
                "/var/tmp/{}".format(self.app_name))

        if self.app_dir[0] == "/": self.app_dir = self.app_dir[1:]

        # this can be overided by inherited class
        self.shelloutconfig = "elasticdev:::{}::resource_wrapper".format(
            self.app_name)

    def _set_exec_dir(self, **kwargs):

        if self.stateful_id:
            #_exec_dir = os.path.join(self.share_dir,self.stateful_id)
            self.exec_dir = self.run_share_dir
        else:
            self.exec_dir = self.exec_base_dir

        # ref 453646
        # overide the exec_dir set from _set_stateful_params
        # e.g. /var/tmp/share/ABC123/var/tmp/ansible
        if self.app_dir:
            self.exec_dir = os.path.join(self.exec_dir, self.app_dir)

        self._create_dir(self.exec_dir)

    def _get_resource_files(self):

        self.logger.debug("getting json files from resources_dir {}".format(
            self.resources_dir))

        if not os.path.exists(self.resources_dir):
            self.logger.debug("DOES NOT EXIST resources_dir {}".format(
                self.resources_dir))
            return

        _files = glob.glob("{}/*.json".format(self.resources_dir))

        self.logger.debug(_files)
        self.logger.debug(_files)
        self.logger.debug(_files)

        if not _files: return

        resources = []

        for _file in _files:

            try:
                _values = json.loads(open(_file, "r").read())
                resources.append(_values)
            except:
                self.logger.warn(
                    "could not retrieve resource json contents from {}".format(
                        _file))

        if not resources: return

        if len(resources) == 1: return resources[0]

        return resources

    def to_resource_db(self, resources):
        """Forward resources to the module-level to_resource_db helper."""
        return to_resource_db(resources)

    def get_state_info(self):
        """Collect resource state by running the post script.

        Prefers json resource files from resources_dir (ref 34532045732);
        otherwise parses the post script's delimited output.  Exits(9)
        when the post script fails.
        """

        if not self.postscript_path:
            self.logger.warn("post script is not set")
            return

        if not os.path.exists(self.postscript_path):
            self.logger.warn("post script {} does not exists".format(
                self.postscript_path))
            return

        os.chdir(self.exec_dir)

        try:
            cmd = [self.postscript_path]

            try:
                output = self.execute(cmd, output_to_json=False,
                                      exit_error=True).get("output")
            except Exception:
                self.logger.debug("{} failed at terraform dir {}".format(
                    self.postscript_path, self.exec_dir))
                exit(9)

            # try resource files from the resources directory first
            # ref 34532045732
            resources = self._get_resource_files()

            if resources: return resources

            if not output: return

            # fall back to converting delimited output to values
            return convert_ed_output_to_values(output)
        finally:
            # bugfix: always restore the original working directory - the
            # original only chdir'd back on one code path
            os.chdir(self.cwd)

    def add_destroy_params(self, resource):

        self.logger.debug(
            "add_destroy_params is to specified by the inherited class")

        return

    def get_resources_details(self):

        resources = self.get_state_info()
        if not resources: return

        if not isinstance(resources, dict) and not isinstance(resources, list):
            self.logger.warn("resource needs to be a dictionary or list!")
            exit(9)

        if isinstance(resources, dict):
            self.add_resource_tags(resources)
            try:
                self.add_destroy_params(resources)
            except:
                self.logger.debug("Did not add destroy params")

        if isinstance(resources, list):
            for _resource in resources:

                self.add_resource_tags(_resource)

                if not _resource.get("main"): continue

                try:
                    self.add_destroy_params(_resource)
                except:
                    self.logger.debug("Did not add destroy params")

        return resources

    def _get_docker_env_filepath(self):

        try:
            _docker_env_file = self.get_env_var("DOCKER_ENV_FILE",
                                                default=".env")
            self.docker_env_file = os.path.join(self.run_share_dir,
                                                _docker_env_file)
        except:
            self.docker_env_file = None

        #self.docker_env_file = os.path.join(self.exec_dir,_docker_env_file)
        #self.docker_env_file = os.path.join(os.getcwd(),_docker_env_file)

        return self.docker_env_file

    # referenced and related to: dup dhdskyeucnfhrt2634521
    def get_env_var(self, variable, default=None, must_exists=None):
        '''
        Look up an environment variable, also trying the os_env_prefix
        variants ("<prefix>_<variable>" as given, lowercased, uppercased).

        :param variable: base name of the environment variable
        :param default: value returned when no variant is set
        :param must_exists: when truthy, raise MissingEnvironmentVariable
                            instead of returning None
        :return: first non-empty value found, then default, then None
        '''

        _value = os.environ.get(variable)
        if _value: return _value

        if self.os_env_prefix:
            # try the prefixed name as given, then lower/upper cased
            for candidate in (variable, variable.lower(), variable.upper()):
                _value = os.environ.get("{}_{}".format(self.os_env_prefix,
                                                       candidate))
                if _value: return _value

        # BUG FIX: compare against None so falsy defaults ("" or 0) are
        # still honored instead of silently dropped.
        if default is not None: return default

        if not must_exists: return
        raise MissingEnvironmentVariable("{} does not exist".format(variable))

    def print_json(self, values):
        '''Delegate to the module-level print_json helper to print values.'''
        print_json(values)

    def templify(self, **kwargs):
        '''
        Render jinja2 templates found under template_dir into exec_dir.

        Template variables are read from the process environment; each
        variable is exposed to the template under both its mapped name
        and its uppercase form.  Existing destination files are skipped
        unless clobber is set.

        :param clobber: overwrite destination files that already exist
        '''

        clobber = kwargs.get("clobber")
        _template_vars = self._get_template_vars(**kwargs)

        if not _template_vars: return

        if not self.template_dir:
            self.logger.warn(
                "template_dir not set (None) - skipping templating")
            return

        template_files = list_template_files(self.template_dir)
        if not template_files:
            self.logger.warn("template_files is empty - skipping templating")
            return

        for _file_stats in template_files:

            template_filepath = _file_stats["file"]
            file_dir = os.path.join(self.exec_dir, _file_stats["directory"])
            # NOTE(review): ".ja2" looks like it may be a typo for ".j2"
            # or ".jinja2" - confirm against the template naming convention.
            file_path = os.path.join(self.exec_dir, _file_stats["directory"],
                                     _file_stats["filename"].split(".ja2")[0])

            # BUG FIX: create directories with os.makedirs instead of
            # shelling out to "mkdir -p" - avoids mis-parsing of paths
            # containing spaces or shell metacharacters.
            if not os.path.exists(file_dir):
                os.makedirs(file_dir)

            if os.path.exists(file_path) and not clobber:
                self.logger.warn(
                    "destination templated file already exists at {} - skipping templifying of it"
                    .format(file_path))
                continue

            self.logger.debug("creating templated file file {} from {}".format(
                file_path, template_filepath))

            templateVars = {}

            if self.os_env_prefix:
                _split_char = "{}_".format(self.os_env_prefix)
            else:
                _split_char = None

            for _var in _template_vars:

                # map "<prefix>_name" to "name"; otherwise the uppercased
                # variable itself becomes the template key
                if _split_char and _split_char in _var:
                    _mapped_key = _var.strip().split(_split_char)[-1]
                else:
                    _mapped_key = _var.strip().upper()

                var = _var.strip()

                if not os.environ.get(var):
                    self.logger.warn("cannot find {} to templify".format(var))
                    continue

                # single quotes are rewritten to double quotes - presumably
                # to keep the rendered value JSON-safe; confirm downstream
                value = os.environ[var].replace("'", '"')

                # include both uppercase and regular keys
                templateVars[_mapped_key] = value
                templateVars[_mapped_key.upper()] = value

            templateLoader = jinja2.FileSystemLoader(searchpath="/")
            templateEnv = jinja2.Environment(loader=templateLoader)
            template = templateEnv.get_template(template_filepath)
            outputText = template.render(templateVars)

            # BUG FIX: context manager guarantees the handle is closed
            # even when write() raises
            with open(file_path, "wb") as writefile:
                writefile.write(outputText)

    def write_key_to_file(self, **kwargs):
        '''
        Write the value of self.inputargs[key] to a file on disk.

        :param key: key in self.inputargs whose value is written
        :param filepath: destination file path
        :param split_char: "return" splits the value on the literal
            two-character sequence backslash+n before writing; any other
            value (including None) writes the value as-is
        :param add_return: append a newline after each written line
        :param copy_to_share: also copy the file into run_share_dir
        :param deserialize: base64-decode the value before writing
        :param permission: chmod mode for the file (default "400")
        :return: filepath when written, None when the key is unset/falsy
        '''

        key = kwargs["key"]
        filepath = kwargs["filepath"]
        split_char = kwargs.get("split_char")
        add_return = kwargs.get("add_return", True)
        copy_to_share = kwargs.get("copy_to_share")
        deserialize = kwargs.get("deserialize")

        # any unusable permission value falls back to read-only for owner
        try:
            permission = str(int(kwargs.get("permission")))
        except:
            permission = "400"

        if not self.inputargs.get(key): return

        _value = self.inputargs[key]
        if deserialize: _value = base64.b64decode(_value)

        # NOTE(review): '\\n' splits on a literal backslash followed by
        # "n", not on an actual newline - confirm this matches how the
        # values are stored upstream.
        if split_char is None:
            _lines = _value
        elif split_char == "return":
            _lines = _value.split('\\n')
        else:
            _lines = _value

        # when _lines is a plain string, this loop writes one character
        # per pass (each followed by a newline when add_return is set)
        with open(filepath, "wb") as wfile:
            for _line in _lines:
                # ref 45230598450
                #wfile.write(_line.replace('"','').replace("'",""))
                wfile.write(_line)
                if not add_return: continue
                wfile.write("\n")

        if permission:
            os.system("chmod {} {}".format(permission, filepath))

        if copy_to_share:
            self.copy_file_to_share(filepath)

        return filepath

    def copy_file_to_share(self, srcfile, dst_subdir=None):
        '''
        Copy srcfile into run_share_dir (optionally under dst_subdir),
        creating the share's parent directory first when needed.  No-op
        when run_share_dir is not set.
        '''

        if not self.run_share_dir:
            self.logger.debug(
                "run_share_dir not defined - skipping sync-ing ...")
            return

        commands = []

        parent_dir = os.path.dirname(self.run_share_dir)
        if not os.path.exists(parent_dir):
            commands.append("mkdir -p {}".format(parent_dir))

        subpath = os.path.basename(srcfile)
        if dst_subdir:
            subpath = "{}/{}".format(dst_subdir, subpath)

        commands.append(
            "cp -rp {} {}".format(srcfile,
                                  "{}/{}".format(self.run_share_dir, subpath)))

        for command in commands:
            self.execute(command, output_to_json=False, exit_error=True)

    def sync_to_share(self, rsync_args=None, exclude_existing=None):
        '''
        Rsync the contents of exec_dir into run_share_dir.  No-op when
        run_share_dir is not set.

        :param rsync_args: flags passed to rsync (default "-avug")
        :param exclude_existing: add --ignore-existing so files already
            present in the share are left untouched
        '''

        if not self.run_share_dir:
            self.logger.debug(
                "run_share_dir not defined - skipping sync-ing ...")
            return

        commands = []

        parent_dir = os.path.dirname(self.run_share_dir)
        if not os.path.exists(parent_dir):
            commands.append("mkdir -p {}".format(parent_dir))

        if not rsync_args:
            rsync_args = "-avug"

        if exclude_existing:
            rsync_args = '{} --ignore-existing '.format(rsync_args)

        sync_cmd = "rsync {} {}/ {}".format(rsync_args, self.exec_dir,
                                            self.run_share_dir)
        self.logger.debug(sync_cmd)
        commands.append(sync_cmd)

        for command in commands:
            self.execute(command, output_to_json=False, exit_error=True)

        self.logger.debug("Sync-ed to run share dir {}".format(
            self.run_share_dir))

    def remap_app_vars(self):
        '''
        Strip the "<os_env_prefix>_" prefix from matching inputargs
        keys, re-keying each value under the shortened name.  No-op
        when os_env_prefix is not set.
        '''

        if not self.os_env_prefix: return

        _split_char = "{}_".format(self.os_env_prefix)

        # BUG FIX: iterate over a snapshot of the items - deleting keys
        # while iterating the live mapping (the old .iteritems() loop)
        # raises "dictionary changed size during iteration".
        for _key, _value in list(self.inputargs.items()):
            if _split_char not in _key: continue
            _mapped_key = _key.split(_split_char)[-1]
            self.logger.debug("mapped key {} value {}".format(_key, _value))
            self.inputargs[_mapped_key] = _value
            del self.inputargs[_key]

    def add_resource_tags(self, resource):
        '''
        Merge the comma-separated RESOURCE_TAGS environment value (plus
        app_name, when set) into resource["tags"], deduplicated.
        Returns the mutated resource, or None when RESOURCE_TAGS is
        unset.
        '''

        raw_tags = self.get_env_var("RESOURCE_TAGS")
        if not raw_tags: return

        parsed = [entry.strip() for entry in raw_tags.split(",")]

        if not isinstance(resource.get("tags"), list):
            resource["tags"] = []

        resource["tags"] += parsed

        if self.app_name:
            resource["tags"] += [self.app_name]

        # deduplicate (order is not preserved)
        resource["tags"] = list(set(resource["tags"]))

        return resource

    def get_hash(self, _object):
        '''Delegate to the module-level get_hash helper.'''
        return get_hash(_object)

    def add_output(self, cmd=None, remove_empty=None, **results):
        '''
        Append results["output"] (converted with to_json) onto
        self.output, optionally preceded by the command string.

        :param cmd: when given, appended to self.output first
        :param remove_empty: skip falsy entries of the converted output
        '''

        try:
            converted = to_json(results["output"])
        except:
            converted = None

        if not converted: return

        if cmd:
            self.output.append(cmd)

        for entry in converted:
            if remove_empty and not entry: continue
            # extend, not append: each entry is itself iterated into output
            self.output.extend(entry)

    def to_json(self, output):
        '''Delegate to the module-level _to_json helper.'''
        return _to_json(output)

    def print_output(self, **kwargs):
        '''
        Print kwargs["output"] (converted with _to_json) between the
        _ed_begin_output / _ed_end_output marker lines, then exit 0.
        The markers presumably let a downstream consumer extract the
        payload from the captured stream - confirm against the caller.
        '''

        output = _to_json(kwargs["output"])

        print '_ed_begin_output'
        print output
        print '_ed_end_output'
        exit(0)

    def _print_output(self, **kwargs):
        '''
        Print kwargs["output"] between the _ed_begin_output /
        _ed_end_output marker lines without exiting.  A placeholder
        message is printed when there is no output; the output is
        converted with _to_json unless output_to_json is falsy.
        '''

        output_to_json = kwargs.get("output_to_json", True)
        output = kwargs.get("output")
        if not output: output = "There is no output from the command"
        if output_to_json: output = _to_json(output)

        print ''
        print ''
        print '_ed_begin_output'
        print output
        print '_ed_end_output'

    def successful_output(self, **kwargs):
        '''Print output via _print_output, then exit with status 0.'''
        self._print_output(**kwargs)
        exit(0)

    def execute(self, cmd, **kwargs):
        '''Run cmd via the module-level execute3 helper.'''
        return execute3(cmd, **kwargs)

    def execute2(self, cmd, **kwargs):
        '''Run cmd via execute3 (the execute4 path is currently disabled).'''
        return execute3(cmd, **kwargs)
        #return execute4(cmd,**kwargs)

    def execute4(self, cmd, **kwargs):
        '''Run cmd via the module-level execute4 helper.'''
        return execute4(cmd, **kwargs)

    def cmd_failed(self, **kwargs):
        '''
        Log a failure message and exit with status 9.

        :param failed_message: message to log; a default is used when absent
        '''

        failed_message = kwargs.get("failed_message")

        if not failed_message:
            # BUG FIX: corrected the grammar of the default message
            # (was "No failed message to outputted")
            failed_message = "No failed message to output"

        self.logger.error(message=failed_message)
        exit(9)

    def set_inputargs(self, upper_case=True, **kwargs):
        '''
        Populate self.inputargs from one of: an explicit "inputargs"
        dict, a "json_input" string (parsed with to_json), or a
        "set_env_vars" list read from the environment.  String "False"
        values are then normalized to the boolean False.
        '''

        if kwargs.get("inputargs"):
            self.inputargs = kwargs["inputargs"]
        elif kwargs.get("json_input"):
            self.inputargs = to_json(kwargs["json_input"], exit_error=True)
        elif kwargs.get("set_env_vars"):
            self.parse_set_env_vars(kwargs["set_env_vars"],
                                    upper_case=upper_case)

        # BUG FIX: iterate a snapshot of items() instead of the Py2-only
        # .iteritems() - identical behavior, portable across versions.
        for _k, _v in list(self.inputargs.items()):
            if _v != "False": continue
            self.inputargs[_k] = False

    # This can be replaced by the inheriting class
    def parse_set_env_vars(self, env_vars, upper_case=True):
        '''
        Build self.inputargs from a list of environment variable names.

        Unset variables and the literal string "None" are skipped; the
        literal string "False" is stored as the boolean False.  The
        "use_docker" key mirrors self.use_docker (True or None).

        :param env_vars: iterable of environment variable names
        :param upper_case: look the variables up in uppercase form
        '''

        self.inputargs = {}

        for env_var in env_vars:

            if upper_case:
                _var = env_var.upper()
            else:
                _var = env_var

            if not os.environ.get(_var): continue
            if os.environ.get(_var) == "None": continue

            # NOTE(review): in the upper_case case, "False" values are
            # stored under the uppercased key (_var) while ordinary
            # values below are stored under the original-case key
            # (env_var) - confirm this asymmetry is intentional.
            if os.environ.get(_var) == "False":
                self.inputargs[_var] = False
                continue

            if upper_case:
                self.inputargs[env_var] = os.environ[_var]
            else:
                self.inputargs[_var] = os.environ[_var]

        if self.use_docker:
            self.inputargs["use_docker"] = True
        else:
            self.inputargs["use_docker"] = None

    def check_required_inputargs(self, **kwargs):
        '''
        Verify that every key in kwargs["keys"] is present in
        self.inputargs.  Returns True when all are present, None when
        no keys were given; otherwise logs the missing keys and fails
        the command (which exits).
        '''

        wanted = kwargs.get("keys")
        if not wanted: return

        missing = [key for key in wanted if key not in self.inputargs]

        if not missing: return True

        self.logger.aggmsg("These keys need to be set:", new=True)
        self.logger.aggmsg("")

        for key in missing:
            self.logger.aggmsg("\t{} or Environmental Variable {}".format(
                key, key.upper()))

        failed_message = self.logger.aggmsg("")
        self.cmd_failed(failed_message=failed_message)

    def check_either_inputargs(self, **kwargs):
        '''
        Verify that at least one key from kwargs["keys"] is present in
        self.inputargs; otherwise log the acceptable keys and fail the
        command (which exits).
        '''

        candidates = kwargs.get("keys")
        if not candidates: return

        if any(key in self.inputargs for key in candidates):
            return

        self.logger.aggmsg("one of these keys need to be set:", new=True)
        self.logger.aggmsg("")

        for key in candidates:
            self.logger.aggmsg("\t{} or Environmental Variable {}".format(
                key, key.upper()))

        failed_message = self.logger.aggmsg("")
        self.cmd_failed(failed_message=failed_message)
Ejemplo n.º 14
0
class DoCli(ResourceCmdHelper):
    '''
    Helper for driving the DigitalOcean CLI: resolves the default
    region and API token, builds common resource tags and assembles
    final command lines.
    '''

    def __init__(self, **kwargs):

        ResourceCmdHelper.__init__(self)
        self.classname = 'DoCli'
        self.logger = ElasticDevLogger(self.classname)
        self.logger.debug("Instantiating %s" % self.classname)
        self.file_config = None       # dict serialized by write_file_config
        self.file_config_loc = None   # destination path for file_config
        self.tempdir = None           # OnDiskTmpDir, created on demand
        # inputargs keys folded into the resource tags
        self.resource_tags_keys = [
            "tags", "name", "schedule_id", "job_instance_id", "job_id"
        ]
        self.get_region()
        self._set_do_token(**kwargs)

    def _set_do_token(self, **kwargs):
        '''Set self.do_token from kwargs or DO_TOKEN; exit 9 when neither is set.'''

        # BUG FIX: the original used os.environ["DO_TOKEN"] as the kwargs
        # default, which raised KeyError (bypassing the explicit error
        # message below) whenever DO_TOKEN was unset.
        self.do_token = kwargs.get("do_token") or os.environ.get("DO_TOKEN")

        if not self.do_token:
            msg = 'The DO_TOKEN environmental variables need to be set'
            self.logger.error(msg)
            exit(9)

    def get_tags(self):
        '''Return base tags (region, product, provider) plus tag-like inputargs values.'''

        tags = [self.do_default_region, self.product, self.provider]

        for key_eval in self.resource_tags_keys:
            if not self.inputargs.get(key_eval): continue
            tags.append(self.inputargs[key_eval])

        return tags

    def set_ondisktmp(self):
        '''Create the on-disk temporary directory helper.'''
        self.tempdir = OnDiskTmpDir()

    def write_file_config(self):
        '''Serialize self.file_config as indented JSON into self.file_config_loc.'''

        with open(self.file_config_loc, 'w') as _file:
            _file.write(json.dumps(self.file_config, indent=4))

    def parse_set_env_vars(self, env_vars, upper_case=True):
        '''
        Build self.inputargs from the given environment variable names
        (looked up uppercased).  Overrides the base-class version.
        '''

        self.inputargs = {}

        for env_var in env_vars:

            if not os.environ.get(env_var.upper()): continue

            if env_var == "do_default_region":
                self.inputargs["do_default_region"] = os.environ[
                    env_var.upper()]
            else:
                self.inputargs[env_var] = os.environ[env_var.upper()]

    def get_final_cmd(self,
                      cmd,
                      add_region=True,
                      output_to_json=True,
                      add_token=True):
        '''
        Append the standard region/token/output flags to a CLI command.

        NOTE(review): passing the token with -t puts the secret on the
        process command line (visible in ps) - confirm this is
        acceptable for the deployment environment.
        '''

        if add_region:
            cmd = "{} --region {}".format(cmd, self.do_default_region)
        if add_token: cmd = "{} -t {}".format(cmd, self.do_token)
        if output_to_json: cmd = "{} --output json".format(cmd)

        return cmd

    def get_region(self):
        '''Set self.do_default_region from inputargs, defaulting to "nyc3".'''

        # inputargs may not exist yet when called from __init__
        try:
            self.do_default_region = self.inputargs.get("do_default_region")
        except:
            self.do_default_region = None

        if not self.do_default_region or self.do_default_region == "None":
            self.do_default_region = "nyc3"

        self.logger.debug('Region set to "{}"'.format(self.do_default_region))
Ejemplo n.º 15
0
def execute3(cmd, print_error=True, **kwargs):
    '''
    Run a shell command, capturing combined stdout/stderr.

    :param cmd: shell command string (run with shell=True)
    :param print_error: accepted for interface compatibility
    :param env_vars: object whose .get() yields a dict exported into
        os.environ before running
    :param output_queue: queue the results dict is pushed onto
    :param output_to_json: attempt json.loads on the output (default True)
    :param exit_error: exit the process with the command's code on failure
    :return: dict with "status" and "output"; failures additionally
        carry "failed_message" and "exitcode"
    '''

    logger = ElasticDevLogger("execute3")
    logger.debug("Running command %s from directory %s" % (cmd, os.getcwd()))

    output_queue = kwargs.get("output_queue")
    env_vars = kwargs.get("env_vars")
    output_to_json = kwargs.get("output_to_json", True)

    if env_vars:
        env_vars = env_vars.get()

        for ek, ev in env_vars.iteritems():
            # os.environ only accepts strings
            if ev is None:
                ev = "None"
            elif not isinstance(ev, str) and not isinstance(ev, unicode):
                ev = str(ev)
            logger.debug(
                "Setting environment variable {} to {}, type {}".format(
                    ek, ev, type(ev)))
            os.environ[ek] = ev

    exit_error = kwargs.get("exit_error")

    process = Popen(cmd, shell=True, bufsize=0, stdout=PIPE, stderr=STDOUT)
    output = process.communicate()[0]

    if process.returncode != 0:

        # BUG FIX: new=True was previously passed to str.format() instead
        # of aggmsg(), so the aggregated message was never reset.
        logger.aggmsg('exit code {}'.format(process.returncode), new=True)
        logger.aggmsg(output, prt=True, cmethod="error")

        results = {"status": False}
        results["failed_message"] = output
        results["output"] = output
        results["exitcode"] = process.returncode
        if output_queue: output_queue.put(results)
        if exit_error: exit(process.returncode)
        return results

    if output_to_json and not isinstance(output, dict):
        try:
            output = json.loads(output)
        except:
            logger.warn("Could not convert output to json")

    results = {"status": True}
    results["output"] = output

    if output_queue:
        logger.debug("Attempting to place results in the output_queue")
        try:
            output_queue.put(results)
        except:
            logger.error("Could not append the results to the output_queue")

    return results