Example #1
def get_s3_connection(aws_connect_kwargs, location, rgw, s3_url):
    if s3_url and rgw:
        rgw = urlparse(s3_url)
        s3 = boto.connect_s3(
            is_secure=rgw.scheme == 'https',
            host=rgw.hostname,
            port=rgw.port,
            calling_format=OrdinaryCallingFormat(),
            **aws_connect_kwargs
        )
    elif is_fakes3(s3_url):
        fakes3 = urlparse(s3_url)
        s3 = S3Connection(
            is_secure=fakes3.scheme == 'fakes3s',
            host=fakes3.hostname,
            port=fakes3.port,
            calling_format=OrdinaryCallingFormat(),
            **aws_connect_kwargs
        )
    elif is_walrus(s3_url):
        walrus = urlparse(s3_url).hostname
        s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
    else:
        aws_connect_kwargs['is_secure'] = True
        try:
            s3 = connect_to_aws(boto.s3, location, **aws_connect_kwargs)
        except AnsibleAWSError:
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            s3 = boto.connect_s3(**aws_connect_kwargs)
    return s3
Example #2
def test_generic_urlparse():
    url = 'https://ansible.com/blog'
    parts = urlparse(url)
    generic_parts = generic_urlparse(parts)
    assert generic_parts.as_list() == list(parts)

    assert urlunparse(generic_parts.as_list()) == url
Example #3
    def run(self, terms, variables=None, **kwargs):

        if not HAS_CONSUL:
            raise AnsibleError('python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation')

        values = []
        try:
            for term in terms:
                params = self.parse_params(term)
                try:
                    url = os.environ['ANSIBLE_CONSUL_URL']
                    u = urlparse(url)
                    consul_api = consul.Consul(host=u.hostname, port=u.port)
                except KeyError:
                    port = kwargs.get('port', '8500')
                    host = kwargs.get('host', 'localhost')
                    consul_api = consul.Consul(host=host, port=port)

                results = consul_api.kv.get(params['key'],
                                            token=params['token'],
                                            index=params['index'],
                                            recurse=params['recurse'])
                if results[1]:
                    # responds with a single or list of result maps
                    if isinstance(results[1], list):
                        for r in results[1]:
                            values.append(r['Value'])
                    else:
                        values.append(results[1]['Value'])
        except Exception as e:
            raise AnsibleError(
                "Error locating '%s' in kv store. Error was %s" % (term, e))

        return values
Example #4
def get_fqdn_and_port(repo_url):

    """ chop the hostname and port out of a url """

    fqdn = None
    port = None
    ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?')
    if "@" in repo_url and "://" not in repo_url:
        # most likely a user@host:path or user@host/path type URL
        repo_url = repo_url.split("@", 1)[1]
        match = ipv6_re.match(repo_url)
        # For this type of URL, colon specifies the path, not the port
        if match:
            fqdn, path = match.groups()
        elif ":" in repo_url:
            fqdn = repo_url.split(":")[0]
        elif "/" in repo_url:
            fqdn = repo_url.split("/")[0]
    elif "://" in repo_url:
        # this should be something we can parse with urlparse
        parts = urlparse(repo_url)
        # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
        # ensure we actually have a parts[1] before continuing.
        if parts[1] != '':
            fqdn = parts[1]
            if "@" in fqdn:
                fqdn = fqdn.split("@", 1)[1]
            match = ipv6_re.match(fqdn)
            if match:
                fqdn, port = match.groups()
            elif ":" in fqdn:
                fqdn, port = fqdn.split(":")[0:2]
    return fqdn, port
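A quick illustration of the three branches, assuming get_fqdn_and_port from the example above is in scope (the import path in the comment is hypothetical):

# from ansible.module_utils.known_hosts import get_fqdn_and_port  # hypothetical location

print(get_fqdn_and_port("git@github.com:ansible/ansible.git"))
# ('github.com', None) -- scp-like syntax: the colon starts the path, not a port
print(get_fqdn_and_port("ssh://git@example.com:2222/repo.git"))
# ('example.com', '2222')
print(get_fqdn_and_port("ssh://[2001:db8::1]:2222/repo.git"))
# ('[2001:db8::1]', '2222') -- IPv6 literals keep their brackets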
Example #5
    def __init__(self, args):
        self._args = args
        self._cloud_environment = None
        self._compute_client = None
        self._resource_client = None
        self._network_client = None

        self.debug = False
        if args.debug:
            self.debug = True

        self.credentials = self._get_credentials(args)
        if not self.credentials:
            self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                      "or define a profile in ~/.azure/credentials.")

        # if cloud_environment specified, look up/build Cloud object
        raw_cloud_env = self.credentials.get('cloud_environment')
        if not raw_cloud_env:
            self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD  # SDK default
        else:
            # try to look up "well-known" values via the name attribute on azure_cloud members
            all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
            matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
            if len(matched_clouds) == 1:
                self._cloud_environment = matched_clouds[0]
            elif len(matched_clouds) > 1:
                self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
            else:
                if not urlparse.urlparse(raw_cloud_env).scheme:
                    self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
                try:
                    self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
                except Exception as e:
                    self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message))

        if self.credentials.get('subscription_id', None) is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        if self.credentials.get('credentials'):
            self.azure_credentials = self.credentials.get('credentials')
        elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'):
            self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
                                                                 secret=self.credentials['secret'],
                                                                 tenant=self.credentials['tenant'],
                                                                 cloud_environment=self._cloud_environment)
        elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
            tenant = self.credentials.get('tenant')
            if not tenant:
                tenant = 'common'
            self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
                                                         self.credentials['password'],
                                                         tenant=tenant,
                                                         cloud_environment=self._cloud_environment)
        else:
            self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
                      "Credentials must include client_id, secret and tenant or ad_user and password.")
Example #6
def test_generic_urlparse_no_netloc_no_host():
    url = '/blog'
    parts = list(urlparse(url))
    generic_parts = generic_urlparse(parts)
    assert generic_parts.username is None
    assert generic_parts.password is None
    assert generic_parts.port is None
    assert generic_parts.hostname == ''
Example #7
def test_generic_urlparse_netloc():
    url = 'https://ansible.com:443/blog'
    parts = urlparse(url)
    generic_parts = generic_urlparse(parts)
    assert generic_parts.hostname == parts.hostname
    assert generic_parts.hostname == 'ansible.com'
    assert generic_parts.port == 443
    assert urlunparse(generic_parts.as_list()) == url
Example #8
def is_walrus(s3_url):
    """ Return True if it's Walrus endpoint, not S3

    We assume anything other than *.amazonaws.com is Walrus"""
    if s3_url is not None:
        o = urlparse.urlparse(s3_url)
        return not o.hostname.endswith('amazonaws.com')
    else:
        return False
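One caveat worth noting, demonstrable with the stdlib alone (Python 3 import shown): a scheme-less URL leaves hostname as None, so the endswith() call above would raise AttributeError; callers are expected to pass fully qualified URLs.

from urllib.parse import urlparse

print(urlparse('https://s3.amazonaws.com').hostname)     # 's3.amazonaws.com' -> not Walrus
print(urlparse('https://storage.example.org').hostname)  # 'storage.example.org' -> Walrus
print(urlparse('storage.example.org').hostname)          # None -> .endswith() would blow up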
Example #9
def test_generic_urlparse_no_netloc():
    url = 'https://user:passwd@ansible.com:443/blog'
    parts = list(urlparse(url))
    generic_parts = generic_urlparse(parts)
    assert generic_parts.hostname == 'ansible.com'
    assert generic_parts.port == 443
    assert generic_parts.username == 'user'
    assert generic_parts.password == 'passwd'
    assert urlunparse(generic_parts.as_list()) == url
Example #10
def do_notify_bearychat(module, url, payload):
    response, info = fetch_url(module, url, data=payload)
    if info['status'] != 200:
        url_info = urlparse(url)
        obscured_incoming_webhook = urlunparse(
            (url_info.scheme, url_info.netloc, '[obscured]', '', '', ''))
        module.fail_json(
            msg=" failed to send %s to %s: %s" % (
                payload, obscured_incoming_webhook, info['msg']))
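The obscuring step can be exercised in isolation with the stdlib; the token-bearing path is replaced before the URL reaches any log output (the webhook URL below is made up):

from urllib.parse import urlparse, urlunparse

url = 'https://hook.example.com/incoming/secret-token'
u = urlparse(url)
print(urlunparse((u.scheme, u.netloc, '[obscured]', '', '', '')))
# https://hook.example.com/[obscured]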
Example #11
    def verify_md5(self, file, remote_url):
        if os.path.exists(file):
            local_md5 = self._local_md5(file)
            if self.local:
                parsed_url = urlparse(remote_url)
                remote_md5 = self._local_md5(parsed_url.path)
            else:
                remote_md5 = self._getContent(remote_url + '.md5', "Failed to retrieve MD5", False)
            return local_md5 == remote_md5
        return False
Example #12
def is_registry_match(item, pattern):
    """returns True if the registry matches the given whitelist pattern

    Unlike in OpenShift, the comparison is done solely on hostname part
    (excluding the port part) since the latter is much more difficult due to
    vague definition of port defaulting based on insecure flag. Moreover, most
    of the registries will be listed without the port and insecure flag.
    """
    item = "schema://" + item.split('://', 1)[-1]
    return is_match(urlparse(item).hostname, pattern.rsplit(':', 1)[0])
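The dummy 'schema://' prefix matters because urlparse() only recognizes a netloc after '//'; without it the registry string is misread and hostname comes back None. A stdlib-only check:

from urllib.parse import urlparse

print(urlparse('registry.example.com:5000/image').hostname)
# None -- no '//', so the host is misread as a scheme/path
print(urlparse('schema://registry.example.com:5000/image').hostname)
# 'registry.example.com' -- port stripped, ready for pattern matching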
Example #13
    @staticmethod
    def _get_policy_id_from_response(response):
        """
        Parse the policy id out of the creation response.
        :param response: response from the firewall creation API call
        :return: policy_id: firewall policy id from the creation call
        """
        url = response.get('links')[0]['href']
        path = urlparse(url).path
        path_list = os.path.split(path)
        policy_id = path_list[-1]
        return policy_id
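The extraction reduces to two stdlib calls; a self-contained sketch with a made-up response shape and policy id:

import os
from urllib.parse import urlparse

response = {'links': [{'href': 'https://api.example.com/v2/firewallPolicies/a1b2c3'}]}
path = urlparse(response['links'][0]['href']).path  # '/v2/firewallPolicies/a1b2c3'
print(os.path.split(path)[-1])                      # 'a1b2c3'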
Example #14
def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url):
    if s3_url and rgw:  # TODO - test this
        rgw = urlparse(s3_url)
        params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
    elif is_fakes3(s3_url):
        # drop boto2-only keywords that boto3's client() does not accept
        for kw in ['is_secure', 'host', 'port']:
            if kw in aws_connect_kwargs:
                del aws_connect_kwargs[kw]
        fakes3 = urlparse(s3_url)
        if fakes3.scheme == 'fakes3s':
            protocol = "https"
        else:
            protocol = "http"
        params = dict(service_name='s3', endpoint_url="%s://%s:%s" % (protocol, fakes3.hostname, to_text(fakes3.port)),
                      use_ssl=fakes3.scheme == 'fakes3s', region_name=None, **aws_connect_kwargs)
    elif is_walrus(s3_url):
        walrus = urlparse(s3_url).hostname
        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=walrus, **aws_connect_kwargs)
    else:
        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
    return boto3_conn(**params)
Example #15
def lib_utils_oo_hostname_from_url(url):
    """ Returns the hostname contained in a URL

        Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
    """
    if not isinstance(url, string_types):
        raise errors.AnsibleFilterError("|failed expects a string or unicode")
    parse_result = urlparse(url)
    if parse_result.netloc != '':
        return parse_result.netloc
    else:
        # netloc wasn't parsed, assume url was missing scheme and path
        return parse_result.path
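The netloc/path fallback is plain stdlib behavior: with a scheme the host lands in netloc, without one urlparse() files the whole string under path.

from urllib.parse import urlparse

print(urlparse('https://ose3-master.example.com/v1/api').netloc)  # 'ose3-master.example.com'
print(urlparse('ose3-master.example.com').netloc)                 # '' -> filter falls back to...
print(urlparse('ose3-master.example.com').path)                   # 'ose3-master.example.com'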
Example #16
    def _getContent(self, url, failmsg, force=True):
        if self.local:
            parsed_url = urlparse(url)
            if os.path.isfile(parsed_url.path):
                with io.open(parsed_url.path, 'rb') as f:
                    return f.read()
            if force:
                raise ValueError(failmsg + " because the file could not be found: " + url)
            return None
        response = self._request(url, failmsg, force)
        if response:
            return response.read()
        return None
Example #17
def update_qsl(url, params):
    ''' Add or update a URL query string '''

    if HAS_URLPARSE:
        url_parts = list(urlparse(url))
        query = dict(parse_qsl(url_parts[4]))
        query.update(params)
        url_parts[4] = urlencode(query)
        return urlunparse(url_parts)
    elif '?' in url:
        return url + '&' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
    else:
        return url + '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
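A stdlib round-trip of what the HAS_URLPARSE branch does; note the query may come back reordered since it passes through a dict (the example URL is made up):

from urllib.parse import urlparse, urlunparse, parse_qsl, urlencode

url = 'https://example.com/search?q=ansible&page=1'
url_parts = list(urlparse(url))
query = dict(parse_qsl(url_parts[4]))
query.update({'page': '2', 'lang': 'en'})
url_parts[4] = urlencode(query)
print(urlunparse(url_parts))
# e.g. https://example.com/search?q=ansible&page=2&lang=en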
Example #18
    def _request(self, url, failmsg, force=True):
        url_to_use = url
        parsed_url = urlparse(url)

        if parsed_url.scheme == 's3':
            bucket_name = parsed_url.netloc
            key_name = parsed_url.path[1:]
            client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
            url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)

        req_timeout = self.module.params.get('timeout')

        # Hack to add parameters in the way that fetch_url expects
        self.module.params['url_username'] = self.module.params.get('username', '')
        self.module.params['url_password'] = self.module.params.get('password', '')
        self.module.params['http_agent'] = self.module.params.get('user_agent', None)

        response, info = fetch_url(self.module, url_to_use, timeout=req_timeout)
        if info['status'] == 200:
            return response
        if force:
            raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use)
        return None
Example #19
    def detect_no_proxy(self, url):
        '''
        Detect if the 'no_proxy' environment variable is set and honor those locations.
        '''
        env_no_proxy = os.environ.get('no_proxy')
        if env_no_proxy:
            env_no_proxy = env_no_proxy.split(',')
            netloc = urlparse(url).netloc

            for host in env_no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # Our requested URL matches something in no_proxy, so don't
                    # use the proxy for this
                    return False
        return True
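The match is a plain suffix test on the netloc, run both with and without the port; isolated here with hypothetical values:

from urllib.parse import urlparse

netloc = urlparse('https://internal.example.com:8443/api').netloc  # 'internal.example.com:8443'
for host in ['.example.com', 'other.org']:
    matched = netloc.endswith(host) or netloc.split(':')[0].endswith(host)
    print(host, matched)
# .example.com True  (port stripped before the suffix test)
# other.org False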
Example #20
    def auth_params(self):
        # Get authentication credentials.
        # Precedence: module parameters-> environment variables-> defaults.

        self.log('Getting credentials')

        params = dict()
        for key in DOCKER_COMMON_ARGS:
            params[key] = self.module.params.get(key)

        if self.module.params.get('use_tls'):
            # support use_tls option in docker_image.py. This will be deprecated.
            use_tls = self.module.params.get('use_tls')
            if use_tls == 'encrypt':
                params['tls'] = True
            if use_tls == 'verify':
                params['tls_verify'] = True

        result = dict(
            docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
                                        DEFAULT_DOCKER_HOST),
            tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
                                         'DOCKER_TLS_HOSTNAME', 'localhost'),
            api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
                                        'auto'),
            cacert_path=self._get_value('cacert_path', params['cacert_path'], 'DOCKER_CERT_PATH', None),
            cert_path=self._get_value('cert_path', params['cert_path'], 'DOCKER_CERT_PATH', None),
            key_path=self._get_value('key_path', params['key_path'], 'DOCKER_CERT_PATH', None),
            ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
            tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
            tls_verify=self._get_value('tls_verify', params['tls_verify'], 'DOCKER_TLS_VERIFY',
                                       DEFAULT_TLS_VERIFY),
            timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
                                    DEFAULT_TIMEOUT_SECONDS),
        )

        if result['tls_hostname'] is None:
            # get default machine name from the url
            parsed_url = urlparse(result['docker_host'])
            if ':' in parsed_url.netloc:
                result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
            else:
                result['tls_hostname'] = parsed_url.netloc

        return result
Example #21
def maybe_add_ssl_handler(url, validate_certs):
    parsed = generic_urlparse(urlparse(url))
    if parsed.scheme == 'https' and validate_certs:
        if not HAS_SSL:
            raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,'
                             ' however this is unsafe and not recommended')

        # do the cert validation
        netloc = parsed.netloc
        if '@' in netloc:
            netloc = netloc.split('@', 1)[1]
        if ':' in netloc:
            hostname, port = netloc.split(':', 1)
            port = int(port)
        else:
            hostname = netloc
            port = 443
        # create the SSL validation handler and
        # add it to the list of handlers
        return SSLValidationHandler(hostname, port)
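The credential/port handling is plain string work on the netloc; a stdlib-only trace of the same steps:

from urllib.parse import urlparse

netloc = urlparse('https://user:pass@ansible.com:8443/path').netloc
if '@' in netloc:
    netloc = netloc.split('@', 1)[1]   # drop credentials -> 'ansible.com:8443'
if ':' in netloc:
    hostname, port = netloc.split(':', 1)
    port = int(port)
else:
    hostname, port = netloc, 443       # https default
print(hostname, port)                  # ansible.com 8443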
Example #22
    def download(self, artifact, verify_download, filename=None):
        filename = artifact.get_filename(filename)
        if not artifact.version or artifact.version == "latest":
            artifact = Artifact(artifact.group_id, artifact.artifact_id,
                                self.find_latest_version_available(artifact),
                                artifact.classifier, artifact.extension)
        url = self.find_uri_for_artifact(artifact)
        if self.local:
            parsed_url = urlparse(url)
            if os.path.isfile(parsed_url.path):
                shutil.copy2(parsed_url.path, filename)
            else:
                return "Cannot find local file: " + parsed_url.path
        else:
            response = self._request(url, "Failed to download artifact " + str(artifact))
            with io.open(filename, 'wb') as f:
                self._write_chunks(response, f, report_hook=self.chunk_report)
        if verify_download and not self.verify_md5(filename, url):
            # if verify_change was set, the previous file would be deleted
            os.remove(filename)
            return "Checksum verification failed"
        return None
Example #23
def maybe_add_ssl_handler(url, validate_certs):
    # FIXME: change the following to use the generic_urlparse function
    #        to remove the indexed references for 'parsed'
    parsed = urlparse(url)
    if parsed[0] == 'https' and validate_certs:
        if not HAS_SSL:
            raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,'
                             ' however this is unsafe and not recommended')

        # do the cert validation
        netloc = parsed[1]
        if '@' in netloc:
            netloc = netloc.split('@', 1)[1]
        if ':' in netloc:
            hostname, port = netloc.split(':', 1)
            port = int(port)
        else:
            hostname = netloc
            port = 443
        # create the SSL validation handler and
        # add it to the list of handlers
        return SSLValidationHandler(hostname, port)
Example #24
    def handle_uri(self):
        '''Allow uri module on localhost if it doesn't touch unsafe files.

        The :ansible:module:`uri` module can be used from the executor to do
        things like pinging readthedocs.org that otherwise don't need a node.
        However, it can also download content to a local file, or be used to
        read from file:/// urls.

        Block any use of url schemes other than https, http and ftp. Further,
        block any local file interaction that falls outside of the zuul
        work dir.
        '''
        # uri takes all the file arguments, so just let handle_file validate
        # them for us.
        self.handle_file()
        scheme = urlparse(self._task.args['url']).scheme
        if scheme not in ALLOWED_URL_SCHEMES:
            raise AnsibleError(
                "{scheme} urls are not allowed from localhost."
                " Only {allowed_schemes} are allowed".format(
                    scheme=scheme,
                    allowed_schemes=ALLOWED_URL_SCHEMES))
Example #25
    def get_consul_api(self):
        '''get an instance of the api based on the supplied configuration'''
        host = 'localhost'
        port = 8500
        token = None
        scheme = 'http'

        if hasattr(self, 'url'):
            from ansible.module_utils.six.moves.urllib.parse import urlparse
            o = urlparse(self.url)
            if o.hostname:
                host = o.hostname
            if o.port:
                port = o.port
            if o.scheme:
                scheme = o.scheme

        if hasattr(self, 'token'):
            token = self.token
            if not token:
                token = 'anonymous'
        return consul.Consul(host=host, port=port, token=token, scheme=scheme)
Example #26
    def api_call(self, path):
        ''' Performs an API request '''

        headers = {
            'X-API-Token': self.token,
            'X-API-Version': self.version,
            'Content-Type': 'application/json;charset=utf-8'
        }

        target = urlparse(self.uri + path)
        method = 'GET'
        body = ''

        try:
            response, content = self.conn.request(target.geturl(), method, body, headers)
        except Exception:
            self.fail_with_error('Error connecting to Rudder server')

        try:
            data = json.loads(content)
        except ValueError as e:
            self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response')

        return data
Example #27
def is_fakes3(s3_url):
    """ Return True if s3_url has scheme fakes3:// """
    if s3_url is not None:
        return urlparse.urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
    else:
        return False
Example #28
def main():
    # Validate that the utility-module dependency is available
    if HAS_IDG_DEPS:
        module_args = dict(
            state=dict(type='str',
                       required=False,
                       default='directory',
                       choices=['absent', 'directory', 'move',
                                'show']),  # State alternatives
            path=dict(type='str', required=True),  # Path to resource
            source=dict(
                type='str',
                required=False),  # Source. Only valid when state = move
            overwrite=dict(
                type='bool', required=False,
                default=False),  # overwrite target. Valid when state = move
            domain=dict(type='str', required=True),  # Domain name
            idg_connection=dict(type='dict',
                                options=idg_endpoint_spec,
                                required=True)  # IDG connection
        )

        # AnsibleModule instantiation
        module = AnsibleModule(
            argument_spec=module_args,
            supports_check_mode=True,
            required_if=[["state", "move", ["source", "overwrite"]]])
    else:
        # Failure AnsibleModule instance
        module = AnsibleModule(argument_spec={}, check_invalid_arguments=False)
        module.fail_json(msg="The IDG utils modules is required")

    # Parse arguments to dict
    idg_data_spec = IDGUtils.parse_to_dict(module,
                                           module.params['idg_connection'],
                                           'IDGConnection',
                                           IDGUtils.ANSIBLE_VERSION)
    path = module.params['path']
    state = module.params['state']
    domain_name = module.params['domain']

    # Init IDG API connect
    idg_mgmt = IDGApi(ansible_module=module,
                      idg_host="https://{0}:{1}".format(
                          idg_data_spec['server'],
                          idg_data_spec['server_port']),
                      headers=IDGUtils.BASIC_HEADERS,
                      http_agent=IDGUtils.HTTP_AGENT_SPEC,
                      use_proxy=idg_data_spec['use_proxy'],
                      timeout=idg_data_spec['timeout'],
                      validate_certs=idg_data_spec['validate_certs'],
                      user=idg_data_spec['user'],
                      password=idg_data_spec['password'],
                      force_basic_auth=IDGUtils.BASIC_AUTH_SPEC)

    # Intermediate values for result
    tmp_result = {
        "domain": domain_name,
        "msg": None,
        "path": None,
        "changed": None,
        "output": None
    }

    #
    # Here the action begins
    #

    try:
        # Do request
        parse = urlparse(path)
        ldir = parse.scheme  # Local directory
        rpath = parse.path  # Relative path
        path_as_list = [d for d in rpath.split('/') if d.strip() != '']
        td = '/'.join([IDGApi.URI_FILESTORE.format(domain_name),
                       ldir])  # Path prefix

        if state == 'directory':
            # Create directory recursively
            for d in path_as_list:

                idg_mgmt.api_call(td, method='GET', id="get_remote_directory")

                if idg_mgmt.is_ok(idg_mgmt.last_call()):

                    td = '/'.join([td, d])
                    tmp_result['path'] = idg_mgmt.apifilestore_uri2path(td)

                    location = idg_mgmt.last_call()["data"]['filestore']['location']
                    if 'directory' in location:
                        filestore_abst = AbstractListDict(location['directory'])  # Get directories
                    else:
                        filestore_abst = AbstractListDict({})  # Contains no directories

                    if ('href' in filestore_abst.keys()) and (
                            td in filestore_abst.values(
                                key='href')):  # if directory exist
                        tmp_result['msg'] = IDGUtils.IMMUTABLE_MESSAGE

                    else:  # Not exist, create it
                        # If the user is working in only check mode we do not want to make any changes
                        IDGUtils.implement_check_mode(module)

                        create_dir_msg = {"directory": {"name": d}}
                        idg_mgmt.api_call(td,
                                          method='PUT',
                                          data=json.dumps(create_dir_msg),
                                          id="create_directory")

                        if idg_mgmt.is_created(idg_mgmt.last_call()):
                            tmp_result['msg'] = idg_mgmt.last_call()["data"]['result']
                            tmp_result['changed'] = True
                        else:
                            module.fail_json(
                                msg=IDGApi.ERROR_REACH_STATE.format(
                                    state, domain_name) + str(
                                        ErrorHandler(idg_mgmt.last_call()
                                                     ["data"]['error'])))

                else:
                    module.fail_json(msg=IDGApi.GENERAL_ERROR.format(
                        __MODULE_FULLNAME, state, domain_name) + str(
                            ErrorHandler(
                                idg_mgmt.call_by_id("get_remote_directory")
                                ["data"]['error'])))

        elif state == 'move':  # Move remote files

            # If the user is working in only check mode we do not want to make any changes
            IDGUtils.implement_check_mode(module)

            move_file_msg = {
                "MoveFile": {
                    "sURL": module.params['source'].strip('/'),
                    "dURL": path.strip('/'),
                    "Overwrite":
                    IDGUtils.str_on_off(module.params['overwrite'])
                }
            }
            tmp_result['path'] = move_file_msg['MoveFile']['dURL']

            idg_mgmt.api_call(IDGApi.URI_ACTION.format(domain_name),
                              method='POST',
                              data=json.dumps(move_file_msg),
                              id="move_file")

            if idg_mgmt.is_ok(idg_mgmt.last_call()):
                tmp_result['msg'] = idg_mgmt.last_call()["data"]['MoveFile']
                tmp_result['changed'] = True
            else:
                module.fail_json(
                    msg=IDGApi.ERROR_REACH_STATE.format(state, domain_name) +
                    str(ErrorHandler(idg_mgmt.last_call()["data"]['error'])))

        elif state == 'show':  # Show details of file or content of directories

            # If the user is working in only check mode we do not want to make any changes
            IDGUtils.implement_check_mode(module)

            list_target = '/'.join([td] + path_as_list)
            idg_mgmt.api_call(list_target,
                              method='GET',
                              id="get_remote_target")

            if idg_mgmt.is_ok(idg_mgmt.last_call()):
                output = {}
                if 'filestore' in idg_mgmt.last_call()["data"]:  # target is a directory

                    location = idg_mgmt.last_call()["data"]['filestore']['location']

                    if 'directory' in location:
                        output['directory'] = [{"name": i["name"]}
                                               for i in AbstractListDict(location['directory']).raw_data()]

                    if 'file' in location:
                        output['file'] = [{"name": i["name"], "size": i["size"], "modified": i["modified"]}
                                          for i in AbstractListDict(location['file']).raw_data()]
                else:
                    idg_mgmt.api_call('/'.join([td] + path_as_list[:-1]),
                                      method='GET',
                                      id="get_file_detail")

                    if idg_mgmt.is_ok(idg_mgmt.last_call()):
                        output = [{"name": i["name"], "size": i["size"], "modified": i["modified"]}
                                  for i in idg_mgmt.last_call()["data"]['filestore']['location']['file']
                                  if i['name'] == path_as_list[-1]]
                    else:
                        module.fail_json(msg=IDGApi.ERROR_REACH_STATE.format(
                            state, domain_name) + str(
                                ErrorHandler(idg_mgmt.last_call()["data"]
                                             ['error'])))

                tmp_result['msg'] = IDGUtils.COMPLETED_MESSAGE
                tmp_result['path'] = idg_mgmt.apifilestore_uri2path(
                    list_target)
                tmp_result['output'] = output

            else:
                module.fail_json(msg=IDGApi.GENERAL_ERROR.format(
                    __MODULE_FULLNAME, state, domain_name) + str(
                        ErrorHandler(
                            idg_mgmt.call_by_id("get_remote_target")["data"]
                            ['error'])))

        else:  # Remove
            # Remove directory recursively

            # If the user is working in only check mode we do not want to make any changes
            IDGUtils.implement_check_mode(module)

            td = '/'.join([td] + path_as_list)
            idg_mgmt.api_call(td, method='DELETE', id="remove_remote_target")
            tmp_result['path'] = idg_mgmt.apifilestore_uri2path(td)

            if idg_mgmt.is_ok(idg_mgmt.last_call()):
                tmp_result['msg'] = idg_mgmt.last_call()["data"]['result']
                tmp_result['changed'] = True

            elif idg_mgmt.is_notfound(idg_mgmt.last_call()):
                tmp_result['msg'] = IDGUtils.IMMUTABLE_MESSAGE

            else:
                module.fail_json(
                    msg=IDGApi.ERROR_REACH_STATE.format(state, domain_name) +
                    str(ErrorHandler(idg_mgmt.last_call()["data"]['error'])))

        #
        # Finish
        #
        # Customize ('result' is presumably a module-level skeleton defined outside this excerpt)
        del result['name']
        # Update
        for k, v in tmp_result.items():
            if v is not None:
                result[k] = v

    except Exception as e:
        # Uncontrolled exception
        module.fail_json(msg=(IDGUtils.UNCONTROLLED_EXCEPTION +
                              '. {0}').format(to_native(e)))
    else:
        # That's all folks!
        module.exit_json(**result)
Example #29
def open_url(url,
             data=None,
             headers=None,
             method=None,
             use_proxy=True,
             force=False,
             last_mod_time=None,
             timeout=10,
             validate_certs=True,
             url_username=None,
             url_password=None,
             http_agent=None,
             force_basic_auth=False,
             follow_redirects='urllib2'):
    '''
    Fetches a file from an HTTP/FTP server using urllib2

    Does not require the module environment
    '''
    handlers = []
    ssl_handler = maybe_add_ssl_handler(url, validate_certs)
    if ssl_handler:
        handlers.append(ssl_handler)

    # FIXME: change the following to use the generic_urlparse function
    #        to remove the indexed references for 'parsed'
    parsed = urlparse(url)
    if parsed[0] != 'ftp':
        username = url_username

        if headers is None:
            headers = {}

        if username:
            password = url_password
            netloc = parsed[1]
        elif '@' in parsed[1]:
            credentials, netloc = parsed[1].split('@', 1)
            if ':' in credentials:
                username, password = credentials.split(':', 1)
            else:
                username = credentials
                password = ''

            parsed = list(parsed)
            parsed[1] = netloc

            # reconstruct url without credentials
            url = urlunparse(parsed)

        if username and not force_basic_auth:
            passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()

            # this creates a password manager
            passman.add_password(None, netloc, username, password)

            # because we have put None at the start it will always
            # use this username/password combination for urls
            # for which `theurl` is a super-url
            authhandler = urllib_request.HTTPBasicAuthHandler(passman)

            # create the AuthHandler
            handlers.append(authhandler)

        elif username and force_basic_auth:
            headers["Authorization"] = basic_auth_header(username, password)

        else:
            try:
                rc = netrc.netrc(os.environ.get('NETRC'))
                login = rc.authenticators(parsed[1])
            except IOError:
                login = None

            if login:
                username, _, password = login
                if username and password:
                    headers["Authorization"] = basic_auth_header(
                        username, password)

    if not use_proxy:
        proxyhandler = urllib_request.ProxyHandler({})
        handlers.append(proxyhandler)

    if HAS_SSLCONTEXT and not validate_certs:
        # In 2.7.9, the default context validates certificates
        context = SSLContext(ssl.PROTOCOL_SSLv23)
        context.options |= ssl.OP_NO_SSLv2
        context.options |= ssl.OP_NO_SSLv3
        context.verify_mode = ssl.CERT_NONE
        context.check_hostname = False
        handlers.append(urllib_request.HTTPSHandler(context=context))

    # pre-2.6 versions of python cannot use the custom https
    # handler, since the socket class is lacking create_connection.
    # Some python builds lack HTTPS support.
    if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
        handlers.append(CustomHTTPSHandler)

    handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs))

    opener = urllib_request.build_opener(*handlers)
    urllib_request.install_opener(opener)

    if method:
        if method.upper() not in ('OPTIONS', 'GET', 'HEAD', 'POST', 'PUT',
                                  'DELETE', 'TRACE', 'CONNECT', 'PATCH'):
            raise ConnectionError('invalid HTTP request method; %s' %
                                  method.upper())
        request = RequestWithMethod(url, method.upper(), data)
    else:
        request = urllib_request.Request(url, data)

    # add the custom agent header, to help prevent issues
    # with sites that block the default urllib agent string
    if http_agent:
        request.add_header('User-agent', http_agent)

    # if we're ok with getting a 304, set the timestamp in the
    # header, otherwise make sure we don't get a cached copy
    if last_mod_time and not force:
        tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
        request.add_header('If-Modified-Since', tstamp)
    else:
        request.add_header('cache-control', 'no-cache')

    # user defined headers now, which may override things we've set above
    if headers:
        if not isinstance(headers, dict):
            raise ValueError("headers provided to fetch_url() must be a dict")
        for header in headers:
            request.add_header(header, headers[header])

    urlopen_args = [request, None]
    if sys.version_info >= (2, 6, 0):
        # urlopen in python prior to 2.6.0 did not
        # have a timeout parameter
        urlopen_args.append(timeout)

    r = urllib_request.urlopen(*urlopen_args)
    return r
Example #30
    @staticmethod
    def parse_rommon_file_location(rommon_file_location):
        rommon_url = urlparse(rommon_file_location)
        if rommon_url.scheme != 'tftp':
            raise ValueError('The ROMMON image must be downloaded from a TFTP server; other protocols are not supported.')
        return rommon_url.netloc, rommon_url.path
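For reference, what urlparse() returns for a TFTP URL (server address made up):

from urllib.parse import urlparse

u = urlparse('tftp://10.0.0.1/images/rommon.bin')
print(u.scheme, u.netloc, u.path)  # tftp 10.0.0.1 /images/rommon.bin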
Example #31
def create_virtual_machine(module, azure):
    """
    Create new virtual machine

    module : AnsibleModule object
    azure: authenticated azure ServiceManagementService object

    Returns:
        True if a new virtual machine and/or cloud service was created, false otherwise
    """
    name = module.params.get('name')
    os_type = module.params.get('os_type')
    hostname = module.params.get('hostname') or name + ".cloudapp.net"
    endpoints = module.params.get('endpoints').split(',')
    ssh_cert_path = module.params.get('ssh_cert_path')
    user = module.params.get('user')
    password = module.params.get('password')
    location = module.params.get('location')
    role_size = module.params.get('role_size')
    storage_account = module.params.get('storage_account')
    image = module.params.get('image')
    virtual_network_name = module.params.get('virtual_network_name')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    changed = False

    # Check if a deployment with the same name already exists
    cloud_service_name_available = azure.check_hosted_service_name_availability(name)
    if cloud_service_name_available.result:
        # cloud service does not exist; create it
        try:
            result = azure.create_hosted_service(service_name=name, label=name, location=location)
            _wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
            changed = True
        except AzureException as e:
            module.fail_json(msg="failed to create the new service, error was: %s" % str(e))

    try:
        # check to see if a vm with this name exists; if so, do nothing
        azure.get_role(name, name, name)
    except AzureMissingException:
        # vm does not exist; create it

        if os_type == 'linux':
            # Create linux configuration
            disable_ssh_password_authentication = not password
            vm_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
        else:
            # Create Windows Config
            vm_config = WindowsConfigurationSet(hostname, password, None, module.params.get('auto_updates'), None, user)
            vm_config.domain_join = None
            if module.params.get('enable_winrm'):
                listener = Listener('Http')
                vm_config.win_rm.listeners.listeners.append(listener)
            else:
                vm_config.win_rm = None

        # Add ssh certificates if specified
        if ssh_cert_path:
            fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path)
            # Add certificate to cloud service
            result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '')
            _wait_for_completion(azure, result, wait_timeout, "add_service_certificate")

            # Create ssh config
            ssh_config = SSH()
            ssh_config.public_keys = PublicKeys()
            authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
            ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
            # Append ssh config to linux machine config
            vm_config.ssh = ssh_config

        # Create network configuration
        network_config = ConfigurationSetInputEndpoints()
        network_config.configuration_set_type = 'NetworkConfiguration'
        network_config.subnet_names = []
        network_config.public_ips = None
        for port in endpoints:
            network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
                                                                                protocol='TCP',
                                                                                port=port,
                                                                                local_port=port))

        # First determine where to store disk
        today = datetime.date.today().strftime('%Y-%m-%d')
        disk_prefix = u'%s-%s' % (name, name)
        media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today)
        # Create system hard disk
        os_hd = OSVirtualHardDisk(image, media_link)

        # Spin up virtual machine
        try:
            result = azure.create_virtual_machine_deployment(service_name=name,
                                                             deployment_name=name,
                                                             deployment_slot='production',
                                                             label=name,
                                                             role_name=name,
                                                             system_config=vm_config,
                                                             network_config=network_config,
                                                             os_virtual_hard_disk=os_hd,
                                                             role_size=role_size,
                                                             role_type='PersistentVMRole',
                                                             virtual_network_name=virtual_network_name)
            _wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
            changed = True
        except AzureException as e:
            module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))

    try:
        deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
        return (changed, urlparse(deployment.url).hostname, deployment)
    except AzureException as e:
        module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))
Example #32
def open_url(url,
             data=None,
             headers=None,
             method=None,
             use_proxy=True,
             force=False,
             last_mod_time=None,
             timeout=10,
             validate_certs=True,
             url_username=None,
             url_password=None,
             http_agent=None,
             force_basic_auth=False,
             follow_redirects='urllib2',
             client_cert=None,
             client_key=None,
             cookies=None):
    '''
    Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)

    Does not require the module environment
    '''
    handlers = []
    ssl_handler = maybe_add_ssl_handler(url, validate_certs)
    if ssl_handler:
        handlers.append(ssl_handler)

    parsed = generic_urlparse(urlparse(url))
    if parsed.scheme != 'ftp':
        username = url_username

        if headers is None:
            headers = {}

        if username:
            password = url_password
            netloc = parsed.netloc
        elif '@' in parsed.netloc:
            credentials, netloc = parsed.netloc.split('@', 1)
            if ':' in credentials:
                username, password = credentials.split(':', 1)
            else:
                username = credentials
                password = ''

            parsed_list = parsed.as_list()
            parsed_list[1] = netloc

            # reconstruct url without credentials
            url = urlunparse(parsed_list)

        if username and not force_basic_auth:
            passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()

            # this creates a password manager
            passman.add_password(None, netloc, username, password)

            # because we have put None at the start it will always
            # use this username/password combination for urls
            # for which `theurl` is a super-url
            authhandler = urllib_request.HTTPBasicAuthHandler(passman)
            digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman)

            # create the AuthHandler
            handlers.append(authhandler)
            handlers.append(digest_authhandler)

        elif username and force_basic_auth:
            headers["Authorization"] = basic_auth_header(username, password)

        else:
            try:
                rc = netrc.netrc(os.environ.get('NETRC'))
                login = rc.authenticators(parsed.hostname)
            except IOError:
                login = None

            if login:
                username, _, password = login
                if username and password:
                    headers["Authorization"] = basic_auth_header(
                        username, password)

    if not use_proxy:
        proxyhandler = urllib_request.ProxyHandler({})
        handlers.append(proxyhandler)

    if HAS_SSLCONTEXT and not validate_certs:
        # In 2.7.9, the default context validates certificates
        context = SSLContext(ssl.PROTOCOL_SSLv23)
        context.options |= ssl.OP_NO_SSLv2
        context.options |= ssl.OP_NO_SSLv3
        context.verify_mode = ssl.CERT_NONE
        context.check_hostname = False
        handlers.append(
            HTTPSClientAuthHandler(client_cert=client_cert,
                                   client_key=client_key,
                                   context=context))
    elif client_cert:
        handlers.append(
            HTTPSClientAuthHandler(client_cert=client_cert,
                                   client_key=client_key))

    # pre-2.6 versions of python cannot use the custom https
    # handler, since the socket class is lacking create_connection.
    # Some python builds lack HTTPS support.
    if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
        handlers.append(CustomHTTPSHandler)

    handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs))

    # add some nicer cookie handling
    if cookies is not None:
        handlers.append(urllib_request.HTTPCookieProcessor(cookies))

    opener = urllib_request.build_opener(*handlers)
    urllib_request.install_opener(opener)

    data = to_bytes(data, nonstring='passthru')
    if method:
        if method.upper() not in ('OPTIONS', 'GET', 'HEAD', 'POST', 'PUT',
                                  'DELETE', 'TRACE', 'CONNECT', 'PATCH'):
            raise ConnectionError('invalid HTTP request method; %s' %
                                  method.upper())
        request = RequestWithMethod(url, method.upper(), data)
    else:
        request = urllib_request.Request(url, data)

    # add the custom agent header, to help prevent issues
    # with sites that block the default urllib agent string
    if http_agent:
        request.add_header('User-agent', http_agent)

    # Cache control
    # Either we directly force a cache refresh
    if force:
        request.add_header('cache-control', 'no-cache')
    # or we do it if the original is more recent than our copy
    elif last_mod_time:
        tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
        request.add_header('If-Modified-Since', tstamp)

    # user defined headers now, which may override things we've set above
    if headers:
        if not isinstance(headers, dict):
            raise ValueError("headers provided to open_url() must be a dict")
        for header in headers:
            request.add_header(header, headers[header])

    urlopen_args = [request, None]
    if sys.version_info >= (2, 6, 0):
        # urlopen in python prior to 2.6.0 did not
        # have a timeout parameter
        urlopen_args.append(timeout)

    r = urllib_request.urlopen(*urlopen_args)
    return r
Example #33
def is_fakes3(s3_url):
    """ Return True if s3_url has scheme fakes3:// """
    if s3_url is not None:
        return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
    else:
        return False
Example #34
    def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
                 check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
                 required_one_of=None, add_file_common_args=False, supports_check_mode=False,
                 required_if=None, supports_tags=True, facts_module=False, skip_exec=False):

        merged_arg_spec = dict()
        merged_arg_spec.update(AZURE_COMMON_ARGS)
        if supports_tags:
            merged_arg_spec.update(AZURE_TAG_ARGS)

        if derived_arg_spec:
            merged_arg_spec.update(derived_arg_spec)

        merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
        if required_if:
            merged_required_if += required_if

        self.module = AnsibleModule(argument_spec=merged_arg_spec,
                                    bypass_checks=bypass_checks,
                                    no_log=no_log,
                                    check_invalid_arguments=check_invalid_arguments,
                                    mutually_exclusive=mutually_exclusive,
                                    required_together=required_together,
                                    required_one_of=required_one_of,
                                    add_file_common_args=add_file_common_args,
                                    supports_check_mode=supports_check_mode,
                                    required_if=merged_required_if)

        if not HAS_PACKAGING_VERSION:
            self.fail("Do you have packaging installed? Try `pip install packaging` "
                      "- {0}".format(HAS_PACKAGING_VERSION_EXC))

        if not HAS_MSRESTAZURE:
            self.fail("Do you have msrestazure installed? Try `pip install msrestazure` "
                      "- {0}".format(HAS_MSRESTAZURE_EXC))

        if not HAS_AZURE:
            self.fail("Do you have azure>={1} installed? Try `pip install 'azure>={1}' --upgrade` "
                      "- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))

        self._cloud_environment = None
        self._network_client = None
        self._storage_client = None
        self._resource_client = None
        self._compute_client = None
        self._dns_client = None
        self._web_client = None
        self._containerservice_client = None

        self.check_mode = self.module.check_mode
        self.facts_module = facts_module
        # self.debug = self.module.params.get('debug')

        # authenticate
        self.credentials = self._get_credentials(self.module.params)
        if not self.credentials:
            self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                      "or define a profile in ~/.azure/credentials or be logged using AzureCLI.")

        # if cloud_environment specified, look up/build Cloud object
        raw_cloud_env = self.credentials.get('cloud_environment')
        if not raw_cloud_env:
            self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD  # SDK default
        else:
            # try to look up "well-known" values via the name attribute on azure_cloud members
            all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
            matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
            if len(matched_clouds) == 1:
                self._cloud_environment = matched_clouds[0]
            elif len(matched_clouds) > 1:
                self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
            else:
                if not urlparse.urlparse(raw_cloud_env).scheme:
                    self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
                try:
                    self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
                except Exception as e:
                    self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message), exception=traceback.format_exc(e))

        if self.credentials.get('subscription_id', None) is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        if self.credentials.get('client_id') is not None and \
           self.credentials.get('secret') is not None and \
           self.credentials.get('tenant') is not None:
            self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
                                                                 secret=self.credentials['secret'],
                                                                 tenant=self.credentials['tenant'],
                                                                 cloud_environment=self._cloud_environment)

        elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
            tenant = self.credentials.get('tenant')
            if not tenant:
                tenant = 'common'  # SDK default

            self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
                                                         self.credentials['password'],
                                                         tenant=tenant,
                                                         cloud_environment=self._cloud_environment)
        else:
            self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
                      "Credentials must include client_id, secret and tenant or ad_user and password or "
                      "be logged using AzureCLI.")

        # common parameter validation
        if self.module.params.get('tags'):
            self.validate_tags(self.module.params['tags'])

        if not skip_exec:
            res = self.exec_module(**self.module.params)
            self.module.exit_json(**res)
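
The name-or-URL disambiguation above can be exercised on its own. A minimal sketch, assuming only the standard library; KNOWN_CLOUD_NAMES stands in for the real azure_cloud member list and is illustrative only:

from urllib.parse import urlparse

# Illustrative subset; the real module collects these from azure_cloud members.
KNOWN_CLOUD_NAMES = ['AzureCloud', 'AzureChinaCloud', 'AzureUSGovernment']

def resolve_cloud_environment(raw_value):
    # A bare name must match a known cloud; anything else must look like an
    # endpoint discovery URL, i.e. carry a scheme such as https://.
    if raw_value in KNOWN_CLOUD_NAMES:
        return ('well-known', raw_value)
    if not urlparse(raw_value).scheme:
        raise ValueError('cloud_environment must be an endpoint discovery URL '
                         'or one of %s' % KNOWN_CLOUD_NAMES)
    return ('metadata-endpoint', raw_value)

print(resolve_cloud_environment('AzureCloud'))            # ('well-known', 'AzureCloud')
print(resolve_cloud_environment('https://example.test'))  # ('metadata-endpoint', ...)
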
Ejemplo n.º 35
0
    def __init__(self, argument_spec, **kwargs):
        args = dict(
            tower_host=dict(required=False,
                            fallback=(env_fallback, ['TOWER_HOST'])),
            tower_username=dict(required=False,
                                fallback=(env_fallback, ['TOWER_USERNAME'])),
            tower_password=dict(no_log=True,
                                required=False,
                                fallback=(env_fallback, ['TOWER_PASSWORD'])),
            validate_certs=dict(type='bool',
                                aliases=['tower_verify_ssl'],
                                required=False,
                                fallback=(env_fallback, ['TOWER_VERIFY_SSL'])),
            tower_oauthtoken=dict(type='str',
                                  no_log=True,
                                  required=False,
                                  fallback=(env_fallback,
                                            ['TOWER_OAUTH_TOKEN'])),
            tower_config_file=dict(type='path', required=False, default=None),
        )
        args.update(argument_spec)
        kwargs['supports_check_mode'] = True

        self.json_output = {'changed': False}

        super(TowerModule, self).__init__(argument_spec=args, **kwargs)

        self.load_config_files()

        # Parameters specified on command line will override settings in any config
        if self.params.get('tower_host'):
            self.host = self.params.get('tower_host')
        if self.params.get('tower_username'):
            self.username = self.params.get('tower_username')
        if self.params.get('tower_password'):
            self.password = self.params.get('tower_password')
        if self.params.get('validate_certs') is not None:
            self.verify_ssl = self.params.get('validate_certs')
        if self.params.get('tower_oauthtoken'):
            self.oauth_token = self.params.get('tower_oauthtoken')

        # Perform some basic validation
        if not re.match('^https{0,1}://', self.host):
            self.host = "https://{0}".format(self.host)

        # Try to parse the hostname as a url
        try:
            self.url = urlparse(self.host)
        except Exception as e:
            self.fail_json(
                msg="Unable to parse tower_host as a URL ({1}): {0}".format(
                    self.host, e))

        # Try to resolve the hostname
        hostname = self.url.netloc.split(':')[0]
        try:
            gethostbyname(hostname)
        except Exception as e:
            self.fail_json(
                msg="Unable to resolve tower_host ({1}): {0}".format(
                    hostname, e))

        self.session = Request(cookies=CookieJar(),
                               validate_certs=self.verify_ssl)
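
The host normalization above (prepend a scheme, parse, then strip the port for the DNS lookup) is easy to verify in isolation; a minimal sketch using only the standard library:

import re
from urllib.parse import urlparse

def normalize_tower_host(host):
    # Prepend a scheme when none is given so urlparse fills in netloc.
    if not re.match(r'^https?://', host):
        host = 'https://{0}'.format(host)
    # Strip any :port suffix to get the bare hostname for gethostbyname().
    return host, urlparse(host).netloc.split(':')[0]

print(normalize_tower_host('tower.example.com:8043'))
# ('https://tower.example.com:8043', 'tower.example.com')
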
Ejemplo n.º 36
0
    def transform_ansible_unicode_to_str(value):
        parsed_url = urlparse(str(value))
        if OpenTelemetrySource.is_valid_url(parsed_url):
            return OpenTelemetrySource.redact_user_password(
                parsed_url).geturl()
        return str(value)
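
The redact_user_password helper is not shown in this example; a minimal stand-in built on ParseResult._replace, assuming the goal is simply to mask any user:password pair in the netloc:

from urllib.parse import urlparse

def redact_user_password(parts):
    # Rebuild the netloc without credentials when a username is present.
    if parts.username:
        host = parts.hostname or ''
        if parts.port:
            host = '%s:%d' % (host, parts.port)
        parts = parts._replace(netloc='****:****@' + host)
    return parts

print(redact_user_password(urlparse('https://user:secret@example.com:8443/db')).geturl())
# https://****:****@example.com:8443/db
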
Ejemplo n.º 37
0
    def http_request(self, req):
        tmp_ca_cert_path, paths_checked = self.get_ca_certs()
        https_proxy = os.environ.get('https_proxy')
        context = None
        if HAS_SSLCONTEXT:
            context = self._make_context(tmp_ca_cert_path)

        # Detect if 'no_proxy' environment variable is set and if our URL is included
        use_proxy = self.detect_no_proxy(req.get_full_url())

        if not use_proxy:
            # ignore proxy settings for this host request
            return req

        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if https_proxy:
                proxy_parts = generic_urlparse(urlparse(https_proxy))
                port = proxy_parts.get('port') or 443
                s.connect((proxy_parts.get('hostname'), port))
                if proxy_parts.get('scheme') == 'http':
                    s.sendall(self.CONNECT_COMMAND %
                              (self.hostname, self.port))
                    if proxy_parts.get('username'):
                        credentials = "%s:%s" % (proxy_parts.get(
                            'username', ''), proxy_parts.get('password', ''))
                        s.sendall('Proxy-Authorization: Basic %s\r\n' %
                                  credentials.encode('base64').strip())
                    s.sendall('\r\n')
                    connect_result = s.recv(4096)
                    self.validate_proxy_response(connect_result)
                    if context:
                        ssl_s = context.wrap_socket(
                            s, server_hostname=self.hostname)
                    elif HAS_URLLIB3_SNI_SUPPORT:
                        ssl_s = ssl_wrap_socket(s,
                                                ca_certs=tmp_ca_cert_path,
                                                cert_reqs=ssl.CERT_REQUIRED,
                                                ssl_version=PROTOCOL,
                                                server_hostname=self.hostname)
                    else:
                        ssl_s = ssl.wrap_socket(s,
                                                ca_certs=tmp_ca_cert_path,
                                                cert_reqs=ssl.CERT_REQUIRED,
                                                ssl_version=PROTOCOL)
                        match_hostname(ssl_s.getpeercert(), self.hostname)
                else:
                    raise ProxyError(
                        'Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.'
                        % proxy_parts.get('scheme'))
            else:
                s.connect((self.hostname, self.port))
                if context:
                    ssl_s = context.wrap_socket(s,
                                                server_hostname=self.hostname)
                elif HAS_URLLIB3_SNI_SUPPORT:
                    ssl_s = ssl_wrap_socket(s,
                                            ca_certs=tmp_ca_cert_path,
                                            cert_reqs=ssl.CERT_REQUIRED,
                                            ssl_version=PROTOCOL,
                                            server_hostname=self.hostname)
                else:
                    ssl_s = ssl.wrap_socket(s,
                                            ca_certs=tmp_ca_cert_path,
                                            cert_reqs=ssl.CERT_REQUIRED,
                                            ssl_version=PROTOCOL)
                    match_hostname(ssl_s.getpeercert(), self.hostname)
            # close the ssl connection
            #ssl_s.unwrap()
            s.close()
        except (ssl.SSLError, socket.error):
            e = get_exception()
            # fail if we tried all of the certs but none worked
            if 'connection refused' in str(e).lower():
                raise ConnectionError('Failed to connect to %s:%s.' %
                                      (self.hostname, self.port))
            else:
                build_ssl_validation_error(self.hostname, self.port,
                                           paths_checked)
        except CertificateError:
            build_ssl_validation_error(self.hostname, self.port, paths_checked)

        try:
            # cleanup the temp file created, don't worry
            # if it fails for some reason
            os.remove(tmp_ca_cert_path)
        except Exception:
            pass

        return req
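
The tunnel setup above relies on an HTTP CONNECT request (the snippet is Python 2 era, hence the str-based encode('base64')). A sketch of just the request bytes with the modern base64 module; the exact CONNECT_COMMAND format used by the class is an assumption here:

import base64

def build_connect_request(host, port, username=None, password=None):
    # CONNECT asks the proxy to open a raw TCP tunnel to host:port.
    lines = ['CONNECT %s:%d HTTP/1.0' % (host, port)]
    if username:
        creds = ('%s:%s' % (username, password or '')).encode('utf-8')
        lines.append('Proxy-Authorization: Basic %s' % base64.b64encode(creds).decode('ascii'))
    return ('\r\n'.join(lines) + '\r\n\r\n').encode('ascii')

print(build_connect_request('internal.example.com', 443, 'user', 'pw').decode())
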
Ejemplo n.º 38
0
    def intersight_call(self,
                        http_method="",
                        resource_path="",
                        query_params=None,
                        body=None,
                        moid=None,
                        name=None):
        """
        Invoke the Intersight API

        :param resource_path: intersight resource path e.g. '/ntp/Policies'
        :param query_params: dictionary object with query string parameters as key/value pairs
        :param body: dictionary object with intersight data
        :param moid: intersight object moid
        :param name: intersight object name
        :return: json http response object
        """

        target_host = urlparse(self.host).netloc
        target_path = urlparse(self.host).path
        query_path = ""
        method = http_method.upper()
        bodyString = ""

        # Verify an accepted HTTP verb was chosen
        if (method not in ['GET', 'POST', 'PATCH', 'DELETE']):
            raise ValueError(
                'Please select a valid HTTP verb (GET/POST/PATCH/DELETE)')

        # Verify the resource path isn't empty & is a valid <str> object
        if (not resource_path or not isinstance(resource_path, str)):
            raise TypeError(
                'The *resource_path* value is required and must be of type "<str>"'
            )

        # Verify the query parameters, if provided, are a valid <dict> object
        if (query_params is not None and not isinstance(query_params, dict)):
            raise TypeError(
                'The *query_params* value must be of type "<dict>"')

        # Verify the MOID is not null & of proper length
        if (moid is not None and len(moid.encode('utf-8')) != 24):
            raise ValueError('Invalid *moid* value!')

        # Check for query_params, encode, and concatenate onto URL
        if query_params:
            query_path = "?" + urlencode(query_params)

        # Handle PATCH/DELETE by Object "name" instead of "moid"
        if method in ('PATCH', 'DELETE'):
            if moid is None:
                if name is not None:
                    if isinstance(name, str):
                        moid = self.get_moid_by_name(resource_path, name)
                    else:
                        raise TypeError(
                            'The *name* value must be of type "<str>"')
                else:
                    raise ValueError(
                        'Must set either *moid* or *name* with "PATCH/DELETE!"'
                    )

        # Check for moid and concatenate onto URL
        if moid is not None:
            resource_path += "/" + moid

        # Check for GET request to properly form body
        if method != "GET":
            bodyString = json.dumps(body)

        # Concatenate URLs for headers
        target_url = self.host + resource_path + query_path
        request_target = method.lower(
        ) + " " + target_path + resource_path + query_path

        # Get the current GMT Date/Time
        cdate = get_gmt_date()

        # Generate the body digest
        body_digest = get_sha256_digest(bodyString)
        b64_body_digest = b64encode(body_digest.digest())

        # Generate the authorization header
        auth_header = {
            'Host': target_host,
            'Date': cdate,
            'Digest': "SHA-256=" + b64_body_digest.decode('ascii'),
        }

        string_to_sign = prepare_str_to_sign(request_target, auth_header)
        b64_signed_msg = self.get_sig_b64encode(string_to_sign)
        auth_header = self.get_auth_header(auth_header, b64_signed_msg)

        # Generate the HTTP requests header
        request_header = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Host': '{0}'.format(target_host),
            'Date': '{0}'.format(cdate),
            'Digest': 'SHA-256={0}'.format(b64_body_digest.decode('ascii')),
            'Authorization': '{0}'.format(auth_header),
        }

        response, info = fetch_url(self.module,
                                   target_url,
                                   data=bodyString,
                                   headers=request_header,
                                   method=method,
                                   use_proxy=self.module.params['use_proxy'])

        return response, info
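
The Digest header assembled above pairs a SHA-256 hash of the exact body string with base64. A minimal sketch of that step alone, using hashlib in place of the module's get_sha256_digest helper:

import hashlib
import json
from base64 import b64encode

def digest_header(body=None):
    # Hash the exact JSON string that will be sent ("" for GET requests),
    # then base64-encode the raw digest bytes.
    body_string = json.dumps(body) if body is not None else ''
    digest = hashlib.sha256(body_string.encode('utf-8')).digest()
    return 'SHA-256=' + b64encode(digest).decode('ascii')

print(digest_header({'Name': 'ntp-policy'}))
print(digest_header())  # empty-body digest, as sent with GET
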
Ejemplo n.º 39
0
    def get_collection_versions(self, namespace, name):
        """
        Gets a list of available versions for a collection on a Galaxy server.

        :param namespace: The collection namespace.
        :param name: The collection name.
        :return: A list of versions that are available.
        """
        relative_link = False
        if 'v3' in self.available_api_versions:
            api_path = self.available_api_versions['v3']
            pagination_path = ['links', 'next']
            relative_link = True  # AH pagination results are relative and not an absolute URI.
        else:
            api_path = self.available_api_versions['v2']
            pagination_path = ['next']

        page_size_name = 'limit' if 'v3' in self.available_api_versions else 'page_size'
        versions_url = _urljoin(
            self.api_server, api_path, 'collections', namespace, name,
            'versions', '/?%s=%d' % (page_size_name, COLLECTION_PAGE_SIZE))
        versions_url_info = urlparse(versions_url)

        # We should only rely on the cache if the collection has not changed. This may slow things down but it ensures
        # we are not waiting a day before finding any new collections that have been published.
        if self._cache:
            server_cache = self._cache.setdefault(get_cache_id(versions_url),
                                                  {})
            modified_cache = server_cache.setdefault('modified', {})

            try:
                modified_date = self.get_collection_metadata(namespace,
                                                             name).modified_str
            except GalaxyError as err:
                if err.http_code != 404:
                    raise
                # No collection found, return an empty list to keep things consistent with the various APIs
                return []

            cached_modified_date = modified_cache.get(
                '%s.%s' % (namespace, name), None)
            if cached_modified_date != modified_date:
                modified_cache['%s.%s' % (namespace, name)] = modified_date
                if versions_url_info.path in server_cache:
                    del server_cache[versions_url_info.path]

                self._set_cache()

        error_context_msg = 'Error when getting available collection versions for %s.%s from %s (%s)' \
                            % (namespace, name, self.name, self.api_server)

        try:
            data = self._call_galaxy(versions_url,
                                     error_context_msg=error_context_msg,
                                     cache=True)
        except GalaxyError as err:
            if err.http_code != 404:
                raise
            # v3 doesn't raise a 404 so we need to mimic the empty response from APIs that do.
            return []

        if 'data' in data:
            # v3 automation-hub is the only known API that uses `data`
            # since v3 pulp_ansible does not, we cannot rely on version
            # to indicate which key to use
            results_key = 'data'
        else:
            results_key = 'results'

        versions = []
        while True:
            versions += [v['version'] for v in data[results_key]]

            next_link = data
            for path in pagination_path:
                next_link = next_link.get(path, {})

            if not next_link:
                break
            elif relative_link:
                # TODO: This assumes the pagination result is relative to the root server. Will need to be verified
                # with someone who knows the AH API.

                # Remove the query string from the versions_url to use the next_link's query
                versions_url = urljoin(versions_url,
                                       urlparse(versions_url).path)
                next_link = versions_url.replace(versions_url_info.path,
                                                 next_link)

            data = self._call_galaxy(to_native(next_link,
                                               errors='surrogate_or_strict'),
                                     error_context_msg=error_context_msg,
                                     cache=True)
        self._set_cache()

        return versions
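
The relative-link branch above splices the next link over the old path rather than calling urljoin on it directly, so the new query string survives. The same rewrite in isolation, with a hypothetical hub URL:

from urllib.parse import urljoin, urlparse

versions_url = ('https://hub.example.com/api/galaxy/v3/'
                'collections/ns/name/versions/?limit=100')
next_link = '/api/galaxy/v3/collections/ns/name/versions/?limit=100&offset=100'

path = urlparse(versions_url).path
base = urljoin(versions_url, path)    # drop the old query string
print(base.replace(path, next_link))  # splice in the relative next link
# https://hub.example.com/api/galaxy/v3/collections/ns/name/versions/?limit=100&offset=100
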
Ejemplo n.º 40
0
    def _call_galaxy(self,
                     url,
                     args=None,
                     headers=None,
                     method=None,
                     auth_required=False,
                     error_context_msg=None,
                     cache=False):
        url_info = urlparse(url)
        cache_id = get_cache_id(url)
        query = parse_qs(url_info.query)
        if cache and self._cache:
            server_cache = self._cache.setdefault(cache_id, {})
            iso_datetime_format = '%Y-%m-%dT%H:%M:%SZ'

            valid = False
            if url_info.path in server_cache:
                expires = datetime.datetime.strptime(
                    server_cache[url_info.path]['expires'],
                    iso_datetime_format)
                valid = datetime.datetime.utcnow() < expires

            is_paginated_url = 'page' in query or 'offset' in query
            if valid and not is_paginated_url:
                # Got a hit on the cache and we aren't getting a paginated response
                path_cache = server_cache[url_info.path]
                if path_cache.get('paginated'):
                    if '/v3/' in url_info.path:
                        res = {'links': {'next': None}}
                    else:
                        res = {'next': None}

                    # Technically some v3 paginated APIs return in 'data' but the caller checks the keys for this so
                    # always returning the cache under results is fine.
                    res['results'] = []
                    for result in path_cache['results']:
                        res['results'].append(result)

                else:
                    res = path_cache['results']

                return res

            elif not is_paginated_url:
                # The cache entry had expired or does not exist, start a new blank entry to be filled later.
                expires = datetime.datetime.utcnow()
                expires += datetime.timedelta(days=1)
                server_cache[url_info.path] = {
                    'expires': expires.strftime(iso_datetime_format),
                    'paginated': False,
                }

        headers = headers or {}
        self._add_auth_token(headers, url, required=auth_required)

        try:
            display.vvvv("Calling Galaxy at %s" % url)
            resp = open_url(to_native(url),
                            data=args,
                            validate_certs=self.validate_certs,
                            headers=headers,
                            method=method,
                            timeout=20,
                            http_agent=user_agent(),
                            follow_redirects='safe')
        except HTTPError as e:
            raise GalaxyError(e, error_context_msg)
        except Exception as e:
            raise AnsibleError(
                "Unknown error when attempting to call Galaxy at '%s': %s" %
                (url, to_native(e)))

        resp_data = to_text(resp.read(), errors='surrogate_or_strict')
        try:
            data = json.loads(resp_data)
        except ValueError:
            raise AnsibleError(
                "Failed to parse Galaxy response from '%s' as JSON:\n%s" %
                (resp.url, to_native(resp_data)))

        if cache and self._cache:
            path_cache = self._cache[cache_id][url_info.path]

            # v3 can return data or results for paginated results. Scan the result so we can determine what to cache.
            paginated_key = None
            for key in ['data', 'results']:
                if key in data:
                    paginated_key = key
                    break

            if paginated_key:
                path_cache['paginated'] = True
                results = path_cache.setdefault('results', [])
                for result in data[paginated_key]:
                    results.append(result)

            else:
                path_cache['results'] = data

        return data
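
The cache entries above stamp a one-day expiry as text so the cache stays JSON-serializable; a minimal sketch of that round trip with the same format string:

import datetime

ISO = '%Y-%m-%dT%H:%M:%SZ'

def stamp_expiry(days=1):
    return (datetime.datetime.utcnow() + datetime.timedelta(days=days)).strftime(ISO)

def is_valid(stamp):
    # The entry is usable while the stored expiry is still in the future.
    return datetime.datetime.utcnow() < datetime.datetime.strptime(stamp, ISO)

entry = {'expires': stamp_expiry(), 'paginated': False}
print(is_valid(entry['expires']))  # True until a day has passed
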
Ejemplo n.º 41
0
def main():

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            force=dict(required=False, default='no', type='bool'),
            policy=dict(required=False, default=None, type='json'),
            name=dict(required=True, type='str'),
            requester_pays=dict(default='no', type='bool'),
            s3_url=dict(aliases=['S3_URL'], type='str'),
            state=dict(default='present', type='str', choices=['present', 'absent']),
            tags=dict(required=False, default=None, type='dict'),
            versioning=dict(default=None, type='bool'),
            ceph=dict(default='no', type='bool')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    s3_url = module.params.get('s3_url')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    ceph = module.params.get('ceph')

    if ceph and not s3_url:
        module.fail_json(msg='ceph flavour requires s3_url')

    flavour = 'aws'

    # bucket names with .'s in them need to use the calling_format option,
    # otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
    # for more details.
    aws_connect_params['calling_format'] = OrdinaryCallingFormat()

    # Look at s3_url and tweak connection settings
    # if connecting to Walrus or fakes3
    try:
        if s3_url and ceph:
            ceph = urlparse.urlparse(s3_url)
            connection = boto.connect_s3(
                host=ceph.hostname,
                port=ceph.port,
                is_secure=ceph.scheme == 'https',
                **aws_connect_params
            )
            flavour = 'ceph'
        elif is_fakes3(s3_url):
            fakes3 = urlparse.urlparse(s3_url)
            connection = S3Connection(
                is_secure=fakes3.scheme == 'fakes3s',
                host=fakes3.hostname,
                port=fakes3.port,
                **aws_connect_params
            )
        elif is_walrus(s3_url):
            del aws_connect_params['calling_format']
            walrus = urlparse.urlparse(s3_url).hostname
            connection = boto.connect_walrus(walrus, **aws_connect_params)
        else:
            connection = boto.s3.connect_to_region(location, is_secure=True, **aws_connect_params)
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            if connection is None:
                connection = boto.connect_s3(**aws_connect_params)

    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
    except Exception as e:
        module.fail_json(msg='Failed to connect to S3: %s' % str(e))

    if connection is None:  # this should never happen
        module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')

    state = module.params.get("state")

    if state == 'present':
        create_or_update_bucket(connection, module, location, flavour=flavour)
    elif state == 'absent':
        destroy_bucket(connection, module, flavour=flavour)
Ejemplo n.º 42
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            group_id=dict(required=True),
            artifact_id=dict(required=True),
            version=dict(default="latest"),
            classifier=dict(default=''),
            extension=dict(default='jar'),
            repository_url=dict(default=None),
            username=dict(default=None, aliases=['aws_secret_key']),
            password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
            state=dict(default="present", choices=["present", "absent"]),  # TODO - Implement a "latest" state
            timeout=dict(default=10, type='int'),
            dest=dict(type="path", required=True),
            validate_certs=dict(required=False, default=True, type='bool'),
            keep_name=dict(required=False, default=False, type='bool'),
            verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always'])
        ),
        add_file_common_args=True
    )

    if not HAS_LXML_ETREE:
        module.fail_json(msg='module requires the lxml python library installed on the managed machine')

    repository_url = module.params["repository_url"]
    if not repository_url:
        repository_url = "http://repo1.maven.org/maven2"
    try:
        parsed_url = urlparse(repository_url)
    except AttributeError as e:
        module.fail_json(msg='url parsing went wrong %s' % e)

    local = parsed_url.scheme == "file"

    if parsed_url.scheme == 's3' and not HAS_BOTO:
        module.fail_json(msg='boto3 required for this module, when using s3:// repository URLs')

    group_id = module.params["group_id"]
    artifact_id = module.params["artifact_id"]
    version = module.params["version"]
    classifier = module.params["classifier"]
    extension = module.params["extension"]
    state = module.params["state"]
    dest = module.params["dest"]
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    keep_name = module.params["keep_name"]
    verify_checksum = module.params["verify_checksum"]
    verify_download = verify_checksum in ['download', 'always']
    verify_change = verify_checksum in ['change', 'always']

    downloader = MavenDownloader(module, repository_url, local)

    try:
        artifact = Artifact(group_id, artifact_id, version, classifier, extension)
    except ValueError as e:
        module.fail_json(msg=e.args[0])

    changed = False
    prev_state = "absent"

    if dest.endswith(os.sep):
        b_dest = to_bytes(dest, errors='surrogate_or_strict')
        if not os.path.exists(b_dest):
            (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest)
            os.makedirs(b_dest)
            directory_args = module.load_file_common_arguments(module.params)
            directory_mode = module.params["directory_mode"]
            if directory_mode is not None:
                directory_args['mode'] = directory_mode
            else:
                directory_args['mode'] = None
            changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)

    if os.path.isdir(b_dest):
        version_part = version
        if keep_name and version == 'latest':
            version_part = downloader.find_latest_version_available(artifact)

        if classifier:
            dest = posixpath.join(dest, "%s-%s-%s.%s" % (artifact_id, version_part, classifier, extension))
        else:
            dest = posixpath.join(dest, "%s-%s.%s" % (artifact_id, version_part, extension))
        b_dest = to_bytes(dest, errors='surrogate_or_strict')

    if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_md5(dest, downloader.find_uri_for_artifact(artifact))):
        prev_state = "present"

    if prev_state == "absent":
        try:
            download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest)
            if download_error is None:
                changed = True
            else:
                module.fail_json(msg="Cannot retrieve the artifact to destination: " + download_error)
        except ValueError as e:
            module.fail_json(msg=e.args[0])

    module.params['dest'] = dest
    file_args = module.load_file_common_arguments(module.params)
    changed = module.set_fs_attributes_if_different(file_args, changed)
    if changed:
        module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier,
                         extension=extension, repository_url=repository_url, changed=changed)
    else:
        module.exit_json(state=state, dest=dest, changed=changed)
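
The destination filename built above follows the Maven naming convention artifactId-version[-classifier].extension; a standalone sketch of just that formatting, with illustrative coordinates:

import posixpath

def artifact_filename(dest_dir, artifact_id, version, extension, classifier=''):
    # <artifactId>-<version>[-<classifier>].<extension>, joined onto the directory.
    if classifier:
        name = '%s-%s-%s.%s' % (artifact_id, version, classifier, extension)
    else:
        name = '%s-%s.%s' % (artifact_id, version, extension)
    return posixpath.join(dest_dir, name)

print(artifact_filename('/tmp/libs', 'commons-lang3', '3.12.0', 'jar'))
# /tmp/libs/commons-lang3-3.12.0.jar
print(artifact_filename('/tmp/libs', 'commons-lang3', '3.12.0', 'jar', 'sources'))
# /tmp/libs/commons-lang3-3.12.0-sources.jar
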
Ejemplo n.º 43
0
def main():

    module = AnsibleModule(argument_spec=dict(
        group_id=dict(default=None),
        artifact_id=dict(default=None),
        version=dict(default="latest"),
        classifier=dict(default=None),
        extension=dict(default='jar'),
        repository_url=dict(default=None),
        username=dict(default=None, aliases=['aws_secret_key']),
        password=dict(
            default=None, no_log=True, aliases=['aws_secret_access_key']),
        state=dict(default="present", choices=["present", "absent"]
                   ),  # TODO - Implement a "latest" state
        timeout=dict(default=10, type='int'),
        dest=dict(type="path", default=None),
        validate_certs=dict(required=False, default=True, type='bool'),
        keep_name=dict(required=False, default=False, type='bool'),
    ))

    repository_url = module.params["repository_url"]
    if not repository_url:
        repository_url = "http://repo1.maven.org/maven2"

    try:
        parsed_url = urlparse(repository_url)
    except AttributeError as e:
        module.fail_json(msg='url parsing went wrong %s' % e)

    if parsed_url.scheme == 's3' and not HAS_BOTO:
        module.fail_json(
            msg=
            'boto3 required for this module, when using s3:// repository URLs')

    group_id = module.params["group_id"]
    artifact_id = module.params["artifact_id"]
    version = module.params["version"]
    classifier = module.params["classifier"]
    extension = module.params["extension"]
    state = module.params["state"]
    dest = module.params["dest"]
    keep_name = module.params["keep_name"]

    #downloader = MavenDownloader(module, repository_url, repository_username, repository_password)
    downloader = MavenDownloader(module, repository_url)

    try:
        artifact = Artifact(group_id, artifact_id, version, classifier,
                            extension)
    except ValueError as e:
        module.fail_json(msg=e.args[0])

    prev_state = "absent"
    if os.path.isdir(dest):
        version_part = version
        if keep_name and version == 'latest':
            version_part = downloader.find_latest_version_available(artifact)
        dest = posixpath.join(
            dest, "%s-%s.%s" % (artifact_id, version_part, extension))
    if os.path.lexists(dest) and downloader.verify_md5(
            dest,
            downloader.find_uri_for_artifact(artifact) + '.md5'):
        prev_state = "present"
    else:
        path = os.path.dirname(dest)
        if not os.path.exists(path):
            os.makedirs(path)

    if prev_state == "present":
        module.exit_json(dest=dest, state=state, changed=False)

    try:
        if downloader.download(artifact, dest):
            module.exit_json(state=state,
                             dest=dest,
                             group_id=group_id,
                             artifact_id=artifact_id,
                             version=version,
                             classifier=classifier,
                             extension=extension,
                             repository_url=repository_url,
                             changed=True)
        else:
            module.fail_json(msg="Unable to download the artifact")
    except ValueError as e:
        module.fail_json(msg=e.args[0])
Ejemplo n.º 44
0
    def __init__(self,
                 derived_arg_spec,
                 bypass_checks=False,
                 no_log=False,
                 check_invalid_arguments=None,
                 mutually_exclusive=None,
                 required_together=None,
                 required_one_of=None,
                 add_file_common_args=False,
                 supports_check_mode=False,
                 required_if=None,
                 supports_tags=True,
                 facts_module=False,
                 skip_exec=False):

        merged_arg_spec = dict()
        merged_arg_spec.update(AZURE_COMMON_ARGS)
        if supports_tags:
            merged_arg_spec.update(AZURE_TAG_ARGS)

        if derived_arg_spec:
            merged_arg_spec.update(derived_arg_spec)

        merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
        if required_if:
            merged_required_if += required_if

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            bypass_checks=bypass_checks,
            no_log=no_log,
            check_invalid_arguments=check_invalid_arguments,
            mutually_exclusive=mutually_exclusive,
            required_together=required_together,
            required_one_of=required_one_of,
            add_file_common_args=add_file_common_args,
            supports_check_mode=supports_check_mode,
            required_if=merged_required_if)

        if not HAS_PACKAGING_VERSION:
            self.fail(
                "Do you have packaging installed? Try `pip install packaging` "
                "- {0}".format(HAS_PACKAGING_VERSION_EXC))

        if not HAS_MSRESTAZURE:
            self.fail(
                "Do you have msrestazure installed? Try `pip install msrestazure` "
                "- {0}".format(HAS_MSRESTAZURE_EXC))

        if not HAS_AZURE:
            self.fail(
                "Do you have azure>={1} installed? Try `pip install ansible[azure]` "
                "- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))

        self._cloud_environment = None
        self._network_client = None
        self._storage_client = None
        self._resource_client = None
        self._compute_client = None
        self._dns_client = None
        self._web_client = None
        self._containerservice_client = None
        self._mysql_client = None
        self._postgresql_client = None
        self._adfs_authority_url = None
        self._resource = None

        self.check_mode = self.module.check_mode
        self.api_profile = self.module.params.get('api_profile')
        self.facts_module = facts_module

        # authenticate
        self.credentials = self._get_credentials(self.module.params)
        if not self.credentials:
            if HAS_AZURE_CLI_CORE:
                self.fail(
                    "Failed to get credentials. Either pass as parameters, set environment variables, "
                    "define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`)."
                )
            else:
                self.fail(
                    "Failed to get credentials. Either pass as parameters, set environment variables, "
                    "define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`)."
                )

        # cert validation mode precedence: module-arg, credential profile, env, "validate"
        self._cert_validation_mode = self.module.params['cert_validation_mode'] or self.credentials.get('cert_validation_mode') or \
            os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'

        if self._cert_validation_mode not in ['validate', 'ignore']:
            self.fail('invalid cert_validation_mode: {0}'.format(
                self._cert_validation_mode))

        # if cloud_environment specified, look up/build Cloud object
        raw_cloud_env = self.credentials.get('cloud_environment')
        if self.credentials.get(
                'credentials') is not None and raw_cloud_env is not None:
            self._cloud_environment = raw_cloud_env
        elif not raw_cloud_env:
            self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD  # SDK default
        else:
            # try to look up "well-known" values via the name attribute on azure_cloud members
            all_clouds = [
                x[1] for x in inspect.getmembers(azure_cloud)
                if isinstance(x[1], azure_cloud.Cloud)
            ]
            matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
            if len(matched_clouds) == 1:
                self._cloud_environment = matched_clouds[0]
            elif len(matched_clouds) > 1:
                self.fail(
                    "Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'"
                    .format(raw_cloud_env))
            else:
                if not urlparse.urlparse(raw_cloud_env).scheme:
                    self.fail(
                        "cloud_environment must be an endpoint discovery URL or one of {0}"
                        .format([x.name for x in all_clouds]))
                try:
                    self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(
                        raw_cloud_env)
                except Exception as e:
                    self.fail(
                        "cloud_environment {0} could not be resolved: {1}".
                        format(raw_cloud_env, str(e)),
                        exception=traceback.format_exc())

        if self.credentials.get(
                'subscription_id',
                None) is None and self.credentials.get('credentials') is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        # get authentication authority
        # for adfs, user could pass in authority or not.
        # for others, use default authority from cloud environment
        if self.credentials.get('adfs_authority_url') is None:
            self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
        else:
            self._adfs_authority_url = self.credentials.get(
                'adfs_authority_url')

        # get resource from cloud environment
        self._resource = self._cloud_environment.endpoints.active_directory_resource_id

        if self.credentials.get('credentials') is not None:
            # AzureCLI credentials
            self.azure_credentials = self.credentials['credentials']
        elif self.credentials.get('client_id') is not None and \
                self.credentials.get('secret') is not None and \
                self.credentials.get('tenant') is not None:
            self.azure_credentials = ServicePrincipalCredentials(
                client_id=self.credentials['client_id'],
                secret=self.credentials['secret'],
                tenant=self.credentials['tenant'],
                cloud_environment=self._cloud_environment,
                verify=self._cert_validation_mode == 'validate')

        elif self.credentials.get('ad_user') is not None and \
                self.credentials.get('password') is not None and \
                self.credentials.get('client_id') is not None and \
                self.credentials.get('tenant') is not None:

            self.azure_credentials = self.acquire_token_with_username_password(
                self._adfs_authority_url, self._resource,
                self.credentials['ad_user'], self.credentials['password'],
                self.credentials['client_id'], self.credentials['tenant'])

        elif self.credentials.get(
                'ad_user') is not None and self.credentials.get(
                    'password') is not None:
            tenant = self.credentials.get('tenant')
            if not tenant:
                tenant = 'common'  # SDK default

            self.azure_credentials = UserPassCredentials(
                self.credentials['ad_user'],
                self.credentials['password'],
                tenant=tenant,
                cloud_environment=self._cloud_environment,
                verify=self._cert_validation_mode == 'validate')
        else:
            self.fail(
                "Failed to authenticate with provided credentials. Some attributes were missing. "
                "Credentials must include client_id, secret and tenant or ad_user and password, or "
                "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
                "be logged in using AzureCLI.")

        # common parameter validation
        if self.module.params.get('tags'):
            self.validate_tags(self.module.params['tags'])

        if not skip_exec:
            res = self.exec_module(**self.module.params)
            self.module.exit_json(**res)
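
The cert validation precedence above leans on `or` short-circuiting across the four sources; a minimal sketch with stand-in values:

import os

def pick_cert_validation_mode(module_arg=None, profile_value=None):
    # First truthy source wins: module arg, credentials profile,
    # environment variable, then the 'validate' default.
    mode = (module_arg or profile_value or
            os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate')
    if mode not in ('validate', 'ignore'):
        raise ValueError('invalid cert_validation_mode: {0}'.format(mode))
    return mode

print(pick_cert_validation_mode())                        # validate (default)
print(pick_cert_validation_mode(profile_value='ignore'))  # ignore (profile beats env)
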
Ejemplo n.º 45
0
    def __init__(self, args):
        self._args = args
        self._cloud_environment = None
        self._compute_client = None
        self._resource_client = None
        self._network_client = None

        self.debug = False
        if args.debug:
            self.debug = True

        self.credentials = self._get_credentials(args)
        if not self.credentials:
            self.fail(
                "Failed to get credentials. Either pass as parameters, set environment variables, "
                "or define a profile in ~/.azure/credentials.")

        # if cloud_environment specified, look up/build Cloud object
        raw_cloud_env = self.credentials.get('cloud_environment')
        if not raw_cloud_env:
            self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD  # SDK default
        else:
            # try to look up "well-known" values via the name attribute on azure_cloud members
            all_clouds = [
                x[1] for x in inspect.getmembers(azure_cloud)
                if isinstance(x[1], azure_cloud.Cloud)
            ]
            matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
            if len(matched_clouds) == 1:
                self._cloud_environment = matched_clouds[0]
            elif len(matched_clouds) > 1:
                self.fail(
                    "Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'"
                    .format(raw_cloud_env))
            else:
                if not urlparse.urlparse(raw_cloud_env).scheme:
                    self.fail(
                        "cloud_environment must be an endpoint discovery URL or one of {0}"
                        .format([x.name for x in all_clouds]))
                try:
                    self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(
                        raw_cloud_env)
                except Exception as e:
                    self.fail(
                        "cloud_environment {0} could not be resolved: {1}".
                        format(raw_cloud_env, str(e)))

        if self.credentials.get('subscription_id', None) is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        if self.credentials.get('client_id') is not None and \
           self.credentials.get('secret') is not None and \
           self.credentials.get('tenant') is not None:
            self.azure_credentials = ServicePrincipalCredentials(
                client_id=self.credentials['client_id'],
                secret=self.credentials['secret'],
                tenant=self.credentials['tenant'],
                cloud_environment=self._cloud_environment)
        elif self.credentials.get(
                'ad_user') is not None and self.credentials.get(
                    'password') is not None:
            tenant = self.credentials.get('tenant')
            if not tenant:
                tenant = 'common'
            self.azure_credentials = UserPassCredentials(
                self.credentials['ad_user'],
                self.credentials['password'],
                tenant=tenant,
                cloud_environment=self._cloud_environment)
        else:
            self.fail(
                "Failed to authenticate with provided credentials. Some attributes were missing. "
                "Credentials must include client_id, secret and tenant or ad_user and password."
            )
Ejemplo n.º 46
0
    def serialize_vm(self, vm):
        '''
        Convert a VirtualMachine object to dict.

        :param vm: VirtualMachine object
        :return: dict
        '''

        result = self.serialize_obj(vm,
                                    AZURE_OBJECT_CLASS,
                                    enum_modules=AZURE_ENUM_MODULES)
        resource_group = parse_resource_id(result['id']).get('resource_group')
        instance = None
        power_state = None

        try:
            instance = self.compute_client.virtual_machines.instance_view(
                resource_group, vm.name)
            instance = self.serialize_obj(instance,
                                          AZURE_OBJECT_CLASS,
                                          enum_modules=AZURE_ENUM_MODULES)
        except Exception as exc:
            self.fail(
                "Error getting virtual machine {0} instance view - {1}".format(
                    vm.name, str(exc)))

        for index in range(len(instance['statuses'])):
            code = instance['statuses'][index]['code'].split('/')
            if code[0] == 'PowerState':
                power_state = code[1]
            elif code[0] == 'OSState' and code[1] == 'generalized':
                power_state = 'generalized'
                break

        new_result = {}
        new_result['power_state'] = power_state
        new_result['id'] = vm.id
        new_result['resource_group'] = resource_group
        new_result['name'] = vm.name
        new_result['state'] = 'present'
        new_result['location'] = vm.location
        new_result['vm_size'] = result['properties']['hardwareProfile'][
            'vmSize']
        os_profile = result['properties'].get('osProfile')
        if os_profile is not None:
            new_result['admin_username'] = os_profile.get('adminUsername')
        image = result['properties']['storageProfile'].get('imageReference')
        if image is not None:
            if image.get('publisher', None) is not None:
                new_result['image'] = {
                    'publisher': image['publisher'],
                    'sku': image['sku'],
                    'offer': image['offer'],
                    'version': image['version']
                }
            else:
                new_result['image'] = {'id': image.get('id', None)}

        new_result['boot_diagnostics'] = {
            'enabled':
            'diagnosticsProfile' in result['properties']
            and 'bootDiagnostics' in result['properties']['diagnosticsProfile']
            and result['properties']['diagnosticsProfile']['bootDiagnostics']
            ['enabled'] or False,
            'storage_uri':
            'diagnosticsProfile' in result['properties']
            and 'bootDiagnostics' in result['properties']['diagnosticsProfile']
            and result['properties']['diagnosticsProfile']['bootDiagnostics']
            ['storageUri'] or None
        }
        if new_result['boot_diagnostics']['enabled']:
            new_result['boot_diagnostics']['console_screenshot_uri'] = result[
                'properties']['instanceView']['bootDiagnostics'][
                    'consoleScreenshotBlobUri']
            new_result['boot_diagnostics']['serial_console_log_uri'] = result[
                'properties']['instanceView']['bootDiagnostics'][
                    'serialConsoleLogBlobUri']

        vhd = result['properties']['storageProfile']['osDisk'].get('vhd')
        if vhd is not None:
            url = urlparse(vhd['uri'])
            new_result['storage_account_name'] = url.netloc.split('.')[0]
            new_result['storage_container_name'] = url.path.split('/')[1]
            new_result['storage_blob_name'] = url.path.split('/')[-1]

        new_result['os_disk_caching'] = result['properties']['storageProfile'][
            'osDisk']['caching']
        new_result['os_type'] = result['properties']['storageProfile'][
            'osDisk']['osType']
        new_result['data_disks'] = []
        disks = result['properties']['storageProfile']['dataDisks']
        for disk_index in range(len(disks)):
            new_result['data_disks'].append({
                'lun':
                disks[disk_index].get('lun'),
                'disk_size_gb':
                disks[disk_index].get('diskSizeGB'),
                'managed_disk_type':
                disks[disk_index].get('managedDisk',
                                      {}).get('storageAccountType'),
                'caching':
                disks[disk_index].get('caching')
            })

        new_result['network_interface_names'] = []
        nics = result['properties']['networkProfile']['networkInterfaces']
        for nic_index in range(len(nics)):
            new_result['network_interface_names'].append(
                re.sub('.*networkInterfaces/', '', nics[nic_index]['id']))

        new_result['tags'] = vm.tags
        return new_result
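
The VHD handling near the end recovers the storage account, container and blob name from a blob-storage URL; the same split on a hypothetical URI:

from urllib.parse import urlparse

url = urlparse('https://mystorageacct.blob.core.windows.net/vhds/myvm-osdisk.vhd')
storage_account_name = url.netloc.split('.')[0]    # mystorageacct
storage_container_name = url.path.split('/')[1]    # vhds
storage_blob_name = url.path.split('/')[-1]        # myvm-osdisk.vhd
print(storage_account_name, storage_container_name, storage_blob_name)
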
Ejemplo n.º 47
0
    def __init__(self, url):
        parts = urlparse(url)
        _query = frozenset(parse_qs(parts.query))
        _path = unquote_plus(parts.path)
        self.parts = parts._replace(query=_query, path=_path)
Ejemplo n.º 48
0
    def __init__(self,
                 argument_spec,
                 direct_params=None,
                 error_callback=None,
                 warn_callback=None,
                 **kwargs):
        full_argspec = {}
        full_argspec.update(TowerModule.AUTH_ARGSPEC)
        full_argspec.update(argument_spec)
        kwargs['supports_check_mode'] = True

        self.error_callback = error_callback
        self.warn_callback = warn_callback

        self.json_output = {'changed': False}

        if direct_params is not None:
            self.params = direct_params
        else:
            super(TowerModule, self).__init__(argument_spec=full_argspec,
                                              **kwargs)

        self.load_config_files()

        # Parameters specified on command line will override settings in any config
        for short_param, long_param in self.short_params.items():
            direct_value = self.params.get(long_param)
            if direct_value is not None:
                setattr(self, short_param, direct_value)

        # Perform magic depending on whether tower_oauthtoken is a string or a dict
        if self.params.get('tower_oauthtoken'):
            token_param = self.params.get('tower_oauthtoken')
            if type(token_param) is dict:
                if 'token' in token_param:
                    self.oauth_token = self.params.get(
                        'tower_oauthtoken')['token']
                else:
                    self.fail_json(
                        msg=
                        "The provided dict in tower_oauthtoken did not properly contain the token entry"
                    )
            elif isinstance(token_param, string_types):
                self.oauth_token = self.params.get('tower_oauthtoken')
            else:
                error_msg = "The provided tower_oauthtoken type was not valid ({0}). Valid options are str or dict.".format(
                    type(token_param).__name__)
                self.fail_json(msg=error_msg)

        # Perform some basic validation
        if not re.match('^https{0,1}://', self.host):
            self.host = "https://{0}".format(self.host)

        # Try to parse the hostname as a url
        try:
            self.url = urlparse(self.host)
        except Exception as e:
            self.fail_json(
                msg="Unable to parse tower_host as a URL ({1}): {0}".format(
                    self.host, e))

        # Try to resolve the hostname
        hostname = self.url.netloc.split(':')[0]
        try:
            gethostbyname(hostname)
        except Exception as e:
            self.fail_json(
                msg="Unable to resolve tower_host ({1}): {0}".format(
                    hostname, e))

        self.session = Request(cookies=CookieJar(),
                               validate_certs=self.verify_ssl)
Ejemplo n.º 49
0
def terminate_virtual_machine(module, azure):
    """
    Terminates a virtual machine

    module : AnsibleModule object
    azure: authenticated azure ServiceManagementService object

    Returns:
        True if a new virtual machine was deleted, false otherwise
    """

    # Whether to wait for termination to complete before returning
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    name = module.params.get('name')
    delete_empty_services = module.params.get('delete_empty_services')

    changed = False

    deployment = None
    public_dns_name = None
    disk_names = []
    try:
        deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
    except AzureMissingException as e:
        pass  # no such deployment or service
    except AzureException as e:
        module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))

    # Delete deployment
    if deployment:
        changed = True
        try:
            # gather disk info
            results = []
            for role in deployment.role_list:
                role_props = azure.get_role(name, deployment.name, role.role_name)
                if role_props.os_virtual_hard_disk.disk_name not in disk_names:
                    disk_names.append(role_props.os_virtual_hard_disk.disk_name)
        except AzureException as e:
            module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e)))

        try:
            result = azure.delete_deployment(name, deployment.name)
            _wait_for_completion(azure, result, wait_timeout, "delete_deployment")
        except AzureException as e:
            module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))

        # It's unclear when disks associated with terminated deployment get detached.
        # Thus, until the wait_timeout is reached, we continue to delete disks as they
        # become detached by polling the list of remaining disks and examining the state.
        try:
            _delete_disks_when_detached(azure, wait_timeout, disk_names)
        except (AzureException, TimeoutError) as e:
            module.fail_json(msg=str(e))

        try:
            # Now that the vm is deleted, remove the cloud service
            result = azure.delete_hosted_service(service_name=name)
            _wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
        except AzureException as e:
            module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
        public_dns_name = urlparse(deployment.url).hostname

    return changed, public_dns_name, deployment
Ejemplo n.º 50
0
    def test_urlparse(self):
        actual = urlparse("http://test.com/here")
        self.assertEqual("http", actual.scheme)
        self.assertEqual("test.com", actual.netloc)
        self.assertEqual("/here", actual.path)
Ejemplo n.º 51
0
def update_firmware_redfish(idrac, module):
    """Update firmware from a network share and return the job details."""
    msg = {}
    msg['changed'], msg['failed'] = False, False
    msg['update_msg'] = "Successfully triggered the job to update the firmware."
    try:
        share_name = module.params['share_name']
        catalog_file_name = module.params['catalog_file_name']
        share_user = module.params['share_user']
        share_pwd = module.params['share_password']
        reboot = module.params['reboot']
        job_wait = module.params['job_wait']
        ignore_cert_warning = module.params['ignore_cert_warning']
        apply_update = module.params['apply_update']
        payload = {
            "RebootNeeded": reboot,
            "CatalogFile": catalog_file_name,
            "ApplyUpdate": str(apply_update),
            "IgnoreCertWarning": CERT_WARN[ignore_cert_warning]
        }
        if share_user is not None:
            payload['UserName'] = share_user
        if share_pwd is not None:
            payload['Password'] = share_pwd

        if share_name.lower().startswith(('http://', 'https://', 'ftp://')):
            msg['update_status'], job_details = update_firmware_url_redfish(
                module, idrac, share_name, catalog_file_name, apply_update,
                reboot, ignore_cert_warning, job_wait, payload)
            if job_details:
                msg['update_status']['job_details'] = job_details
        else:
            if share_name.startswith('\\\\'):
                cifs = share_name.split('\\')
                payload['IPAddress'] = cifs[2]
                payload['ShareName'] = '\\'.join(cifs[3:])
                payload['ShareType'] = 'CIFS'
            else:
                # NFS shares are written as '<server>:/<path>'; on the Python
                # versions this module targets, urlparse() reports the text
                # before the first ':' as the scheme, i.e. the server address.
                nfs = urlparse(share_name)
                payload['IPAddress'] = nfs.scheme
                payload['ShareName'] = nfs.path.strip('/')
                payload['ShareType'] = 'NFS'
            resp = idrac.invoke_request(PATH, method="POST", data=payload)
            job_id = get_jobid(module, resp)
            resp, mesg = wait_for_job_completion(module,
                                                 JOB_URI.format(job_id=job_id),
                                                 job_wait, reboot,
                                                 apply_update)
            if not mesg:
                msg['update_status'] = resp.json_data
            else:
                msg['update_status'] = mesg
            repo_based_update_list = idrac.invoke_request(
                GET_REPO_BASED_UPDATE_LIST_PATH,
                method="POST",
                data="{}",
                dump=False)
            msg['update_status'][
                'job_details'] = repo_based_update_list.json_data

        json_data, repo_status, failed = msg['update_status'][
            'job_details'], False, False
        if "PackageList" not in json_data:
            job_data = json_data.get('Data')
            pkglst = job_data['body'] if 'body' in job_data else job_data.get(
                'GetRepoBasedUpdateList_OUTPUT')
            if 'PackageList' in pkglst:
                pkglst[
                    'PackageList'], repo_status, failed = _convert_xmltojson(
                        module, pkglst, idrac)
        else:
            json_data['PackageList'], repo_status, failed = _convert_xmltojson(
                module, json_data, None)

        if not apply_update and not failed:
            msg['update_msg'] = "Successfully fetched the applicable firmware update package list."
        elif apply_update and not reboot and not job_wait and not failed:
            msg['update_msg'] = "Successfully triggered the job to stage the firmware."
        elif apply_update and job_wait and not reboot and not failed:
            msg['update_msg'] = "Successfully staged the applicable firmware update packages."
            msg['changed'] = True
        elif apply_update and job_wait and not reboot and failed:
            msg['update_msg'] = "Successfully staged the applicable firmware update packages with error(s)."
            msg['failed'] = True

    except RuntimeError as e:
        module.fail_json(msg=str(e))

    if module.check_mode and not (json_data.get('PackageList') or json_data.get('Data')) and \
            msg['update_status']['JobStatus'] == 'OK':
        module.exit_json(msg="No changes found to commit!")
    elif module.check_mode and (json_data.get('PackageList') or json_data.get('Data')) and \
            msg['update_status']['JobStatus'] == 'OK':
        module.exit_json(msg="Changes found to commit!",
                         changed=True,
                         update_status=msg['update_status'])
    elif module.check_mode and not msg['update_status']['JobStatus'] == 'OK':
        msg['update_status'].pop('job_details')
        module.fail_json(
            msg="Unable to complete the firmware repository download.",
            update_status=msg['update_status'])
    elif not module.check_mode and "JobStatus" in msg['update_status']:
        if not msg['update_status']['JobStatus'] == "Critical":
            if module.params['job_wait'] and module.params['apply_update'] and module.params['reboot'] and \
                    ('job_details' in msg['update_status'] and repo_status) and not failed:
                msg['changed'] = True
                msg['update_msg'] = "Successfully updated the firmware."
            elif module.params['job_wait'] and module.params['apply_update'] and module.params['reboot'] and \
                    ('job_details' in msg['update_status'] and repo_status) and failed:
                msg['failed'], msg['changed'] = True, False
                msg['update_msg'] = "Firmware update failed."
        else:
            failed_msg = "Firmware update failed."
            if not apply_update:
                failed_msg = "Unable to complete the repository update."
            module.fail_json(msg=failed_msg,
                             update_status=msg['update_status'])
    return msg
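The CIFS branch above is plain string handling and easy to check in isolation; a small runnable illustration (the UNC path is made up):

# Hypothetical UNC path; the literal below is \\192.168.0.2\firmware\catalog
# once Python's backslash escaping is resolved.
share_name = '\\\\192.168.0.2\\firmware\\catalog'
cifs = share_name.split('\\')
# cifs == ['', '', '192.168.0.2', 'firmware', 'catalog']
print(cifs[2])              # '192.168.0.2'     -> payload['IPAddress']
print('\\'.join(cifs[3:]))  # firmware\catalog  -> payload['ShareName']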
Ejemplo n.º 52
0
def _is_http_url(tested_str):
    return urlparse(tested_str).scheme.lower() in {'http', 'https'}
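A few quick checks of this helper, assuming the same urlparse import used throughout these examples:

assert _is_http_url('https://example.com/path')
assert _is_http_url('HTTP://example.com')     # .lower() makes the check case-insensitive
assert not _is_http_url('ftp://example.com')
assert not _is_http_url('example.com')        # no scheme at all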
Ejemplo n.º 53
0
    def http_request(self, req):
        tmp_ca_cert_path, paths_checked = self.get_ca_certs()
        https_proxy = os.environ.get('https_proxy')
        context = None
        if HAS_SSLCONTEXT:
            context = self._make_context(tmp_ca_cert_path)

        # Detect if 'no_proxy' environment variable is set and if our URL is included
        use_proxy = self.detect_no_proxy(req.get_full_url())

        if not use_proxy:
            # ignore proxy settings for this host request
            return req

        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if https_proxy:
                proxy_parts = generic_urlparse(urlparse(https_proxy))
                port = proxy_parts.get('port') or 443
                s.connect((proxy_parts.get('hostname'), port))
                if proxy_parts.get('scheme') == 'http':
                    s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
                    if proxy_parts.get('username'):
                        credentials = "%s:%s" % (proxy_parts.get('username',''), proxy_parts.get('password',''))
                        s.sendall('Proxy-Authorization: Basic %s\r\n' % credentials.encode('base64').strip())
                    s.sendall('\r\n')
                    connect_result = s.recv(4096)
                    self.validate_proxy_response(connect_result)
                    if context:
                        ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
                    elif HAS_URLLIB3_SNI_SUPPORT:
                        ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
                    else:
                        ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
                        match_hostname(ssl_s.getpeercert(), self.hostname)
                else:
                    raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
            else:
                s.connect((self.hostname, self.port))
                if context:
                    ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
                elif HAS_URLLIB3_SNI_SUPPORT:
                    ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
                else:
                    ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
                    match_hostname(ssl_s.getpeercert(), self.hostname)
            # close the ssl connection
            #ssl_s.unwrap()
            s.close()
        except (ssl.SSLError, socket.error):
            e = get_exception()
            # fail if we tried all of the certs but none worked
            if 'connection refused' in str(e).lower():
                raise ConnectionError('Failed to connect to %s:%s.' % (self.hostname, self.port))
            else:
                build_ssl_validation_error(self.hostname, self.port, paths_checked)
        except CertificateError:
            build_ssl_validation_error(self.hostname, self.port, paths_checked)

        try:
            # cleanup the temp file created, don't worry
            # if it fails for some reason
            os.remove(tmp_ca_cert_path)
        except OSError:
            pass

        return req
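The `CONNECT_COMMAND` template this method fills in is defined elsewhere in the module; it is presumably a plain HTTP CONNECT preamble along these lines:

# Hypothetical reconstruction of the constant used by http_request above:
CONNECT_COMMAND = 'CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n'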
Ejemplo n.º 54
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            group_id=dict(required=True),
            artifact_id=dict(required=True),
            version=dict(default=None),
            version_by_spec=dict(default=None),
            classifier=dict(default=''),
            extension=dict(default='jar'),
            repository_url=dict(default='https://repo1.maven.org/maven2'),
            username=dict(default=None, aliases=['aws_secret_key']),
            password=dict(default=None,
                          no_log=True,
                          aliases=['aws_secret_access_key']),
            headers=dict(type='dict'),
            force_basic_auth=dict(default=False, type='bool'),
            state=dict(default="present",
                       choices=["present", "absent"
                                ]),  # TODO - Implement a "latest" state
            timeout=dict(default=10, type='int'),
            dest=dict(type="path", required=True),
            validate_certs=dict(required=False, default=True, type='bool'),
            client_cert=dict(type="path", required=False),
            client_key=dict(type="path", required=False),
            keep_name=dict(required=False, default=False, type='bool'),
            verify_checksum=dict(
                required=False,
                default='download',
                choices=['never', 'download', 'change', 'always']),
            directory_mode=dict(type='str'),
        ),
        add_file_common_args=True,
        mutually_exclusive=([('version', 'version_by_spec')]))

    if not HAS_LXML_ETREE:
        module.fail_json(msg=missing_required_lib('lxml'),
                         exception=LXML_ETREE_IMP_ERR)

    if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION:
        module.fail_json(msg=missing_required_lib('semantic_version'),
                         exception=SEMANTIC_VERSION_IMP_ERR)

    repository_url = module.params["repository_url"]
    if not repository_url:
        repository_url = "https://repo1.maven.org/maven2"
    try:
        parsed_url = urlparse(repository_url)
    except AttributeError as e:
        module.fail_json(msg='url parsing went wrong %s' % e)

    local = parsed_url.scheme == "file"

    if parsed_url.scheme == 's3' and not HAS_BOTO:
        module.fail_json(msg=missing_required_lib(
            'boto3', reason='when using s3:// repository URLs'),
                         exception=BOTO_IMP_ERR)

    group_id = module.params["group_id"]
    artifact_id = module.params["artifact_id"]
    version = module.params["version"]
    version_by_spec = module.params["version_by_spec"]
    classifier = module.params["classifier"]
    extension = module.params["extension"]
    headers = module.params['headers']
    state = module.params["state"]
    dest = module.params["dest"]
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    keep_name = module.params["keep_name"]
    verify_checksum = module.params["verify_checksum"]
    verify_download = verify_checksum in ['download', 'always']
    verify_change = verify_checksum in ['change', 'always']

    downloader = MavenDownloader(module, repository_url, local, headers)

    if not version_by_spec and not version:
        version = "latest"

    try:
        artifact = Artifact(group_id, artifact_id, version, version_by_spec,
                            classifier, extension)
    except ValueError as e:
        module.fail_json(msg=e.args[0])

    changed = False
    prev_state = "absent"

    if dest.endswith(os.sep):
        b_dest = to_bytes(dest, errors='surrogate_or_strict')
        if not os.path.exists(b_dest):
            (pre_existing_dir,
             new_directory_list) = split_pre_existing_dir(dest)
            os.makedirs(b_dest)
            directory_args = module.load_file_common_arguments(module.params)
            directory_mode = module.params["directory_mode"]
            if directory_mode is not None:
                directory_args['mode'] = directory_mode
            else:
                directory_args['mode'] = None
            changed = adjust_recursive_directory_permissions(
                pre_existing_dir, new_directory_list, module, directory_args,
                changed)

    if os.path.isdir(b_dest):
        version_part = version
        if version == 'latest':
            version_part = downloader.find_latest_version_available(artifact)
        elif version_by_spec:
            version_part = downloader.find_version_by_spec(artifact)

        filename = "{artifact_id}{version_part}{classifier}.{extension}".format(
            artifact_id=artifact_id,
            version_part="-{0}".format(version_part) if keep_name else "",
            classifier="-{0}".format(classifier) if classifier else "",
            extension=extension)
        dest = posixpath.join(dest, filename)

        b_dest = to_bytes(dest, errors='surrogate_or_strict')

    if os.path.lexists(b_dest) and (
        (not verify_change) or not downloader.is_invalid_md5(
            dest, downloader.find_uri_for_artifact(artifact))):
        prev_state = "present"

    if prev_state == "absent":
        try:
            download_error = downloader.download(module.tmpdir, artifact,
                                                 verify_download, b_dest)
            if download_error is None:
                changed = True
            else:
                module.fail_json(
                    msg="Cannot retrieve the artifact to destination: " +
                    download_error)
        except ValueError as e:
            module.fail_json(msg=e.args[0])

    try:
        file_args = module.load_file_common_arguments(module.params, path=dest)
    except TypeError:
        # The path argument is only supported in Ansible-base 2.10+. Fall back to
        # pre-2.10 behavior for older Ansible versions.
        module.params['path'] = dest
        file_args = module.load_file_common_arguments(module.params)
    changed = module.set_fs_attributes_if_different(file_args, changed)
    if changed:
        module.exit_json(state=state,
                         dest=dest,
                         group_id=group_id,
                         artifact_id=artifact_id,
                         version=version,
                         classifier=classifier,
                         extension=extension,
                         repository_url=repository_url,
                         changed=changed)
    else:
        module.exit_json(state=state, dest=dest, changed=changed)
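When `dest` is a directory, the destination filename is assembled from the artifact coordinates; note the version is embedded only when `keep_name` is set. A quick illustration of that format string with made-up coordinates:

fmt = "{artifact_id}{version_part}{classifier}.{extension}"
print(fmt.format(artifact_id="commons-lang3", version_part="-3.12.0",
                 classifier="", extension="jar"))          # commons-lang3-3.12.0.jar
print(fmt.format(artifact_id="commons-lang3", version_part="",
                 classifier="-sources", extension="jar"))  # commons-lang3-sources.jar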
Ejemplo n.º 55
0
def open_url(url, data=None, headers=None, method=None, use_proxy=True,
             force=False, last_mod_time=None, timeout=10, validate_certs=True,
             url_username=None, url_password=None, http_agent=None,
             force_basic_auth=False, follow_redirects='urllib2'):
    '''
    Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)

    Does not require the module environment
    '''
    handlers = []
    ssl_handler = maybe_add_ssl_handler(url, validate_certs)
    if ssl_handler:
        handlers.append(ssl_handler)

    # FIXME: change the following to use the generic_urlparse function
    #        to remove the indexed references for 'parsed'
    parsed = urlparse(url)
    if parsed[0] != 'ftp':
        username = url_username

        if headers is None:
            headers = {}

        if username:
            password = url_password
            netloc = parsed[1]
        elif '@' in parsed[1]:
            credentials, netloc = parsed[1].split('@', 1)
            if ':' in credentials:
                username, password = credentials.split(':', 1)
            else:
                username = credentials
                password = ''

            parsed = list(parsed)
            parsed[1] = netloc

            # reconstruct url without credentials
            url = urlunparse(parsed)

        if username and not force_basic_auth:
            passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()

            # this creates a password manager
            passman.add_password(None, netloc, username, password)

            # because we have put None at the start it will always
            # use this username/password combination for URLs
            # for which the requested URL is a super-url
            authhandler = urllib_request.HTTPBasicAuthHandler(passman)

            # create the AuthHandler
            handlers.append(authhandler)

        elif username and force_basic_auth:
            headers["Authorization"] = basic_auth_header(username, password)

        else:
            try:
                rc = netrc.netrc(os.environ.get('NETRC'))
                login = rc.authenticators(parsed[1])
            except IOError:
                login = None

            if login:
                username, _, password = login
                if username and password:
                    headers["Authorization"] = basic_auth_header(username, password)

    if not use_proxy:
        proxyhandler = urllib_request.ProxyHandler({})
        handlers.append(proxyhandler)

    if HAS_SSLCONTEXT and not validate_certs:
        # In 2.7.9, the default context validates certificates
        context = SSLContext(ssl.PROTOCOL_SSLv23)
        context.options |= ssl.OP_NO_SSLv2
        context.options |= ssl.OP_NO_SSLv3
        context.verify_mode = ssl.CERT_NONE
        context.check_hostname = False
        handlers.append(urllib_request.HTTPSHandler(context=context))

    # pre-2.6 versions of python cannot use the custom https
    # handler, since the socket class is lacking create_connection.
    # Some python builds lack HTTPS support.
    if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
        handlers.append(CustomHTTPSHandler)

    handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs))

    opener = urllib_request.build_opener(*handlers)
    urllib_request.install_opener(opener)

    if method:
        if method.upper() not in ('OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT', 'PATCH'):
            raise ConnectionError('invalid HTTP request method; %s' % method.upper())
        request = RequestWithMethod(url, method.upper(), data)
    else:
        request = urllib_request.Request(url, data)

    # add the custom agent header, to help prevent issues
    # with sites that block the default urllib agent string
    request.add_header('User-agent', http_agent)

    # if we're ok with getting a 304, set the timestamp in the
    # header, otherwise make sure we don't get a cached copy
    if last_mod_time and not force:
        tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
        request.add_header('If-Modified-Since', tstamp)
    else:
        request.add_header('cache-control', 'no-cache')

    # user defined headers now, which may override things we've set above
    if headers:
        if not isinstance(headers, dict):
            raise ValueError("headers provided to fetch_url() must be a dict")
        for header in headers:
            request.add_header(header, headers[header])

    urlopen_args = [request, None]
    if sys.version_info >= (2,6,0):
        # urlopen in python prior to 2.6.0 did not
        # have a timeout parameter
        urlopen_args.append(timeout)

    r = urllib_request.urlopen(*urlopen_args)
    return r
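The inline-credential handling above (splitting `user:pass@host` out of the netloc and rebuilding the URL without it) can be reproduced with the stdlib alone; a small self-contained sketch using the Python 3 names:

from urllib.parse import urlparse, urlunparse

parsed = list(urlparse('https://user:secret@example.com/path'))
credentials, netloc = parsed[1].split('@', 1)
username, password = credentials.split(':', 1)
parsed[1] = netloc  # drop the userinfo from the netloc
print(urlunparse(parsed))  # https://example.com/path
print(username, password)  # user secret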
Ejemplo n.º 56
0
def main():

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            force=dict(required=False, default='no', type='bool'),
            policy=dict(required=False, default=None, type='json'),
            name=dict(required=True, type='str'),
            requester_pays=dict(default='no', type='bool'),
            s3_url=dict(aliases=['S3_URL'], type='str'),
            state=dict(default='present', type='str', choices=['present', 'absent']),
            tags=dict(required=False, default=None, type='dict'),
            versioning=dict(default=None, type='bool'),
            ceph=dict(default='no', type='bool')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    s3_url = module.params.get('s3_url')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    ceph = module.params.get('ceph')

    if ceph and not s3_url:
        module.fail_json(msg='ceph flavour requires s3_url')

    flavour = 'aws'

    # bucket names with .'s in them need to use the calling_format option,
    # otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
    # for more details.
    aws_connect_params['calling_format'] = OrdinaryCallingFormat()

    # Look at s3_url and tweak connection settings
    # if connecting to Walrus or fakes3
    try:
        if s3_url and ceph:
            ceph = urlparse.urlparse(s3_url)
            connection = boto.connect_s3(
                host=ceph.hostname,
                port=ceph.port,
                is_secure=ceph.scheme == 'https',
                **aws_connect_params
            )
            flavour = 'ceph'
        elif is_fakes3(s3_url):
            fakes3 = urlparse.urlparse(s3_url)
            connection = S3Connection(
                is_secure=fakes3.scheme == 'fakes3s',
                host=fakes3.hostname,
                port=fakes3.port,
                **aws_connect_params
            )
        elif is_walrus(s3_url):
            del aws_connect_params['calling_format']
            walrus = urlparse.urlparse(s3_url).hostname
            connection = boto.connect_walrus(walrus, **aws_connect_params)
        else:
            connection = boto.s3.connect_to_region(location, is_secure=True, **aws_connect_params)
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            if connection is None:
                connection = boto.connect_s3(**aws_connect_params)

    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
    except Exception as e:
        module.fail_json(msg='Failed to connect to S3: %s' % str(e))

    if connection is None:  # this should never happen
        module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')

    state = module.params.get("state")

    if state == 'present':
        create_or_update_bucket(connection, module, location, flavour=flavour)
    elif state == 'absent':
        destroy_bucket(connection, module, flavour=flavour)
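The `is_fakes3` and `is_walrus` helpers referenced above are defined elsewhere in the module; hedged sketches of what they presumably look like, matching how `s3_url` is used in this example:

def is_fakes3(s3_url):
    # fakes3 endpoints are written with a fakes3:// or fakes3s:// scheme
    if s3_url is not None:
        return urlparse.urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
    return False

def is_walrus(s3_url):
    # anything that is not an amazonaws.com endpoint is treated as Walrus
    if s3_url is not None:
        o = urlparse.urlparse(s3_url)
        return not o.hostname.endswith('amazonaws.com')
    return False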
Ejemplo n.º 57
0
def main():
    # Validates the dependence of the utility module
    if HAS_IDG_DEPS:
        module_args = dict(
            backup=dict(type='bool', required=False,
                        default=False),  # Create a backup file
            domain=dict(type='str', required=True),  # Domain name
            src=dict(type='str',
                     required=True),  # Local path to a file or directory
            dest=dict(type='str', required=True),  # Remote absolute path
            recursive=dict(type='bool', required=False,
                           default=False),  # Download recursively
            idg_connection=dict(type='dict',
                                options=idg_endpoint_spec,
                                required=True)  # IDG connection
        )

        # AnsibleModule instantiation
        module = AnsibleModule(argument_spec=module_args,
                               supports_check_mode=True)
    else:
        # Failure AnsibleModule instance
        module = AnsibleModule(argument_spec={}, check_invalid_arguments=False)
        module.fail_json(msg="The IDG utils modules is required")

    # Parse arguments to dict
    idg_data_spec = IDGUtils.parse_to_dict(module,
                                           module.params['idg_connection'],
                                           'IDGConnection',
                                           IDGUtils.ANSIBLE_VERSION)
    domain_name = module.params['domain']
    backup = module.params['backup']
    recursive = module.params['recursive']

    tmp_dir = ''  # Directory for processing on the control host

    src = module.params['src']

    dest = module.params['dest']
    _dest_parse = urlparse(dest)
    _dest_ldir = _dest_parse.scheme  # Local directory
    if _dest_ldir + ':' not in IDGUtils.IDG_DIRS:
        module.fail_json(
            msg="Base directory of the destination file {0} is not a valid DataPower filestore directory.".format(dest))
    _dest_path_list = [
        d for d in _dest_parse.path.split('/') if d.strip() != ''
    ]

    # Init IDG API connect
    idg_mgmt = IDGApi(ansible_module=module,
                      idg_host="https://{0}:{1}".format(
                          idg_data_spec['server'],
                          idg_data_spec['server_port']),
                      headers=IDGUtils.BASIC_HEADERS,
                      http_agent=IDGUtils.HTTP_AGENT_SPEC,
                      use_proxy=idg_data_spec['use_proxy'],
                      timeout=idg_data_spec['timeout'],
                      validate_certs=idg_data_spec['validate_certs'],
                      user=idg_data_spec['user'],
                      password=idg_data_spec['password'],
                      force_basic_auth=IDGUtils.BASIC_AUTH_SPEC)

    # Intermediate values for result
    tmp_result = {
        "msg": IDGUtils.COMPLETED_MESSAGE,
        "domain": domain_name,
        "backup_file": None,
        "changed": True
    }

    #
    # Here the action begins
    #

    try:
        remote_home_path = '/'.join(
            [IDGApi.URI_FILESTORE.format(domain_name), _dest_ldir] +
            _dest_path_list)
        idg_path = '/'.join([_dest_ldir + ':'] + _dest_path_list)

        if os.path.isdir(src):  # The source is a directory

            # If the user is working in only check mode we do not want to make any changes
            IDGUtils.implement_check_mode(module)

            if recursive:
                for home, subdirs, files in os.walk(
                        src):  # Loop over directory

                    home_dir = home.strip('/').split(
                        os.sep)[-1 *
                                ((len(home.strip('/').split(os.sep)) -
                                  len(src.strip('/').split(os.sep))) + 1):]
                    remote_home_path_uri = '/'.join(
                        [remote_home_path] + home_dir)  # Update root path
                    idg_path_upd = '/'.join([idg_path] +
                                            home_dir)  # Update path inside IDG

                    create_directory(module, idg_mgmt, remote_home_path_uri,
                                     domain_name)

                    for file_name in files:  # files in home

                        uri_file = '/'.join([remote_home_path_uri,
                                             file_name])  # Update URI for file
                        remote_file = '/'.join([idg_path_upd, file_name
                                                ])  # Update path inside IDG

                        if backup:  # Backup required
                            dummy = do_backup(module, idg_mgmt, uri_file,
                                              remote_file, domain_name)

                        local_file_path = os.path.join(home, file_name)
                        upload_file(module, idg_mgmt, local_file_path,
                                    uri_file, domain_name)

            else:  # Not recursive
                for home, dummy, files in os.walk(src):  # Loop over directory

                    home_dir = home.split(os.sep)[-1]
                    remote_home_path = '/'.join([remote_home_path,
                                                 home_dir])  # Update root path
                    idg_path = '/'.join([idg_path,
                                         home_dir])  # Update path inside IDG

                    create_directory(module, idg_mgmt, remote_home_path,
                                     domain_name)

                    for file_name in files:  # files in home

                        uri_file = '/'.join([remote_home_path,
                                             file_name])  # Update URI for file
                        remote_file = '/'.join([idg_path,
                                                file_name])  # Path inside IDG

                        if backup:  # check backup
                            dummy = do_backup(module, idg_mgmt, uri_file,
                                              remote_file, domain_name)

                        local_file_path = os.path.join(home, file_name)
                        upload_file(module, idg_mgmt, local_file_path,
                                    uri_file, domain_name)

                    break  # Prevent continue touring directories

        elif os.path.isfile(src):  # The source is a local file

            file_name = src.split(os.sep)[-1]
            uri_file = '/'.join([remote_home_path,
                                 file_name])  # Update URI for file
            remote_file = '/'.join([idg_path, file_name])  # Path inside IDG

            idg_mgmt.api_call(remote_home_path,
                              method='GET',
                              id="get_remote_path")

            if idg_mgmt.is_ok(idg_mgmt.last_call()) or idg_mgmt.is_notfound(
                    idg_mgmt.last_call()):

                # If the user is working in only check mode we do not want to make any changes
                IDGUtils.implement_check_mode(module)

                # Is not a directory, or not found
                if ('filestore' not in idg_mgmt.last_call()["data"]
                        or idg_mgmt.is_notfound(idg_mgmt.last_call())):
                    create_directory(module, idg_mgmt, remote_home_path,
                                     domain_name)

                if backup:  # check backup
                    tmp_result["backup_file"] = do_backup(
                        module, idg_mgmt, uri_file, remote_file, domain_name)

                upload_file(module, idg_mgmt, src, uri_file,
                            domain_name)  # Upload file

            else:
                # Other Errors
                module.fail_json(msg=IDGApi.GENERAL_STATELESS_ERROR.format(
                    __MODULE_FULLNAME, domain_name) + str(
                        ErrorHandler(
                            idg_mgmt.call_by_id("get_remote_path")["data"]
                            ['error'])))

        else:
            module.fail_json(msg='Source "{0}" is not supported.'.format(src))

        #
        # Finish
        #
        # Customize
        del result['name']
        # Update
        for k, v in tmp_result.items():
            if v is not None:
                result[k] = v

    except Exception as e:
        # Uncontrolled exception
        module.fail_json(msg=(IDGUtils.UNCONTROLLED_EXCEPTION +
                              '. {0}').format(to_native(e)))
    else:
        # That's all folks!
        module.exit_json(**result)
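The destination handling near the top relies on urlparse() treating the DataPower filestore prefix as a URL scheme; a runnable illustration with the stdlib and a made-up 'local:' destination (DataPower's 'local:' directory is an assumption here):

from urllib.parse import urlparse

dest = urlparse('local:/config/backups')
print(dest.scheme)  # 'local' -> the filestore prefix checked against IDGUtils.IDG_DIRS
print([d for d in dest.path.split('/') if d.strip() != ''])  # ['config', 'backups']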
Ejemplo n.º 58
0
    def http_request(self, req):
        tmp_ca_cert_path, to_add_ca_cert_path, paths_checked = self.get_ca_certs(
        )
        https_proxy = os.environ.get('https_proxy')
        context = None
        try:
            context = self._make_context(to_add_ca_cert_path)
        except Exception:
            # We'll make do with no context below
            pass

        # Detect if 'no_proxy' environment variable is set and if our URL is included
        use_proxy = self.detect_no_proxy(req.get_full_url())

        if not use_proxy:
            # ignore proxy settings for this host request
            if tmp_ca_cert_path:
                try:
                    os.remove(tmp_ca_cert_path)
                except OSError:
                    pass
            if to_add_ca_cert_path:
                try:
                    os.remove(to_add_ca_cert_path)
                except OSError:
                    pass
            return req

        try:
            if https_proxy:
                proxy_parts = generic_urlparse(urlparse(https_proxy))
                port = proxy_parts.get('port') or 443
                s = socket.create_connection(
                    (proxy_parts.get('hostname'), port))
                if proxy_parts.get('scheme') == 'http':
                    s.sendall(
                        to_bytes(self.CONNECT_COMMAND %
                                 (self.hostname, self.port),
                                 errors='surrogate_or_strict'))
                    if proxy_parts.get('username'):
                        credentials = "%s:%s" % (proxy_parts.get(
                            'username', ''), proxy_parts.get('password', ''))
                        s.sendall(
                            b'Proxy-Authorization: Basic %s\r\n' %
                            base64.b64encode(
                                to_bytes(
                                    credentials,
                                    errors='surrogate_or_strict')).strip())
                    s.sendall(b'\r\n')
                    connect_result = b""
                    while connect_result.find(b"\r\n\r\n") <= 0:
                        connect_result += s.recv(4096)
                        # 128 kilobytes of headers should be enough for everyone.
                        if len(connect_result) > 131072:
                            raise ProxyError(
                                'Proxy sent too verbose headers. Only 128KiB allowed.'
                            )
                    self.validate_proxy_response(connect_result)
                    if context:
                        ssl_s = context.wrap_socket(
                            s, server_hostname=self.hostname)
                    elif HAS_URLLIB3_SSL_WRAP_SOCKET:
                        ssl_s = ssl_wrap_socket(s,
                                                ca_certs=tmp_ca_cert_path,
                                                cert_reqs=ssl.CERT_REQUIRED,
                                                ssl_version=PROTOCOL,
                                                server_hostname=self.hostname)
                    else:
                        ssl_s = ssl.wrap_socket(s,
                                                ca_certs=tmp_ca_cert_path,
                                                cert_reqs=ssl.CERT_REQUIRED,
                                                ssl_version=PROTOCOL)
                        match_hostname(ssl_s.getpeercert(), self.hostname)
                else:
                    raise ProxyError(
                        'Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.'
                        % proxy_parts.get('scheme'))
            else:
                s = socket.create_connection((self.hostname, self.port))
                if context:
                    ssl_s = context.wrap_socket(s,
                                                server_hostname=self.hostname)
                elif HAS_URLLIB3_SSL_WRAP_SOCKET:
                    ssl_s = ssl_wrap_socket(s,
                                            ca_certs=tmp_ca_cert_path,
                                            cert_reqs=ssl.CERT_REQUIRED,
                                            ssl_version=PROTOCOL,
                                            server_hostname=self.hostname)
                else:
                    ssl_s = ssl.wrap_socket(s,
                                            ca_certs=tmp_ca_cert_path,
                                            cert_reqs=ssl.CERT_REQUIRED,
                                            ssl_version=PROTOCOL)
                    match_hostname(ssl_s.getpeercert(), self.hostname)
            # close the ssl connection
            # ssl_s.unwrap()
            s.close()
        except (ssl.SSLError, CertificateError) as e:
            build_ssl_validation_error(self.hostname, self.port, paths_checked,
                                       e)
        except socket.error as e:
            raise ConnectionError('Failed to connect to %s at port %s: %s' %
                                  (self.hostname, self.port, to_native(e)))

        try:
            # cleanup the temp file created, don't worry
            # if it fails for some reason
            os.remove(tmp_ca_cert_path)
        except OSError:
            pass

        try:
            # cleanup the temp file created, don't worry
            # if it fails for some reason
            if to_add_ca_cert_path:
                os.remove(to_add_ca_cert_path)
        except OSError:
            pass

        return req
Ejemplo n.º 59
0
def transfer(connection, module, direction, transfer_func):
    transfers_service = connection.system_service().image_transfers_service()
    transfer = transfers_service.add(
        otypes.ImageTransfer(
            image=otypes.Image(
                id=module.params['id'],
            ),
            direction=direction,
        )
    )
    transfer_service = transfers_service.image_transfer_service(transfer.id)

    try:
        # After adding a new transfer for the disk, the transfer's status will be INITIALIZING.
        # Wait until the init phase is over. The actual transfer can start when its status is "Transferring".
        while transfer.phase == otypes.ImageTransferPhase.INITIALIZING:
            time.sleep(module.params['poll_interval'])
            transfer = transfer_service.get()

        proxy_url = urlparse(transfer.proxy_url)
        context = ssl.create_default_context()
        auth = module.params['auth']
        if auth.get('insecure'):
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        elif auth.get('ca_file'):
            context.load_verify_locations(cafile=auth.get('ca_file'))

        proxy_connection = HTTPSConnection(
            proxy_url.hostname,
            proxy_url.port,
            context=context,
        )

        transfer_func(
            transfer_service,
            proxy_connection,
            proxy_url,
            transfer.signed_ticket
        )
        return True
    finally:
        transfer_service.finalize()
        while transfer.phase in [
            otypes.ImageTransferPhase.TRANSFERRING,
            otypes.ImageTransferPhase.FINALIZING_SUCCESS,
        ]:
            time.sleep(module.params['poll_interval'])
            transfer = transfer_service.get()
        if transfer.phase in [
            otypes.ImageTransferPhase.UNKNOWN,
            otypes.ImageTransferPhase.FINISHED_FAILURE,
            otypes.ImageTransferPhase.FINALIZING_FAILURE,
            otypes.ImageTransferPhase.CANCELLED,
        ]:
            raise Exception(
                "Error occurred while uploading image. The transfer is in %s" % transfer.phase
            )
        if module.params.get('logical_unit'):
            disks_service = connection.system_service().disks_service()
            wait(
                service=disks_service.service(module.params['id']),
                condition=lambda d: d.status == otypes.DiskStatus.OK,
                wait=module.params['wait'],
                timeout=module.params['timeout'],
            )
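`transfer_func` is supplied by the caller and does the actual byte shuffling against the imageio proxy; a minimal download-direction sketch, assuming the proxy accepts the signed ticket as an Authorization header (as the oVirt image I/O proxy does):

def download_to_file(transfer_service, proxy_connection, proxy_url, signed_ticket,
                     path='disk.img'):
    # Stream the disk image from the imageio proxy into a local file.
    proxy_connection.request('GET', proxy_url.path,
                             headers={'Authorization': signed_ticket})
    r = proxy_connection.getresponse()
    with open(path, 'wb') as f:
        while True:
            chunk = r.read(64 * 1024)
            if not chunk:
                break
            f.write(chunk)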
Ejemplo n.º 60
0
    def parse_gcp_url(url):
        """
        Parse GCP urls and return dict of parts.

        Supported URL structures:
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME/METHOD_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME/METHOD_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME/METHOD_NAME

        :param url: GCP-generated URL, such as a selflink or resource location.
        :type url: ``str``

        :return: dictionary of parts. Includes standard components of urlparse, plus
                 GCP-specific 'service', 'api_version', 'project' and
                 'resource_name' keys. Optionally, 'zone', 'region', 'entity_name'
                 and 'method_name', if applicable.
        :rtype: ``dict``
        """

        p = urlparse.urlparse(url)
        if not p:
            return None
        else:
            # we add extra items such as
            # zone, region and resource_name
            url_parts = {}
            url_parts['scheme'] = p.scheme
            url_parts['host'] = p.netloc
            url_parts['path'] = p.path
            if p.path.find('/') == 0:
                url_parts['path'] = p.path[1:]
            url_parts['params'] = p.params
            url_parts['fragment'] = p.fragment
            url_parts['query'] = p.query
            url_parts['project'] = None
            url_parts['service'] = None
            url_parts['api_version'] = None

            path_parts = url_parts['path'].split('/')
            url_parts['service'] = path_parts[0]
            url_parts['api_version'] = path_parts[1]
            if path_parts[2] == 'projects':
                url_parts['project'] = path_parts[3]
            else:
                # invalid URL
                raise GCPInvalidURLError('unable to parse: %s' % url)

            if 'global' in path_parts:
                url_parts['global'] = True
                idx = path_parts.index('global')
                if len(path_parts) - idx == 4:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 1]
                    url_parts['entity_name'] = path_parts[idx + 2]
                    url_parts['method_name'] = path_parts[idx + 3]

                if len(path_parts) - idx == 3:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 1]
                    url_parts['entity_name'] = path_parts[idx + 2]

                if len(path_parts) - idx == 2:
                    url_parts['resource_name'] = path_parts[idx + 1]

                if len(path_parts) - idx < 2:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)

            elif 'regions' in path_parts or 'zones' in path_parts:
                idx = -1
                if 'regions' in path_parts:
                    idx = path_parts.index('regions')
                    url_parts['region'] = path_parts[idx + 1]
                else:
                    idx = path_parts.index('zones')
                    url_parts['zone'] = path_parts[idx + 1]

                if len(path_parts) - idx == 5:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]
                    url_parts['method_name'] = path_parts[idx + 4]

                if len(path_parts) - idx == 4:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]

                if len(path_parts) - idx == 3:
                    url_parts['resource_name'] = path_parts[idx + 2]

                if len(path_parts) - idx < 3:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)

            else:
                # no location in URL.
                idx = path_parts.index('projects')
                if len(path_parts) - idx == 5:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]
                    url_parts['method_name'] = path_parts[idx + 4]

                if len(path_parts) - idx == 4:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]

                if len(path_parts) - idx == 3:
                    url_parts['resource_name'] = path_parts[idx + 2]

                if len(path_parts) - idx < 3:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)

            return url_parts
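A worked example against a zonal selflink (the project, zone and instance names are made up):

url = ('https://www.googleapis.com/compute/v1/projects/my-project'
       '/zones/us-central1-a/instances/my-instance/setMachineType')
parts = parse_gcp_url(url)
# parts['service']       -> 'compute'
# parts['api_version']   -> 'v1'
# parts['project']       -> 'my-project'
# parts['zone']          -> 'us-central1-a'
# parts['resource_name'] -> 'instances'
# parts['entity_name']   -> 'my-instance'
# parts['method_name']   -> 'setMachineType'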