Example #1
 def url(self, path, **query_params):
     params = dict(parse_qsl(urlparse(path).query))
     params.update(query_params)
     url = 'http://127.0.0.1:{}{}'.format(self.port, urlparse(path).path)
     if params:
         url += '?' + urlencode(params)
     return url
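
For reference, the same merge logic as a standalone sketch (the port 8000 and the path are hypothetical):

from urllib.parse import parse_qsl, urlencode, urlparse

path = '/search?q=python'                        # hypothetical request path
params = dict(parse_qsl(urlparse(path).query))   # {'q': 'python'}
params.update({'page': 2})                       # explicit query_params win on conflicts
url = 'http://127.0.0.1:{}{}'.format(8000, urlparse(path).path)
if params:
    url += '?' + urlencode(params)
print(url)  # http://127.0.0.1:8000/search?q=python&page=2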
Example #2
 def port(self):
     netloc = urlparse(self.source_host).netloc
     if ":" in netloc:
         return int(netloc.split(":", 1)[1])
     else:
         scheme = urlparse(self.source_host).scheme or 'http'
         return {'http': 80, 'https': 443}[scheme]
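
Note that urlparse also exposes an explicit port directly via .port, which covers the first branch above; a small sketch with hypothetical hosts:

from urllib.parse import urlparse

print(urlparse('https://example.com:8443').port)  # 8443
print(urlparse('https://example.com').port)       # None -- fall back to the scheme default
print({'http': 80, 'https': 443}['https'])        # 443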
Example #3
File: args.py Project: Marius786/gitfs
 def get_ssh_user(self, args):
     url = args.remote_url
     parse_result = urlparse(url)
     if not parse_result.scheme:
         url = 'ssh://' + url
         parse_result = urlparse(url)
     return parse_result.username if parse_result.username else ""
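
The ssh:// prefix matters because urlparse only populates netloc (and thus username) when a scheme is present; a sketch with a hypothetical remote:

from urllib.parse import urlparse

print(urlparse('git@example.com/repo.git').username)        # None -- no scheme, everything is path
print(urlparse('ssh://git@example.com/repo.git').username)  # 'git'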
Example #4
def get_msg(hinfo, binding, response=False):
    if binding == BINDING_SOAP:
        msg = hinfo["data"]
    elif binding == BINDING_HTTP_POST:
        _inp = hinfo["data"][3]
        i = _inp.find(TAG1)
        i += len(TAG1) + 1
        j = _inp.find('"', i)
        msg = _inp[i:j]
    elif binding == BINDING_HTTP_ARTIFACT:
        # either by POST or by redirect
        if hinfo["data"]:
            _inp = hinfo["data"][3]
            i = _inp.find(TAG1)
            i += len(TAG1) + 1
            j = _inp.find('"', i)
            msg = _inp[i:j]
        else:
            parts = urlparse(hinfo["url"])
            msg = parse_qs(parts.query)["SAMLart"][0]
    else: # BINDING_HTTP_REDIRECT
        parts = urlparse(hinfo["headers"][0][1])
        msg = parse_qs(parts.query)["SAMLRequest"][0]

    return msg
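
In the artifact-by-redirect branch, the message is read out of the query string; in isolation, with a hypothetical artifact URL:

from urllib.parse import parse_qs, urlparse

url = 'https://sp.example.com/acs?SAMLart=AAQAAMFbLinL'  # hypothetical
parts = urlparse(url)
print(parse_qs(parts.query)['SAMLart'][0])  # AAQAAMFbLinL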
Example #5
    def _list_buckets_non_empty_helper(self, project, use_default=False):
        from six.moves.urllib.parse import parse_qs
        from six.moves.urllib.parse import urlencode
        from six.moves.urllib.parse import urlparse
        from gcloud._testing import _monkey_defaults as _base_monkey_defaults
        from gcloud.storage._testing import _monkey_defaults
        from gcloud.storage.connection import Connection
        BUCKET_NAME = 'bucket-name'
        conn = Connection()
        query_params = urlencode({'project': project, 'projection': 'noAcl'})
        BASE_URI = '/'.join([
            conn.API_BASE_URL,
            'storage',
            conn.API_VERSION,
        ])
        URI = '/'.join([BASE_URI, 'b?%s' % (query_params,)])
        http = conn._http = Http(
            {'status': '200', 'content-type': 'application/json'},
            '{{"items": [{{"name": "{0}"}}]}}'.format(BUCKET_NAME)
            .encode('utf-8'),
        )

        if use_default:
            with _base_monkey_defaults(project=project):
                with _monkey_defaults(connection=conn):
                    buckets = list(self._callFUT())
        else:
            buckets = list(self._callFUT(project=project, connection=conn))

        self.assertEqual(len(buckets), 1)
        self.assertEqual(buckets[0].name, BUCKET_NAME)
        self.assertEqual(http._called_with['method'], 'GET')
        self.assertTrue(http._called_with['uri'].startswith(BASE_URI))
        self.assertEqual(parse_qs(urlparse(http._called_with['uri']).query),
                         parse_qs(urlparse(URI).query))
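
The closing assertion compares parse_qs dictionaries rather than raw query strings, which makes the check order-insensitive:

from urllib.parse import parse_qs

print('a=1&b=2' == 'b=2&a=1')                      # False
print(parse_qs('a=1&b=2') == parse_qs('b=2&a=1'))  # True -- both {'a': ['1'], 'b': ['2']}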
Example #6
 def test_naive_detection(self):
     self.assertInvalidSERP(self.custom_serp_url)
     self.assertValidSERP(self.custom_serp_url, u'piccshare', u'test', use_naive_method=True)
     url = 'http://www.yahoo.com/#/%C2%BF??;%C2%AB99555$&&&4&'
     urlp = urlparse(url)
     self.assertInvalidSERP(urlparse(url), use_naive_method=True)
     self.assertInvalidSERP(url, use_naive_method=True)
Example #7
    def assertRedirectsNoFollow(self, response, expected_url, use_params=True,
                                status_code=302):
        """Checks response redirect without loading the destination page.

        Django's assertRedirects method loads the destination page, which
        requires that the page be renderable in the current test context
        (possibly requiring additional, unrelated setup).
        """
        # Assert that the response has the correct redirect code.
        self.assertEqual(
            response.status_code, status_code,
            "Response didn't redirect as expected: Response code was {0} "
            "(expected {1})".format(response.status_code, status_code))

        # Assert that the response redirects to the correct base URL.
        # Use force_text to force evaluation of anything created by
        # reverse_lazy.
        response_url = force_text(response['location'])
        expected_url = force_text(expected_url)
        parsed1 = urlparse(response_url)
        parsed2 = urlparse(expected_url)
        self.assertEquals(
            parsed1.path, parsed2.path,
            "Response did not redirect to the expected URL: Redirect "
            "location was {0} (expected {1})".format(parsed1.path, parsed2.path))

        # Optionally assert that the response redirect URL has the correct
        # GET parameters.
        if use_params:
            self.assertDictEqual(
                parse_qs(parsed1.query), parse_qs(parsed2.query),
                "Response did not have the GET parameters expected: GET "
                "parameters were {0} (expected "
                "{1})".format(parsed1.query or {}, parsed2.query or {}))
Example #8
 def test_source_is_linked_or_nested(self):
     authenticate(self.client, 'eric')
     response = self.client.get(self.build_list_url)
     if self.source_should_be_embedded_in_build:
         self.assertTrue(isinstance(response.data['results'][0]['source'], dict))
     else:
         urlparse(response.data['results'][0]['source'])
Example #9
    def umount_check(self, hosts):
        """
        Check for and print unmounted drives

        :param hosts: set of hosts to check, in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        unmounted = {}
        errors = {}
        recon = Scout("unmounted", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Getting unmounted drives from %s hosts..." %
              (self._ptime(), len(hosts)))
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                unmounted[url] = []
                errors[url] = []
                for i in response:
                    if not isinstance(i['mounted'], bool):
                        errors[url].append(i['device'])
                    else:
                        unmounted[url].append(i['device'])
        for host in unmounted:
            node = urlparse(host).netloc
            for entry in unmounted[host]:
                print("Not mounted: %s on %s" % (entry, node))
        for host in errors:
            node = urlparse(host).netloc
            for entry in errors[host]:
                print("Device errors: %s on %s" % (entry, node))
        print("=" * 79)
Example #10
def absolute_location(location, base):
    """Make a URL absolute (if it is relative) via the passed base URL.

    :param location: The (relative) url
    :type location: str
    :param base: The base location
    :type base: str
    :returns: An absolute URL
    :rtype: str

    """
    if location == base:
        return location

    if urlparse(location).scheme in ('http', 'https', 'file'):
        return location

    if base and urlparse(base).scheme in ('http', 'https', 'file'):
        return urljoin(base, location)
    else:
        if os.path.isabs(location):
            return location
        if base:
            return os.path.realpath(
                os.path.join(os.path.dirname(base), location))
    return location
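
A usage sketch with hypothetical URLs:

print(absolute_location('types.xsd', 'http://example.com/wsdl/service.wsdl'))
# http://example.com/wsdl/types.xsd -- resolved against the base via urljoin
print(absolute_location('http://other.example.com/x.xsd', 'http://example.com/wsdl/service.wsdl'))
# http://other.example.com/x.xsd -- already absolute, returned as-is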
Example #11
def relative_uri(source, target):
    """
    Make a relative URI from source to target.
    """
    su = urlparse.urlparse(source)
    tu = urlparse.urlparse(target)
    extra = list(tu[3:])
    relative = None
    if tu[0] == '' and tu[1] == '':
        if tu[2] == su[2]:
            relative = ''
        elif not tu[2].startswith('/'):
            relative = tu[2]
    elif su[0:2] != tu[0:2]:
        return target

    if relative is None:
        if tu[2] == su[2]:
            relative = ''
        else:
            relative = os.path.relpath(tu[2], os.path.dirname(su[2]))
    if relative == '.':
        relative = ''
    relative = urlparse.urlunparse(["", "", relative] + extra)
    return relative
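
A usage sketch with hypothetical URLs (assuming the Python 2 style urlparse module import used above):

print(relative_uri('http://host/a/b.html', 'http://host/a/c.html'))  # c.html
print(relative_uri('http://host/a/b.html', 'ftp://other/x'))         # ftp://other/x -- different scheme/netloc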
Example #12
File: actions.py Project: delfick/harpoon
def pull_arbitrary(collector, image, **kwargs):
    """Pull an arbitrary image"""
    image_index_of = lambda image: urlparse("https://{0}".format(image)).netloc

    if image.startswith("file://"):
        parsed = urlparse(image)
        filename = parsed.netloc + parsed.path
        if not os.path.exists(filename):
            raise HarpoonError("Provided file doesn't exist!", wanted=image)
        with open(filename) as fle:
            image_indexes = [(line.strip(), image_index_of(line.strip())) for line in fle]
    else:
        image_indexes = [(image, image_index_of(image))]

    authentication = collector.configuration.get("authentication", NotSpecified)
    for index, (image, image_index) in enumerate(image_indexes):
        tag = sb.NotSpecified
        if ":" in image:
            image, tag = image.split(":", 1)

        image = {
              "image_name": image
            , "tag": tag
            , "harpoon": collector.configuration["harpoon"]
            , "commands": ["FROM scratch"]
            , "image_index": image_index
            , "assume_role": NotSpecified
            , "authentication": authentication
            }
        meta = Meta(collector.configuration, []).at("images").at("__arbitrary_{0}__".format(index))
        image = HarpoonSpec().image_spec.normalise(meta, image)
        Syncer().pull(image)
Example #13
    def create_plugin(self, session, version, url, raw_status=None):
        """Handle default Keystone endpoint configuration

        Build the actual API endpoint from the scheme, host and port of the
        original auth URL and the rest from the returned version URL.
        """

        ver_u = urlparse.urlparse(url)

        # Only hack this if it is the default setting
        if ver_u.netloc.startswith('localhost'):
            auth_u = urlparse.urlparse(self.auth_url)
            # from original auth_url: scheme, netloc
            # from api_url: path, query (basically, the rest)
            url = urlparse.urlunparse((
                auth_u.scheme,
                auth_u.netloc,
                ver_u.path,
                ver_u.params,
                ver_u.query,
                ver_u.fragment,
            ))
            LOG.debug('Version URL updated: %s' % url)

        return super(OSCGenericPassword, self).create_plugin(
            session=session,
            version=version,
            url=url,
            raw_status=raw_status,
        )
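
The scheme/netloc swap can be seen in isolation; a sketch with hypothetical endpoints (the six import is an assumption matching the module-style usage above):

from six.moves.urllib import parse as urlparse

auth_u = urlparse.urlparse('https://keystone.example.com:5000/v3')
ver_u = urlparse.urlparse('http://localhost:5000/v3.10')
print(urlparse.urlunparse((auth_u.scheme, auth_u.netloc, ver_u.path,
                           ver_u.params, ver_u.query, ver_u.fragment)))
# https://keystone.example.com:5000/v3.10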
Example #14
    def test_list_buckets_non_empty(self):
        from six.moves.urllib.parse import parse_qs
        from six.moves.urllib.parse import urlencode
        from six.moves.urllib.parse import urlparse
        PROJECT = 'PROJECT'
        CREDENTIALS = _Credentials()
        client = self._makeOne(project=PROJECT, credentials=CREDENTIALS)

        BUCKET_NAME = 'bucket-name'
        query_params = urlencode({'project': PROJECT, 'projection': 'noAcl'})
        BASE_URI = '/'.join([
            client.connection.API_BASE_URL,
            'storage',
            client.connection.API_VERSION,
        ])
        URI = '/'.join([BASE_URI, 'b?%s' % (query_params,)])
        http = client.connection._http = _Http(
            {'status': '200', 'content-type': 'application/json'},
            '{{"items": [{{"name": "{0}"}}]}}'.format(BUCKET_NAME)
            .encode('utf-8'),
        )
        buckets = list(client.list_buckets())
        self.assertEqual(len(buckets), 1)
        self.assertEqual(buckets[0].name, BUCKET_NAME)
        self.assertEqual(http._called_with['method'], 'GET')
        self.assertTrue(http._called_with['uri'].startswith(BASE_URI))
        self.assertEqual(parse_qs(urlparse(http._called_with['uri']).query),
                         parse_qs(urlparse(URI).query))
Example #15
def test_add_handle_by_names(hurl, hpath, cpath, lcpath):

    class mocked_dirs:
        user_data_dir = lcpath

    with patch('datalad.cmdline.helpers.dirs', mocked_dirs), \
            swallow_logs() as cml:

        # get testrepos and make them known to datalad:
        handle = install_handle(hurl, hpath)
        collection = register_collection(cpath)
        assert_not_in(handle.name, collection)

        return_value = add_handle(handle.name, collection.name)

        # now handle is listed by collection:
        collection._reload()
        assert_in(handle.name, collection)

        # test collection repo:
        ok_clean_git(cpath, annex=False)
        ok_(isdir(opj(cpath, handle.name)))
        ok_(exists(opj(cpath, handle.name, REPO_CONFIG_FILE)))
        ok_(exists(opj(cpath, handle.name, REPO_STD_META_FILE)))

        # evaluate return value:
        assert_is_instance(return_value, Handle,
                           "install_handle() returns object of "
                           "incorrect class: %s" % type(return_value))
        eq_(return_value.name, handle.name)
        eq_(urlparse(return_value.url).path, urlparse(handle.url).path)
Example #16
File: test_files.py Project: jc7998/deuce
    def setUp(self):
        super(TestFiles, self).setUp()

        self.file_list = []
        self.max_ret_num = conf.api_configuration.default_returned_num
        self.total_file_num = 0

        self._hdrs = {"x-project-id": self.create_project_id()}

        # Create a vault and a file for us to work with
        self.vault_id = self.create_vault_id()
        self._vault_path = '/v1.0/vaults/' + self.vault_id
        self._files_path = self._vault_path + '/files'
        self._blocks_path = self._vault_path + '/blocks'

        # Create Vault
        response = self.simulate_put(self._vault_path, headers=self._hdrs)
        # Create File
        response = self.simulate_post(self._files_path, headers=self._hdrs)
        self._file_id = self.srmock.headers_dict['x-file-id']
        self._file_url = self.srmock.headers_dict['location']
        self._file_path = urlparse(self._file_url).path
        self._fileblocks_path = self._file_path + '/blocks'
        # Now, _file_path is '/v1.0/vaults/files_vault_test/files/SOME_FILE_ID'
        self.assertTrue(self._file_path.endswith(self._file_id))

        # Create distractor File
        response = self.simulate_post(self._files_path, headers=self._hdrs)
        self._distractor_file_id = self.srmock.headers_dict['x-file-id']
        self._distractor_url = self.srmock.headers_dict['location']
        self._distractor_file_path = urlparse(self._distractor_url).path
        self._distractor_fileblocks_path = self._distractor_file_path + \
            '/blocks'
        self._NOT_EXIST_files_path = '/v1.0/vaults/not_exists/files'
Example #17
    def test_previous_solution_incorrect(self):
        client = RecaptchaClient(_FAKE_PRIVATE_KEY, _FAKE_PUBLIC_KEY)
        urls = client._get_challenge_urls(
            was_previous_solution_incorrect=True,
            use_ssl=False,
        )

        javascript_challenge_url = urls['javascript_challenge_url']
        javascript_challenge_url_components = \
            urlparse(javascript_challenge_url)
        javascript_challenge_url_query = parse_qs(
            javascript_challenge_url_components.query,
        )

        self.assertIn('error', javascript_challenge_url_query)
        self.assertEqual(
            'incorrect-captcha-sol',
            javascript_challenge_url_query['error'][0]
        )

        noscript_challenge_url = urls['noscript_challenge_url']
        noscript_challenge_url_components = urlparse(noscript_challenge_url)
        self.assertEqual(
            javascript_challenge_url_components.query,
            noscript_challenge_url_components.query
        )
Example #18
def check_url(value, pattern=None, max_length=None, min_length=None, valid_values=None):
    '''
    Validate a URL value

    Parameters
    ----------
    value : any
        The value to validate.  This value will be cast to a string
        and converted to unicode.
    pattern : regular expression string, optional
        A regular expression used to validate string values
    max_length : int, optional
        The maximum length of the string
    min_length : int, optional
        The minimum length of the string
    valid_values : list of strings, optional
        List of the only possible values

    Returns
    -------
    string
        The validated URL value

    '''
    out = check_string(value, pattern=pattern, max_length=max_length,
                       min_length=min_length, valid_values=valid_values)
    try:
        urlparse(out)
    except:
        raise ESPOptionError('%s is not a valid URL' % value)
    return out
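
Worth noting: urlparse is very permissive and rarely raises, so the try/except above mainly guards against values that are not string-like at all:

from urllib.parse import urlparse

print(urlparse('not really a url'))
# ParseResult(scheme='', netloc='', path='not really a url', params='', query='', fragment='')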
Example #19
def get_canonical_and_alternates_urls(url, drop_ln=True, washed_argd=None, quote_path=False):
    """
    Given an Invenio URL, returns a tuple with two elements. The first is the
    canonical URL, that is, the original URL with the CFG_SITE_URL prefix and
    the ln= argument stripped. The second element is a mapping,
    language code -> alternate URL.

    @param quote_path: if True, the path section of the given C{url}
                       is quoted according to RFC 2396
    """
    dummy_scheme, dummy_netloc, path, dummy_params, query, fragment = urlparse(url)
    canonical_scheme, canonical_netloc = urlparse(cfg.get("CFG_SITE_URL"))[0:2]
    parsed_query = washed_argd or parse_qsl(query)
    no_ln_parsed_query = [(key, value) for (key, value) in parsed_query if key != "ln"]
    if drop_ln:
        canonical_parsed_query = no_ln_parsed_query
    else:
        canonical_parsed_query = parsed_query
    if quote_path:
        path = urllib.quote(path)
    canonical_query = urlencode(canonical_parsed_query)
    canonical_url = urlunparse((canonical_scheme, canonical_netloc, path, dummy_params, canonical_query, fragment))
    alternate_urls = {}
    for ln in cfg.get("CFG_SITE_LANGS"):
        alternate_query = urlencode(no_ln_parsed_query + [("ln", ln)])
        alternate_url = urlunparse((canonical_scheme, canonical_netloc, path, dummy_params, alternate_query, fragment))
        alternate_urls[ln] = alternate_url
    return canonical_url, alternate_urls
Example #20
    def test_collection_ok_by_state(
            self, f_users, f_coprs,
            f_mock_chroots_many,
            f_build_many_chroots,
            f_db,
            f_users_api):

        self.db.session.commit()
        for status in StatusEnum.vals.values():
            expected_chroots = set([
                name
                for name, chroot_status in
                self.status_by_chroot.items()
                if chroot_status == status
            ])

            href = "/api_2/build_tasks?state={}&limit=50".format(StatusEnum(status))

            r0 = self.tc.get(href)
            assert r0.status_code == 200
            obj = json.loads(r0.data.decode("utf-8"))
            assert len(obj["build_tasks"]) == len(expected_chroots)
            assert set(bt["build_task"]["chroot_name"]
                       for bt in obj["build_tasks"]) == expected_chroots

            assert parse_qs(urlparse(obj["_links"]["self"]["href"]).query) \
                == parse_qs(urlparse(href).query)
Example #21
    def __call__(self, controller_state):
        url = self.getArg(controller_state)
        # see if this is a relative url or an absolute
        if len(urlparse(url)[1]) != 0:
            # host specified, so url is absolute.  No good for traversal.
            raise ValueError('Can\'t traverse to absolute url %s' % str(url))

        url_path = urlparse(url)[2]
        # combine args from query string with args from the controller state
        # (args in the state supercede those in the query string)
        args = self.combineArgs(url, controller_state.kwargs)

        # put the args in the REQUEST
        REQUEST = controller_state.getContext().REQUEST
        for (key, value) in args.items():
            REQUEST.set(key, value)

        # make sure target exists
        context = controller_state.getContext()
        obj = context.restrictedTraverse(url_path, default=None)
        if obj is None:
            raise ValueError('Unable to find %s\n' % str(url_path))
        return mapply(obj, REQUEST.args, REQUEST,
                               call_object, 1, missing_name, dont_publish_class,
                               REQUEST, bind=1)
Example #22
def submit_git_build(binstar, args):

    log.info("Submitting the following repo for package creation: %s" % args.git_url)
    builds = get_gitrepo(urlparse(args.git_url))

    if not args.package:
        user = binstar.user()
        user_name = user['login']
        package_name = builds['repo'].split('/')[1]
        log.info("Using repo name '%s' as the pkg name." % package_name)
        args.package = PackageSpec(user_name, package_name)

    try:
        _ = binstar.package(args.package.user, args.package.name)
    except errors.NotFound:
        raise errors.UserError("Package %s does not exist" % (args.package,))


    if not args.dry_run:
        log.info("Submitting the following repo for package creation: %s" % args.git_url)
        builds = get_gitrepo(urlparse(args.path))
        build = binstar.submit_for_url_build(args.package.user, args.package.name, builds,
                                             channels=args.channels, queue=args.queue, sub_dir=args.sub_dir,
                                             test_only=args.test_only, callback=upload_print_callback(args),
                                             filter_platform=args.platform,
                                                )

        print_build_results(args, build)

    else:
        log.info('Build not submitted (dry-run)')
Example #23
    def jenkins_job_building(self, jobName):
        server = self.jenkins_server()

        nodes = server.get_nodes()
        for node in nodes:
            # the name returned is not the name to lookup when
            # dealing with master :/
            if node['name'] == 'master':
                node_name = '(master)'
            else:
                node_name = node['name']
            try:
                info = server.get_node_info(node_name, depth=2)
            except Exception:
                # Jenkins may 500 on depth > 0. If the node info is not
                # available, treat this node as not running any jobs;
                # without this `continue`, `info` would be unbound below.
                continue
            for executor in info['executors']:
                executable = executor['currentExecutable']
                if executable:
                    executor_number = executor['number']
                    build_number = executable['number']
                    url = executable['url']
                    print(urlparse(url).path)
                    m = re.match(r'.*?/job/([^/]+)/.*', urlparse(url).path)
                    job_name = m.group(1)
                    if job_name == jobName:
                        return True
        return False
Example #24
File: utils.py Project: bird-house/birdy
def is_embedded_in_request(url, value):
    """Whether or not to encode the value as raw data content.

    Returns True if
      - value is a file-like object, or
      - value is a file:/// URI or a local path and the url does not
        point at localhost, or
      - value is already the string content itself.
    """
    if hasattr(value, 'read'):  # File-like
        return True

    u = urlparse(url)

    if isinstance(value, Path):  # pathlib.Path
        p = value
        scheme = 'file'
    else:  # String-like
        v = urlparse(value)
        p = Path(v.path)
        scheme = v.scheme

    if scheme == 'file':  # Explicit link to file
        if is_file(p):
            return 'localhost' not in u.netloc
        else:
            raise IOError("{} should be a local file but was not found on disk.".format(value))
    elif scheme == '':  # Could be a local path or just a string
        if is_file(p):
            return 'localhost' not in u.netloc
        else:
            return True
    else:  # Other URL (http, https, ftp, ...)
        return False
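
A usage sketch, assuming data.txt exists locally, is_file() behaves like Path.is_file(), and the URLs are hypothetical:

print(is_embedded_in_request('http://localhost:5000/wps', 'data.txt'))
# False -- the local server can read the path itself
print(is_embedded_in_request('http://remote.example.org/wps', 'data.txt'))
# True -- a remote server needs the raw content
print(is_embedded_in_request('http://remote.example.org/wps', 'just some text'))
# True -- not a file, treated as string content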
Example #25
File: test_oauth.py Project: mozilla/PyFxA
 def test_redirect_url_takes_custom_url_parameters(self):
     redirect_url = urlparse(self.client.get_redirect_url(
         state="applicationstate",
         redirect_uri="https://my.site/oauth",
         scope="profile profile:email",
         action="signup",
         email="*****@*****.**",
         code_challenge="challenge",
         code_challenge_method="S1234",
         access_type="offline",
         keys_jwk="MockJWK",
     ))
     server_url = urlparse(self.server_url)
     self.assertEqual(redirect_url.hostname, server_url.hostname)
     params = parse_qs(redirect_url.query, keep_blank_values=True)
     all_params = ["action", "email", "client_id", "redirect_uri",
                   "scope", "state", "access_type", "code_challenge",
                   "code_challenge_method", "keys_jwk"]
     self.assertEqual(sorted(params.keys()), sorted(all_params))
     self.assertEqual(params["client_id"][0], self.client.client_id)
     self.assertEqual(params["state"][0], "applicationstate")
     self.assertEqual(params["redirect_uri"][0], "https://my.site/oauth")
     self.assertEqual(params["scope"][0], "profile profile:email")
     self.assertEqual(params["action"][0], "signup")
     self.assertEqual(params["email"][0], "*****@*****.**")
     self.assertEqual(params["code_challenge"][0], "challenge")
     self.assertEqual(params["code_challenge_method"][0], "S1234")
     self.assertEqual(params["access_type"][0], "offline")
     self.assertEqual(params["keys_jwk"][0], "MockJWK")
Example #26
 def _wrapped(self):
     # If we are ignoring the request on the form, we should also ignore it
     # on the widget.  This means that when on the first widget we conclude
     # that the form should be ignored, we quickly ignore it on all widgets,
     # without needing to check the referer and method again and again.
     # When we do not ignore the request, we do still run these checks for
     # all widgets.  But it seems an international sport to override the
     # update or updateWidgets method of the base z3c form, which makes it
     # hard to fix all occurrences by one check on the form.
     if not self.ignoreRequest and getattr(self.form, 'ignoreRequest', False):
         self.ignoreRequest = True
     # If we are not already ignoring the request, check the request method.
     if (not self.ignoreRequest
             and hasattr(self.form, 'method')
             and hasattr(self.request, 'REQUEST_METHOD')):
         if self.request.REQUEST_METHOD.lower() != self.form.method.lower():
             # This is an unexpected request method.
             # For special cases we allow a form to bail out.
             if not getattr(self.form, ALLOW_PREFILL, False):
                 self.ignoreRequest = True
                 self.form.ignoreRequest = True
     # If we are not already ignoring the request, check the referer.
     if not self.ignoreRequest and hasattr(self.request, 'environ'):
         env = self.request.environ
         referrer = env.get('HTTP_REFERER', env.get('HTTP_REFERRER'))
         if referrer:
             req_url_parsed = urlparse(self.request.URL)
             referrer_parsed = urlparse(referrer)
             if req_url_parsed.netloc != referrer_parsed.netloc:
                 # We do not trust data from outside referrers.
                 self.ignoreRequest = True
                 self.form.ignoreRequest = True
     return update(self)
Example #27
    def test_create_and_read_and_delete(self):
        expected = self.test_create_file().data
        url = urlparse(expected["url"])
        response = self.client.get(url.path)
        contents = response.content.decode("utf-8")
        self.assertEquals(self.file_contents, contents)

        response = self.client.get("/rp/jobs/files/", format="json")
        self.assertEquals(1, len(response.data))
        data = response.data[0]
        self.assertEquals("File contents not shown.", data["contents"])
        self.assertEquals(expected["file"], data["file"])
        self.assertEquals(expected["filename"], data["filename"])
        self.assertEquals(self.get_hashed_contents(), data["hexhash"])
        self.assertEquals(url.path, urlparse(data["url"]).path)

        response = self.client.delete(url.path)
        self.assertEqual(204, response.status_code)

        response = self.client.get(url.path)
        self.assertEqual(404, response.status_code)

        response = self.client.get("/rp/jobs/files/", format="json")
        data = response.data
        self.assertEquals(0, len(data))
Example #28
    def build_integration(self, state):
        data = state['identity']['data']
        oauth_data = self.get_oauth_data(data)
        user = get_user_info(data['access_token'], state['installation_data'])
        group = self.get_group_info(data['access_token'], state['installation_data'])
        scopes = sorted(GitlabIdentityProvider.oauth_scopes)
        base_url = state['installation_data']['url']

        integration = {
            'name': group['name'],
            'external_id': u'{}:{}'.format(urlparse(base_url).netloc, group['id']),
            'metadata': {
                'icon': group['avatar_url'],
                'domain_name': group['web_url'].replace('https://', ''),
                'scopes': scopes,
                'verify_ssl': state['installation_data']['verify_ssl'],
                'base_url': base_url,
            },
            'user_identity': {
                'type': 'gitlab',
                'external_id': u'{}:{}'.format(urlparse(base_url).netloc, user['id']),
                'scopes': scopes,
                'data': oauth_data,
            },
        }

        return integration
Example #29
def get_repo_information(repo_file_raw_content):
    base_url = re.search('baseurl=((.*)/os)', repo_file_raw_content).group(1)
    base_url = base_url.replace("$basearch", "x86_64")
    version = urlparse(base_url).path.split('/')[5]
    repo_name = urlparse(base_url).path.split('/')[4]

    return base_url, version, repo_name
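
The hard-coded indexes 4 and 5 select fixed path segments; a sketch with a hypothetical baseurl layout:

from urllib.parse import urlparse

path = urlparse('http://mirror.example.com/a/b/c/repo-name/1.0/os').path
print(path.split('/'))  # ['', 'a', 'b', 'c', 'repo-name', '1.0', 'os']
# index 4 -> 'repo-name', index 5 -> '1.0'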
Example #30
    def test_cross_repo_mount(self):
        target_url = urlparse('docker://localhost:8787/t/nova-api:latest')
        other_url = urlparse('docker://localhost:8787/t/nova-compute:latest')
        image_layers = {
            'sha256:1234': other_url
        }
        source_layers = [
            'sha256:1234', 'sha256:6789'
        ]
        source_blob_dir = os.path.join(image_export.IMAGE_EXPORT_DIR,
                                       'v2/t/nova-compute/blobs')
        source_blob_path = os.path.join(source_blob_dir, 'sha256:1234.gz')
        target_blob_dir = os.path.join(image_export.IMAGE_EXPORT_DIR,
                                       'v2/t/nova-api/blobs')
        target_blob_path = os.path.join(target_blob_dir, 'sha256:1234.gz')

        # call with missing source, no change
        image_export.cross_repo_mount(target_url, image_layers, source_layers)
        self.assertFalse(os.path.exists(source_blob_path))
        self.assertFalse(os.path.exists(target_blob_path))

        image_export.make_dir(source_blob_dir)
        with open(source_blob_path, 'w') as f:
            f.write('blob')
        self.assertTrue(os.path.exists(source_blob_path))

        # call with existing source
        image_export.cross_repo_mount(target_url, image_layers, source_layers)
        self.assertTrue(os.path.exists(target_blob_path))
        with open(target_blob_path, 'r') as f:
            self.assertEqual('blob', f.read())
Example #31
def main():
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = map(str.strip, args.headers.split(','))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data

        return frame.opcode, frame.data

    def recv_ws():
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(
                    data, bytes):
                data = str(data, "utf-8")
            if not args.verbose and opcode in OPCODE_DATA:
                msg = data
            elif args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)

            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)

            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()

    if args.text:
        ws.send(args.text)

    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return
Example #32
    def return_response(self, method, path, data, headers, response):

        bucket_name = get_bucket_name(path, headers)

        # No path-name based bucket name? Try host-based
        hostname_parts = headers['host'].split('.')
        if (not bucket_name
                or len(bucket_name) == 0) and len(hostname_parts) > 1:
            bucket_name = hostname_parts[0]

        # POST requests to S3 may include a success_action_redirect field,
        # which should be used to redirect a client to a new location.
        key = None
        if method == 'POST':
            key, redirect_url = multipart_content.find_multipart_redirect_url(
                data, headers)

            if key and redirect_url:
                response.status_code = 303
                response.headers['Location'] = expand_redirect_url(
                    redirect_url, key, bucket_name)
                LOGGER.debug('S3 POST {} to {}'.format(
                    response.status_code, response.headers['Location']))

        parsed = urlparse.urlparse(path)

        bucket_name_in_host = headers['host'].startswith(bucket_name)

        should_send_notifications = all([
            method in ('PUT', 'POST', 'DELETE'),
            '/' in path[1:] or bucket_name_in_host,
            # check if this is an actual put object request, because it could also be
            # a put bucket request with a path like this: /bucket_name/
            bucket_name_in_host or
            (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
            # don't send notification if url has a query part (some/path/with?query)
            # (query can be one of 'notification', 'lifecycle', 'tagging', etc)
            not parsed.query
        ])

        # get subscribers and send bucket notifications
        if should_send_notifications:
            # if we already have a good key, use it, otherwise examine the path
            if key:
                object_path = '/' + key
            elif bucket_name_in_host:
                object_path = parsed.path
            else:
                parts = parsed.path[1:].split('/', 1)
                object_path = parts[1] if parts[1][
                    0] == '/' else '/%s' % parts[1]

            send_notifications(method, bucket_name, object_path)

        # publish event for creation/deletion of buckets:
        if method in ('PUT', 'DELETE') and ('/' not in path[1:] or
                                            len(path[1:].split('/')[1]) <= 0):
            event_type = (event_publisher.EVENT_S3_CREATE_BUCKET if method
                          == 'PUT' else event_publisher.EVENT_S3_DELETE_BUCKET)
            event_publisher.fire_event(
                event_type,
                payload={'n': event_publisher.get_hash(bucket_name)})

        # fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
        if method == 'PUT' and parsed.query == 'policy':
            response._content = ''
            response.status_code = 204
            return response

        if response:
            # append CORS headers to response
            append_cors_headers(bucket_name,
                                request_method=method,
                                request_headers=headers,
                                response=response)

            response_content_str = None
            try:
                response_content_str = to_str(response._content)
            except Exception:
                pass

            # we need to un-pretty-print the XML, otherwise we run into this issue with Spark:
            # https://github.com/jserver/mock-s3/pull/9/files
            # https://github.com/localstack/localstack/issues/183
            # Note: yet, we need to make sure we have a newline after the first line: <?xml ...>\n
            if response_content_str and response_content_str.startswith('<'):
                is_bytes = isinstance(response._content, six.binary_type)
                response._content = re.sub(r'([^\?])>\n\s*<',
                                           r'\1><',
                                           response_content_str,
                                           flags=re.MULTILINE)
                if is_bytes:
                    response._content = to_bytes(response._content)
                # fix content-type: https://github.com/localstack/localstack/issues/618
                #                   https://github.com/localstack/localstack/issues/549
                if 'text/html' in response.headers.get('Content-Type', ''):
                    response.headers[
                        'Content-Type'] = 'application/xml; charset=utf-8'

                response.headers['content-length'] = len(response._content)

            # update content-length headers (fix https://github.com/localstack/localstack/issues/541)
            if method == 'DELETE':
                response.headers['content-length'] = len(response._content)
Example #33
def http_redirect_message(message,
                          location,
                          relay_state="",
                          typ="SAMLRequest",
                          sigalg=None,
                          key=None,
                          **kwargs):
    """The HTTP Redirect binding defines a mechanism by which SAML protocol
    messages can be transmitted within URL parameters.
    Messages are encoded for use with this binding using a URL encoding
    technique, and transmitted using the HTTP GET method.

    The DEFLATE Encoding is used in this function.

    :param message: The message
    :param location: Where the message should be posted to
    :param relay_state: for preserving and conveying state information
    :param typ: What type of message it is: SAMLRequest/SAMLResponse/SAMLart
    :param sigalg: The signature algorithm to use.
    :param key: Key to use for signing
    :return: A dict with header information (including the Location
        redirect) and an empty body.
    """

    if not isinstance(message, six.string_types):
        message = "%s" % (message, )

    _order = None
    if typ in ["SAMLRequest", "SAMLResponse"]:
        if typ == "SAMLRequest":
            _order = REQ_ORDER
        else:
            _order = RESP_ORDER
        args = {typ: deflate_and_base64_encode(message)}
    elif typ == "SAMLart":
        args = {typ: message}
    else:
        raise Exception("Unknown message type: %s" % typ)

    if relay_state:
        args["RelayState"] = relay_state

    if sigalg:
        # sigalgs, one of the ones defined in xmldsig

        args["SigAlg"] = sigalg

        try:
            signer = SIGNER_ALGS[sigalg]
        except:
            raise Unsupported("Signing algorithm")
        else:
            string = "&".join([
                urlencode({k: args[k]}) for k in _order if k in args
            ]).encode('ascii')
            args["Signature"] = base64.b64encode(signer.sign(string, key))
            string = urlencode(args)
    else:
        string = urlencode(args)

    glue_char = "&" if urlparse(location).query else "?"
    login_url = glue_char.join([location, string])
    headers = [('Location', str(login_url))]
    body = []

    return {"headers": headers, "data": body}
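
The glue_char logic keeps the redirect URL valid whether or not the IdP location already carries a query string; in isolation, with a hypothetical location:

from urllib.parse import urlparse

for location in ('https://idp.example.com/sso', 'https://idp.example.com/sso?vendor=x'):
    glue_char = '&' if urlparse(location).query else '?'
    print(glue_char.join([location, 'SAMLRequest=...']))
# https://idp.example.com/sso?SAMLRequest=...
# https://idp.example.com/sso?vendor=x&SAMLRequest=...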
Example #34
    def setup_config(self,
                     project_url=None,
                     project_folder=None,
                     project_name=None,
                     over_write=False,
                     bypass_robots=False,
                     zip_project_folder=True,
                     delete_project_folder=False,
                     load_css=True,
                     load_javascript=True,
                     load_images=True,
                     join_timeout=None,
                     log_file=None,
                     debug=False):
        """Sets up the complete config parts which requires a project_url to be present.

        Complete configuration is done here and is subject to change according
        to the application structure. You are advised to use only the
        .setup_path() method if you get any unusual behaviour.

        :rtype: dict
        :returns: self
        """

        #: if external configuration is provided then
        #: the config dict will update its configuration
        #: values for global usages
        self.update(project_url=project_url,
                    over_write=over_write,
                    bypass_robots=bypass_robots,
                    zip_project_folder=zip_project_folder,
                    delete_project_folder=delete_project_folder,
                    load_css=load_css,
                    load_javascript=load_javascript,
                    load_images=load_images,
                    join_timeout=join_timeout,
                    debug=debug,
                    log_file=log_file)

        #: Default base paths configuration is done right away so
        #: it at least sets base files and folder for downloading files
        if not project_name:
            project_name = urlparse(project_url).hostname

        self.setup_paths(project_folder, project_name)

        #: Log this new configuration to the log file for debug purposes
        LOGGER.debug(str(dict(self)))

        #: Updates the headers of the requests object, it is set to
        #: reflect this package as a copy bot
        #: by default which lets the server distinguish it from other
        #: requests and can help the maintainer to optimize the access
        SESSION.headers.update(self.get('http_headers'))
        SESSION.set_bypass(self.get('bypass_robots'))
        #: Update the website access rules object which decide
        #: whether to access a site or not
        #: if you want to skip the checks then override the `bypass_robots` key
        # XXX user_agent = self['http_headers'].get('User-Agent', '*')
        # prepared_robots_txt = RobotsTxtParser(user_agent, urljoin(project_url, '/robots.txt'))
        SESSION.load_rules_from_url(urljoin(project_url, '/robots.txt'))

        return self
Example #35
from __future__ import absolute_import

import datetime
import jwt

from unidiff import PatchSet

from six.moves.urllib.parse import urlparse

from sentry.utils.http import absolute_uri
from sentry.integrations.atlassian_connect import get_query_hash
from sentry.integrations.client import ApiClient

BITBUCKET_KEY = '%s.bitbucket' % urlparse(absolute_uri()).hostname


class BitbucketAPIPath(object):
    """
    All UUIDs must be surrounded by curly braces.

    repo is the fully qualified slug containing 'username/repo_slug'

    repo_slug - repository slug or UUID
    username - username or UUID
    """

    issue = u'/2.0/repositories/{repo}/issues/{issue_id}'
    issues = u'/2.0/repositories/{repo}/issues'
    issue_comments = u'/2.0/repositories/{repo}/issues/{issue_id}/comments'

    repository = u'/2.0/repositories/{repo}'
Example #36
 def __init__(self, driver, live_server, percy):
     self.driver = driver
     self.live_server_url = live_server.url
     self.percy = percy
     self.domain = urlparse(self.live_server_url).hostname
     self._has_initialized_cookie_store = False
Example #37
File: client.py Project: njzydark/sentry
import datetime
import jwt
import re
from hashlib import md5 as _md5
from six.moves.urllib.parse import parse_qs, urlparse, urlsplit

from sentry.utils.cache import cache
from django.utils.encoding import force_bytes

from sentry.integrations.atlassian_connect import get_query_hash
from sentry.integrations.exceptions import ApiError
from sentry.integrations.client import ApiClient
from sentry.utils.http import absolute_uri

JIRA_KEY = '%s.jira' % (urlparse(absolute_uri()).hostname, )


def md5(*bits):
    return _md5(':'.join((force_bytes(bit, errors='replace') for bit in bits)))


class JiraApiClient(ApiClient):
    COMMENT_URL = '/rest/api/2/issue/%s/comment'
    STATUS_URL = '/rest/api/2/status'
    CREATE_URL = '/rest/api/2/issue'
    ISSUE_URL = '/rest/api/2/issue/%s'
    META_URL = '/rest/api/2/issue/createmeta'
    PRIORITIES_URL = '/rest/api/2/priority'
    PROJECT_URL = '/rest/api/2/project'
    SEARCH_URL = '/rest/api/2/search/'
Example #38
def _parse_netloc(netloc):
    info = urlparse("http://{0}".format(netloc))
    return {'username': info.username or None,
            'password': info.password or None,
            'host': info.hostname or 'localhost',
            'port': info.port or 8086}
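
A usage sketch with hypothetical credentials:

print(_parse_netloc('user:secret@influx.example.com:9999'))
# {'username': 'user', 'password': 'secret', 'host': 'influx.example.com', 'port': 9999}
print(_parse_netloc('localhost'))
# {'username': None, 'password': None, 'host': 'localhost', 'port': 8086}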
Example #39
def send_notifications(method, bucket_name, object_path):
    for bucket, config in iteritems(S3_NOTIFICATIONS):
        if bucket == bucket_name:
            action = {
                'PUT': 'ObjectCreated',
                'POST': 'ObjectCreated',
                'DELETE': 'ObjectRemoved'
            }[method]
            # TODO: support more detailed methods, e.g., DeleteMarkerCreated
            # http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
            api_method = {
                'PUT': 'Put',
                'POST': 'Post',
                'DELETE': 'Delete'
            }[method]
            event_name = '%s:%s' % (action, api_method)
            if (event_type_matches(config['Event'], action, api_method)
                    and filter_rules_match(config.get('Filter'), object_path)):
                # send notification
                message = get_event_message(event_name=event_name,
                                            bucket_name=bucket_name,
                                            file_name=urlparse.urlparse(
                                                object_path[1:]).path)
                message = json.dumps(message)
                if config.get('Queue'):
                    sqs_client = aws_stack.connect_to_service('sqs')
                    try:
                        queue_url = queue_url_for_arn(config['Queue'])
                        sqs_client.send_message(QueueUrl=queue_url,
                                                MessageBody=message)
                    except Exception as e:
                        LOGGER.warning(
                            'Unable to send notification for S3 bucket "%s" to SQS queue "%s": %s'
                            % (bucket_name, config['Queue'], e))
                if config.get('Topic'):
                    sns_client = aws_stack.connect_to_service('sns')
                    try:
                        sns_client.publish(TopicArn=config['Topic'],
                                           Message=message,
                                           Subject='Amazon S3 Notification')
                    except Exception as e:
                        LOGGER.warning(
                            'Unable to send notification for S3 bucket "%s" to SNS topic "%s".'
                            % (bucket_name, config['Topic']))
                # CloudFunction and LambdaFunction are semantically identical
                lambda_function_config = config.get(
                    'CloudFunction') or config.get('LambdaFunction')
                if lambda_function_config:
                    # make sure we don't run into a socket timeout
                    connection_config = botocore.config.Config(
                        read_timeout=300)
                    lambda_client = aws_stack.connect_to_service(
                        'lambda', config=connection_config)
                    try:
                        lambda_client.invoke(
                            FunctionName=lambda_function_config,
                            InvocationType='Event',
                            Payload=message)
                    except Exception as e:
                        LOGGER.warning(
                            'Unable to send notification for S3 bucket "%s" to Lambda function "%s".'
                            % (bucket_name, lambda_function_config))
                if not filter(lambda x: config.get(x),
                              NOTIFICATION_DESTINATION_TYPES):
                    LOGGER.warning(
                        'Neither of %s defined for S3 notification.' %
                        '/'.join(NOTIFICATION_DESTINATION_TYPES))
Example #40
    def forward_request(self, method, path, data, headers):

        # Make sure we use 'localhost' as forward host, to ensure moto uses path style addressing.
        # Note that all S3 clients using LocalStack need to enable path style addressing.
        if 's3.amazonaws.com' not in headers.get('host', ''):
            headers['host'] = 'localhost'

        # check content md5 hash integrity
        if 'Content-MD5' in headers:
            response = check_content_md5(data, headers)
            if response is not None:
                return response

        modified_data = None

        # If this request contains streaming v4 authentication signatures, strip them from the message
        # Related issue: https://github.com/localstack/localstack/issues/98
        # TODO we should evaluate whether to replace moto s3 with scality/S3:
        # https://github.com/scality/S3/issues/237
        if headers.get('x-amz-content-sha256'
                       ) == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
            modified_data = strip_chunk_signatures(data)

        # POST requests to S3 may include a "${filename}" placeholder in the
        # key, which should be replaced with an actual file name before storing.
        if method == 'POST':
            original_data = modified_data or data
            expanded_data = multipart_content.expand_multipart_filename(
                original_data, headers)
            if expanded_data is not original_data:
                modified_data = expanded_data

        # If no content-type is provided, 'binary/octet-stream' should be used
        # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
        if method == 'PUT' and not headers.get('content-type'):
            headers['content-type'] = 'binary/octet-stream'

        # persist this API call to disk
        persistence.record('s3', method, path, data, headers)

        parsed = urlparse.urlparse(path)
        query = parsed.query
        path = parsed.path
        bucket = path.split('/')[1]
        query_map = urlparse.parse_qs(query)
        if query == 'notification' or 'notification' in query_map:
            # handle and return response for ?notification request
            response = handle_notification_request(bucket, method, data)
            return response

        if query == 'cors' or 'cors' in query_map:
            if method == 'GET':
                return get_cors(bucket)
            if method == 'PUT':
                return set_cors(bucket, data)
            if method == 'DELETE':
                return delete_cors(bucket)

        if query == 'lifecycle' or 'lifecycle' in query_map:
            if method == 'GET':
                return get_lifecycle(bucket)
            if method == 'PUT':
                return set_lifecycle(bucket, data)

        if modified_data:
            return Request(data=modified_data, headers=headers, method=method)
        return True
Example #41
def doi2fn(doi, collection_dir='_publication'):
    fn = urlparse(doi).path.lstrip('/').replace('/', '_')
    return '{0}/{1}.md'.format(collection_dir, fn)
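
A usage sketch with a hypothetical DOI:

print(doi2fn('https://doi.org/10.1000/xyz123'))  # _publication/10.1000_xyz123.md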
Example #42
File: config.py Project: BruceZu/gertty
    def __init__(self,
                 server=None,
                 palette='default',
                 keymap='default',
                 path=DEFAULT_CONFIG_PATH):
        self.path = os.path.expanduser(path)

        if not os.path.exists(self.path):
            self.printSample()
            exit(1)

        self.config = yaml.load(open(self.path))
        schema = ConfigSchema().getSchema(self.config)
        schema(self.config)
        server = self.getServer(server)
        self.server = server
        url = server['url']
        if not url.endswith('/'):
            url += '/'
        self.url = url
        result = urlparse.urlparse(url)
        self.hostname = result.netloc
        self.username = server['username']
        self.password = server.get('password')
        if self.password is None:
            self.password = getpass.getpass("Password for %s (%s): " %
                                            (self.url, self.username))
        else:
            # Ensure file is only readable by user as password is stored in
            # file.
            mode = os.stat(self.path).st_mode & 0o0777
            if not mode == 0o600:
                print("Error: Config file '{}' contains a password and does "
                      "not have permissions set to 0600.\n"
                      "Permissions are: {}".format(self.path, oct(mode)))
                exit(1)
        self.auth_type = server.get('auth-type', 'digest')
        auth_types = ['digest', 'basic']
        if self.auth_type not in auth_types:
            self.auth_type = 'digest'
        self.verify_ssl = server.get('verify-ssl', True)
        if not self.verify_ssl:
            os.environ['GIT_SSL_NO_VERIFY'] = 'true'
        self.ssl_ca_path = server.get('ssl-ca-path', None)
        if self.ssl_ca_path is not None:
            self.ssl_ca_path = os.path.expanduser(self.ssl_ca_path)
            # Gertty itself uses the Requests library
            os.environ['REQUESTS_CA_BUNDLE'] = self.ssl_ca_path
            # And this is to allow Git callouts
            os.environ['GIT_SSL_CAINFO'] = self.ssl_ca_path
        self.git_root = os.path.expanduser(server['git-root'])
        git_url = server.get('git-url', self.url + 'p/')
        if not git_url.endswith('/'):
            git_url += '/'
        self.git_url = git_url
        self.dburi = server.get(
            'dburi', 'sqlite:///' + os.path.expanduser('~/.gertty.db'))
        socket_path = server.get('socket', '~/.gertty.sock')
        self.socket_path = os.path.expanduser(socket_path)
        log_file = server.get('log-file', '~/.gertty.log')
        self.log_file = os.path.expanduser(log_file)

        self.palettes = {
            'default': gertty.palette.Palette({}),
            'light': gertty.palette.Palette(gertty.palette.LIGHT_PALETTE),
        }
        for p in self.config.get('palettes', []):
            if p['name'] not in self.palettes:
                self.palettes[p['name']] = gertty.palette.Palette(p)
            else:
                self.palettes[p['name']].update(p)
        self.palette = self.palettes[self.config.get('palette', palette)]

        self.keymaps = {
            'default': gertty.keymap.KeyMap({}),
            'vi': gertty.keymap.KeyMap(gertty.keymap.VI_KEYMAP)
        }
        for p in self.config.get('keymaps', []):
            if p['name'] not in self.keymaps:
                self.keymaps[p['name']] = gertty.keymap.KeyMap(p)
            else:
                self.keymaps[p['name']].update(p)
        self.keymap = self.keymaps[self.config.get('keymap', keymap)]

        self.commentlinks = [
            gertty.commentlink.CommentLink(c)
            for c in self.config.get('commentlinks', [])
        ]
        self.commentlinks.append(
            gertty.commentlink.CommentLink(
                dict(match="(?P<url>https?://\\S*)",
                     replacements=[dict(link=dict(text="{url}", url="{url}"))
                                   ])))

        self.project_change_list_query = self.config.get(
            'change-list-query', 'status:open')

        self.diff_view = self.config.get('diff-view', 'side-by-side')

        self.dashboards = OrderedDict()
        for d in self.config.get('dashboards', []):
            self.dashboards[d['key']] = d

        self.reviewkeys = OrderedDict()
        for k in self.config.get('reviewkeys', []):
            self.reviewkeys[k['key']] = k

        self.hide_comments = []
        for h in self.config.get('hide-comments', []):
            self.hide_comments.append(re.compile(h['author']))

        self.thread_changes = self.config.get('thread-changes', True)
        self.utc = self.config.get('display-times-in-utc', False)
        self.handle_mouse = self.config.get('handle-mouse', True)

        change_list_options = self.config.get('change-list-options', {})
        self.change_list_options = {
            'sort-by': change_list_options.get('sort-by', 'number'),
            'reverse': change_list_options.get('reverse', False)
        }

        self.expire_age = self.config.get('expire-age', '2 months')
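
A minimal sketch of the URL normalization above, assuming a hypothetical Gerrit server and the six-style urlparse import used elsewhere in these examples:

from six.moves.urllib.parse import urlparse

url = 'https://review.example.org/gerrit'
if not url.endswith('/'):
    url += '/'
print(urlparse(url).netloc)  # 'review.example.org'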
Example #43
def parse_s3_url(url):
    parsed_url = urlparse(url)
    if parsed_url.scheme != 's3':
        raise ValueError('Expecting \'s3\' scheme, got: {} in {}'\
                         .format(parsed_url.scheme, url))
    return parsed_url.netloc, parsed_url.path.lstrip('/')
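
A usage sketch with a hypothetical S3 URL:

bucket, key = parse_s3_url('s3://my-bucket/path/to/object.txt')
assert (bucket, key) == ('my-bucket', 'path/to/object.txt')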
Example #44
def get_server(endpoint):
    """Extract and return the server & port that we're connecting to."""
    if endpoint is None:
        return None, None
    parts = urlparse.urlparse(endpoint)
    return parts.hostname, str(parts.port)
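
A usage sketch (hypothetical endpoint). Note that an endpoint without an explicit port yields the string 'None', since the port is passed through str():

host, port = get_server('http://glance.example.com:9292/v2')
# host == 'glance.example.com', port == '9292'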
Example #45
DATABASES = {
    'default': {
        'ENGINE': 'sentry.db.postgres',
        'NAME': 'sentry',
        'USER': '******',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
        'AUTOCOMMIT': True,
        'ATOMIC_REQUESTS': False,
    }
}


if 'DATABASE_URL' in os.environ:
    url = urlparse(os.environ['DATABASE_URL'])

    # Ensure default database exists.
    DATABASES['default'] = DATABASES.get('default', {})

    # Update with environment configuration.
    DATABASES['default'].update({
        'NAME': url.path[1:],
        'USER': url.username,
        'PASSWORD': url.password,
        'HOST': url.hostname,
        'PORT': url.port,
    })
    if url.scheme == 'postgres':
        DATABASES['default']['ENGINE'] = 'sentry.db.postgres'
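
A sketch of what a typical DATABASE_URL (hypothetical credentials) parses into:

from six.moves.urllib.parse import urlparse

url = urlparse('postgres://sentry:secret@db.internal:5432/sentry')
# url.path[1:] == 'sentry', url.username == 'sentry', url.password == 'secret',
# url.hostname == 'db.internal', url.port == 5432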
Example #46
def get_logged_in_program_certificate_url(certificate_url):
    parsed_url = urlparse(certificate_url)
    query_string = 'next=' + parsed_url.path
    url_parts = (parsed_url.scheme, parsed_url.netloc, '/login/', '',
                 query_string, '')
    return urlunparse(url_parts)
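
A usage sketch with a hypothetical certificates host:

print(get_logged_in_program_certificate_url(
    'https://credentials.example.org/certificates/abc123'))
# -> 'https://credentials.example.org/login/?next=/certificates/abc123'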
Example #47
        def response_iter():
            # NB: XML requires that the XML declaration, if present, be at the
            # very start of the document. Clients *will* call us out on not
            # being valid XML if we pass through whitespace before it.
            # Track whether we've sent anything yet so we can yield out that
            # declaration *first*
            yielded_anything = False

            try:
                try:
                    # TODO: add support for versioning
                    put_resp = req.get_response(self.app,
                                                'PUT',
                                                body=json.dumps(manifest),
                                                query={
                                                    'multipart-manifest':
                                                    'put',
                                                    'heartbeat': 'on'
                                                },
                                                headers=headers)
                    if put_resp.status_int == 202:
                        body = []
                        put_resp.fix_conditional_response()
                        for chunk in put_resp.response_iter:
                            if not chunk.strip():
                                if time.time() - start_time < 10:
                                    # Include some grace period to keep
                                    # ceph-s3tests happy
                                    continue
                                if not yielded_anything:
                                    yield (b'<?xml version="1.0" '
                                           b'encoding="UTF-8"?>\n')
                                yielded_anything = True
                                yield chunk
                                continue
                            body.append(chunk)
                        body = json.loads(b''.join(body))
                        if body['Response Status'] != '201 Created':
                            for seg, err in body['Errors']:
                                if err == too_small_message:
                                    raise EntityTooSmall()
                                elif err in ('Etag Mismatch', '404 Not Found'):
                                    raise InvalidPart(upload_id=upload_id)
                            raise InvalidRequest(
                                status=body['Response Status'],
                                msg='\n'.join(': '.join(err)
                                              for err in body['Errors']))
                except BadSwiftRequest as e:
                    msg = str(e)
                    if too_small_message in msg:
                        raise EntityTooSmall(msg)
                    elif ', Etag Mismatch' in msg:
                        raise InvalidPart(upload_id=upload_id)
                    elif ', 404 Not Found' in msg:
                        raise InvalidPart(upload_id=upload_id)
                    else:
                        raise

                # clean up the multipart-upload record
                obj = '%s/%s' % (req.object_name, upload_id)
                try:
                    req.get_response(self.app, 'DELETE', container, obj)
                except NoSuchKey:
                    # We know that this existed long enough for us to HEAD
                    pass

                result_elem = Element('CompleteMultipartUploadResult')

                # NOTE: boto with sig v4 appends the port to the HTTP_HOST
                # value in the request header when the port is non-default,
                # which makes req.host_url look like
                # http://localhost:8080:8080/path -- obviously invalid.
                # This should probably be resolved in swift.common.swob;
                # tentatively we parse and reconstruct the correct host_url
                # info here. For details, see
                # https://github.com/boto/boto/pull/3513
                parsed_url = urlparse(req.host_url)
                host_url = '%s://%s' % (parsed_url.scheme, parsed_url.hostname)
                # Why are we doing our own port parsing? Because py3 decided
                # to start raising ValueErrors on access after parsing such
                # an invalid port
                netloc = parsed_url.netloc.split('@')[-1].split(']')[-1]
                if ':' in netloc:
                    port = netloc.split(':', 2)[1]
                    host_url += ':%s' % port

                SubElement(result_elem, 'Location').text = host_url + req.path
                SubElement(result_elem, 'Bucket').text = req.container_name
                SubElement(result_elem, 'Key').text = req.object_name
                SubElement(result_elem, 'ETag').text = '"%s"' % s3_etag
                resp.headers.pop('ETag', None)
                if yielded_anything:
                    yield b'\n'
                yield tostring(result_elem,
                               xml_declaration=not yielded_anything)
            except ErrorResponse as err_resp:
                if yielded_anything:
                    err_resp.xml_declaration = False
                    yield b'\n'
                else:
                    # Oh good, we can still change HTTP status code, too!
                    resp.status = err_resp.status
                for chunk in err_resp({}, lambda *a: None):
                    yield chunk
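
The port-salvage logic above can be exercised standalone; this sketch assumes the doubled-port host header from boto described in the NOTE:

from six.moves.urllib.parse import urlparse

parsed = urlparse('http://localhost:8080:8080/path')
netloc = parsed.netloc.split('@')[-1].split(']')[-1]
port = netloc.split(':', 2)[1] if ':' in netloc else None
# port == '8080'; accessing parsed.port directly raises ValueError on py3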
Example #48
def autoload_server(model=None,
                    app_path=None,
                    session_id=None,
                    url="default",
                    relative_urls=False):
    ''' Return a script tag that embeds content from a Bokeh server session.

    Bokeh apps embedded using ``autoload_server`` will NOT set the browser
    window title.

    .. note::
        Typically you will not want to save or re-use the output of this
        function for different or multiple page loads.

    Args:
        model (Model, optional) : The object to render from the session

            If ``None`` an entire document is rendered. (default: ``None``)

            If you supply a specific model to render, you must also supply the
            session ID containing that model.

            Supplying a model is usually only useful when embedding
            a specific session that was previously created using the
            ``bokeh.client`` API.

        session_id (str, optional) : A server session ID (default: None)

            If ``None``, let the server auto-generate a random session ID.

            Supplying a session id is usually only useful when embedding
            a specific session that was previously created using the
            ``bokeh.client`` API.

        url (str, optional) : A URL to a Bokeh application on a Bokeh server

            If ``None`` the default URL ``{DEFAULT_SERVER_HTTP_URL}`` will be used.

        relative_urls (bool, optional) :
            Whether to use relative URLs for resources.

            If ``True`` the links generated for resources such as BokehJS
            JavaScript and CSS will be relative links.

            This should normally be set to ``False``, but must be set to
            ``True`` in situations where only relative URLs will work, e.g.
            when running Bokeh behind reverse-proxies under certain
            configurations.

    Returns:
        A ``<script>`` tag that will execute an autoload script loaded
        from the Bokeh Server.

    Examples:

        In the simplest and most common case, we wish to embed a Bokeh server
        application by providing the URL to where it is located.

        Suppose the app is running (perhaps behind Nginx or some other proxy)
        at ``http://app.server.org/foo/myapp``. We wish to embed this app in
        a page at ``mysite.com``. The following will provide an HTML script
        tag to do that, that can be included in ``mysite.com``:

        .. code-block:: python

            script = autoload_server(url="http://app.server.org/foo/myapp")

        Note that in order for this embedding to work, the Bokeh server needs
        to have been configured to allow connections from the public URL where
        the embedding happens. In this case, if the autoload script is run from
        a page located at ``http://mysite.com/report`` then the Bokeh server
        must have been started with an ``--allow-websocket-origin`` option
        specifically allowing websocket connections from pages that originate
        from ``mysite.com``:

        .. code-block:: sh

            bokeh serve myapp.py --allow-websocket-origin=mysite.com

        If an autoload script runs from an origin that has not been allowed,
        the Bokeh server will return a 403 error.

        It's also possible to initiate sessions on a Bokeh server from
        Python, using the functions :func:`~bokeh.client.push_session` and
        :func:`~bokeh.client.pull_session`. This can be useful in advanced
        situations where you may want to "set up" the session before you
        embed it. For example, you might want to load up a session and modify
        ``session.document`` in some way (perhaps adding per-user data).

        In such cases you will pass the session id as an argument as well:

        .. code-block:: python

            script = autoload_server(session_id="some_session_id",
                                     url="http://app.server.org/foo/myapp")

        .. warning::
            It is typically a bad idea to re-use the same ``session_id`` for
            every page load. This is likely to create scalability and security
            problems, and will cause "shared Google doc" behaviour, which is
            typically not desired.

    '''
    if app_path is not None:
        deprecated(
            (0, 12, 5), "app_path", "url",
            "Now pass entire app URLS in the url arguments, e.g. 'url=http://foo.com:5010/bar/myapp'"
        )
        if not app_path.startswith("/"):
            app_path = "/" + app_path
        url = url + app_path

    coords = _SessionCoordinates(url=url, session_id=session_id)

    elementid = make_id()

    # empty model_id means render the entire doc from session_id
    model_id = ""
    if model is not None:
        model_id = model._id

    if model_id and session_id is None:
        raise ValueError(
            "A specific model was passed to autoload_server() but no session_id; "
            "this doesn't work because the server will generate a fresh session "
            "which won't have the model in it.")

    src_path = coords.url + "/autoload.js?bokeh-autoload-element=" + elementid

    if url != "default":
        app_path = urlparse(url).path.rstrip("/")
        if not app_path.startswith("/"):
            app_path = "/" + app_path
        src_path += "&bokeh-app-path=" + app_path

    if not relative_urls:
        src_path += "&bokeh-absolute-url=" + coords.url

    # we want the server to generate the ID, so the autoload script
    # can be embedded in a static page while every user still gets
    # their own session. So we omit bokeh-session-id rather than
    # using a generated ID.
    if coords.session_id_allowing_none is not None:
        src_path = src_path + "&bokeh-session-id=" + session_id

    tag = AUTOLOAD_TAG.render(
        src_path=src_path,
        app_path=app_path,
        elementid=elementid,
        modelid=model_id,
    )

    return encode_utf8(tag)
Example #49
    def return_response(self, method, path, data, headers, response):

        path = to_str(path)
        method = to_str(method)
        bucket_name = get_bucket_name(path, headers)

        # No path-name based bucket name? Try host-based
        hostname_parts = headers['host'].split('.')
        if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
            bucket_name = hostname_parts[0]

        # POST requests to S3 may include a success_action_redirect or
        # success_action_status field, which should be used to redirect a
        # client to a new location.
        key = None
        if method == 'POST':
            key, redirect_url = multipart_content.find_multipart_key_value(data, headers)

            if key and redirect_url:
                response.status_code = 303
                response.headers['Location'] = expand_redirect_url(redirect_url, key, bucket_name)
                LOGGER.debug('S3 POST {} to {}'.format(response.status_code, response.headers['Location']))

            key, status_code = multipart_content.find_multipart_key_value(
                data, headers, 'success_action_status')
            if response.status_code == 200 and status_code == '201' and key:
                response.status_code = 201
                response._content = self.get_201_reponse(key, bucket_name)
                response.headers['Content-Length'] = str(len(response._content))
                response.headers['Content-Type'] = 'application/xml; charset=utf-8'
                return response

        parsed = urlparse.urlparse(path)
        bucket_name_in_host = headers['host'].startswith(bucket_name)

        should_send_notifications = all([
            method in ('PUT', 'POST', 'DELETE'),
            '/' in path[1:] or bucket_name_in_host,
            # check if this is an actual put object request, because it could also be
            # a put bucket request with a path like this: /bucket_name/
            bucket_name_in_host or (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
            self.is_query_allowable(method, parsed.query)
        ])

        # get subscribers and send bucket notifications
        if should_send_notifications:
            # if we already have a good key, use it, otherwise examine the path
            if key:
                object_path = '/' + key
            elif bucket_name_in_host:
                object_path = parsed.path
            else:
                parts = parsed.path[1:].split('/', 1)
                object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]
            version_id = response.headers.get('x-amz-version-id', None)

            send_notifications(method, bucket_name, object_path, version_id)

        # publish event for creation/deletion of buckets:
        if method in ('PUT', 'DELETE') and ('/' not in path[1:] or len(path[1:].split('/')[1]) <= 0):
            event_type = (event_publisher.EVENT_S3_CREATE_BUCKET if method == 'PUT'
                else event_publisher.EVENT_S3_DELETE_BUCKET)
            event_publisher.fire_event(event_type, payload={'n': event_publisher.get_hash(bucket_name)})

        # fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
        if method == 'PUT' and parsed.query == 'policy':
            response._content = ''
            response.status_code = 204
            return response

        # emulate ErrorDocument functionality if a website is configured

        if method == 'GET' and response.status_code == 404 and parsed.query != 'website':
            s3_client = aws_stack.connect_to_service('s3')

            try:
                # Verify the bucket exists in the first place--if not, we want normal processing of the 404
                s3_client.head_bucket(Bucket=bucket_name)
                website_config = s3_client.get_bucket_website(Bucket=bucket_name)
                error_doc_key = website_config.get('ErrorDocument', {}).get('Key')

                if error_doc_key:
                    error_object = s3_client.get_object(Bucket=bucket_name, Key=error_doc_key)
                    response.status_code = 200
                    response._content = error_object['Body'].read()
                    response.headers['content-length'] = len(response._content)
            except ClientError:
                # Pass on the 404 as usual
                pass

        if response:
            reset_content_length = False

            # append CORS headers and other annotations/patches to response
            append_cors_headers(bucket_name, request_method=method, request_headers=headers, response=response)
            append_last_modified_headers(response=response)
            append_list_objects_marker(method, path, data, response)
            fix_location_constraint(response)
            fix_range_content_type(bucket_name, path, headers, response)
            fix_delete_objects_response(bucket_name, method, parsed, data, headers, response)

            # Remove body from PUT response on presigned URL
            # https://github.com/localstack/localstack/issues/1317
            if method == 'PUT' and ('X-Amz-Security-Token=' in path or
                    'X-Amz-Credential=' in path or 'AWSAccessKeyId=' in path):
                response._content = ''
                reset_content_length = True

            response_content_str = None
            try:
                response_content_str = to_str(response._content)
            except Exception:
                pass

            # Honor response header overrides
            # https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
            if method == 'GET':
                query_map = urlparse.parse_qs(parsed.query, keep_blank_values=True)
                for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items():
                    if param_name in query_map:
                        response.headers[header_name] = query_map[param_name][0]

            if response_content_str and response_content_str.startswith('<'):
                is_bytes = isinstance(response._content, six.binary_type)
                response._content = response_content_str

                append_last_modified_headers(response=response, content=response_content_str)

                # We need to un-pretty-print the XML, otherwise we run into this issue with Spark:
                # https://github.com/jserver/mock-s3/pull/9/files
                # https://github.com/localstack/localstack/issues/183
                # Note: yet, we need to make sure we have a newline after the first line: <?xml ...>\n
                # Note: make sure to return XML docs verbatim: https://github.com/localstack/localstack/issues/1037
                if method != 'GET' or not is_object_specific_request(path, headers):
                    response._content = re.sub(r'([^\?])>\n\s*<', r'\1><', response_content_str, flags=re.MULTILINE)

                # update Location information in response payload
                response._content = self._update_location(response._content, bucket_name)

                # convert back to bytes
                if is_bytes:
                    response._content = to_bytes(response._content)

                # fix content-type: https://github.com/localstack/localstack/issues/618
                #                   https://github.com/localstack/localstack/issues/549
                #                   https://github.com/localstack/localstack/issues/854
                if 'text/html' in response.headers.get('Content-Type', '') \
                        and not response_content_str.lower().startswith('<!doctype html'):
                    response.headers['Content-Type'] = 'application/xml; charset=utf-8'

                reset_content_length = True

            # update content-length headers (fix https://github.com/localstack/localstack/issues/541)
            if method == 'DELETE':
                reset_content_length = True

            if reset_content_length:
                response.headers['content-length'] = len(response._content)
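
A sketch of the header-override lookup above (hypothetical object path; assumes ALLOWED_HEADER_OVERRIDES maps 'response-content-type' to 'Content-Type', per the AWS docs linked in the code):

from six.moves.urllib.parse import urlparse, parse_qs

query = urlparse('/bucket/key?response-content-type=text/plain').query
params = parse_qs(query, keep_blank_values=True)
# params['response-content-type'][0] == 'text/plain' would be copied into
# the response's 'Content-Type' header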
Example #50
    def forward(self, method):
        path = self.path
        if '://' in path:
            path = '/' + path.split('://', 1)[1].split('/', 1)[1]
        proxy_url = 'http://%s%s' % (self.proxy.forward_host, path)
        target_url = self.path
        if '://' not in target_url:
            target_url = 'http://%s%s' % (self.proxy.forward_host, target_url)
        data = None
        if method in ['POST', 'PUT', 'PATCH']:
            data_string = self.data_bytes
            try:
                if not isinstance(data_string, string_types):
                    data_string = data_string.decode(DEFAULT_ENCODING)
                data = json.loads(data_string)
            except Exception as e:
                # unable to parse JSON, fallback to verbatim string/bytes
                data = data_string

        forward_headers = CaseInsensitiveDict(self.headers)
        # update original "Host" header (moto s3 relies on this behavior)
        if not forward_headers.get('Host'):
            forward_headers['host'] = urlparse(target_url).netloc
        if 'localhost.atlassian.io' in forward_headers.get('Host'):
            forward_headers['host'] = 'localhost'

        try:
            response = None
            modified_request = None
            # update listener (pre-invocation)
            if self.proxy.update_listener:
                listener_result = self.proxy.update_listener(
                    method=method,
                    path=path,
                    data=data,
                    headers=forward_headers,
                    return_forward_info=True)
                if isinstance(listener_result, Response):
                    response = listener_result
                elif isinstance(listener_result, Request):
                    modified_request = listener_result
                    data = modified_request.data
                    forward_headers = modified_request.headers
                elif listener_result is not True:
                    # get status code from the result, or fall back to
                    # 503 (Service Unavailable)
                    code = listener_result if isinstance(listener_result,
                                                         int) else 503
                    self.send_response(code)
                    self.end_headers()
                    return
            if response is None:
                if modified_request:
                    response = self.method(proxy_url,
                                           data=modified_request.data,
                                           headers=modified_request.headers)
                else:
                    response = self.method(proxy_url,
                                           data=self.data_bytes,
                                           headers=forward_headers)
            # update listener (post-invocation)
            if self.proxy.update_listener:
                updated_response = self.proxy.update_listener(
                    method=method,
                    path=path,
                    data=data,
                    headers=forward_headers,
                    response=response)
                if isinstance(updated_response, Response):
                    response = updated_response

            # copy headers and return response
            self.send_response(response.status_code)

            content_length_sent = False
            for header_key, header_value in iteritems(response.headers):
                self.send_header(header_key, header_value)
                content_length_sent = (content_length_sent or
                                       header_key.lower() == 'content-length')
            if not content_length_sent:
                self.send_header(
                    'Content-Length',
                    '%s' % len(response.content) if response.content else 0)

            # allow pre-flight CORS headers by default
            if 'Access-Control-Allow-Origin' not in response.headers:
                self.send_header('Access-Control-Allow-Origin', '*')
            if 'Access-Control-Allow-Methods' not in response.headers:
                self.send_header('Access-Control-Allow-Methods',
                                 ','.join(CORS_ALLOWED_METHODS))
            if 'Access-Control-Allow-Headers' not in response.headers:
                self.send_header('Access-Control-Allow-Headers',
                                 ','.join(CORS_ALLOWED_HEADERS))

            self.end_headers()
            if response.content and len(response.content):
                self.wfile.write(bytes_(response.content))
            self.wfile.flush()
        except Exception as e:
            trace = str(traceback.format_exc())
            conn_error = 'ConnectionRefusedError' in trace or 'NewConnectionError' in trace
            if not self.proxy.quiet or not conn_error:
                LOGGER.error("Error forwarding request: %s %s" % (e, trace))
            self.send_response(502)  # bad gateway
            self.end_headers()
Example #51
    def do_GET(self):
        addon = xbmcaddon.Addon('plugin.video.youtube')
        dash_proxy_enabled = addon.getSetting('kodion.mpd.videos') == 'true' and addon.getSetting('kodion.video.quality.mpd') == 'true'
        api_config_enabled = addon.getSetting('youtube.api.config.page') == 'true'

        if self.path == '/client_ip':
            client_json = json.dumps({"ip": "{ip}".format(ip=self.client_address[0])})
            self.send_response(200)
            self.send_header('Content-Type', 'application/json; charset=utf-8')
            self.send_header('Content-Length', len(client_json))
            self.end_headers()
            self.wfile.write(client_json.encode('utf-8'))

        logger.log_debug('HTTPServer: Request uri path |{proxy_path}|'.format(proxy_path=self.path))

        if not self.connection_allowed():
            self.send_error(403)
        else:
            if dash_proxy_enabled and self.path.endswith('.mpd'):
                file_path = os.path.join(self.base_path, self.path.strip('/').strip('\\'))
                file_chunk = True
                logger.log_debug('HTTPServer: Request file path |{file_path}|'.format(file_path=file_path.encode('utf-8')))
                try:
                    with open(file_path, 'rb') as f:
                        self.send_response(200)
                        self.send_header('Content-Type', 'application/dash+xml')
                        self.send_header('Content-Length', os.path.getsize(file_path))
                        self.end_headers()
                        while file_chunk:
                            file_chunk = f.read(self.chunk_size)
                            if file_chunk:
                                self.wfile.write(file_chunk)
                except IOError:
                    response = 'File Not Found: |{proxy_path}| -> |{file_path}|'.format(proxy_path=self.path, file_path=file_path.encode('utf-8'))
                    self.send_error(404, response)
            elif api_config_enabled and self.path == '/api':
                html = self.api_config_page()
                html = html.encode('utf-8')
                self.send_response(200)
                self.send_header('Content-Type', 'text/html; charset=utf-8')
                self.send_header('Content-Length', len(html))
                self.end_headers()
                for chunk in self.get_chunks(html):
                    self.wfile.write(chunk)
            elif api_config_enabled and self.path.startswith('/api_submit'):
                addon = xbmcaddon.Addon('plugin.video.youtube')
                i18n = addon.getLocalizedString
                xbmc.executebuiltin('Dialog.Close(addonsettings,true)')
                old_api_key = addon.getSetting('youtube.api.key')
                old_api_id = addon.getSetting('youtube.api.id')
                old_api_secret = addon.getSetting('youtube.api.secret')
                query = urlparse(self.path).query
                params = parse_qs(query)
                api_key = params.get('api_key', [None])[0]
                api_id = params.get('api_id', [None])[0]
                api_secret = params.get('api_secret', [None])[0]
                if api_key and api_id and api_secret:
                    footer = i18n(30638)
                else:
                    footer = u''
                if re.search(r'api_key=(?:&|$)', query):
                    api_key = ''
                if re.search(r'api_id=(?:&|$)', query):
                    api_id = ''
                if re.search(r'api_secret=(?:&|$)', query):
                    api_secret = ''
                updated = []
                if api_key is not None and api_key != old_api_key:
                    addon.setSetting('youtube.api.key', api_key)
                    updated.append(i18n(30201))
                if api_id is not None and api_id != old_api_id:
                    addon.setSetting('youtube.api.id', api_id)
                    updated.append(i18n(30202))
                if api_secret is not None and api_secret != old_api_secret:
                    updated.append(i18n(30203))
                    addon.setSetting('youtube.api.secret', api_secret)
                if addon.getSetting('youtube.api.key') and addon.getSetting('youtube.api.id') and \
                        addon.getSetting('youtube.api.secret'):
                    enabled = i18n(30636)
                else:
                    enabled = i18n(30637)
                if not updated:
                    updated = i18n(30635)
                else:
                    updated = i18n(30631) % u', '.join(updated)
                html = self.api_submit_page(updated, enabled, footer)
                html = html.encode('utf-8')
                self.send_response(200)
                self.send_header('Content-Type', 'text/html; charset=utf-8')
                self.send_header('Content-Length', len(html))
                self.end_headers()
                for chunk in self.get_chunks(html):
                    self.wfile.write(chunk)
            elif self.path == '/ping':
                self.send_error(204)
            else:
                self.send_error(501)
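
A sketch of why the regex checks above are needed: parse_qs drops blank values by default, so a deliberately cleared field would otherwise be indistinguishable from a missing one (hypothetical query string):

from six.moves.urllib.parse import urlparse, parse_qs
import re

query = urlparse('/api_submit?api_key=&api_id=abc').query
parse_qs(query)                              # {'api_id': ['abc']} -- blank api_key is dropped
bool(re.search(r'api_key=(?:&|$)', query))   # True -> treat api_key as cleared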
Example #52
def get_domain(url):
    domain = urlparse(url).netloc
    return domain
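
A usage sketch (hypothetical URL); note that the netloc keeps any explicit port:

print(get_domain('https://example.com:8443/path?q=1'))  # 'example.com:8443'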
Example #53
def validate_url(url, ip_whitelist):
    # If it doesn't look like a URL, ignore it.
    if not (url.lstrip().startswith('http://')
            or url.lstrip().startswith('https://')):
        return url

    # Extract hostname component
    parsed_url = urlparse(url).netloc
    # If credentials are in this URL, we need to strip those.
    if parsed_url.count('@') > 0:
        # credentials.
        parsed_url = parsed_url[parsed_url.rindex('@') + 1:]
    # Percent encoded colons and other characters will not be resolved as such
    # so we don't have to either.

    # Sometimes the netloc will contain the port which is not desired, so we
    # need to extract that.
    port = None
    # However, it could ALSO be an IPv6 address they've supplied.
    if ':' in parsed_url:
        # IPv6 addresses already contain colons (seemingly always more than two)
        if parsed_url.count(':') >= 2:
            # Since IPv6 already use colons extensively, they wrap it in
            # brackets when there is a port, e.g. http://[2001:db8:1f70::999:de8:7648:6e8]:100/
            # However if it ends with a ']' then there is no port after it and
            # they've wrapped it in brackets just for fun.
            if ']' in parsed_url and not parsed_url.endswith(']'):
                # If this +1 throws a range error, we don't care, their url
                # shouldn't end with a colon.
                idx = parsed_url.rindex(':')
                # We parse as an int and let this fail ungracefully if parsing
                # fails because we desire to fail closed rather than open.
                port = int(parsed_url[idx + 1:])
                parsed_url = parsed_url[:idx]
            else:
                # Plain ipv6 without port
                pass
        else:
            # This should finally be ipv4 with port. It cannot be IPv6 as that
            # was caught by earlier cases, and it cannot be due to credentials.
            idx = parsed_url.rindex(':')
            port = int(parsed_url[idx + 1:])
            parsed_url = parsed_url[:idx]

    # safe to log out, no credentials/request path, just an IP + port
    log.debug("parsed url, port: %s : %s", parsed_url, port)
    # Call getaddrinfo to resolve hostname into tuples containing IPs.
    addrinfo = socket.getaddrinfo(parsed_url, port)
    # Get the IP addresses that this entry resolves to (uniquely)
    # We drop:
    #   AF_* family: It will resolve to AF_INET or AF_INET6, getaddrinfo(3) doesn't even mention AF_UNIX,
    #   socktype: We don't care if a stream/dgram/raw protocol
    #   protocol: we don't care if it is tcp or udp.
    addrinfo_results = set([info[4][0] for info in addrinfo])
    # There may be multiple (e.g. IPv4 + IPv6 or DNS round robin). Any one of these
    # could resolve to a local addresses (and could be returned by chance),
    # therefore we must check them all.
    for raw_ip in addrinfo_results:
        # Convert to an IP object so we can tell if it is in private space.
        ip = ipaddress.ip_address(unicodify(raw_ip))
        # If this is a private address
        if ip.is_private:
            results = []
            # If this IP is not anywhere in the whitelist
            for whitelisted in ip_whitelist:
                # If it's an IP address range (rather than a single one...)
                if hasattr(whitelisted, 'subnets'):
                    results.append(ip in whitelisted)
                else:
                    results.append(ip == whitelisted)

            if any(results):
                # If we had any True, then THIS (and ONLY THIS) IP address
                # that the specific DNS entry resolved to is whitelisted and
                # safe to access. But we cannot exit here; we must ensure that
                # all IPs that the DNS entry resolves to are likewise safe.
                pass
            else:
                # Otherwise, we deny access.
                raise ConfigDoesNotAllowException(
                    "Access to this address is not permitted by server configuration"
                )
    return url
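
A usage sketch with hypothetical private addresses; a private target is only allowed when it matches the whitelist:

import ipaddress

validate_url('http://10.0.0.5:8080/data',
             [ipaddress.ip_network(u'10.0.0.0/8')])  # returns the URL unchanged
validate_url('http://10.0.0.5:8080/data', [])        # raises ConfigDoesNotAllowException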
Example #54
    def forward_request(self, method, path, data, headers):

        # parse path and query params
        parsed_path = urlparse.urlparse(path)

        # Make sure we use 'localhost' as forward host, to ensure moto uses path style addressing.
        # Note that all S3 clients using LocalStack need to enable path style addressing.
        if 's3.amazonaws.com' not in headers.get('host', ''):
            headers['host'] = 'localhost'

        # check content md5 hash integrity if not a copy request
        if 'Content-MD5' in headers and not self.is_s3_copy_request(headers, path):
            response = check_content_md5(data, headers)
            if response is not None:
                return response

        modified_data = None

        # check bucket name
        bucket_name = get_bucket_name(path, headers)
        if method == 'PUT' and not re.match(BUCKET_NAME_REGEX, bucket_name):
            if len(parsed_path.path) <= 1:
                return error_response('Unable to extract valid bucket name. Please ensure that your AWS SDK is ' +
                    'configured to use path style addressing, or send a valid <Bucket>.s3.amazonaws.com "Host" header',
                    'InvalidBucketName', status_code=400)
            return error_response('The specified bucket is not valid.', 'InvalidBucketName', status_code=400)

        # TODO: For some reason, moto doesn't allow us to put a location constraint on us-east-1
        to_find = to_bytes('<LocationConstraint>us-east-1</LocationConstraint>')
        if data and data.startswith(to_bytes('<')) and to_find in data:
            modified_data = data.replace(to_find, to_bytes(''))

        # If this request contains streaming v4 authentication signatures, strip them from the message
        # Related issue: https://github.com/localstack/localstack/issues/98
        # TODO we should evaluate whether to replace moto s3 with scality/S3:
        # https://github.com/scality/S3/issues/237
        if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
            modified_data = strip_chunk_signatures(modified_data or data)
            headers['content-length'] = headers.get('x-amz-decoded-content-length')

        # POST requests to S3 may include a "${filename}" placeholder in the
        # key, which should be replaced with an actual file name before storing.
        if method == 'POST':
            original_data = modified_data or data
            expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
            if expanded_data is not original_data:
                modified_data = expanded_data

        # If no content-type is provided, 'binary/octet-stream' should be used
        # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
        if method == 'PUT' and not headers.get('content-type'):
            headers['content-type'] = 'binary/octet-stream'

        # persist this API call to disk
        persistence.record('s3', method, path, data, headers)

        # parse query params
        query = parsed_path.query
        path = parsed_path.path
        bucket = path.split('/')[1]
        query_map = urlparse.parse_qs(query, keep_blank_values=True)

        # remap metadata query params (not supported in moto) to request headers
        append_metadata_headers(method, query_map, headers)

        if query == 'notification' or 'notification' in query_map:
            # handle and return response for ?notification request
            response = handle_notification_request(bucket, method, data)
            return response

        if query == 'cors' or 'cors' in query_map:
            if method == 'GET':
                return get_cors(bucket)
            if method == 'PUT':
                return set_cors(bucket, data)
            if method == 'DELETE':
                return delete_cors(bucket)

        if query == 'lifecycle' or 'lifecycle' in query_map:
            if method == 'GET':
                return get_lifecycle(bucket)
            if method == 'PUT':
                return set_lifecycle(bucket, data)

        if query == 'replication' or 'replication' in query_map:
            if method == 'GET':
                return get_replication(bucket)
            if method == 'PUT':
                return set_replication(bucket, data)

        if query == 'encryption' or 'encryption' in query_map:
            if method == 'GET':
                return get_encryption(bucket)
            if method == 'PUT':
                return set_encryption(bucket, data)

        if query == 'object-lock' or 'object-lock' in query_map:
            if method == 'GET':
                return get_object_lock(bucket)
            if method == 'PUT':
                return set_object_lock(bucket, data)

        if modified_data is not None:
            return Request(data=modified_data, headers=headers, method=method)
        return True
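
A sketch of the subresource detection above: a bare '?lifecycle' query parses to a blank value, which is why both the raw query string and the parsed map are checked (hypothetical path):

from six.moves.urllib.parse import urlparse, parse_qs

parsed = urlparse('/my-bucket?lifecycle')
parse_qs(parsed.query, keep_blank_values=True)  # {'lifecycle': ['']}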
Example #55
def update_config():
    ''' This code needs to be run when the config is changed to take those
    changes into account. It is called whenever a plugin is loaded as the
    plugin might have changed the config values (for instance it might
    change ckan.site_url) '''

    webassets_init()

    for plugin in p.PluginImplementations(p.IConfigurer):
        # must do update in place as this does not work:
        # config = plugin.update_config(config)
        plugin.update_config(config)

    # Set whitelisted env vars on config object
    # This is set up before globals are initialized

    ckan_db = os.environ.get('CKAN_DB', None)
    if ckan_db:
        msg = 'Setting CKAN_DB as an env var is deprecated and will be' \
            ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'
        log.warn(msg)
        config['sqlalchemy.url'] = ckan_db

    for option in CONFIG_FROM_ENV_VARS:
        from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)
        if from_env:
            config[option] = from_env

    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    site_url = config.get('ckan.site_url', '')
    if not site_url:
        raise RuntimeError(
            'ckan.site_url is not configured and it must have a value.'
            ' Please amend your .ini file.')
    if not site_url.lower().startswith('http'):
        raise RuntimeError(
            'ckan.site_url should be a full URL, including the schema '
            '(http or https)')

    display_timezone = config.get('ckan.display_timezone', '')
    if (display_timezone and display_timezone != 'server'
            and display_timezone not in pytz.all_timezones):
        raise CkanConfigurationException(
            "ckan.display_timezone is not 'server' or a valid timezone")

    # Remove trailing slash from site_url if present
    config['ckan.site_url'] = config['ckan.site_url'].rstrip('/')

    ckan_host = config['ckan.host'] = urlparse(site_url).netloc
    if config.get('ckan.site_id') is None:
        if ':' in ckan_host:
            ckan_host, port = ckan_host.split(':')
        assert ckan_host, 'You need to configure ckan.site_url or ' \
                          'ckan.site_id for SOLR search-index rebuild to work.'
        config['ckan.site_id'] = ckan_host

    # ensure that a favicon has been set
    favicon = config.get('ckan.favicon', '/base/images/ckan.ico')
    config['ckan.favicon'] = favicon

    # Init SOLR settings and check if the schema is compatible
    # from ckan.lib.search import SolrSettings, check_solr_schema_version

    # lib.search is imported here as we need the config enabled and parsed
    search.SolrSettings.init(config.get('solr_url'), config.get('solr_user'),
                             config.get('solr_password'))
    search.check_solr_schema_version()

    lib_plugins.reset_package_plugins()
    lib_plugins.register_package_plugins()
    lib_plugins.reset_group_plugins()
    lib_plugins.register_group_plugins()

    # initialise the globals
    app_globals.app_globals._init()

    helpers.load_plugin_helpers()
    config['pylons.h'] = helpers.helper_functions

    # Templates and CSS loading from configuration
    valid_base_templates_folder_names = ['templates']
    templates = config.get('ckan.base_templates_folder', 'templates')
    config['ckan.base_templates_folder'] = templates

    if templates not in valid_base_templates_folder_names:
        raise CkanConfigurationException(
            'You provided an invalid value for ckan.base_templates_folder. '
            'Possible values are: "templates".')

    jinja2_templates_path = os.path.join(root, templates)
    log.info('Loading templates from %s' % jinja2_templates_path)
    template_paths = [jinja2_templates_path]

    extra_template_paths = config.get('extra_template_paths', '')
    if extra_template_paths:
        # must be first for them to override defaults
        template_paths = extra_template_paths.split(',') + template_paths
    config['computed_template_paths'] = template_paths

    # Markdown ignores the logger config, so to get rid of excessive
    # markdown debug messages in the log, set it to the level of the
    # root logger.
    logging.getLogger("MARKDOWN").setLevel(logging.getLogger().level)

    # CONFIGURATION OPTIONS HERE (note: all config options will override
    # any Pylons config options)

    # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)
    # to eliminate database errors due to stale pooled connections
    config.setdefault('sqlalchemy.pool_pre_ping', True)
    # Initialize SQLAlchemy
    engine = sqlalchemy.engine_from_config(config)
    model.init_model(engine)

    for plugin in p.PluginImplementations(p.IConfigurable):
        plugin.configure(config)

    # reset the template cache - we do this here so that when we load the
    # environment it is clean
    render.reset_template_info_cache()

    # clear other caches
    logic.clear_actions_cache()
    logic.clear_validators_cache()
    authz.clear_auth_functions_cache()

    # Here we create the site user if they are not already in the database
    try:
        logic.get_action('get_site_user')({'ignore_auth': True}, None)
    except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
        # (ProgrammingError for Postgres, OperationalError for SQLite)
        # The database is not initialised.  This is a bit dirty.  This occurs
        # when running tests.
        pass
    except sqlalchemy.exc.InternalError:
        # The database is not initialised.  Travis hits this
        pass

    # Close current session and open database connections to ensure a clean
    # environment even if an error occurs later on
    model.Session.remove()
    model.Session.bind.dispose()
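
A sketch of how ckan.site_id falls out of ckan.site_url above (hypothetical URL):

from six.moves.urllib.parse import urlparse

host = urlparse('http://demo.ckan.org:5000').netloc  # 'demo.ckan.org:5000'
site_id = host.split(':')[0]                         # 'demo.ckan.org'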
Example #56
File: package.py Project: wdmapp/spack
class Pythia6(CMakePackage):
    """PYTHIA is a program for the generation of high-energy physics events,
    i.e. for the description of collisions at high energies between elementary
    particles such as e+, e-, p and pbar in various combinations.

    PYTHIA6 is a Fortran package which is no longer maintained: new
    prospective users should use Pythia8 instead.

    This recipe includes patches required to interoperate with Root.
    """

    homepage = 'https://pythiasix.hepforge.org/'
    url = 'http://www.hepforge.org/archive/pythiasix/pythia-6.4.28.tgz'

    version('6.4.28',
            sha256='01cbff47e99365b5e46f6d62c1735d3cae1932c4710604850d59f538cb758020')

    # Root's TPythia6 interface requires extra sources to be built into
    # the Pythia6 library.
    variant('root', default=False,
            description='Build extra (non OEM) code to allow use by Root.')

    # The maximum number of particles (NMXHEP) supported by the arrays
    # in the /HEPEVT/ COMMON block may need tweaking if pythia6 is
    # intended to be used with other code with different requirements.
    variant('nmxhep', default=4000, values=_is_integral, description='Extent of particle arrays in the /HEPEVT/ COMMON block.')

    # In the unlikely event of new versions >6.4.28,
    # pythia6_common_address.c should be checked for accuracy against
    # the definitions of the relevant COMMON blocks in the Pythia6
    # Fortran source, and patched if necessary.
    resource(
        name='root-pythia6-shim',
        url='https://root.cern.ch/download/pythia6.tar.gz',
        sha256='d613dcb27c905710e2f13a934913cc5545e3e5d0e477e580107385d9ef260056',
        when='+root',
        destination='.',
        placement={'pythia6_common_address.c': 'pythia6_common_address.c',
                   'tpythia6_called_from_cc.F': 'tpythia6_called_from_cc.F'}
    )

    # Download examples separately.
    examples \
        = {'main60.f':
           'd713b8b267c4405cc9d31c58bba267ae3378902a26fa52393003bf35fd56902c',
           'main61.f':
           'e2a3d5524e43d16f60d9edc6e7198d41006d1ba127fb7b0e265aa509e13128b4',
           'main62.f':
           'dce822a72fe2d6cfb6d43c479ba98928fb0a39290a6ee26fdcacc66229313045',
           'main63.f':
           'b2dd343b3cd7969979b80c564d82b92e0d776d66bb19d346b52f2af27adeb62d',
           'main64.f':
           'a35f2f232e6e0d68d67fd350d4d46b0a353f5c7811de0c2db47ae16d17ed1843',
           'main65.f':
           '03c81e0bbd77710b0461e18265e80e3bd51360b9f416c86013401f882ac39a5e',
           'main66.f':
           '50dd9221a7e84ee7c5005db6758e5880d190eab8cce8a52e7c7b29e9fee8d3da',
           'main67.f':
           '1984aa90fe4e3d628c3bcceaa6fca1b08231d835158d975fa171337d55ca4a2f',
           'main68.f':
           'c8d6def1298477ffec6a1d98c7e02dcee0debe6badc7c63f752f9194b82f212d',
           'main69.f':
           'd14399d43f8c4b670907558849d3e5a4d7625d027de3c10002185c58b20b061a',
           'main71.f':
           '2e47af778003b0596e8999f0914033c6eda7335211b9e96ac3075d45a3cde12e',
           'main72.f':
           'e27ce2af68b40436c51c65767ebb5ff0955ab8dfdfc5fc5c217ae73cd53070da',
           'main73.f':
           '567db2d1a66896ce5103ffa7e10742442b0e934088883e91339536e0249772c4',
           'main75.f':
           'b850986c43a5af1e7d13b66d22b01584e3c68bb338be32eac39e31f971b80be4',
           'main77.f':
           '0679852c4f35719531ad38dc1dbb374b884181eb5e483c36d8867ccb449177a4',
           'main78.f':
           '5babc59fe6a0bd57d97ec398cf01745bc9b72ce6ce0711e934d53c7821e21912',
           'main79.f':
           '27ca84d6d0877f3605cbc1b865c3e1f571e7d2c9301094a4122e726a903dbead',
           'main81.f':
           'b02fecd1cd0f9ba16eaae53e9da0ba602569fdf0e46856cccdfb4c5b7ba33e8b',
           'ttbar.lhe':
           'db772b69ab4e0300d973b57414523ac8e7fa8535eac49ee52a6b69b1c131983d'}

    for example, checksum in iteritems(examples):
        resource(name=example,
                 url='http://pythiasix.hepforge.org/examples/{0}'.
                 format(example),
                 sha256=checksum,
                 expand=False,
                 destination='example',
                 placement={example: example})

    # Docs.
    docs = {
        'http://www.hepforge.org/archive/pythiasix/update_notes-6.4.28.txt': 'a229be4ba9a4eb65a9d53600a5f388b620038d56694c6cb4671c2be224b67751',
        'http://home.thep.lu.se/~torbjorn/pythia6/lutp0613man2.pdf': '03d637310ea80f0d7aea761492bd38452c602890d8cf913a1ec9edacd79fa43d',
        'https://pythiasix.hepforge.org/pythia6-announcement.txt': '2a52def41f0c93e32e0db58dbcf072b987ebfbd32e42ccfc1f9382fcf65f1271'
    }

    for docurl, checksum in iteritems(docs):
        doc = os.path.basename(urlparse(docurl).path)
        resource(name=doc,
                 url=docurl,
                 sha256=checksum,
                 expand=False,
                 destination='doc',
                 placement={doc: doc})

    # The included patch customizes some routines provided in dummy form
    # by the original source to be useful out of the box in the vast
    # majority of cases. If your case is different, platform- or
    # variant-based adjustments should be made.
    patch('pythia6.patch', level=0)

    def patch(self):
        # Use our provided CMakeLists.txt. The Makefile provided with
        # the source is GCC (gfortran) specific, and would have required
        # additional patching for the +root variant.
        llnl.util.filesystem.copy(os.path.join(os.path.dirname(__file__),
                                               'CMakeLists.txt'),
                                  self.stage.source_path)
        # Apply the variant value at the relevant place in the source.
        filter_file(r'^(\s+PARAMETER\s*\(\s*NMXHEP\s*=\s*)\d+',
                    r'\1{0}'.format(self.spec.variants['nmxhep'].value),
                    'pyhepc.f')

    def cmake_args(self):
        args = ['-DPYTHIA6_VERSION={0}'.format(self.version.dotted)]
        return args
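
A sketch of the filename derivation used in the docs loop above (URL taken from the docs dict):

import os
from six.moves.urllib.parse import urlparse

doc = os.path.basename(urlparse(
    'http://home.thep.lu.se/~torbjorn/pythia6/lutp0613man2.pdf').path)
# doc == 'lutp0613man2.pdf'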
Example #57
def __main__():
    filename = sys.argv[1]
    try:
        max_file_size = int(sys.argv[2])
    except Exception:
        max_file_size = 0

    job_params, params = load_input_parameters(filename)
    if job_params is None:  # using an older tabular file
        enhanced_handling = False
        job_params = dict(param_dict=params)
        job_params['output_data'] = [dict(out_data_name='output',
                                          ext='data',
                                          file_name=filename,
                                          extra_files_path=None)]
        job_params['job_config'] = dict(GALAXY_ROOT_DIR=GALAXY_ROOT_DIR, GALAXY_DATATYPES_CONF_FILE=GALAXY_DATATYPES_CONF_FILE, TOOL_PROVIDED_JOB_METADATA_FILE=TOOL_PROVIDED_JOB_METADATA_FILE)
    else:
        enhanced_handling = True
        json_file = open(job_params['job_config']['TOOL_PROVIDED_JOB_METADATA_FILE'], 'w')  # specially named file for output junk to pass onto set metadata

    datatypes_registry = Registry()
    datatypes_registry.load_datatypes(root_dir=job_params['job_config']['GALAXY_ROOT_DIR'], config=job_params['job_config']['GALAXY_DATATYPES_CONF_FILE'])

    URL = params.get('URL', None)  # using exactly URL indicates that only one dataset is being downloaded
    URL_method = params.get('URL_method', None)
    token = 'GtAuth token={0}'.format(params.get('token', None))

    # The Python support for fetching resources from the web is layered. urllib uses the httplib
    # library, which in turn uses the socket library.  As of Python 2.3 you can specify how long
    # a socket should wait for a response before timing out. By default the socket module has no
    # timeout and can hang. Currently, the socket timeout is not exposed at the httplib or urllib2
    # levels. However, you can set the default timeout ( in seconds ) globally for all sockets by
    # doing the following.
    socket.setdefaulttimeout(600)

    for data_dict in job_params['output_data']:
        cur_filename = data_dict.get('file_name', filename)
        cur_URL = params.get('%s|%s|URL' % (GALAXY_PARAM_PREFIX, data_dict['out_data_name']), URL)
        if not cur_URL or urlparse(cur_URL).scheme not in ('http', 'https', 'ftp'):
            open(cur_filename, 'w').close()  # create/truncate an empty output file
            stop_err('The remote data source application has not sent back a URL parameter in the request.')

        # The following calls to urlopen() will use the above default timeout
        try:
            if not URL_method or URL_method == 'get':
                request_obj = request.Request(url=cur_URL)
                request_obj.add_header('authorization', token)
                request_obj.add_header('Accept', 'text/csv')
                page = urlopen(request_obj)
            elif URL_method == 'post':
                page = urlopen(cur_URL, urlencode(params).encode("utf-8"))
        except Exception as e:
            stop_err('Error: url=%s, token=%s\n  %s' % (cur_URL, token, e))
        if max_file_size:
            file_size = int(page.info().get('Content-Length', 0))
            if file_size > max_file_size:
                stop_err('The size of the data (%d bytes) you have requested exceeds the maximum allowed (%d bytes) on this server.' % (file_size, max_file_size))
        try:
            cur_filename = sniff.stream_to_open_named_file(page, os.open(cur_filename, os.O_WRONLY | os.O_CREAT), cur_filename, source_encoding=get_charset_from_http_headers(page.headers))
        except Exception as e:
            stop_err('Unable to fetch %s:\n%s' % (cur_URL, e))

        # perform the same import checks that the upload tool performs
        if enhanced_handling:
            try:
                ext = sniff.handle_uploaded_dataset_file(filename, datatypes_registry, ext=data_dict['ext'])
            except Exception as e:
                stop_err(str(e))
            info = dict(type='dataset',
                        dataset_id=data_dict['dataset_id'],
                        ext=ext)

            json_file.write("%s\n" % dumps(info))
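stop_err() is used throughout but not shown in this excerpt; in Galaxy data-source tools it is conventionally a small print-and-exit helper along these lines (a sketch, not the exact upstream definition):

import sys

def stop_err(msg):
    # Report the error where Galaxy will surface it to the user, then
    # exit non-zero so the job is marked as failed.
    sys.stderr.write('%s\n' % msg)
    sys.exit(1)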
Example #58
def get_hostname(url):
    """Get hostname from URL."""
    # netloc is field 1 of the parse result; it may still include
    # userinfo and port.
    return urlparse(url).netloc.lower()
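Note that netloc still carries any userinfo and port, while urlparse(url).hostname strips both and lowercases for you, so the two are not interchangeable:

from six.moves.urllib.parse import urlparse

url = 'http://user:pw@Example.com:8080/path'
urlparse(url).netloc.lower()   # 'user:pw@example.com:8080'
urlparse(url).hostname         # 'example.com'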
Example #59
def _assert_s3_file_exists(region, s3_url):
    # Object.load() performs a HEAD request; it returns quietly when the
    # key exists and raises botocore.exceptions.ClientError otherwise.
    parsed_url = urlparse(s3_url)
    s3 = boto3.resource('s3', region_name=region)
    s3.Object(parsed_url.netloc, parsed_url.path.lstrip('/')).load()
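Because Object.load() raises botocore.exceptions.ClientError for a missing key, a caller that wants a boolean instead of an assertion could wrap the helper like this (a sketch; the wrapper name is ours):

import botocore.exceptions

def s3_file_exists(region, s3_url):
    try:
        _assert_s3_file_exists(region, s3_url)
        return True
    except botocore.exceptions.ClientError:
        # Typically a 404 when the key does not exist.
        return False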
Example #60
def fetch_release_file(filename, release, dist=None):
    cache_key = 'releasefile:v1:%s:%s' % (
        release.id,
        md5_text(filename).hexdigest(),
    )

    filename_path = None
    if filename is not None:
        # Reconstruct url without protocol + host
        # e.g. http://example.com/foo?bar => ~/foo?bar
        parsed_url = urlparse(filename)
        filename_path = '~' + parsed_url.path
        if parsed_url.query:
            filename_path += '?' + parsed_url.query

    logger.debug('Checking cache for release artifact %r (release_id=%s)',
                 filename, release.id)
    result = cache.get(cache_key)

    dist_name = dist.name if dist else None

    if result is None:
        logger.debug(
            'Checking database for release artifact %r (release_id=%s)',
            filename, release.id)

        filename_idents = [ReleaseFile.get_ident(filename, dist_name)]
        if filename_path is not None and filename_path != filename:
            filename_idents.append(
                ReleaseFile.get_ident(filename_path, dist_name))

        possible_files = list(
            ReleaseFile.objects.filter(
                release=release,
                dist=dist,
                ident__in=filename_idents,
            ).select_related('file'))

        if len(possible_files) == 0:
            logger.debug(
                'Release artifact %r not found in database (release_id=%s)',
                filename, release.id)
            cache.set(cache_key, -1, 60)
            return None
        elif len(possible_files) == 1:
            releasefile = possible_files[0]
        else:
            # Prioritize releasefile that matches full url (w/ host)
            # over hostless releasefile
            target_ident = filename_idents[0]
            releasefile = next(
                (f for f in possible_files if f.ident == target_ident),
                # Guard against StopIteration if, unexpectedly, none of
                # the candidates matches the full-url ident.
                possible_files[0])

        logger.debug('Found release artifact %r (id=%s, release_id=%s)',
                     filename, releasefile.id, release.id)
        try:
            with metrics.timer('sourcemaps.release_file_read'):
                with releasefile.file.getfile() as fp:
                    z_body, body = compress_file(fp)
        except Exception as e:
            logger.exception(six.text_type(e))
            cache.set(cache_key, -1, 3600)
            result = None
        else:
            headers = {
                k.lower(): v
                for k, v in releasefile.file.headers.items()
            }
            encoding = get_encoding_from_headers(headers)
            result = http.UrlResult(filename, headers, body, 200, encoding)
            cache.set(cache_key, (headers, z_body, 200, encoding), 3600)

    elif result == -1:
        # We cached an error, so normalize
        # it down to None
        result = None
    else:
        # Previous caches would be a 3-tuple instead of a 4-tuple,
        # so this is being maintained for backwards compatibility
        try:
            encoding = result[3]
        except IndexError:
            encoding = None
        result = http.UrlResult(filename, result[0],
                                zlib.decompress(result[1]), result[2],
                                encoding)

    return result
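compress_file() is not shown here, but the warm-cache path above inflates the cached body with zlib.decompress, so its contract must be to return a (compressed, raw) pair. A minimal sketch under that assumption:

import zlib

def compress_file(fp, chunk_size=65536):
    # Sketch: stream the file in chunks, producing both the
    # zlib-compressed body (stored in the cache) and the raw body
    # (returned in the immediate UrlResult).
    compressor = zlib.compressobj()
    z_chunks, chunks = [], []
    for chunk in iter(lambda: fp.read(chunk_size), b''):
        chunks.append(chunk)
        z_chunks.append(compressor.compress(chunk))
    z_chunks.append(compressor.flush())
    return b''.join(z_chunks), b''.join(chunks)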