Example #1
def main(wf):
    optslist, args = getopt.getopt(wf.args, 'c')
    opts = dict(optslist)
    path = args[0]

    url = wf.settings.get('server', 'http://127.0.0.1:8888')

    if '-c' in opts:
        # Copy mode: We were passed either notebook.ipynb or path/to/notebook.ipynb
        pathparts = ('/' + path.strip('/')).split('/')
        from_nbname = pathparts[-1]
        dirpath = urljoin(*pathparts[:-1])
        sys.stderr.write('\n' + repr(pathparts) + '\n')
        nbname = newnb(url, dirpath, copy=from_nbname)
    else:
        # We were passed the path to a directory in which to create a blank notebook.
        dirpath = path
        sys.stderr.write('\n' + dirpath + '\n')
        nbname = newnb(url, dirpath)

    arg = urljoin(dirpath, nbname)
    if isinstance(arg, unicode):
        arg = arg.encode('utf-8')
    nb_user_url = urljoin(url, 'notebooks', quote(arg))

    sys.stdout.write(nb_user_url)
    return 0
Example #3
    def __init__(self, base_url, *args):
        """
        Builds a URL dictionary. `args` should be tuples of the form:
            `(url_prefix, {"url_name": "url_path"})`
        """
        self._base_url = base_url
        self._urls = {}

        for prefix, url_dict in args:
            full_prefix = urljoin(self._base_url, prefix)
            for name, url in url_dict.iteritems():
                self[name] = urljoin(full_prefix, url)
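For reference, a hedged usage sketch of the constructor above. The class name `UrlDict`, the base URL, and the mapping-style assignment behind `self[name] = ...` are assumptions made for illustration only:

# Hypothetical usage; class name, base URL, and prefixes are illustrative.
urls = UrlDict(
    'https://api.example.org',
    ('v1', {'users': 'users', 'groups': 'groups'}),
    ('v2', {'health': 'health'}),
)
# Each name ends up joined as urljoin(base_url, prefix, url_path), e.g.
# urls['users'] -> 'https://api.example.org/v1/users'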
Example #4
    def test_urljoin(self):
        self.assertEqual('foo/bar', examinee.urljoin('foo/bar'))
        # leading/trailing slashes should be preserved
        self.assertEqual('//foo/bar//', examinee.urljoin('//foo/bar//'))

        self.assertEqual(
            'xxx://foo.bar/abc/def/',
            examinee.urljoin('xxx://foo.bar', 'abc', 'def/')
        )

        # leading/trailing slashes for "inner" parts should be slurped
        self.assertEqual(
            'gnu://foo.bar/abc/def/',
            examinee.urljoin('gnu://foo.bar/', '/abc/', '/def/')
        )
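The assertions above pin down the joining semantics: a single part comes back untouched, leading and trailing slashes of the outermost parts are preserved, and slashes at the seams between parts are collapsed to exactly one. A minimal sketch with that behaviour, offered as an assumption for illustration rather than the `examinee.urljoin` actually under test:

def urljoin(first, *rest):
    # A single argument is returned unchanged (outer slashes preserved).
    if not rest:
        return first
    # Strip slashes only at the seams; keep the outermost ones intact.
    parts = [first.rstrip('/')]
    parts.extend(p.strip('/') for p in rest[:-1])
    parts.append(rest[-1].lstrip('/'))
    return '/'.join(parts)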
Example #5
    def _authenticated_remote(self, use_ssh=True):
        if use_ssh:
            url = urljoin(self.github_cfg.ssh_url(), self.github_repo_path)
            tmp_id = os.path.abspath('tmp.id_rsa')
            with open(tmp_id, 'w') as f:
                f.write(self.github_cfg.credentials().private_key())
            os.chmod(tmp_id, 0o400)
            suppress_hostcheck = '-o "StrictHostKeyChecking no"'
            id_only = '-o "IdentitiesOnly yes"'
            os.environ[
                'GIT_SSH_COMMAND'] = f'ssh -i {tmp_id} {suppress_hostcheck} {id_only}'
        else:
            url = url_with_credentials(self.github_cfg, self.github_repo_path)

        remote = git.remote.Remote.add(
            repo=self.repo,
            name=random_str(),
            url=url,
        )
        try:
            yield remote
        finally:
            self.repo.delete_remote(remote)
            if use_ssh:
                os.unlink(tmp_id)
                del os.environ['GIT_SSH_COMMAND']
Example #6
def newnb(url, path, copy=None):
    """Create new untitled notebook at 'path'
    
    Server base URL is 'url'

    Returns name of the new notebook file.
    """
    # See IPython/html/services/notebooks/handlers.py for API details.

    # Compare directory contents before and after new notebook creation.
    names = [nb['name'] for nb in get_nblist(url, path) if nb['type'] == 'notebook']

    arg = path
    if isinstance(arg, unicode):
        arg = arg.encode('utf-8')

    post_url = urljoin(url, 'api/notebooks', quote(arg)).strip('/')
    if copy is not None:
        data = json.dumps({'copy_from': copy})
    else:
        data = ''
    try:
        resp = web.post(post_url, data=data)
    except URLError:
        raise URLError('Unable to reach %s. Try the "nbserver" keyword.' % url)
    resp.raise_for_status()

    new_contents = get_nblist(url, path)
    new_names = [nb['name'] for nb in new_contents if nb['type'] == 'notebook']
    try:
        newnbname = list(set(new_names) - set(names))[0]
    except IndexError:
        raise RuntimeError('Notebook creation at %s appears to have failed.' % post_url)
    return newnbname
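Note that the snippet discovers the new notebook's name by diffing the directory listing before and after the POST instead of parsing the response body. The set difference in isolation, with made-up file names:

# Illustrative only; the notebook names are assumptions.
names = ['analysis.ipynb', 'scratch.ipynb']
new_names = ['analysis.ipynb', 'scratch.ipynb', 'Untitled0.ipynb']
list(set(new_names) - set(names))  # -> ['Untitled0.ipynb']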
Example #7
 def get_section(self, sort="new", time="all", limit=DEFAULT_CONTENT_LIMIT,
                 place_holder=None):
     url_data = {"sort" : sort, "time" : time}
     return self.reddit_session._get_content(urljoin(self.URL, subpath),
                                             limit=limit,
                                             url_data=url_data,
                                             place_holder=place_holder)
Example #8
File: model.py Project: minchaow/cc-utils
    def from_github_repo_url(repo_url):
        parsed = urllib.parse.urlparse(repo_url)
        if parsed.scheme:
            component_name = repo_url = urljoin(*parsed[1:3])
        else:
            component_name = repo_url

        return ComponentName(name=component_name)
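Concretely, `urlparse` splits a scheme-qualified URL into its components, and joining the netloc and path yields the scheme-less component name. A hedged walk-through with an assumed input URL:

# Assumed input, for illustration only.
# repo_url = 'https://github.com/minchaow/cc-utils'
# urllib.parse.urlparse(repo_url)[1:3] == ('github.com', '/minchaow/cc-utils')
# urljoin(*parsed[1:3]) -> 'github.com/minchaow/cc-utils'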
Example #9
File: alpine.py Project: spiarh/jojo
 def apkindex_url(self) -> str:
     # http://dl-cdn.alpinelinux.org/alpine/v3.12/main/x86_64/APKINDEX.tar.gz
     return util.urljoin(
         self.mirror,
         self.version_from.type.value,
         self.version_id,
         self.repo,
         self.arch,
         APKINDEX_FILENAME,
     )
Example #10
 def sorted(self, limit=DEFAULT_CONTENT_LIMIT, place_holder=None, **data):
     for k, v in defaults.items():
         if k == "time":
             # time should be "t" in the API data dict
             k = "t"
         data.setdefault(k, v)
     return self.reddit_session._get_content(urljoin(self.URL, subpath),
                                             limit=limit,
                                             url_data=data,
                                             place_holder=place_holder)
Example #11
    def apps(self, group_id, custom_attribs={}):
        url = self._api_url('apps')
        if group_id:
            url = urljoin(url, str(group_id))

        search_query = ' '.join([
            'meta:' + str(k) + '=' + str(v) for k, v in custom_attribs.items()
        ])
        if search_query:
            url += '?' + urlencode({'q': search_query})

        return url
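With the joining semantics shown in the tests elsewhere on this page, the method produces URLs of the following shape. The base URL and attribute values below are assumptions for illustration:

# Illustrative only; _api_url() and its base URL are assumptions.
# apps(group_id=42, custom_attribs={'env': 'prod'})
#   url          -> 'https://example.org/api/apps/42'
#   search_query -> 'meta:env=prod'
#   result       -> 'https://example.org/api/apps/42?q=meta%3Aenv%3Dprod'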
Example #12
def add_root_item(wf, url, mode=None):
    # Make the first item a link to the root path
    nburl = urljoin(url, '')
    if mode == 'new':
        wf.add_item(title='/',
                subtitle='New notebook at ' + nburl, 
                arg='', valid=True, icon=ICON_WEB)
    elif mode == 'copy':
        return # Can't copy the root directory
    else:
        wf.add_item(title='Browse all notebooks',
                subtitle=nburl, arg=nburl, valid=True, icon=ICON_WEB)
Example #13
 def __init__(
     self,
     client_type: str,
     url: str = None,
     headers: dict = None,
     cache: str = None,
     **kwargs,
 ):
     self._client_type = client_type
     self.url = url or util.urljoin(kwargs.get("base_url", ""),
                                    kwargs.get("token_path", ""))
     self.headers = headers
     for key, value in kwargs.items():
         setattr(self, key, value)
Example #14
    def _authenticated_remote(self, use_ssh=False):
        if use_ssh:
            url = urljoin(self.github_cfg.ssh_url(), self.github_repo_path)
        else:
            url = url_with_credentials(self.github_cfg, self.github_repo_path)

        remote = git.remote.Remote.add(
            repo=self.repo,
            name=random_str(),
            url=url,
        )

        try:
            yield remote
        finally:
            self.repo.delete_remote(remote)
Example #15
def collector(update_on_conflict, ignore_on_conflict, use_existing):
    "Run a one-off task to synchronize from the fracfocus data source"
    logger.info(conf)

    endpoints = Endpoint.load_from_config(conf)
    coll = FracFocusCollector(endpoints["registry"])
    url = util.urljoin(conf.COLLECTOR_BASE_URL, conf.COLLECTOR_URL_PATH)
    if not use_existing:
        downloader = ZipDownloader(url)
        req = downloader.get()
        filelist = downloader.unpack(req).paths
    else:
        downloader = ZipDownloader.from_existing()
        filelist = downloader.paths

    coll.collect(filelist, update_on_conflict, ignore_on_conflict)
Example #16
def add_root_item(wf, url, mode=None):
    # Make the first item a link to the root path
    nburl = urljoin(url, '')
    if mode == 'new':
        wf.add_item(title='/',
                    subtitle='New notebook at ' + nburl,
                    arg='',
                    valid=True,
                    icon=ICON_WEB)
    elif mode == 'copy':
        return  # Can't copy the root directory
    else:
        wf.add_item(title='Browse all notebooks',
                    subtitle=nburl,
                    arg=nburl,
                    valid=True,
                    icon=ICON_WEB)
Example #17
    def upload_image_to_container_registry(self, container_image,
                                           image_data_fh):
        '''
        uploads the given container image (read from the passed fileobj, which
        must have a name) to the configured container registry.
        The original image reference is mangled and prefixed with the configured
        prefix.
        '''
        # we assume there is always a tag present
        image_name, tag = container_image.image_reference().rsplit(':', 1)
        mangled_reference = ':'.join((image_name.replace('.', '_'), tag))
        target_reference = urljoin(self._upload_registry_prefix,
                                   mangled_reference)

        publish_container_image(
            image_reference=target_reference,
            image_file_obj=image_data_fh,
        )
Example #18
def republish_image(
    src_ref,
    tgt_prefix,
    mangle=True,
):
    img_ref, tag = src_ref.rsplit(':', 1)
    if mangle:
        img_ref = img_ref.replace('.', '_')

    tgt_ref = util.urljoin(tgt_prefix, ':'.join((img_ref, tag)))

    with tempfile.NamedTemporaryFile() as tmp_file:
        container.registry.retrieve_container_image(image_reference=src_ref,
                                                    outfileobj=tmp_file)
        container.registry.publish_container_image(image_reference=tgt_ref,
                                                   image_file_obj=tmp_file)

    return src_ref, tgt_ref
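The mangling step rewrites only the dots in the repository part of the reference, so the source registry host ends up as a path segment underneath the target prefix. A hedged walk-through with assumed references:

# Illustrative only; both references are assumptions.
# src_ref = 'eu.gcr.io/acme/my-image:1.2.3', tgt_prefix = 'registry.local/mirror'
# img_ref -> 'eu_gcr_io/acme/my-image'          (dots replaced)
# tgt_ref -> 'registry.local/mirror/eu_gcr_io/acme/my-image:1.2.3'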
Example #19
    def download_alt(self, url, job):
        if not self.client.config.alt_dl_server:
            return False

        ext_url = urljoin(
            self.client.config.alt_dl_server,
            job.project,
            "split",
            job.file,
        )

        with self.client.session.get(ext_url, timeout=5, stream=True) as r:
            if r.status_code != 200:
                return False
            else:
                logging.log(log.Levels.NET, "downloading from", ext_url)
            self.save(r, job)
            return True
Example #20
    def retrieve_secrets(self):
        if self.cache_file and os.path.isfile(self.cache_file):
            with open(self.cache_file) as f:
                return json.load(f)

        request_url = urljoin(self.url, self.concourse_secret_name)
        response = requests.get(request_url)
        # pylint: disable=no-member
        if not response.status_code == requests.codes.ok:
            # pylint: enable=no-member
            raise RuntimeError('secrets_server sent {d}: {m}'.format(
                d=response.status_code, m=response.content))

        if self.cache_file:
            with open(self.cache_file, 'w') as f:
                json.dump(response.json(), f)

        return response.json()
Example #21
    def _authenticated_remote(self, use_ssh=True):
        if use_ssh:
            url = urljoin(self.github_cfg.ssh_url(), self.github_repo_path)
            tmp_id = _ssh_auth_env(github_cfg=self.github_cfg)
        else:
            url = url_with_credentials(self.github_cfg, self.github_repo_path)

        remote = git.remote.Remote.add(
            repo=self.repo,
            name=random_str(),
            url=url,
        )
        try:
            yield remote
        finally:
            self.repo.delete_remote(remote)
            if use_ssh:
                os.unlink(tmp_id)
                del os.environ['GIT_SSH_COMMAND']
Example #22
def _metadata_dict():
    # XXX mv to concourse package; deduplicate with notify step
    if not util._running_on_ci():
        return {}

    # XXX do not hard-code meta-dir
    meta_dir = util.existing_dir(
        os.path.join(util._root_dir(), os.environ.get('META')))

    attrs = (
        'atc-external-url',
        'build-team-name',
        'build-pipeline-name',
        'build-job-name',
        'build-name',
    )

    def read_attr(name):
        with open(os.path.join(meta_dir, name)) as f:
            return f.read().strip()

    meta_dict = {name: read_attr(name) for name in attrs}

    # XXX deduplicate; mv to concourse package
    meta_dict['concourse_url'] = util.urljoin(
        meta_dict['atc-external-url'],
        'teams',
        meta_dict['build-team-name'],
        'pipelines',
        meta_dict['build-pipeline-name'],
        'jobs',
        meta_dict['build-job-name'],
        'builds',
        meta_dict['build-name'],
    )

    # XXX do not hard-code env variables
    meta_dict['effective_version'] = os.environ.get('EFFECTIVE_VERSION')
    meta_dict['component_name'] = os.environ.get('COMPONENT_NAME')
    meta_dict['creation_date'] = datetime.datetime.now().isoformat()

    return meta_dict
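With the multi-part urljoin used throughout, the resulting `concourse_url` has the usual Concourse build-URL shape. The metadata values below are illustrative assumptions:

# Illustrative only; team, pipeline, job, and build values are assumptions.
# https://concourse.example.org/teams/main/pipelines/my-pipeline/jobs/build-job/builds/42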
Example #23
 def clone_into(
     target_directory: str,
     github_cfg,
     github_repo_path: str,
     checkout_branch: str = None,
 ) -> 'GitHelper':
     url = urljoin(github_cfg.ssh_url(), github_repo_path)
     tmp_id = _ssh_auth_env(github_cfg=github_cfg)
     args = ['--quiet']
     if checkout_branch is not None:
         args += ['--branch', checkout_branch, '--single-branch']
     args += [url, target_directory]
     git.Git().clone(*args)
     os.unlink(tmp_id)
     del os.environ['GIT_SSH_COMMAND']
     return GitHelper(
         repo=target_directory,
         github_cfg=github_cfg,
         github_repo_path=github_repo_path,
     )
Example #24
    def _resolve(self, url):
        self._resolved.add(url)
        try:
            response = urllib2.urlopen(url)
            code = response.getcode()

            if not self._ignore(url):
                all_links = util.pull_out_all_links(response.read())
                for link in all_links:
                    link = util.urljoin(url, link)
                    self._add_link_parent(link, url)
                    if self._is_current_site(link) and link not in self._resolved:
                        self._tasks.add(link)

            response.close()
        except urllib2.URLError, e:
            print e
            if hasattr(e, "code"):
                code = e.code
            else:
                code = -1
Example #25
def newnb(url, path, copy=None):
    """Create new untitled notebook at 'path'
    
    Server base URL is 'url'

    Returns name of the new notebook file.
    """
    # See IPython/html/services/notebooks/handlers.py for API details.

    # Compare directory contents before and after new notebook creation.
    names = [
        nb['name'] for nb in get_nblist(url, path) if nb['type'] == 'notebook'
    ]

    arg = path
    if isinstance(arg, unicode):
        arg = arg.encode('utf-8')

    post_url = urljoin(url, 'api/notebooks', quote(arg)).strip('/')
    if copy is not None:
        data = json.dumps({'copy_from': copy})
    else:
        data = ''
    try:
        resp = web.post(post_url, data=data)
    except URLError:
        raise URLError('Unable to reach %s. Try the "nbserver" keyword.' % url)
    resp.raise_for_status()

    new_contents = get_nblist(url, path)
    new_names = [nb['name'] for nb in new_contents if nb['type'] == 'notebook']
    try:
        newnbname = list(set(new_names) - set(names))[0]
    except IndexError:
        raise RuntimeError('Notebook creation at %s appears to have failed.' %
                           post_url)
    return newnbname
Example #26
def main(wf):
    optslist, args = getopt.getopt(wf.args, 'rdc')
    opts = dict(optslist)
    if args:
        query = args[0]
    else:
        query = None

    sort_by_modtime = '-r' in opts
    if '-d' in opts:
        mode = 'new'
    elif '-c' in opts:
        mode = 'copy'
    else:
        mode = None

    url = wf.settings.get('server', 'http://127.0.0.1:8888')

    def get_nb():
        return get_all_notebooks(url)
    # Retrieve directory and cache for 30 seconds
    nblist = wf.cached_data('nblist', get_nb, max_age=30)

    # Filtering by query
    if query:
        nblist = wf.filter(query, nblist, 
                key=lambda nb: nb['path'] + '/' + nb['name'] )
        # No matches
        if not nblist:
            add_root_item(wf, url, mode=mode)
            wf.add_item('No notebooks found', icon=ICON_WARNING,
                     subtitle='On server %s' % url)
            wf.send_feedback()
            return 0
    elif not sort_by_modtime:
        # If no query and alphabetical sorting, show root.
        add_root_item(wf, url, mode=mode)

    if sort_by_modtime or mode == 'copy':
        # Notebooks only
        nblist = [nb for nb in nblist if nb['type'] == 'notebook']
    elif mode == 'new':
        # Directories only
        nblist = [nb for nb in nblist if nb['type'] == 'directory']

    if sort_by_modtime:
        # Most recent first
        nblist.sort(key=lambda nb: nb['last_modified'], reverse=True)

    # Build results output
    for nb in nblist:
        if nb['name'].endswith('.ipynb'):
            # We use urljoin() twice to get the right behavior when path is empty
            nbname = urljoin(nb['path'], nb['name'][:-len('.ipynb')])
        elif nb['type'] == 'directory':
            nbname = urljoin(nb['path'], nb['name']) + '/'
        else:
            nbname = nb['name']

        nb_user_url = urljoin(url, 'notebooks', urljoin(nb['path'], nb['name']))
        if mode == 'new':
            # We return only the path information, since newnb.py has to use the API anyhow.
            nburl = urljoin(nb['path'], nb['name']) + '/'
            subtitle = 'New notebook at ' + nb_user_url
        elif mode == 'copy':
            nburl = urljoin(nb['path'], nb['name']) + '/'
            subtitle = 'Make a copy of ' + nb_user_url
        else:
            # URL will be passed straight to opener, so must be quoted.
            arg = urljoin(nb['path'], nb['name'])
            if isinstance(arg, unicode):
                arg = arg.encode('utf-8')
            nburl = urljoin(url, 'notebooks', 
                    quote(arg, '/'))
            subtitle = nb_user_url
        wf.add_item(title=nbname,
                subtitle=subtitle,
                arg=nburl,
                valid=True,
                icon=ICON_WEB)

    wf.send_feedback()
    return 0
Example #27
def main(wf):
    optslist, args = getopt.getopt(wf.args, 'rdc')
    opts = dict(optslist)
    if args:
        query = args[0]
    else:
        query = None

    sort_by_modtime = '-r' in opts
    if '-d' in opts:
        mode = 'new'
    elif '-c' in opts:
        mode = 'copy'
    else:
        mode = None

    url = wf.settings.get('server', 'http://127.0.0.1:8888')

    def get_nb():
        return get_all_notebooks(url)

    # Retrieve directory and cache for 30 seconds
    nblist = wf.cached_data('nblist', get_nb, max_age=30)

    # Filtering by query
    if query:
        nblist = wf.filter(query,
                           nblist,
                           key=lambda nb: nb['path'] + '/' + nb['name'])
        # No matches
        if not nblist:
            add_root_item(wf, url, mode=mode)
            wf.add_item('No notebooks found',
                        icon=ICON_WARNING,
                        subtitle='On server %s' % url)
            wf.send_feedback()
            return 0
    elif not sort_by_modtime:
        # If no query and alphabetical sorting, show root.
        add_root_item(wf, url, mode=mode)

    if sort_by_modtime or mode == 'copy':
        # Notebooks only
        nblist = [nb for nb in nblist if nb['type'] == 'notebook']
    elif mode == 'new':
        # Directories only
        nblist = [nb for nb in nblist if nb['type'] == 'directory']

    if sort_by_modtime:
        # Most recent first
        nblist.sort(key=lambda nb: nb['last_modified'], reverse=True)

    # Build results output
    for nb in nblist:
        if nb['name'].endswith('.ipynb'):
            # We use urljoin() twice to get the right behavior when path is empty
            nbname = urljoin(nb['path'], nb['name'][:-len('.ipynb')])
        elif nb['type'] == 'directory':
            nbname = urljoin(nb['path'], nb['name']) + '/'
        else:
            nbname = nb['name']

        nb_user_url = urljoin(url, 'notebooks',
                              urljoin(nb['path'], nb['name']))
        if mode == 'new':
            # We return only the path information, since newnb.py has to use the API anyhow.
            nburl = urljoin(nb['path'], nb['name']) + '/'
            subtitle = 'New notebook at ' + nb_user_url
        elif mode == 'copy':
            nburl = urljoin(nb['path'], nb['name']) + '/'
            subtitle = 'Make a copy of ' + nb_user_url
        else:
            # URL will be passed straight to opener, so must be quoted.
            arg = urljoin(nb['path'], nb['name'])
            if isinstance(arg, unicode):
                arg = arg.encode('utf-8')
            nburl = urljoin(url, 'notebooks', quote(arg, '/'))
            subtitle = nb_user_url
        wf.add_item(title=nbname,
                    subtitle=subtitle,
                    arg=nburl,
                    valid=True,
                    icon=ICON_WEB)

    wf.send_feedback()
    return 0
Example #28
 def test_token_urljoin_double_slash(self, conf):
     url = f"{conf.API_BASE_URL}/"
     path = "/path/to/endpoint"
     assert util.urljoin(url, path) == expected_url
Example #29
 def url(self):
     return util.urljoin(self.base_url, self.path)
Example #30
 def test_both_slashed(self):
     self.assertEqual(util.urljoin("http://www.example.com/", "/subpath"),
                      "http://www.example.com/subpath")
Example #31
 def upload_image_ref(image_reference):
     image_name, tag = image_reference.rsplit(':', 1)
     mangled_reference = ':'.join((image_name.replace('.', '_'), tag))
     return urljoin(upload_registry_prefix, mangled_reference)
Example #32
 def _api_url(self, *parts, **kwargs):
     return urljoin(self.base_url, *parts)
Example #33
File: api.py Project: run-it-down/reporter
    def on_get(self, req, resp):
        logger.info('reporting')
        params = {
            'summoner1': req.params['summoner1'],
            'summoner2': req.params['summoner2']
        }

        # common_games
        logger.info('getting common games')
        game_information = requests.get(url=util.urljoin(
            ANALYZER_ENDPOINT, '/common-games'),
                                        params=params)
        game_information = json.loads(game_information.content.decode())

        if not game_information:
            resp.status_code = 404
            return

        # get WR
        logger.info('getting wr')
        wr = requests.get(url=util.urljoin(ANALYZER_ENDPOINT, '/winrate'),
                          params=params)
        wr = json.loads(wr.content.decode())

        # get KDA
        logger.info('getting kda')
        kda = requests.get(url=util.urljoin(ANALYZER_ENDPOINT, '/kda'),
                           params=params)
        kda = json.loads(kda.content.decode())

        # get CS
        logger.info('getting cs')
        cs = requests.get(url=util.urljoin(ANALYZER_ENDPOINT, '/cs'),
                          params=params)
        cs = json.loads(cs.content.decode())

        # get avg-game
        logger.info('getting average game')
        avg_game = requests.get(url=util.urljoin(ANALYZER_ENDPOINT,
                                                 '/avg-game'),
                                params=params)
        avg_game = json.loads(avg_game.content.decode())

        # millionaire
        logger.info('getting classification millionaire')
        millionaire = requests.get(url=util.urljoin(
            ANALYZER_ENDPOINT, '/classification/millionaire'),
                                   params=params)
        millionaire = json.loads(millionaire.content.decode())

        # match-type
        logger.info('getting classification match-type')
        match_type = requests.get(url=util.urljoin(
            ANALYZER_ENDPOINT, '/classification/match-type'),
                                  params=params)
        match_type = json.loads(match_type.content.decode())

        # murderous-duo
        logger.info('getting classification murderous-duo')
        murderous_duo = requests.get(url=util.urljoin(
            ANALYZER_ENDPOINT, '/classification/murderous-duo'),
                                     params=params)
        murderous_duo = json.loads(murderous_duo.content.decode())

        # duo-type
        logger.info('getting classification duo-type')
        duo_type = requests.get(url=util.urljoin(ANALYZER_ENDPOINT,
                                                 '/classification/duo-type'),
                                params=params)
        duo_type = json.loads(duo_type.content.decode())

        # farmer-type
        logger.info('getting classification farmer-type')
        farmer_type = requests.get(url=util.urljoin(
            ANALYZER_ENDPOINT, '/classification/farmer-type'),
                                   params=params)
        farmer_type = json.loads(farmer_type.content.decode())

        # tactician
        logger.info('getting classification tactician')
        tactician = requests.get(url=util.urljoin(ANALYZER_ENDPOINT,
                                                  '/classification/tactician'),
                                 params=params)
        tactician = json.loads(tactician.content.decode())

        # champ combination
        logger.info('getting champ combination')
        champ_combo = requests.get(url=util.urljoin(ANALYZER_ENDPOINT,
                                                    '/combinations/champions'),
                                   params=params)
        champ_combo = json.loads(champ_combo.content.decode())

        # aggression
        logger.info('getting aggression')
        aggression = requests.get(url=util.urljoin(ANALYZER_ENDPOINT,
                                                   '/aggression'),
                                  params=params)
        aggression = json.loads(aggression.content.decode())

        # avg-role
        logger.info('getting avg-role')
        avg_role = requests.get(url=util.urljoin(ANALYZER_ENDPOINT,
                                                 '/avg-role'),
                                params=params)
        avg_role = json.loads(avg_role.content.decode())

        # gold-diff
        logger.info('getting gold-diff')
        gold_diff = requests.get(url=util.urljoin(ANALYZER_ENDPOINT,
                                                  '/gold-diff'),
                                 params=params)
        gold_diff = json.loads(gold_diff.content.decode())

        # aggregate metrics to report
        resp.text = json.dumps({
            'game_information': game_information,
            'winrate': wr,
            'kda': kda,
            'cs': cs,
            'avg-game': avg_game,
            'classification_millionaire': millionaire,
            'match_type': match_type,
            'murderous_duo': murderous_duo,
            'duo_type': duo_type,
            'farmer_type': farmer_type,
            'tactician': tactician,
            'champ_combo': champ_combo,
            'aggression': aggression,
            'avg_role': avg_role,
            'gold_diff': gold_diff,
        })
Example #34
 def compose_api_url(path: str) -> str:
     return urljoin(options.api_endpoint, "account", options.api_account_id, path)
Example #35
 def __init__(self, reddit_session, title=None, json_dict=None,
              fetch_comments=True):
     super(Submission, self).__init__(reddit_session, title, json_dict,
                                      fetch=True)
     if not self.permalink.startswith(urls["reddit_url"]):
         self.permalink = urljoin(urls["reddit_url"], self.permalink)
Example #36
 def schema_header_url(self):
     return util.urljoin(self.base_url or "", self._url_path or "")
Example #37
 def _url(self, *parts):
     return urljoin(self._base_url, *parts)
Example #38
 def test_neither_slashed(self):
     self.assertEqual(util.urljoin("http://www.example.com", "subpath"),
                      "http://www.example.com/subpath")
Example #39
 def test_token_urljoin_no_slash(self, conf):
     url = conf.API_BASE_URL
     path = "path/to/endpoint"
     assert util.urljoin(url, path) == expected_url
Example #40
 def captcha_url(self):
     if self.captcha_id:
         return urljoin(self.VIEW_URL, self.captcha_id + ".png")
Example #41
File: routes.py Project: minchaow/cc-utils
 def login(self):
     return util.urljoin(self.base_url, 'sky', 'token')