Code Example #1
    def check_non_utf8_content(self, filename, content):
        base_url = "file:///tmp"
        url = "%s/%s" % (base_url, filename)
        template = {
            "resources": {
                "one_init": {
                    "type": "OS::Heat::CloudConfig",
                    "properties": {
                        "cloud_config": {
                            "write_files": [
                                {"path": "/tmp/%s" % filename, "content": {"get_file": url}, "encoding": "b64"}
                            ]
                        }
                    },
                }
            }
        }
        self.m.StubOutWithMock(request, "urlopen")
        raw_content = base64.decodestring(content)
        response = six.BytesIO(raw_content)
        request.urlopen(url).AndReturn(response)
        self.m.ReplayAll()
        files = {}
        template_utils.resolve_template_get_files(template, files, base_url)
        self.assertEqual({url: content}, files)
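Note: the mox-style stubbing above (StubOutWithMock / ReplayAll) predates the standard library's unittest.mock. A rough equivalent of the same stub under unittest.mock, as a sketch only using the names from the snippet:

    # Sketch: replace urlopen on the six.moves.urllib.request module and
    # verify it was called with the expected file:// URL.
    from unittest import mock

    with mock.patch.object(request, 'urlopen', return_value=six.BytesIO(raw_content)) as fake:
        template_utils.resolve_template_get_files(template, files, base_url)
        fake.assert_called_once_with(url)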
Code Example #2
File: models.py  Project: dimagi/commcare-hq
    def send(self, msg, *args, **kwargs):
        config = self.config
        if config.additional_params:
            params = config.additional_params.copy()
        else:
            params = {}

        phone_number = msg.phone_number
        if config.include_plus:
            phone_number = clean_phone_number(phone_number)
        else:
            phone_number = strip_plus(phone_number)

        try:
            text = msg.text.encode("iso-8859-1")
        except UnicodeEncodeError:
            text = msg.text.encode("utf-8")
        params[config.message_param] = text
        params[config.number_param] = phone_number

        url_params = urlencode(params)
        try:
            if config.method == "GET":
                response = urlopen("%s?%s" % (config.url, url_params),
                    timeout=settings.SMS_GATEWAY_TIMEOUT).read()
            else:
                response = urlopen(config.url, url_params,
                    timeout=settings.SMS_GATEWAY_TIMEOUT).read()
        except Exception as e:
            msg = "Error sending message from backend: '{}'\n\n{}".format(self.pk, str(e))
            six.reraise(BackendProcessingException,
                        BackendProcessingException(msg),
                        sys.exc_info()[2])
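For reference, six.reraise(tp, value, tb) takes the exception class first and the instance second; a minimal standalone sketch of re-raising under a new type while keeping the original traceback:

    import sys
    import six

    try:
        1 / 0
    except Exception as e:
        # Raise a different exception type, preserving the original traceback.
        six.reraise(RuntimeError, RuntimeError(str(e)), sys.exc_info()[2])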
Code Example #3
def _genomeresearch(url):
    try:
        html = urlopen(url)
    except HTTPError as e:
        print(e)
        return None, None  # bail out: html is undefined if urlopen failed
    except URLError as e:
        print("The server could not be found!")
        return None, None
    bsObj = BeautifulSoup(html.read(), "html5lib")
    meta_data = bsObj.find("cite").get_text()
    journal_data = []
    for subject in bsObj.findAll("div", {"class": "level1"}):
        category = subject.find("h2").get_text()
        articles = []
        for article in subject.findAll("li", {"class": "toc-cit"}):
            t = article.find("h4").get_text().strip()
            abst_url = '/'.join(url.split('/')[:3]) \
                    + article.find("a", {"rel": "abstract"})['href']
            try:
                abst_html = urlopen(abst_url)
                abstObj = BeautifulSoup(abst_html.read(), "html5lib")
                abst_contents = abstObj.find("div", {"id": "abstract-1"})
                results = abst_contents.find("p").get_text()
                #results = results.split(':')[1]
                s = ' '.join(results.replace("\n", " ").split())
                articles.append((t, s))
            except AttributeError:
                continue
        if len(articles) > 0:
            journal_data.append((category, articles))
    return meta_data, journal_data
Code Example #4
def _genomebiology(url):
    try:
        html = urlopen(url)
    except HTTPError as e:
        print(e)
        return None, None  # bail out: html is undefined if urlopen failed
    except URLError as e:
        print("The server could not be found!")
        return None, None
    bsObj = BeautifulSoup(html.read(), "html5lib")
    meta_data = bsObj.find("p", {"class": "ResultsList_journal"})\
            .get_text().strip().split(":")[0]

    journal_data = defaultdict(list)

    for subject in bsObj.findAll("li", {"class":"ResultsList_item"}):
        category = subject.find("p", {"class":"ResultsList_type"}).get_text()
        t = subject.find("a",{"class":"fulltexttitle"}).get_text()
        abst_url = '/'.join(url.split('/')[:3]) \
                + subject.find("a",{"class":"fulltexttitle"})['href']
        if category == "Research Highlight":
            continue

        try:
            abst_html = urlopen(abst_url)
            abstObj = BeautifulSoup(abst_html.read(), "html5lib")
            s = abstObj.find("section").findAll("p", {"class":"Para"})[-1]\
                    .get_text()
            journal_data[category].append((t, s))
        except AttributeError:
            continue
        except IndexError:
            continue
    journal_data = [(a, b) for a, b in journal_data.items()]
    return meta_data, journal_data
Code Example #5
File: googlesearch.py  Project: aakash-cr7/zulip
def get_google_result(search_keywords):
    if search_keywords == 'help':
        help_message = "To use this bot start message with @google \
                        followed by what you want to search for. If \
                        found, Zulip will return the first search result \
                        on Google. An example message that could be sent is:\
                        '@google zulip' or '@google how to create a chatbot'."
        return help_message
    else:
        try:
            urls = search(search_keywords, stop=20)
            urlopen('http://216.58.192.142', timeout=1)
        except http.client.RemoteDisconnected as er:
            logging.exception(er)
            return 'Error: No internet connection. {}.'.format(er)
        except Exception as e:
            logging.exception(e)
            return 'Error: Search failed. {}.'.format(e)

        # search() returns a generator, so check for emptiness by pulling
        # the first result rather than testing the generator's truthiness.
        url = next(urls, None)
        if url is None:
            return 'No URLs returned by google.'

        return 'Success: {}'.format(url)
Code Example #6
File: test_record_mode.py  Project: koobs/vcrpy
def test_none_record_mode(tmpdir):
    # Cassette file doesn't exist, yet we are trying to make a request.
    # raise hell.
    testfile = str(tmpdir.join("recordmode.yml"))
    with vcr.use_cassette(testfile, record_mode="none"):
        with pytest.raises(Exception):
            urlopen("http://httpbin.org/").read()
Code Example #7
File: test_record_mode.py  Project: darioush/vcrpy
def test_new_episodes_record_mode(tmpdir):
    testfile = str(tmpdir.join('recordmode.yml'))

    with vcr.use_cassette(testfile, record_mode="new_episodes"):
        # cassette file doesn't exist, so create.
        response = urlopen('http://httpbin.org/').read()

    with vcr.use_cassette(testfile, record_mode="new_episodes") as cass:
        # make the same request again
        response = urlopen('http://httpbin.org/').read()

        # all responses have been played
        assert cass.all_played

        # in the "new_episodes" record mode, we can add more requests to
        # a cassette without repercussions.
        response = urlopen('http://httpbin.org/get').read()

        # one of the responses has been played
        assert cass.play_count == 1

        # not all responses have been played
        assert not cass.all_played

    with vcr.use_cassette(testfile, record_mode="new_episodes") as cass:
        # the cassette should now have 2 responses
        assert len(cass.responses) == 2
Code Example #8
File: external.py  Project: ImmPortDB/immport-galaxy
    def __init__(self, url, method='GET', data=None, **kwargs):
        """
        :param url: the base URL to open.
        :param method: the HTTP method to use.
            Optional: defaults to 'GET'
        :param data: any data to pass (either in query for 'GET'
            or as post data with 'POST')
        :type data: dict
        """
        self.url = url
        self.method = method

        self.data = data or {}
        encoded_data = urlencode(self.data)

        scheme = urlparse(url).scheme
        assert scheme in ('http', 'https', 'ftp'), 'Invalid URL scheme: %s' % scheme

        if method == 'GET':
            self.url += '?%s' % (encoded_data)
            # open the URL with the query string attached, not the bare url
            opened = urlopen(self.url)
        elif method == 'POST':
            opened = urlopen(url, encoded_data)
        else:
            raise ValueError('Not a valid method: %s' % (method))

        super(URLDataProvider, self).__init__(opened, **kwargs)
Code Example #9
File: flask.py  Project: tricoder42/python-ariadne
    def start(self):
        """ Start application in a separate process. """
        self.port = self.get_free_port()

        worker = lambda app, port: app.run(port=port, use_reloader=False)
        self._process = multiprocessing.Process(
            target=worker,
            args=(self.app, self.port)
        )
        self._process.start()

        # Wait for server to start
        timestep = 0.5
        for i in range(int(self.timeout / timestep)):
            try:
                urlopen(self.server_url)
            except HTTPError:
                # Server is up, but returns 404 for /
                break
            except URLError:
                # Server is down, we need to wait.
                time.sleep(timestep)
            else:
                # No error, server is up.
                break
Code Example #10
File: server.py  Project: pombredanne/test_server
    def test_path(self):
        urlopen(self.server.get_url("/foo")).read()
        self.assertEqual(self.server.request["path"], "/foo")

        urlopen(self.server.get_url("/foo?bar=1")).read()
        self.assertEqual(self.server.request["path"], "/foo")
        self.assertEqual(self.server.request["args"]["bar"], "1")
Code Example #11
    def test_process_environment_relative_file(self):

        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env_url = 'file:///home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          "OS::Thingy": a.yaml
        '''
        tmpl = b'{"foo": "bar"}'

        request.urlopen(env_url).AndReturn(
            six.BytesIO(env))
        request.urlopen('file:///home/my/dir/a.yaml').AndReturn(
            six.BytesIO(tmpl))
        self.m.ReplayAll()

        self.assertEqual(
            env_url,
            template_utils.normalise_file_path_to_url(env_file))
        self.assertEqual(
            'file:///home/my/dir',
            template_utils.base_url_for_url(env_url))

        files, env_dict = template_utils.process_environment_and_files(
            env_file)

        self.assertEqual(
            {'resource_registry': {
                'OS::Thingy': 'file:///home/my/dir/a.yaml'}},
            env_dict)
        self.assertEqual(
            tmpl, files['file:///home/my/dir/a.yaml'])
Code Example #12
File: test_request.py  Project: IvanMalison/vcrpy
def test_recorded_request_uri_with_redirected_request(tmpdir):
    with vcr.use_cassette(str(tmpdir.join('test.yml'))) as cass:
        assert len(cass) == 0
        urlopen('http://httpbin.org/redirect/3')
        assert cass.requests[0].uri == 'http://httpbin.org/redirect/3'
        assert cass.requests[3].uri == 'http://httpbin.org/get'
        assert len(cass) == 4
Code Example #13
File: deactivate.py  Project: Pike/elmo
    def handleSection(self, section, items):
        locales = items['locales']
        if locales == 'all':
            inipath = '/'.join((
                items['repo'], items['mozilla'],
                'raw-file', 'default',
                items['l10n.ini']
            ))
            ini = ConfigParser()
            ini.readfp(urlopen(inipath))
            allpath = urljoin(
                urljoin(inipath, ini.get('general', 'depth')),
                ini.get('general', 'all'))
            locales = urlopen(allpath).read()
        locales = locales.split()
        obs = (Active.objects
               .filter(run__tree__code=section)
               .exclude(run__locale__code__in=locales)
               .order_by('run__locale__code'))
        obslocs = ' '.join(obs.values_list('run__locale__code', flat=True))
        if not obslocs:
            self.stdout.write(' OK\n')
            return
        s = input('Remove %s? [Y/n] ' % obslocs)
        if s.lower() == 'y' or s == '':
            obs.delete()
Code Example #14
File: test_config.py  Project: IvanMalison/vcrpy
def test_default_set_cassette_library_dir(tmpdir):
    my_vcr = vcr.VCR(cassette_library_dir=str(tmpdir.join('subdir')))

    with my_vcr.use_cassette('test.json'):
        urlopen('http://httpbin.org/get')

    assert os.path.exists(str(tmpdir.join('subdir').join('test.json')))
Code Example #15
File: cachedfetch.py  Project: PeterJCLaw/tools
def grab_url_cached(url):
    """
    Download a possibly cached URL.

    :returns: The contents of the page.
    """
    cache_dir = get_cache_dir('urls')

    h = hashlib.sha1()
    h.update(url.encode('UTF-8'))

    F = os.path.join(cache_dir, h.hexdigest())

    if os.path.exists(F) and (time.time() - os.path.getmtime(F)) < CACHE_LIFE:
        with open(F, 'rb') as file:  # read back as bytes, matching the 'wb' write below
            page = file.read()
    else:
        # try the remote supplier page cache
        try:
            base_url = "https://www.studentrobotics.org/~rspanton/supcache/{}"
            cached_url = base_url.format(h.hexdigest())
            sc = urlopen(cached_url)
            page = sc.read()
        except HTTPError:
            page = urlopen(url).read()

        with open(F, 'wb') as file:
            file.write(page)

    return page
Code Example #16
    def json_req(self, url, http_post=False, skip_auth=False, raw=False,
                 **kwargs):
        '''
        Performs JSON request.
        '''
        # Encode params
        if len(kwargs) > 0:
            params = urlencode(
                {key: val.encode('utf-8') for key, val in kwargs.items()}
            )
        else:
            params = ''

        # Store for exception handling
        self.request_url = url
        self.request_params = params

        # Append parameters
        if len(params) > 0 and not http_post:
            url = '?'.join((url, params))

        # Create request object with custom headers
        request = Request(url)
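        # Note: urlopen() ignores a .timeout attribute set on a Request object;
        # a timeout normally has to be passed as urlopen(..., timeout=...).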
        request.timeout = 0.5
        request.add_header('User-Agent', USER_AGENT)
        # Optional authentication
        if not skip_auth:
            self.authenticate(request)

        # Fire request
        if http_post:
            handle = urlopen(request, params.encode('utf-8'))
        else:
            handle = urlopen(request)

        # Read and possibly convert response
        text = handle.read()
        # Needed for Microsoft
        if text[:3] == b'\xef\xbb\xbf':
            text = text.decode('UTF-8-sig')
        else:
            text = text.decode('utf-8')
        # Replace literal \t
        text = text.strip().replace(
            '\t', '\\t'
        ).replace(
            '\r', '\\r'
        )
        # Needed for Google
        while ',,' in text or '[,' in text:
            text = text.replace(',,', ',null,').replace('[,', '[')

        if raw:
            return text

        # Parse JSON
        response = json.loads(text)

        # Return data
        return response
Code Example #17
File: test_record_mode.py  Project: darioush/vcrpy
def test_once_mode_three_times(tmpdir):
    testfile = str(tmpdir.join('recordmode.yml'))
    with vcr.use_cassette(testfile, record_mode="once"):
        # get three of the same file
        response1 = urlopen('http://httpbin.org/').read()
        response2 = urlopen('http://httpbin.org/').read()
        response3 = urlopen('http://httpbin.org/').read()
Code Example #18
def _bioinformatics(url):
    try:
        html = urlopen(url)
    except HTTPError as e:
        print(e)
        return None, None  # bail out: html is undefined if urlopen failed
    except URLError as e:
        print("The server could not be found!")
        return None, None
    bsObj = BeautifulSoup(html.read(), "html5lib")
    meta_data = ' '.join(bsObj.find("cite")\
                        .get_text().replace("\n", "").split())
    journal_data = []
    original = bsObj.find("div", {"class": "pub-section-ORIGINALPAPERS"})
    for subject in original.findAll("div", {"class": "level2"}):
        category = subject.find("h3").get_text()
        articles = []
        for article in subject.findAll("li", {"class": "cit"}):
            t = article.find("h4").get_text()
            abst_url = '/'.join(url.split('/')[:3]) \
                    + article.find("a", {"rel": "abstract"})['href']
            try:
                #abst_html = urllib.request.urlopen(abst_url)
                abst_html = urlopen(abst_url)
                abstObj = BeautifulSoup(abst_html.read(), "html5lib")
                abst_contents = abstObj.find("div", {"id": "abstract-1"})
                results = abst_contents.findAll("p")[1].get_text()
                results = results.split(':')[1]
                s = ' '.join(results.replace("\n", " ").split())
                articles.append((t, s))
            except AttributeError:
                continue
        if len(articles) > 0:
            journal_data.append((category, articles))
    return meta_data, journal_data
Code Example #19
File: test_matchers.py  Project: JanLikar/vcrpy
def test_default_matcher_matches(cassette, uri, httpbin, httpbin_secure):

    uri = _replace_httpbin(uri, httpbin, httpbin_secure)

    with vcr.use_cassette(cassette) as cass:
        urlopen(uri)
        assert cass.play_count == 1
Code Example #20
File: com.py  Project: oberstet/vmprof-python
def send(t, args):

    name = args.program
    base_url = args.web
    auth = args.web_auth


    data = {
        "profiles": t.get_tree().flatten()._serialize(),
        "argv": "%s %s" % (name, " ".join(args.args)),
        "version": 1,
    }

    data = json.dumps(data).encode('utf-8')

    # XXX http only for now
    if base_url.startswith("http"):
        url = '%s/api/log/' % base_url.rstrip("/")
    else:
        url = 'http://%s/api/log/' % base_url.rstrip("/")

    headers = {'content-type': 'application/json'}

    if auth:
        headers['AUTHORIZATION'] = "Token %s" % auth

    req = request.Request(url, data, headers)

    request.urlopen(req)
Code Example #21
File: conftest.py  Project: kbg/drms_json
def site_reachable(url, timeout=3):
    """Checks if the given URL is accessible."""
    try:
        urlopen(url, timeout=timeout)
    except (URLError, HTTPError):
        return False
    return True
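Typical usage in a test suite would be a module-level skip; a sketch only, with a hypothetical URL:

    import pytest

    # Hypothetical: skip all tests in this module when the server is unreachable.
    pytestmark = pytest.mark.skipif(
        not site_reachable('http://example.org'),
        reason='test server not reachable')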
Code Example #22
File: nmjv2.py  Project: pymedusa/SickRage
    def _sendNMJ(self, host):
        """
        Send a NMJ update command to the specified machine

        host: The hostname/IP to send the request to (no port)
        database: The database to send the request to
        mount: The mount URL to use (optional)

        return: True if the request succeeded, False otherwise
        """
        # if a host is provided then attempt to open a handle to that URL
        try:
            url_scandir = 'http://' + host + ':8008/metadata_database?arg0=update_scandir&arg1=' + app.NMJv2_DATABASE + '&arg2=&arg3=update_all'
            log.debug(u'NMJ scan update command sent to host: {0}', host)
            url_updatedb = 'http://' + host + ':8008/metadata_database?arg0=scanner_start&arg1=' + app.NMJv2_DATABASE + '&arg2=background&arg3='
            log.debug(u'Try to mount network drive via url: {0}', host)
            prereq = Request(url_scandir)
            req = Request(url_updatedb)
            handle1 = urlopen(prereq)
            response1 = handle1.read()
            time.sleep(0.3)
            handle2 = urlopen(req)
            response2 = handle2.read()
        except IOError as error:
            log.warning(u'Warning: Unable to contact popcorn hour on host {0}: {1}', host, error)
            return False
        try:
            et = etree.fromstring(response1)
            result1 = et.findtext('returnValue')
        except SyntaxError as error:
            log.error(u'Unable to parse XML returned from the Popcorn Hour: update_scandir, {0}', error)
            return False
        try:
            et = etree.fromstring(response2)
            result2 = et.findtext('returnValue')
        except SyntaxError as error:
            log.error(u'Unable to parse XML returned from the Popcorn Hour: scanner_start, {0}', error)
            return False

        # if the result was a number then consider that an error
        error_codes = ['8', '11', '22', '49', '50', '51', '60']
        error_messages = ['Invalid parameter(s)/argument(s)',
                          'Invalid database path',
                          'Insufficient size',
                          'Database write error',
                          'Database read error',
                          'Open fifo pipe failed',
                          'Read only file system']
        if int(result1) > 0:
            index = error_codes.index(result1)
            log.error(u'Popcorn Hour returned an error: {0}', error_messages[index])
            return False
        else:
            if int(result2) > 0:
                index = error_codes.index(result2)
                log.error(u'Popcorn Hour returned an error: {0}', error_messages[index])
                return False
            else:
                log.info(u'NMJv2 started background scan')
                return True
Code Example #23
    def test_hot_template(self):
        self.m.StubOutWithMock(request, "urlopen")
        tmpl_file = "/home/my/dir/template.yaml"
        url = "file:///home/my/dir/template.yaml"
        request.urlopen("file:///home/my/dir/foo.yaml").InAnyOrder().AndReturn(six.BytesIO(self.foo_template))
        request.urlopen("file:///home/my/dir/foo.yaml").InAnyOrder().AndReturn(six.BytesIO(self.foo_template))
        request.urlopen(url).InAnyOrder().AndReturn(six.BytesIO(self.hot_template))
        request.urlopen("file:///home/my/dir/spam/egg.yaml").InAnyOrder().AndReturn(six.BytesIO(self.egg_template))
        request.urlopen("file:///home/my/dir/spam/egg.yaml").InAnyOrder().AndReturn(six.BytesIO(self.egg_template))
        self.m.ReplayAll()

        files, tmpl_parsed = template_utils.get_template_contents(template_file=tmpl_file)

        self.assertEqual(
            yaml.load(self.foo_template.decode("utf-8")), json.loads(files.get("file:///home/my/dir/foo.yaml"))
        )
        self.assertEqual(
            yaml.load(self.egg_template.decode("utf-8")), json.loads(files.get("file:///home/my/dir/spam/egg.yaml"))
        )

        self.assertEqual(
            {
                u"heat_template_version": u"2013-05-23",
                u"parameters": {u"param1": {u"type": u"string"}},
                u"resources": {
                    u"resource1": {u"type": u"file:///home/my/dir/foo.yaml", u"properties": {u"foo": u"bar"}},
                    u"resource2": {
                        u"type": u"OS::Heat::ResourceGroup",
                        u"properties": {u"resource_def": {u"type": u"file:///home/my/dir/spam/egg.yaml"}},
                    },
                },
            },
            tmpl_parsed,
        )
Code Example #24
File: bioformats.py  Project: nkeim/pims
def _download_jar(version='5.7.0'):
    from six.moves.urllib.request import urlopen
    import hashlib

    for loc in _gen_jar_locations():
        # check if dir exists and has write access:
        if os.path.exists(loc) and os.access(loc, os.W_OK):
            break
        # if directory is pims and it does not exist, so make it (if allowed)
        if os.path.basename(loc) == 'pims' and \
           os.access(os.path.dirname(loc), os.W_OK):
            os.mkdir(loc)
            break
    else:
        raise IOError('No writeable location found. In order to use the '
                      'Bioformats reader, please download '
                      'loci_tools.jar to the pims program folder or one of '
                      'the locations provided by _gen_jar_locations().')

    url = ('http://downloads.openmicroscopy.org/bio-formats/' + version +
           '/artifacts/loci_tools.jar')

    path = os.path.join(loc, 'loci_tools.jar')
    loci_tools = urlopen(url).read()
    sha1_checksum = urlopen(url + '.sha1').read().split(b' ')[0].decode()

    downloaded = hashlib.sha1(loci_tools).hexdigest()
    if downloaded != sha1_checksum:
        raise IOError("Downloaded loci_tools.jar has invalid checksum. "
                      "Please try again.")

    with open(path, 'wb') as output:
        output.write(loci_tools)

    return path
Code Example #25
File: test_strategies.py  Project: Eigenstate/osprey
def test_moe_rest_1():
    moe_url = os.environ.get('MOE_API_URL', 'http://ERROR-sdjssfssdbsdf.com')
    try:
        request.urlopen(moe_url)
    except error.URLError:
        raise nose.SkipTest(
            'No available MOE REST API endpoint (set with '
            'MOE_API_URL environment variable)')

    searchspace = SearchSpace()
    searchspace.add_float('x', -10, 10)
    searchspace.add_float('y', 1, 10, warp='log')
    searchspace.add_int('z', -10, 10)
    searchspace.add_enum('w', ['opt1', 'opt2'])

    history = [(searchspace.rvs(), np.random.random(), 'SUCCEEDED')
               for _ in range(4)]
    params = MOE(url=moe_url).suggest(history, searchspace)
    for k, v in iteritems(params):
        assert k in searchspace.variables
        if isinstance(searchspace[k], EnumVariable):
            assert v in searchspace[k].choices
        elif isinstance(searchspace[k], FloatVariable):
            assert searchspace[k].min <= v <= searchspace[k].max
        elif isinstance(searchspace[k], IntVariable):
            assert searchspace[k].min <= v <= searchspace[k].max
        else:
            assert False
Code Example #26
File: test_ignore.py  Project: JeffSpies/vcrpy
def test_ignore_localhost(tmpdir, httpserver):
    httpserver.serve_content('Hello!')
    cass_file = str(tmpdir.join('filter_qs.yaml'))
    with vcr.use_cassette(cass_file, ignore_localhost=True) as cass:
        urlopen(httpserver.url)
        assert len(cass) == 0
        urlopen('http://httpbin.org')
        assert len(cass) == 1
Code Example #27
File: test_urllib2.py  Project: JanLikar/vcrpy
def urlopen_with_cafile(*args, **kwargs):
    kwargs['cafile'] = pytest_httpbin.certs.where()
    try:
        return urlopen(*args, **kwargs)
    except TypeError:
        # python2/pypy don't let us override this
        del kwargs['cafile']
        return urlopen(*args, **kwargs)
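On current Python 3 the cafile argument to urlopen is deprecated in favor of an SSLContext; a sketch of the modern spelling, with a hypothetical certificate path:

    import ssl
    from urllib.request import urlopen

    ctx = ssl.create_default_context(cafile='/path/to/ca.pem')  # hypothetical path
    urlopen('https://example.com', context=ctx)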
Code Example #28
File: test_filter.py  Project: Bjwebb/vcrpy
def test_filter_querystring(tmpdir):
    url = 'http://httpbin.org/?foo=bar'
    cass_file = str(tmpdir.join('filter_qs.yaml'))
    with vcr.use_cassette(cass_file, filter_query_parameters=['foo']):
        urlopen(url)
    with vcr.use_cassette(cass_file, filter_query_parameters=['foo']) as cass:
        urlopen(url)
        assert 'foo' not in cass.requests[0].url
Code Example #29
File: test_urllib2.py  Project: aah/vcrpy
def test_random_body(scheme, tmpdir):
    '''Ensure we can read the content, and that it's served from cache'''
    url = scheme + '://httpbin.org/bytes/1024'
    with vcr.use_cassette(str(tmpdir.join('body.yaml'))) as cass:
        body = urlopen(url).read()

    with vcr.use_cassette(str(tmpdir.join('body.yaml'))) as cass:
        assert body == urlopen(url).read()
Code Example #30
File: test_filter.py  Project: Bjwebb/vcrpy
def test_filter_post_data(tmpdir):
    url = 'http://httpbin.org/post'
    data = urlencode({'id': 'secret', 'foo': 'bar'}).encode('utf-8')
    cass_file = str(tmpdir.join('filter_pd.yaml'))
    with vcr.use_cassette(cass_file, filter_post_data_parameters=['id']):
        urlopen(url, data)
    with vcr.use_cassette(cass_file, filter_post_data_parameters=['id']) as cass:
        assert b'id=secret' not in cass.requests[0].body
Code Example #31
    def test_get_file(self):
        with closing(urlopen(self._url('root.txt'))) as res:
            self.assertEqual(res.read(), b'Hello, World!')
        with closing(urlopen(self._url('top/file.bin'))) as res:
            self.assertEqual(res.read(), b'Hi there!')
Code Example #32
    def test_get_file_not_found(self):
        with self.assertRaises(HTTPError) as err:
            urlopen(self._url('not-found.txt'))
        self.assertEqual(err.exception.code, 404)
Code Example #33
    def test_get_file_unicode(self):
        with closing(urlopen(self._url('top/middle/bottom/☻.txt'))) as res:
            self.assertEqual(res.read(), b'Happy face !')
Code Example #34
def cli(spec, env, url, http_options):
    click.echo('Version: %s' % __version__)

    copied, config_path = config.initialize()
    if copied:
        click.echo('Config file not found. Initialized a new one: %s' %
                   config_path)

    cfg = config.load()

    # Override pager/less options
    os.environ['PAGER'] = cfg['pager']
    os.environ['LESS'] = '-RXF'

    if spec:
        f = urlopen(spec)
        try:
            content = f.read().decode('utf-8')
            try:
                spec = json.loads(content)
            except json.JSONDecodeError:
                click.secho("Warning: Specification file '%s' is not JSON" %
                            spec,
                            err=True,
                            fg='red')
                spec = None
        finally:
            f.close()

    if url:
        url = fix_incomplete_url(url)
    context = Context(url, spec=spec)

    output_style = cfg.get('output_style')
    if output_style:
        context.options['--style'] = output_style

    # For prompt-toolkit
    history = FileHistory(os.path.join(get_data_dir(), 'history'))
    lexer = PygmentsLexer(HttpPromptLexer)
    completer = HttpPromptCompleter(context)
    try:
        style_class = get_style_by_name(cfg['command_style'])
    except ClassNotFound:
        style_class = Solarized256Style
    style = style_from_pygments(style_class)

    listener = ExecutionListener(cfg)

    if len(sys.argv) == 1:
        # load previous context if nothing defined
        load_context(context)
    else:
        if env:
            load_context(context, env)
            if url:
                # Overwrite the env url if not default
                context.url = url

        if http_options:
            # Execute HTTPie options from CLI (can overwrite env file values)
            http_options = [smart_quote(a) for a in http_options]
            execute(' '.join(http_options), context, listener=listener)

    while True:
        try:
            text = prompt('%s> ' % context.url,
                          completer=completer,
                          lexer=lexer,
                          style=style,
                          history=history,
                          auto_suggest=AutoSuggestFromHistory(),
                          on_abort=AbortAction.RETRY,
                          vi_mode=cfg['vi'])
        except EOFError:
            break  # Control-D pressed
        else:
            execute(text, context, listener=listener, style=style_class)
            if context.should_exit:
                break

    click.echo("Goodbye!")
Code Example #35
from six import PY3
import six.moves.urllib.request as urlreq  # needed for urlreq.urlopen() below

import dash
import dash_bio as dashbio
import dash_html_components as html
import random

Graph_ID = 10
#group ID

#--------------------------------------------------------------------------------------------------------------------------------------
styles_data = urlreq.urlopen(
    'https://raw.githubusercontent.com/plotly/dash-bio-docs-files/master/' +
    'mol3d/styles_data.js').read()

if PY3:
    styles_data = styles_data.decode('utf-8')

Node_Label_list = []
f = open('DHFR_node_labels.txt')
for line in f:
    Node_Label_list.append(int(line))
f.close()

Node_Attribute_list = []
f = open('DHFR_node_attributes.txt')
for line in f:
    temp = line.split(',')
Code Example #36
def index(request):
    if request.method == 'POST':
        url = request.POST.get('name', '')
        reqs = requests.get(url)
        soup = BeautifulSoup(reqs.text, 'html.parser')

        urls = ''
        str1 = 'https://www.eventbrite.com/d/online'
        str2 = 'https://www.eventbrite.com/e/', 'https://www.eventbrite.com.au/e/', 'https://www.eventbrite.co.uk/e/', 'https://www.eventbrite.ca/e'
        str3 = 'https://www.eventbrite.ca/e'
        str4 = 'Technologies', 'Engineering', 'Computer Engineering', 'Civil Engineering', 'Mechanical Engineering', 'Electronics Engineering', 'Electrical Engineering', 'Doctors', 'Obstetrics', 'Gynaecology', 'Dentistry', 'Internal Medicines', 'Paediatrics', 'General Medicine (MBBS)', 'Sports', 'Basketball', 'Cricket', 'Hockey', 'Table Tennis', 'Badminton', 'Artists', 'Photography', 'Music', 'Painting', 'Dancing', 'Theatre and Film', 'Writing', 'Hip Hop Dancing', 'Salsa Dancing', 'Zumba', 'Classical Dancing', 'Activism', 'Social Activism', 'Political Activism', 'Animal Welfare', 'Environment Activism', 'Lawn Tennis', 'Fitness', 'Gym', 'Running', 'Cycling', 'Yoga', 'Wrestling', 'Kizomba Dancing', 'Food and Nutrition', 'Bollywood', 'Belly Dancing', 'Football', 'Golf', 'Skating', 'Blockchain & Cryptocurrency', 'Travelling', 'Lawyer', 'Leisure', 'Trekking & Hiking', 'Business', 'Startup', 'Code Development', 'QA & testing', 'DevOps', 'Designing', 'Parenting', 'UI Designing', 'Graphic Designing', 'Meditation', 'Self Healing', 'Computer Networking', 'Party', 'Cyber Security', 'Dermatology', 'Neurology', 'Ophthalmology', 'Psychiatry', 'Toxicology', 'Baseball', 'Volleyball', 'Rugby', 'Boxing', 'Motorsports', 'Cardiology', 'Orthopaedic', 'Endocrinology', 'Oncology', 'Anesthesia', 'Gardening', 'Infertility', 'Immigration', 'Makeup Artist', 'MBA', 'Human Resource', 'Finance', 'Marketing', 'Leadership', 'Financial Planning', 'Management', 'Containers', 'Accounts', 'Physics', 'Chemistry', 'Economics', 'Literature', 'Chartered Accountant', 'Linguistic and Languages', 'English Learning', 'Spanish Learning', 'Beer', 'Marijuana', 'Real Estate', 'Emerging Technologies', 'Data Analytics', 'Mental Peace', 'Sketching', 'Russian Language', 'German Language', 'Business Strategy', 'Wine', 'Linux', 'Business Networking', 'Artificial Intelligence', 'Game Developing', 'Internet Of Things', 'Aeronautical Engineering', 'Chemical Engineering', 'Fashion', 'Flight Attendant', 'Pilot', 'Robotic Engineering', 'Poetry', 'Philosophy', 'Architecture', 'Rowing', 'Virtual Reality', 'Biochemistry', 'Religion', 'Spirituality', 'Agriculture', 'Book Reading', 'History', 'Space', 'Pharmacy', 'Education', 'Entrance Exam', 'Musical Instrument', 'Lifestyle', 'Maths', 'French Language', 'Student', 'Chess', 'Guitar', 'Tabla', 'Piano', 'Violin', 'Flute', 'Drum', 'Saxophone', 'Keyboard', 'Harmonium', 'Mythology', 'Market Analysis', 'LGBT', 'Class 11', 'Class 10', 'Class 12', 'Teacher', 'Festival', 'Beauty', 'IIT JEE', 'Archaeology', 'Civilisation', 'Astronomy', 'Cooking', 'Trading', 'Movie Watching', 'Archery', 'Bowling', 'Curling', 'Karate', 'Weight Lifting', 'Shooting', 'Ballet', 'Kathak', 'Aerial', 'Tango', 'Cancan', 'Magic', 'Horse Riding', 'Self Defence', 'Swimming', 'Product Reviews', 'Massage', 'Mass media and Journalism', 'Dating', 'Database Administrator', 'Machine Learning', 'Personal Development', 'Career Counselling', 'Gaming', 'Construction', 'Urban Planning', 'Structural Engineering', 'Transportation Engineering', 'Geotechnical Engineering', 'Industrial Automation Engineering', 'Nanotechnology Engineering', 'Mechatronics Engineering', 'Industrial Engineering', 'Robotics Engineering', 'Pottery', 'Politics', 'Gender Issue', 'Humanity', 'DJ', 'Digital Marketing', 'Volunteering', 'Venture Capital', 'Hotel Management', 'Adventure', 'Bungee Jumping', 'Sky Diving', 'Ocean Diving', 'Canoeing', 'Camping', 'Inventors', 'Anthropology', 'Fine Arts', 'Freeflying', 'Scuba Diving', 'Wind Surfing', 'Homeless', 'mountain biking', 'Music Producers', 'Performing and visual arts', 'Operations', 'Driving', 'R&D', 'Evolution', 'ITI', 'Affiliate Marketing', 'Content Marketing', 'Email Marketing', 'Pay per Click Advertising', 'Search Engine Marketing', 'Social media marketing', 'ATG_INTERNAL'
        urls1 = []
        for link in soup.find_all('a'):
            try:
                da = link.get('href')
                if str1 in da:
                    if da == 'https://www.eventbrite.com/d/online/events/':
                        pass

                    else:
                        reqs1 = requests.get(da)
                        soup = BeautifulSoup(reqs1.text, 'html.parser')
                        for link in soup.find_all('a'):
                            try:
                                data = link.get('href')
                                for j in str2:
                                    if j in data:
                                        html = urlopen(data)
                                        bs = BeautifulSoup(html, 'html.parser')
                                        images = bs.find_all(
                                            'picture',
                                            {'content': re.compile('')})
                                        i = 0
                                        for image in images:
                                            if i == 0:
                                                img = image['content'] + '\n'
                                                i = i + 1
                                            else:
                                                pass
                                        reqs2 = requests.get(data)
                                        soup = BeautifulSoup(
                                            reqs2.text, 'html.parser')
                                        for link in soup.find_all('title'):
                                            title = link.get_text()
                                            # print(title)
                                            for j in str4:
                                                if j in title:
                                                    ata = DATA(title=title,
                                                               img=img,
                                                               int_grp=j)
                                                    try:
                                                        DATA.objects.get(
                                                            title=title)
                                                        pass
                                                    except:
                                                        ata.save()
                                                        print("DATA SAVE")
                                                        print("TITLE:", title)
                                                        print("IMAGE:", img)
                                                        print(
                                                            "INTERSETED GROUP:",
                                                            j)

                            except:
                                pass

            except:
                pass
        return render(request, 'index.html')

    return render(request, 'index.html')
Code Example #37
    def test_env_nested_includes(self):
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env_url = 'file:///home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          "OS::Thingy": template.yaml
        '''
        template_url = u'file:///home/my/dir/template.yaml'
        foo_url = u'file:///home/my/dir/foo.yaml'
        egg_url = u'file:///home/my/dir/spam/egg.yaml'
        ham_url = u'file:///home/my/dir/spam/ham.yaml'
        one_url = u'file:///home/my/dir/spam/one.yaml'
        two_url = u'file:///home/my/dir/spam/two.yaml'
        three_url = u'file:///home/my/dir/spam/three.yaml'

        request.urlopen(env_url).AndReturn(
            six.BytesIO(env))
        request.urlopen(template_url).AndReturn(
            six.BytesIO(self.hot_template))

        request.urlopen(foo_url).InAnyOrder().AndReturn(
            six.BytesIO(self.foo_template))
        request.urlopen(egg_url).InAnyOrder().AndReturn(
            six.BytesIO(self.egg_template))
        request.urlopen(ham_url).InAnyOrder().AndReturn(
            six.BytesIO(b'ham contents'))
        request.urlopen(one_url).InAnyOrder().AndReturn(
            six.BytesIO(self.foo_template))
        request.urlopen(two_url).InAnyOrder().AndReturn(
            six.BytesIO(self.foo_template))
        request.urlopen(three_url).InAnyOrder().AndReturn(
            six.BytesIO(b'three contents'))
        self.m.ReplayAll()

        files, env_dict = template_utils.process_environment_and_files(
            env_file)

        self.assertEqual(
            {'resource_registry': {
                'OS::Thingy': template_url}},
            env_dict)

        self.assertEqual({
            u'heat_template_version': u'2013-05-23',
            u'parameters': {u'param1': {u'type': u'string'}},
            u'resources': {
                u'resource1': {
                    u'properties': {u'foo': u'bar'},
                    u'type': foo_url
                },
                u'resource2': {
                    u'type': u'OS::Heat::ResourceGroup',
                    u'properties': {
                        u'resource_def': {
                            u'type': egg_url},
                        u'with': {u'get_file': ham_url}
                    }
                }
            }
        }, json.loads(files.get(template_url)))

        self.assertEqual(yaml.load(self.foo_template.decode('utf-8')),
                         json.loads(files.get(foo_url)))
        self.assertEqual({
            u'heat_template_version': u'2013-05-23',
            u'parameters': {u'param1': {u'type': u'string'}},
            u'resources': {
                u'resource1': {
                    u'properties': {u'foo': u'bar'},
                    u'type': one_url},
                u'resource2': {
                    u'type': u'OS::Heat::ResourceGroup',
                    u'properties': {
                        u'resource_def': {u'type': two_url},
                        u'with': {u'get_file': three_url}
                    }
                }
            }
        }, json.loads(files.get(egg_url)))
        self.assertEqual(b'ham contents',
                         files.get(ham_url))
        self.assertEqual(yaml.load(self.foo_template.decode('utf-8')),
                         json.loads(files.get(one_url)))
        self.assertEqual(yaml.load(self.foo_template.decode('utf-8')),
                         json.loads(files.get(two_url)))
        self.assertEqual(b'three contents',
                         files.get(three_url))

        self.m.VerifyAll()
Code Example #38
def test_don_quixote(mock_site):
    """
    All of these items are by 'Miguel de Cervantes Saavedra',
    only one Author should be created. Some items have bad
    MARC length, others are missing binary MARC altogether
    and raise BadMARC exceptions.
    """
    pytest.skip("This test makes live requests to archive.org")

    dq = [
        u'lifeexploitsofin01cerv', u'cu31924096224518',
        u'elingeniosedcrit04cerv', u'ingeniousgentlem01cervuoft',
        u'historyofingenio01cerv', u'lifeexploitsofin02cerviala',
        u'elingeniosohidal03cervuoft', u'nybc209000',
        u'elingeniosohidal11cerv', u'elingeniosohidal01cervuoft',
        u'elingeniosoh01cerv', u'donquixotedelama00cerviala',
        u'1896elingeniosohid02cerv', u'ingeniousgentlem04cervuoft',
        u'cu31924027656978', u'histoiredeladmir01cerv',
        u'donquijotedelama04cerv', u'cu31924027657075',
        u'donquixotedelama03cervuoft', u'aventurasdedonqu00cerv',
        u'p1elingeniosohid03cerv', u'geshikhefundonik01cervuoft',
        u'historyofvalorou02cerviala', u'ingeniousgentlem01cerv',
        u'donquixotedelama01cervuoft', u'ingeniousgentlem0195cerv',
        u'firstpartofdelig00cervuoft', u'p4elingeniosohid02cerv',
        u'donquijote00cervuoft', u'cu31924008863924',
        u'c2elingeniosohid02cerv', u'historyofvalorou03cerviala',
        u'historyofingenio01cerviala', u'historyadventure00cerv',
        u'elingeniosohidal00cerv', u'lifeexploitsofin01cervuoft',
        u'p2elingeniosohid05cerv', u'nybc203136',
        u'elingeniosohidal00cervuoft', u'donquixotedelama02cervuoft',
        u'lingnieuxcheva00cerv', u'ingeniousgentlem03cerv',
        u'vidayhechosdeli00siscgoog', u'lifeandexploits01jarvgoog',
        u'elingeniosohida00puiggoog', u'elingeniosohida00navagoog',
        u'donquichottedel02florgoog', u'historydonquixo00cogoog',
        u'vidayhechosdeli01siscgoog', u'elingeniosohida28saavgoog',
        u'historyvalorous00brangoog', u'elingeniosohida01goog',
        u'historyandadven00unkngoog', u'historyvalorous01goog',
        u'ingeniousgentle11saavgoog', u'elingeniosohida10saavgoog',
        u'adventuresdonqu00jarvgoog', u'historydonquixo04saavgoog',
        u'lingnieuxcheval00rouxgoog', u'elingeniosohida19saavgoog',
        u'historyingeniou00lalagoog', u'elingeniosohida00ormsgoog',
        u'historyandadven01smolgoog', u'elingeniosohida27saavgoog',
        u'elingeniosohida21saavgoog', u'historyingeniou00mottgoog',
        u'historyingeniou03unkngoog', u'lifeandexploits00jarvgoog',
        u'ingeniousgentle00conggoog', u'elingeniosohida00quixgoog',
        u'elingeniosohida01saavgoog', u'donquixotedelam02saavgoog',
        u'adventuresdonqu00gilbgoog', u'historyingeniou02saavgoog',
        u'donquixotedelam03saavgoog', u'elingeniosohida00ochogoog',
        u'historyingeniou08mottgoog', u'lifeandexploits01saavgoog',
        u'firstpartdeligh00shelgoog', u'elingeniosohida00castgoog',
        u'elingeniosohida01castgoog', u'adventofdonquixo00cerv',
        u'portablecervante00cerv', u'firstpartofdelig14cerv',
        u'donquixotemanofl00cerv', u'firstpartofdelig00cerv'
    ]

    bad_length = []
    bad_marc = []

    add_languages(mock_site)
    edition_status_counts = defaultdict(int)
    work_status_counts = defaultdict(int)
    author_status_counts = defaultdict(int)

    for ocaid in dq:
        marc_url = 'https://archive.org/download/%s/%s_meta.mrc' % (ocaid,
                                                                    ocaid)
        data = urlopen(marc_url).read()
        try:
            marc = MarcBinary(data)
        except BadLength:
            bad_length.append(ocaid)
            continue
        except BadMARC:
            bad_marc.append(ocaid)
            continue

        rec = read_edition(marc)
        rec['source_records'] = ['ia:' + ocaid]
        reply = load(rec)

        q = {
            'type': '/type/work',
            'authors.author': '/authors/OL1A',
        }
        work_keys = list(mock_site.things(q))
        author_keys = list(mock_site.things({'type': '/type/author'}))
        print("\nReply for %s: %s" % (ocaid, reply))
        print("Work keys: %s" % work_keys)
        assert author_keys == ['/authors/OL1A']
        assert reply['success'] is True

        # Increment status counters
        edition_status_counts[reply['edition']['status']] += 1
        work_status_counts[reply['work']['status']] += 1
        if (reply['work']['status'] !=
                'matched') and (reply['edition']['status'] != 'modified'):
            # No author key in response if work is 'matched'
            # No author key in response if edition is 'modified'
            author_status_counts[reply['authors'][0]['status']] += 1

    print("BAD MARC LENGTH items: %s" % bad_length)
    print("BAD MARC items: %s" % bad_marc)
    print("Edition status counts: %s" % edition_status_counts)
    print("Work status counts: %s" % work_status_counts)
    print("Author status counts: %s" % author_status_counts)
Code Example #39
    def after_request(self, response):
        """
        The heavy lifter. This method collects the majority of data
        and passes it off for storage.

        :Parameters:
           - `response`: The response on its way to the client.
        """
        ctx = _request_ctx_stack.top
        view_func = self.app.view_functions.get(ctx.request.endpoint)
        if self._type == 'exclude':
            if view_func in self._exclude_views:
                return response
        elif self._type == 'include':
            if view_func not in self._include_views:
                return response
        else:
            raise NotImplementedError('You must set include or exclude type.')

        now = datetime.datetime.utcnow()
        speed = None
        try:
            speed = (now - g.start_time).total_seconds()
        except:
            # Older python versions don't have total_seconds()
            speed_result = (now - g.start_time)
            speed = float("%s.%s" %
                          (speed_result.seconds, speed_result.microseconds))

        if self._fake_time:
            current_time = self._fake_time
        else:
            current_time = now

        data = {
            'url': ctx.request.url,
            'user_agent': ctx.request.user_agent,
            'server_name': ctx.app.name,
            'blueprint': ctx.request.blueprint,
            'view_args': ctx.request.view_args,
            'status': response.status_code,
            'remote_addr': ctx.request.remote_addr,
            'xforwardedfor': ctx.request.headers.get('X-Forwarded-For', None),
            'authorization': bool(ctx.request.authorization),
            'ip_info': None,
            'path': ctx.request.path,
            'speed': float(speed),
            'date': int(time.mktime(current_time.timetuple())),
            'content_length': response.content_length,
            'request': "{} {} {}".format(ctx.request.method, ctx.request.url,
                                         ctx.request.environ.get('SERVER_PROTOCOL')),
            'url_args': dict([(k, ctx.request.args[k]) for k in ctx.request.args]),
            'username': None,
            'track_var': g.track_var
        }
        if ctx.request.authorization:
            data['username'] = str(ctx.request.authorization.username)
        if self._use_freegeoip:
            clean_ip = quote_plus(str(ctx.request.remote_addr))
            if '{ip}' in self._freegeoip_endpoint:
                url = self._freegeoip_endpoint.format(ip=clean_ip)
            else:
                url = self._freegeoip_endpoint + clean_ip
            # separate capture and conversion to aid in debugging
            text = urlopen(url).read()
            ip_info = json.loads(text)
            if url.startswith("http://extreme-ip-lookup.com/"):
                del ip_info["businessWebsite"]
                del ip_info["status"]
            data['ip_info'] = ip_info

        for storage in self._storages:
            storage(data)
        return response
Code Example #40
File: onnx2xla.py  Project: zudehuang/jax
    for node in graph.node:
        args = (vals[name] for name in node.input)
        attrs = {a.name: attribute_handlers[a.type](a) for a in node.attribute}
        outputs = onnx_ops[node.op_type](*args, **attrs)
        for name, output in zip(node.output, outputs):
            vals[name] = output
    return [vals[n.name] for n in graph.output]


if __name__ == "__main__":
    # It seems that there are several ONNX proto versions (you had one job!) but
    # this implementation works with at least this one mnist example file.
    url = ('https://github.com/onnx/models/blob/'
           '81c4779096d1205edd0b809e191a924c58c38fef/'
           'mnist/model.onnx?raw=true')
    download = urlopen(url).read()
    if hashlib.md5(download).hexdigest() != 'bc8ad9bd19c5a058055dc18d0f089dad':
        print("onnx file checksum mismatch")
        sys.exit(1)
    model = onnx.load(BytesIO(download))  # download is bytes, so BytesIO (not StringIO) is needed on Python 3

    predict = lambda inputs: interpret_onnx(model.graph, inputs)[0]

    # Run inference in Numpy-backed interpreter
    print("interpreted:")
    print(predict(np.ones((1, 1, 28, 28))))

    # JIT compile to XLA device, run inference on device
    compiled_predict = jit(predict)
    print("compiled:")
    print(compiled_predict(np.ones((1, 1, 28, 28))))
Code Example #41
    ("rest_framework_jwt.authentication.JSONWebTokenAuthentication", ),
    "DEFAULT_FILTER_BACKENDS":
    ("django_filters.rest_framework.DjangoFilterBackend", ),
}

CORS_ORIGIN_WHITELIST = ("localhost:3000", "interactivemap-frontend-*.now.sh")

AUTH0_DOMAIN = os.environ.get("AUTH0_DOMAIN", "chronoscio.auth0.com")
API_IDENTIFIER = os.environ.get("API_IDENTIFIER", "https://chronoscio.org/")
AUTH0_CLIENT_ID = os.environ.get("AUTH0_CLIENT_ID", "")
AUTH0_CLIENT_SECRET = os.environ.get("AUTH0_CLIENT_SECRET", "")
PUBLIC_KEY = None
JWT_ISSUER = None

if AUTH0_DOMAIN:
    jsonurl = request.urlopen("https://" + AUTH0_DOMAIN +
                              "/.well-known/jwks.json")
    jwks = json.loads(jsonurl.read())
    # Add a line-break every 64 chars
    # https://stackoverflow.com/questions/2657693/insert-a-newline-character-every-64-characters-using-python
    body = re.sub("(.{64})", "\\1\n", jwks["keys"][0]["x5c"][0], 0, re.DOTALL)
    cert = "-----BEGIN CERTIFICATE-----\n" + body + "\n-----END CERTIFICATE-----"
    certificate = load_pem_x509_certificate(cert.encode("utf-8"),
                                            default_backend())
    PUBLIC_KEY = certificate.public_key()
    JWT_ISSUER = "https://" + AUTH0_DOMAIN + "/"

JWT_AUTH = {
    "JWT_PAYLOAD_GET_USERNAME_HANDLER":
    "api.user.jwt_get_username_from_payload_handler",
    "JWT_PUBLIC_KEY": PUBLIC_KEY,
    "JWT_ALGORITHM": "RS256",
Code Example #42
File: image.py  Project: xuyifeng-nwpu/matplotlib
def imread(fname, format=None):
    """
    Read an image from a file into an array.

    *fname* may be a string path, a valid URL, or a Python
    file-like object.  If using a file object, it must be opened in binary
    mode.

    If *format* is provided, will try to read file of that type,
    otherwise the format is deduced from the filename.  If nothing can
    be deduced, PNG is tried.

    Return value is a :class:`numpy.array`.  For grayscale images, the
    return array is MxN.  For RGB images, the return value is MxNx3.
    For RGBA images the return value is MxNx4.

    matplotlib can only read PNGs natively, but if `PIL
    <http://www.pythonware.com/products/pil/>`_ is installed, it will
    use it to load the image and return an array (if possible) which
    can be used with :func:`~matplotlib.pyplot.imshow`. Note, URL strings
    may not be compatible with PIL. Check the PIL documentation for more
    information.
    """
    def pilread(fname):
        """try to load the image with PIL or return None"""
        try:
            from PIL import Image
        except ImportError:
            return None
        with Image.open(fname) as image:
            return pil_to_array(image)

    handlers = {
        'png': _png.read_png,
    }
    if format is None:
        if cbook.is_string_like(fname):
            parsed = urlparse(fname)
            # If the string is a URL, assume png
            if len(parsed.scheme) > 1:
                ext = 'png'
            else:
                basename, ext = os.path.splitext(fname)
                ext = ext.lower()[1:]
        elif hasattr(fname, 'name'):
            basename, ext = os.path.splitext(fname.name)
            ext = ext.lower()[1:]
        else:
            ext = 'png'
    else:
        ext = format

    if ext not in handlers:
        im = pilread(fname)
        if im is None:
            raise ValueError('Only know how to handle extensions: %s; '
                             'with Pillow installed matplotlib can handle '
                             'more images' % list(six.iterkeys(handlers)))
        return im

    handler = handlers[ext]

    # To handle Unicode filenames, we pass a file object to the PNG
    # reader extension, since Python handles them quite well, but it's
    # tricky in C.
    if cbook.is_string_like(fname):
        parsed = urlparse(fname)
        # If fname is a URL, download the data
        if len(parsed.scheme) > 1:
            fd = BytesIO(urlopen(fname).read())
            return handler(fd)
        else:
            with open(fname, 'rb') as fd:
                return handler(fd)
    else:
        return handler(fname)
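A minimal call matching the URL branch above (assuming a reachable PNG and this version of imread):

    import matplotlib.image as mpimg

    # Hypothetical URL; the scheme check routes it through urlopen + BytesIO.
    arr = mpimg.imread('https://example.com/figure.png')
    print(arr.shape)  # (M, N, 3) for RGB, (M, N, 4) for RGBA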
Code Example #43
    def test_process_multiple_environments_default_resources(self):

        self.m.StubOutWithMock(request, 'urlopen')
        env_file1 = '/home/my/dir/env1.yaml'
        env_file2 = '/home/my/dir/env2.yaml'

        env1 = b'''
        resource_registry:
          resources:
            resource1:
              "OS::Thingy1": "file:///home/b/a.yaml"
            resource2:
              "OS::Thingy2": "file:///home/b/b.yaml"
        '''
        env2 = b'''
        resource_registry:
          resources:
            resource1:
              "OS::Thingy3": "file:///home/b/a.yaml"
            resource2:
              "OS::Thingy4": "file:///home/b/b.yaml"
        '''

        request.urlopen('file://%s' % env_file1).AndReturn(
            six.BytesIO(env1))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file://%s' % env_file2).AndReturn(
            six.BytesIO(env2))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()

        files, env = template_utils.process_multiple_environments_and_files(
            [env_file1, env_file2])
        self.assertEqual(
            {
                'resource_registry': {
                    'resources': {
                        'resource1': {
                            'OS::Thingy1': 'file:///home/b/a.yaml',
                            'OS::Thingy3': 'file:///home/b/a.yaml'
                        },
                        'resource2': {
                            'OS::Thingy2': 'file:///home/b/b.yaml',
                            'OS::Thingy4': 'file:///home/b/b.yaml'
                        }
                    }
                }
            },
            env)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/a.yaml'])
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/b.yaml'])
コード例 #44
    def test_hot_template(self):
        self.m.StubOutWithMock(request, 'urlopen')

        tmpl_file = '/home/my/dir/template.yaml'
        url = 'file:///home/my/dir/template.yaml'
        request.urlopen(url).AndReturn(
            six.BytesIO(self.hot_template))
        request.urlopen(
            'http://localhost/bar.yaml').InAnyOrder().AndReturn(
                six.BytesIO(b'bar contents'))
        request.urlopen(
            'file:///home/my/dir/foo.yaml').InAnyOrder().AndReturn(
                six.BytesIO(b'foo contents'))
        request.urlopen(
            'file:///home/my/dir/baz/baz1.yaml').InAnyOrder().AndReturn(
                six.BytesIO(b'baz1 contents'))
        request.urlopen(
            'file:///home/my/dir/baz/baz2.yaml').InAnyOrder().AndReturn(
                six.BytesIO(b'baz2 contents'))
        request.urlopen(
            'file:///home/my/dir/baz/baz3.yaml').InAnyOrder().AndReturn(
                six.BytesIO(b'baz3 contents'))

        self.m.ReplayAll()

        files, tmpl_parsed = template_utils.get_template_contents(
            template_file=tmpl_file)

        self.assertEqual({
            'http://localhost/bar.yaml': b'bar contents',
            'file:///home/my/dir/foo.yaml': b'foo contents',
            'file:///home/my/dir/baz/baz1.yaml': b'baz1 contents',
            'file:///home/my/dir/baz/baz2.yaml': b'baz2 contents',
            'file:///home/my/dir/baz/baz3.yaml': b'baz3 contents',
        }, files)
        self.assertEqual({
            'heat_template_version': '2013-05-23',
            'resources': {
                'resource1': {
                    'type': 'OS::type1',
                    'properties': {
                        'bar': {'get_file': 'http://localhost/bar.yaml'},
                        'foo': {'get_file': 'file:///home/my/dir/foo.yaml'},
                    },
                },
                'resource2': {
                    'type': 'OS::type1',
                    'properties': {
                        'baz': [
                            {'get_file': 'file:///home/my/dir/baz/baz1.yaml'},
                            {'get_file': 'file:///home/my/dir/baz/baz2.yaml'},
                            {'get_file': 'file:///home/my/dir/baz/baz3.yaml'},
                        ],
                        'ignored_list': {'get_file': ['ignore', 'me']},
                        'ignored_dict': {'get_file': {'ignore': 'me'}},
                        'ignored_none': {'get_file': None},
                    },
                }
            }
        }, tmpl_parsed)
コード例 #45
    def test_mime_type(self):
        request = Request(self._url('video.mp4'))
        request.get_method = lambda: 'HEAD'
        with closing(urlopen(request)) as res:
            self.assertEqual(res.headers['Content-type'], 'video/mp4')
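
The same HEAD-request pattern works outside of a test; a small sketch, assuming six for portable imports:

from contextlib import closing
from six.moves.urllib.request import Request, urlopen

def head_content_type(url):
    # A HEAD request transfers only the headers, not the response body.
    req = Request(url)
    req.get_method = lambda: 'HEAD'
    with closing(urlopen(req)) as res:
        return res.headers['Content-type']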
コード例 #46
ファイル: htmltools.py プロジェクト: orest-d/pymagf
def fetch(url):
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    return urlopen(req).read()
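
fetch() returns raw bytes, so callers typically decode before parsing; a short usage sketch with a hypothetical URL:

html = fetch('https://example.com')
text = html.decode('utf-8', errors='replace')  # bytes -> str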
コード例 #47
def get_page_for_platform(command, platform):
    # read() so the caller gets the page bytes rather than the response object
    data = urlopen(remote + "/" + platform + "/" + quote(command) + ".md").read()
    return data
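
A hypothetical call, assuming `remote` is the module-level base URL of the tldr pages:

page = get_page_for_platform('tar', 'linux')
print(page.decode('utf-8'))  # the page arrives as bytes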
コード例 #48
def catcher_remote_image(request):
    """远程抓图,当catchRemoteImageEnable:true时,
        如果前端插入图片地址与当前web不在同一个域,则由本函数从远程下载图片到本地
    """
    if not request.method == "POST":
        return HttpResponse(json.dumps(u"{'state:'ERROR'}"),
                            content_type="application/javascript")

    state = "SUCCESS"

    allow_type = list(
        request.GET.get(
            "catcherAllowFiles",
            USettings.UEditorUploadSettings.get("catcherAllowFiles", "")))
    max_size = int(
        request.GET.get(
            "catcherMaxSize",
            USettings.UEditorUploadSettings.get("catcherMaxSize", 0)))

    remote_urls = request.POST.getlist("source[]", [])
    catcher_infos = []
    path_format_var = get_path_format_vars()

    for remote_url in remote_urls:
        # Get the original filename of the remote file
        remote_file_name = os.path.basename(remote_url)
        remote_original_name, remote_original_ext = os.path.splitext(
            remote_file_name)
        # Validate the file type against the allowed list
        if remote_original_ext in allow_type:
            path_format_var.update({
                "basename": remote_original_name,
                "extname": remote_original_ext[1:],
                "filename": remote_original_name
            })
            # Compute the filename to save under
            o_path_format, o_path, o_file = get_output_path(
                request, "catcherPathFormat", path_format_var)
            o_filename = os.path.join(o_path, o_file).replace("\\", "/")
            # Fetch the remote image file
            try:
                remote_image = urlopen(remote_url)
                # Write the fetched image to a local file
                try:
                    with open(o_filename, 'wb') as f:
                        f.write(remote_image.read())
                    state = "SUCCESS"
                except Exception as E:
                    state = u"Error writing fetched image to file: %s" % E
            except Exception as E:
                state = u"Error fetching image: %s" % E

            catcher_infos.append({
                "state": state,
                "url": urljoin(USettings.gSettings.MEDIA_URL, o_path_format),
                "size": os.path.getsize(o_filename),
                "title": os.path.basename(o_file),
                "original": remote_file_name,
                "source": remote_url
            })

    return_info = {
        "state": "SUCCESS" if len(catcher_infos) > 0 else "ERROR",
        "list": catcher_infos
    }

    return HttpResponse(json.dumps(return_info, ensure_ascii=False),
                        content_type="application/javascript")
コード例 #49
def do_update_changelog(ctx, target, cur_version, new_version, dry_run=False):
    """
    Actually perform the operations needed to update the changelog, this
    method is supposed to be used by other tasks and not directly.
    """
    # get the name of the current release tag
    target_tag = get_release_tag_string(target, cur_version)

    # get the diff from HEAD
    target_path = os.path.join(ROOT, target)
    cmd = 'git log --pretty=%s {}... {}'.format(target_tag, target_path)
    diff_lines = ctx.run(cmd, hide='out').stdout.split('\n')

    # for each PR get the title, we'll use it to populate the changelog
    endpoint = GITHUB_API_URL + '/repos/DataDog/integrations-core/pulls/{}'
    pr_numbers = parse_pr_numbers(diff_lines)
    print("Found {} PRs merged since tag: {}".format(len(pr_numbers),
                                                     target_tag))

    entries = []
    for pr_num in pr_numbers:
        try:
            response = urlopen(endpoint.format(pr_num))
        except Exception as e:
            sys.stderr.write("Unable to fetch info for PR #{}\n: {}".format(
                pr_num, e))
            continue

        payload = json.loads(response.read())
        if NO_CHANGELOG_LABEL in (l.get('name')
                                  for l in payload.get('labels', [])):
            # No changelog entry for this PR
            print("Skipping PR #{} from changelog".format(pr_num))
            continue

        author = payload.get('user', {}).get('login')
        author_url = payload.get('user', {}).get('html_url')

        entry = ChangelogEntry(pr_num, payload.get('title'),
                               payload.get('html_url'), author, author_url,
                               is_contributor(payload))

        entries.append(entry)

    # store the new changelog in memory
    new_entry = StringIO()

    # the header contains version and date
    header = "### {} / {}\n".format(new_version,
                                    datetime.now().strftime("%Y-%m-%d"))
    new_entry.write(header)

    # one bullet point for each PR
    new_entry.write("\n")
    for entry in entries:
        thanknote = ""
        if entry.is_contributor:
            thanknote = " Thanks [{}]({}).".format(entry.author,
                                                   entry.author_url)
        new_entry.write("* {}. See [#{}]({}).{}\n".format(
            entry.title, entry.number, entry.url, thanknote))
    new_entry.write("\n")

    # read the old contents
    changelog_path = os.path.join(ROOT, target, "CHANGELOG.md")
    with open(changelog_path, 'r') as f:
        old = f.readlines()

    # write the new changelog in memory
    changelog = StringIO()

    # preserve the title
    changelog.write("".join(old[:2]))

    # prepend the new changelog to the old contents
    # make the command idempotent
    if header not in old:
        changelog.write(new_entry.getvalue())

    # append the rest of the old changelog
    changelog.write("".join(old[2:]))

    # print on the standard out in case of a dry run
    if dry_run:
        print(changelog.getvalue())
        sys.exit(0)

    # overwrite the old changelog
    with open(changelog_path, 'w') as f:
        f.write(changelog.getvalue())
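
The task depends on a parse_pr_numbers helper that is not shown here; a plausible sketch (an assumption, not the project's actual code), matching GitHub's default merge-commit subjects:

import re

def parse_pr_numbers(log_lines):
    # 'git log --pretty=%s' yields subjects like 'Merge pull request #123 from ...'
    numbers = []
    for line in log_lines:
        match = re.search(r'Merge pull request #(\d+)', line)
        if match:
            numbers.append(match.group(1))
    return numbers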
コード例 #50
    def check_download(self, url, zip_filename):
        """Check to see if the file was successfully downloaded.

        If the user has :command:`gpg` installed on their system, use that to
        check that the package was signed. Otherwise, check the sha256sum.

        Args:
            url (unicode):
                The URL that the file came from.

            zip_filename (unicode):
                The filename of the downloaded copy.

        Raises:
            rbtools.commands.CommandError:
                The authenticity of the file could not be verified.
        """
        if check_install('gpg'):
            execute(['gpg', '--recv-keys', '4ED1F993'])
            sig_filename = self.download_file('%s.asc' % url)

            try:
                retcode, output, errors = execute(
                    ['gpg', '--verify', sig_filename, zip_filename],
                    with_errors=False, ignore_errors=True,
                    return_error_code=True, return_errors=True)

                if retcode == 0:
                    logging.debug('Verified file signature')
                else:
                    raise CommandError(
                        'Unable to verify authenticity of file downloaded '
                        'from %s:\n%s' % (url, errors))
            finally:
                os.unlink(sig_filename)
        else:
            logging.info('"gpg" not installed. Skipping signature validation.')

            try:
                sha_url = '%s.sha256sum' % url
                logging.debug('Downloading %s', sha_url)
                response = urlopen(sha_url)
                # read() returns bytes on Python 3; decode before splitting
                real_sha = response.read().decode('utf-8').split(' ')[0]
            except (HTTPError, URLError) as e:
                raise CommandError('Error when downloading file: %s' % e)

            with open(zip_filename, 'rb') as f:
                our_sha = hashlib.sha256(f.read()).hexdigest()

            if real_sha == our_sha:
                logging.debug('Verified SHA256 hash')
            else:
                logging.debug('SHA256 hash does not match!')
                logging.debug('  Downloaded file hash was: %s', our_sha)
                logging.debug('  Expected hash was: %s', real_sha)

                raise CommandError(
                    'Unable to verify the checksum of the downloaded copy of '
                    '%s.\n'
                    'This could be due to an invasive proxy or an attempted '
                    'man-in-the-middle attack.' % url)
コード例 #51
    def cacheData(self):
        ''' 
        Downloads all needed data. Called by output().
        '''
        station_list = self.ap_paramList[0]()

        remote_location = '/data/mahali_UAF_data/cloud/rinex/obs'

        day_list = []

        start_year = self.start_date.strftime('%Y')
        start_day = self.start_date.strftime('%j')

        end_year = self.end_date.strftime('%Y')
        end_day = self.end_date.strftime('%j')

        data_list = pd.DataFrame(columns=['Site','Date'])
        
        # Get a list of all data that needs to be loaded
        mahali_data_info_location = resource_filename('skdaccess',os.path.join('support','mahali_data_info.hdf'))
        for station in station_list:
            
            try:
                available_dates = pd.read_hdf(mahali_data_info_location, station)
            except KeyError:
                print('Unknown station:', station)
                continue

            common_dates = list(set(self.date_range).intersection(set(available_dates)))

            common_dates.sort()

            data_list = pd.concat([data_list, pd.DataFrame({'Site': station, 'Date': common_dates})])

        # Get a list of all needed filenames
        data_list_obs = data_list.Site + data_list.Date.apply(lambda x: x.strftime('%j0.%yo'))
        data_list_nav = data_list.Site + data_list.Date.apply(lambda x: x.strftime('%j0.%yn'))
        
        data_set_filenames = set(pd.concat([data_list_obs, data_list_nav]))

        
        # Get locations of all files to download
        def getFileLocation(in_file):
            day = in_file[4:7]
            if in_file[-1] == 'n':
                data_folder = 'nav'
            elif in_file[-1] == 'o':
                data_folder = 'obs'
            else:
                raise ValueError('Could not parse in_file')

            return 'rinex/' + data_folder + '/2015/' + day + '/' + in_file



        # Key function to sort rinex files by date, then
        # station, then type (NAV or OBS)
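        # e.g. for 'abcd1230.15o': '15' (year) + '123' (day of year)
        #      + 'abcd' (site) + 'o' (type)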
        key_func = lambda x: x[-3:-1] + x[-8:-5] + x[-12:-8] + x[-1]

        # Base url of data
        base_url = 'http://apollo.haystack.mit.edu/mahali-data/'
        

        # Download files to disk
        if not self.generate_links:
            data_location = DataFetcher.getDataLocation('mahali_rinex')

            if data_location is None:
                data_location = os.path.join(os.path.expanduser('~'), '.skdaccess', 'mahali_rinex')
                os.makedirs(data_location, exist_ok=True)

            # Get currently downloaded files
            file_list = glob(os.path.join(data_location,'*.*n',)) + glob(os.path.join(data_location,'*.*o',))
            file_list = set(file.split(os.sep)[-1] for file in file_list)

            # Select files that are wanted but not yet downloaded
            missing_files = data_set_filenames.difference(file_list)

            missing_files = list(missing_files)
            missing_files.sort()
            file_location_list = [getFileLocation(filename) for filename in missing_files]


            if len(file_location_list) > 0:
                print('Downloading mahali data')
                sys.stdout.flush()
                for url_path, filename in tqdm(zip(file_location_list, missing_files), total=len(missing_files)):
                    with open(os.path.join(data_location, filename), 'wb') as data_file:
                        shutil.copyfileobj(urlopen(base_url + url_path), data_file)

            # return the appropriate list of files to load

            obs_file_list = [os.path.join(data_location, file) for file in data_list_obs]
            nav_file_list = [os.path.join(data_location, file) for file in data_list_nav]

        
        # Not downloading data, just generating links to where data is located
        else:
            obs_file_list = [base_url + getFileLocation(location) for location in data_list_obs]
            nav_file_list = [base_url + getFileLocation(location) for location in data_list_nav]


        obs_file_list.sort(key=key_func)
        nav_file_list.sort(key=key_func)

        return nav_file_list, obs_file_list
コード例 #52
ファイル: general.py プロジェクト: dpaysan/pyhsmm
def get_file(remote_url, local_path):
    if not os.path.isfile(local_path):
        with closing(urlopen(remote_url)) as remotefile:
            with open(local_path, 'wb') as localfile:
                shutil.copyfileobj(remotefile, localfile)
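
Example usage with a hypothetical URL and path; thanks to the isfile() guard, repeated calls only download once:

get_file('https://example.com/data.bin', '/tmp/data.bin')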
コード例 #53
ファイル: main.py プロジェクト: padlocke11180/swiss
def download_swiss_data(data_dir):
    import os.path as p

    # Check if the data directory exists, create if not
    if not p.isdir(data_dir):
        try:
            os.makedirs(data_dir)
            print "Created data directory: " + data_dir
        except FileExistsError:
            pass
        except PermissionError:
            error(
                "Unable to create swiss data directory due to insufficient permissions: "
                + data_dir)

    # Needed filenames/paths
    data_pkg = p.basename(urlparse(DATA_URL).path)
    version_url = DATA_URL.replace(".tar.gz", ".version")
    checksum_url = DATA_URL.replace(".tar.gz", ".sha512")
    local_pkg = p.join(data_dir, data_pkg)
    local_sum = local_pkg.replace(".tar.gz", ".sha512")
    vfile = local_pkg.replace(".tar.gz", ".version")
    local_version = None
    remote_version = None

    # We won't bother updating/downloading the data if the data version matches
    try:
        resp = urlopen(version_url)
        remote_version = int(resp.read().strip())
    except Exception:
        error("Failed to download data version from " + version_url)

    if p.isfile(vfile):
        with open(vfile, "rt") as fp:
            local_version = int(fp.read().strip())

        if local_version == remote_version:
            print("Data is already up to date")
            return
        else:
            print("A data update is available, downloading...")
    else:
        print("Downloading data for swiss...")

    # Looks like we need to download the data.
    with tqdm(unit='B',
              unit_scale=True,
              miniters=1,
              desc=DATA_URL.split('/')[-1]) as t:
        urlretrieve(DATA_URL,
                    filename=local_pkg,
                    reporthook=tqdm_hook(t),
                    data=None)

    with tqdm(unit='B',
              unit_scale=True,
              miniters=1,
              desc=checksum_url.split('/')[-1]) as t:
        urlretrieve(checksum_url,
                    filename=local_sum,
                    reporthook=tqdm_hook(t),
                    data=None)

    # Verify the data
    print "Verifying data..."
    checksum_computed = check_output("sha512sum " + local_pkg,
                                     shell=True,
                                     universal_newlines=True).split()[0]
    with open(local_sum, "rt") as fp:
        checksum_read = fp.read().split()[0]

    if checksum_computed != checksum_read:
        raise IOError(
            "SHA512 computed for {} is {} but does not match downloaded checksum {}"
            .format(local_pkg, checksum_computed, checksum_read))

    # Looks like we're OK. Extract the data.
    print "Extracting data..."
    tar = tarfile.open(local_pkg)
    tar.extractall(data_dir)

    # Delete tarball now that it isn't needed
    os.remove(local_pkg)

    # Write out the data version. This also serves as the semaphore that the download
    # and extraction completed successfully.
    with open(local_pkg.replace(".tar.gz", ".version"), "wt") as fp:
        print(remote_version, file=fp)

    print "Data downloaded and installed successfully"
コード例 #54
ファイル: haupdown.py プロジェクト: xiangfu0/hacheck
def main(default_action='list'):
    ACTIONS = ('up', 'down', 'status', 'status_downed', 'list')
    parser = optparse.OptionParser(usage='%prog [options] service_name(s)')
    parser.add_option(
        '--spool-root',
        default='/var/spool/hacheck',
        help='Root for spool for service states (default %default)')
    parser.add_option('-a',
                      '--action',
                      type='choice',
                      choices=ACTIONS,
                      default=default_action,
                      help='Action (one of %s, default %%default)' %
                      ', '.join(ACTIONS))
    parser.add_option('-r',
                      '--reason',
                      type=str,
                      default="",
                      help='Reason string when setting down')
    parser.add_option(
        '-p',
        '--port',
        type='int',
        default=3333,
        help='Port that the hacheck daemon is running on (default %default)')
    opts, args = parser.parse_args()

    nonhumans = set()
    try:
        with open('/etc/nonhumans', 'r') as f:
            for line in f:
                unix_username = line.split('#')[0].strip()
                if unix_username:
                    nonhumans.add(unix_username)
    except:
        pass
    if opts.action == 'down' and not opts.reason:
        if 'SUDO_USER' in os.environ:
            opts.reason = os.environ['SUDO_USER']
        elif 'SSH_USER' in os.environ:
            opts.reason = os.environ['SSH_USER']
        else:
            opts.reason = pwd.getpwuid(os.geteuid()).pw_name
        if opts.reason in nonhumans:
            print_s('please use --reason option to tell us who you REALLY are')
            return 1

    if opts.action in ('status', 'up', 'down'):
        if not args:
            parser.error('Expected args for action %s' % (opts.action))
        service_names = args
    else:
        if args:
            parser.error('Unexpected args for action %s: %r' %
                         (opts.action, args))

    if opts.action == 'list':
        with contextlib.closing(
                urlopen('http://127.0.0.1:%d/recent' % opts.port,
                        timeout=3)) as f:
            resp = json.load(f)
            for s in sorted(resp['seen_services']):
                if isinstance(s, six.string_types):
                    print_s(s)
                else:
                    service_name, last_response = s
                    print_s('%s last_response=%s', service_name,
                            json.dumps(last_response))
            return 0
    elif opts.action == 'up':
        hacheck.spool.configure(opts.spool_root, needs_write=True)
        for service_name in service_names:
            hacheck.spool.up(service_name)
        return 0
    elif opts.action == 'down':
        hacheck.spool.configure(opts.spool_root, needs_write=True)
        for service_name in service_names:
            hacheck.spool.down(service_name, opts.reason)
        return 0
    elif opts.action == 'status_downed':
        hacheck.spool.configure(opts.spool_root, needs_write=False)
        for service_name, info in hacheck.spool.status_all_down():
            print_s('DOWN\t%s\t%s', service_name, info.get('reason', ''))
        return 0
    else:
        hacheck.spool.configure(opts.spool_root, needs_write=False)
        rv = 0
        for service_name in service_names:
            status, info = hacheck.spool.status(service_name)
            if status:
                print_s('UP\t%s', service_name)
            else:
                print_s('DOWN\t%s\t%s', service_name, info.get('reason', ''))
                rv = 1
        return rv
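
main() takes default_action as a parameter, which suggests per-action console entry points reuse it; a hypothetical sketch (function names are assumptions, not taken from the project):

def haup():
    return main(default_action='up')

def hadown():
    return main(default_action='down')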
コード例 #55
ファイル: server.py プロジェクト: James-Burgess/openZFD
    def decorated(*args, **kwargs):
        token = get_token_auth_header()
        jsonurl = urlopen("https://" + AUTH0_DOMAIN + "/.well-known/jwks.json")
        jwks = json.loads(jsonurl.read())
        try:
            unverified_header = jwt.get_unverified_header(token)
        except jwt.JWTError:
            raise AuthError(
                {
                    "code":
                    "invalid_header",
                    "description":
                    "Invalid header. "
                    "Use an RS256 signed JWT Access Token"
                }, 401)
        if unverified_header["alg"] == "HS256":
            raise AuthError(
                {
                    "code":
                    "invalid_header",
                    "description":
                    "Invalid header. "
                    "Use an RS256 signed JWT Access Token"
                }, 401)
        rsa_key = {}
        for key in jwks["keys"]:
            if key["kid"] == unverified_header["kid"]:
                rsa_key = {
                    "kty": key["kty"],
                    "kid": key["kid"],
                    "use": key["use"],
                    "n": key["n"],
                    "e": key["e"]
                }
        if rsa_key:
            try:
                payload = jwt.decode(token,
                                     rsa_key,
                                     algorithms=ALGORITHMS,
                                     audience=API_IDENTIFIER,
                                     issuer="https://" + AUTH0_DOMAIN + "/")
            except jwt.ExpiredSignatureError:
                raise AuthError(
                    {
                        "code": "token_expired",
                        "description": "token is expired"
                    }, 401)
            except jwt.JWTClaimsError:
                raise AuthError(
                    {
                        "code":
                        "invalid_claims",
                        "description":
                        "incorrect claims,"
                        " please check the audience and issuer"
                    }, 401)
            except Exception:
                raise AuthError(
                    {
                        "code": "invalid_header",
                        "description": "Unable to parse authentication"
                        " token."
                    }, 401)

            _request_ctx_stack.top.current_user = payload
            return f(*args, **kwargs)
        raise AuthError(
            {
                "code": "invalid_header",
                "description": "Unable to find appropriate key"
            }, 401)
コード例 #56
    def Urlget(self, url):
        f = urlopen(url, timeout=1)
        fr = f.read()
        fc = f.code
        f.close()
        return (fr, fc)
コード例 #57
ファイル: dev.py プロジェクト: jzrlza/django-vue-base
}

REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES':
    ('rest_framework.permissions.IsAuthenticated', ),
    'DEFAULT_AUTHENTICATION_CLASSES':
    ('rest_framework_jwt.authentication.JSONWebTokenAuthentication', ),
}

AUTH0_DOMAIN = 'jmkey.auth0.com'
API_IDENTIFIER = 'https://django-vue.temperate.com/'
PUBLIC_KEY = None
JWT_ISSUER = None

if AUTH0_DOMAIN:
    jsonurl = request.urlopen('https://' + AUTH0_DOMAIN +
                              '/.well-known/jwks.json')
    jwks = json.loads(jsonurl.read().decode('utf-8'))
    cert = '-----BEGIN CERTIFICATE-----\n' + jwks['keys'][0]['x5c'][
        0] + '\n-----END CERTIFICATE-----'
    certificate = load_pem_x509_certificate(cert.encode('utf-8'),
                                            default_backend())
    PUBLIC_KEY = certificate.public_key()
    JWT_ISSUER = 'https://' + AUTH0_DOMAIN + '/'


def jwt_get_username_from_payload_handler(payload):
    return 'auth0user'


CORS_ORIGIN_WHITELIST = (
    'localhost:8080',
)
コード例 #58
def generate_classes():
    """Generates the necessary classes."""
    print("Downloading {} for last API version.".format(import_url))
    data = json.loads(urlopen(import_url).read().decode('utf-8'))
    print("Download OK. Generating python files...")

    for event in ['requests', 'events']:
        if event not in data:
            raise Exception("Missing {} in data.".format(event))
        with open('obswebsocket/{}.py'.format(event), 'w') as f:

            f.write("#!/usr/bin/env python\n")
            f.write("# -*- coding: utf-8 -*-\n")
            f.write("\n")
            f.write("# THIS FILE WAS GENERATED BY generate_classes.py - DO NOT EDIT #\n")
            f.write("# (Generated on {}) #\n".format(datetime.now().isoformat(" ")))
            f.write("\n")
            f.write("from .base_classes import Base{}\n".format(event))
            f.write("\n\n")
            for sec in data[event]:
                for i in data[event][sec]:
                    f.write("class {}(Base{}):\n".format(i['name'], event))
                    f.write("    \"\"\"{}\n\n".format(i['description']))

                    arguments_default = []
                    arguments = []
                    try:
                        if len(i['params']) > 0:
                            f.write("    :Arguments:\n")
                            for a in i['params']:
                                a['name'] = a['name'].replace("[]", ".*")
                                f.write("       *{}*\n".format(clean_var(a['name'])))
                                f.write("            type: {}\n".format(a['type']))
                                f.write("            {}\n".format(a['description']))
                                if 'optional' in a['type']:
                                    arguments_default.append(a['name'])
                                else:
                                    arguments.append(a['name'])
                    except KeyError:
                        pass

                    returns = []
                    try:
                        if len(i['returns']) > 0:
                            f.write("    :Returns:\n")
                            for r in i['returns']:
                                r['name'] = r['name'].replace("[]", ".*")
                                f.write("       *{}*\n".format(clean_var(r['name'])))
                                f.write("            type: {}\n".format(r['type']))
                                f.write("            {}\n".format(r['description']))
                                returns.append(r['name'])
                    except KeyError:
                        pass

                    arguments = set([x.split(".")[0] for x in arguments])
                    arguments_default = set([x.split(".")[0] for x in arguments_default])
                    arguments_default = set([x for x in arguments_default if x not in arguments])
                    returns = set([x.split(".")[0] for x in returns])

                    f.write("    \"\"\"\n\n")
                    f.write("    def __init__({}):\n".format(
                        ", ".join(
                            ["self"]
                            + [clean_var(a) for a in arguments]
                            + [clean_var(a) + "=None" for a in arguments_default]
                        )
                    ))
                    f.write("        Base{}.__init__(self)\n".format(event))
                    f.write("        self.name = '{}'\n".format(i['name']))
                    for r in returns:
                        f.write("        self.datain['{}'] = None\n".format(r))
                    for a in arguments:
                        f.write("        self.dataout['{}'] = {}\n".format(a, clean_var(a)))
                    for a in arguments_default:
                        f.write("        self.dataout['{}'] = {}\n".format(a, clean_var(a)))
                    f.write("\n")
                    for r in returns:
                        cc = "".join(x[0].upper() + x[1:] for x in r.split('-'))
                        f.write("    def get{}(self):\n".format(clean_var(cc)))
                        f.write("        return self.datain['{}']\n".format(r))
                        f.write("\n")
                    f.write("\n")

    print("API classes have been generated.")
コード例 #59
ファイル: ls.py プロジェクト: silky/datalad
def _ls_s3(loc,
           fast=False,
           recursive=False,
           all_=False,
           long_=False,
           config_file=None,
           list_content=False):
    """List S3 bucket content"""
    if loc.startswith('s3://'):
        bucket_prefix = loc[5:]
    else:
        raise ValueError("passed location should be an s3:// url")

    import boto
    from hashlib import md5
    from boto.s3.key import Key
    from boto.s3.prefix import Prefix
    from boto.exception import S3ResponseError
    from ..support.configparserinc import SafeConfigParser  # provides PY2,3 imports

    if '/' in bucket_prefix:
        bucket_name, prefix = bucket_prefix.split('/', 1)
    else:
        bucket_name, prefix = bucket_prefix, None

    if prefix and '?' in prefix:
        ui.message("We do not care about URL options ATM, they get stripped")
        prefix = prefix[:prefix.index('?')]

    ui.message("Connecting to bucket: %s" % bucket_name)
    if config_file:
        config = SafeConfigParser()
        config.read(config_file)
        access_key = config.get('default', 'access_key')
        secret_key = config.get('default', 'secret_key')

        # TODO: remove duplication -- reuse logic within downloaders/s3.py to get connected
        conn = boto.connect_s3(access_key, secret_key)
        try:
            bucket = conn.get_bucket(bucket_name)
        except S3ResponseError as e:
            ui.message("E: Cannot access bucket %s by name" % bucket_name)
            all_buckets = conn.get_all_buckets()
            all_bucket_names = [b.name for b in all_buckets]
            ui.message("I: Found following buckets %s" %
                       ', '.join(all_bucket_names))
            if bucket_name in all_bucket_names:
                bucket = all_buckets[all_bucket_names.index(bucket_name)]
            else:
                raise RuntimeError("E: no bucket named %s thus exiting" %
                                   bucket_name)
    else:
        # TODO: expose credentials
        # We don't need any provider here really but only credentials
        from datalad.downloaders.providers import Providers
        providers = Providers.from_config_files()
        provider = providers.get_provider(loc)

        if not provider:
            raise ValueError(
                "Don't know how to deal with this url %s -- no provider defined for %s. "
                "Define a new provider (DOCS: TODO) or specify just s3cmd config file instead for now."
                % (loc, loc))
        downloader = provider.get_downloader(loc)

        # should authenticate etc, and when ready we will ask for a bucket ;)
        bucket = downloader.access(lambda url: downloader.bucket, loc)

    info = []
    for iname, imeth in [
        ("Versioning", bucket.get_versioning_status),
        ("   Website", bucket.get_website_endpoint),
        ("       ACL", bucket.get_acl),
    ]:
        try:
            ival = imeth()
        except Exception as e:
            ival = str(e).split('\n')[0]
        info.append(" {iname}: {ival}".format(**locals()))
    ui.message("Bucket info:\n %s" % '\n '.join(info))

    kwargs = {} if recursive else {'delimiter': '/'}

    ACCESS_METHODS = [bucket.list_versions, bucket.list]

    prefix_all_versions = []  # stays empty if every access method fails
    for acc in ACCESS_METHODS:
        try:
            prefix_all_versions = list(acc(prefix, **kwargs))
            break
        except Exception as exc:
            lgr.debug("Failed to access via %s: %s", acc, exc_str(exc))

    if not prefix_all_versions:
        ui.error("No output was provided for prefix %r" % prefix)
    else:
        max_length = max((len(e.name) for e in prefix_all_versions))
        max_size_length = max(
            (len(str(getattr(e, 'size', 0))) for e in prefix_all_versions))

    results = []
    for e in prefix_all_versions:
        results.append(e)
        if isinstance(e, Prefix):
            ui.message("%s" % (e.name, ), )
            continue

        base_msg = ("%%-%ds %%s" % max_length) % (e.name, e.last_modified)
        if isinstance(e, Key):
            if not (e.is_latest or all_):
                # Skip this one
                continue
            ui.message(base_msg + " %%%dd" % max_size_length % e.size, cr=' ')
            # OPT: delayed import
            from ..support.s3 import get_key_url
            url = get_key_url(e, schema='http')
            try:
                _ = urlopen(Request(url))
                urlok = "OK"
            except HTTPError as err:
                urlok = "E: %s" % err.code

            try:
                acl = e.get_acl()
            except S3ResponseError as err:
                acl = err.message

            content = ""
            if list_content:
                # IO intensive, make an option finally!
                try:
                    # _ = e.next()[:5]  if we are able to fetch the content
                    kwargs = dict(version_id=e.version_id)
                    if list_content in {'full', 'first10'}:
                        if list_content == 'first10':
                            kwargs['headers'] = {'Range': 'bytes=0-9'}
                        content = repr(e.get_contents_as_string(**kwargs))
                    elif list_content == 'md5':
                        digest = md5()
                        digest.update(e.get_contents_as_string(**kwargs))
                        content = digest.hexdigest()
                    else:
                        raise ValueError(list_content)
                    # content = "[S3: OK]"
                except S3ResponseError as err:
                    content = err.message
                finally:
                    content = " " + content
            if long_:
                ui.message("ver:%-32s  acl:%s  %s [%s]%s" %
                           (e.version_id, acl, url, urlok, content))
            else:
                ui.message('')
        else:
            ui.message(base_msg + " " +
                       str(type(e)).split('.')[-1].rstrip("\"'>"))
    return results
コード例 #60
def download_and_store_page_for_platform(command, platform):
    page_url = remote + "/" + platform + "/" + quote(command) + ".md"
    data = urlopen(page_url).read()
    store_page_to_cache(data, command, platform)