Code Example #1
 def test_missing_headers(self):
     file_owner = 'get_test_user'
     file_id = 0
     resource = "/apigetfile/{0}/{1}/".format(file_owner, file_id)
     headers = list(default_headers.keys())
     for header in headers:
         hd = copy.copy(default_headers)
         del hd[header]
         response = requests.get(server + resource, headers=hd)
         if six.PY3:
             content = _json.loads(response.content.decode('unicode_escape'))
         else:
             content = _json.loads(response.content)
         self.assertEqual(response.status_code, 422)
Code Example #2
 def test_private_permission_defined(self):
     username = '******'
     api_key = '786r5mecv0'
     file_owner = 'get_test_user'
     file_id = 1  # 1 is a private file
     hd = copy.copy(default_headers)
     hd['plotly-username'] = username
     hd['plotly-apikey'] = api_key
     resource = "/apigetfile/{0}/{1}/".format(file_owner, file_id)
     response = requests.get(server + resource, headers=hd)
     if six.PY3:
         content = _json.loads(response.content.decode('unicode_escape'))
     else:
         content = _json.loads(response.content)
     self.assertEqual(response.status_code, 403)
Code Example #3
 def test_missing_headers(self):
     file_owner = 'get_test_user'
     file_id = 0
     resource = "/apigetfile/{0}/{1}/".format(file_owner, file_id)
     headers = list(default_headers.keys())
     for header in headers:
         hd = copy.copy(default_headers)
         del hd[header]
         response = requests.get(server + resource, headers=hd)
         if six.PY3:
             content = _json.loads(
                 response.content.decode('unicode_escape'))
         else:
             content = _json.loads(response.content)
         self.assertEqual(response.status_code, 422)
Code Example #4
 def test_private_permission_defined(self):
     username = '******'
     api_key = '786r5mecv0'
     file_owner = 'get_test_user'
     file_id = 1  # 1 is a private file
     hd = copy.copy(default_headers)
     hd['plotly-username'] = username
     hd['plotly-apikey'] = api_key
     resource = "/apigetfile/{0}/{1}/".format(file_owner, file_id)
     response = requests.get(server + resource, headers=hd)
     if six.PY3:
         content = _json.loads(response.content.decode('unicode_escape'))
     else:
         content = _json.loads(response.content)
     self.assertEqual(response.status_code, 403)
Code Example #5
File: utils.py  Project: fu/plotly.py
    def encode(self, o):
        """
        Load and then dump the result using parse_constant kwarg

        Note that setting invalid separators will cause a failure at this step.

        """

        # this will raise errors in a normal-expected way
        encoded_o = super(PlotlyJSONEncoder, self).encode(o)

        # now:
        #    1. `loads` to switch Infinity, -Infinity, NaN to None
        #    2. `dumps` again so you get 'null' instead of extended JSON
        try:
            new_o = _json.loads(encoded_o,
                                parse_constant=self.coerce_to_strict)
        except ValueError:

            # invalid separators will fail here. raise a helpful exception
            raise ValueError(
                "Encoding into strict JSON failed. Did you set the separators "
                "valid JSON separators?"
            )
        else:
            return _json.dumps(new_o, sort_keys=self.sort_keys,
                               indent=self.indent,
                               separators=(self.item_separator,
                                           self.key_separator))
Code Example #6
File: jiveresponse.py  Project: patricia1387/jiveapi
    def json(self, **kwargs):
        """
        Returns the json-encoded content of a response, if any, with the
        leading JSON Security String stripped off.

        :param kwargs: Optional arguments that ``json.loads`` takes.
        :raises ValueError: If the response body does not contain valid json.
        """
        content = self.text
        if not self.encoding and self.content and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using chardet to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:  # nocoverage
                try:
                    content = self.content.decode(encoding)
                except UnicodeDecodeError:
                    # Wrong UTF codec detected; usually because it's not UTF-8
                    # but some other 8-bit codec.  This is an RFC violation,
                    # and the server didn't bother to tell us what codec *was*
                    # used.
                    pass
        content = JIVE_SECURITY_RE.sub('', content)
        return complexjson.loads(content, **kwargs)
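`JIVE_SECURITY_RE` is defined elsewhere in that project; Jive prefixes JSON bodies with a security string (typically a `throw ...;` statement) that must be stripped before parsing. A rough sketch of the same idea with a hypothetical stand-in pattern, which may not match the project's real regex:

    import json
    import re

    # Hypothetical stand-in for JIVE_SECURITY_RE.
    SECURITY_PREFIX_RE = re.compile(r"^throw [^;]*;\s*")

    raw = "throw 'allowIllegalResourceCall is false.';\n{\"name\": \"example\"}"
    data = json.loads(SECURITY_PREFIX_RE.sub('', raw))
    print(data)  # {'name': 'example'}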
Code Example #7
    def test_masked_constants_example():
        # example from: https://gist.github.com/tschaume/d123d56bf586276adb98
        data = {
            'esN': [0, 1, 2, 3],
            'ewe_is0':
            [-398.11901997, -398.11902774, -398.11897111, -398.11882215],
            'ewe_is1': [-398.11793027, -398.11792966, -398.11786308, None],
            'ewe_is2': [-398.11397008, -398.11396421, None, None]
        }
        df = pd.DataFrame.from_dict(data)

        plotopts = {'x': 'esN'}
        fig, ax = plt.subplots(1, 1)
        df.plot(ax=ax, **plotopts)

        renderer = PlotlyRenderer()
        Exporter(renderer).run(fig)

        _json.dumps(renderer.plotly_fig, cls=utils.PlotlyJSONEncoder)

        jy = _json.dumps(renderer.plotly_fig['data'][1]['y'],
                         cls=utils.PlotlyJSONEncoder)
        print(jy)
        array = _json.loads(jy)
        assert (array == [-398.11793027, -398.11792966, -398.11786308, None])
Code Example #8
    def _request(self, method, url, **kwargs):
        response = None
        base_url = kwargs.pop('base_url', self.base_url)
        fine_json = kwargs.pop('fine_json', False)

        if not url.startswith(('http://', 'https://')):
            _url = self.url_set.get(url, url)
            if not _url.startswith(('http://', 'https://')):
                _url = f'{base_url}/{_url}'
        else:
            _url = url

        try:
            res = requests.request(method, url=_url, **kwargs)
        except (Exception, ) as e:
            print(e)
        else:
            if str(res.status_code).startswith('2'):
                try:
                    if fine_json:
                        response = f_json.loads(res.content)
                    else:
                        response = res.json()
                except json.JSONDecodeError:
                    print('JSON Decode Error')

        return response
Code Example #9
    def _execute_get_url(self, request_url, append_sid=True):
        """Function to execute and handle a GET request"""
        # Prepare Request
        self._debuglog("Requesting URL: '" + request_url + "'")
        if append_sid:
            self._debuglog("Appending access_token (SID: " +
                           self.access_token + ") to url")
            request_url = "%s&_sid=%s" % (request_url, self.access_token)

        # Execute Request
        try:
            resp = self._session.get(request_url)
            self._debuglog("Request executed: " + str(resp.status_code))
            if resp.status_code == 200:
                # We got a response
                json_data = json.loads(resp.text)

                if json_data["success"]:
                    self._debuglog("Succesfull returning data")
                    self._debuglog(str(json_data))
                    return json_data
                else:
                    if json_data["error"]["code"] in {105, 106, 107, 119}:
                        self._debuglog("Session error: " +
                                       str(json_data["error"]["code"]))
                        self._session_error = True
                    else:
                        self._debuglog("Failed: " + resp.text)
            else:
                # Non-200 response (e.g. 404 or 401)
                return None
        #pylint: disable=bare-except
        except:
            return None
Code Example #10
File: api.py  Project: wyd2004/tsBackend
def get_user_info(access_token, openid):
    '''
    https://api.weixin.qq.com/sns/userinfo?access_token=ACCESS_TOKEN&openid=OPENID&lang=zh_CN
    {
        "openid": "OPENID",
        "nickname": "NICKNAME",
        "sex": "1",
        "province": "PROVINCE",
        "city": "CITY",
        "country": "COUNTRY",
        "headimgurl": "http://wx.qlogo.cn/mmopen/g3MonUZtNHkdmzicIlibx6iaFqAc56vxLSUfpb6n5WKSYVY0ChQKkiaJSgQ1dZuTOgvLLrhJbERQQ4eMsv84eavHiaiceqxibJxCfHe/46",
        "privilege": [
            "PRIVILEGE1",
            "PRIVILEGE2"
        ],
        "unionid": "o6_bmasdasdsad6_2sgVt7hMZOPfL"
    }
    '''
    url = 'https://api.weixin.qq.com/sns/userinfo'
    params = {
        'access_token': access_token,
        'openid': openid,
        'lang': 'zh_CN',
        }
    response = requests.get(url, params=params)
    if response.ok and 'errcode' not in response.json():
        res = response.content.decode('utf8').encode('utf8')
        r = complexjson.loads(res)
        return r
    else:
        return None
Code Example #11
File: test_utils.py  Project: codybushnell/plotly.py
    def test_masked_constants_example():
        # example from: https://gist.github.com/tschaume/d123d56bf586276adb98
        data = {
            'esN': [0, 1, 2, 3],
            'ewe_is0': [-398.11901997, -398.11902774,
                        -398.11897111, -398.11882215],
            'ewe_is1': [-398.11793027, -398.11792966, -398.11786308, None],
            'ewe_is2': [-398.11397008, -398.11396421, None, None]
        }
        df = pd.DataFrame.from_dict(data)

        plotopts = {'x': 'esN'}
        fig, ax = plt.subplots(1, 1)
        df.plot(ax=ax, **plotopts)

        renderer = PlotlyRenderer()
        Exporter(renderer).run(fig)

        _json.dumps(renderer.plotly_fig, cls=utils.PlotlyJSONEncoder)

        jy = _json.dumps(renderer.plotly_fig['data'][1]['y'],
                         cls=utils.PlotlyJSONEncoder)
        print(jy)
        array = _json.loads(jy)
        assert(array == [-398.11793027, -398.11792966, -398.11786308, None])
Code Example #12
    def encode(self, o):
        """
        Load and then dump the result using parse_constant kwarg

        Note that setting invalid separators will cause a failure at this step.

        """

        # this will raise errors in a normal-expected way
        encoded_o = super(PlotlyJSONEncoder, self).encode(o)

        # now:
        #    1. `loads` to switch Infinity, -Infinity, NaN to None
        #    2. `dumps` again so you get 'null' instead of extended JSON
        try:
            new_o = _json.loads(encoded_o,
                                parse_constant=self.coerce_to_strict)
        except ValueError:

            # invalid separators will fail here. raise a helpful exception
            raise ValueError(
                "Encoding into strict JSON failed. Did you set the separators "
                "valid JSON separators?"
            )
        else:
            return _json.dumps(new_o, sort_keys=self.sort_keys,
                               indent=self.indent,
                               separators=(self.item_separator,
                                           self.key_separator))
Code Example #13
def test_valid_request():
    username = '******'
    api_key = '786r5mecv0'
    file_owner = 'get_test_user'
    file_id = 0
    hd = copy.copy(default_headers)
    hd['plotly-username'] = username
    hd['plotly-apikey'] = api_key
    resource = "/apigetfile/{0}/{1}/".format(file_owner, file_id)
    response = requests.get(server + resource, headers=hd)
    if six.PY3:
        content = _json.loads(response.content.decode('unicode_escape'))
    else:
        content = _json.loads(response.content)
    print(response.status_code)
    print(content)
    assert response.status_code == 200
Code Example #14
 def _json(self, content, text, **kwargs):
     if not self.encoding and content and len(content) > 3:
         # No encoding set. JSON RFC 4627 section 3 states we should expect
         # UTF-8, -16 or -32. Detect which one to use; If the detection or
         # decoding fails, fall back to `self.text` (using chardet to make
         # a best guess).
         encoding = guess_json_utf(content)
         if encoding is not None:
             try:
                 return complexjson.loads(content.decode(encoding),
                                          **kwargs)
             except UnicodeDecodeError:
                 # Wrong UTF codec detected; usually because it's not UTF-8
                 # but some other 8-bit codec.  This is an RFC violation,
                 # and the server didn't bother to tell us what codec *was*
                 # used.
                 pass
     return complexjson.loads(text, **kwargs)
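`guess_json_utf` is a real helper in `requests.utils`: it inspects the BOM and leading null-byte pattern to decide between UTF-8, UTF-16 and UTF-32, as RFC 4627 suggests. A small sketch of how the fallback above behaves when the server omits the charset; the payload is illustrative:

    import json
    from requests.utils import guess_json_utf

    payload = '{"id": 1}'.encode('utf-16')     # JSON body with no charset header
    encoding = guess_json_utf(payload)          # -> 'utf-16'
    data = json.loads(payload.decode(encoding)) if encoding else None
    print(data)  # {'id': 1}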
Code Example #15
 def test_file_does_not_exist(self):
     username = '******'
     api_key = '786r5mecv0'
     file_owner = 'get_test_user'
     file_id = 1000
     hd = copy.copy(default_headers)
     hd['plotly-username'] = username
     hd['plotly-apikey'] = api_key
     resource = "/apigetfile/{0}/{1}/".format(file_owner, file_id)
     response = requests.get(server + resource, headers=hd)
     if six.PY3:
         content = _json.loads(response.content.decode('unicode_escape'))
     else:
         content = _json.loads(response.content)
     error_message = ("Aw, snap! It looks like this file does "
                      "not exist. Want to try again?")
     self.assertEqual(response.status_code, 404)
     self.assertEqual(content['error'], error_message)
Code Example #16
 def test_file_does_not_exist(self):
     username = '******'
     api_key = '786r5mecv0'
     file_owner = 'get_test_user'
     file_id = 1000
     hd = copy.copy(default_headers)
     hd['plotly-username'] = username
     hd['plotly-apikey'] = api_key
     resource = "/apigetfile/{0}/{1}/".format(file_owner, file_id)
     response = requests.get(server + resource, headers=hd)
     if six.PY3:
         content = _json.loads(response.content.decode('unicode_escape'))
     else:
         content = _json.loads(response.content)
     error_message = ("Aw, snap! It looks like this file does "
                      "not exist. Want to try again?")
     self.assertEqual(response.status_code, 404)
     self.assertEqual(content['error'], error_message)
Code Example #17
 def test_user_does_not_exist(self):
     username = '******'
     api_key = 'invalid-apikey'
     file_owner = 'get_test_user'
     file_id = 0
     hd = copy.copy(default_headers)
     hd['plotly-username'] = username
     hd['plotly-apikey'] = api_key
     resource = "/apigetfile/{0}/{1}/".format(file_owner, file_id)
     response = requests.get(server + resource, headers=hd)
     if six.PY3:
         content = _json.loads(response.content.decode('unicode_escape'))
     else:
         content = _json.loads(response.content)
     error_message = (
         "Aw, snap! We don't have an account for {0}. Want to "
         "try again? Sign in is not case sensitive.".format(username))
     self.assertEqual(response.status_code, 404)
     self.assertEqual(content['error'], error_message)
Code Example #18
 def test_user_does_not_exist(self):
     username = '******'
     api_key = 'invalid-apikey'
     file_owner = 'get_test_user'
     file_id = 0
     hd = copy.copy(default_headers)
     hd['plotly-username'] = username
     hd['plotly-apikey'] = api_key
     resource = "/apigetfile/{0}/{1}/".format(file_owner, file_id)
     response = requests.get(server + resource, headers=hd)
     if six.PY3:
         content = _json.loads(response.content.decode('unicode_escape'))
     else:
         content = _json.loads(response.content)
     error_message = ("Aw, snap! We don't have an account for {0}. Want to "
                      "try again? Sign in is not case sensitive."
                      .format(username))
     self.assertEqual(response.status_code, 404)
     self.assertEqual(content['error'], error_message)
Code Example #19
def run_api_tests(args, data_format):
    endpoints = []
    for i in range(len(args.host)):
        endpoints.append({"host": args.host[i], "port": args.port[i], "version": args.version[i]})
    results = run_tests(args.suite, endpoints, [args.selection])
    if data_format == "xml":
        formatted_test_results = format_test_results(results, endpoints, "junit", args)
        return TestSuite.to_xml_string([formatted_test_results], prettyprint=True)
    else:
        formatted_test_results = format_test_results(results, endpoints, "json", args)
        return json.loads(formatted_test_results)
Code Example #20
File: alltests.py  Project: mazandaran/rubber
    def test_getitem_len_iter(self):
        """
        It should be possible to get hits with bracket notation on collections, like collection[0], look at len(collection) and iterate for hit in collection.
        """

        response = json.loads("""{"took":2,"timed_out":false,"_shards":{"total":5,"successful":5,"failed":0},"hits":{"total":2,"max_score":1.0,"hits":[{"_index":"auth","_type":"user","_id":"6","_score":1.0, "_source" : {"username": "******", "first_name": "", "last_name": "", "is_active": true, "is_superuser": false, "is_staff": false, "last_login": "******", "groups": [], "user_permissions": [], "password": "******", "email": "*****@*****.**", "date_joined": "2012-08-02T08:30:11"}},{"_index":"auth","_type":"user","_id":"8","_score":1.0, "_source" : {"username": "******", "first_name": "", "last_name": "", "is_active": true, "is_superuser": false, "is_staff": false, "last_login": "******", "groups": [], "user_permissions": [], "password": "******", "email": "*****@*****.**", "date_joined": "2012-08-02T09:14:38"}}]}}""")
        from rubber.response import HitCollection
        collection = HitCollection(response['hits'])

        self.assertEquals(2, len(collection))
        self.assertEquals('guillaume', collection[0].source.username)
        self.assertEquals(['guillaume', 'stephane'], [hit.source.username for hit in collection])
Code Example #21
def get_graph_reference():
    """
    Load graph reference JSON (aka plot-schema)

    :return: (dict) The graph reference.

    """
    path = os.path.join('package_data', 'default-schema.json')
    s = resource_string('plotly', path).decode('utf-8')
    graph_reference = _json.loads(s)

    return utils.decode_unicode(graph_reference)
Code Example #22
    def test_getitem_len_iter(self):
        """
        It should be possible to get hits with bracket notation on collections, like collection[0], look at len(collection) and iterate for hit in collection.
        """

        response = json.loads("""{"took":2,"timed_out":false,"_shards":{"total":5,"successful":5,"failed":0},"hits":{"total":2,"max_score":1.0,"hits":[{"_index":"auth","_type":"user","_id":"6","_score":1.0, "_source" : {"username": "******", "first_name": "", "last_name": "", "is_active": true, "is_superuser": false, "is_staff": false, "last_login": "******", "groups": [], "user_permissions": [], "password": "******", "email": "*****@*****.**", "date_joined": "2012-08-02T08:30:11"}},{"_index":"auth","_type":"user","_id":"8","_score":1.0, "_source" : {"username": "******", "first_name": "", "last_name": "", "is_active": true, "is_superuser": false, "is_staff": false, "last_login": "******", "groups": [], "user_permissions": [], "password": "******", "email": "*****@*****.**", "date_joined": "2012-08-02T09:14:38"}}]}}""")
        from rubber.response import HitCollection
        collection = HitCollection(response['hits'])

        self.assertEquals(2, len(collection))
        self.assertEquals('guillaume', collection[0].source.username)
        self.assertEquals(['guillaume', 'stephane'], [hit.source.username for hit in collection])
Code Example #23
    def test_default_schema_is_up_to_date(self):
        response = v2.plot_schema.retrieve('')
        schema = response.json()['schema']

        path = os.path.join('package_data', 'default-schema.json')
        s = resource_string('plotly', path).decode('utf-8')
        default_schema = _json.loads(s)

        msg = (
            'The default, hard-coded plot schema we ship with pip is out of '
            'sync with the prod plot schema!\n'
            'Run `make update_default_schema` to fix it!')
        self.assertEqual(schema, default_schema, msg=msg)
Code Example #24
        def _add_client_data(c):
            if not c.clientInfo:
                if self.ip_is_local(c.clientip):
                    c.clientInfo = {
                        'vendor': self.get_vendor_Info(c.clientip),
                        'country_code': '',
                        'country_name': '',
                        'city': ''
                    }
                else:
                    try:
                        headers = {'User-Agent': 'API Browser'}
                        with requests.get('https://geoip-db.com/jsonp/%s' %
                                          c.clientip,
                                          headers=headers,
                                          stream=False,
                                          timeout=5) as r:
                            if r.encoding is None: r.encoding = 'utf-8'
                            c.clientInfo = json.loads(
                                r.text.split('(', 1)[1].strip(')'))
                            c.clientInfo['vendor'] = ''
                    except:
                        c.clientInfo = {
                            'vendor': '',
                            'country_code': '',
                            'country_name': '',
                            'city': ''
                        }

            return {
                'sessionID':
                c.sessionID,
                'channelIcon':
                c.channelIcon,
                'channelName':
                ensure_text(c.channelName),
                'clientIP':
                c.clientip,
                'clientInfo':
                c.clientInfo,
                #'clientBuff': c.q.qsize()*100/self.config.videotimeout,
                'startTime':
                time.strftime('%d/%m/%Y %H:%M:%S',
                              time.localtime(c.connectionTime)),
                'durationTime':
                time.strftime('%H:%M:%S',
                              time.gmtime(time.time() - c.connectionTime)),
                'stat':
                c.ace.GetSTATUS(),
            }
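The `r.text.split('(', 1)[1].strip(')')` line above is a JSONP-to-JSON conversion: the geoip endpoint wraps its JSON in a callback call, so everything up to the first '(' and the trailing ')' is dropped before `json.loads`. A self-contained sketch of that unwrapping with a made-up payload:

    import json

    jsonp = 'callback({"country_code": "US", "city": "Mountain View"})'
    data = json.loads(jsonp.split('(', 1)[1].strip(')'))
    print(data['city'])  # Mountain View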
Code Example #25
File: cimis.py  Project: fkarimpour/stompy
def cimis_fetch_station_metadata(station,df=None,cimis_key=None,cache_dir=None):
    """
    Return an xr.Dataset with station metadata for the station ID
    (integer) supplied.
    cimis_key is not needed, but accepted.
    """
    if df is None:
        df=xr.Dataset()
    if cache_dir is not None:
        assert os.path.exists(cache_dir)

        # Be nice and make a cimis subdirectory
        cache_sub_dir=os.path.join(cache_dir,'cimis')
        os.path.exists(cache_sub_dir) or os.mkdir(cache_sub_dir)
        cache_fn=os.path.join(cache_sub_dir,"station_metadata-%s.json"%station)
    else:
        cache_fn=None

    # The Latin-1 business here is because CIMIS uses 0xBA for a degree sign, and
    # that trips up python unicode.  Latin-1 in theory means don't transform any
    # bytes -- just write it out, and pretend we all agree on the high byte symbols.
    if (cache_fn is not None) and os.path.exists(cache_fn):
        log.warning("Station metadata from cache")
        with open(cache_fn,'rb') as fp:
            station_meta=json.loads(fp.read().decode('Latin-1'))
    else:
        log.warning("Station metadata from download")
        req=requests.get("http://et.water.ca.gov/api/station/%s"%station,
                         headers=dict(Accept='application/json'))
        if cache_fn is not None:
            with open(cache_fn,'wb') as fp:
                fp.write(req.text.encode('Latin-1'))
        station_meta=req.json()

    # add station metadata to attrs:
    stn=station_meta['Stations'][0]

    df.attrs['elevation'] = float(stn['Elevation'])
    df.attrs['is_active'] = stn['IsActive']
    df.attrs['station_name']=stn['Name']
    lat=float(stn['HmsLatitude'].split('/')[1]) #  u"37\xba35'56N / 37.598758"
    lon=float(stn['HmsLongitude'].split('/')[1])

    df.attrs['latitude']=lat
    df.attrs['longitude']=lon
    return df
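The Latin-1 round trip described in the comment above keeps the cache byte-for-byte identical to the download: every byte maps to a code point and back unchanged, so CIMIS's 0xBA degree sign survives even though it is not valid UTF-8. A minimal illustration with made-up station data:

    import json

    cached = '{"Stations": [{"Name": "Davis", "HmsLatitude": "38\xba32\'24N / 38.54"}]}'.encode('Latin-1')
    station_meta = json.loads(cached.decode('Latin-1'))
    print(station_meta['Stations'][0]['Name'])  # Davis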
Code Example #26
 def q(searcher, q, **options):
     options = {
         key.partition('.')[-1]: options[key]
         for key in options if key.startswith('q.')
     }
     field = options.pop('field', [])
     fields = [field] if isinstance(field, str) else field
     fields = [name.partition('^')[::2] for name in fields]
     if any(boost for name, boost in fields):
         field = {name: float(boost or 1.0) for name, boost in fields}
     elif isinstance(field, str):
         (field, boost), = fields
     else:
         field = [name for name, boost in fields] or ''
     if 'type' in options:
         with HTTPError(AttributeError):
             return getattr(engine.Query, options['type'])(field, q)
     for key in set(options) - {'op', 'version'}:
         with HTTPError(ValueError):
             options[key] = json.loads(options[key])
     if q is not None:
         with HTTPError(lucene.JavaError):
             return searcher.parse(q, field=field, **options)
Code Example #27
File: graph_reference.py  Project: plotly/plotly.py
def get_graph_reference():
    """
    Load graph reference JSON (aka plot-schema)

    :return: (dict) The graph reference.

    """
    path = os.path.join("package_data", "default-schema.json")
    s = resource_string("plotly", path).decode("utf-8")
    graph_reference = utils.decode_unicode(_json.loads(s))

    # TODO: Patch in frames info until it hits streambed. See #659
    graph_reference["frames"] = {
        "items": {
            "frames_entry": {
                "baseframe": {
                    "description": "The name of the frame into which this "
                    "frame's properties are merged before "
                    "applying. This is used to unify "
                    "properties and avoid needing to specify "
                    "the same values for the same properties "
                    "in multiple frames.",
                    "role": "info",
                    "valType": "string",
                },
                "data": {
                    "description": "A list of traces this frame modifies. "
                    "The format is identical to the normal "
                    "trace definition.",
                    "role": "object",
                    "valType": "any",
                },
                "group": {
                    "description": "An identifier that specifies the group "
                    "to which the frame belongs, used by "
                    "animate to select a subset of frames.",
                    "role": "info",
                    "valType": "string",
                },
                "layout": {
                    "role": "object",
                    "description": "Layout properties which this frame "
                    "modifies. The format is identical to "
                    "the normal layout definition.",
                    "valType": "any",
                },
                "name": {"description": "A label by which to identify the frame", "role": "info", "valType": "string"},
                "role": "object",
                "traces": {
                    "description": "A list of trace indices that identify "
                    "the respective traces in the data "
                    "attribute",
                    "role": "info",
                    "valType": "info_array",
                },
            }
        },
        "role": "object",
    }

    return graph_reference
Code Example #28
File: event_stream.py  Project: zeroae/zeroae-goblet
 def json(self, **kwargs):
     return complexjson.loads(self._data, **kwargs)
Code Example #29
File: stat_plugin.py  Project: furvovan/HTTPAceProxy
    def getStatusJSON(self):
        # Sys Info
        clients = self.stuff.clientcounter.getAllClientsList(
        )  # Get connected clients list
        statusJSON = {}
        statusJSON['status'] = 'success'
        statusJSON['sys_info'] = {
            'os_platform': self.config.osplatform,
            'cpu_nums': psutil.cpu_count(),
            'cpu_percent': psutil.cpu_percent(interval=0, percpu=True),
            'cpu_freq': {
                k: v
                for k, v in psutil.cpu_freq()._asdict().items()
                if k in ('current', 'min', 'max')
            } if psutil.cpu_freq() else {},
            'mem_info': {
                k: v
                for k, v in psutil.virtual_memory()._asdict().items()
                if k in ('total', 'used', 'available')
            },
            'disk_info': {
                k: v
                for k, v in psutil.disk_usage(getcwdb())._asdict().items()
                if k in ('total', 'used', 'free')
            }
        }

        statusJSON['connection_info'] = {
            'max_clients': self.config.maxconns,
            'total_clients': len(clients),
        }

        statusJSON['clients_data'] = []
        for c in clients:
            if not c.clientInfo:
                if any([
                        requests.utils.address_in_network(c.clientip, i)
                        for i in localnetranges
                ]):
                    c.clientInfo = {
                        'vendor': self.get_vendor_Info(c.clientip),
                        'country_code': '',
                        'country_name': '',
                        'city': ''
                    }
                else:
                    try:
                        headers = {'User-Agent': 'API Browser'}
                        with requests.get('https://geoip-db.com/jsonp/%s' %
                                          c.clientip,
                                          headers=headers,
                                          stream=False,
                                          timeout=5) as r:
                            if r.encoding is None: r.encoding = 'utf-8'
                            c.clientInfo = json.loads(
                                r.text.split('(', 1)[1].strip(')'))
                            c.clientInfo['vendor'] = ''
                    except:
                        c.clientInfo = {
                            'vendor': '',
                            'country_code': '',
                            'country_name': '',
                            'city': ''
                        }

            statusJSON['clients_data'].append({
                'sessionID':
                c.sessionID,
                'channelIcon':
                c.channelIcon,
                'channelName':
                c.channelName,
                'clientIP':
                c.clientip,
                'clientInfo':
                c.clientInfo,
                #'clientBuff': c.q.qsize()*100/self.config.videotimeout,
                'startTime':
                time.strftime('%d/%m/%Y %H:%M:%S',
                              time.localtime(c.connectionTime)),
                'durationTime':
                time.strftime('%H:%M:%S',
                              time.gmtime(time.time() - c.connectionTime)),
                'stat':
                c.ace._status.get(timeout=2)
            })
        return statusJSON
Code Example #30
File: aceclient.py  Project: sergelevin/HTTPAceProxy
 def _loadresp_(self, recvbuffer):
     '''
     LOADRESP request_id {'status': status, 'files': [["Name", idx], [....]], 'infohash': infohash, 'checksum': checksum}
     '''
     return json.loads(unquote(''.join(recvbuffer[2:])))
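Per the docstring, the engine sends `LOADRESP request_id <url-encoded JSON>`, so the handler joins the remaining tokens, URL-decodes them and parses the result. A self-contained sketch of the same sequence with a made-up payload, using Python 3's `urllib.parse` (the project imports its own `unquote`):

    import json
    from urllib.parse import quote, unquote

    payload = quote('{"status": 1, "files": [["Name", 0]]}')
    recvbuffer = ['LOADRESP', '42', payload]
    data = json.loads(unquote(''.join(recvbuffer[2:])))
    print(data['status'])  # 1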
Code Example #31
File: offline.py  Project: fcolmenero/plotly.py
def iplot(figure_or_data, show_link=True, link_text='Export to plot.ly',
          validate=True, image=None, filename='plot_image', image_width=800,
          image_height=600, config=None):
    """
    Draw plotly graphs inside an IPython or Jupyter notebook without
    connecting to an external server.
    To save the chart to Plotly Cloud or Plotly Enterprise, use
    `plotly.plotly.iplot`.
    To embed an image of the chart, use `plotly.image.ishow`.

    figure_or_data -- a plotly.graph_objs.Figure or plotly.graph_objs.Data or
                      dict or list that describes a Plotly graph.
                      See https://plot.ly/python/ for examples of
                      graph descriptions.

    Keyword arguments:
    show_link (default=True) -- display a link in the bottom-right corner of
                                the chart that will export the chart to
                                Plotly Cloud or Plotly Enterprise
    link_text (default='Export to plot.ly') -- the text of export link
    validate (default=True) -- validate that all of the keys in the figure
                               are valid? omit if your version of plotly.js
                               has become outdated with your version of
                               graph_reference.json or if you need to include
                               extra, unnecessary keys in your figure.
    image (default=None |'png' |'jpeg' |'svg' |'webp') -- This parameter sets
        the format of the image to be downloaded, if we choose to download an
        image. This parameter has a default value of None indicating that no
        image should be downloaded. Please note: for higher resolution images
        and more export options, consider making requests to our image servers.
        Type: `help(py.image)` for more details.
    filename (default='plot') -- Sets the name of the file your image
        will be saved to. The extension should not be included.
    image_height (default=600) -- Specifies the height of the image in `px`.
    image_width (default=800) -- Specifies the width of the image in `px`.
    config (default=None) -- Plot view options dictionary. Keyword arguments
        `show_link` and `link_text` set the associated options in this
        dictionary if it doesn't contain them already.

    Example:
    ```
    from plotly.offline import init_notebook_mode, iplot
    init_notebook_mode()
    iplot([{'x': [1, 2, 3], 'y': [5, 2, 7]}])
    # We can also download an image of the plot by setting the image to the
    # format you want, e.g. `image='png'`
    iplot([{'x': [1, 2, 3], 'y': [5, 2, 7]}], image='png')
    ```
    """
    if not ipython:
        raise ImportError('`iplot` can only run inside an IPython Notebook.')

    config = dict(config) if config else {}
    config.setdefault('showLink', show_link)
    config.setdefault('linkText', link_text)

    figure = tools.return_figure_from_figure_or_data(figure_or_data, validate)

    # Though it can add quite a bit to the display-bundle size, we include
    # multiple representations of the plot so that the display environment can
    # choose which one to act on.
    data = _json.loads(_json.dumps(figure['data'],
                                   cls=plotly.utils.PlotlyJSONEncoder))
    layout = _json.loads(_json.dumps(figure.get('layout', {}),
                                     cls=plotly.utils.PlotlyJSONEncoder))
    frames = _json.loads(_json.dumps(figure.get('frames', None),
                                     cls=plotly.utils.PlotlyJSONEncoder))

    fig = {'data': data, 'layout': layout}
    if frames:
        fig['frames'] = frames

    display_bundle = {'application/vnd.plotly.v1+json': fig}

    if __PLOTLY_OFFLINE_INITIALIZED:
        plot_html, plotdivid, width, height = _plot_html(
            figure_or_data, config, validate, '100%', 525, True
        )
        display_bundle['text/html'] = plot_html
        display_bundle['text/vnd.plotly.v1+html'] = plot_html

    ipython_display.display(display_bundle, raw=True)

    if image:
        if not __PLOTLY_OFFLINE_INITIALIZED:
            raise PlotlyError('\n'.join([
                'Plotly Offline mode has not been initialized in this notebook. '
                'Run: ',
                '',
                'import plotly',
                'plotly.offline.init_notebook_mode() '
                '# run at the start of every ipython notebook',
            ]))
        if image not in __IMAGE_FORMATS:
            raise ValueError('The image parameter must be one of the following'
                             ': {}'.format(__IMAGE_FORMATS)
                             )
        # if image is given, and is a valid format, we will download the image
        script = get_image_download_script('iplot').format(format=image,
                                                           width=image_width,
                                                           height=image_height,
                                                           filename=filename,
                                                           plot_id=plotdivid)
        # allow time for the plot to draw
        time.sleep(1)
        # inject code to download an image of the plot
        ipython_display.display(ipython_display.HTML(script))
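The `_json.loads(_json.dumps(..., cls=plotly.utils.PlotlyJSONEncoder))` round trip above is what turns figure contents (which may hold numpy arrays, datetimes, NaN and similar values) into plain JSON-compatible dicts and lists for the display bundle. A minimal sketch of the same pattern, assuming plotly is installed:

    import json
    import plotly.utils

    figure = {'data': [{'x': [1, 2, 3], 'y': [5, 2, 7]}], 'layout': {}}
    data = json.loads(json.dumps(figure['data'], cls=plotly.utils.PlotlyJSONEncoder))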
Code Example #32
File: aceclient.py  Project: furvovan/HTTPAceProxy
 def _recvData(self, timeout=30):
     '''
     Data receiver method for greenlet
     '''
     while 1:
         # Destroy socket connection if AceEngine STATE 0 (IDLE) and we didn't read anything from socket until Nsec
         with gevent.Timeout(timeout, False):
             try:
                 self._recvbuffer = self._socket.read_until('\r\n',
                                                            None).strip()
             except gevent.Timeout:
                 self.destroy()
             except gevent.socket.timeout:
                 pass
             except:
                 raise
             else:
                 logging.debug('<<< %s' % unquote(self._recvbuffer))
                 # Parsing everything only if the string is not empty
                 # HELLOTS
                 if self._recvbuffer.startswith('HELLOTS'):
                     #version=engine_version version_code=version_code key=request_key http_port=http_port
                     self._auth.set({
                         k: v
                         for k, v in (x.split('=')
                                      for x in self._recvbuffer.split()
                                      if '=' in x)
                     })
                 # NOTREADY
                 elif self._recvbuffer.startswith('NOTREADY'):
                     self._auth.set('NOTREADY')
                     # AUTH
                 elif self._recvbuffer.startswith('AUTH'):
                     self._auth.set(
                         self._recvbuffer.split()[1])  # user_auth_level
                     # START
                 elif self._recvbuffer.startswith('START'):
                     # url [ad=1 [interruptable=1]] [stream=1] [pos=position]
                     params = {
                         k: v
                         for k, v in (x.split('=')
                                      for x in self._recvbuffer.split()
                                      if '=' in x)
                     }
                     if not self._seekback or self._started_again.ready(
                      ) or params.get('stream', '') != '1':
                         # If seekback is disabled, we use link in first START command.
                         # If seekback is enabled, we wait for first START command and
                         # ignore it, then do seekback in first EVENT position command
                         # AceStream sends us STOP and START again with new link.
                         # We use only second link then.
                         self._url.set(
                             self._recvbuffer.split()[1])  # url for play
                 # LOADRESP
                 elif self._recvbuffer.startswith('LOADRESP'):
                     self._loadasync.set(
                         json.loads(
                             unquote(''.join(
                                 self._recvbuffer.split()[2:]))))
                 # STATE
                 elif self._recvbuffer.startswith('STATE'):
                     self._state.set(self._recvbuffer.split()
                                     [1])  # STATE state_id -> STATE_NAME
                 # STATUS
                 elif self._recvbuffer.startswith('STATUS'):
                     self._tempstatus = self._recvbuffer.split()[1]
                     stat = [self._tempstatus.split(';')[0].split(':')[1]
                             ]  # main:????
                     if self._tempstatus.startswith('main:idle'): pass
                     elif self._tempstatus.startswith('main:loading'): pass
                     elif self._tempstatus.startswith('main:starting'): pass
                     elif self._tempstatus.startswith('main:check'): pass
                     elif self._tempstatus.startswith('main:err'):
                         pass  # err;error_id;error_message
                     elif self._tempstatus.startswith('main:dl'):  #dl;
                         stat.extend(
                             map(int,
                                 self._tempstatus.split(';')[1:]))
                     elif self._tempstatus.startswith(
                             'main:wait'):  #wait;time;
                         stat.extend(
                             map(int,
                                 self._tempstatus.split(';')[2:]))
                     elif self._tempstatus.startswith(
                         ('main:prebuf', 'main:buf')):  #buf;progress;time;
                         stat.extend(
                             map(int,
                                 self._tempstatus.split(';')[3:]))
                     try:
                         self._status.set({
                             k: v
                             for k, v in zip(AceConst.STATUS, stat)
                         })  # dl, wait, buf, prebuf
                     except:
                         self._status.set(
                             {'status':
                              stat[0]})  # idle, loading, starting, check
                 # CID
                 elif self._recvbuffer.startswith('##'):
                     self._cid.set(self._recvbuffer)
                     # INFO
                 elif self._recvbuffer.startswith('INFO'):
                     pass
                     # EVENT
                 elif self._recvbuffer.startswith('EVENT'):
                     self._tempevent = self._recvbuffer.split()
                     if self._seekback and not self._started_again.ready(
                     ) and 'livepos' in self._tempevent:
                         params = {
                             k: v
                             for k, v in (x.split('=')
                                          for x in self._tempevent
                                          if '=' in x)
                         }
                         self._write(
                             AceMessage.request.LIVESEEK(
                                 int(params['last']) - self._seekback))
                         self._started_again.set()
                     elif 'getuserdata' in self._tempevent:
                         self._write(
                             AceMessage.request.USERDATA(
                                 self._gender, self._age))
                     elif 'cansave' in self._tempevent:
                         pass
                     elif 'showurl' in self._tempevent:
                         pass
                     elif 'download_stopped' in self._tempevent:
                         pass
                 # PAUSE
                 elif self._recvbuffer.startswith('PAUSE'):
                     pass  #self._write(AceMessage.request.EVENT('pause'))
                     # RESUME
                 elif self._recvbuffer.startswith('RESUME'):
                     pass  #self._write(AceMessage.request.EVENT('play'))
                     # STOP
                 elif self._recvbuffer.startswith('STOP'):
                     pass  #self._write(AceMessage.request.EVENT('stop'))
                     # SHUTDOWN
                 elif self._recvbuffer.startswith('SHUTDOWN'):
                     self._socket.close()
                     break
Code Example #33
def get_graph_reference():
    """
    Load graph reference JSON (aka plot-schema)

    :return: (dict) The graph reference.

    """
    path = os.path.join('package_data', 'plot-schema.json')
    s = resource_string('plotly', path).decode('utf-8')
    graph_reference = utils.decode_unicode(_json.loads(s))

    # TODO: Patch in frames info until it hits streambed. See #659
    graph_reference['frames'] = {
        "items": {
            "frames_entry": {
                "baseframe": {
                    "description":
                    "The name of the frame into which this "
                    "frame's properties are merged before "
                    "applying. This is used to unify "
                    "properties and avoid needing to specify "
                    "the same values for the same properties "
                    "in multiple frames.",
                    "role":
                    "info",
                    "valType":
                    "string"
                },
                "data": {
                    "description":
                    "A list of traces this frame modifies. "
                    "The format is identical to the normal "
                    "trace definition.",
                    "role":
                    "object",
                    "valType":
                    "any"
                },
                "group": {
                    "description":
                    "An identifier that specifies the group "
                    "to which the frame belongs, used by "
                    "animate to select a subset of frames.",
                    "role":
                    "info",
                    "valType":
                    "string"
                },
                "layout": {
                    "role":
                    "object",
                    "description":
                    "Layout properties which this frame "
                    "modifies. The format is identical to "
                    "the normal layout definition.",
                    "valType":
                    "any"
                },
                "name": {
                    "description": "A label by which to identify the frame",
                    "role": "info",
                    "valType": "string"
                },
                "role": "object",
                "traces": {
                    "description":
                    "A list of trace indices that identify "
                    "the respective traces in the data "
                    "attribute",
                    "role":
                    "info",
                    "valType":
                    "info_array"
                }
            }
        },
        "role": "object"
    }

    return graph_reference
Code Example #34
File: stat_plugin.py  Project: 1Joy1/HTTPAceProxy
    def getStatusJSON(self):
        # Sys Info
        statusJSON = {}
        statusJSON['status'] = 'success'
        statusJSON['sys_info'] = {
            'os_platform': self.config.osplatform,
            'cpu_nums': psutil.cpu_count(),
            'cpu_percent': psutil.cpu_percent(interval=0, percpu=True),
            'cpu_freq': {
                k: v
                for k, v in psutil.cpu_freq()._asdict().items()
                if k in ('current', 'min', 'max')
            } if psutil.cpu_freq() else None,
            'mem_info': {
                k: v
                for k, v in psutil.virtual_memory()._asdict().items()
                if k in ('total', 'used', 'available')
            },
            'disk_info': {
                k: v
                for k, v in psutil.disk_usage(getcwdb())._asdict().items()
                if k in ('total', 'used', 'free')
            }
        }

        statusJSON['connection_info'] = {
            'max_clients': self.config.maxconns,
            'total_clients': self.stuff.clientcounter.totalClients(),
        }

        statusJSON['clients_data'] = []
        # Dict {'CID': [client1, client2,....]} to list of values
        clients = [
            item for sublist in self.stuff.clientcounter.streams.values()
            for item in sublist
        ]
        for c in clients:
            if not c.clientInfo:
                if any([
                        requests.utils.address_in_network(c.clientip, i)
                        for i in localnetranges
                ]):
                    c.clientInfo = {
                        'vendor': self.get_vendor_Info(c.clientip),
                        'country_code': '',
                        'country_name': '',
                        'city': ''
                    }
                else:
                    try:
                        headers = {'User-Agent': 'API Browser'}
                        with requests.get('https://geoip-db.com/jsonp/%s' %
                                          c.clientip,
                                          headers=headers,
                                          stream=False,
                                          timeout=5) as r:
                            if r.encoding is None: r.encoding = 'utf-8'
                            c.clientInfo = json.loads(
                                r.text.split('(', 1)[1].strip(')'))
                            c.clientInfo['vendor'] = ''
                    except:
                        c.clientInfo = {
                            'vendor': '',
                            'country_code': '',
                            'country_name': '',
                            'city': ''
                        }

            statusJSON['clients_data'].append({
                'channelIcon':
                c.channelIcon,
                'channelName':
                c.channelName,
                'clientIP':
                c.clientip,
                'clientInfo':
                c.clientInfo,
                'startTime':
                time.strftime('%d/%m/%Y %H:%M:%S',
                              time.localtime(c.connectionTime)),
                'durationTime':
                time.strftime('%H:%M:%S',
                              time.gmtime(time.time() - c.connectionTime)),
                'stat':
                requests.get(c.cmd['stat_url'], timeout=2,
                             stream=False).json()['response']
                if self.config.new_api else c.ace._status.get(timeout=2)
            })
        return statusJSON
Code Example #35
    def search(self,
               q=None,
               count: int = None,
               start: int = 0,
               fields: multi = None,
               sort: multi = None,
               facets: multi = '',
               group='',
               hl: multi = '',
               mlt: int = None,
               timeout: float = None,
               **options):
        """Run query and return documents.

        **GET** /search?
            Return array of document objects and total doc count.

            &q=\ *chars*\ &q.type=[term|prefix|wildcard]&q.spellcheck=true&q.\ *chars*\ =...,
                query, optional type to skip parsing, spellcheck, and parser settings: q.field, q.op,...

            &count=\ *int*\ &start=0
                maximum number of docs to return and offset to start at

            &fields=\ *chars*,... &fields.multi=\ *chars*,... &fields.docvalues=\ *chars*\ [:*chars*],...
                only include selected stored fields; multi-valued fields returned in an array; docvalues fields

            &sort=\ [-]\ *chars*\ [:*chars*],... &sort.scores[=max]
                | field name, optional type, minus sign indicates descending
                | optionally score docs, additionally compute maximum score

            &facets=\ *chars*,... &facets.count=\ *int*\&facets.min=0
                | include facet counts for given field names
                | optional maximum number of most populated facet values per field, and minimum count to return

            &group=\ *chars*\ [:*chars*]&group.count=1
                | group documents by field value with optional type, up to given maximum count

            .. versionchanged:: 1.6 grouping searches use count and start options

            &hl=\ *chars*,... &hl.count=1
                | stored fields to return highlighted
                | optional maximum fragment count

            &mlt=\ *int*\ &mlt.fields=\ *chars*,... &mlt.\ *chars*\ =...,
                | doc index (or id without a query) to find MoreLikeThis
                | optional document fields to match
                | optional MoreLikeThis settings: mlt.minTermFreq, mlt.minDocFreq,...

            &timeout=\ *number*
                timeout search after elapsed number of seconds

            :return:
                | {
                | "query": *string*\|null,
                | "count": *int*\|null,
                | "maxscore": *number*\|null,
                | "docs": [{"__id__": *int*, "__score__": *number*, "__keys__": *array*,
                    "__highlights__": {*string*: *array*,... }, *string*: *value*,... },... ],
                | "facets": {*string*: {*string*: *int*,... },... },
                | "groups": [{"count": *int*, "value": *value*, "docs": [*object*,... ]},... ]
                | }
        """
        searcher = self.searcher
        if sort is not None:
            sort = (re.match(r'(-?)(\w+):?(\w*)', field).groups()
                    for field in sort)
            with HTTPError(AttributeError):
                sort = [
                    searcher.sortfield(name, getattr(__builtins__, type, None),
                                       (reverse == '-'))
                    for reverse, name, type in sort
                ]
        q = parse.q(searcher, q, **options)
        if mlt is not None:
            if q is not None:
                mlt, = searcher.search(q, count=mlt + 1, sort=sort)[mlt:].ids
            mltfields = options.pop('mlt.fields', ())
            with HTTPError(ValueError):
                attrs = {
                    key.partition('.')[-1]: json.loads(options[key])
                    for key in options if key.startswith('mlt.')
                }
            q = searcher.morelikethis(mlt,
                                      *mltfields,
                                      analyzer=searcher.analyzer,
                                      **attrs)
        if count is not None:
            count += start
        if count == 0:
            start = count = 1
        scores = options.get('sort.scores')
        gcount = options.get('group.count', 1)
        scores = {'scores': scores is not None, 'maxscore': scores == 'max'}
        if ':' in group:
            hits = searcher.search(q, sort=sort, timeout=timeout, **scores)
            name, docvalues = parse.docvalues(searcher, group)
            with HTTPError(TypeError):
                groups = hits.groupby(docvalues.select(hits.ids).__getitem__,
                                      count=count,
                                      docs=gcount)
            groups.groupdocs = groups.groupdocs[start:]
        elif group:
            scores = {
                'includeScores': scores['scores'],
                'includeMaxScore': scores['maxscore']
            }
            groups = searcher.groupby(group,
                                      q,
                                      count,
                                      start=start,
                                      sort=sort,
                                      groupDocsLimit=gcount,
                                      **scores)
        else:
            hits = searcher.search(q,
                                   sort=sort,
                                   count=count,
                                   timeout=timeout,
                                   **scores)
            groups = engine.documents.Groups(searcher, [hits[start:]],
                                             hits.count, hits.maxscore)
        result = {
            'query': q and str(q),
            'count': groups.count,
            'maxscore': groups.maxscore
        }
        fields, multi, docvalues = parse.fields(searcher, fields, **options)
        if fields is None:
            fields = {}
        else:
            groups.select(*itertools.chain(fields, multi))
        hl = dict.fromkeys(hl, options.get('hl.count', 1))
        result['groups'] = []
        for hits in groups:
            docs = []
            highlights = hits.highlights(q, **hl) if hl else ([{}] * len(hits))
            for hit, highlight in zip(hits, highlights):
                doc = hit.dict(*multi, **fields)
                with HTTPError(TypeError):
                    doc.update(
                        (name, docvalues[name][hit.id]) for name in docvalues)
                if highlight:
                    doc['__highlights__'] = highlight
                docs.append(doc)
            result['groups'].append({
                'docs': docs,
                'count': hits.count,
                'value': getattr(hits, 'value', None)
            })
        if not group:
            result['docs'] = result.pop('groups')[0]['docs']
        q = q or engine.Query.alldocs()
        if facets:
            query_map = {
                facet: self.query_map[facet]
                for facet in set(facets).intersection(self.query_map)
            }
            facets = result['facets'] = searcher.facets(
                q,
                *set(facets).difference(query_map), **query_map)
            for counts in facets.values():
                counts.pop(None, None)
            if 'facets.min' in options:
                for name, counts in facets.items():
                    facets[name] = {
                        term: count
                        for term, count in counts.items()
                        if count >= options['facets.min']
                    }
            if 'facets.count' in options:
                for name, counts in facets.items():
                    facets[name] = {
                        term: counts[term]
                        for term in heapq.nlargest(options['facets.count'],
                                                   counts,
                                                   key=counts.__getitem__)
                    }
        return result
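
The docstring at the top of this handler documents the JSON shape of the search response. The sketch below shows how a client might consume that shape; the base URL, endpoint path, and query parameters are illustrative assumptions and not part of the example above.

import requests  # client-side sketch only, separate from the handler above

base = 'http://localhost:8080'            # hypothetical server address
params = {'q': 'text:hello', 'count': 5}  # hypothetical query
response = requests.get(base + '/search', params=params)
result = response.json()

# 'count', 'maxscore', and 'docs' follow the schema described in the docstring
print(result['count'], result['maxscore'])
for doc in result.get('docs', []):
    print(doc['__id__'], doc['__score__'])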
Code example #36
parser.add_argument(
    '--autoupdate',
    type=float,
    metavar='SECONDS',
    help='automatically update index version and commit any changes')
parser.add_argument(
    '--autosync',
    metavar='URL,...',
    help='automatically synchronize searcher with remote hosts and update')
parser.add_argument('--real-time',
                    action='store_true',
                    help='search in real-time without committing')

if __name__ == '__main__':
    args = parser.parse_args()
    read_only = args.read_only or args.autosync or len(args.directories) > 1
    kwargs = {'nrt': True} if args.real_time else {}
    if read_only and (args.real_time or not args.directories):
        parser.error('incompatible read/write options')
    if args.autosync:
        kwargs['urls'] = args.autosync.split(',')
        if not (args.autoupdate and len(args.directories) == 1):
            parser.error('autosync requires autoupdate and a single directory')
        warnings.warn('autosync is not recommended for production usage')
    if args.config and not os.path.exists(args.config):
        args.config = {'global': json.loads(args.config)}
    cls = WebSearcher if read_only else WebIndexer
    root = cls.new(*map(os.path.abspath, args.directories), **kwargs)
    del args.directories, args.read_only, args.autosync, args.real_time
    start(root, callback=init, **args.__dict__)
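
As an aside, the --config handling above accepts either a path to a configuration file or an inline JSON string; the minimal sketch below illustrates that fallback with a purely hypothetical option value.

import json
import os

config_arg = '{"server.socket_port": 8080}'  # hypothetical --config value
if os.path.exists(config_arg):
    config = config_arg  # an existing path is passed through as a config file
else:
    # otherwise the value is parsed as JSON and nested under the 'global' section
    config = {'global': json.loads(config_arg)}
print(config)  # {'global': {'server.socket_port': 8080}}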
Code example #37
File: offline.py Project: wang-shun/plotly.py
def iplot(figure_or_data, show_link=True, link_text='Export to plot.ly',
          validate=True, image=None, filename='plot_image', image_width=800,
          image_height=600, config=None):
    """
    Draw plotly graphs inside an IPython or Jupyter notebook without
    connecting to an external server.
    To save the chart to Plotly Cloud or Plotly Enterprise, use
    `plotly.plotly.iplot`.
    To embed an image of the chart, use `plotly.image.ishow`.

    figure_or_data -- a plotly.graph_objs.Figure or plotly.graph_objs.Data or
                      dict or list that describes a Plotly graph.
                      See https://plot.ly/python/ for examples of
                      graph descriptions.

    Keyword arguments:
    show_link (default=True) -- display a link in the bottom-right corner
                                of the chart that will export the chart to
                                Plotly Cloud or Plotly Enterprise
    link_text (default='Export to plot.ly') -- the text of the export link
    validate (default=True) -- validate that all of the keys in the figure
                               are valid. Set to False if your version of
                               plotly.js has fallen out of sync with your
                               version of graph_reference.json or if you
                               need to include extra, unnecessary keys in
                               your figure.
    image (default=None |'png' |'jpeg' |'svg' |'webp') -- This parameter sets
        the format of the image to be downloaded, if we choose to download an
        image. This parameter has a default value of None indicating that no
        image should be downloaded. Please note: for higher resolution images
        and more export options, consider making requests to our image servers.
        Type: `help(py.image)` for more details.
    filename (default='plot_image') -- Sets the name of the file your image
        will be saved to. The extension should not be included.
    image_height (default=600) -- Specifies the height of the image in `px`.
    image_width (default=800) -- Specifies the width of the image in `px`.
    config (default=None) -- Plot view options dictionary. Keyword arguments
        `show_link` and `link_text` set the associated options in this
        dictionary if it doesn't contain them already.

    Example:
    ```
    from plotly.offline import init_notebook_mode, iplot
    init_notebook_mode()
    iplot([{'x': [1, 2, 3], 'y': [5, 2, 7]}])
    # We can also download an image of the plot by setting `image` to the
    # format you want, e.g. `image='png'`
    iplot([{'x': [1, 2, 3], 'y': [5, 2, 7]}], image='png')
    ```
    """
    if not __PLOTLY_OFFLINE_INITIALIZED:
        raise PlotlyError('\n'.join([
            'Plotly Offline mode has not been initialized in this notebook. '
            'Run: ',
            '',
            'import plotly',
            'plotly.offline.init_notebook_mode() '
            '# run at the start of every ipython notebook',
        ]))
    if not ipython:
        raise ImportError('`iplot` can only run inside an IPython Notebook.')

    config = dict(config) if config else {}
    config.setdefault('showLink', show_link)
    config.setdefault('linkText', link_text)

    plot_html, plotdivid, width, height = _plot_html(
        figure_or_data, config, validate, '100%', 525, True
    )

    figure = tools.return_figure_from_figure_or_data(figure_or_data, validate)

    # Though it can add quite a bit to the display-bundle size, we include
    # multiple representations of the plot so that the display environment can
    # choose which one to act on.
    data = _json.loads(_json.dumps(figure['data'],
                                   cls=plotly.utils.PlotlyJSONEncoder))
    layout = _json.loads(_json.dumps(figure.get('layout', {}),
                                     cls=plotly.utils.PlotlyJSONEncoder))
    frames = _json.loads(_json.dumps(figure.get('frames', None),
                                     cls=plotly.utils.PlotlyJSONEncoder))

    fig = {'data': data, 'layout': layout}
    if frames:
        fig['frames'] = frames

    display_bundle = {
        'application/vnd.plotly.v1+json': fig,
        'text/html': plot_html,
        'text/vnd.plotly.v1+html': plot_html
    }
    ipython_display.display(display_bundle, raw=True)

    if image:
        if image not in __IMAGE_FORMATS:
            raise ValueError('The image parameter must be one of the following'
                             ': {}'.format(__IMAGE_FORMATS)
                             )
        # if image is given, and is a valid format, we will download the image
        script = get_image_download_script('iplot').format(format=image,
                                                           width=image_width,
                                                           height=image_height,
                                                           filename=filename,
                                                           plot_id=plotdivid)
        # allow time for the plot to draw
        time.sleep(1)
        # inject code to download an image of the plot
        ipython_display.display(ipython_display.HTML(script))
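
For completeness, here is a small usage sketch of the `config` keyword documented above. The figure values are illustrative, and 'displayModeBar' is a standard plotly.js configuration flag rather than an option this function sets itself.

from plotly.offline import init_notebook_mode, iplot

init_notebook_mode()
# hide the export link and the plotly.js mode bar via the config dictionary
iplot({'data': [{'x': [1, 2, 3], 'y': [4, 1, 2]}]},
      show_link=False,
      config={'displayModeBar': False})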