Code example #1
File: gelf.py Project: Kelfast/txGraylog
 def encoded_log_params(self):
     """ Property to return back the compressed log paramaters
     """
     params = (
         zlib.compress(json.dumps(self.log_params))
         if self._compress else json.dumps(self.log_params)
     )
     return params
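Reading such a payload back reverses the two steps (a minimal sketch; the compressed flag stands in for the instance's _compress attribute):

 import json
 import zlib

 def decoded_log_params(payload, compressed):
     # Inverse of encoded_log_params: inflate if needed, then parse the JSON.
     if compressed:
         payload = zlib.decompress(payload)
     return json.loads(payload)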
Code example #2
File: microblog.py Project: drewp/magma
def postIdenticaOauth():
    """ not working yet. last tried on 2010-05 """
    from restkit import OAuthFilter, request
    import restkit.oauth2 

    consumer = restkit.oauth2.Consumer(key=oauthKey, secret=oauthSecret)

    request_token_url = "http://identi.ca/api/oauth/request_token"

    auth = OAuthFilter(('*', consumer))

    if 1:
        # The request.
        resp = request(request_token_url, filters=[auth])
        print resp.__dict__
        print resp.body_string()
    else:
        # `tok` is only defined on this branch; the requests below raise
        # NameError when the `if 1:` branch above runs instead.
        tok = restkit.oauth2.Token(oauth_token, oauth_token_secret)

    resp = restkit.request(
        "http://identi.ca/api/statuses/friends_timeline.json",
        filters=[OAuthFilter(('*', consumer, tok))],
        method="GET")
    print resp.body_string()
    print resp

    resp = restkit.request("http://identi.ca/api/statuses/update.json",
                    filters=[OAuthFilter(('*', consumer, tok))],
                    method="POST",
                    body=jsonlib.dumps({'status' : 'first oauth update'}))

    print resp.body_string()
    print resp
Code example #3
File: login.py Project: felixbonkoski/tumblrpype
    def __save_customize_page(self, themeInfo):

        # HTTP Post is done with Mime-type 'application/json'

        postData = jsonlib.dumps(themeInfo)

        postHandler = HTTPPostHandler('application/json')

        debug("Editing Theme HTML...")

        opener = self._make_opener()
        opener.add_handler(postHandler)
        opener.addheaders.append(('Referer', 'http://www.tumblr.com/customize/%s' % self.blogname))
        opener.addheaders.append(('Accept', 'application/json, text/javascript, */*; q=0.01'))
        opener.addheaders.append(('Accept-Charset', 'UTF-8,*;q=0.5'))
        opener.addheaders.append(('X-Requested-With', 'XMLHttpRequest'))
        opener.addheaders.append(('Origin', 'http://www.tumblr.com'))
        opener.addheaders.append(('Pragma', 'no-cache'))
        opener.addheaders.append(('Cache-Control', 'no-cache'))

        try:
            resp = opener.open('http://www.tumblr.com/customize_api/blog/%s' % self.blogname, data = postData)

        except Exception as e:
            debug("  !! Failed to edit HTML: %s" % e)
            return None

        newThemeInfo = resp.read()
        newThemeInfo = jsonlib.loads(newThemeInfo)

        debug("  <3 Theme Saved.")

        return newThemeInfo
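HTTPPostHandler is a project-specific class that is not shown; a plausible minimal version for urllib2 (an assumption, not the project's actual code) would stamp the Content-Type on requests that carry a body:

import urllib2

class HTTPPostHandler(urllib2.BaseHandler):
    def __init__(self, content_type):
        self.content_type = content_type

    def http_request(self, req):
        # urllib2 runs *_request hooks before sending; only requests with
        # a body (the POSTs here) get the Content-Type header.
        if req.has_data():
            req.add_unredirected_header('Content-Type', self.content_type)
        return req

    https_request = http_request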
Code example #4
File: entrancemusic.py Project: drewp/entrancemusic
def sendMsg(msg, hubPost=True):
    """adds created time, writes mongo and hub"""
    log.info(str(msg))
    if hubPost:
        # post before 'created' is added: datetime values are not JSON-serializable
        hub.post("visitorNet", payload=jsonlib.dumps(msg))
    msg['created'] = datetime.datetime.now(tz.gettz('UTC'))
    mongo.save(msg)
Code example #6
File: homepage.py Project: drewp/magma
 def child_ticket(self, ctx):
     """one-time-use string to pass as ticket= param"""
     magic = hexlify(os.urandom(8))
     f = open(self.ticketFile, "w")
     os.chmod(self.ticketFile, stat.S_IRUSR | stat.S_IWUSR)
     f.write(jsonlib.dumps({"magic": magic, "expires": time.time() + 5}))
     f.close()
     return magic
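A hedged sketch of the check side (assumed; the project's validation code is not shown): read the ticket file back and honor the magic string only until it expires:

 import time
 import jsonlib

 def ticket_is_valid(ticket_file, presented_magic):
     # Counterpart to child_ticket: compare the magic and enforce the expiry.
     saved = jsonlib.loads(open(ticket_file).read())
     return saved["magic"] == presented_magic and time.time() < saved["expires"]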
Code example #7
File: mpvAdapter.py Project: antonagre/NodeClient-be
def play(name):
    # cache metadata in module globals so getInfo() can report them later
    global songName
    global thumbUrl
    data = getAudioUrl(name)
    songName = data["title"]
    thumbUrl = data["thumbnail"]
    mpv.play(data["url"])
    return json.dumps(data)
Code example #9
File: homepage.py Project: drewp/magma
 def child_tempSection(self, ctx):
     temps = dict.fromkeys(["ariBedroom", "downstairs", "livingRoom", "bedroom", "KSQL"])
     temps.update(getAllTemps())
     return jsonlib.dumps(
         [
             dict(name=k, val="%.1f &#176;F" % v if v is not None else "?")
             for k, v in sorted(temps.items(), key=lambda kv: kv[1], reverse=True)
         ]
     )
Code example #10
File: __init__.py Project: past-due/socketrpc
 def encode(data):
     """
     Encodes data returns a BSON object or
     a Fault
     """
     try:
         return jsonlib.dumps(data)
     except Exception, e:
         msg = 'Invalid JSON Data, got: %s:%s' % (e.__class__.__name__, e)
         return Fault(NOT_WELLFORMED_ERROR, msg)
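The decode direction would follow the same pattern (a sketch reusing this module's Fault and NOT_WELLFORMED_ERROR names; not necessarily the project's actual code):

 def decode(data):
     """
     Decodes a JSON string, returning the parsed
     value or a Fault on failure.
     """
     try:
         return jsonlib.loads(data)
     except Exception as e:
         msg = 'Invalid JSON data, got: %s:%s' % (e.__class__.__name__, e)
         return Fault(NOT_WELLFORMED_ERROR, msg)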
Code example #11
File: test_api.py Project: CSIS/proccer
def test_post_new_event():
    client = Client(app, BaseResponse)
    with patch('proccer.notifications.smtplib') as smtplib:
        resp = client.post('/api/1.0/report',
                           data=json.dumps(ok_result),
                           headers={'Content-Type': 'application/json'})

    assert resp.status_code == 200, repr((resp.status, resp.data))
    assert session.query(Job).count() == 1
    assert session.query(JobResult).count() == 1
Code example #12
def mongoInsert(msg):
    try:
        js = jsonlib.dumps(msg)
    except UnicodeDecodeError:
        pass
    else:
        if msg.get("name", "") and msg["name"] not in ["THINKPAD_T43"] and msg["action"] == "arrive":
            hub.post("visitorNet", payload=js)  # sans datetime
    msg["created"] = datetime.datetime.now(tz.gettz("UTC"))
    mongo.insert(msg, safe=True)
Code example #13
File: agent.py Project: dba-csis/proccer
def report(result):
    if not API_URL:
        return

    try:
        headers = {'Content-Type': 'application/json; charset=utf-8'}
        report_url = API_URL + '/1.0/report'
        requests.post(report_url,
                      data=json.dumps(result),
                      headers=headers)
    except Exception:
        log.error('error delivering job-status %r',
                  result, exc_info=True)
Code example #14
 def doEntranceMusic(self, action):
     import restkit, jsonlib
     dt = self.deltaSinceLastArrive(action['name'])
     log.debug("dt=%s", dt)
     if dt > datetime.timedelta(hours=1):
         hub = restkit.Resource(
             # PSHB not working yet; "http://bang:9030/"
             "http://slash:9049/"
             )
         action = action.copy()
         del action['created']
         del action['_id']
         log.info("post to %s", hub)
         hub.post("visitorNet", payload=jsonlib.dumps(action))
Code example #15
    def get(self):
        name = {}  # addr : name
        events = []
        hours = float(self.get_argument("hours", default="3"))
        t1 = datetime.datetime.now(tzutc()) - datetime.timedelta(seconds=60 * 60 * hours)
        for row in mongo.find({"sensor": "bluetooth", "created": {"$gt": t1}}, sort=[("created", 1)]):
            if "name" in row:
                name[row["address"]] = row["name"]
            row["t"] = int(row["created"].astimezone(tzlocal()).strftime("%s"))
            del row["created"]
            del row["_id"]
            events.append(row)

        for r in events:
            r["name"] = name.get(r["address"], r["address"])
        self.set_header("Content-Type", "application/json")
        self.write(jsonlib.dumps({"events": events}))
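The JSON written by this handler has roughly the following shape (an illustration; the values are invented, and any extra fields on a row pass through unchanged):

    {"events": [{"sensor": "bluetooth", "address": "00:11:22:33:44:55",
                 "name": "somephone", "t": 1273104000}]}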
Code example #16
File: mpvAdapter.py Project: antonagre/NodeClient-be
def getInfo():
    global thumbUrl
    global songName
    data = {}
    data["pos"] = getCurPos()
    if mpv.time_remaining is None:
        data["title"] = None
        data["duration"] = None
        data["thumbnail"] = None
    else:
        try:
            data["title"] = songName
            data["duration"] = round(mpv.duration, 2)
            data["thumbnail"] = thumbUrl
        except Exception:
            print("error get-info")
    data["volume"] = mpv.volume
    return json.dumps(data)
Code example #17
File: views.py Project: sanmimu/IP469
def query_by_ipv4_inner(request, ipv4):
    """
    """
    logger = logging.getLogger("query_by_ipv4_inner")
    ip_infos = models.Ipv4Info.objects.filter_by_ip(ipv4)[:5]
    ip_string = ip_convert.ipv4_to_string(ipv4)
    ip_value = ip_convert.ipv4_int2readable(ipv4)
    ip_client_string = get_client_ip(request)
    ip_client_value = ip_convert.ipv4_from_string(ip_client_string)
    logger.debug("from %s query %s return %s results",
                 ip_client_string, ip_string, ip_infos.count())
    new_query_history = []
    if ip_infos.count() > 0:
        new_query_history.append([ip_string, unicode(ip_infos[0])])
    if COOKIE_QUERY_HISTORY in request.COOKIES:
        old_query_history = request.COOKIES[COOKIE_QUERY_HISTORY]
        try:
            old_query_history = json.loads(old_query_history)
        except json.ReadError:
            old_query_history = []
        old_query_history = uniq(old_query_history)
        new_query_history.extend(old_query_history)
        new_query_history = uniq(new_query_history)[:MAX_QUERY_HISTORY]
    response = render_to_response("ipinfo.html", locals())
    try:
        new_query_history_str = json.dumps(new_query_history)
        response.set_cookie(
            key=COOKIE_QUERY_HISTORY,
            value=new_query_history_str,
            max_age=86400,
            expires=None,
            path="/",
            domain=None,
            secure=None,
        )
    except json.WriteError:
        response.delete_cookie(key=COOKIE_QUERY_HISTORY)
        logger.error("write error: %r", new_query_history)
    except json.UnknownSerializerError:
        response.delete_cookie(key=COOKIE_QUERY_HISTORY)
        logger.error("unknown serializer error")
    return response
Code example #19
File: jsontest.py Project: yzou/openlibrary
def timeit(label, seq):
    t0 = time.time()
    for x in seq:
        pass
    t1 = time.time()
    print label, "%.2f sec" % (t1 - t0)


if False and __name__ == "__main__":
    timeit("read", read())
    timeit("simplejson.load", (simplejson.loads(json) for json in read()))
    timeit("jsonlib.load", (jsonlib.loads(json) for json in read()))

    timeit("simplejson.load-dump",
           (simplejson.dumps(simplejson.loads(json)) for json in read()))
    timeit("jsonlib.load-dump",
           (jsonlib.dumps(jsonlib.loads(json)) for json in read()))


def bench(count, f, *args):
    times = []
    for _ in range(count):
        t0 = time.time()
        f(*args)
        times.append(time.time() - t0)
    times = sorted(times)
    return "avg %.5f med %.5f max %.5f min %.5f" % (sum(times) / float(
        len(times)), times[int(len(times) / 2.0)], times[-1], times[0])


if True and __name__ == "__main__":
    for b in (
Code example #20
File: jsontest.py Project: hornc/openlibrary-1
def timeit(label, seq):
    t0 = time.time()
    for x in seq:
        pass
    t1 = time.time()
    print label, "%.2f sec" % (t1-t0)

if False and __name__ == "__main__":
    timeit("read", read())
    timeit("simplejson.load", (simplejson.loads(json) for json in read()))
    timeit("jsonlib.load", (jsonlib.loads(json) for json in read()))


    timeit("simplejson.load-dump", (simplejson.dumps(simplejson.loads(json)) for json in read()))
    timeit("jsonlib.load-dump", (jsonlib.dumps(jsonlib.loads(json)) for json in read()))


def bench(count, f, *args):
    times = []
    for _ in range(count):
        t0 = time.time()
        f(*args)
        times.append(time.time() - t0)
    times = sorted(times)
    return "avg %.5f med %.5f max %.5f min %.5f" % (
        sum(times) / float(len(times)),
        times[int(len(times) / 2.0)],
        times[-1],
        times[0]
    )
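A hypothetical call, timing a dumps round-trip on a made-up payload:

    print bench(100, simplejson.dumps, {"a": 1, "b": [1, 2, 3]})
    # e.g. avg 0.00002 med 0.00002 max 0.00011 min 0.00001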
Code example #21
File: dyncommands.py Project: drewp/magma
 def get(self):
     pc = PickCommands(self.settings.graph, user(self.request))
     d = pc.run()
     d.addCallback(lambda cmds: self.write(jsonlib.dumps(cmds)))
     return d
Code example #22
File: tropedist.py Project: drewp/tropegraph
        if not f2:
            continue
        intersect = len(f1.intersection(f2))
        union = len(f1.union(f2))
        # float() guards against Python 2 integer division
        frac = float(intersect) / union if union else 0.0
        shared[(m1, m2)] = (decimal.Decimal("%.4f" % frac), intersect)

edgeId = 0
doc = gexf.Gexf("drewp", "tropes")
out = doc.addGraph("undirected", "static", "common tropes")
for (m1, m2), (frac, intersect) in shared.items():
    n1 = out.addNode(m1, movieName(m1))
    n2 = out.addNode(m2, movieName(m2))
    if intersect:
        out.addEdge(edgeId, m1, m2, weight=intersect)
    edgeId += 1
doc.write(open("out.gexf", "w"))

d3graph = {"nodes": [], "links": []}
for m in movs:
    d3graph['nodes'].append({'name': movieName(m)})
for (m1, m2), counts in shared.items():
    if counts[1]:
        d3graph['links'].append({
            'source': movs.index(m1),
            'target': movs.index(m2),
            'fracShare': counts[0],
            'absShare': counts[1]
        })
open("out.json", "w").write(jsonlib.dumps(d3graph))
Code example #23
import jsonlib as json

cnx.execute(
    """
    alter table proccer_job
        alter column notify type varchar
"""
)

jobs = cnx.execute("select id, notify from proccer_job")
for job_id, notify in jobs:
    if notify is None:
        continue
    cnx.execute("update proccer_job set notify = %s where id = %s", [json.dumps(notify[1:-1].split(",")), job_id])
Code example #24
File: dataset.py Project: ymt123/geoinference
def posts2users(posts_fname,
                extract_user_id,
                working_dir=None,
                max_open_temp_files=256):
    """ 
	This method builds a valid `users.json.gz` file from the `posts.json.gz` file
	specified.  Unless indicated otherwise, the directory containing the posts
	file will be used as the working and output directory for the construction
	process.

	`extract_user_id` is a function that accepts a post and returns a string
	user_id.
	"""

    # figure out the working dir
    if not working_dir:
        working_dir = os.path.dirname(posts_fname)

    # bin the user data
    logger.info('binning user posts')

    # A dict from a user-id to the file handle-id
    user_assignments = {}
    # A dict from the file handle-id to the actual file handle
    file_handles = {}

    # Sanity check methods for ensuring we're reading and writing
    # all the data.
    posts_seen = 0
    user_posts_written = 0

    fh = gzip.open(posts_fname, 'r')
    for line in fh:
        post = jsonlib.loads(line)
        uid = extract_user_id(post)
        posts_seen += 1

        if uid not in user_assignments:

            # Get the temp file this user should be in.
            # Assume that user-ids are randomly distributed
            # in some range such that the last three
            # digits of the id serve as a uniformly
            # distributed hash
            tmp_file_assignment = long(uid) % max_open_temp_files
            if tmp_file_assignment not in file_handles:
                # Write the temp file as gzipped files
                # because this splitting process gets
                # very expensive when processing large
                # datasets
                tmp_fname = os.path.join(
                    working_dir, 'tmp-%03d.json.gz' % tmp_file_assignment)
                logger.debug('creating temp file %s' % tmp_fname)

                tmp_fh = gzip.open(tmp_fname, 'w')

                file_handles[tmp_file_assignment] = tmp_fh
            user_assignments[uid] = tmp_file_assignment

        file_handles[user_assignments[uid]].write(line)

    for idx, tmp_fh in file_handles.items():
        tmp_fh.close()

    # aggregate the users
    logger.info('aggregating user data')

    user_fh = gzip.open(os.path.join(working_dir, 'users.json.gz'), 'w')
    for i in range(max_open_temp_files):
        logger.debug('processing file %d' % i)

        tmp_fname = os.path.join(working_dir, 'tmp-%03d.json.gz' % i)
        tmp_fh = gzip.open(tmp_fname, 'r')

        # aggregate data by tweets
        user_posts = {}
        for line in tmp_fh:
            post = jsonlib.loads(line)
            uid = extract_user_id(post)

            if uid not in user_posts:
                user_posts[uid] = []

            user_posts[uid].append(post)

        # write out the tweets by user
        for uid, posts in user_posts.items():
            user_fh.write('%s\n' % jsonlib.dumps({
                'user_id': uid,
                'posts': posts
            }))
            user_posts_written += len(posts)

        # delete the temporary file
        tmp_fh.close()
        os.remove(tmp_fname)

    # done
    user_fh.close()
    logger.debug("Read %s posts, wrote %s posts to users.json.gz" %
                 (posts_seen, user_posts_written))
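A hypothetical invocation (that each post carries its author id at post['user']['id'] is an assumption about the data layout):

    posts2users('posts.json.gz',
                extract_user_id=lambda post: str(post['user']['id']))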
Code example #25
File: db_types.py Project: CSIS/proccer
 def process_bind_param(self, value, dialect):
     if value is None:
         return value
     else:
         return json.dumps(value)
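The matching read-side hook on a SQLAlchemy TypeDecorator is process_result_value; a minimal sketch that mirrors the None handling above (assumed, not necessarily the project's code):

 def process_result_value(self, value, dialect):
     if value is None:
         return value
     else:
         # Inverse of process_bind_param: decode the JSON text from the DB.
         return json.loads(value)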
Code example #29
 def save(self, *args, **kwargs):
     if self.name == 'Untitled':
         models.Model.save(self, *args, **kwargs) # save to set self.id
         self.name = 'Sheet %d' % (self.id,)
     self.column_widths_json = jsonlib.dumps(self.column_widths)
     models.Model.save(self, *args, **kwargs)
     users[device_id[:-1]]["kitchen"] = users[device_id[:-1]]["kitchen"]+ inlet
     print(users[device_id[:-1]]["kitchen"])
 elif device_id[-1]=="B":
     users[device_id[:-1]]["bathroom"] = users[device_id[:-1]]["bathroom"]+ inlet
     print(users[device_id[:-1]]["bathroom"])
 elif device_id[-1]=="M":
     users[device_id[:-1]]["others"] = users[device_id[:-1]]["others"]+ inlet
     print(users[device_id[:-1]]["others"])
 try:
     # Build the REST API url according to the device id
     request_address = request_address + device_id[:-1]
     if device_id[-1]=="K":
         # Connect to the server and update the value via the REST API
         connection = http.client.HTTPConnection('18.217.240.250', 80)
         connection.connect()
         connection.request('PUT', request_address,
                            jsonlib.dumps({"Kitchen": users[device_id[:-1]]["kitchen"]}),
                            {"X-Parse-Application-Id": "0bfc45c8be2b2e93f018041ff949fe6d09233c0a",
                             "X-Parse-REST-API-Key": "avbs",
                             "Content-Type": "application/json"})
         result = jsonlib.loads(connection.getresponse().read())
     elif device_id[-1] == "B":
         connection = http.client.HTTPConnection('18.217.240.250', 80)
         connection.connect()
         connection.request('PUT', request_address,
                            jsonlib.dumps({"Bathroom": users[device_id[:-1]]["bathroom"]}),
                            {"X-Parse-Application-Id": "0bfc45c8be2b2e93f018041ff949fe6d09233c0a",
                             "X-Parse-REST-API-Key": "avbs",
                             "Content-Type": "application/json"})
         result = jsonlib.loads(connection.getresponse().read())
     elif device_id[-1] == "M":
         connection = http.client.HTTPConnection('18.217.240.250', 80)
         connection.connect()
         connection.request('PUT', request_address,
                            jsonlib.dumps({"Misc": users[device_id[:-1]]["others"]}),
                            {"X-Parse-Application-Id": "0bfc45c8be2b2e93f018041ff949fe6d09233c0a",
                             "X-Parse-REST-API-Key": "avbs",
                             "Content-Type": "application/json"})
         result = jsonlib.loads(connection.getresponse().read())
     # Check whether the update succeeded; the surrounding try/except also
     # catches the case where no internet connection is available.
     for key,value in result.items():
         if key=="updatedAt":
Code example #32
File: Test1.py Project: JiaqiuWang/WebCrawl
"""
以下实例演示了 Python 数据结构转换为JSON:
"""

import jsonlib

# Convert a Python dict to a JSON object
data = {
    'no': 1,
    'name': 'Runoob',
    'url': 'http://runoob.txt.com'
}

json_str = jsonlib.dumps(data)
print("Python 原始数据:", repr(data))
print("JSON 对象:", json_str)