def post(self, request):
    # JSONDecoder has no parse(); decode the request body instead.
    data = JSONDecoder().decode(request.body.decode('utf-8'))
    access_token = data.get('access_token', '')
    try:
        app = SocialApp.objects.get(provider="facebook")
        token = SocialToken(app=app, token=access_token)
        # check token against facebook
        login = fb_complete_login(app, token)
        login.token = token
        login.state = SocialLogin.state_from_request(request)
        # add or update the user in the users table
        complete_social_login(request, login)
        # if we get here we've succeeded
        return JsonResponse({
            'success': True,
            'username': request.user.username,
            'user_id': request.user.pk,
        }, status=200)
    except Exception:
        return JsonResponse({
            'success': False,
            'reason': "Bad Access Token",
        }, status=401)
def lookup_direct(self, reference):
    if reference.type == u"album":
        endpoint = u"v1/albums/{id}"
    elif reference.type == u"artist":
        endpoint = u"v1/artists/{id}"
    elif reference.type == u"playlist":
        # endpoint would be u"v1/users/{user_id}/playlists/{playlist_id}"
        return None  # Unsupported by this plugin
    elif reference.type == u"track":
        endpoint = u"v1/tracks/{id}"
    else:
        return None  # Unsupported by this plugin
    api_url = self.api_base_url + endpoint.format(id=reference.hash)
    response = utility.read_url(api_url)
    if not response:
        return None
    try:
        data = JSONDecoder().decode(response['data'])
    except ValueError:
        return None
    if data.get(u"status"):
        # The API signals errors via a "status" field.
        return None
    else:
        return self._format_result(reference.type, data)
def extract_data(self, file_name):
    """Extracts data from the given JSON-lines file."""
    decoder = JSONDecoder()
    with open(file_name, 'r') as f:
        for line in f:
            feature = decoder.decode(line)
            zipcode_name = int(feature['zipcode'].rsplit("_")[0])
            store_name = int(feature['store_id'].rsplit("_")[1])
            self.all_stores.add(store_name)  # add store to the universal set of stores
            if zipcode_name in self.zips:
                # Add the nearby store if the zip code already exists.
                self.zips[zipcode_name].add_nearby_store(store_name)
            else:
                # Create a new zip code entry otherwise.
                z = Zipcode(zipcode_name)
                z.add_nearby_store(store_name)
                self.zips[zipcode_name] = z
def _getPileupConfigFromJson(self):
    """
    Pileup configuration is stored in a JSON file as a result of DBS
    querying when running PileupFetcher. This method loads that
    configuration from the sandbox and returns it as a dictionary.

    PileupFetcher was called by WorkQueue, which creates the job's
    sandbox; the sandbox then gets migrated to the worker node.
    """
    workingDir = self.stepSpace.location
    jsonPileupConfig = os.path.join(workingDir, "pileupconf.json")
    print("Pileup JSON configuration file: '%s'" % jsonPileupConfig)
    # Load the JSON config file into a Python dictionary.
    decoder = JSONDecoder()
    try:
        with open(jsonPileupConfig, 'r') as f:
            pileupDict = decoder.decode(f.read())
    except IOError:
        m = "Could not read pileup JSON configuration file: '%s'" % jsonPileupConfig
        raise RuntimeError(m)
    return pileupDict
def query(self, argument):
    decoder = JSONDecoder()
    argument = utility.escape(argument)
    api_url = u"http://www.imdbapi.com/?t=%(search_term)s&r=json&plot=short" % \
        {"search_term": argument}
    site_search_url = u"http://akas.imdb.com/find?s=all&q=" + argument
    response = utility.read_url(api_url)
    if not response:
        return u"Couldn't connect to the API :( | Manual search: " + site_search_url
    try:
        data = decoder.decode(response['data'])
    except Exception:
        return u"Couldn't parse the API output :( | Manual search: " + site_search_url
    if data.get(u"Response") != u"True":
        return u"No results found! Maybe you should try searching manually: " + \
            site_search_url
    return (u"%(title)s (%(year)s) - Rating: %(rating)s out of 10 - Genre: %(genre)s - "
            u"http://akas.imdb.com/title/%(id)s/ | More results: %(site_search_url)s") % \
        {u"title": data.get(u"Title", u"Missing title :S"),
         u"year": data.get(u"Year", u"Unknown year"),
         u"rating": data.get(u"Rating", u"N/A"),
         u"genre": data.get(u"Genre", u"Unknown"),
         u"id": data.get(u"ID", u"tt0107838"),
         u"site_search_url": site_search_url}
def urlrequest(stream, url, headers, write_lock, debug=0):
    """URL request function"""
    if debug:
        print("Input for urlrequest", url, headers, debug)
    req = UrlRequest('GET', url=url, headers=headers)
    if debug:
        hdlr = urllib2.HTTPHandler(debuglevel=1)
        opener = urllib2.build_opener(hdlr)
    else:
        opener = urllib2.build_opener()
    time0 = time.time()
    fdesc = opener.open(req)
    data = fdesc.read()
    ctime = time.time() - time0
    fdesc.close()
    if headers['Accept'] == 'text/html':
        # Just report the elapsed time when we requested the HTML format.
        response = {'ctime': str(ctime)}
    else:
        decoder = JSONDecoder()
        response = decoder.decode(data)
    if isinstance(response, dict):
        write_lock.acquire()
        stream.write(str(response) + '\n')
        stream.flush()
        write_lock.release()
def validate(self, frag):
    """
    Validate input as correct JSON. If there are multiple JSON entities
    separated by whitespace, evaluate them separately. If there is an
    incomplete JSON array or object, buffer the fragment and wait to see
    whether the object or array completes.
    Return a list of the valid JSON fragments passed in, or False.
    """
    jsonObj = []
    end = 0
    try:
        decoder = JSONDecoder()
        fragLength = len(frag)
        while end != fragLength:
            obj, end = decoder.raw_decode(frag, idx=end)
            jsonObj.append(obj)
        return jsonObj
    except ValueError:
        print "This JSON is not valid. Shutting down now"
        sys.exit()
def fjson_out(json):
    from json import JSONDecoder
    import pprint
    pp = pprint.PrettyPrinter(indent=4)
    decoder = JSONDecoder()
    result = decoder.decode(json)
    pp.pprint(result)
def client_get_google_user_token(code):
    url = settings.GOOGLE_API_TOKEN_URL
    post_values = (("code", code),
                   ("client_id", settings.GOOGLE_APP_ID),
                   ("client_secret", settings.GOOGLE_APP_SECRET),
                   ("redirect_uri", settings.GOOGLE_REDIRECT_URL),
                   ("grant_type", "authorization_code"))
    post_data = urllib.urlencode(post_values)
    req = urllib2.Request(url=url, data=post_data)
    response = urllib2.urlopen(req)
    decoder = JSONDecoder()
    response_content = decoder.decode(response.read())
    oAuthReceipt = OAuthAccessToken()
    if 'access_token' in response_content:
        oAuthReceipt.user_token = response_content['access_token']
    if 'expires_in' in response_content:
        oAuthReceipt.expires = response_content['expires_in']
    if 'refresh_token' in response_content:
        oAuthReceipt.refresh_token = response_content['refresh_token']
    oAuthReceipt.provider = 'GOOGLE'
    return oAuthReceipt
def sync_weibo(did, page, new_latest):
    d = DataSource.objects.get(pk=did)
    info = JSONDecoder().decode(d.auth_info)
    url = ("https://api.weibo.com/2/statuses/user_timeline.json"
           "?access_token=%s&uid=%s&page=%s&count=200") % (info['access_token'], info['uid'], page)
    statuses_info = JSONDecoder().decode(urllib2.urlopen(url).read())
    if statuses_info['statuses']:
        if page == 1:
            new_latest = statuses_info['statuses'][0]['id']
        for status in statuses_info['statuses']:
            if 'latest' in info and status['id'] == info['latest']:
                # Reached the last synced status: record the new
                # high-water mark and reschedule the daily sync.
                info['latest'] = new_latest
                d.auth_info = JSONEncoder().encode(info)
                d.save(force_update=True)
                sync_weibo.apply_async(args=[did, 1, None], countdown=84600)
                break
            new_paw = Paw.objects.create(
                source=d, type="weibo", content=status['text'],
                create_time=datetime.strptime(
                    status['created_at'][0:20] + status['created_at'][-4:],
                    "%a %b %d %H:%M:%S %Y"))
            if 'original_pic' in status:
                new_img = Img(album=new_paw, upload_time=new_paw.create_time)
                origin_tmp = NamedTemporaryFile(delete=True)
                origin_tmp.write(urllib2.urlopen(status['original_pic']).read())
                origin_tmp.flush()
                new_img.original.save(origin_tmp.name.split('/')[-1] + '.jpg',
                                      File(origin_tmp), save=False)
                thumb_tmp = NamedTemporaryFile(delete=True)
                thumb_tmp.write(urllib2.urlopen(status['thumbnail_pic']).read())
                thumb_tmp.flush()
                new_img.thumbnail.save(thumb_tmp.name.split('/')[-1] + '.jpg',
                                       File(thumb_tmp), save=True)
        if not ('latest' in info and status['id'] == info['latest']):
            # Did not hit the previous high-water mark: fetch the next page.
            sync_weibo.apply_async(args=[did, page + 1, new_latest], countdown=60)
    else:
        if new_latest is not None:
            info['latest'] = new_latest
            d.auth_info = JSONEncoder().encode(info)
            d.save(force_update=True)
        sync_weibo.apply_async(args=[did, 1, None], countdown=84600)
def sync_renren_status(did, page, new_latest):
    d = DataSource.objects.get(pk=did)
    info = JSONDecoder().decode(d.auth_info)
    sig_calc = u"access_token=%scount=1000format=jsonmethod=status.getspage=%suid=%sv=1.0%s" % (
        info['access_token'], page, info['user']['id'], RENREN_SECRET)
    sig = calc_md5(sig_calc)
    statuses_url = ("http://api.renren.com/restserver.do?method=status.gets&v=1.0"
                    "&access_token=%s&count=1000&format=json&page=%s&uid=%s&sig=%s") % (
        info['access_token'], page, info['user']['id'], sig)
    statuses = JSONDecoder().decode(urllib2.urlopen(statuses_url, "").read())
    if statuses:
        if page == 1:
            new_latest = statuses[0]['status_id']
        for status in statuses:
            if 'latest_status' in info and status['status_id'] == info['latest_status']:
                info['latest_status'] = new_latest
                d.auth_info = JSONEncoder().encode(info)
                d.save(force_update=True)
                sync_renren_status.apply_async(args=[did, 1, None], countdown=84600)
                sync_renren_albums.apply_async(args=[d.id, 1], countdown=1)
                break
            p = Paw.objects.create(
                source=d,
                type="renren status",
                content=status['message'],
                create_time=datetime.strptime(status['time'], "%Y-%m-%d %H:%M:%S"))
        if not ('latest_status' in info and status['status_id'] == info['latest_status']):
            sync_renren_status.apply_async(args=[did, page + 1, new_latest], countdown=60)
    else:
        if new_latest is not None:
            info['latest_status'] = new_latest
            d.auth_info = JSONEncoder().encode(info)
            d.save(force_update=True)
        sync_renren_status.apply_async(args=[did, 1, None], countdown=84600)
        sync_renren_albums.apply_async(args=[d.id, 1], countdown=1)
def parseSearchResults(self, result, urlQueryStr):
    """Parse the JSON search result."""
    parsedResults = dict()  # final returned results
    if result != 0:
        decoder = JSONDecoder()
        jsonResult = decoder.decode(result)
        # print "LOG: available result keys:\n" + jsonResult.keys()
        # ----------------- recursive section ----------------
        # If there are more 'pages' of results, recursively get them.
        if 'next_page' in jsonResult.keys():
            next_urlQueryStr = urlQueryStr.split("?", 1)[0] + jsonResult['next_page']
            if 'since_id' in jsonResult.keys():
                # Append the since_id to this query to ensure we don't search too far.
                next_urlQueryStr = next_urlQueryStr + "&since_id=" + jsonResult['since_id_str']
            print "LOG: recursively searching at:\n" + next_urlQueryStr
            # This may return results, which must be merged into the
            # current search results.
            parsedResults = self.getSearchResults(next_urlQueryStr)
        # ------------- end of recursive section -------------
        # Save the currently searched tweets and other info.
        print jsonResult['max_id_str']
        if jsonResult['page'] == 1:
            parsedResults['max_id'] = jsonResult['max_id_str']
        tweetsKey = "tweets_page" + str(jsonResult['page'])
        parsedResults[tweetsKey] = jsonResult['results']  # dict
    return parsedResults
def query(self, term):
    """ Run `gerrit query` with the given `term`.

    Return a list of results as `Change` objects.

    Raise `ValueError` if `term` is not a string.
    """
    results = []
    command = ["query", "--current-patch-set", "--all-approvals",
               "--format JSON", "--commit-message"]
    if not isinstance(term, basestring):
        raise ValueError("term must be a string")
    command.append(escape_string(term))
    result = self._ssh_client.run_gerrit_command(" ".join(command))
    decoder = JSONDecoder()
    for line in result.stdout.read().splitlines():
        # Gerrit's response to the query command contains one or more
        # lines of JSON-encoded strings. The last one is a status
        # dictionary containing the key "type" whose value indicates
        # whether or not the operation was successful.
        # According to http://goo.gl/h13HD it should be safe to use the
        # presence of the "type" key to determine whether the dictionary
        # represents a change or if it's the query status indicator.
        try:
            data = decoder.decode(line)
        except ValueError as err:
            raise GerritError("Query returned invalid data: %s" % err)
        if "type" in data and data["type"] == "error":
            raise GerritError("Query error: %s" % data["message"])
        elif "project" in data:
            results.append(Change(data))
    return results
def test_json(self):
    text = u"""[
    {{
        "canIpForward": false,
        "cpuPlatform": "Intel Ivy Bridge",
        "creationTimestamp": "2015-11-03T08:38:59.701-08:00",
        "description": "",
        "metadata": {{
            "fingerprint": "p_LMICy68MQ=",
            "items": [
                {{
                    "key": "google-cloud-marketplace-solution-key",
                    "value": "bitnami-launchpad:jenkins"
                }},
                {{
                    "key": "google-cloud-marketplace-generate-password",
                    "value": "{type}"
                }},
                {{
                    "key": "bitnami-base-password",
                    "value": "{password}"
                }}
            ],
            "kind": "compute#metadata"
        }}
    }}
]"""
    decoder = JSONDecoder()
    scrubber = JsonScrubber()
    original = text.format(type='bitnami-base-password', password='******')
    expect = decoder.decode(text.format(type=scrubber.REDACTED,
                                        password=scrubber.REDACTED))
    self.assertEqual(expect, decoder.decode(scrubber(original)))
def parseJson(s, conn):
    """
    Parses the JSON string and calls the populate method for inserting
    the data into the 'repository' table.

    input: s    - JSON string
           conn - database connection
    """
    try:
        _w = WHITESPACE.match
        decoder = JSONDecoder()
        s_len = len(s)
        end = 0
        while end != s_len:
            # Skip any whitespace before decoding the next object;
            # raw_decode() fails if idx points at whitespace.
            obj, end = decoder.raw_decode(s, idx=_w(s, end).end())
            end = _w(s, end).end()
            refType = ''
            if obj['type'] == 'CreateEvent':
                try:
                    refType = obj['payload']['ref_type']
                except Exception as e:
                    print e
                if refType == 'repository':
                    populateRepoTable(obj, conn)
    except Exception as e:
        # print 'Error in line:' + str(sys.exc_traceback.tb_lineno)
        print sys.exc_traceback.tb_lineno
        pass
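# Note on the pattern above: WHITESPACE comes from the stdlib's json
# internals (`from json.decoder import WHITESPACE`); it is not a public
# API, but it has long been importable. Skipping whitespace before each
# call matters because JSONDecoder.raw_decode() raises ValueError when
# idx points at a space or newline rather than at the start of a value.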
def test_04_read_user_id(self):
    self.client.login(username='******', password='******')
    response = self.client.get('/user/2')
    self.assertContains(response, 'is_staff', count=1, status_code=200)
    decoder = JSONDecoder()
    user = decoder.decode(response.content)
    self.assertEqual(user['username'], 'pepe')
def sync_douban_collections(did, index, new_latest):
    d = DataSource.objects.get(pk=did)
    info = JSONDecoder().decode(d.auth_info)
    consumer = oauth.Consumer(key=DOUBAN_KEY, secret=DOUBAN_SECRET)
    token = oauth.Token(key=info['oauth_token'], secret=info['oauth_token_secret'])
    client = oauth.Client(consumer, token)
    url = "http://api.douban.com/people/%s/collection?start-index=%s&max-results=50&alt=json" % (
        info['douban_user_id'], index)
    resp, content = client.request(url, "GET")
    collections_info = JSONDecoder().decode(content)
    if collections_info['entry']:
        if index == 1:
            new_latest = collections_info['entry'][0]['id']['$t']
        for collect in collections_info['entry']:
            if 'latest_collection' in info and collect['id']['$t'] == info['latest_collection']:
                info['latest_collection'] = new_latest
                d.auth_info = JSONEncoder().encode(info)
                d.save(force_update=True)
                sync_douban_collections.apply_async(args=[did, 1, None], countdown=84600)
                break
            new_paw = Paw.objects.create(
                source=d,
                type="douban collection",
                content=collect['title']['$t'],
                create_time=datetime.strptime(collect['updated']['$t'][:-6],
                                              "%Y-%m-%dT%H:%M:%S"))
        if not ('latest_collection' in info and collect['id']['$t'] == info['latest_collection']):
            sync_douban_collections.apply_async(args=[did, index + 50, new_latest], countdown=60)
    else:
        if new_latest is not None:
            info['latest_collection'] = new_latest
            d.auth_info = JSONEncoder().encode(info)
            d.save(force_update=True)
        sync_douban_collections.apply_async(args=[did, 1, None], countdown=84600)
def get_from_json(cls, json):
    """
    Creates a view from a JSON string that looks like this:

    {'s': <page_id>,
     'v': {'<space_id>': <widget_id>, '<space_id>': <widget_id>, [...]},
     'b': {'<box_id>': [<widget_id>, <widget_id>, [...]]},
     'c': {'<wgt_id>': {<widget_args>}, '<wgt_id>': {<widget_args>}, [...]},
     'p': <wgt_id>}

    's' is the page that this view is going to be rendered on
    'v' is a dictionary that maps space_ids to widget_ids
    'b' represents box-packed widgets
    'c' maps parameters to widgets
    'p' is an OPTIONAL parameter: if an HTML form is submitted, this
        contains the widget_id the form data is posted to
    """
    json = unquote(json)
    jd = JSONDecoder()
    try:
        json = jd.decode(json)
    except ValueError:
        raise ViewException(ViewException.get_msg(7))
    view = View(cls._core)
    if 's' in json:
        view.set_page(json['s'])
    else:
        raise ViewException(ViewException.get_msg(6))
    if 'v' in json:
        # Transform the indices back to int.
        json['v'] = dict((int(k), v) for k, v in json['v'].items())
        view.set_space_widget_mapping(json['v'])
    else:
        view.set_space_widget_mapping({})
    if 'b' in json:
        json['b'] = dict((int(k), v) for k, v in json['b'].items())
        view.set_box_mapping(json['b'])
    else:
        view.set_box_mapping({})
    if 'c' in json:
        json['c'] = dict((int(k), v) for k, v in json['c'].items())
        view.set_widget_param_mapping(json['c'])
    else:
        view.set_widget_param_mapping({})
    if 'p' in json:
        view.set_post_widget_id(json['p'])
    return view
def test_05_create_user(self):
    self.client.login(username='******', password='******')
    json = simplejson.dumps({"username": "******",
                             "first_name": "",
                             "last_name": "",
                             "is_active": True,
                             "is_superuser": False,
                             "is_staff": False,
                             "last_login": "******",
                             "groups": [],
                             "user_permissions": [],
                             "password": "******",
                             "email": "",
                             "date_joined": "2011-03-23 06:02:28"})
    response = self.client.post('/user/', data=json, content_type='application/json')
    self.assertContains(response, 'username', count=1, status_code=200)
    decoder = JSONDecoder()
    user = decoder.decode(response.content)
    self.assertEqual(user['username'], 'otro_user')
def ajax_get_event_attendees():
    hackathon = get_object_or_404(Hackathon, id=request.form["hackathon_id"])
    url = ("https://graph.facebook.com/" + str(hackathon.facebook_id) +
           "/attending?access_token=" + session["fb_token"])
    req = urllib2.Request(url)
    print url
    response = urllib2.urlopen(req)
    decoder = JSONDecoder()
    attending = decoder.decode(response.read())["data"]
    return {"attending": attending}
def parse_sls(self):
    """
    Parse a YARN SLS trace file. This is a JSON file containing multiple
    job objects.
    """
    json_decoder = JSONDecoder()
    job_objects = []
    value_error_pattern = re.compile(r'Expecting .+ \(char (\d+)\)$')
    with open(self.sls_file) as sls_file:
        object_chunk = ''
        last_error_idx = -1
        # Read file in chunks of lines.
        for chunk in lines_per_n(sls_file, LINES_TO_READ):
            # Remove all whitespace.
            chunk = chunk.replace(" ", "")
            chunk = chunk.replace("\n", "")
            # Add (hopefully good) whitespace.
            chunk = re.sub(r"{", r'{\n', chunk)
            chunk = re.sub(r"}", r'}\n', chunk)
            chunk = re.sub(r"\[", r'[\n', chunk)
            chunk = re.sub(r"\]", r']\n', chunk)
            # Further sanitize some JSON stuff.
            chunk = re.sub(r"{\s*'?(\w)", r'{"\1', chunk)
            chunk = re.sub(r",\s*'?(\w)", r',"\1', chunk)
            chunk = re.sub(r"(\w)'?\s*:", r'\1":', chunk)
            chunk = re.sub(r":\s*'(\w+)'\s*([,}])", r':"\1"\2', chunk)
            object_chunk += chunk
            # Try to parse the chunk read so far.
            chunk_parsing_done = False
            # Chunk may contain more than one object.
            while not chunk_parsing_done:
                try:
                    parse_result = json_decoder.raw_decode(object_chunk)
                    last_error_idx = -1
                except ValueError as e:
                    m = value_error_pattern.match(e.message)
                    if m:
                        # Get the index that the parsing error occurred on.
                        idx = int(m.group(1))
                        if last_error_idx == -1 or last_error_idx != idx:
                            # Chunk is not yet complete, keep reading.
                            last_error_idx = idx
                            break
                    # The error at the current index was not due to an
                    # incomplete chunk.
                    SLSParser._print_chunk(object_chunk)
                    raise e
                # Add the decoded job object to the array.
                job_objects.append(parse_result[0])
                # Check if there's trailing data from another object.
                object_end = parse_result[1]
                if object_end != len(object_chunk):
                    # Trim the chunk for the next object.
                    object_chunk = object_chunk[object_end + 1:]
                if not object_chunk.isspace():
                    chunk_parsing_done = True
    return job_objects
def loads_invalid_obj_list(s):
    decoder = JSONDecoder()
    s_len = len(s)
    objs = []
    end = 0
    while end != s_len:
        obj, end = decoder.raw_decode(s, idx=end)
        objs.append(obj)
    return objs
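# A minimal usage sketch for loads_invalid_obj_list (the input literal is
# illustrative, not from the original source). The objects must be back to
# back: raw_decode() does not skip separating whitespace, so input such as
# '{"a": 1} {"b": 2}' would raise ValueError here.
objs = loads_invalid_obj_list('{"a": 1}{"b": 2}[3, 4]')
print(objs)  # -> [{'a': 1}, {'b': 2}, [3, 4]]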
def load_states():
    """Return a dictionary from state names to lists of polygons.

    >>> len(load_states()['HI'])  # Hawaii has 5 islands
    5
    """
    with open(DATA_PATH + 'states.json', encoding='utf8') as json_data_file:
        states = JSONDecoder().decode(json_data_file.read())
    return {state: format_shapes(shapes) for state, shapes in states.items()}
def to_dict(self):
    decoder = JSONDecoder()
    result = dict(
        [(p[:-len('_json')], decoder.decode(getattr(self, p)))
         if p.endswith('_json') else (p, getattr(self, p))
         for p in self.properties()]
        + [('id', unicode(self.key().id()))])
    return result
def json_to_object(entity_cls, json_str):
    if isinstance(entity_cls, type):  # entity_cls must be a class
        entity = entity_cls()
        json_dict = JSONDecoder().decode(json_str)
        for key, value in json_dict.items():
            if hasattr(entity, key):
                setattr(entity, key, value)
        return entity
    return None
def compare_and_notify():
    """
    Iterate through all charts, check whether the data is outside the
    marks, and notify the related people.
    """
    json_decoder = JSONDecoder()
    charts = session.query(Charts).all()
    # now = datetime.datetime.now()
    now = datetime.datetime.strptime(sys.argv[1], "%Y-%m-%d_%H:%M:%S")
    for i in charts:
        logger.debug("checking chart \"%s\"" % i.name)
        if i.alert_mode and i.alert_enable:
            try:
                duration = json_decoder.decode(i.alert_duration)
                alert_start = duration["start"]
                alert_end = duration["end"]
                if not (alert_start <= now <= alert_end):
                    continue
            except Exception:
                pass
            td = datetime.date.today()
            tablename = 't_chartdata_%s' % td.strftime('%Y%m%d')
            latency = i.alert_latency
            delta5mins = datetime.timedelta(minutes=5)
            delta7days = datetime.timedelta(days=7)
            tablename_lastweek = 't_chartdata_%s' % (td - delta7days).strftime('%Y%m%d')
            start = datetime.datetime(now.year, now.month, now.day, now.hour,
                                      now.minute) - datetime.timedelta(minutes=latency)
            end = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute)
            data_this_time = __get_chartdata(i, tablename, start, end)
            logger.debug("data_this_time: %s" % str(data_this_time))
            if i.alert_mode in (CONST_HWM, CONST_LWM):
                if i.alert_mode == CONST_HWM:
                    level, msg = __get_alert_level_hwm(i, data_this_time)
                if i.alert_mode == CONST_LWM:
                    level, msg = __get_alert_level_lwm(i, data_this_time)
            else:
                start = start - delta7days
                end = end - delta7days
                data_last_week = __get_chartdata(i, tablename_lastweek, start, end)
                logger.debug("data_last_week: %s" % str(data_last_week))
                level, msg = __get_alert_level_range(i, data_this_time, data_last_week)
            logger.debug("%s: %s, %s" % (i.name, level, msg))
            add_events(i, level, msg)
            if level == CONST_LEVEL_CRITICAL:
                __notify(i, msg)
def _setupFunctions(self):
    with open('/var/www/fabui/application/plugins/pcbmill/assets/python/jsbuttons.json', 'r') as f:
        funcDict = JSONDecoder().decode(f.read())
    for key, value in funcDict.iteritems():
        if value['function'] != 'notUsed' and value['function'] != '':
            self.activeFunctions[key] = value
def __call__(self, instructions):
    jd = JSONDecoder()
    je = JSONEncoder()
    answer = {}
    try:
        instructions = jd.decode(instructions)
    except ValueError:
        answer['error'] = "could not decode instructions"
        self._core.response_body.append(je.encode(answer))
        return
def loads_invalid_obj_list(s):
    decoder = JSONDecoder()
    s_len = len(s)
    objs = []
    end = 0
    while end != s_len:
        obj, end = decoder.raw_decode(s, idx=end)
        objs.append(obj)
        # if obj['repository'] is not None: etlMySQLWatch(obj)
    return objs
def get_channel_list(self):
    """Fetch the channel list."""
    try:
        r = urlopen(self.channel_list_url)
        decoder = JSONDecoder()
        self.channel_list = decoder.decode(r.read().decode())
    except HTTPError as err:
        print(err)
def getitems(subreddit, previd=''):
    """Return list of items from a subreddit."""
    url = 'http://www.reddit.com/r/%s.json' % subreddit
    # Get items after the item with 'id' of previd.
    hdr = {'User-Agent': 'RedditImageGrab script.'}
    if previd:
        url = '%s?after=t3_%s' % (url, previd)
    try:
        req = Request(url, headers=hdr)
        json_data = urlopen(req).read()
        data = JSONDecoder().decode(json_data)
        items = [x['data'] for x in data['data']['children']]
    except HTTPError as ERROR:
        error_message = '\tHTTP ERROR: Code %s for %s.' % (ERROR.code, url)
        sys.exit(error_message)
    except ValueError as ERROR:
        if ERROR.args[0] == 'No JSON object could be decoded':
            error_message = 'ERROR: subreddit "%s" does not exist' % subreddit
            sys.exit(error_message)
        raise ERROR
    return items
def stream_json(file_obj, buf_size=1024, decoder=JSONDecoder()):
    '''
    Deal with multiple JSON objects in one file.

    Sometimes a JSON file contains more than one JSON object; this
    generator decodes them one at a time.

    :param file_obj: file object to read from
    :param buf_size: read block size
    :param decoder: JSONDecoder instance to use
    :return: JSON objects (generator)
    '''
    NOT_WHITESPACE = re.compile(r"[^\s]")
    buf = ""
    ex = None
    while True:
        block = file_obj.read(buf_size)
        if not block:
            break
        buf += block
        pos = 0
        while True:
            # Skip whitespace between objects.
            match = NOT_WHITESPACE.search(buf, pos)
            if not match:
                break
            pos = match.start()
            try:
                obj, pos = decoder.raw_decode(buf, pos)
            except JSONDecodeError as e:
                # Incomplete object: keep the remainder and read more.
                ex = e
                break
            else:
                ex = None
                yield obj
        buf = buf[pos:]
    if ex is not None:
        raise ex
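# A short usage sketch for stream_json, assuming `re`, `JSONDecoder` and
# `JSONDecodeError` are imported from the stdlib (the sample data below is
# illustrative, not from the original source):
import io

sample = io.StringIO('{"a": 1} {"b": 2} [1, 2, 3]')
for obj in stream_json(sample):
    print(obj)  # -> {'a': 1}, then {'b': 2}, then [1, 2, 3]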
def find_face(imgpath):
    print("finding")
    http_url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
    data = {
        "api_key": key,
        "api_secret": secret,
        "image_url": imgpath,
        "return_landmark": 1
    }
    # Note: both image_url and image_file are sent here; Face++ needs only
    # one of them, and imgpath is also opened as a local file below.
    files = {"image_file": open(imgpath, "rb")}
    response = requests.post(http_url, data=data, files=files)
    req_con = response.content.decode('utf-8')
    req_dict = JSONDecoder().decode(req_con)
    print(simplejson.dumps(req_dict))
    faces = req_dict['faces']
    rectangle = faces[0]['face_rectangle']
    # print(rectangle)
    return rectangle
def Detect_Faces(detect_path, app_key, detect_url):
    count = 1
    for Img in os.listdir(detect_path):
        uid, endstr = os.path.splitext(Img)
        image = Get_File_Content(detect_path + Img)  # read the image
        files = {"image_file": image}
        start_time = time.clock()
        response = requests.post(detect_url, data=app_key, files=files)
        end_time = time.clock()
        req_con = response.content.decode('utf-8')
        req_dict = JSONDecoder().decode(req_con)
        if "error_message" in req_dict:
            msg = uid + "," + req_dict["error_message"] + ",detection error" + "\n"
            file = "Face++_Detect_Fail.txt"
        else:
            Use_Time = end_time - start_time
            msg = uid + "," + str(Use_Time) + "," + str(req_dict) + "\n"
            file = "Face++_Detect_TimeUse.txt"
            count += 1
        print(req_dict)
        Write_Txt_Msg(file, msg)
        time.sleep(0)
    print("Total images detected successfully:", count - 1)
    return
def getScoreWholeProcess(belonger, belonger_sub_face_name, dirpath, get_user_id):
    # Note: the call below picks out the relatively largest of the
    # owner's faces.
    size, belonger, belongerFace_JPG_Wholepath = get_theMaxPic(
        belonger, belonger_sub_face_name, dirpath, get_user_id)
    if (size[0] < 48) or (size[1] < 48):
        ResizeImage(belongerFace_JPG_Wholepath, belongerFace_JPG_Wholepath, 80, 80)
    response = Get_TheFaceScore(belongerFace_JPG_Wholepath)
    req_con = response.content.decode('utf-8')
    req_dict = JSONDecoder().decode(req_con)
    print(req_dict)
    json_FaceScore_TheWholePath = dirpath + "\\" + get_user_id + "\\" + "Face_Scores.json"
    f1 = open_Json_File_To_Write(json_FaceScore_TheWholePath)
    write_Score(req_dict, f1, belonger)
class JsonResource(resource.Resource):
    json_encoder = JSONEncoder()
    json_decoder = JSONDecoder()

    def render(self, txrequest):
        r = resource.Resource.render(self, txrequest)
        return self.render_object(r, txrequest)

    def render_object(self, obj, txrequest):
        r = self.json_encoder.encode(obj) + "\n"
        txrequest.setHeader('Content-Type', 'application/json')
        txrequest.setHeader('Access-Control-Allow-Origin', '*')
        txrequest.setHeader('Access-Control-Allow-Methods',
                            'GET, POST, PATCH, PUT, DELETE')
        txrequest.setHeader('Access-Control-Allow-Headers', 'X-Requested-With')
        txrequest.setHeader('Content-Length', len(r))
        return r

    def parse_jsonrpc(self, txrequest):
        if isinstance(txrequest.content, io.IOBase):
            data = txrequest.content.read()
        else:
            data = txrequest.content.getvalue()
        return self.json_decoder.decode(data)
def faceAPI(file_path):
    http_url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
    http_url2 = 'https://api-cn.faceplusplus.com/facepp/v3/face/analyze'
    key = "RbgpeibJA9wz3csQ5CFS-uQBHMct6e6P"
    secret = "i94IXJddG57j3JMh97jnot7gwUi3v9cX"
    data = {
        "api_key": key,
        "api_secret": secret,
        "return_attributes": "gender,age,smiling,beauty",
        "return_landmark": 2
    }
    files = {"image_file": open(file_path, "rb")}
    response = requests.post(http_url, data=data, files=files)
    # The response content is JSON; decode it into a dict.
    req_con = response.content.decode('utf-8')
    req_dict = JSONDecoder().decode(req_con)
    landmark = req_dict['faces'][0]['landmark']
    img = img_read(file_path)
    points = []
    Width, Length, Dim = img.shape
    print(img.shape)
    points_add = [(Length - 1, Width - 1), (math.floor(Length / 2), Width - 1),
                  (0, Width - 1), (Length - 1, math.floor(Width / 2)),
                  (0, math.floor(Width / 2)), (Length - 1, 0),
                  (math.floor(Length / 2), 0), (0, 0)]
    # Manually add the border points.
    for point_add in points_add:
        points.append([point_add[1], point_add[0]])
    for i, val in enumerate(list(landmark.values())):
        x = val['x']
        y = val['y']
        points.append([x, y])
    return points
def get_messages(decoder: json.JSONDecoder, buf: str) -> Iterator[Tuple[str, Any]]:
    """Parse individual messages from an input buffer.

    :decoder: A json decoder.
    :buf: The buffer to decode.
    :yield: Tuples of (unused-buf, message).
    """
    message = 'dummy'
    while buf and message:
        message = ''
        try:
            message, index = decoder.raw_decode(buf)
        except json.JSONDecodeError:
            log("Decode error", repr(buf))
            return
        except Exception:
            f = io.StringIO()
            traceback.print_exc(file=f)
            log(f.getvalue())
            raise
        buf = buf[index:].lstrip()
        yield buf, message
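# A brief usage sketch for get_messages (the buffer contents below are
# illustrative; each iteration yields the not-yet-consumed remainder of
# the buffer together with one decoded message):
decoder = json.JSONDecoder()
for rest, msg in get_messages(decoder, '{"op": "add"} {"op": "del"}'):
    print(msg, "| remaining:", repr(rest))
# -> {'op': 'add'} | remaining: '{"op": "del"}'
# -> {'op': 'del'} | remaining: ''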
def on_data(self, data):
    global retry
    retry = 0
    post = JSONDecoder().decode(data)
    try:
        if args.debug:
            print(post['user']['name'] + ' @' + post['user']['screen_name'] + ' ' + post['created_at'])
            print(post['text'] + "\n")
        for tag in post['entities']['hashtags']:
            if tag['text'].lower() in monitor_words['star']:
                star.blink(on_time=args.star_on_time,
                           fade_out_time=args.star_off_time,
                           n=args.star_twinkle)
            elif tag['text'].lower() in monitor_words['tree']:
                choice(tree).blink(on_time=args.tree_on_time,
                                   fade_out_time=args.tree_off_time,
                                   n=args.tree_twinkle)
    except KeyError as e:
        # Happens sometimes. The data looks something like this:
        # {"limit":{"track":6,"timestamp_ms":"1514107882539"}}
        # Maybe Twitter is warning about falling behind?
        if args.debug:
            print("KeyError: " + str(e))
def post(self, cluster_name):
    try:
        project_name = request.args.get('project', "admin")
        json_data = request.get_json(force=True)
        # Round-trip through a string to normalize to plain dicts.
        json_str = json.dumps(json_data)
        dict_data = JSONDecoder().decode(json_str)
        workDir = dict_data.get('workDir')
        sourceDir = dict_data.get('sourceDir', self.sourceDir)
        scriptName = dict_data.get('scriptName', self.scriptName)
        scriptPath = os.path.join(sourceDir, scriptName)
        sshKeyPath = dict_data.get('sshKeyPath')
        masterIP = shell.call(
            get_sahara_cluster_s_masterIP_cmd(project_name, cluster_name)).strip()
        if not workDir or not sshKeyPath or not masterIP:
            abort(400, message="bad parameter in request body")
        shell.call("scp -i %s %s centos@%s:/home/centos" % (sshKeyPath, scriptPath, masterIP))
        output = shell.call("ssh -i %s centos@%s \"cd %s; /usr/bin/bash %s\""
                            % (sshKeyPath, masterIP, workDir, scriptName))
        return output_json(output, 200)
    except Exception:
        log.exception(traceback.format_exc())
        abort(400, message="Request failed")
class AsyncJson:
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=4,
        separators=None,
        default=None,
    )
    _default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None)
    _default_executor = concurrent.futures.ThreadPoolExecutor

    def __init__(self,
                 target_path: t.Optional[str] = None,
                 executor: t.Optional[concurrent.futures.ThreadPoolExecutor] = None,
                 worker: t.Optional[int] = None):
        self.target_path = target_path
        self.executor = executor
        self.worker = worker

    @t.no_type_check
    async def dump(self,
                   obj: t.Dict[str, t.Any],
                   fp: t.Optional[t.Union[io.TextIOBase, io.BufferedIOBase]] = None,
                   *,
                   mode: t.Optional[str] = 'w',
                   **kw):
        """
        Coroutine to save a sample in JSON format.

        :param obj: Data to be saved in JSON format.
        :param fp: File object.
        :param mode: File mode: 'w', 'r', 'rb', 'wb'.
        :param kw: Additional arguments.
        """
        loop = asyncio.get_event_loop()
        iterable = self.make_iterencode(obj)

        def sync_write():
            for chunk in iterable:
                fp.write(chunk)

        async def async_write():
            for chunk in iterable:
                await fp.write(chunk)

        if fp is None:
            fp = get_file_io(self.target_path, mode)
        if not hasattr(fp, 'close'):
            raise AttributeError("fp must have close method")
        if asyncio.iscoroutine(fp):
            fp = await fp.open()
            await async_write()
            await fp.close()
        elif isinstance(fp, (AsyncBufferedIOBase, AsyncTextIOWrapper)):
            await async_write()
        else:
            await loop.run_in_executor(self.executor, sync_write)

    @t.no_type_check
    async def load(self,
                   fp: t.Optional[t.Union[io.TextIOBase, io.BufferedIOBase]] = None,
                   *,
                   mode='rb'):
        """
        Coroutine to load a sample from JSON format.

        :param fp: File object.
        :param mode: File mode: 'w', 'r', 'rb', 'wb'.
        """
        if fp is None:
            fp = get_file_io(self.target_path, mode)  # type: ignore
        if not hasattr(fp, 'close'):
            raise AttributeError("fp must have close method")
        if asyncio.iscoroutine(fp):
            fp = await fp.open()
            read = await fp.read()
        elif isinstance(fp, (AsyncBufferedIOBase, AsyncTextIOWrapper)):
            read = await fp.read()
        else:
            read = fp.read()
        if isinstance(read, str):
            if read.startswith('\ufeff'):
                raise JSONDecodeError(
                    "Unexpected UTF-8 BOM (decode using utf-8-sig)", read, 0)
        else:
            if not isinstance(read, (bytes, bytearray)):
                raise TypeError(
                    f'the JSON object must be str, bytes or bytearray, '
                    f'not {read.__class__.__name__}')
            read = read.decode(detect_encoding(read), 'surrogatepass')
        json_data = self._default_decoder.decode(read)
        return json_data

    def make_iterencode(self, obj):
        return self._default_encoder.iterencode(obj)
def __init__(self, cmd, cwd, args=None):
    self.cmd = cmd
    self.cwd = cwd
    # Avoid a shared mutable default argument.
    self.args = args if args is not None else []
    self.je = JSONEncoder(ensure_ascii=True)
    self.jd = JSONDecoder()
def read(cls, input_string, schema, index=0):
    """Deserialise an input JSON string to a Python object."""
    decoder = JSONDecoder()
    decoded, index = decoder.raw_decode(input_string, index)

    def decode(input, schema):
        # Special case for handling result codes.
        if schema.role == Schema.Role.EResult:
            if not isinstance(input, int):
                raise ValueError("Could not interpret " + str(input) +
                                 " as a result code")
            return Result(input)
        # Special case for handling functions. Note that only outputs
        # (and return values) are read.
        if schema.role == Schema.Role.EFunction:
            output_schemas = [
                s[1] for s in schema.sub_schemas
                if (s[1].role is Schema.Role.EReturn) or (s[1].role is Schema.Role.EOutput)
            ]
            return tuple(
                decode(element[0], element[1])
                for element in zip(input, output_schemas))
        # Special case for handling callbacks.
        if schema.role == Schema.Role.ECallback:
            if len(input) != len(schema.sub_schemas):
                raise ValueError(
                    "Could not interpret " + str(input) +
                    " as callback parameter tuple of length " +
                    str(len(schema.sub_schemas)))
            return tuple(
                decode(element[0], element[1][1])
                for element in zip(input, schema.sub_schemas))
        if schema.type == Schema.Type.EVoid:
            return None
        if schema.type == Schema.Type.EBool:
            if isinstance(input, bool):
                return input
            if isinstance(input, int):
                return False if input == 0 else True
            raise ValueError("Could not interpret " + str(input) +
                             " as " + schema.type.name)
        if schema.type.value >= Schema.Type.EInt8.value and schema.type.value <= Schema.Type.EFloat64.value:
            if not isinstance(input, numbers.Number):
                raise ValueError("Could not interpret " + str(input) +
                                 " as " + schema.type.name)
            return input
        if schema.type == Schema.Type.EString:
            if not isinstance(input, string_types):
                raise ValueError("Could not interpret " + str(input) +
                                 " as " + schema.type.name)
            return input
        if schema.type == Schema.Type.EArray:
            if len(input) != schema.count:
                raise ValueError("Could not interpret " + str(input) +
                                 " as array of length " + str(schema.count))
            return tuple(
                decode(element, schema.sub_schemas[0][1]) for element in input)
        if schema.type == Schema.Type.EList:
            return [decode(element, schema.sub_schemas[0][1]) for element in input]
        if schema.type == Schema.Type.ETuple:
            if len(input) != len(schema.sub_schemas):
                raise ValueError("Could not interpret " + str(input) +
                                 " as tuple of length " + str(len(schema.sub_schemas)))
            return tuple(
                decode(element[0], element[1][1])
                for element in zip(input, schema.sub_schemas))
        if schema.type == Schema.Type.ENamedTuple:
            record = cls.schema_records.get(schema.type_name, None)
            if not record:
                raise ValueError("Could not interpret unknown type " + schema.type_name)
            output = record.object()
            if isinstance(input, dict):
                for names, sub_schema in zip(record.pythonic_names.items(),
                                             schema.sub_schemas):
                    input_element = input.get(names[0], None)
                    if input_element:
                        setattr(output, names[1], decode(input_element, sub_schema[1]))
            else:
                if len(input) != len(record.pythonic_names):
                    raise ValueError("Could not interpret " + str(input) +
                                     " as class with " +
                                     str(len(record.pythonic_names)) + " members")
                for input_element, names, sub_schema in zip(
                        input, record.pythonic_names.items(), schema.sub_schemas):
                    setattr(output, names[1], decode(input_element, sub_schema[1]))
            return output
        if schema.type.value >= Schema.Type.EEnum8.value and schema.type.value <= Schema.Type.EEnum32.value:
            record = cls.schema_records.get(schema.type_name, None)
            if not record:
                raise ValueError("Could not read unregistered enum type " + str(input))
            return record.object["E" + input]
        if schema.type == Schema.Type.ERef:
            return decode(input, cls.schema_records[schema.type_name].schema)

    return decode(decoded, schema)
def __init__(self, arena=None, encoding=None, object_hook=None, **kwargs):
    JSONDecoder.__init__(self, encoding, object_hook, **kwargs)
    if not self.object_hook:
        self.object_hook = self.json_to_python
    self.arena = arena
def __init__(self, *args, **kwargs):
    JSONDecoder.__init__(self, object_hook=self.dict_to_object, *args, **kwargs)
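# A self-contained sketch of the subclass pattern above (the class name
# and the hook's behavior are illustrative, not from the original source):
from json import JSONDecoder

class HookedDecoder(JSONDecoder):
    def __init__(self, *args, **kwargs):
        JSONDecoder.__init__(self, object_hook=self.dict_to_object, *args, **kwargs)

    def dict_to_object(self, d):
        # Receives every decoded JSON object; return it (or a replacement).
        return d

print(HookedDecoder().decode('{"x": 1}'))  # -> {'x': 1}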
hostname = determine_helper_hostname(session, logger, config, dbswitch)
if hostname:
    args = determine_helper_args(config)
    args.append(hostname)
else:
    args = []
args.extend([importer, str(dbswitch.primary_name)])
try:
    out = run_command(args)
except ProcessException as err:
    raise ArgumentError("Failed to run switch discovery: %s" % err)
data = JSONDecoder().decode(out)
# Safety net: if the discovery program did not manage to collect any usable
# information, do not do anything.
if not data["interfaces"]:
    raise ArgumentError("Discovery returned no interfaces, aborting.")
if "model" in data and "vendor" in data:
    dbmodel = Model.get_unique(session, name=data["model"], vendor=data["vendor"])
else:
    dbmodel = None
if "serial" in data:
    serial_no = data["serial"]
else:
ii][d.left() - ww + jj]
cv2.imwrite(current_face_dir + "/img_face_" + str(cnt_ss) + ".jpg", im_blank)
print("Saved locally:",
      str(current_face_dir) + "/img_face_" + str(cnt_ss) + ".jpg")
# Send the face image to the API and receive the result.
filepath = str(str(current_face_dir) + "/img_face_" + str(cnt_ss) + ".jpg")
files = {'image_file': open(filepath, 'rb')}
response = requests.post(http_url, data=data, files=files)
req_con = response.content.decode('utf-8')
req_dict = JSONDecoder().decode(req_con)
# Extract the age.
age = req_dict['faces'][0]['attributes']['age']['value']
# Extract the gender.
is_famle = req_dict['faces'][0]['attributes']['gender']['value']
# Show the number of faces.
cv2.putText(img_rd, "Faces: " + str(len(faces)), (20, 100), font, 0.8,
class ServiceHandler:
    def __init__(self, service):
        self.decoder = JSONDecoder(strict=False)
        self.encoder = JSONEncoder(ensure_ascii=True, sort_keys=False)
        self.service = service
        self.retry = False

    def handleData(self, data):
        try:
            obj = self.decoder.decode(data)
            result = self.handleRequest(obj)
            return result
        except Exception as err:
            log.error("Internal error: %s" % str(err))
            log.error(data, exc_info=True)
            return self.__getError(None, -32603, err)

    def handleRequest(self, req):
        """Handle a request by calling the appropriate method the service exposes."""
        # id of the request object
        if "id" not in req:
            return self.__getError(None, -32600, "'id' member must be given.")
        idnr = req["id"]
        # version of the json-rpc protocol
        if "jsonrpc" in req:
            if not req["jsonrpc"] == "2.0":
                return self.__getError(idnr, -32600, "json-rpc protocol must be '2.0'.")
        else:
            return self.__getError(idnr, -32600, "jsonrpc member must be given.")
        # method to call
        if "method" not in req:
            return self.__getError(None, -32600, "'method' member must be given.")
        method = req["method"]
        if not hasattr(self.service, method):
            return self.__getError(None, -32601, "Method %s not found." % method)
        # params of the method
        if "params" in req:
            args = req["params"]
            if not isinstance(args, (list, dict)):
                return self.__getError(
                    idnr, -32600, "params must be either a list or a dictionary")
            if args is None:
                args = []
        else:
            args = []
        obj = None
        try:
            if hasattr(self.service, "_prepare"):
                prepare = getattr(self.service, "_prepare")
                prepare()
            obj = getattr(self.service, method)
            if isinstance(args, list):
                data = self.__getResult(idnr, obj(*args))
            elif isinstance(args, dict):
                data = self.__getResult(idnr, obj(**args))
            if hasattr(self.service, "_complete"):
                complete = getattr(self.service, "_complete")
                complete()
            return data
        except TypeError as err:
            log.error("Invalid params: %s" % str(err), exc_info=True)
            return self.__getError(idnr, -32602, err)
        except AttributeError as err:
            log.error("Parse error: %s" % str(err), exc_info=True)
            return self.__getError(idnr, -32700, err)
        except ValueError as err:
            log.error("Parse error: %s" % str(err), exc_info=True)
            return self.__getError(idnr, -32700, err)

    def __getResult(self, idnr, result):
        obj = {"jsonrpc": "2.0", "id": idnr}
        obj["result"] = result
        try:
            return self.encoder.encode(obj)
        except Exception as err:
            log.error("JSON failed to encode: %s" % str(err), exc_info=True)
            return self.__getError(idnr, -32603, err)

    def __getError(self, idnr, code, exception):
        obj = {"jsonrpc": "2.0", "id": idnr}
        error = {
            "code": code,
            "message": str(exception),
        }
        if not isinstance(exception, str):
            error["data"] = {
                "exception": exception.__class__.__name__,
                "traceback": utils.getTraceback()
            }
        else:
            error["data"] = None
        obj["error"] = error
        try:
            return self.encoder.encode(obj)
        except Exception as err:
            return self.responseError(500, str(err))
def __init__(self):
    JSONDecoder.__init__(self, object_hook=self.object_hook)
    try:
        ip_network(ip_as_string)
        return True
    except ValueError:
        return False


def to_range(network_with_mask):
    """Retrieve the first and last IP address for a given network"""
    ip_range_start = ip_network(network_with_mask).network_address
    ip_range_end = ip_network(network_with_mask).broadcast_address
    return ip_range_start, ip_range_end


# Retain policy order when reading in data.
custom_decoder = JSONDecoder(object_pairs_hook=OrderedDict)
with open('output/MGT-CLOUD_run_conf.json') as f:
    policy = custom_decoder.decode(f.read())

# Set up Jinja2 environment.
loader = jinja2.FileSystemLoader(searchpath="./templates")
env = jinja2.Environment(loader=loader, extensions=['jinja2.ext.do'])
env.filters.update({
    # Inject custom environment filters.
    'deepest_node': deepest_node,
    'retrieve_groups': retrieve_groups,
    'mask_to_cidr': mask_to_cidr,
    'flatten': flatten,
    'check_dns': check_dns,
    'check_wildcard': check_wildcard,
    'check_ip_addr': check_ip_addr,
    'to_range': to_range
})
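# A small illustration of the object_pairs_hook used above (the input
# literal is illustrative): key/value pairs are delivered to the hook in
# document order, so an OrderedDict retains the original key order of the
# policy file.
from collections import OrderedDict
from json import JSONDecoder

dec = JSONDecoder(object_pairs_hook=OrderedDict)
print(dec.decode('{"b": 1, "a": 2}'))  # OrderedDict([('b', 1), ('a', 2)])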
def load_snapshot(self, fpath):
    try:
        with open(fpath, "r") as fh:
            json_str = fh.read()
            logging.info("Loading snapshot %s => \n%s" % (fpath, json_str))
    except Exception as e:
        logging.error("Can't load snapshot '%s': %s" % (fpath, e))
        return False
    try:
        snapshot = JSONDecoder().decode(json_str)
        first_layer_index = 1
        remove_mixer_layer = False
        # Check for MIXER layer ...
        for lss in snapshot['layers']:
            if lss['engine_nick'] == "MX":
                if zynthian_gui_config.ignore_snapshot_mixer_settings:
                    snapshot['layers'].remove(lss)
                else:
                    first_layer_index = 0
                    remove_mixer_layer = True
                break
        # Clean all layers, but don't stop unused engines.
        self.remove_all_layers(False, remove_mixer_layer)
        # Reusing Jalv engine instances raises problems (audio routing &
        # jack names, etc.), so we stop Jalv engines!
        self.zyngui.screens['engine'].stop_unused_jalv_engines()
        # Create new layers, starting engines when needed.
        for lss in snapshot['layers']:
            engine = self.zyngui.screens['engine'].start_engine(lss['engine_nick'])
            self.layers.append(zynthian_layer(engine, lss['midi_chan'], self.zyngui))
        # Finally, stop all unused engines.
        self.zyngui.screens['engine'].stop_unused_engines()
        # Autoconnect.
        self.zyngui.zynautoconnect_midi(True)
        self.zyngui.zynautoconnect_audio()
        # Restore MIDI profile state.
        if 'midi_profile_state' in snapshot:
            self.set_midi_profile_state(snapshot['midi_profile_state'])
        # Set extended config.
        if 'extended_config' in snapshot:
            self.set_extended_config(snapshot['extended_config'])
        # Restore layer state, step 1 => Restore Bank & Preset status.
        i = first_layer_index
        for lss in snapshot['layers']:
            self.layers[i].restore_snapshot_1(lss)
            i += 1
        # Restore layer state, step 2 => Restore Controllers status.
        i = first_layer_index
        for lss in snapshot['layers']:
            self.layers[i].restore_snapshot_2(lss)
            i += 1
        # Fill layer list.
        self.fill_list()
        # Set active layer.
        self.index = first_layer_index + snapshot['index']
        if self.index < len(self.layers):
            self.curlayer = self.layers[self.index]
            self.zyngui.set_curlayer(self.curlayer)
        # Set Clone.
        if 'clone' in snapshot:
            self.set_clone(snapshot['clone'])
        else:
            self.reset_clone()
        # Set Transpose.
        if 'transpose' in snapshot:
            self.set_transpose(snapshot['transpose'])
        else:
            self.reset_transpose()
        # Set CC-Map.
        # TODO
        # Set Audio Routing.
        if 'audio_routing' in snapshot:
            self.set_audio_routing(snapshot['audio_routing'])
        else:
            self.reset_audio_routing()
        # Post action.
        if self.index < len(self.root_layers):
            self.select_action(self.index)
        else:
            self.index = 0
            self.zyngui.show_screen('layer')
    except Exception as e:
        self.zyngui.reset_loading()
        logging.exception("Invalid snapshot: %s" % e)
        return False
    self.last_snapshot_fpath = fpath
    return True
import requests
import testDraw
from testDraw import photoDraw
from json import JSONDecoder

# Call the Face++ API to create a faceset for recognizing faces in images.
http_url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/create"
key = "JPofXVcqsEro1KO7pseh9H2KFXLuTjns"
secret = "vL2SaCF41T78-S-iz0DI-v4rrGxn1cuP"
filepath = "F:\\testPh\\liudehua.jpg"
outer = "faceName"
data = {"api_key": key, "api_secret": secret, "outer_id": outer}
# files = {"image_file": open(filepath, "rb")}
response = requests.post(http_url, data=data)
req_con = response.content.decode('utf-8')
req_dict = JSONDecoder().decode(req_con)
print(req_dict)
if "error_message" in req_dict:
    print(req_dict.get("error_message"))
    exit(1)
def sync_taobao(did, page, new_latest):
    d = DataSource.objects.get(pk=did)
    # First, refresh the session key.
    info = JSONDecoder().decode(d.auth_info)
    sign_str = ("appkey" + TAOBAO_KEY + "refresh_token" + info['refresh_token'] +
                "sessionkey" + info['top_session'] + TAOBAO_SECRET)
    h = md5.new()
    h.update(sign_str)
    sign = h.hexdigest().upper()
    access_token_url = 'http://container.open.taobao.com/container/refresh?appkey=%s&refresh_token=%s&sessionkey=%s&sign=%s' \
        % (TAOBAO_KEY, info['refresh_token'], info['top_session'], sign)
    new_info = JSONDecoder().decode(urllib2.urlopen(access_token_url, "").read())
    info['top_session'] = new_info['top_session']
    info['refresh_token'] = new_info['refresh_token']
    d.auth_info = JSONEncoder().encode(info)
    d.save(force_update=True)
    # Then begin the goods sync.
    info = JSONDecoder().decode(d.auth_info)
    params = {
        'method': 'taobao.trades.bought.get',
        'session': info['top_session'],
        'fields': 'tid,created,seller_nick,orders.oid,orders.pic_path,orders.total_fee,orders.title',
        'partner_id': 'top-apitools',
        'format': 'json',
        'page_no': page,
        'status': 'TRADE_FINISHED'
    }
    op = OpenTaobao(TAOBAO_KEY, TAOBAO_SECRET)
    deal_info = JSONDecoder().decode(op.get_result(params))['trades_bought_get_response']
    if 'trades' in deal_info and deal_info['trades']:
        if page == 1:
            print "now on page 1"
            new_latest = deal_info['trades']['trade'][0]['tid']
        for trade in deal_info['trades']['trade']:
            if 'latest' in info and trade['tid'] == info['latest']:
                info['latest'] = new_latest
                d.auth_info = JSONEncoder().encode(info)
                d.save(force_update=True)
                # Wait until tomorrow to run this task again.
                sync_taobao.apply_async(args=[did, 1, None], countdown=84000)
                print "reached the last synced trade; continuing tomorrow"
                break
            for order in trade['orders']['order']:
                print "processing order"
                new_paw = Paw.objects.create(
                    source=d,
                    type="taobao",
                    content=u"在" + trade['seller_nick'] + u"店</br>花费" +
                            order['total_fee'] + u"</br>购买了" + order["title"],
                    create_time=trade['created'])
                if 'pic_path' in order:
                    new_img = Img(album=new_paw, upload_time=new_paw.create_time)
                    origin_tmp = NamedTemporaryFile(delete=True)
                    origin_tmp.write(urllib2.urlopen(order['pic_path']).read())
                    origin_tmp.flush()
                    new_img.original.save(origin_tmp.name.split('/')[-1] + '.jpg',
                                          File(origin_tmp), save=False)
                    thumb_tmp = NamedTemporaryFile(delete=True)
                    thumb_tmp.write(urllib2.urlopen(order['pic_path'] + '_sum.jpg').read())
                    thumb_tmp.flush()
                    new_img.thumbnail.save(thumb_tmp.name.split('/')[-1] + '.jpg',
                                           File(thumb_tmp), save=True)
        if not ('latest' in info and trade['tid'] == info['latest']):
            print "fetching the next page in one minute"
            sync_taobao.apply_async(args=[did, page + 1, new_latest], countdown=60)
    else:
        if new_latest is not None:
            info['latest'] = new_latest
            d.auth_info = JSONEncoder().encode(info)
            d.save(force_update=True)
        print "rescheduling for tomorrow"
        sync_taobao.apply_async(args=[did, 1, None], countdown=84000)
    def wrapper_func(resource):
        @wraps(resource)
        def wrapper(*args, **kwargs):
            Convertor.convert_to_builtin_type_remove_attribute.__init__()
            Convertor.convert_to_builtin_type_remove_attribute.extend(RemoveAttribute)
            resp = resource(*args, **kwargs)
            return resp
        return wrapper
    return wrapper_func


jsonDecoder = JSONDecoder()


def get_json_param():
    if request.json is not None:
        return request.json
    # 'JSON' may appear at index 0 of the content type, so test membership
    # rather than relying on the index() result as a truth value.
    isDataJson = 'JSON' in str(request.content_type).upper()
    if isDataJson:
        return jsonDecoder.decode(request.data)
    else:
        try:
            return jsonDecoder.decode(request.data)
def create_application(request):
    if request.method == 'GET':
        context = {}
        context['title'] = "Create Application"
        context['images'] = image.showAll()
        return render(request, 'docker/create_application.html', context)
    elif request.method == 'POST':
        meth = request.POST['meth']
        ip = request.POST['ip']
        port = request.POST['port']
        imagename = request.POST['imagename']
        if meth == 'pull':
            re = dockerclient.pull(ip, port, imagename)
            if re:
                response = {'result': 'ok', 'msg': re}
                return JsonResponse(response, safe=False)
            else:
                response = {'result': 'fail'}
                return JsonResponse(response, safe=False)
        elif meth == "create":
            hostid = request.POST['hostid']
            name = request.POST['name']
            description = request.POST['description']
            l_command = request.POST['command']
            l_entrypoint = request.POST['entrypoint']
            container_name = request.POST['container_name']
            host_name = request.POST['host_name']
            network_mode = request.POST['network_mode']
            privileged = request.POST['privileged'] == "true"
            l_security_opt = request.POST['security_opt']
            ulimit_nofile = request.POST['ulimit_nofile']
            ulimit_noproc = request.POST['ulimit_noproc']
            arr_ports = JSONDecoder().decode(request.POST['ports'])
            arr_volum = JSONDecoder().decode(request.POST['volum'])
            arr_dns_server = JSONDecoder().decode(request.POST['dns_server'])
            arr_hosts = JSONDecoder().decode(request.POST['hosts'])
            arr_environment = JSONDecoder().decode(request.POST['environment'])
            command = l_command.split(";")
            entrypoint = l_entrypoint.split(";")
            security_opt = l_security_opt.split(";")
            ports = []
            port_bindings = {}
            for p in arr_ports:
                if p.split(":")[0]:
                    port_bindings[int(p.split(":")[0])] = int(p.split(":")[1])
                    ports.append(int(p.split(":")[0]))
            print port_bindings
            print ports
            volume = []
            binds = {}
            for v in arr_volum:
                vv = {}
                if v.split(":")[0]:
                    vv['bind'] = v.split(":")[1]
                    vv['mode'] = 'rw'
                    volume.append(v.split(":")[1])
                    binds[v.split(":")[0]] = vv
            dns_server = []
            for d in arr_dns_server:
                if d:
                    dns_server.append(d)
            hosts = {}
            for h in arr_hosts:
                if h.split(":")[0]:
                    hosts[h.split(":")[0]] = h.split(":")[1]
            environment = {}
            for e in arr_environment:
                if e.split(":")[0]:
                    environment[e.split(":")[0]] = e.split(":")[1]
            msg = application.create_new(
                ip, port, hostid, name, description, imagename, command,
                entrypoint, container_name, host_name, network_mode,
                privileged, security_opt, ulimit_nofile, ulimit_noproc,
                ports, port_bindings, volume, binds, dns_server, hosts,
                environment)
            response = {'result': 'ok', 'msg': msg}
            return JsonResponse(response, safe=False)
def look(url):
    try:
        return JSONDecoder().decode(urlopen(genRequestUrl(url), timeout=3).read())
    except Exception:
        return None
from json import JSONDecoder
from time import time

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups

n_samples = 1000
n_features = 1000
n_topics = 10
n_top_words = 50

t0 = time()
print("Loading dataset and extracting TF-IDF features...")

with open("documents.txt", "r") as file_documents:
    documents = JSONDecoder().decode(file_documents.read())

with open("stopwords.txt", "r", errors="replace") as file_stopwords:
    stopwords = file_stopwords.read().split()

dataset = []
for key, value in documents.items():
    dataset.append(value)

vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
                             max_features=n_features,
                             stop_words=stopwords)
tfidf = vectorizer.fit_transform(dataset[:n_samples])
def load_snapshot(self, fpath):
    try:
        with open(fpath, "r") as fh:
            json_str = fh.read()
            logging.info("Loading snapshot %s => \n%s" % (fpath, json_str))
    except Exception as e:
        logging.error("Can't load snapshot '%s': %s" % (fpath, e))
        return False
    try:
        snapshot = JSONDecoder().decode(json_str)
        # Clean all layers, but don't stop unused engines.
        self.remove_all_layers(False)
        # Reusing Jalv engine instances raises problems (audio routing &
        # jack names, etc.), so we stop Jalv engines!
        self.zyngui.screens['engine'].stop_unused_jalv_engines()
        # Create new layers, starting engines when needed.
        i = 0
        for lss in snapshot['layers']:
            if lss['engine_nick'] == "MX":
                if zynthian_gui_config.snapshot_mixer_settings:
                    snapshot['amixer_layer'] = lss
                del snapshot['layers'][i]
            else:
                engine = self.zyngui.screens['engine'].start_engine(lss['engine_nick'])
                self.layers.append(zynthian_layer(engine, lss['midi_chan'], self.zyngui))
                i += 1
        # Finally, stop all unused engines.
        self.zyngui.screens['engine'].stop_unused_engines()
        # Restore MIDI profile state.
        if 'midi_profile_state' in snapshot:
            self.set_midi_profile_state(snapshot['midi_profile_state'])
        # Set MIDI Routing.
        if 'midi_routing' in snapshot:
            self.set_midi_routing(snapshot['midi_routing'])
        else:
            self.reset_midi_routing()
        # Autoconnect MIDI.
        self.zyngui.zynautoconnect_midi(True)
        # Set extended config.
        if 'extended_config' in snapshot:
            self.set_extended_config(snapshot['extended_config'])
        # Restore layer state, step 1 => Restore Bank & Preset status.
        i = 0
        for lss in snapshot['layers']:
            self.layers[i].restore_snapshot_1(lss)
            i += 1
        # Restore layer state, step 2 => Restore Controllers status.
        i = 0
        for lss in snapshot['layers']:
            self.layers[i].restore_snapshot_2(lss)
            i += 1
        # Set Audio Routing.
        if 'audio_routing' in snapshot:
            self.set_audio_routing(snapshot['audio_routing'])
        else:
            self.reset_audio_routing()
        # Set Audio Capture.
        if 'audio_capture' in snapshot:
            self.set_audio_capture(snapshot['audio_capture'])
        else:
            self.reset_audio_routing()
        # Autoconnect Audio.
        self.zyngui.zynautoconnect_audio()
        # Restore ALSA Mixer settings.
        if self.amixer_layer and 'amixer_layer' in snapshot:
            self.amixer_layer.restore_snapshot_1(snapshot['amixer_layer'])
            self.amixer_layer.restore_snapshot_2(snapshot['amixer_layer'])
        # Fill layer list.
        self.fill_list()
        # Set active layer.
        if snapshot['index'] < len(self.layers):
            self.index = snapshot['index']
            self.zyngui.set_curlayer(self.layers[self.index])
        elif len(self.layers) > 0:
            self.index = 0
            self.zyngui.set_curlayer(self.layers[self.index])
        # Set Clone.
        if 'clone' in snapshot:
            self.set_clone(snapshot['clone'])
        else:
            self.reset_clone()
        # Note-range & Transpose.
        self.reset_note_range()
        if 'note_range' in snapshot:
            self.set_note_range(snapshot['note_range'])
        elif 'transpose' in snapshot:  # BW compat.
            self.set_transpose(snapshot['transpose'])
        # Zynseq RIFF data.
        if 'zynseq_riff_b64' in snapshot and 'stepseq' in self.zyngui.screens:
            b64_bytes = snapshot['zynseq_riff_b64'].encode('utf-8')
            binary_riff_data = base64.decodebytes(b64_bytes)
            self.zyngui.screens['stepseq'].restore_riff_data(binary_riff_data)
        # Post action.
        if self.index < len(self.root_layers):
            self.select_action(self.index)
        else:
            self.index = 0
            self.zyngui.show_screen('layer')
    except Exception as e:
        self.zyngui.reset_loading()
        logging.exception("Invalid snapshot: %s" % e)
        return False
    self.last_snapshot_fpath = fpath
    return True
#!/usr/bin/env python
import argparse
from json import JSONDecoder

from jenkins_flaky.serialization import from_json

parser = argparse.ArgumentParser(
    description='Diff two files with flaky tests to get the new ones')
parser.add_argument('db_file', type=argparse.FileType('r'))
args = parser.parse_args()

flaky_tests = JSONDecoder(object_hook=from_json).decode(args.db_file.read())
for flaky in flaky_tests:
    print(flaky.name)
    # for execution in flaky.executions:
    #     print(f'\t{execution.job_name}/{execution.build_number}')
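# A rough sketch of what an object_hook like from_json can look like (the
# real one lives in jenkins_flaky.serialization; FlakyTest here is a
# hypothetical stand-in used only for illustration):
def from_json_sketch(d):
    # The hook is called with every decoded JSON object (innermost first)
    # and may return any Python object in its place.
    if 'name' in d and 'executions' in d:
        return FlakyTest(name=d['name'], executions=d['executions'])
    return d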