def login(self):
    """ Logs in to the server """
    hashkey = self.make_hash()
    connection = self.connection or self.connect()
    parameters = urlencode({'service': config.service, 'auth': hashkey})
    # Mask the credentials instead of echoing them
    print "parameters for login: '******'"
    connectionstring = 'https://' + config.base_url + '/' \
        + config.api_version + '/login'
    logger.info('Trying to login to REST: %s' % connectionstring)
    logger.info('Applying header: %s' % no_auth_headers)
    connection.request('POST', connectionstring, parameters, no_auth_headers)
    response = connection.getresponse()
    response_as_json = jloads(response.read())
    self.auth_session_key = response_as_json['session_key']
    self.auth_hostname = response_as_json['public_feed']['hostname']
    self.auth_port = response_as_json['public_feed']['port']
    # The session key doubles as both username and password for basic auth
    basic_auth = b64encode("%s:%s" % (self.auth_session_key,
                                      self.auth_session_key))
    self.auth_headers = no_auth_headers.copy()
    self.auth_headers['Authorization'] = "Basic %s" % basic_auth
    return response_as_json
def jsonloads(self, content):
    Log.d('response --> %s' % content)
    try:
        return jloads(content)
    except Exception:
        from factcore.works.workflow import BaseWork
        return {'ret': BaseWork.FAILED,
                'desc': 'Program exception %s' % traceback.format_exc()}
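# Usage sketch for jsonloads() above (hedged: assumes BaseWork.FAILED is the
# failure marker carried in 'ret', per the except branch):
result = self.jsonloads(raw_body)
if isinstance(result, dict) and result.get('ret') == BaseWork.FAILED:
    Log.d('payload could not be parsed: %s' % result['desc'])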
def json2yaml(_str: str) -> str:
    r"""Convert a JSON string to a YAML string

    >>> json2yaml('{"0":["Val1","Val2","Val3","Val4"],"1":["1","2","3","4"],"2":["5","6","7","8"],"3":["9","10","11","12"],"4":["13","14","15","16"],"5":["17","18","19","20"],"6":["3.14","6.28","2.73","1.57"]}')
    "'0':\n- Val1\n- Val2\n- Val3\n- Val4\n'1':\n- '1'\n- '2'\n- '3'\n- '4'\n'2':\n- '5'\n- '6'\n- '7'\n- '8'\n'3':\n- '9'\n- '10'\n- '11'\n- '12'\n'4':\n- '13'\n- '14'\n- '15'\n- '16'\n'5':\n- '17'\n- '18'\n- '19'\n- '20'\n'6':\n- '3.14'\n- '6.28'\n- '2.73'\n- '1.57'\n"
    """
    return yamldump(jloads(_str, object_pairs_hook=OrderedDict), safe=True)
def json2dict(_str: str) -> Union[dict, list]:
    """Convert a JSON string to a Python dictionary or list (depending on the data input)

    >>> json2dict('{"0":["Val1","Val2","Val3","Val4"],"1":["1","2","3","4"],"2":["5","6","7","8"],"3":["9","10","11","12"],"4":["13","14","15","16"],"5":["17","18","19","20"],"6":["3.14","6.28","2.73","1.57"]}')
    {'0': ['Val1', 'Val2', 'Val3', 'Val4'], '1': ['1', '2', '3', '4'], '2': ['5', '6', '7', '8'], '3': ['9', '10', '11', '12'], '4': ['13', '14', '15', '16'], '5': ['17', '18', '19', '20'], '6': ['3.14', '6.28', '2.73', '1.57']}
    """
    return jloads(_str)
def test_tojson_fromjson():
    """ Tests: tojson fromjson """
    print('::: TEST: test_tojson_fromjson()')

    edict_with_all = _get_orig__edict_with_all()
    new_reobj_all__jdumps = edict_with_all.tojson()
    new_reobj_all = jloads(new_reobj_all__jdumps)

    # note: not equal because tuples are changed to lists in json
    # ok_(edict_with_all == new_reobj_all, msg=None)
    ok_(isinstance(new_reobj_all, dict) and not isinstance(new_reobj_all, Edict), msg=None)

    ok_(edict_with_all['edict1'] == new_reobj_all['edict1'], msg=None)
    ok_(isinstance(new_reobj_all['edict1'], dict) and not isinstance(new_reobj_all['edict1'], Edict), msg=None)

    ok_(edict_with_all['rdict1'] == new_reobj_all['rdict1'], msg=None)
    ok_(isinstance(new_reobj_all['rdict1'], dict) and not isinstance(new_reobj_all['rdict1'], Rdict), msg=None)

    ok_(edict_with_all['edictf1'] == new_reobj_all['edictf1'], msg=None)
    ok_(isinstance(new_reobj_all['edictf1'], dict) and not isinstance(new_reobj_all['edictf1'], RdictF), msg=None)

    ok_(edict_with_all['edictio1'] == new_reobj_all['edictio1'], msg=None)
    ok_(isinstance(new_reobj_all['edictio1'], dict) and not isinstance(new_reobj_all['edictio1'], RdictIO), msg=None)
    ok_(edict_with_all['edictio1'].key_order == ['edictio_inner1', 'edictio_inner2', 'edictio_inner3'], msg=None)

    ok_(edict_with_all['edictfo1'] == new_reobj_all['edictfo1'], msg=None)
    ok_(isinstance(new_reobj_all['edictfo1'], dict) and not isinstance(new_reobj_all['edictfo1'], RdictFO), msg=None)
    ok_(edict_with_all['edictfo1'].key_order == ['edictfo_inner1', 'edictfo_inner2', 'edictfo_inner3'], msg=None)

    ok_(edict_with_all['edictfo2_1'] == new_reobj_all['edictfo2_1'], msg=None)
    ok_(isinstance(new_reobj_all['edictfo2_1'], dict) and not isinstance(new_reobj_all['edictfo2_1'], RdictFO2), msg=None)
    ok_(edict_with_all['edictfo2_1'].key_order == ['edictfo2_inner1', 'edictfo2_inner2', 'edictfo2_inner3'], msg=None)

    ok_(edict_with_all['elist1'] == new_reobj_all['elist1'], msg=None)
    ok_(isinstance(new_reobj_all['elist1'], list) and not isinstance(new_reobj_all['elist1'], Elist), msg=None)

    ok_(edict_with_all['rlist1'] == new_reobj_all['rlist1'], msg=None)
    ok_(isinstance(new_reobj_all['rlist1'], list) and not isinstance(new_reobj_all['rlist1'], Rlist), msg=None)

    ok_(edict_with_all['rlistf1'] == new_reobj_all['rlistf1'], msg=None)
    ok_(isinstance(new_reobj_all['rlistf1'], list) and not isinstance(new_reobj_all['rlistf1'], RlistF), msg=None)

    # note: not equal because tuples are changed to lists in json
    # ok_(edict_with_all['etuple1'] == new_reobj_all['etuple1'], msg=None)
    ok_(isinstance(new_reobj_all['etuple1'], list) and not isinstance(new_reobj_all['etuple1'], Etuple), msg=None)

    # note: not equal because tuples are changed to lists in json
    # ok_(edict_with_all['lmatrix1'] == new_reobj_all['lmatrix1'], msg=None)
    ok_(isinstance(new_reobj_all['lmatrix1'], list) and not isinstance(new_reobj_all['lmatrix1'], Lmatrix), msg=None)

    # note: not equal because tuples are changed to lists in json
    # ok_(edict_with_all['lmatrixf1'] == new_reobj_all['lmatrixf1'], msg=None)
    ok_(isinstance(new_reobj_all['lmatrixf1'], list) and not isinstance(new_reobj_all['lmatrixf1'], LmatrixF), msg=None)

    # some data checks
    ok_(edict_with_all['edictfo1']['edictfo_inner2'] == new_reobj_all['edictfo1']['edictfo_inner2'] and
        new_reobj_all['edictfo1']['edictfo_inner2'] == 'edictfo_inner2 value', msg=None)
    ok_(edict_with_all['rlist1'][1] == new_reobj_all['rlist1'][1] and
        new_reobj_all['rlist1'][1] == 'rlist_inner value2', msg=None)
    ok_(edict_with_all['lmatrixf1'][1][2] == new_reobj_all['lmatrixf1'][1][2] and
        new_reobj_all['lmatrixf1'][1][2] == 125, msg=None)
def json2csv(_str: str) -> list:
    """Convert a JSON string to CSV (as a list)

    >>> json2csv('{"0":["Val1","Val2","Val3","Val4"],"1":["1","2","3","4"],"2":["5","6","7","8"],"3":["9","10","11","12"],"4":["13","14","15","16"],"5":["17","18","19","20"],"6":["3.14","6.28","2.73","1.57"]}')
    [['Val1', 'Val2', 'Val3', 'Val4'], ['1', '2', '3', '4'], ['5', '6', '7', '8'], ['9', '10', '11', '12'], ['13', '14', '15', '16'], ['17', '18', '19', '20'], ['3.14', '6.28', '2.73', '1.57']]
    """
    _dict: Union[dict, list] = jloads(_str)
    return [_dict[_key] for _key in _dict.keys()] if isinstance(_dict, dict) else _dict
def get_data():
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect("mastersocket")
    s.send(jdumps({"request": "get_data"}))
    data = s.recv(8192)
    raw = jloads(data)
    if raw:
        (data, data_forecast, unread, news, cal) = raw
        return (data, data_forecast, unread, news, cal)
    else:
        return False
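# Usage sketch for get_data(): the function returns False on an empty reply,
# so callers should check before unpacking. Assumes the "mastersocket" server
# is running and the whole JSON reply fits in a single 8192-byte recv().
result = get_data()
if result:
    data, data_forecast, unread, news, cal = result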
def serve_rflist(self, body, loc, start_response):
    rpathes = jloads(body)
    bodyfobj = StringIO()
    gzfobj = GzipFile('srvrflist', 'wb', fileobj=bodyfobj)
    if rpathes == []:
        rpathes = loc.get('rpathes', None)
    wdobj = walkdirs(loc['rootdir'], rpathes)
    fpath = wdobj.next()
    while fpath:
        gzfobj.write(relpath(fpath, loc['rootdir']) + '\n')
        fpath = wdobj.next()
    gzfobj.close()
    return self.octreply(bodyfobj.getvalue(), start_response)
def convert(srcname):
    ''' Convert a single file from .json to .style '''
    print(srcname)
    sstr = open(srcname, 'rb').read()
    sdata = fixstyle(jloads(sstr))
    dstr = dumps(sdata)
    assert sdata == rloads(dstr), "Bad round-trip"
    dstname = srcname.replace('.json', '.style')
    dstf = open(dstname, 'wb')
    dstf.write(dstr)
    dstf.close()
def run(self, runtimeargs):
    err = False
    msg = ''
    try:
        result = xbmc.executeJSONRPC(self.cmd_str)
        msg = jloads(result)
    except Exception as e:
        # bind the exception instance itself; sys.exc_info()[0] only
        # yields the class, which has no useful 'message' attribute
        err = True
        if hasattr(e, 'message'):
            msg = str(e.message)
        msg = msg + '\n' + traceback.format_exc()
    return [err, msg]
def openjsonfile(_filename: str, _encoding: str = r'utf-8', _jsondata: bool = True) -> Union[dict, str]:
    """Open a JSON file given a pathname and return the object as a dict or str (if `_jsondata` is set to `False`)"""
    try:
        _out: str = r''
        ensurefileexists(_filename)
        with codec_opener(_filename, mode=r'rt', encoding=_encoding, buffering=1) as _file:
            _out = r''.join(_file.readlines())
        return jloads(_out) if _jsondata else _out
    except JSONDecodeError:
        stderr.write('The JSON file is malformed!\n')
        raise SystemExit(1)
    except (LookupError, UnicodeError):
        stderr.write('Unable to determine and process data encoding!\n')
        raise SystemExit(1)
def convert(srcname):
    """ Convert a single file from .json to .style """
    print(srcname)
    with open(srcname, 'r') as f:
        sstr = f.read()
    sdata = fixstyle(jloads(sstr))
    dstr = dumps(sdata)
    assert sdata == rloads(dstr), "Bad round-trip"
    dstname = srcname.replace('.json', '.style')
    with open(dstname, 'w') as dstf:
        dstf.write(dstr)
def _read_metadata(self, keyword, default_value, json=False):
    ''' If the keyword is found, return the stored value '''
    if keyword in self.metadata:
        data = self.metadata[keyword]
    else:
        data = default_value
    if json:
        try:
            return jloads(data)
        except (TypeError, ValueError):
            # not valid JSON; fall through and return the raw value
            pass
    return data
def loads(serializedString):
    '''
    Abstraction on json.loads to handle TransmissionData deserialization.

    Takes a serialized TransmissionData object and uses the json module
    to deserialize it.

    :param serializedString: The serialized TransmissionData
    :returns: A TransmissionData object
    '''
    return jloads(
        serializedString,
        object_hook=_TransmissionDataSerializer.Decode,
    )
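# Minimal sketch of the shape an object_hook decoder such as
# _TransmissionDataSerializer.Decode typically has. The marker field and the
# attribute handling below are illustrative assumptions, not the library's
# actual implementation:
def Decode(dct):
    if '__transmission_data__' in dct:  # hypothetical type marker
        obj = TransmissionData()
        obj.__dict__.update(dct)
        return obj
    return dct  # plain dicts pass through unchanged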
def getStereoscopicMode():
    """
    Retrieves stereoscopic mode from json-rpc
    @return: "off", "split_vertical", "split_horizontal", "row_interleaved",
             "hardware_based", "anaglyph_cyan_red", "anaglyph_green_magenta", "monoscopic"
    @rtype: str
    """
    query = '{"jsonrpc": "2.0", "method": "GUI.GetProperties", "params": {"properties": ["stereoscopicmode"]}, "id": 1}'
    result = xbmc.executeJSONRPC(query)
    jsonr = jloads(result)
    ret = ""
    if "result" in jsonr:
        if "stereoscopicmode" in jsonr["result"]:
            if "mode" in jsonr["result"]["stereoscopicmode"]:
                ret = jsonr["result"]["stereoscopicmode"]["mode"].encode("utf-8")
    return ret
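# For reference, the JSON-RPC reply parsed above has roughly this shape
# (the concrete values are assumptions for illustration):
# {"id": 1, "jsonrpc": "2.0",
#  "result": {"stereoscopicmode": {"label": "Disabled", "mode": "off"}}}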
def run(self):
    """
    Infinite loop: wait for a new item in the queue:instagram Redis list,
    pop it and process it. Wait for 2 seconds, then start from the beginning.
    """
    while True:
        data = jloads(self.redis.blpop('queue:instagram')[1])
        try:
            url = 'https://api.instagram.com/v1/media/shortcode/{}?access_token={}'.format(
                data[1].split('/')[4], IG_ACCESS_TOKEN)
            photo_data = get(url, stream=False, timeout=10)
        except (IndexError, ConnectionError, ProtocolError, ReadTimeout,
                ReadTimeoutError, SSLError, ssl_SSLError, soc_error, SysCallError) as e:
            error(e)
        else:
            if photo_data.ok:
                link = photo_data.json()['data']['images']['standard_resolution']['url']
                q = 'INSERT INTO media(tweet_id, url) VALUES ("{}", "{}");'.format(data[0], link)
                exec_mysql(q, self.mysql)
        sleep(2)
def reduce_shared_features(shared_features):
    to_keep = []
    feature_lists = []
    for feature in shared_features:
        feature_lists.append([i.encode("unicode-escape") for i in jloads(feature)])
    for f_i in xrange(len(feature_lists)):
        keep = True
        for o_i in xrange(len(feature_lists)):
            if f_i == o_i:
                continue
            # drop any list that is a strict prefix of a longer one
            if len(feature_lists[f_i]) < len(feature_lists[o_i]):
                if feature_lists[f_i] == feature_lists[o_i][:len(feature_lists[f_i])]:
                    keep = False
        if keep:
            to_keep.append(feature_lists[f_i])
    return to_keep
def json2csvstr(_str: str, _dialect: str = r'unix', _delimiter: str = r',', _quotechar: str = r'"') -> str:
    r"""Convert a JSON string to CSV (as a string)

    >>> json2csvstr('{"0":["Val1","Val2","Val3","Val4"],"1":["1","2","3","4"],"2":["5","6","7","8"],"3":["9","10","11","12"],"4":["13","14","15","16"],"5":["17","18","19","20"],"6":["3.14","6.28","2.73","1.57"]}')
    '0,1,2,3,4,5,6\nVal1,Val2,Val3,Val4\n1,2,3,4\n5,6,7,8\n9,10,11,12\n13,14,15,16\n17,18,19,20\n3.14,6.28,2.73,1.57\n'
    """
    _dict: Union[dict, list] = jloads(_str)
    _buf = StringIO(r'')
    csvwriter = cwriter(_buf, dialect=_dialect, delimiter=_delimiter, quotechar=_quotechar, quoting=QUOTE_MINIMAL)
    if isinstance(_dict, dict):
        _tmpdict: list = [_dict[_key] for _key in _dict.keys()]
        csvwriter.writerow(_dict.keys())
        for _row in _tmpdict:
            csvwriter.writerow(_row)
    else:
        for _row in _dict:
            csvwriter.writerow(_row)
    _out: str = _buf.getvalue()
    _buf.close()
    return _out
def _json(self, method, request, jrequest, *args, **kwargs):
    response = {
        'status': 500,
        'detail': '',
        'timestamp': datetime.now().strftime('%Y%m%d %H:%M:%S.%f'),
        'data': {},
    }
    try:
        respdata = None
        if hasattr(self, 'service'):
            respdata = self.service(request, jrequest)
        elif method == 'get':
            respdata = self.service_get(request, jrequest)
        elif method == 'post':
            respdata = self.service_post(request, jrequest)
        if isinstance(respdata, http.HttpResponse):
            return respdata
        elif respdata is None:
            response['status'] = 200
        elif type(respdata) in (str, unicode):
            response['status'] = 200
            response['data'] = jloads(respdata)
        elif type(respdata) != dict:
            raise Exception('Cannot return a type other than dict')
        else:
            response['status'] = 200
            response['data'] = respdata
        content = jdumps(response)
        kw = {'status': response['status']}
    except JSONServiceError as exc:
        traceback.print_exc()
        kw = {'status': exc.status_code}
        content = jdumps({'detail': exc.detail, 'status': exc.status_code})
    except Exception as exc:
        traceback.print_exc()
        kw = {'status': getattr(exc, 'status_code', 400)}
        # str(exc): the exception object itself is not JSON-serializable
        content = jdumps({'detail': str(exc), 'status': kw['status']})
    return http.HttpResponse(content, content_type='application/json', **kw)
def on_message(self, headers, message):
    record_counter('daemons.tracer.kronos.reports')
    appversion = 'dq2'
    msg_id = headers['message-id']
    if 'appversion' in headers:
        appversion = headers['appversion']
    if 'resubmitted' in headers:
        record_counter('daemons.tracer.kronos.received_resubmitted')
        logging.warning('(kronos_file) got a resubmitted report')
    try:
        if appversion == 'dq2':
            self.__conn.ack(msg_id, self.__subscription_id)
            return
        else:
            report = jloads(message)
    except Exception:
        # message is corrupt, not much to do here
        # send count to graphite, send ack to broker and return
        record_counter('daemons.tracer.kronos.json_error')
        logging.error('(kronos_file) json error')
        self.__conn.ack(msg_id, self.__subscription_id)
        return
    self.__ids.append(msg_id)
    self.__reports.append(report)
    try:
        logging.debug('(kronos_file) message received: %s %s %s'
                      % (str(report['eventType']), report['filename'], report['remoteSite']))
    except Exception:
        pass
    if len(self.__ids) >= self.__chunksize:
        self.__update_atime()
        for msg_id in self.__ids:
            self.__conn.ack(msg_id, self.__subscription_id)
        self.__reports = []
        self.__ids = []
def on_message(self, frame):
    record_counter('daemons.tracer.kronos.reports')
    appversion = 'dq2'
    msg_id = frame.headers['message-id']
    if 'appversion' in frame.headers:
        appversion = frame.headers['appversion']
    if 'resubmitted' in frame.headers:
        record_counter('daemons.tracer.kronos.received_resubmitted')
        self.__logger(logging.WARNING, 'got a resubmitted report')
    try:
        if appversion == 'dq2':
            self.__conn.ack(msg_id, self.__subscription_id)
            return
        else:
            report = jloads(frame.body)
    except Exception:
        # message is corrupt, not much to do here
        # send count to graphite, send ack to broker and return
        record_counter('daemons.tracer.kronos.json_error')
        self.__logger(logging.ERROR, 'json error', exc_info=True)
        self.__conn.ack(msg_id, self.__subscription_id)
        return
    self.__ids.append(msg_id)
    self.__reports.append(report)
    try:
        self.__logger(logging.DEBUG, 'message received: %s %s %s'
                      % (str(report['eventType']), report['filename'], report['remoteSite']))
    except Exception:
        pass
    if len(self.__ids) >= self.__chunksize:
        self.__update_atime()
        for msg_id in self.__ids:
            self.__conn.ack(msg_id, self.__subscription_id)
        self.__reports = []
        self.__ids = []
def decrypt(self, fromPath=None):
    """"""
    done = False
    try:
        if fromPath is None:
            fromPath = self.path
        toPath = fromPath[:-len(Kirmah.EXT)] if fromPath.endswith(Kirmah.EXT) else fromPath + '.dump'
        if Io.file_exists(fromPath):
            Sys.pwlog([(' Decrypt Index... ', Const.CLZ_0, True)])
            call = ' '.join([Sys.executable, 'kirmah-cli.py', 'dec',
                             '-qfj2' if Sys.isUnix() else '-qf', fromPath,
                             '-z', '-r', '-m', '-o', toPath, '-k', self.keyPath])
            print(call)
            Sys.sysCall(call)
            data = jloads(Io.get_data(toPath))
            Io.removeFile(toPath)
        else:
            data = {}
        done = True
    except ValueError as e:
        raise BadKeyException(e)
    Sys.pwlog([(' done' if done else ' ko', Const.CLZ_2 if done else Const.CLZ_1, True)])
    return data
def get_weather_now():
    '''
    Retrieve current weather conditions for New York, NY
    as of the time of the request.
    '''
    r = rget(WEATHER_NOW_URL)
    js = jloads(r.text)
    curr_weather = js['current_observations']
    ret = {}
    ret['weather_str'] = curr_weather['weather']
    ret['wind_chl'] = float(curr_weather['windchill_c'])
    ret['wind_dir'] = float(curr_weather['wind_degrees'])
    ret['wind_spd'] = float(curr_weather['wind_kph'])
    ret['wind_gst'] = float(curr_weather['wind_gust_kph'])
    ret['temp_c'] = float(curr_weather['temp_c'])
    ret['feels_like_c'] = float(curr_weather['feelslike_c'])
    ret['heat_index_c'] = float(curr_weather['heat_index_c'])
    ret['uv'] = float(curr_weather['UV'])
    ret['pressure_mb'] = float(curr_weather['pressure_mb'])
    ret['precipitation'] = float(curr_weather['precip_today_metric'])
    ret['solar_rad'] = float(curr_weather['solarradiation'])
    return ret
def get_student_test(bucket, subjectName, testId, questionsKey):
    global sleepTime, s3delimiter
    response = ''
    questionsKeyPath = f'{subjectName}{s3delimiter}{testId}{s3delimiter}' \
                       f'{questionsKey}'
    # Counter helps to work with API RequestLimitExceed errors
    counter = 0
    while counter < 5:
        s3Resource = boto3.resource('s3')
        try:
            response = jloads(
                s3Resource.Object(
                    bucket, questionsKeyPath).get()['Body'].read().decode('utf-8'))
            break
        except Exception as e:
            logger.warning(e)
            counter += 1
            logger.warning('API limit - waiting {}s...'.format(sleepTime))
            sleep(sleepTime)
    del s3Resource
    del questionsKeyPath, counter
    return response
def on_message(self, headers, message):
    record_counter('daemons.tracer.kronos.reports')
    appversion = 'dq2'
    msg_id = headers['message-id']  # renamed from `id` to avoid shadowing the builtin
    if 'appversion' in headers:
        appversion = headers['appversion']
    try:
        if appversion == 'dq2':
            self.__conn.ack(msg_id, self.__subscription_id)
            return
        else:
            report = jloads(message)
    except Exception:
        # message is corrupt, not much to do here
        # send count to graphite, send ack to broker and return
        record_counter('daemons.tracer.kronos.json_error')
        logging.error('(kronos_file) json error')
        self.__conn.ack(msg_id, self.__subscription_id)
        return
    self.__ids.append(msg_id)
    self.__reports.append(report)
    try:
        logging.debug('(kronos_file) message received: %s %s %s'
                      % (str(report['eventType']), report['filename'], report['remoteSite']))
    except Exception:
        pass
    if len(self.__ids) >= self.__chunksize:
        self.__update_atime()
        for msg_id in self.__ids:
            self.__conn.ack(msg_id, self.__subscription_id)
        self.__reports = []
        self.__ids = []
def __call__(self, **kwargs):
    if self.auth_session_key:
        relative_url = "/login/%s" % self.auth_session_key
        url = 'https://%s/%s%s' % (config.base_url, config.api_version, relative_url)
        logged_in_response = requests.put(url, headers=self.auth_headers)
        if not jloads(logged_in_response.text)['logged_in']:
            print " :::: set all auth to None and redo the login next time ..."
            # Set auth stuff to None so that a new session is created
            self.connection = None
            self.auth_headers = None
            self.auth_session_key = None
    if self.connection is None:
        print " :::: connect ..."
        self.connect()
    if self.auth_headers is None:
        print " :::: login ..."
        self.login()
    return self.decorated_function(self, **kwargs)
def check_if_student_is_valid(bucket, studentId, studentsListKey):
    global sleepTime
    studentsList = ''
    # Counter helps to work with API RequestLimitExceed errors
    counter = 0
    while counter < 5:
        s3Resource = boto3.resource('s3')
        try:
            studentsList = jloads(
                s3Resource.Object(
                    bucket, studentsListKey).get()['Body'].read().decode('utf-8'))
            break
        except Exception as e:
            logger.warning(e)
            counter += 1
            logger.warning('API limit - waiting {}s...'.format(sleepTime))
            sleep(sleepTime)
    del s3Resource
    del counter
    return [
        student for student in studentsList
        if student['student_id'] == studentId
    ]
def setDictionary(word):
    try:
        api_key = '750c98f0-f83f-4604-a78d-9065f53e5804'
        url = ("https://www.dictionaryapi.com/api/v3/references/sd2/json/"
               + word.lower() + "?key=" + api_key)
        resp = reqGet(url)
        data = jloads(resp.text)
        defis = data[0]['def'][0]['sseq']
        count = 0
        # file = open(savedir + "\\dictionary.txt", "w")
        file = open("D:\\tsaurus\\Tsaurus-teaching-assistant-for-Kids-using-OCR-and-webscrapping\\temp" + "\\dictionary.txt", "w")
        for defi in defis:
            count += 1
            if count >= 4:
                break
            text = defi[0][1]['dt'][0]
            text = text[1][4:] + "."
            # strip Merriam-Webster markup tokens from the definition text
            text = text.replace("{it}", "\"")
            text = text.replace("{/it}", "\"")
            text = text.replace("{bc}", "")
            text = text.replace("{sx", "")
            text = text.replace("|}", "")
            text = text.replace("}", "|")
            file.write("=> " + text + " #&# ")
            db.set_dic_text("=>" + text + " #&# ")
        file.write("< " + data[0]['fl'] + " > #&# ")
        db.set_dic_parOfSpeech("< " + data[0]['fl'] + " > #&# ")
        file.close()
        global c
        c = '+'
    except Exception:
        # swallow all errors; x_ferb is just a no-op failure marker
        x_ferb = 1
def on_message(self, headers, message):
    # Send message to StatsD
    # Sanity check
    print(headers)
    print(message)
    msg_id = headers['message-id']
    if 'resubmitted' in headers:
        # Send message to StatsD
        # Ignore resubmitted messages
        return
    try:
        report = jloads(message)
    except Exception:
        # Corrupt message, ignore
        # Send message to StatsD
        self.__conn.ack(msg_id, self.__subscription_id)
        return
    try:
        report['payload']['created_at'] = report['created_at']
        report['payload']['event_type'] = report['event_type']
        for k, v in report['payload'].items():
            if k.endswith("_at"):
                if v:
                    report['payload'][k] = v.split('.')[0]
    except Exception:
        pass
    self.__ids.append(msg_id)
    self.__reports.append({'id': msg_id, 'body': report})
    if len(self.__reports) >= self.__chunksize:
        self.__send_to_es()
def json2dict(self):
    with self.lock:
        with open(self.filename) as rawInfo:
            lines = rawInfo.read()
            return jloads(lines)
def google_comlete(query):
    params = urlencode({'client': 'firefox', 'q': query})
    url = 'https://suggestqueries.google.com/complete/search?%s' % params
    raw = urlopen(url).read()
    return jloads(raw)[1]
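# Usage sketch: the "firefox" suggest client returns a JSON array whose
# second element (index 1, as used above) is the list of suggestions.
for suggestion in google_comlete('python json'):
    print(suggestion)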
def __init__(self):
    super(StockOperations, self).__init__()

def summary(self):
    firstdate, lastdate = self[0].date, self[-1].date
    sum_quantity, sum_amount = 0, 0
    for soobj in self:
        sum_amount += soobj.amount
        # u'股息入账' = "dividend credited": does not change the share count
        if soobj.operation not in [u'股息入账']:
            sum_quantity += soobj.quantity
        # u'余额入账' = "balance credited", u'转托转入' = "custody transfer in"
        if soobj.operation in [u'余额入账', u'转托转入']:
            sum_amount -= soobj.price * soobj.quantity
    return firstdate, lastdate, sum_quantity, sum_amount

stockdict = {}
for jrow in open('stock.flow.jsons', 'rt').readlines():
    row = jloads(jrow.strip())
    # u'证券代码' = "security code": skip the header row
    if row[1] == u'证券代码':
        continue
    if not row[1]:
        continue
    if row[1] not in stockdict:
        stockdict[row[1]] = StockOperations()
    stockdict[row[1]].append(StockOperation(row))

def soscmp(sosobj0, sosobj1):
    return cmp(sosobj0[-1].date, sosobj1[-1].date)

total = 0
sosobjs = stockdict.values()
sosobjs.sort(cmp=soscmp)
for sosobj in sosobjs:
    summary = sosobj.summary()
def getNReads(readStatsF):
    """Return the number of reads in millions."""
    readStats = jloads(open(readStatsF).read())
    nreads = int(readStats['num_reads'])
    nreads = nreads / million  # `million` is a module-level constant (1000 * 1000)
    return nreads
def main():
    inp = jloads(stdin.readline())
    # rdd_pickle is unpickled twice: the outer load yields the pickled
    # (func, source) pair, the inner load unpacks it
    func, source = ploads(ploads(inp['rdd_pickle']))
    rdd = RDD(prev_func=func, source=source)
    partition_numbers = inp['partition_numbers']
    worker_pool.map(lambda n: do_task_and_print(rdd, n), partition_numbers)
def getNReads(readStatsF):
    readStats = jloads(open(readStatsF).read())
    nreads = int(readStats['num_reads'])
    nreads = nreads / (1000 * 1000)
    return nreads
def __init__(self):
    super(StockOperations, self).__init__()

def summary(self):
    firstdate, lastdate = self[0].date, self[-1].date
    sum_quantity, sum_amount = 0, 0
    for soobj in self:
        sum_amount += soobj.amount
        # u'股息入账' = "dividend credited": does not change the share count
        if soobj.operation not in [u'股息入账']:
            sum_quantity += soobj.quantity
        # u'余额入账' = "balance credited", u'转托转入' = "custody transfer in"
        if soobj.operation in [u'余额入账', u'转托转入']:
            sum_amount -= soobj.price * soobj.quantity
    return firstdate, lastdate, sum_quantity, sum_amount

stockdict = {}
for jrow in open('stock.flow.jsons', 'rt').readlines():
    row = jloads(jrow.strip())
    # u'证券代码' = "security code": skip the header row
    if row[1] == u'证券代码':
        continue
    if not row[1]:
        continue
    if row[1] not in stockdict:
        stockdict[row[1]] = StockOperations()
    stockdict[row[1]].append(StockOperation(row))

def soscmp(sosobj0, sosobj1):
    return cmp(sosobj0[-1].date, sosobj1[-1].date)

total = 0
sosobjs = stockdict.values()
sosobjs.sort(cmp=soscmp)
for sosobj in sosobjs:
    summary = sosobj.summary()
    if summary[2] == 0:
        cost = 0
    else:
        cost = -summary[3] / summary[2]
    if summary[3] == 0.0:
        continue
connection = connect()
connectionstring = 'https://' + config.base_url \
    + '/' + config.api_version
logger.info('Trying to get status: %s' % connectionstring)
logger.info('Applying header: %s' % headers)
connection.request('GET', connectionstring, '', headers)
try:
    response = connection.getresponse()
except HTTPException, exception:
    logger.error('Error getting status: %s' % exception)
return jloads(response.read())

def login(connection, hashkey):
    """ Logs in to the server """
    parameters = urlencode({'service': config.service, 'auth': hashkey})
    connectionstring = 'https://' + config.base_url + '/' \
        + config.api_version + '/login'
    logger.info('Trying to login to REST: %s' % connectionstring)
    logger.info('Applying header: %s' % headers)
    connection.request('POST', connectionstring, parameters, headers)
    try:
        response = connection.getresponse()
    except HTTPException, exception:
def jsonloads(self, content):
    try:
        return jloads(content)
    except Exception:
        return {'ret': 0, 'desc': 'Program exception %s' % traceback.format_exc()}
class Storage(models.Model):
    """
    Corresponding to a storage. Attributes are the information
    needed to connect to the storage's API.
    """
    HTTP_PROTOCOLS = (
        ('http', 'HTTP'),
        ('https', 'HTTPS'),
    )
    name = models.CharField(_('name'), max_length=100, blank=True, null=True)
    address = models.CharField(_('address'), max_length=200)
    port = models.IntegerField(_('port'), blank=True, null=True, default=80)
    url_prefix = models.CharField(_('URL prefix'), max_length=100, default='', blank=True,
                                  help_text=_('Start point of API'))
    protocol = models.CharField(_('protocol'), max_length=5, default='http',
                                choices=HTTP_PROTOCOLS)
    login = models.CharField(_('login'), max_length=100, blank=True, null=True,
                             help_text=('Used for HTTP authentication'))
    password = models.CharField(_('password'), max_length=100, blank=True, null=True)

    objects = Storage_Manager()

    class Meta:
        app_label = 'core'
        ordering = ('name',)
        verbose_name = _('storage')
        verbose_name_plural = _('storages')

    class ConnectionError(Exception):
        "Can't connect to storage"
        pass

    class UnvalidDataError(Exception):
        "Can't read data from storage"
        pass

    def __unicode__(self):
        if self.name:
            return self.name
        else:
            return self.address

    def __init__(self, *args, **kwargs):
        super(Storage, self).__init__(*args, **kwargs)
        self._set_proxy()
        self.URLS = {
            'hosts': '/hosts',
            'host': '/hinfo?host={hostid}',
            'plugins': '/list?host={hostid}',
            'data': '/data?host={hostid}&plugin={plugin}&ds={ds}&res={res}',
        }

    def _set_proxy(self):
        """ Set a URL opener for the current storage. """
        from urllib2 import HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, build_opener, install_opener
        if self.login:
            passman = HTTPPasswordMgrWithDefaultRealm()
            passman.add_password(None, self.address, self.login, self.password)
            authhandler = HTTPBasicAuthHandler(passman)
            self.proxy = build_opener(authhandler)
        else:
            self.proxy = build_opener()
        install_opener(self.proxy)

    def is_on(self):
        """Test if storage is reachable"""
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((self.address, self.port))
            s.close()
            return True
        except socket.error as msg:
            return False

    def get_absolute_url(self):
        return reverse('storage', args=[str(self.id)])

    def get_add_url(self):
        return reverse('storage add')

    def get_list_url(self):
        return reverse('storage list')

    def get_update_url(self):
        if not self.id:
            return self.get_add_url()
        return reverse('storage update', args=[str(self.id)])

    def get_delete_url(self):
        return reverse('storage delete', args=[str(self.id)])

    def get_create_hosts_url(self):
        return reverse('storage create hosts', args=[str(self.id)])

    def get_external_url(self):
        """Return the storage's API url."""
        return "%(protocol)s://%(address)s:%(port)i%(url_prefix)s" % self.__dict__

    def _connect(self, url, data={}):
        """
        Basic method for using the proxy to the storage.
        `data` should be as follows:
         - host: requested host id on storage
         - plugin: requested plugin name
         - ds: requested data sources separated by ','
         - res: resolution, by default Daily
        """
        if url not in self.URLS:
            raise ValueError("URL key does not exist.")
        data['res'] = data.get('res', 'Daily')
        if 'plugin' in data:
            data['plugin'] = quote(data['plugin'])
        _url = self.URLS[url].format(**data)
        uri = ("%(protocol)s://%(address)s:%(port)i%(url_prefix)s" % self.__dict__) + _url
        logger.info(uri)
        try:
            r = self.proxy.open(uri, timeout=settings.STORAGE_TIMEOUT).read()
        except IOError, e:
            raise self.ConnectionError(e)
        return jloads(r)
def main(stop_after_init=False):
    from sys import argv
    argc = len(argv)
    if argc <= len(CLI_ARGS):
        print 'Usage: %s %s %s' % (argv[0], ' '.join(CLI_ARGS),
                                   ' '.join(["[%s]" % x for x in OPTIONAL_ARGS]))
        print 'Currently missing arguments:', ' '.join(CLI_ARGS[len(argv)-1:])
        exit()
    urls_from_logs_to_ids_file = argv[1].strip()
    web_crawl_urls_to_ids_file = argv[2].strip()
    output_path = argv[-1].strip()

    t_init = time()

    print "Loading domains string -> id mapping..."
    t0 = time()
    domains_string_to_ids = {}
    with univ_open(urls_from_logs_to_ids_file, 'r') as f:
        current_index = 0
        for line in f:
            domains_string_to_ids[line.strip().lower().replace("%0a", '')] = current_index
            current_index += 1
    print "Done in", time() - t0

    print "Counting urls..."
    t0 = time()
    number_of_urls = 0
    with univ_open(web_crawl_urls_to_ids_file, 'r') as f:
        # Note: I thought I'd use len(f.readlines()) but building this huge
        # list in memory takes ages for nothing
        for l in f:
            number_of_urls += 1

    print "Mapping URLs to their domain id..."
    web_crawl_urls_to_domain_ids = [None] * number_of_urls
    with univ_open(web_crawl_urls_to_ids_file, 'r') as f:
        f.readline()  # 1st line has no info
        current_index = 0
        start_index = 0  # The second line contains no comma
        for line in f:
            line = line.strip().lower()
            if line == "]":
                continue
            line = jloads(line[start_index:]).replace("%0a", '')
            start_index = 1
            domain = extract_domain(line)
            try:
                web_crawl_urls_to_domain_ids[current_index] = domains_string_to_ids[domain]
            except KeyError:
                pass  # well, not found, then keep no values to assign
            current_index += 1
    print "Done in", time() - t0

    print "Writing URLs to output file", output_path, "..."
    t0 = time()
    with univ_open(output_path, 'w+') as out:
        out.write(
            "\n".join(
                "%d" % web_crawl_urls_to_domain_ids[i]
                if web_crawl_urls_to_domain_ids[i] is not None
                else ""
                for i in xrange(len(web_crawl_urls_to_domain_ids))
            )
        )
    print "Done in", time() - t0

    print "Script executed in", time() - t_init, "seconds"
def get_host_json(hostid, mock_id):
    hosts_json = get_hosts_json(mock_id)
    hosts = jloads(hosts_json)
    return hosts.get(hostid, {})
def post(self, request, *args, **kwargs):
    return self._json('post', request, jloads(request.body), *args, **kwargs)
def _execute(self, pose_name: str = "pose0") -> Dict[str, str]:
    self._var_not_none(pose_name)
    pose_info = self._send_recv_request([pose_name])
    # Receives a serialised json containing the following fields:
    # atom_coords, atom_elements, atom_bonds
    return jloads(pose_info[1])
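# For illustration, the deserialised pose payload might look like this
# (field names from the comment above; the value shapes are assumptions):
# {"atom_coords": [[0.0, 0.0, 0.0], ...],
#  "atom_elements": ["C", "H", ...],
#  "atom_bonds": [[0, 1], ...]}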
def test_tojson_keeporder():
    """ Tests: tojson_keeporder """
    print('::: TEST: test_tojson_keeporder()')

    edict_with_all = _get_orig__edict_with_all()
    new_reobj_all__jdumps_keeporder = edict_with_all.tojson_keeporder()
    new_reobj_all = jloads(new_reobj_all__jdumps_keeporder, object_pairs_hook=OrderedDict)

    # note: not equal because tuples are changed to lists in json
    # ok_(edict_with_all == new_reobj_all, msg=None)
    ok_(isinstance(new_reobj_all, dict) and not isinstance(new_reobj_all, Edict), msg=None)

    ok_(edict_with_all['edict1'] == new_reobj_all['edict1'], msg=None)
    ok_(isinstance(new_reobj_all['edict1'], dict) and not isinstance(new_reobj_all['edict1'], Edict), msg=None)

    ok_(edict_with_all['rdict1'] == new_reobj_all['rdict1'], msg=None)
    ok_(isinstance(new_reobj_all['rdict1'], dict) and not isinstance(new_reobj_all['rdict1'], Rdict), msg=None)

    ok_(edict_with_all['edictf1'] == new_reobj_all['edictf1'], msg=None)
    ok_(isinstance(new_reobj_all['edictf1'], dict) and not isinstance(new_reobj_all['edictf1'], RdictF), msg=None)

    ok_(edict_with_all['edictio1'] == new_reobj_all['edictio1'], msg=None)
    ok_(isinstance(new_reobj_all['edictio1'], dict) and not isinstance(new_reobj_all['edictio1'], RdictIO), msg=None)
    ok_(edict_with_all['edictio1'].key_order == ['edictio_inner1', 'edictio_inner2', 'edictio_inner3'] and
        edict_with_all['edictio1'].key_order == [key for key in new_reobj_all['edictio1'].keys()], msg=None)

    ok_(edict_with_all['edictfo1'] == new_reobj_all['edictfo1'], msg=None)
    ok_(isinstance(new_reobj_all['edictfo1'], dict) and not isinstance(new_reobj_all['edictfo1'], RdictFO), msg=None)
    ok_(edict_with_all['edictfo1'].key_order == ['edictfo_inner1', 'edictfo_inner2', 'edictfo_inner3'] and
        edict_with_all['edictfo1'].key_order == [key for key in new_reobj_all['edictfo1'].keys()], msg=None)

    ok_(edict_with_all['edictfo2_1'] == new_reobj_all['edictfo2_1'], msg=None)
    ok_(isinstance(new_reobj_all['edictfo2_1'], dict) and not isinstance(new_reobj_all['edictfo2_1'], RdictFO2), msg=None)
    ok_(edict_with_all['edictfo2_1'].key_order == ['edictfo2_inner1', 'edictfo2_inner2', 'edictfo2_inner3'] and
        edict_with_all['edictfo2_1'].key_order == [key for key in new_reobj_all['edictfo2_1'].keys()], msg=None)

    ok_(edict_with_all['elist1'] == new_reobj_all['elist1'], msg=None)
    ok_(isinstance(new_reobj_all['elist1'], list) and not isinstance(new_reobj_all['elist1'], Elist), msg=None)

    ok_(edict_with_all['rlist1'] == new_reobj_all['rlist1'], msg=None)
    ok_(isinstance(new_reobj_all['rlist1'], list) and not isinstance(new_reobj_all['rlist1'], Rlist), msg=None)

    ok_(edict_with_all['rlistf1'] == new_reobj_all['rlistf1'], msg=None)
    ok_(isinstance(new_reobj_all['rlistf1'], list) and not isinstance(new_reobj_all['rlistf1'], RlistF), msg=None)

    # note: not equal because tuples are changed to lists in json
    # ok_(edict_with_all['etuple1'] == new_reobj_all['etuple1'], msg=None)
    ok_(isinstance(new_reobj_all['etuple1'], list) and not isinstance(new_reobj_all['etuple1'], Etuple), msg=None)

    # note: not equal because tuples are changed to lists in json
    # ok_(edict_with_all['lmatrix1'] == new_reobj_all['lmatrix1'], msg=None)
    ok_(isinstance(new_reobj_all['lmatrix1'], list) and not isinstance(new_reobj_all['lmatrix1'], Lmatrix), msg=None)

    # note: not equal because tuples are changed to lists in json
    # ok_(edict_with_all['lmatrixf1'] == new_reobj_all['lmatrixf1'], msg=None)
    ok_(isinstance(new_reobj_all['lmatrixf1'], list) and not isinstance(new_reobj_all['lmatrixf1'], LmatrixF), msg=None)

    # some data checks
    ok_(edict_with_all['edictfo1']['edictfo_inner2'] == new_reobj_all['edictfo1']['edictfo_inner2'] and
        new_reobj_all['edictfo1']['edictfo_inner2'] == 'edictfo_inner2 value', msg=None)
    ok_(edict_with_all['rlist1'][1] == new_reobj_all['rlist1'][1] and
        new_reobj_all['rlist1'][1] == 'rlist_inner value2', msg=None)
    ok_(edict_with_all['lmatrixf1'][1][2] == new_reobj_all['lmatrixf1'][1][2] and
        new_reobj_all['lmatrixf1'][1][2] == 125, msg=None)

    # Check extra_key_order
    edict_with_all = _get_orig__edict_with_all()
    new_reobj_all__jdumps_keeporder = edict_with_all.tojson_keeporder(use_extra_key_order=True)
    new_reobj_all = jloads(new_reobj_all__jdumps_keeporder, object_pairs_hook=OrderedDict)

    # not the same because we use: use_extra_key_order
    # ok_(edict_with_all['edictio1'] == new_reobj_all['edictio1'], msg=None)
    ok_(isinstance(new_reobj_all['edictio1'], dict) and not isinstance(new_reobj_all['edictio1'], RdictIO), msg=None)
    ok_(edict_with_all['edictio1'].extra_key_order == ['edictio_inner2', 'edictio_inner3', 'edictio_inner1'] and
        edict_with_all['edictio1'].extra_key_order == [key for key in new_reobj_all['edictio1'].keys()], msg=None)

    # not the same because we use: use_extra_key_order
    # ok_(edict_with_all['edictfo1'] == new_reobj_all['edictfo1'], msg=None)
    ok_(isinstance(new_reobj_all['edictfo1'], dict) and not isinstance(new_reobj_all['edictfo1'], RdictFO), msg=None)
    ok_(edict_with_all['edictfo1'].extra_key_order == ['edictfo_inner2', 'edictfo_inner3', 'edictfo_inner1'] and
        edict_with_all['edictfo1'].extra_key_order == [key for key in new_reobj_all['edictfo1'].keys()], msg=None)

    # not the same because we use: use_extra_key_order
    # ok_(edict_with_all['edictfo2_1'] == new_reobj_all['edictfo2_1'], msg=None)
    ok_(isinstance(new_reobj_all['edictfo2_1'], dict) and not isinstance(new_reobj_all['edictfo2_1'], RdictFO2), msg=None)
    ok_(edict_with_all['edictfo2_1'].extra_key_order == ['edictfo2_inner2', 'edictfo2_inner3', 'edictfo2_inner1'] and
        edict_with_all['edictfo2_1'].extra_key_order == [key for key in new_reobj_all['edictfo2_1'].keys()], msg=None)
from json import loads as jloads

ids = []
with open('items.json') as fp:
    desc = jloads(fp.read())
for item in desc:
    ids.append(int(item['urlid']))
# list.sort() sorts in place and returns None, so sort first, then print
ids.sort()
print ids
#! /usr/bin/env python3
from requests import get
from json import loads as jloads

w = get("https://ipinfo.io")
ipinfo = jloads(w.text)
if "ip" not in ipinfo:
    print("error 001")
    print("#ff0000")
    print("#ff0000")
    exit(1)
elif "country" not in ipinfo:
    print("error 002")
    print("#ff0000")
    print("#ff0000")
    exit(1)
ip = ipinfo["ip"].strip()
country = ipinfo["country"].strip()
# print(ipinfo)
if ip == "45.32.233.31":
    print(ip)
    print("#00ff00")
    print("#00ff00")
elif country == "HR":
    print(ip)
    print("#ffff00")
    print("#ffff00")
try:
    s.connect((IP, PORT))
    a = s.sendall
    r = s.recv
    for d in DATA:
        d = jdumps(d)
        l = len(d)
        # length-prefixed framing: 4-byte big-endian length, then the payload
        a(pack("!I", l))
        a(d)
        l = r(4)
        l = unpack("!I", l)[0]
        d = r(l)
        d = jloads(d)
        # Success
        print(d)
except socket.error, e:
    # Skip errors
    print(e)
finally:
    try:
        s.close()
    except socket.error, e:
        pass
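# Hedged helper sketch: recv(n) may return fewer than n bytes, so a robust
# reader of this length-prefixed framing loops until the count is satisfied.
def recv_exact(sock, n):
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise socket.error("connection closed mid-message")
        buf += chunk
    return buf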
def look_for_new_video():
    """ Looks for the new video of the channel specified. """
    # api key and channel id
    api_key = "YOUTUBE API KEY HERE"  # GET IT FROM HERE https://console.developers.google.com/apis/api/youtube.googleapis.com
    channel_id = "CHANNEL ID YOU WANT TO TRACK"
    channel_name = "CHANNEL NAME YOU WANT TO TRACK"

    # base video url for youtube and base search url for the youtube api
    base_video_url = "https://www.youtube.com/watch?v="
    base_search_url = "https://www.googleapis.com/youtube/v3/search?"

    # main url for api search
    url = base_search_url + f"key={api_key}&channelId={channel_id}&part=snippet,id&order=date&maxResults=1"

    # initialising the old video id
    r = rget(url).text
    parser = jloads(r)
    old_vidID = parser['items'][0]['id']['videoId']

    tries = 0
    while True:
        # fetching the newest video id until a new video is published
        r = rget(url).text
        parser = jloads(r)
        new_vidID = parser['items'][0]['id']['videoId']

        # no new video published yet, i.e. the new video id is the same as the old one
        if new_vidID == old_vidID:
            tries += 1
            print(f"Try {tries}: No new video!")
            sleep(30)
        # a new video has been published, i.e. the new video id differs
        else:
            try:
                # fetching the video title from the api data
                title = parser['items'][0]['snippet']['title']
                # initialising the toaster object for notifications
                toaster = ToastNotifier()
                # alerting the user with a notification and speech
                toaster.show_toast(f"Youtube Tracker",
                                   f"New video from {channel_name} has arrived!\nTitle: {title}",
                                   duration=5)
                speak(f"New video has arrived! Title is:")
                speak(title)
                # opening the video in the default browser
                videoURL = base_video_url + new_vidID
                webbrowser.open(videoURL)
                sleep(10)
                like_the_video()
            except KeyboardInterrupt:
                raise SystemExit
            except Exception as e:
                print(e)
def capture_logic(packet):
    _layers, hex_payloads, raw_payloads, _fields, _raw, _hex = [], {}, {}, {}, 'None', 'None'
    _layers = list(self.get_layers(packet))
    try:
        if _q_s.method == "ALL":
            # only incoming packets - hmmmm
            received = False
            if packet.haslayer(Ether) and packet[Ether].dst == get_if_hwaddr(conf.iface).lower():
                if packet[Ether].src != get_if_hwaddr(conf.iface).lower():
                    for layer in _layers:
                        try:
                            _fields[layer] = packet[layer].fields
                            if "load" in _fields[layer]:
                                raw_payloads[layer] = _fields[layer]["load"]
                                hex_payloads[layer] = hexlify(_fields[layer]["load"])
                                received = True
                        except Exception as e:
                            pass
                    dumped = jdumps(
                        {'type': 'received',
                         'time': datetime.now().isoformat(),
                         'ip': _q_s.current_ip,
                         'mac': _q_s.current_mac,
                         'layers': _layers,
                         'fields': _fields,
                         "payload": hex_payloads},
                        cls=ComplexEncoder)
                    _q_s.logs.insert(jloads(dumped))
            if not received and packet.haslayer(Ether) and packet[Ether].src == get_if_hwaddr(conf.iface).lower():
                for layer in _layers:
                    try:
                        _fields[layer] = packet[layer].fields
                        if "load" in _fields[layer]:
                            raw_payloads[layer] = _fields[layer]["load"]
                            hex_payloads[layer] = hexlify(_fields[layer]["load"])
                    except Exception as e:
                        pass
                dumped = jdumps(
                    {'type': 'sent',
                     'time': datetime.now().isoformat(),
                     'ip': _q_s.current_ip,
                     'mac': _q_s.current_mac,
                     'layers': _layers,
                     'fields': _fields,
                     "payload": hex_payloads},
                    cls=ComplexEncoder)
                _q_s.logs.insert(jloads(dumped))
    except BaseException:
        pass
    stdout.flush()
def _load(self):
    if not pathexists(self.fpath):
        self.conf = {}
        return
    with open(self.fpath, 'rt') as fp:
        self.conf = jloads(fp.read())
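# Complementary save sketch (an assumption, not part of the original class;
# presumes `jdumps` is imported as `from json import dumps as jdumps`):
def _save(self):
    with open(self.fpath, 'wt') as fp:
        fp.write(jdumps(self.conf))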
def get_status(connection):
    """ Gets the server status """
    if not connection:
        connection = connect()
    connectionstring = 'https://' + config.base_url \
        + '/' + config.api_version
    logger.info('Trying to get status: %s' % connectionstring)
    logger.info('Applying header: %s' % headers)
    connection.request('GET', connectionstring, '', headers)
    try:
        response = connection.getresponse()
    except HTTPException, exception:
        logger.error('Error getting status: %s' % exception)
    return jloads(response.read())

def login(connection, hashkey):
    """ Logs in to the server """
    parameters = urlencode({'service': config.service, 'auth': hashkey})
    connectionstring = 'https://' + config.base_url + '/' \
        + config.api_version + '/login'
    logger.info('Trying to login to REST: %s' % connectionstring)
    logger.info('Applying header: %s' % headers)
    connection.request('POST', connectionstring, parameters, headers)
    try:
        response = connection.getresponse()
    except HTTPException, exception:
def _data_loader(self, data):
    json_data = jloads(data)
    return json_data
def loadObject(obj, **kwargs):
    try:
        return jloads(obj, **kwargs)
    except ValueError:
        # json.loads raises ValueError (JSONDecodeError), not UnpicklingError,
        # so catching the latter would leave LoadError unreachable
        raise LoadError('failed to load the object')
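# Usage sketch for loadObject():
try:
    obj = loadObject('{"a": 1}')
except LoadError:
    obj = None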