def cleanup_json(json: str) -> str:
    """Remove Markdown code fences wrapping a JSON payload, if present.

    Accepts ``` / ```json / ```py fenced blocks (the "py" tag is not
    documented but accepted as well) and single-backtick inline code.

    Fix: the original used str.strip("```json"), which strips any of the
    *characters* '`', 'j', 's', 'o', 'n' from both ends -- not the prefix
    string -- so payloads such as "```snow```" were corrupted to "w".
    """
    if json.startswith("```") and json.endswith("```") and len(json) >= 6:
        inner = json[3:-3]
        # Drop an optional language tag placed right after the opening fence.
        for tag in ("json", "py"):
            if inner.startswith(tag):
                inner = inner[len(tag):]
                break
        return inner
    elif json.startswith("`") and json.endswith("`"):
        # inline codeblocks
        return json.strip("`")
    return json
def save_json(json='null', filename='null'):
    """Write *json* (stripped, UTF-8 encoded) to CouchDB_json/<filename>.json.

    The CouchDB design-document name '_design/library' contains a '/',
    which is illegal in a file name, so it is percent-encoded first.

    Fixes: Python 2 `print` statement replaced with the function form, and
    the file handle is now closed deterministically via a context manager
    (the original left it to the GC on error paths).
    """
    if filename == '_design/library':
        filename = filename.replace('/', '%2F')
    path = 'CouchDB_json/' + filename + '.json'
    print(path)
    # Binary mode + explicit encode preserves the original's byte output.
    with open(path, 'wb') as fh:
        fh.write(json.strip().encode('utf-8'))
def Json_Save_Json_File(tmp_json_format='', tmp_doc_id=''):
    """Persist a JSON document as CouchDB_json/<tmp_doc_id>.json (UTF-8).

    *tmp_json_format* is coerced to str, stripped, and written as bytes.

    Fix: the original opened the file without a context manager, leaking
    the handle if the write raised.
    """
    json = str(tmp_json_format)
    dir_path = 'CouchDB_json'
    file_path = dir_path + '/' + tmp_doc_id + '.json'
    print(file_path)
    # 'wb' + explicit encode matches the original byte-for-byte output.
    with open(file_path, 'wb') as fh:
        fh.write(json.strip().encode('utf-8'))
def strip_json(json):
    """Trim *json* down to the outermost '{' ... '}' span.

    Leading/trailing whitespace is removed first; any prefix before the
    first '{' and any suffix after the last '}' is discarded. Raises
    ValueError (from index/rindex) if the braces are absent entirely.
    """
    text = json.strip()
    lo = 0 if text.startswith("{") else text.index("{")
    hi = len(text) if text.endswith("}") else text.rindex("}") + 1
    return text[lo:hi]
def strip_json(json):
    """Remove any text before the first '{' and after the last '}'.

    Whitespace is stripped first. A ValueError propagates from
    index/rindex when no brace is present.
    """
    cleaned = json.strip()
    if cleaned[:1] != "{":
        # cut everything up to (but not including) the first brace
        cleaned = cleaned[cleaned.index("{"):]
    if cleaned[-1:] != "}":
        # cut everything after the last closing brace
        cleaned = cleaned[: cleaned.rindex("}") + 1]
    return cleaned
def from_json(json: Union[str, Dict[str, Any], Tuple[str, Dict[str, Any]]]) -> Trie:
    """Reconstruct a Trie from its JSON-ish serialization.

    Accepted shapes:
      * str                -- a leaf; the (stripped) string becomes the value.
      * (value, children)  -- list/tuple pair: node value plus child mapping.
      * {key: subtree}     -- dict: a value-less node whose children are
                              built recursively from each subtree.

    Also recomputes the node's `count` from its value and children.
    NOTE(review): `count += len(child)` assumes Trie defines __len__ --
    confirm against the Trie class definition (not visible here).
    """
    trie = Trie()
    # Empty dict means an empty trie; return the fresh node unchanged.
    if json == {}:
        return trie
    if isinstance(json, str):
        json = json.strip()
        trie.value = json
        trie.children = {}
    else:
        children: Dict[str, Any] = {}
        if isinstance(json, (list, tuple)):
            # (value, children) pair
            trie.value, children = json  # type: ignore
        elif isinstance(json, dict):
            # bare mapping: node carries no value of its own
            trie.value = None
            children = json  # type: ignore
        # Recurse into each child subtree.
        for key, value in children.items():
            trie.children[key] = Trie.from_json(value)
    # Recompute the subtree count: one for a non-None value on this node,
    # plus the counts of all children.
    count = 0
    if trie.value is not None:
        count += 1
    for child in trie.children.values():
        count += len(child)
    trie.count = count
    return trie
def get_json(self, str1):
    """Extract the text enclosed by the outer parentheses of *str1*.

    Splits the input into maximal parenthesis-free runs, drops the first
    and last run, joins the rest, and strips surrounding whitespace.
    """
    segments = re.findall(r'[^()]+', str1)
    middle = segments[1:-1]
    return "".join(middle).strip()
import json
import csv
import os, sys

# Split each "key,json" CSV line of the input files into its own .json
# file, sharded into a directory tree keyed by the first `depth`
# characters of the key.
suffix = '.json'
prefix = 'candyJar::'
depth = 4
out_root = 'output'  # renamed from `dir`, which shadowed the builtin

if not os.path.exists(out_root):
    os.makedirs(out_root)

for arg in sys.argv[1:]:
    with open(arg, 'r') as f:
        for line in f:
            # Split on the FIRST comma only: the JSON payload may itself
            # contain commas. (Renamed from `json`, which shadowed the
            # imported module.)
            key, payload = line.rstrip().split(',', 1)
            key = key.strip()
            keychars = list(key)[0:depth]
            keychars.insert(0, out_root)
            tdir = os.sep.join(keychars)
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(tdir, exist_ok=True)
            payload = payload.strip('\'')
            # Fix: the original never closed this handle.
            with open(tdir + os.sep + prefix + key + suffix, 'w') as jsonfile:
                jsonfile.write('{:s}\n'.format(payload))
def drawRuleDetail():
    """Render the rule identified by the `ruleId` query parameter as a
    hand-built JSON-like string for the drawing UI.

    NOTE(review): the output is assembled by string concatenation and is
    not guaranteed to be strictly valid JSON (see the unquoted `{Filter:[`
    below); consider rebuilding this with a dict + json.dumps.
    NOTE(review): indentation reconstructed from a collapsed source line --
    the nesting of some sections (e.g. the Points block relative to the
    AreaOfInterest branch) should be confirmed against the original file.
    """
    ruleId = request.args.get('ruleId', '')
    rule = find_RuleEdit(ruleId)
    json = "";
    # NOTE(review): `ruleId` defaults to '' above, so this is always true;
    # presumably meant to guard a missing parameter.
    if ruleId != None:
        # Looked up again, redundantly (already fetched above).
        rule = find_RuleEdit(ruleId)
        if (rule.Direction != None):
            # Normalize the direction per draw type.
            if (rule.DrawType == "TripwireEventDefinition"):
                rule.Direction = "LeftToRight"
            if (rule.DrawType == "AreaOfInterestEventDefinition"):
                rule.Direction = "Ground"
        # --- Rule envelope with OVReady namespaces ---
        json += "{"
        json += '\"Rule\":{'
        json += '\"xmlns_xsi\":' + '\"http://www.w3.org/2001/XMLSchema-instance\",'
        json += '\"xmlns_xsd\":' + '\"http://www.w3.org/2001/XMLSchema\",'
        json += '\"xmlns_xlink\":' + '\"http://www.w3.org/1999/xlink\",'
        json += '\"xmlns\":' + '\"http://www.objectvideo.com/schemas/ovready\",'
        if (rule.Id != None):
            json += '\"ID\":' + "\"" + rule.Id + "\","
        json += "\"Name\":" + "\"" + rule.RuleType.Name + "\","
        json += "\"IsActive\":" + "\"" + str(rule.IsActive) + "\","
        # --- ViewInfo: existing rules get an empty href, new ones point at
        # the default view of the rule's channel ---
        json += "\"ViewInfo\":{" + "\"-xlink:type\":" + "\"simple\","
        json += "\"ID\":" + "\"0xf8f28c907ab7e1119d63e94ba45bf61b\","
        if (rule.Id != None):
            json += "\"xlink_href\":" + "\"\","
            json += "\"Name\":" + "\"\"},"
        else:
            json += "\"xlink_href\":" + "\"/api.rest/channels/" + rule.ChannelId + "/views/" + "0xf8f28c907ab7e1119d63e94ba45bf61b\","
            json += "\"Name\":" + "\"Default View\"},"
        # --- EventDefinition: type, classifications, geometry, filters ---
        json += "\"EventDefinition\":{"
        json += "\"xsi_type\":" + "\"" + rule.DrawType + "\","
        json += "\"Classifications\":{"
        classfications = []
        classfications = str(rule.Classification).split(',')
        classficationLength = len(classfications) - 1
        i = 0
        for classfication in classfications:
            # first instance (was: 第一个实例) -- every entry but the last
            # gets a trailing comma
            if (i != classficationLength):
                json += "\"Classification\":" + "\"" + classfication + "\","
            else:
                json += "\"Classification\":" + "\"" + classfication + "\""
            i += 1
        json += "},"
        if (rule.DrawType == "AreaOfInterestEventDefinition"):
            json += "\"PlaneType\":" + "\"" + rule.Direction + "\","
            json += "\"Actions\":{" + "\"xsi_type\":\"" + "TakeAwayAreaAction" + "\"},"
        json += "\"Points\":{"
        if (rule.Points != None):
            json += "\"Point\":["
            # rule.Points looks like "(x1,y1),(x2,y2),..." -- split on '),'
            points = []
            points = str(rule.Points).split('),')
            i = 0
            pointLength = len(points) - 1
            for point in points:
                #if (i != pointLength):
                innerPoints = point.split(',')
                json += "{"
                json += "\"X\":" + "\"" + innerPoints[0].replace('(', '').replace(')', '') + "\","
                json += "\"Y\":" + "\"" + innerPoints[1].replace('(', '').replace(')', '') + "\""
                # Every point gets a trailing comma; the strip(',') below
                # removes the final one, replacing the disabled last-element
                # special case kept in the dead block underneath.
                json += "},"
                '''
                else:
                    json += "{"
                    json += "\"X\":" + "\"" + innerPoints[0].replace('(', '').replace(')', '') + "\","
                    json += "\"Y\":" + "\"" + innerPoints[1].replace('(', '').replace(')', '') + "\"";
                    json += "}"
                i += 1
                '''
            json = json.strip(',')
            json += "]},"
        # --- Optional min/max object-size filters ---
        if hasattr(rule,'MinFilterNearX') or hasattr(rule,'MaxFilterNearX'):
            # NOTE(review): `Filter` is emitted unquoted here -- invalid JSON.
            json += "\"Filters\":[{Filter:["
            if hasattr(rule,'MinFilterNearX'):
                json += "{"
                json += "\"xsi:type\":" + "\"MinimumSizeFilter\","
                json += "\"NearRectangle\":{"
                json += "\"X\":" + "\"" + str(rule.MinFilterNearX) + "\","
                json += "\"Y\":" + "\"" + str(rule.MinFilterNearY) + "\","
                json += "\"Width\":" + "\"" + str(rule.MinFilterNearWidth) + "\","
                json += "\"Height\":" + "\"" + str(rule.MinFilterNearHeight) + "\"},"
                json += "\"FarRectangle\":{"
                json += "\"X\":" + "\"" + str(rule.MinFilterFarX) + "\","
                json += "\"Y\":" + "\"" + str(rule.MinFilterFarY) + "\","
                json += "\"Width\":" + "\"" + str(rule.MinFilterFarWidth) + "\","
                json += "\"Height\":" + "\"" + str(rule.MinFilterFarHeight) + "\""
                json += "}},"
            if hasattr(rule,'MaxFilterNearX'):
                json += "{"
                json += "\"xsi:type\":" + "\"MaximumSizeFilter\","
                json += "\"NearRectangle\":{"
                json += "\"X\":" + "\"" + str(rule.MaxFilterNearX) + "\","
                json += "\"Y\":" + "\"" + str(rule.MaxFilterNearY) + "\","
                json += "\"Width\":" + "\"" + str(rule.MaxFilterNearWidth) + "\","
                json += "\"Height\":" + "\"" + str(rule.MaxFilterNearHeight) + "\"},"
                json += "\"FarRectangle\":{"
                json += "\"X\":" + "\"" + str(rule.MaxFilterFarX) + "\","
                json += "\"Y\":" + "\"" + str(rule.MaxFilterFarY) + "\","
                json += "\"Width\":" + "\"" + str(rule.MaxFilterFarWidth) + "\","
                json += "\"Height\":" + "\"" + str(rule.MaxFilterFarHeight) + "\""
                json += "}}"
            json +="]}],"
        # Drop any trailing comma before closing EventDefinition.
        json = json.strip(',')
        json+="},"
        # --- ResponseDefinition and closing braces ---
        json += "\"ResponseDefinition\":{" + "\"xsi_type\":" + "\"SimpleMessageResponse\","
        json += "\"Message\":" + "\"" + rule.RuleType.Description + "\"}}}"
    return json
def json_to_object(self, json):
    """Deserialize *json* via toObj; blank/whitespace-only input yields None."""
    if json.strip():
        return toObj(json)
    return None
# Then invoke /opt/couchbase/bin/cbdocloader -u <userID> -p <password> -n host:port -b <bucketname> output
import json
import csv
import os, sys

# Shard each "key,json" CSV line into its own file under a directory tree
# keyed by the first `depth` characters of the key.
suffix = '.json'
prefix = 'candyJar::'
depth = 4
out_root = 'output'  # renamed from `dir`, which shadowed the builtin

if not os.path.exists(out_root):
    os.makedirs(out_root)

for arg in sys.argv[1:]:
    with open(arg, 'r') as f:
        for line in f:
            # Split on the FIRST comma only -- the JSON payload may contain
            # commas. (Renamed from `json`, which shadowed the module.)
            key, payload = line.rstrip().split(',', 1)
            key = key.strip()
            keychars = list(key)[0:depth]
            keychars.insert(0, out_root)
            tdir = os.sep.join(keychars)
            # exist_ok avoids the original check-then-create race.
            os.makedirs(tdir, exist_ok=True)
            payload = payload.strip('\'')
            # Fix: the original never closed this handle.
            with open(tdir + os.sep + prefix + key + suffix, 'w') as jsonfile:
                jsonfile.write('{:s}\n'.format(payload))
# Round-trip a small non-finite-float dataset through Spark/parquet and
# emit a whitespace-normalized one-object-per-line reference file.
json = """
[
    {"f": NaN, "d": NaN},
    {"f": Infinity, "d": Infinity},
    {"f": -Infinity, "d": -Infinity}
]
"""
data = spark.read.schema(schema).json(sc.parallelize([json]))
# data.show()  # for debugging

task_name = args.get('task_name') or 'task'
comps = args.get('compression') or ['none']
for comp in comps:
    # Renamed from `file`, which shadowed the builtin.
    out_file = 'artifacts/' + task_name + '_' + spark.version + '_' + comp
    data.repartition(1).write.parquet(out_file, compression=comp)
spark.stop()

# Strip the surrounding brackets, then collapse whitespace so each record
# sits on its own line in compact form.
json_out = json.strip().lstrip('[').rstrip(']').strip()
# Fix: raw strings for the regex patterns -- '\s' in a plain literal is an
# invalid escape sequence (SyntaxWarning, slated to become an error).
subs = ((r'\s*},\s*\n\s*{\s*', '}\n{'),
        (r',\n\s*', ', '),
        (r'{\s+', '{'),
        (r'\s+}', '}'))
for pattern, repl in subs:
    json_out = re.sub(pattern, repl, json_out)
json_out = json_out + '\n'
with open('artifacts/' + task_name + '_' + 'reference.json', 'w') as ref:
    ref.write(json_out)
return json_data # For JSON structure reference # 3 letter ISO 3166-1 alpha-3 code code id # 2 letter ISO 3166-1 alpha-2 code code iso2Code # Name name # Region id region.id # Region Name region.value # Income Level incomeLevel.value # Lending Type lendingType.id # Capital City capitalCity # Longitude longitude # Latitude latitude country_list = [json.strip('.json') for json in os.listdir('bin')] # list of country JSON files - .json extension # key is user input and value is converted into the matching json key name # for nested keys: region id for example, value is a list to work with keyword_list = {'name': 'name', 'id': 'id', 'iso2code': 'iso2Code', 'region id': ['region','id'], 'region name': ['region', 'value'], 'income level': ['incomeLevel', 'value'], 'lending type': ['lendingType', 'id'], 'capital city': 'capitalCity', 'longitude': 'longitude', 'latitude': 'latitude'} def country_json_parser(country, json_data, keyword, query): # if it's a list, we need to get data out of the list if isinstance(json_data, (tuple, list)): for item in json_data: country_json_parser(country, item, keyword, query) #