def _styles(self, xlsxfile, namespaces):
    """Read the cell style records (xl/styles.xml) into Style objects.

    Each <xf> element under cellXfs yields one Style built from its
    'applyNumberFormat' and 'numFmtId' attributes (None when absent).
    """
    styles_root = ElementTree.fromstring(xlsxfile.read('xl/styles.xml'))
    styles = []
    for xf_elem in styles_root.findall('.//main:cellXfs/main:xf', namespaces):
        xf = xml2dict(xf_elem)
        styles.append(Style(xf.get('applyNumberFormat'), xf.get('numFmtId')))
    return styles
def payment_verify(params, app_type):
    """Verify a CMGE payment callback signature and decode its order data.

    Args:
        params: callback parameters; 'sign' carries the encoded signature
            and 'nt_data' the encrypted order payload.
        app_type: key into PLATFORM_CMGE_SETTINGS selecting the app config.

    Returns:
        The decoded order info as a dict, or None when the signature
        check fails.  The decrypted nt_data is XML of the form:
        '<?xml version="1.0" encoding="UTF-8" standalone="no"?>
         <cmge_message> <message> <login_name>zzzzzzzzcgu</login_name>
         <game_role>h1buemf-h1-9-1396861797</game_role>
         <order_id>303222UO10000047588</order_id>
         <pay_time>2014-04-07 17:05:27</pay_time> <amount>0.01</amount>
         <status>0</status> </message> </cmge_message>'
    """
    conf = PLATFORM_CMGE_SETTINGS[app_type]
    expected_sign = cmge_decode(params['sign'], conf['syn_key'])

    # md5 of the raw nt_data, then transpose fixed hex-digit pairs — this
    # mirrors the scrambling the platform applies before signing.
    digest = list(hashlib.md5('nt_data=%s' % params['nt_data']).hexdigest())
    for i, j in ((1, 13), (5, 17), (7, 23)):
        digest[i], digest[j] = digest[j], digest[i]
    computed_sign = ''.join(digest)

    if computed_sign != expected_sign:
        return None
    return utils.xml2dict(cmge_decode(params['nt_data'], conf['syn_key']))
def __init__(self, xml, rootkey=None):
    """Wrap a raw XML response and expose its parsed dict form.

    Args:
        xml: raw XML response text.
        rootkey: optional key naming the payload element of interest.
    """
    self.original = xml
    self._rootkey = rootkey
    self._mydict = utils.xml2dict().fromstring(remove_namespace(xml))
    # dict.keys() is a set-like view on Python 3, not an indexable list,
    # so materialize the keys before taking the first (root) one.
    root_key = list(self._mydict)[0]
    self._response_dict = self._mydict.get(root_key, self._mydict)
def _sheets(self, xlsxfile, namespaces):
    """Parse the workbook's sheets and their rows from the xlsx archive.

    Builds one Sheet per <sheet> element in xl/workbook.xml, then fills
    each Sheet's .rows from the matching xl/worksheets/sheet<N>.xml file,
    where each Row holds Datum objects for its cells.
    """
    workbook_root = ElementTree.fromstring(xlsxfile.read('xl/workbook.xml'))
    sheets = []
    for sheet_elem in workbook_root.findall('.//main:sheet', namespaces):
        sheet_dic = xml2dict(sheet_elem)
        sheets.append(Sheet(sheet_dic.get('sheetId'), sheet_dic.get('name')))

    for sheet in sheets:
        sheet_root = ElementTree.fromstring(
            xlsxfile.read('xl/worksheets/sheet%s.xml' % sheet.id))
        rows = []
        for row_elem in sheet_root.findall('.//main:row', namespaces):
            row_dic = xml2dict(row_elem)
            cells = [Datum(cell.get('r'), cell.get('s'), cell.get('t'),
                           cell.get('children')[0].get('text'))
                     for cell in row_dic.get('children')]
            rows.append(Row(row_dic.get('r'), row_dic.get('spans'), cells))
        sheet.rows = rows
    return sheets
# NOTE(review): this fragment begins mid-call — the opening of the call whose
# last argument is utils.signed_device_id() lies outside this chunk, so the
# line is not independently parseable and is left byte-identical.
# What is visible: a Python 2 script body that requests 'system_config'
# through xml_maker, prints the response headers, converts the XML body with
# utils.xml2dict and dumps it as JSON; the trailing ''' opens a block that
# holds a JSON-based 'recommend_user' variant of the same request.
# HACK: the bare `except:` swallows every error before sys.exit() — flagged,
# not changed here.
utils.signed_device_id()) try: rsp = xml_maker.requestWithParams({'action':'system_config', 'ver':defs.VERSION, 'client':defs.CLIENT, 'active':'0'}) except: print 'request fail' sys.exit() print rsp.info() # print rsp.read() # system_config = ElementTree.parse(rsp).getroot() # d = utils.elem2dict(system_config) # print json.dumps(d, indent = 4, ensure_ascii = False) d = utils.xml2dict(rsp) print json.dumps(d, indent = 2, ensure_ascii = False) ''' json_maker = RequestMaker(defs.APP_KEY, utils.device_id(), utils.signed_device_id(), True) try: rsp = json_maker.requestWithParams({'action':'recommend_user', 'gender':'', 'page':'1', 'size':'3'}) except: print 'request fail' sys.exit() print rsp.info() recommend_user = json.load(rsp) print recommend_user
def __init__(self, xml, rootkey=None):
    """Wrap a raw XML response and expose its parsed dict form.

    Args:
        xml: raw XML response text.
        rootkey: optional key naming the payload element of interest.
    """
    self.original = xml
    self._rootkey = rootkey
    self._mydict = utils.xml2dict().fromstring(remove_namespace(xml))
    # FIX: dict.keys() returns a non-indexable view object on Python 3, so
    # self._mydict.keys()[0] raised TypeError there.  Materialize the keys
    # first — identical behavior on Python 2 and consistent with the sibling
    # __init__ elsewhere in this file that already does this.
    self._response_dict = self._mydict.get(list(self._mydict.keys())[0],
                                           self._mydict)
# NOTE(review): this fragment begins mid-dict-literal ('ver': ... inside a
# requestWithParams call whose opening lies outside this chunk), so the line
# is not independently parseable and is left byte-identical.
# What is visible: the tail of an xml_maker 'system_config' request (print
# headers, xml2dict -> JSON dump), then a ''' block containing a JSON-based
# 'recommend_user' request variant; a commented-out line hints the next step
# reads recommend_user['data']['recommend_user_list'].
# HACK: the bare `except:` swallows every error before sys.exit() — flagged,
# not changed here.
'ver': defs.VERSION, 'client': defs.CLIENT, 'active': '0' }) except: print 'request fail' sys.exit() print rsp.info() # print rsp.read() # system_config = ElementTree.parse(rsp).getroot() # d = utils.elem2dict(system_config) # print json.dumps(d, indent = 4, ensure_ascii = False) d = utils.xml2dict(rsp) print json.dumps(d, indent=2, ensure_ascii=False) ''' json_maker = RequestMaker(defs.APP_KEY, utils.device_id(), utils.signed_device_id(), True) try: rsp = json_maker.requestWithParams({'action':'recommend_user', 'gender':'', 'page':'1', 'size':'3'}) except: print 'request fail' sys.exit() print rsp.info() recommend_user = json.load(rsp) print recommend_user # user_list = recommend_user['data']['recommend_user_list']
def _get_meta(self): img_pattern = path.join(self.home(), "images", "*.jpg") img_filenames = sorted(glob(img_pattern)) n_imgs = len(img_filenames) # -- print "Parsing annotations..." meta = [] unique_object_names = [] n_objects = 0 img_ids = [] for ii, img_filename in enumerate(img_filenames): data = {} data['filename'] = img_filename # sha1 hash sha1 = hashlib.sha1(open(img_filename).read()).hexdigest() data['sha1'] = sha1 # image id img_basename = path.basename(path.split(img_filename)[1]) img_id = path.splitext(img_basename)[0] img_ids += [img_id] data['id'] = img_id # -- get xml filename xml_filename = path.join(self.home(), "annotations", "xmls", img_id + ".xml") if not path.exists(xml_filename): # annotation missing meta += [data] continue # -- parse xml xd = xml2dict(xml_filename) # image basename assert img_basename == xd['filename'] # source data['source'] = xd['source'] # owner (if available) if 'owner' in xd: data['owner'] = xd['owner'] # size / shape size = xd['size'] width = int(size['width']) height = int(size['height']) depth = int(size['depth']) data['shape'] = dict(height=height, width=width, depth=depth) # segmentation ? 
segmented = bool(xd['segmented']) data['segmented'] = segmented if segmented: data["filename_segmented"] = path.join(self.home(), "annotations", "trimaps", img_id + ".png") # objects with their bounding boxes objs = xd['object'] if isinstance(objs, dict): # case where there is only one bbox objs = [objs] objects = [] for obj in objs: # parse bounding box coordinates and convert them to valid # 0-indexed coordinates bndbox = obj.pop('bndbox') x_min = max(0, (int(np.round(float(bndbox['xmin']))) - 1)) x_max = min(width - 1, (int(np.round(float(bndbox['xmax']))) - 1)) y_min = max(0, (int(np.round(float(bndbox['ymin']))) - 1)) y_max = min(height - 1, (int(np.round(float(bndbox['ymax']))) - 1)) bounding_box = dict(x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max) assert (np.array(bounding_box) >= 0).all() obj['bounding_box'] = bounding_box n_objects += 1 if obj['name'] not in unique_object_names: unique_object_names += [obj['name']] # convert 'difficult' to boolean if 'difficult' in obj: obj['difficult'] = bool(int(obj['difficult'])) else: # assume difficult=False if key not present obj['difficult'] = False # convert 'truncated' to boolean if 'truncated' in obj: obj['truncated'] = bool(int(obj['truncated'])) else: # assume truncated=False if key not present obj['truncated'] = False objects += [obj] data['objects'] = objects # -- print progress n_done = ii + 1 status = ("Progress: %d/%d [%.1f%%]" % (n_done, len(img_filenames), 100. * n_done / n_imgs)) status += chr(8) * (len(status) + 1) print status, # -- append to meta meta += [data] print print " Number of images: %d" % len(meta) print " Number of unique object names: %d" % len(unique_object_names) print " Unique object names: %s" % unique_object_names # -- print "Parsing splits..." 
main_dirname = path.join(self.home(), "annotations") # We use 'aeroplane_{train,trainval}.txt' to get the list of 'train' # and 'val' ids train_filename = path.join(main_dirname, 'list.txt') assert path.exists(train_filename) train_ids = np.loadtxt(train_filename, dtype=str)[:, 0] trainval_filename = path.join(main_dirname, 'trainval.txt') assert path.exists(trainval_filename) trainval_ids = np.loadtxt(trainval_filename, dtype=str)[:, 0] splits = 'train', 'val', 'test' split_counts = dict([(split, 0) for split in splits]) for data in meta: img_id = data['id'] if img_id in trainval_ids: if img_id in train_ids: data['split'] = 'train' else: data['split'] = 'val' else: data['split'] = 'test' split_counts[data['split']] += 1 for split in splits: count = split_counts[split] # assert count > 0 print(" Number of images in '%s': %d" % (split, count)) meta = np.array(meta) return meta
def _get_meta(self): base_dirname = self.home('VOCdevkit', self.name) dirs = dict([(basename, path.join(base_dirname, basename)) for basename in os.listdir(base_dirname) if path.isdir(path.join(base_dirname, basename))]) img_pattern = path.join(dirs['JPEGImages'], "*.jpg") img_filenames = sorted(glob(img_pattern)) n_imgs = len(img_filenames) # -- print "Parsing annotations..." meta = [] unique_object_names = [] n_objects = 0 img_ids = [] for ii, img_filename in enumerate(img_filenames): data = {} data['filename'] = img_filename # sha1 hash sha1 = hashlib.sha1(open(img_filename).read()).hexdigest() data['sha1'] = sha1 # image id img_basename = path.basename(path.split(img_filename)[1]) img_id = path.splitext(img_basename)[0] img_ids += [img_id] data['id'] = img_id # -- get xml filename xml_filename = path.join(dirs['Annotations'], "%s.xml" % img_id) if not path.exists(xml_filename): # annotation missing meta += [data] continue # -- parse xml xd = xml2dict(xml_filename) # image basename assert img_basename == xd['filename'] # source data['source'] = xd['source'] # owner (if available) if 'owner' in xd: data['owner'] = xd['owner'] # size / shape size = xd['size'] width = int(size['width']) height = int(size['height']) depth = int(size['depth']) data['shape'] = dict(height=height, width=width, depth=depth) # segmentation ? segmented = bool(xd['segmented']) data['segmented'] = segmented if segmented: # TODO: parse segmentation data (in 'SegmentationClass') or # lazy-evaluate it ? 
pass # objects with their bounding boxes objs = xd['object'] if isinstance(objs, dict): # case where there is only one bbox objs = [objs] objects = [] for obj in objs: bndbox = obj.pop('bndbox') x_min = max(0, (int(np.round(float(bndbox['xmin']))) - 1)) x_max = min(width - 1, (int(np.round(float(bndbox['xmax']))) - 1)) y_min = max(0, (int(np.round(float(bndbox['ymin']))) - 1)) y_max = min(height - 1, (int(np.round(float(bndbox['ymax']))) - 1)) bounding_box = dict(x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max) assert (np.array(bounding_box) >= 0).all() obj['bounding_box'] = bounding_box n_objects += 1 if obj['name'] not in unique_object_names: unique_object_names += [obj['name']] objects += [obj] data['objects'] = objects # -- print progress n_done = ii + 1 status = ("Progress: %d/%d [%.1f%%]" % (n_done, len(img_filenames), 100. * n_done / n_imgs)) status += chr(8) * (len(status) + 1) print status, # -- append to meta meta += [data] print print " Number of images: %d" % len(meta) print " Number of unique object names: %d" % len(unique_object_names) print " Unique object names: %s" % unique_object_names # -- print "Parsing splits..." 
main_dirname = path.join(dirs['ImageSets'], 'Main') # We use 'aeroplane_{train,trainval}.txt' to get the list of 'train' # and 'val' ids train_filename = path.join(main_dirname, 'aeroplane_train.txt') assert path.exists(train_filename) train_ids = np.loadtxt(train_filename, dtype=str)[:, 0] trainval_filename = path.join(main_dirname, 'aeroplane_trainval.txt') assert path.exists(trainval_filename) trainval_ids = np.loadtxt(trainval_filename, dtype=str)[:, 0] splits = 'train', 'val', 'test' split_counts = dict([(split, 0) for split in splits]) for data in meta: img_id = data['id'] if img_id in trainval_ids: if img_id in train_ids: data['split'] = 'train' else: data['split'] = 'val' else: data['split'] = 'test' split_counts[data['split']] += 1 for split in splits: count = split_counts[split] assert count > 0 print(" Number of images in '%s': %d" % (split, count)) meta = np.array(meta) return meta
def _sharedstrs(self, xlsxfile, namespaces):
    """Return the shared-string texts from xl/sharedStrings.xml, in order."""
    strings_root = ElementTree.fromstring(xlsxfile.read('xl/sharedStrings.xml'))
    texts = []
    for t_elem in strings_root.findall('.//main:t', namespaces):
        texts.append(xml2dict(t_elem).get('text'))
    return texts
def _get_meta(self): img_pattern = path.join(self.home(), "images", "*.jpg") img_filenames = sorted(glob(img_pattern)) n_imgs = len(img_filenames) # -- print "Parsing annotations..." meta = [] unique_object_names = [] n_objects = 0 img_ids = [] for ii, img_filename in enumerate(img_filenames): data = {} data['filename'] = img_filename # sha1 hash sha1 = hashlib.sha1(open(img_filename).read()).hexdigest() data['sha1'] = sha1 # image id img_basename = path.basename(path.split(img_filename)[1]) img_id = path.splitext(img_basename)[0] img_ids += [img_id] data['id'] = img_id # -- get xml filename xml_filename = path.join(self.home(), "annotations", "xmls", img_id+".xml") if not path.exists(xml_filename): # annotation missing meta += [data] continue # -- parse xml xd = xml2dict(xml_filename) # image basename assert img_basename == xd['filename'] # source data['source'] = xd['source'] # owner (if available) if 'owner' in xd: data['owner'] = xd['owner'] # size / shape size = xd['size'] width = int(size['width']) height = int(size['height']) depth = int(size['depth']) data['shape'] = dict(height=height, width=width, depth=depth) # segmentation ? 
segmented = bool(xd['segmented']) data['segmented'] = segmented if segmented: data["filename_segmented"] = path.join(self.home(), "annotations", "trimaps", img_id+".png") # objects with their bounding boxes objs = xd['object'] if isinstance(objs, dict): # case where there is only one bbox objs = [objs] objects = [] for obj in objs: # parse bounding box coordinates and convert them to valid # 0-indexed coordinates bndbox = obj.pop('bndbox') x_min = max(0, (int(np.round(float(bndbox['xmin']))) - 1)) x_max = min(width - 1, (int(np.round(float(bndbox['xmax']))) - 1)) y_min = max(0, (int(np.round(float(bndbox['ymin']))) - 1)) y_max = min(height - 1, (int(np.round(float(bndbox['ymax']))) - 1)) bounding_box = dict(x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max) assert (np.array(bounding_box) >= 0).all() obj['bounding_box'] = bounding_box n_objects += 1 if obj['name'] not in unique_object_names: unique_object_names += [obj['name']] # convert 'difficult' to boolean if 'difficult' in obj: obj['difficult'] = bool(int(obj['difficult'])) else: # assume difficult=False if key not present obj['difficult'] = False # convert 'truncated' to boolean if 'truncated' in obj: obj['truncated'] = bool(int(obj['truncated'])) else: # assume truncated=False if key not present obj['truncated'] = False objects += [obj] data['objects'] = objects # -- print progress n_done = ii + 1 status = ("Progress: %d/%d [%.1f%%]" % (n_done, len(img_filenames), 100. * n_done / n_imgs)) status += chr(8) * (len(status) + 1) print status, # -- append to meta meta += [data] print print " Number of images: %d" % len(meta) print " Number of unique object names: %d" % len(unique_object_names) print " Unique object names: %s" % unique_object_names # -- print "Parsing splits..." 
main_dirname = path.join(self.home(), "annotations") # We use 'aeroplane_{train,trainval}.txt' to get the list of 'train' # and 'val' ids train_filename = path.join(main_dirname, 'list.txt') assert path.exists(train_filename) train_ids = np.loadtxt(train_filename, dtype=str)[:, 0] trainval_filename = path.join(main_dirname, 'trainval.txt') assert path.exists(trainval_filename) trainval_ids = np.loadtxt(trainval_filename, dtype=str)[:, 0] splits = 'train', 'val', 'test' split_counts = dict([(split, 0) for split in splits]) for data in meta: img_id = data['id'] if img_id in trainval_ids: if img_id in train_ids: data['split'] = 'train' else: data['split'] = 'val' else: data['split'] = 'test' split_counts[data['split']] += 1 for split in splits: count = split_counts[split] # assert count > 0 print(" Number of images in '%s': %d" % (split, count)) meta = np.array(meta) return meta