def produce(cls, to_user: str, msg_type: str, data: object, to_xml: bool = True) -> (str, dict):
    """
    Build an XML reply message.

    :param to_user: the user's openid
    :param msg_type: message type; restricted to the keys of type_map.
    :param data: payload; the expected format differs per message type --
        see the corresponding ``add_<msg_type>`` method for details.
    :param to_xml: convert to XML? Otherwise an OrderedDict rooted at
        'xml' is returned.
    :return: XML bytes (from dicttoxml) or the OrderedDict itself.
    :raises ValueError: when no ``add_<msg_type>`` handler exists.
    """
    func_name = "add_{}".format(msg_type)
    obj = cls(to_user=to_user)
    if hasattr(obj, func_name):
        res = obj.__getattribute__(func_name)(data)
        r = OrderedDict()
        r['xml'] = res
        if to_xml:
            r = dicttoxml.dicttoxml(obj=r, root=False, attr_type=False, cdata=True)
            if msg_type == "news" and len(data) > 1:
                # News replies require ArticleCount to be a plain int, not
                # wrapped in CDATA (dicttoxml wraps every value by default).
                l = len(data)
                b1 = "<ArticleCount><![CDATA[{}]]></ArticleCount>".format(l)
                b2 = "<ArticleCount>{}</ArticleCount>".format(l)
                # BUG FIX: bytes.replace returns a new object; previously the
                # result was discarded, leaving the CDATA wrapper in place.
                r = r.replace(b1.encode(), b2.encode())
            return r
        else:
            return r
    else:
        ms = "错误的消息类型: {}".format(msg_type)
        raise ValueError(ms)
def get_derivatives(trj: TrajaDataFrame):
    """Returns derivatives ``displacement``, ``displacement_time``, ``speed``, ``speed_times``,
    ``acceleration``, ``acceleration_times`` as dictionary.

    Args:
      trj (:class:`~traja.frame.TrajaDataFrame`): Trajectory

    Returns:
      derivs (:class:`~pd.DataFrame`) : Derivatives

    .. doctest::

        >> df = traja.TrajaDataFrame({'x':[0,1,2],'y':[1,2,3],'time':[0.,0.2,0.4]})
        >> df.traja.get_derivatives() #doctest: +SKIP
           displacement  displacement_time     speed  speed_times  acceleration  acceleration_times
        0           NaN                0.0       NaN          NaN           NaN                 NaN
        1      1.414214                0.2  7.071068          0.2           NaN                 NaN
        2      1.414214                0.4  7.071068          0.4           0.0                 0.4

    """
    if not _has_cols(trj, ["displacement", "displacement_time"]):
        derivs = calc_derivatives(trj)
        d = derivs["displacement"]
        t = derivs["displacement_time"]
    else:
        d = trj.displacement
        t = trj.displacement_time
        derivs = OrderedDict(displacement=d, displacement_time=t)

    if is_datetime_or_timedelta_dtype(t):
        # Convert to float divisible series
        # TODO: Add support for other time units
        t = t.dt.total_seconds()

    # BUG FIX: Series.rename is not in-place; the renamed series were
    # previously discarded, so columns carried no/wrong names.
    v = (d[1:len(d)] / t.diff()).rename("speed")
    vt = t[1:len(t)].rename("speed_times")

    # Calculate linear acceleration.
    # BUG FIX: the rename used to be applied to vt.diff() (the divisor)
    # instead of the quotient, and "accleration_times" was misspelled.
    a = (v.diff() / vt.diff()).rename("acceleration")
    at = vt[1:len(vt)].rename("acceleration_times")

    data = dict(speed=v, speed_times=vt, acceleration=a, acceleration_times=at)
    # BUG FIX: in the else-branch above `derivs` is an OrderedDict, which has
    # no .merge(); normalize to a DataFrame before merging.
    derivs = pd.DataFrame(derivs).merge(
        pd.DataFrame(data), left_index=True, right_index=True)

    # Replace infinite values.
    # BUG FIX: DataFrame.replace returns a new frame; assign the result.
    derivs = derivs.replace([np.inf, -np.inf], np.nan)
    return derivs
def configure(self, upload_id, photo, caption='', location=None, filter_=None):
    """Finalize (configure) an uploaded photo so it is published.

    :param upload_id: id returned by the preceding photo-upload request
    :param photo: path to the photo file; only its width is read here
    :param caption: optional caption text (falsy values become '')
    :param location: optional location object exposing getExternalId(),
        getLatitude(), etc. -- presumably an Instagram Location wrapper
    :param filter_: optional filter name, mapped via Utils.getFilterCode
    :return: ConfigureResponse wrapping the second element of the API reply
    """
    caption = caption if caption else ''
    # The crop edits below assume a square image, so only the width is used.
    size = Image.open(photo).size[0]
    post = OrderedDict([
        ('upload_id', upload_id),
        ('camera_model', self.settings.get('model').replace(" ", "")),
        ('source_type', 3),
        ('date_time_original', time.strftime('%Y:%m:%d %H:%M:%S')),
        ('camera_make', self.settings.get('manufacturer')),
        ('edits', OrderedDict([('crop_original_size', [size, size]),
                               ('crop_zoom', 1.3333334),
                               ('crop_center', [0.0, -0.0])])),
        ('extra', OrderedDict([('source_width', size),
                               ('source_height', size)])),
        ('device', OrderedDict([('manufacturer', self.settings.get('manufacturer')),
                                ('model', self.settings.get('model')),
                                ('android_version', Constants.ANDROID_VERSION),
                                ('android_release', Constants.ANDROID_RELEASE)])),
        ('_csrftoken', self.token),
        ('_uuid', self.uuid),
        ('_uid', self.username_id),
        ('caption', caption)
    ])

    if location:
        loc = OrderedDict([(str(location.getExternalIdSource()) + '_id', location.getExternalId()),
                           ('name', location.getName()),
                           ('lat', location.getLatitude()),
                           ('lng', location.getLongitude()),
                           ('address', location.getAddress()),
                           ('external_source', location.getExternalIdSource())])
        post['location'] = json.dumps(loc)
        post['geotag_enabled'] = True
        post['media_latitude'] = location.getLatitude()
        post['posting_latitude'] = location.getLatitude()
        post['media_longitude'] = location.getLongitude()
        post['posting_longitude'] = location.getLongitude()
        post['altitude'] = mt_rand(10, 800)

    if filter_:
        # BUG FIX: previously passed the builtin ``filter`` instead of the
        # ``filter_`` argument, so the chosen filter was never applied.
        post['edits']['filter_type'] = Utils.getFilterCode(filter_)

    post = json.dumps(post)
    # The API expects the exact literal [0.0,-0.0]; json.dumps emits [0,0]
    # for those floats in some cases, so patch the serialized string.
    post = post.replace('"crop_center":[0,0]', '"crop_center":[0.0,-0.0]')
    return ConfigureResponse(
        self.http.request('media/configure/', SignatureUtils.generateSignature(post))[1])
def kle2json(cli):
    """Convert a KLE layout to QMK's layout format.

    Reads KLE raw data from ``cli.args.filename``, converts it with KLE2xy /
    kle2qmk, and writes ``info.json`` next to the input file. Refuses to
    overwrite an existing info.json unless ``--force`` is given.
    """
    # If filename is a path
    if cli.args.filename.startswith("/") or cli.args.filename.startswith("./"):
        file_path = Path(cli.args.filename)
    # Otherwise assume it is a file name relative to the original cwd
    else:
        file_path = Path(os.environ['ORIG_CWD'], cli.args.filename)
    # Check for valid file_path for more graceful failure
    if not file_path.exists():
        return cli.log.error('File {fg_cyan}%s{style_reset_all} was not found.', file_path)
    out_path = file_path.parent
    # BUG FIX: file_path.open().read() leaked the file handle.
    raw_code = file_path.read_text()
    # Check if info.json exists, allow overwrite with force
    if Path(out_path, "info.json").exists() and not cli.args.force:
        cli.log.error(
            'File {fg_cyan}%s/info.json{style_reset_all} already exists, use -f or --force to overwrite.',
            out_path)
        return False
    try:
        # Convert KLE raw to x/y coordinates (using kle2xy package from skullydazed)
        kle = KLE2xy(raw_code)
    except Exception as e:
        cli.log.error('Could not parse KLE raw data: %s', raw_code)
        cli.log.exception(e)
        # FIXME: This should be better
        return cli.log.error('Could not parse KLE raw data.')
    keyboard = OrderedDict(
        keyboard_name=kle.name,
        url='',
        maintainer='qmk',
        width=kle.columns,
        height=kle.rows,
        layouts={'LAYOUT': {
            'layout': 'LAYOUT_JSON_HERE'
        }},
    )
    # Initialize keyboard with json encoded from ordered dict
    keyboard = json.dumps(keyboard, indent=4, separators=(', ', ': '), sort_keys=False, cls=CustomJSONEncoder)
    # Initialize layout with kle2qmk from converter module
    layout = json.dumps(kle2qmk(kle), separators=(', ', ':'), cls=CustomJSONEncoder)
    # Replace layout in keyboard json
    keyboard = keyboard.replace('"LAYOUT_JSON_HERE"', layout)
    # Write our info.json
    # BUG FIX: `out_path + "/info.json"` raised TypeError (pathlib.Path does
    # not support +); build the path with pathlib and close the file via with.
    with Path(out_path, "info.json").open("w") as info_file:
        info_file.write(keyboard)
    cli.log.info('Wrote out {fg_cyan}%s/info.json', out_path)
def _do_write(_data, _contents):
    """Dump one top-level section of a rule file to TOML and write it out.

    NOTE(review): this reads as a nested helper -- it uses `contents`,
    `data`, `encoder`, `CurrentSchema`, `NonformattedField` and `write`
    from an enclosing scope not visible here, while also taking
    `_data`/`_contents` parameters. Confirm the mixed usage of
    `contents['rule']` vs `_contents` and `{data: top}` vs `_data` is
    intentional before refactoring.
    """
    query = None
    if _data == 'rule':
        # - We want to avoid the encoder for the query and instead use kql-lint.
        # - Linting is done in rule.normalize() which is also called in rule.validate().
        # - Until lint has tabbing, this is going to result in all queries being flattened with no wrapping,
        #   but will at least purge extraneous white space
        query = contents['rule'].pop('query', '').strip()

        # - As tags are expanding, we may want to reconsider the need to have them in alphabetical order
        # tags = contents['rule'].get("tags", [])
        #
        # if tags and isinstance(tags, list):
        #     contents['rule']["tags"] = list(sorted(set(tags)))

    # Partition keys: scalars and markdown fields stay on top; nested
    # dicts/lists sink to the bottom so the TOML reads header-first.
    top = OrderedDict()
    bottom = OrderedDict()

    for k in sorted(list(_contents)):
        v = _contents.pop(k)

        if isinstance(v, dict):
            bottom[k] = OrderedDict(sorted(v.items()))
        elif isinstance(v, list):
            if any([isinstance(value, (dict, list)) for value in v]):
                bottom[k] = v
            else:
                top[k] = v
        elif k in CurrentSchema.markdown_fields():
            # Markdown is wrapped so the encoder leaves its formatting alone.
            top[k] = NonformattedField(v)
        else:
            top[k] = v

    if query:
        # Placeholder value; swapped for the preserved query text after dumping.
        top.update({'query': "XXxXX"})

    top.update(bottom)
    top = toml.dumps(OrderedDict({data: top}), encoder=encoder)

    # we want to preserve the query format, but want to modify it in the context of encoded dump
    if query:
        formatted_query = "\nquery = '''\n{}\n'''{}".format(
            query, '\n\n' if bottom else '')
        top = top.replace('query = "XXxXX"', formatted_query)

    write(top)
def POST_v1_converters_kle():
    """Convert a KLE layout to QMK's layout format. """
    payload = request.get_json(force=True)
    if not payload:
        return error("Invalid JSON data!")

    # Accept either a gist id or the raw KLE text.
    if 'id' in payload:
        gist_id = payload['id'].split('/')[-1]
        raw_kle = fetch_kle_json(gist_id)[1:-1]
    elif 'raw' in payload:
        raw_kle = payload['raw']
    else:
        return error('You must supply either "id" or "raw" labels.')

    try:
        parsed = KLE2xy(raw_kle)
    except Exception as exc:
        logging.error('Could not parse KLE raw data: %s', raw_kle)
        logging.exception(exc)
        return error('Could not parse KLE raw data.')  # FIXME: This should be better

    # Skeleton info.json; the layout placeholder is substituted below.
    template = OrderedDict(
        keyboard_name=parsed.name,
        url='',
        maintainer='qmk',
        width=parsed.columns,
        height=parsed.rows,
        layouts={'LAYOUT': {'layout': 'LAYOUT_JSON_HERE'}},
    )
    info_json = json.dumps(template, indent=4, separators=(', ', ': '), sort_keys=False, cls=CustomJSONEncoder)
    layout_json = json.dumps(kle_to_qmk(parsed), separators=(', ', ':'), cls=CustomJSONEncoder)
    info_json = info_json.replace('"LAYOUT_JSON_HERE"', layout_json)

    response = make_response(info_json)
    response.mimetype = app.config['JSONIFY_MIMETYPE']
    return response
def POST_v1_converters_kle():
    """Convert a KLE layout to QMK's layout format. """
    request_data = request.get_json(force=True)
    if not request_data:
        return error("Invalid JSON data!")

    # Source is either a gist id or inline raw KLE data.
    if 'id' in request_data:
        kle_raw = fetch_kle_json(request_data['id'].split('/')[-1])[1:-1]
    elif 'raw' in request_data:
        kle_raw = request_data['raw']
    else:
        return error('You must supply either "id" or "raw" labels.')

    try:
        kle_layout = KLE2xy(kle_raw)
    except Exception as exc:
        logging.error('Could not parse KLE raw data: %s', kle_raw)
        logging.exception(exc)
        return error('Could not parse KLE raw data.')  # FIXME: This should be better

    info = OrderedDict(
        keyboard_name=kle_layout.name,
        manufacturer='',
        identifier='',
        url='',
        maintainer='qmk',
        processor='',
        bootloader='',
        width=kle_layout.columns,
        height=kle_layout.rows,
        layouts={'LAYOUT': {'layout': 'LAYOUT_JSON_HERE'}}
    )
    info_json = json.dumps(info, indent=4, separators=(', ', ': '), sort_keys=False, cls=CustomJSONEncoder)
    layout_json = json.dumps(kle_to_qmk(kle_layout), separators=(', ', ':'), cls=CustomJSONEncoder)

    response = make_response(info_json.replace('"LAYOUT_JSON_HERE"', layout_json))
    response.mimetype = app.config['JSONIFY_MIMETYPE']
    return response
def write_comments(self):
    """Set up the save file and write comments and flags to it.

    Reads the "comments" and "flag" components of the current data set,
    stores the non-empty entries keyed by source id in the table metadata
    (``MOSViz_comments`` / ``MOSViz_flags``), and writes the table out as
    ECSV. Failures during the write are reported to stdout, not raised.
    """
    if self.savepath is None:
        fail = self._setup_save_path()
        if fail:
            return
    if self.savepath == -1:
        return  # Do not save to file option

    idx = self.data_idx
    data = self.session.data_collection[idx]

    save_comments = data.get_component("comments").labels
    save_flag = data.get_component("flag").labels
    obj_names = data.get_component(
        self.catalog.meta["special_columns"]["source_id"]).labels

    fn = self.savepath
    folder = os.path.dirname(fn)

    t = astropy_table.data_to_astropy_table(data)

    # Check if load and save dir paths match; if not, rewrite the data-file
    # columns to absolute paths so they stay resolvable from the new location.
    temp = os.path.dirname(self.filepath)
    if not os.path.samefile(folder, temp):
        t['spectrum1d'].flags.writeable = True
        t['spectrum2d'].flags.writeable = True
        t['cutout'].flags.writeable = True
        for i in range(len(t)):
            t['spectrum1d'][i] = os.path.abspath(t['spectrum1d'][i])
            t['spectrum2d'][i] = os.path.abspath(t['spectrum2d'][i])
            t['cutout'][i] = os.path.abspath(t['cutout'][i])

    try:
        t.remove_column("comments")
        t.remove_column("flag")

        keys = t.meta.keys()
        if "MOSViz_comments" in keys:
            t.meta.pop("MOSViz_comments")
        if "MOSViz_flags" in keys:
            t.meta.pop("MOSViz_flags")

        comments = OrderedDict()
        flags = OrderedDict()

        for i, line in enumerate(save_comments):
            if line != "":
                line = line.replace("\n", " ")
                key = str(obj_names[i])
                comments[key] = line

        for i, line in enumerate(save_flag):
            if line != "0" and line != "":
                # BUG FIX: previously called `comments.replace(...)` (an
                # OrderedDict has no .replace), which raised and sent every
                # save containing flags into the except branch below.
                line = line.replace("\n", " ")
                key = str(obj_names[i])
                flags[key] = line

        if len(comments) > 0:
            t.meta["MOSViz_comments"] = comments
        if len(flags) > 0:
            t.meta["MOSViz_flags"] = flags

        t.write(fn, format="ascii.ecsv", overwrite=True)
    except Exception as e:
        print("Comment write failed:", e)
# NOTE(review): fragment of a larger spreadsheet-parsing loop -- the
# enclosing row/column `for` loops and the `if` matching the `else:` below
# are outside this view; indentation is reconstructed, verify against the
# full file.
            val["id"] = m.group("id")
            val["name"] = m.group("name")
        else:
            # No separate id captured; fall back to the name group.
            val["id"] = m.group("name")
        # Column position determines node type: function/category/subcategory.
        val["type"] = ("function", "category", "subcategory")[col]
        if m.group("descr"):
            val["description"] = m.group("descr")

        # Append it into its parent.
        stack[-1].append(val)

        # Insert a new entry into the stack.
        sublist = []
        subattr = ("categories", "subcategories", "references")[col]
        val[subattr] = sublist
        stack.append(sublist)

    # Add the Informative References in this row.
    val = row[3].value
    val = val.replace("NIST SP 800-53 Rev.4", "NIST SP 800-53 Rev. 4")  # data error
    m = re.match(
        r"·\s+(CCS CSC|COBIT 5|ISA 62443-2-1:2009|ISA 62443-3-3:2013|ISA 62443-2-1|ISO/IEC 27001:2013|NIST SP 800-53 Rev. 4),? (.*)",
        val)
    standard, controls = m.groups()
    controls = [c.strip() for c in controls.split(", ")]
    stack[-1].append(
        OrderedDict([("standard", standard), ("controls", controls)]))

print(rtyaml.dump(root))
# Package the compiled script and assets into a versioned zip archive.
ziputils.ZipInit("Script_" + str(scriptVersion) + ".zip")
ziputils.AddFile("assets")
ziputils.AddFile("src")
ziputils.AddFile("../../appinfoiii.json")  # include the config file
ziputils.ZipEnd()

# Move the archive up one level.
moveFile("Script_" + str(scriptVersion) + ".zip", "../Script_" + str(scriptVersion) + ".zip")
os.chdir("../../HotupDateTools")
BuildRes()  # the step above regenerated the latest config, so rebuild once more
# copyFile("main.js","../build/jsb-default/main.js")  # copy a main.js that contains the hot-update path

# Restore GgameType in Global.js back to its original value.
# CLEANUP: removed the redundant `data = ""` initialisation and the no-op
# f.close()/F.close() calls -- `with` already closes the files.
filepath = "../assets/Script/core/Global.js"
with open(filepath, "r") as f:
    data = f.read()
data = data.replace("GgameType:3", "GgameType:1")
with open(filepath, "w+") as F:
    F.write(data)

# print("generateLocalConfig Script_" + str(scriptVersion) + "/res End==========================")
os.system('pause')
def write_comments(self):
    """Set up the save file and write comments and flags to file.

    Extracts the "comments" and "flag" components from the glue data set,
    records non-empty values keyed by source id into the table metadata
    (``MOSViz_comments`` / ``MOSViz_flags``) and writes the table as ECSV.
    Write failures are printed, not raised.
    """
    if self.savepath is None:
        fail = self._setup_save_path()
        if fail:
            return
    if self.savepath == -1:
        return  # Do not save to file option

    idx = self.data_idx
    data = self.session.data_collection[idx]

    save_comments = data.get_component("comments").labels
    save_flag = data.get_component("flag").labels
    obj_names = data.get_component(self.catalog.meta["special_columns"]["source_id"]).labels

    fn = self.savepath
    folder = os.path.dirname(fn)

    t = astropy_table.data_to_astropy_table(data)

    # Check if load and save dir paths match; if not, make the per-row data
    # file paths absolute so they remain valid from the save location.
    temp = os.path.dirname(self.filepath)
    if not os.path.samefile(folder, temp):
        t['spectrum1d'].flags.writeable = True
        t['spectrum2d'].flags.writeable = True
        t['cutout'].flags.writeable = True
        for i in range(len(t)):
            t['spectrum1d'][i] = os.path.abspath(t['spectrum1d'][i])
            t['spectrum2d'][i] = os.path.abspath(t['spectrum2d'][i])
            t['cutout'][i] = os.path.abspath(t['cutout'][i])

    try:
        t.remove_column("comments")
        t.remove_column("flag")

        keys = t.meta.keys()
        if "MOSViz_comments" in keys:
            t.meta.pop("MOSViz_comments")
        if "MOSViz_flags" in keys:
            t.meta.pop("MOSViz_flags")

        comments = OrderedDict()
        flags = OrderedDict()

        for i, line in enumerate(save_comments):
            if line != "":
                line = line.replace("\n", " ")
                key = str(obj_names[i])
                comments[key] = line

        for i, line in enumerate(save_flag):
            if line != "0" and line != "":
                # BUG FIX: previously `comments.replace(...)` was called
                # (an OrderedDict has no .replace), raising AttributeError
                # and aborting every save that contained flags.
                line = line.replace("\n", " ")
                key = str(obj_names[i])
                flags[key] = line

        if len(comments) > 0:
            t.meta["MOSViz_comments"] = comments
        if len(flags) > 0:
            t.meta["MOSViz_flags"] = flags

        t.write(fn, format="ascii.ecsv", overwrite=True)
    except Exception as e:
        print("Comment write failed:", e)
# Demo script: namedtuple, datetime, timeit and re basics.
DOG = namedtuple("Dog", "age breed name")
SAM = DOG(age=2, breed='Lab', name="Sammy")
print('hello \nnewline')
print(SAM.age, SAM.breed, SAM.name)

CAT = namedtuple("Cat", "fur claws name")
C = CAT(fur="Fuzzy", claws=False, name="Kitty")
print(C[0], C[1], C[2])  # namedtuples also support positional indexing

T = datetime.time(5, 25, 1)
print(T, datetime.time, datetime.time.min)
print(datetime.time.max, datetime.time.resolution)

TODAY = datetime.date.today()
print(TODAY, TODAY.timetuple(), TODAY.day)
print(datetime.date.min, datetime.date.max, datetime.date.resolution)

D1 = datetime.date(2015, 3, 11)
D2 = D1.replace(year=1990)  # replace() returns a new date; D1 is unchanged
print(D1, D2, D1 - D2)

# BUG FIX: timeit.timeit expects the *source code* of the statement to
# benchmark. Previously the joined result string (e.g. "0-1-2-...") was
# passed, so all three timings measured evaluation of the identical
# subtraction expression instead of comparing the three join styles.
LINE = '"-".join(str(n) for n in range(99))'
TIME = timeit.timeit(LINE, number=999999)
print(TIME)

LINE2 = '"-".join([str(n) for n in range(99)])'
TIME2 = timeit.timeit(LINE2, number=999999)
print(TIME2)

LINE3 = '"-".join(map(str, range(99)))'
TIME3 = timeit.timeit(LINE3, number=999999)
print(TIME3)

PATTERNS = ['term1', 'term2']
pat(PATTERNS)
print(re.search('h', 'w'))  # no match -> prints None

SPLIT_TERM = '@'
# %% import pytz timezone = pytz.timezone('America/Chicago') d_aware = timezone.localize(naive) d_aware.tzinfo # %% d1 = datetime.date(2015,3,11) print(d1) # %% d2 = d1.replace(year=1990) d2 # %% d1-d2 # returns delta between d1 and d2, d1 is AHEAD of d2 # %% d2-d1 # d2 is BEHIND d1 # %% import pdb x = [1,3,4]