def ca_get_container_storage_information(slot, h_container):
    """
    Get a container's storage information

    :param slot: target slot
    :param h_container: target container handle
    :return: result code, dict of storage values
    """
    c_slot = CK_SLOT_ID(slot)
    c_container = CK_ULONG(h_container)
    # One output cell per storage metric reported by the HSM.
    c_overhead, c_total, c_used, c_free, c_count = (CK_ULONG() for _ in range(5))
    ret = CA_GetContainerStorageInformation(
        c_slot, c_container,
        pointer(c_overhead), pointer(c_total), pointer(c_used),
        pointer(c_free), pointer(c_count))
    storage = {
        'overhead': c_overhead.value,
        'total': c_total.value,
        'used': c_used.value,
        'free': c_free.value,
        'object_count': c_count.value,
    }
    return ret, storage
def ca_get_container_status(slot, h_container):
    """
    Get a container's Status

    :param slot: target slot
    :param h_container: target container handle
    :return: result code, dict of flags, dict of failed logins
    :raises Exception: if the HSM reports status bits outside the known
        LUNA_CF_* flag set
    """
    slot_id = CK_SLOT_ID(slot)
    cont_id = CK_ULONG(h_container)
    status_flags = CK_ULONG()
    failed_so_logins = CK_ULONG()
    failed_user_logins = CK_ULONG()
    failed_limited_user_logins = CK_ULONG()
    ret = CA_GetContainerStatus(slot_id,
                                cont_id,
                                pointer(status_flags),
                                pointer(failed_so_logins),
                                pointer(failed_user_logins),
                                pointer(failed_limited_user_logins))
    # Keys are the known flag bit values; values filled in (0/1) below.
    flags_dict = {
        LUNA_CF_CONTAINER_ENABLED: None,
        LUNA_CF_KCV_CREATED: None,
        LUNA_CF_LKCV_CREATED: None,
        LUNA_CF_HA_INITIALIZED: None,
        LUNA_CF_PARTITION_INITIALIZED: None,
        LUNA_CF_CONTAINER_ACTIVATED: None,
        LUNA_CF_CONTAINER_LUSR_ACTIVATED: None,
        LUNA_CF_USER_PIN_INITIALIZED: None,
        LUNA_CF_SO_PIN_LOCKED: None,
        LUNA_CF_SO_PIN_TO_BE_CHANGED: None,
        LUNA_CF_USER_PIN_LOCKED: None,
        LUNA_CF_LIMITED_USER_PIN_LOCKED: None,
        LUNA_CF_LIMITED_USER_CREATED: None,
        LUNA_CF_USER_PIN_TO_BE_CHANGED: None,
        LUNA_CF_LIMITED_USER_PIN_TO_BE_CHANGED: None
    }
    # OR of all known flags; any returned bit outside this mask is unknown.
    flags_or = reduce(lambda x, y: x | y, flags_dict.keys())
    mask = status_flags.value & flags_or
    unknown_bits = status_flags.value ^ mask
    if unknown_bits != 0:
        # BUG FIX: the bit values must be stringified before joining --
        # ' '.join() on a list of ints raises TypeError, masking the
        # intended diagnostic.
        unknown_flags = [str(2 ** i)
                         for i in range(status_flags.value.bit_length())
                         if (unknown_bits >> i) & 1]
        raise Exception("Found unknown flags! {}".format(' '.join(unknown_flags)))
    # Normalize each known flag to 0/1. (Iterate keys only; the values are
    # being overwritten, and no keys are added/removed, so this is safe.)
    for key in flags_dict:
        flags_dict[key] = 1 if key & status_flags.value else 0
    failed_logins_dict = {
        'failed_so_logins': failed_so_logins.value,
        'failed_user_logins': failed_user_logins.value,
        'failed_limited_user_logins': failed_limited_user_logins.value
    }
    for key, val in failed_logins_dict.items():
        # All-ones (32-bit or 64-bit CK_ULONG) is the "not applicable"
        # sentinel; normalize it to -1 for callers.
        if not val ^ int('1' * 64, 2) or not val ^ int('1' * 32, 2):
            failed_logins_dict[key] = -1
    return ret, flags_dict, failed_logins_dict
def deletefilefromwin(self,filepath,filetype):
    """Delete a file on the Windows side via a native helper DLL.

    :param filepath: path of the file to delete (forward slashes accepted)
    :param filetype: file type selector (unused while disabled)
    :return: True (deletion is currently disabled -- see note below)
    """
    # NOTE(review): this early return makes everything below dead code,
    # presumably to disable the DLL-backed deletion on purpose.
    # Confirm intent before removing it.
    return True
    dllpath = self.syfilename
    dll = CDLL(dllpath)
    fileserver = create_string_buffer(1024)
    # Delete the file via the native helper (expects backslash paths).
    return dll.deletedirectioryfile(filepath.replace('/', '\\').encode(),2, pointer(fileserver))
def build(cls, language, gazetteer_entity_parser_path=None):
    """Build a `BuiltinEntityParser`

    Args:
        language (str): Language identifier
        gazetteer_entity_parser_path (str, optional): Path to a gazetteer
            entity parser. If None, the builtin entity parser will only
            use grammar entities.
    """
    if isinstance(gazetteer_entity_parser_path, Path):
        gazetteer_entity_parser_path = str(gazetteer_entity_parser_path)
    if not isinstance(language, str):
        raise TypeError("Expected language to be of type 'str' but found:"
                        " %s" % type(language))
    config = {
        "language": language.upper(),
        "gazetteer_parser_path": gazetteer_entity_parser_path,
    }
    parser_handle = pointer(c_void_p())
    serialized_config = json.dumps(config).encode("utf8")
    exit_code = lib.snips_nlu_ontology_create_builtin_entity_parser(
        byref(parser_handle), serialized_config)
    check_ffi_error(
        exit_code, "Something went wrong while creating the "
                   "builtin entity parser")
    return cls(parser_handle)
def get_builtin_entity_examples(builtin_entity_kind, language):
    """Provides some examples of the builtin entity in the specified language
    """
    global _ENTITIES_EXAMPLES

    if not isinstance(builtin_entity_kind, str):
        raise TypeError("Expected `builtin_entity_kind` to be of type 'str' "
                        "but found: %s" % type(builtin_entity_kind))
    if not isinstance(language, str):
        raise TypeError(
            "Expected `language` to be of type 'str' but found: %s"
            % type(language))

    # Two-level cache keyed by entity kind, then by language.
    kind_cache = _ENTITIES_EXAMPLES.setdefault(builtin_entity_kind, {})
    if language not in kind_cache:
        with string_array_pointer(pointer(CStringArray())) as ptr:
            exit_code = lib.snips_nlu_ontology_builtin_entity_examples(
                builtin_entity_kind.encode("utf8"),
                language.encode("utf8"), byref(ptr))
            check_ffi_error(
                exit_code, "Something went wrong when retrieving "
                           "builtin entity examples")
            array = ptr.contents
            kind_cache[language] = [array.data[i].decode("utf8")
                                    for i in range(array.size)]
    return kind_cache[language]
def to_c_mech(self):
    """
    Create the Param structure, then convert the data into byte arrays.

    :return: :class:`~pypkcs11.cryptoki.CK_MECHANISM`
    """
    super(PRFKDFDeriveMechanism, self).to_c_mech()
    params = CK_PRF_KDF_PARAMS()
    params.prfType = self.params['prf_type']
    if self.params['label'] is None:
        # NOTE(review): '' (a Python str) is later passed to ctypes cast()
        # as CK_BYTE_PTR; cast normally expects a ctypes object or None.
        # Confirm this branch actually works, or whether None (NULL) was
        # intended here as in sibling mechanisms.
        label = ''
        label_len = 0
    else:
        label, label_len = to_byte_array(self.params['label'])
    if self.params['context'] is None:
        # Same caveat as the label branch above.
        context = ''
        context_len = 0
    else:
        context, context_len = to_byte_array(self.params['context'])
    if self.params['counter'] is None:
        # Default iteration counter when the caller does not supply one.
        counter = 1
    else:
        counter = self.params['counter']
    ul_encoding_scheme = self.params['encoding_scheme']
    params.pLabel = cast(label, CK_BYTE_PTR)
    params.ulLabelLen = label_len
    params.pContext = cast(context, CK_BYTE_PTR)
    params.ulContextLen = context_len
    params.ulCounter = counter
    params.ulEncodingScheme = ul_encoding_scheme
    # Attach the populated param struct to the mechanism by raw pointer.
    self.mech.pParameter = cast(pointer(params), c_void_p)
    self.mech.ulParameterLen = CK_ULONG(sizeof(params))
    return self.mech
def tokenize(input, language):
    """Tokenize `input` for the given language through the native library."""
    with token_array_pointer(pointer(CTokenArray())) as array_ptr:
        status = lib.snips_nlu_utils_tokenize(input.encode("utf8"),
                                              language.encode("utf8"),
                                              byref(array_ptr))
        check_ffi_error(status,
                        "Something went wrong when tokenizing '%s'" % input)
        return array_ptr.contents.to_pylist()
def createCoverInstance(self, name: str, srcinfo: SourceInfo, weight: int, source) -> 'Covergroup':
    """Create a UCIS_COVERINSTANCE scope under this scope and wrap it."""
    if srcinfo is None:
        srcinfo_p = None
    else:
        srcinfo_p = pointer(_UcdbSourceInfo.ctor(srcinfo))
    scope_handle = get_lib().ucdb_CreateScope(
        self.db, self.obj, str.encode(name), srcinfo_p,
        weight, source, UCIS_COVERINSTANCE, 0)
    return UcdbCovergroup(self.db, scope_handle)
def __init__(self, language):
    """Create a parser for `language`, holding an FFI handle in _parser."""
    if not isinstance(language, str):
        raise TypeError("Expected language to be of type 'str' but found:"
                        " %s" % type(language))
    self.language = language
    self._parser = pointer(c_void_p())
    status = lib.snips_nlu_ontology_create_builtin_entity_parser(
        byref(self._parser), language.encode("utf8"))
    # Non-zero status means the native side failed; details go to stderr.
    if status:
        raise ImportError("Something wrong happened while creating the "
                          "intent parser. See stderr.")
def size(self):
    """
    Return a pointer to a c_ulong

    :return: Pointer to a CK_ULONG
    :rtype: pointer
    """
    current = self._size
    if current is None:
        # Lazily default the size slot to a zeroed ulong.
        current = c_ulong()
        self._size = current
    return pointer(current)
def getParamInfo():
    """Query the KLSL library for all project parameters (Python 2 code).

    Returns a 4-tuple: (name->length dict, parameter count,
    name->id dict, name->model-name dict).
    """
    paramCount = c_int(0)
    pParamCount = pointer(paramCount)
    print "获取参数个数:\n",lib.KLSL_GetProjParamCount(pParamCount),"参数个数:\n",paramCount.value
    paramCount = paramCount.value # parameter count
    # NOTE(review): a plain Python str is used as a writable C buffer below;
    # this relies on CPython 2 implementation details and is fragile.
    paramName = "s" *1024
    paramId = c_int(0)
    pParamId = pointer(paramId)
    paramLength = c_int(0)
    nParamLength = pointer(paramLength)
    paraNameLengthDict = {} # maps parameter name -> length
    paramIdDict = {} # maps parameter name -> ID
    paramModelNameDict = {} # maps parameter name -> model name
    # Parameter indices are 1-based in the KLSL API.
    for i in range(1,paramCount+1):
        print "获取指定序号的参数名称:\n",lib.KLSL_GetParamInfo(i,paramName,len(paramName),nParamLength),"参数名称:\n",paramName.split("\x00")[0]
        # The C side NUL-terminates; keep only the text before the first NUL.
        nparamName = paramName.split("\x00")[0]
        paraNameLengthDict[nparamName] = paramLength.value
        print "通过参数名称获取参数ID:\n",lib.KLSL_GetParamIdByName(nparamName,pParamId),"参数名为" + nparamName + "的参数ID为",paramId.value
        paramIdDict[nparamName] = paramId.value
        # NOTE(review): `modelName` is not defined in this function -- it is
        # presumably a module-level buffer; confirm it exists before calling.
        print "通过参数名称获取模型名称:\n",lib.KLSL_GetModelNameByParamName(nparamName,modelName,len(modelName))
        paramModelNameDict[nparamName] = modelName.split("\x00")[0]
    return paraNameLengthDict,paramCount,paramIdDict,paramModelNameDict
def to_c_mech(self):
    """
    Create the Param structure, then convert the data into byte arrays.

    :return: :class:`~pypkcs11.cryptoki.CK_MECHANISM`
    """
    super(EcdsaBipDeriveMechanism, self).to_c_mech()
    bip_params = DYCK_DERIVE_ECDSA_BIP_PARAMS()
    bip_params.hardened = CK_BBOOL(self.params['hardened'])
    bip_params.ulChildNumber = CK_ULONG(self.params['ulChildNumber'])
    # Attach the populated param struct to the mechanism by raw pointer.
    self.mech.pParameter = cast(pointer(bip_params), c_void_p)
    self.mech.ulParameterLen = CK_ULONG(sizeof(bip_params))
    return self.mech
def ca_get_hsm_policy_setting(slot, policy_id):
    """
    Get the value of a single policy

    :param slot: slot ID of slot to query
    :param policy_id: policy ID
    :return: result code, CK_ULONG representing policy active or not
    """
    c_slot = CK_SLOT_ID(slot)
    c_policy = CK_ULONG(policy_id)
    c_value = CK_ULONG()
    ret = CA_GetHSMPolicySetting(c_slot, c_policy, pointer(c_value))
    return ret, c_value.value
def ca_get_hsm_capability_setting(slot, capability_id):
    """
    Get the value of a single capability

    :param slot: slot ID of slot to query
    :param capability_id: capability ID
    :return: result code, CK_ULONG representing capability active or not
    """
    c_slot = CK_SLOT_ID(slot)
    c_capability = CK_ULONG(capability_id)
    c_value = CK_ULONG()
    ret = CA_GetHSMCapabilitySetting(c_slot, c_capability, pointer(c_value))
    return ret, c_value.value
def compute_all_ngrams(tokens, max_ngram_size):
    """Compute all ngrams up to `max_ngram_size` over `tokens` via FFI."""
    with ngram_array_pointer(pointer(CNgramArray())) as out_ptr:
        encoded = [token.encode("utf8") for token in tokens]
        token_array = CStringArray()
        token_array.data = (c_char_p * len(encoded))(*encoded)
        token_array.size = len(encoded)
        status = lib.snips_nlu_utils_compute_all_ngrams(
            byref(token_array), max_ngram_size, byref(out_ptr))
        check_ffi_error(
            status,
            "Something went wrong when computing all ngrams for '%s'" % tokens)
        return out_ptr.contents.to_pylist()
def getOutSigInfo():
    """Query the KLSL library for all output signals (Python 2 code).

    Returns a 4-tuple: (signal count, name->length dict,
    name->id dict, name->model-name dict).
    """
    OutSigCount = c_int(0)
    pOutSigCount = pointer(OutSigCount)
    print "获取所有输出信号个数:\n",lib.KLSL_GetProjOutSigCount(pOutSigCount),"信号个数:\n",OutSigCount.value
    outSigCount = OutSigCount.value
    # NOTE(review): a plain Python str is used as a writable C buffer below;
    # this relies on CPython 2 implementation details and is fragile.
    outSigName = "s" *1024
    outSigId = c_int(0)
    pOutSigId = pointer(outSigId)
    nOutSigNameLengthDict = {} # maps signal name -> length
    nOutSigIdDict = {} # maps signal name -> ID
    nOutSigModelnameDict = {} # maps signal name -> model name
    OutSigLength = c_int(0)
    pOutSigLength = pointer(OutSigLength)
    # Signal indices are 1-based in the KLSL API.
    for i in range(1,OutSigCount.value+1):
        print "获取指定序号的信号名称:\n",lib.KLSL_GetOutSigInfo(i,outSigName,len(outSigName),pOutSigLength),"信号名称:\n",outSigName.split("\x00")[0],OutSigLength.value
        # The C side NUL-terminates; keep only the text before the first NUL.
        nOutSigName = outSigName.split("\x00")[0]
        nOutSigNameLengthDict[nOutSigName] = OutSigLength.value
        print "通过信号名称获取信号ID:\n",lib.KLSL_GetOutSigIdByName(nOutSigName,pOutSigId),"信号名为" + nOutSigName + "的信号ID为",outSigId.value
        nOutSigIdDict[nOutSigName] = outSigId.value
        # NOTE(review): `modelName` is not defined in this function -- it is
        # presumably a module-level buffer; confirm it exists before calling.
        print "通过信号名称获取模型名称:\n",lib.KLSL_GetModelNameByOutSigName(nOutSigName,modelName,len(modelName)),"模型名称为:",modelName.split("\x00")[0]
        nOutSigModelnameDict[nOutSigName] = modelName.split("\x00")[0]
    return outSigCount,nOutSigNameLengthDict,nOutSigIdDict,nOutSigModelnameDict
def build(cls, build_config):
    """Create a new :class:`GazetteerEntityParser` from a build config

    The build configuration must have the following format:

        {
            "entity_parsers": [
                {
                    "entity_identifier": "my_first_entity",
                    "entity_parser": {
                        "gazetteer": [
                            {"raw_value": "foo bar", "resolved_value": "Foo Bar"},
                            {"raw_value": "yolo", "resolved_value": "Yala"}
                        ],
                        "threshold": 0.6,
                        "n_gazetteer_stop_words": 10,
                        "additional_stop_words": ["the", "a"]
                    }
                },
                {
                    "entity_identifier": "my_second_entity",
                    "entity_parser": {
                        "gazetteer": [
                            {"raw_value": "the stones",
                             "resolved_value": "The Rolling Stones"}
                        ],
                        "threshold": 0.6,
                        "n_gazetteer_stop_words": None,
                        "additional_stop_words": None
                    }
                },
            ]
        }
    """
    parser_handle = pointer(c_void_p())
    serialized_config = bytes(json.dumps(build_config), encoding="utf8")
    status = lib.snips_nlu_ontology_build_gazetteer_entity_parser(
        byref(parser_handle), serialized_config)
    check_ffi_error(
        status, "Something went wrong when building the "
                "gazetteer entity parser")
    return cls(parser_handle)
def from_path(cls, parser_path):
    """Create a :class:`GazetteerEntityParser` from a gazetteer parser
    persisted on disk

    :param parser_path: str or Path to the persisted parser
    :return: a new parser instance wrapping the FFI handle
    """
    if isinstance(parser_path, Path):
        parser_path = str(parser_path)
    parser = pointer(c_void_p())
    parser_path = bytes(parser_path, encoding="utf8")
    # NOTE(review): this calls the *builtin* entity parser loader
    # (snips_nlu_ontology_load_builtin_entity_parser) even though the
    # docstring says gazetteer parser -- looks like a copy/paste slip;
    # confirm the intended FFI symbol before changing it.
    exit_code = lib.snips_nlu_ontology_load_builtin_entity_parser(
        byref(parser), parser_path)
    check_ffi_error(
        exit_code, "Something went wrong when loading the "
                   "builtin entity parser")
    return cls(parser)
def ca_get_container_policy_setting(slot, h_container, policy_id):
    """
    Get the value of a container's single policy

    :param slot: slot ID of slot to query
    :param h_container: target container handle
    :param policy_id: policy ID
    :return: result code, CK_ULONG representing policy active or not
    """
    c_slot = CK_SLOT_ID(slot)
    c_container = CK_ULONG(h_container)
    c_policy = CK_ULONG(policy_id)
    c_value = CK_ULONG()
    ret = CA_GetContainerPolicySetting(c_slot, c_container, c_policy,
                                       pointer(c_value))
    return ret, c_value.value
def ca_get_container_capability_setting(slot, h_container, capability_id):
    """
    Get the value of a container's single capability

    :param slot: slot ID of slot to query
    :param h_container: target container handle
    :param capability_id: capability ID
    :return: result code, CK_ULONG representing capability active or not
    """
    c_slot = CK_SLOT_ID(slot)
    c_container = CK_ULONG(h_container)
    c_capability = CK_ULONG(capability_id)
    c_value = CK_ULONG()
    ret = CA_GetContainerCapabilitySetting(c_slot, c_container, c_capability,
                                           pointer(c_value))
    return ret, c_value.value
def createCovergroup(self, name:str, srcinfo:SourceInfo, weight:int, source) -> 'Covergroup':
    """Create a UCIS_COVERGROUP scope under this scope and wrap it."""
    from ucis.lib.lib_covergroup import LibCovergroup
    if srcinfo is None:
        srcinfo_p = None
    else:
        srcinfo_p = pointer(_LibSourceInfo.ctor(srcinfo))
    scope_handle = get_lib().ucis_CreateScope(
        self.db, self.obj, str.encode(name), srcinfo_p,
        weight, source, UCIS_COVERGROUP, 0)
    return LibCovergroup(self.db, scope_handle)
def create_shader(self, strings, shader_type):
    """Compile a shader of `shader_type` from `strings`; attach it to the
    program on success, otherwise print the compile log."""
    count = len(strings)
    if count < 1:
        # no source code -- nothing to compile
        return
    shader = glCreateShader(shader_type)
    # Hand the sources to GL as a C array of char pointers.
    encoded_sources = [bytes(s, 'ascii') for s in strings]
    src = (c_char_p * count)(*encoded_sources)
    glShaderSource(shader, count,
                   cast(pointer(src), POINTER(POINTER(c_char))), None)
    glCompileShader(shader)
    status = c_int(0)
    glGetShaderiv(shader, GL_COMPILE_STATUS, byref(status))
    if status:
        # compiled cleanly -- attach to the program
        glAttachShader(self.handle, shader)
    else:
        # fetch and print the compile log for diagnostics
        glGetShaderiv(shader, GL_INFO_LOG_LENGTH, byref(status))
        log_buffer = create_string_buffer(status.value)
        glGetShaderInfoLog(shader, status, None, log_buffer)
        print(log_buffer.value)
def create_shader(self, strings, shader_type):
    """Compile a shader of `shader_type` from `strings`; attach it to the
    program on success, otherwise print the compile log."""
    num_sources = len(strings)
    if num_sources < 1:
        # nothing to compile
        return
    shader = glCreateShader(shader_type)
    sources = (c_char_p * num_sources)(
        *[bytes(s, 'ascii') for s in strings])
    # GL wants a char** -- reinterpret the c_char_p array accordingly.
    glShaderSource(shader, num_sources,
                   cast(pointer(sources), POINTER(POINTER(c_char))), None)
    glCompileShader(shader)
    result = c_int(0)
    glGetShaderiv(shader, GL_COMPILE_STATUS, byref(result))
    if not result:
        # compilation failed: fetch the log and print it
        glGetShaderiv(shader, GL_INFO_LOG_LENGTH, byref(result))
        info_log = create_string_buffer(result.value)
        glGetShaderInfoLog(shader, result, None, info_log)
        print(info_log.value)
        return
    # all is well -- attach the shader to the program
    glAttachShader(self.handle, shader)
def size(self):
    """
    Return a pointer to a c_ulong

    .. warning:: This will ONLY work properly if ``array`` is read
        before ``size``! You can assign to temporary values to work
        around this if the PKCS call requires the size first::

            array, len = autoarray.array, autoarray.size

        This is because after ``size`` is read, ``array`` is
        initialized to a C array of the given value.

    :return: Pointer to a CK_ULONG
    :rtype: pointer
    """
    size_holder = self._size
    if size_holder is None:
        # Default size to a ulong.
        size_holder = c_ulong()
        self._size = size_holder
    return pointer(size_holder)
def to_c_mech(self):
    """
    Create the Param structure, then convert the data into byte arrays.

    :return: :class:`~pycryptoki.cryptoki.CK_MECHANISM`
    """
    super(ECDH1DeriveMechanism, self).to_c_mech()
    ecdh_params = CK_ECDH1_DERIVE_PARAMS()
    ecdh_params.kdf = self.params['kdf']
    shared = self.params['sharedData']
    if shared is None:
        # No shared data: NULL pointer, zero length.
        shared_data, shared_data_len = None, 0
    else:
        shared_data, shared_data_len = to_byte_array(shared)
    ecdh_params.pSharedData = cast(shared_data, CK_BYTE_PTR)
    ecdh_params.ulSharedDataLen = shared_data_len
    public_data, public_data_len = to_byte_array(self.params['publicData'])
    ecdh_params.pPublicData = cast(public_data, CK_BYTE_PTR)
    ecdh_params.ulPublicDataLen = public_data_len
    # Attach the populated param struct to the mechanism by raw pointer.
    self.mech.pParameter = cast(pointer(ecdh_params), c_void_p)
    self.mech.usParameterLen = CK_ULONG(sizeof(ecdh_params))
    return self.mech
def get_supported_entities(language):
    """Lists the builtin entities supported in the specified *language*

    Returns:
        set of str: the entity labels (note: a set, cached per language)
    """
    global _SUPPORTED_ENTITIES

    if not isinstance(language, str):
        raise TypeError("Expected language to be of type 'str' but found: %s"
                        % type(language))

    if language not in _SUPPORTED_ENTITIES:
        with string_array_pointer(pointer(CStringArray())) as ptr:
            status = lib.snips_nlu_ontology_supported_builtin_entities(
                language.encode("utf8"), byref(ptr))
            if status:
                raise ValueError("Something wrong happened while retrieving "
                                 "supported entities. See stderr.")
            array = ptr.contents
            _SUPPORTED_ENTITIES[language] = {
                array.data[i].decode("utf8") for i in range(array.size)}
    return _SUPPORTED_ENTITIES[language]
def get_supported_grammar_entities(language):
    """Lists the grammar entities supported in the specified *language*

    Returns:
        set of str: the entity labels (note: a set, cached per language)
    """
    global _SUPPORTED_GRAMMAR_ENTITIES

    if not isinstance(language, str):
        raise TypeError("Expected language to be of type 'str' but found: %s"
                        % type(language))

    if language not in _SUPPORTED_GRAMMAR_ENTITIES:
        with string_array_pointer(pointer(CStringArray())) as ptr:
            status = lib.snips_nlu_ontology_supported_grammar_entities(
                language.encode("utf8"), byref(ptr))
            check_ffi_error(
                status, "Something went wrong when retrieving "
                        "supported grammar entities")
            array = ptr.contents
            _SUPPORTED_GRAMMAR_ENTITIES[language] = {
                array.data[i].decode("utf8") for i in range(array.size)}
    return _SUPPORTED_GRAMMAR_ENTITIES[language]
def mdReceive(self, netno, stationNo, devtype, devNo, size01):  # mdRecive function
    """Read `size01` elements from a device block via mdReceiveEx and log timing.

    :param netno: network number passed through to mdReceiveEx
    :param stationNo: station number
    :param devtype: device type code
    :param devNo: head device number
    :param size01: number of 32-bit elements to read
    """
    size = ctypes.c_int32()
    size.value = size01
    # Receive buffer sized to the requested element count.
    parray = (ctypes.c_int32 * size.value)()
    # print (parray)
    t3 = time.time()
    # NOTE(review): `func.__mdfunc` / `func.__a` are name-mangled when this
    # method lives inside a class (they resolve as func._<Class>__mdfunc);
    # confirm `func` exposes those mangled attributes as intended.
    receiveRet = func.__mdfunc.mdReceiveEx(func.__a.value, netno, stationNo, devtype, devNo, pointer(size), parray)
    t4 = time.time()
    print(stationNo, "-receive result", receiveRet, "receive using time", str(t4 - t3))
    # %d truncates the float elapsed time to whole seconds in this log line.
    logging.info("stationNo:%d receive result:%d receive time:%d"%(stationNo,receiveRet,t4-t3))
    # NOTE(review): the received data (`parray`) and the result code are
    # discarded -- the method returns None. Confirm callers do not need them.
def parse_packet(self, *args):
    """
    Parses a raw packet into a higher level object.
    Args could be a tuple or two different values. In each case the first one is the raw data and the second
    is the meta about the direction and interface to use.

    The function remapped is WinDivertHelperParsePacket:
    Parses a raw packet (e.g. from WinDivertRecv()) into the various packet headers
    and/or payloads that may or may not be present.

    BOOL WinDivertHelperParsePacket(
        __in PVOID pPacket,
        __in UINT packetLen,
        __out_opt PWINDIVERT_IPHDR *ppIpHdr,
        __out_opt PWINDIVERT_IPV6HDR *ppIpv6Hdr,
        __out_opt PWINDIVERT_ICMPHDR *ppIcmpHdr,
        __out_opt PWINDIVERT_ICMPV6HDR *ppIcmpv6Hdr,
        __out_opt PWINDIVERT_TCPHDR *ppTcpHdr,
        __out_opt PWINDIVERT_UDPHDR *ppUdpHdr,
        __out_opt PVOID *ppData,
        __out_opt UINT *pDataLen
    );

    For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_helper_parse_packet
    """
    # Accept either a single (raw, meta) tuple, a single raw packet, or
    # two positional arguments (raw, meta).
    if len(args) == 1:
        # Maybe this is a poor way to check the type, but it should work
        if hasattr(args[0], "__iter__") and not hasattr(args[0], "strip"):
            raw_packet, meta = args[0]
        else:
            raw_packet, meta = args[0], None
    elif len(args) == 2:
        raw_packet, meta = args[0], args[1]
    else:
        raise ValueError("Wrong number of arguments passed to parse_packet")

    packet_len = len(raw_packet)
    # Consider everything else not part of headers as payload
    # payload = ctypes.c_void_p(0)
    payload_len = c_uint(0)
    # Pre-allocated header structs; the C call fills whichever are present.
    ip_hdr, ipv6_hdr = pointer(IpHeader()), pointer(Ipv6Header())
    icmp_hdr, icmpv6_hdr = pointer(IcmpHeader()), pointer(Icmpv6Header())
    tcp_hdr, udp_hdr = pointer(TcpHeader()), pointer(UdpHeader())
    headers = (ip_hdr, ipv6_hdr, icmp_hdr, icmpv6_hdr, tcp_hdr, udp_hdr)

    self._lib.WinDivertHelperParsePacket(
        raw_packet,
        packet_len,
        byref(ip_hdr),
        byref(ipv6_hdr),
        byref(icmp_hdr),
        byref(icmpv6_hdr),
        byref(tcp_hdr),
        byref(udp_hdr),
        None,
        byref(payload_len),
    )
    # headers_len = sum(ctypes.sizeof(hdr.contents) for hdr in headers if hdr)
    # headers_len = sum((getattr(hdr.contents, "HdrLength", 0) * 4) for hdr in headers if hdr)
    # clean headers, consider just those that are not None (!=NULL)
    # NOTE(review): pointers built with pointer() are never NULL, so the
    # `if hdr` filter presumably keeps all six entries -- confirm intended.
    headers = [hdr.contents for hdr in headers if hdr]
    headers_opts = []
    offset = 0
    for header in headers:
        if hasattr(header, "HdrLength"):
            # HdrLength is in 32-bit words; anything beyond sizeof(struct)
            # is the header's options region, sliced out of the raw bytes.
            header_len = getattr(header, "HdrLength", 0) * 4
            opt_len = header_len - sizeof(header)
            if opt_len:
                opt = raw_packet[offset + header_len - opt_len : offset + header_len]
                headers_opts.append(opt)
            else:
                headers_opts.append("")
        else:
            headers_opts.append("")
            header_len = sizeof(header)
        offset += header_len
    return CapturedPacket(
        payload=raw_packet[offset:],
        raw_packet=raw_packet,
        headers=[HeaderWrapper(hdr, opt, self.encoding) for hdr, opt in zip(headers, headers_opts)],
        meta=meta,
        encoding=self.encoding,
    )
def synfiletoWin(self,localpath,filepath,filetype):
    """Sync a local file/directory to the Windows side via a native helper DLL.

    :param localpath: local source path (forward slashes accepted)
    :param filepath: destination path on the Windows side
    :param filetype: file type selector (unused while disabled)
    :return: True (sync is currently disabled -- see note below)
    """
    # NOTE(review): this early return makes everything below dead code,
    # presumably to disable the DLL-backed sync on purpose.
    # Confirm intent before removing it.
    return True
    dllpath = self.syfilename
    dll = CDLL(dllpath)
    fileserver = create_string_buffer(1024)
    # Sync the directory data via the native helper (expects backslash paths).
    # NOTE(review): the DLL result is assigned but never returned -- confirm
    # whether callers should receive rt3.
    rt3 = dll.syncdirectorydata(localpath.replace('/', '\\').encode(),filepath.replace('/', '\\').encode() , pointer(fileserver))
def parse_packet(self, *args):
    """
    Parses a raw packet into a higher level object.
    Args could be a tuple or two different values. In each case the first one is the raw data and the second
    is the meta about the direction and interface to use.

    The function remapped is WinDivertHelperParsePacket:
    Parses a raw packet (e.g. from WinDivertRecv()) into the various packet headers
    and/or payloads that may or may not be present.

    BOOL WinDivertHelperParsePacket(
        __in PVOID pPacket,
        __in UINT packetLen,
        __out_opt PWINDIVERT_IPHDR *ppIpHdr,
        __out_opt PWINDIVERT_IPV6HDR *ppIpv6Hdr,
        __out_opt PWINDIVERT_ICMPHDR *ppIcmpHdr,
        __out_opt PWINDIVERT_ICMPV6HDR *ppIcmpv6Hdr,
        __out_opt PWINDIVERT_TCPHDR *ppTcpHdr,
        __out_opt PWINDIVERT_UDPHDR *ppUdpHdr,
        __out_opt PVOID *ppData,
        __out_opt UINT *pDataLen
    );

    For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_helper_parse_packet
    """
    # Accept either a single (raw, meta) tuple, a single raw packet, or
    # two positional arguments (raw, meta).
    if len(args) == 1:
        #Maybe this is a poor way to check the type, but it should work
        if hasattr(args[0], "__iter__") and not hasattr(args[0], "strip"):
            raw_packet, meta = args[0]
        else:
            raw_packet, meta = args[0], None
    elif len(args) == 2:
        raw_packet, meta = args[0], args[1]
    else:
        raise ValueError(
            "Wrong number of arguments passed to parse_packet")

    packet_len = len(raw_packet)
    # Consider everything else not part of headers as payload
    # payload = ctypes.c_void_p(0)
    payload_len = c_uint(0)
    # Pre-allocated header structs; the C call fills whichever are present.
    ip_hdr, ipv6_hdr = pointer(IpHeader()), pointer(Ipv6Header())
    icmp_hdr, icmpv6_hdr = pointer(IcmpHeader()), pointer(Icmpv6Header())
    tcp_hdr, udp_hdr = pointer(TcpHeader()), pointer(UdpHeader())
    headers = (ip_hdr, ipv6_hdr, icmp_hdr, icmpv6_hdr, tcp_hdr, udp_hdr)

    self._lib.WinDivertHelperParsePacket(raw_packet,
                                         packet_len,
                                         byref(ip_hdr),
                                         byref(ipv6_hdr),
                                         byref(icmp_hdr),
                                         byref(icmpv6_hdr),
                                         byref(tcp_hdr),
                                         byref(udp_hdr),
                                         None,
                                         byref(payload_len))
    #headers_len = sum(ctypes.sizeof(hdr.contents) for hdr in headers if hdr)
    #headers_len = sum((getattr(hdr.contents, "HdrLength", 0) * 4) for hdr in headers if hdr)
    # clean headers, consider just those that are not None (!=NULL)
    # NOTE(review): pointers built with pointer() are never NULL, so the
    # `if hdr` filter presumably keeps all six entries -- confirm intended.
    headers = [hdr.contents for hdr in headers if hdr]
    headers_opts = []
    offset = 0
    for header in headers:
        if hasattr(header, "HdrLength"):
            # HdrLength is in 32-bit words; anything beyond sizeof(struct)
            # is the header's options region, sliced out of the raw bytes.
            header_len = getattr(header, "HdrLength", 0) * 4
            opt_len = header_len - sizeof(header)
            if opt_len:
                opt = raw_packet[offset + header_len - opt_len:offset + header_len]
                headers_opts.append(opt)
            else:
                headers_opts.append('')
        else:
            headers_opts.append('')
            header_len = sizeof(header)
        offset += header_len
    return CapturedPacket(payload=raw_packet[offset:],
                          raw_packet=raw_packet,
                          headers=[
                              HeaderWrapper(hdr, opt, self.encoding)
                              for hdr, opt in zip(headers, headers_opts)
                          ],
                          meta=meta,
                          encoding=self.encoding)
def byte_to_float(self, s):
    """Reinterpret hex string `s` as an IEEE-754 single-precision float.

    The integer value of `s` is stored in a 32-bit int and the same bits
    are read back as a float (a bit-level cast, not a numeric conversion).
    """
    raw_bits = c_int(int(s, 16))
    as_float = cast(pointer(raw_bits), POINTER(c_float))
    return as_float[0]