def get_element_variable_values(instance_exodus, block_id, num_elements_block,
                                index_variable, step):
    if EXODUS_LIB.ex_int64_status(instance_exodus.fileId) & exodus.EX_BULK_INT64_API:
        num_elem_this_blk = ctypes.c_longlong(num_elements_block)
    else:
        num_elem_this_blk = ctypes.c_int(num_elements_block)
    step = ctypes.c_int(step)
    var_type = ctypes.c_int(exodus.ex_entity_type("EX_ELEM_BLOCK"))
    var_id = ctypes.c_int(index_variable)
    block_id = ctypes.c_longlong(block_id)
    var_vals = (ctypes.c_double * num_elements_block)()
    EXODUS_LIB.ex_get_var(
        instance_exodus.fileId, step, var_type, var_id, block_id,
        num_elem_this_blk, var_vals)
    return np.ctypeslib.as_array(var_vals)
def write(self, fp, start=None, end=None):
    errstr = c_char_p()
    start_date = (
        c_longlong.in_dll(dickinson, "LONG_TIME_T_MIN")
        if start is None
        else c_longlong(self.__datetime_to_time_t(start))
    )
    end_date = (
        c_longlong.in_dll(dickinson, "LONG_TIME_T_MAX")
        if end is None
        else c_longlong(self.__datetime_to_time_t(end))
    )
    text = dickinson.ts_write(
        self.ts_handle,
        c_int(self.precision if self.precision is not None else -9999),
        start_date,
        end_date,
        byref(errstr),
    )
    if not text:
        if not errstr:
            fp.write("")
            return
        raise IOError("Error when writing time series: %s" % (errstr.value,))
    try:
        fp.write(string_at(text).decode("ascii"))
    finally:
        dickinson.freemem(text)
def fprop_mergebroadcast(self, ngLayer, inputs, inference, outputs, layers,
                         out_shape):
    for l in layers:
        l.fprop(inputs, inference)
    if ngLayer.initOK_f == 0:
        C, H, W = layers[0].out_shape
        N = outputs.shape[-1]
        C = out_shape[0]
        ngLayer.in_shape5D = inputs.shape5D
        ngLayer.out_shape5D = N, C, H, W
        for i, layer in enumerate(layers):
            ngLayer.channels[i] = layer.out_shape[0]
    N, C, H, W = ngLayer.out_shape5D
    outputs.shape5D = C, 1, H, W, N
    for i, layer in enumerate(layers):
        alloc_layers = [l for l in layer.layers if l.owns_output]
        ngLayer.tensors_temp[(i * 4):(i * 4 + 4)] = \
            alloc_layers[-1].outputs.primitive[0:4]
    channel = c_longlong(ngLayer.channels.ctypes.data)
    inp = c_longlong(ngLayer.tensors_temp.ctypes.data)
    out = outputs.get_prim()
    prim = c_longlong(ngLayer.primitive.ctypes.data)
    self.mklEngine.Concat_f(inp, ngLayer.layer_num, out, prim, channel,
                            ngLayer.initOK_f, N, C, H, W)
    ngLayer.initOK_f = 1
def update_conv(self, I, E, U, alpha=1.0, beta=0.0, grad_bias=None,
                layer_op=None):
    if not self.get_is_mklop():
        I.backend.convert(I)
        I.clean_mkl()
        E.backend.convert(E)
        E.clean_mkl()
        super(ConvLayerMKL, self).update_conv(I, E, U, alpha, beta,
                                              grad_bias, layer_op)
        return
    # alpha and beta are not handled yet
    K, M, P, Q, N = self.dimO
    C, D, H, W, N = self.dimI
    C, T, R, S, K = self.dimF
    pad_d, pad_h, pad_w = self.padding
    str_d, str_h, str_w = self.strides
    dil_d, dil_h, dil_w = self.dilation
    primitives = c_longlong(self.dnnPrimitives.ctypes.data)
    bias_prim = c_longlong(0)
    if grad_bias is not None:
        bias_prim = grad_bias.get_prim()
    I.backend.mklEngine.Conv_bwdFilter(
        I.get_prim(), E.get_prim(), U.get_prim(), bias_prim, primitives,
        N, C, H, W, R, S, str_h, str_w, pad_h, pad_w, dil_h, dil_w,
        K, P, Q, self.init_bw, self.init_bd)
    self.init_bw = 1
def find_path(self, plan):
    logger.info("Start path finding...")
    logger.debug("source: %s", str(plan.source))
    logger.debug("target: %s", str(plan.target))
    # logger.info("Loading multimodal transportation networks ... ")
    # t1 = time.time()
    self.prepare_routingplan(plan)
    # t2 = time.time()
    # logger.info("done!")
    # logger.info("Finish assembling multimodal networks, time consumed: %s seconds", (t2 - t1))
    logger.info("Calculating multimodal paths ... ")
    t1 = time.time()
    # self.msp_twoq(c_longlong(plan.source['properties']['id']))
    final_path = self.msp_findpath(
        c_longlong(plan.source['properties']['id']),
        c_longlong(plan.target['properties']['id']))
    t2 = time.time()
    logger.info("Finish calculating multimodal paths, time consumed: %s seconds",
                (t2 - t1))
    routing_result = self._construct_result(plan, final_path)
    if routing_result.is_existent is True:
        self.msp_clearpaths(final_path)
    self.msp_cleargraphs()
    self.msp_clearroutingplan()
    del plan.source['properties']['id']
    del plan.target['properties']['id']
    return {
        "routes": [routing_result.to_dict()],
        "source": plan.source,
        "target": plan.target
    }
def get_num_elements_block(instance_exodus, block_id):
    elem_block_id = ctypes.c_longlong(block_id)
    elem_type = ctypes.create_string_buffer(exodus.MAX_STR_LENGTH + 1)
    if EXODUS_LIB.ex_int64_status(instance_exodus.fileId) & exodus.EX_BULK_INT64_API:
        num_elem_this_blk = ctypes.c_longlong(0)
        num_nodes_per_elem = ctypes.c_longlong(0)
        num_attr = ctypes.c_longlong(0)
    else:
        num_elem_this_blk = ctypes.c_int(0)
        num_nodes_per_elem = ctypes.c_int(0)
        num_attr = ctypes.c_int(0)
    EXODUS_LIB.ex_get_elem_block(
        instance_exodus.fileId, elem_block_id, elem_type,
        ctypes.byref(num_elem_this_blk), ctypes.byref(num_nodes_per_elem),
        ctypes.byref(num_attr))
    num_elements_block = num_elem_this_blk.value
    return num_elements_block
def sum(self, start_date=None, end_date=None):
    start_date = c_longlong.in_dll(dickinson, "LONG_TIME_T_MIN") \
        if start_date is None \
        else c_longlong(self.__datetime_to_time_t(start_date))
    end_date = c_longlong.in_dll(dickinson, "LONG_TIME_T_MAX") \
        if end_date is None \
        else c_longlong(self.__datetime_to_time_t(end_date))
    return dickinson.ts_sum(self.ts_handle, start_date, end_date)
def btea(v, k, decode=False):
    """XXTEA block cipher operating in place on a list of c_longlong words."""
    if decode:
        n = -len(v)
    else:
        n = len(v)
    if not isinstance(v, list) or \
            not isinstance(n, int) or \
            not isinstance(k, (list, tuple)):
        return False
    # Reference form:
    # MX = ((z >> 5) ^ (y << 2)) + ((y >> 3) ^ (z << 4)) ^ (sum ^ y) + (k[(p & 3) ^ e] ^ z)
    MX = lambda: ((z.value >> 5) ^ t1.value) + ((y.value >> 3) ^ t2.value) ^ \
        (sum.value ^ y.value) + (k[int((p & 3) ^ e)].value ^ z.value)
    u32 = lambda x: x
    sum, t1, t2, y, z = (c_longlong(0), c_longlong(0), c_longlong(0),
                         c_longlong(0), c_longlong(0))
    y.value = v[0].value
    DELTA = 0x9e3779b9  # 2654435769 in decimal
    if n > 1:  # encode
        z.value = v[n - 1].value
        q = 6 + 52 // n  # integer division; `/` would make q a float in Python 3
        while q > 0:
            q -= 1
            sum.value = sum.value + DELTA
            e = (sum.value >> 2) & 3
            p = 0
            while p < n - 1:
                y.value = v[p + 1].value
                t1.value = y.value << 2
                t2.value = z.value << 4
                z.value = v[p].value = v[p].value + MX()
                p += 1
            y.value = v[0].value
            t1.value = y.value << 2
            t2.value = z.value << 4
            z.value = v[n - 1].value = u32(v[n - 1].value + MX())
        return True
    elif n < -1:  # decode
        n = -n
        q = 6 + 52 // n
        sum.value = q * DELTA
        while sum.value != 0:
            e = (sum.value >> 2) & 3
            p = n - 1
            while p > 0:
                z.value = v[p - 1].value
                t1.value = y.value << 2
                t2.value = z.value << 4
                y.value = v[p].value = v[p].value - MX()
                p -= 1
            z.value = v[n - 1].value
            t1.value = y.value << 2
            t2.value = z.value << 4
            y.value = v[0].value = v[0].value - MX()
            sum.value -= DELTA
        return True
    return False
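A minimal round-trip sketch for the btea routine above (my own example, not from the source): XXTEA mutates the list in place, each element wrapped in c_longlong, and the key should provide four words because MX indexes k[(p & 3) ^ e].

from ctypes import c_longlong

data = [c_longlong(w) for w in (0x12345678, 0x9abcdef0)]
key = [c_longlong(w) for w in (1, 2, 3, 4)]  # four key words, as MX expects
btea(data, key)               # encode in place
btea(data, key, decode=True)  # decoding should restore the original words
print([hex(w.value) for w in data])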
def getDiskSize(drive):
    free_size_for_user = ctypes.c_longlong()
    total_size = ctypes.c_longlong()
    free_size = ctypes.c_longlong()
    ctypes.windll.kernel32.GetDiskFreeSpaceExW(
        drive,
        ctypes.pointer(free_size_for_user),
        ctypes.pointer(total_size),
        ctypes.pointer(free_size)
    )
    return (free_size.value, total_size.value)
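A quick usage sketch (assuming a Windows host; the drive string is passed straight to GetDiskFreeSpaceExW, so it should be a Unicode root path):

free_bytes, total_bytes = getDiskSize("C:\\")
print("free: %d GiB of %d GiB" % (free_bytes // 2**30, total_bytes // 2**30))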
def hash_node(mml_elem, args):
    # mml_elem : minidom mml internal node.
    # args     : hash values from children nodes.
    # returns  : hashed value.
    a = c_longlong(hash(mml_elem.localName[0::2])).value
    b = c_longlong(hash(mml_elem.localName[1::2])).value
    result = HashResult(value=b)
    for arg in args:
        result.merge(a, arg)
    return result
def identify_events(
    ts_list,
    start_threshold,
    ntimeseries_start_threshold,
    time_separator,
    end_threshold=None,
    ntimeseries_end_threshold=None,
    start_date=None,
    end_date=None,
    reverse=False,
):
    if end_threshold is None:
        end_threshold = start_threshold
    if ntimeseries_end_threshold is None:
        ntimeseries_end_threshold = ntimeseries_start_threshold
    range_start_date = (
        c_longlong.in_dll(dickinson, "LONG_TIME_T_MIN")
        if start_date is None
        else c_longlong(_datetime_to_time_t(start_date))
    )
    range_end_date = (
        c_longlong.in_dll(dickinson, "LONG_TIME_T_MAX")
        if end_date is None
        else c_longlong(_datetime_to_time_t(end_date))
    )
    search_range = T_INTERVAL(range_start_date, range_end_date)
    try:
        a_timeseries_list = dickinson.tsl_create()
        a_interval_list = dickinson.il_create()
        if (not a_timeseries_list) or (not a_interval_list):
            raise MemoryError("Insufficient memory")
        for t in ts_list:
            if dickinson.tsl_append(a_timeseries_list, t.ts_handle):
                raise MemoryError("Insufficient memory")
        errstr = c_char_p()
        if dickinson.ts_identify_events(
            a_timeseries_list,
            search_range,
            c_int(reverse),
            c_double(start_threshold),
            c_double(end_threshold),
            c_int(ntimeseries_start_threshold),
            c_int(ntimeseries_end_threshold),
            c_longlong(time_separator.days * _SECONDS_PER_DAY
                       + time_separator.seconds),
            a_interval_list,
            byref(errstr),
        ):
            raise Exception(errstr.value)
        result = []
        for i in range(a_interval_list.contents.n):
            a_interval = a_interval_list.contents.intervals[i]
            result.append((_time_t_to_datetime(a_interval.start_date),
                           _time_t_to_datetime(a_interval.end_date)))
        return result
    finally:
        dickinson.il_free(a_interval_list)
        dickinson.tsl_free(a_timeseries_list)
def sendfile(sock, fileobj):
    if not hasattr(fileobj, "fileno"):
        _sendfile(sock, fileobj)
        return
    offset = c.c_longlong(fileobj.tell())
    # seek to the end to determine the file size
    fileobj.seek(0, 2)
    size = fileobj.tell()
    sock.setblocking(1)
    sendfile64(sock.fileno(), fileobj.fileno(), c.byref(offset),
               c.c_longlong(size - offset.value))
def seek(self, offset):
    '''Seek to a particular offset'''
    distance = ctypes.c_longlong(offset)
    resultDistance = ctypes.c_longlong(offset)
    FILE_BEGIN = 0
    result = k32.SetFilePointerEx(
        self.handle, distance, ctypes.byref(resultDistance), FILE_BEGIN
    )
    if result == 0:
        raise OSError(win32error.getLastErrorTuple())
    self.offset = resultDistance.value
def hash_node(mml_elem, args, dup_param):
    # mml_elem : minidom mml internal node.
    # args     : hash values from children nodes.
    # returns  : hashed value.
    av = hash(mml_elem.localName)
    var = c_longlong(0)
    a = av | 1
    for arg in args:
        var = c_longlong(var.value * a + arg * a)
    var = c_longlong(var.value * dup_param + av)
    return var.value
def copy_transpose(self, a, out, axes=None, repeat=1):
    """Use MKL transposition to speed up 2D copies."""
    if (axes is None
            and a._tensor.ctypes.data != out._tensor.ctypes.data
            and len(a.shape) == 2):
        inp = c_longlong(a._tensor.ctypes.data)
        outp = c_longlong(out._tensor.ctypes.data)
        m, n = a.shape
        self.mklEngine.MatTrans(inp, outp, c_longlong(m), c_longlong(n))
    else:
        out._tensor[:] = np.transpose(a._tensor, axes).copy()
def average(self, start_date=None, end_date=None):
    start_date = (
        c_longlong.in_dll(dickinson, "LONG_TIME_T_MIN")
        if start_date is None
        else c_longlong(self.__datetime_to_time_t(start_date))
    )
    end_date = (
        c_longlong.in_dll(dickinson, "LONG_TIME_T_MAX")
        if end_date is None
        else c_longlong(self.__datetime_to_time_t(end_date))
    )
    return dickinson.ts_average(self.ts_handle, start_date, end_date)
def blksize(path):
    """Get optimal file system buffer size (in bytes) for I/O calls."""
    if os.name != 'nt':
        size = os.statvfs(path).f_bsize
    else:
        import ctypes
        drive = '{0}\\'.format(os.path.splitdrive(os.path.abspath(path))[0])
        cluster_sectors = ctypes.c_longlong(0)
        sector_size = ctypes.c_longlong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceW(
            ctypes.c_wchar_p(drive),
            ctypes.pointer(cluster_sectors),
            ctypes.pointer(sector_size),
            None, None)
        size = int(cluster_sectors.value * sector_size.value)
    return size
def xprop_conv(self, I, F, O, X=None, bias=None, bsum=None, alpha=1.0,
               beta=0.0, relu=False, brelu=False, slope=0.0, backward=False,
               layer_op=None):
    if layer_op is None:
        layer_op = self
    if not self.get_is_mklop():
        I.backend.convert(I)
        F.backend.convert(F)
        I.clean_mkl()
        F.clean_mkl()
        super(ConvLayerMKL, self).xprop_conv(
            I, F, O, X, bias, bsum, alpha, beta, relu, brelu, slope, backward)
        return
    if X is None:
        X = O
    C, D, H, W, N = self.dimI
    C, T, R, S, K = self.dimF
    K, M, P, Q, N = self.dimO
    pad_d, pad_h, pad_w = self.padding
    str_d, str_h, str_w = self.strides
    dil_d, dil_h, dil_w = self.dilation
    primitives = c_longlong(self.dnnPrimitives.ctypes.data)
    bias_prim = c_longlong(0)
    if bias is not None:
        bias_prim = bias.get_prim()
    mkl_res = 0
    if not backward:
        mkl_res = I.backend.mklEngine.Conv_forward(
            I.get_prim(), O.get_prim(), F.get_prim(), bias_prim, primitives,
            self.init_f, N, C, H, W, R, S, str_h, str_w, pad_h, pad_w,
            dil_h, dil_w, K, P, Q)
        self.init_f = 1
        O.shape5D = self.dimO
    else:
        I.backend.mklEngine.Conv_bwdData(
            I.get_prim(), O.get_prim(), F.get_prim(), primitives,
            N, C, H, W, R, S, str_h, str_w, pad_h, pad_w, dil_h, dil_w,
            K, P, Q, self.init_bd, c_float(beta))
        O.shape5D = self.dimI
        self.init_bd = 1
    if mkl_res != 0:
        super(ConvLayerMKL, self).xprop_conv(
            I, F, O, X, bias, bsum, alpha, beta, relu, brelu, slope, backward)
        I.clean_mkl()
        O.clean_mkl()
        layer_op.set_not_mklop()
    return
def delete_items(self, date1, date2):
    bd = self.bounding_dates()
    if not bd:
        return
    if not date1:
        date1 = bd[0]
    if not date2:
        date2 = bd[1]
    timestamp_c1 = c_longlong(self.__datetime_to_time_t(date1))
    timestamp_c2 = c_longlong(self.__datetime_to_time_t(date2))
    p1 = dickinson.ts_get_next(self.ts_handle, timestamp_c1)
    p2 = dickinson.ts_get_prev(self.ts_handle, timestamp_c2)
    if p1 and p2:
        dickinson.ts_delete_records(self.ts_handle, p1, p2)
def hash_apply(mml_elem, args, dup_param):
    # mml_elem : minidom mml element object whose tagName equals "apply".
    # returns  : hashed value.
    assert mml_elem.childNodes
    operator = mml_elem.firstChild
    if operator.localName != "":  # unknown operator
        op_var, op_args = args[0], args[1:]
        var = c_longlong(0)
        a = op_var | 1
        for arg in op_args:
            var = c_longlong(var.value * a + arg * a)
        var = c_longlong(var.value * dup_param + op_var)
        return var.value
def getIntegerRange(handle, command):
    int_max = ctypes.c_longlong()
    int_min = ctypes.c_longlong()
    success_max = check(
        sdk3.AT_GetIntMax(handle, ctypes.c_wchar_p(command),
                          ctypes.byref(int_max)),
        "AT_GetIntMax", command)
    success_min = check(
        sdk3.AT_GetIntMin(handle, ctypes.c_wchar_p(command),
                          ctypes.byref(int_min)),
        "AT_GetIntMin", command)
    return [success_min and success_max, int_min.value, int_max.value]
def _read_by_block(archive_res):
    buffer_ = ctypes.c_char_p()
    num = ctypes.c_size_t()
    offset = ctypes.c_longlong()
    while 1:
        r = libarchive.calls.archive_read.c_archive_read_data_block(
            archive_res,
            ctypes.cast(ctypes.byref(buffer_),
                        ctypes.POINTER(ctypes.c_void_p)),
            ctypes.byref(num),
            ctypes.byref(offset))
        if r == libarchive.constants.archive.ARCHIVE_OK:
            block = ctypes.string_at(buffer_, num.value)
            assert len(block) == num.value
            yield block
        elif r == libarchive.constants.archive.ARCHIVE_EOF:
            break
        else:
            raise ValueError("Read failed (archive_read_data_block): (%d)"
                             % (r,))
def find_driver_base(driver=None):
    """
    Get the base address of the specified driver or the NT Kernel if none is
    specified.

    :param str driver: The name of the driver to get the base address of.
    :return: The base address and the driver name.
    :rtype: tuple
    """
    if platform.architecture()[0] == '64bit':
        lpImageBase = (ctypes.c_ulonglong * 1024)()
        lpcbNeeded = ctypes.c_longlong()
        ctypes.windll.psapi.GetDeviceDriverBaseNameA.argtypes = [
            ctypes.c_longlong, ctypes.POINTER(ctypes.c_char), ctypes.c_uint32]
    else:
        if process_is_wow64():
            raise RuntimeError('python running in WOW64 is not supported')
        lpImageBase = (ctypes.c_ulong * 1024)()
        lpcbNeeded = ctypes.c_long()
    driver_name_size = ctypes.c_long()
    driver_name_size.value = 48
    ctypes.windll.psapi.EnumDeviceDrivers(
        ctypes.byref(lpImageBase), ctypes.c_int(1024),
        ctypes.byref(lpcbNeeded))
    for base_addr in lpImageBase:
        driver_name = ctypes.c_char_p(b'\x00' * driver_name_size.value)
        if base_addr:
            ctypes.windll.psapi.GetDeviceDriverBaseNameA(
                base_addr, driver_name, driver_name_size.value)
            driver_name_value = driver_name.value.decode('utf-8')
            if driver is None and driver_name_value.lower().find("krnl") != -1:
                return base_addr, driver_name_value
            elif driver_name_value.lower() == driver:
                return base_addr, driver_name_value
    return None
def __contains__(self, key):
    index_c = dickinson.ts_get_i(
        self.ts_handle, c_longlong(self.__datetime_to_time_t(key)))
    return index_c >= 0
def getFrames(self):
    frames = []
    # current_buffer = ctypes.POINTER(ctypes.c_char)()
    current_buffer = ctypes.c_void_p()
    buffer_size = ctypes.c_longlong()
    while self.waitBuffer(current_buffer, buffer_size):
        # Convert the buffer to an image.
        check(sdk3_utility.AT_ConvertBuffer(
            current_buffer,
            ctypes.c_void_p(self.frame_data[self.frame_data_cur].getDataPtr()),
            ctypes.c_long(self.frame_x),
            ctypes.c_long(self.frame_y),
            ctypes.c_long(self.stride),
            ctypes.c_wchar_p(self.pixel_encoding),
            ctypes.c_wchar_p("Mono16")),
            "AT_ConvertBuffer")
        frames.append(self.frame_data[self.frame_data_cur])
        # Update current frame.
        self.frame_data_cur += 1
        if self.frame_data_cur == len(self.frame_data):
            self.frame_data_cur = 0
        # Re-queue the buffer.
        check(sdk3.AT_QueueBuffer(self.camera_handle, current_buffer,
                                  buffer_size))
    return [frames, [self.frame_x, self.frame_y]]
def __setitem__(self, key, value):
    timestamp_c = c_longlong(self.__datetime_to_time_t(key))
    index_c = dickinson.ts_get_i(self.ts_handle, timestamp_c)
    if isinstance(value, _Tsvalue):
        tsvalue = value
    elif isinstance(value, tuple):
        tsvalue = _Tsvalue(value[0], value[1])
    elif index_c >= 0:
        tsvalue = _Tsvalue(value, self[key].flags)
    else:
        tsvalue = _Tsvalue(value, [])
    if math.isnan(tsvalue):
        null_c = 1
        value_c = c_double(0)
    else:
        null_c = 0
        value_c = c_double(tsvalue)
    flags_c = c_char_p((' '.join(tsvalue.flags)).encode('ascii'))
    err_str_c = c_char_p()
    index_c = c_int()
    err_no_c = dickinson.ts_insert_record(
        self.ts_handle, timestamp_c, null_c, value_c, flags_c, c_int(1),
        byref(index_c), byref(err_str_c))
    if err_no_c != 0:
        raise Exception('Something went wrong in the dickinson '
                        'function when setting a time series value. '
                        'Error message: ' + repr(err_str_c.value))
def getTotalTime(self, index):
    """Gets the total elapsed time since the Phidget was opened, or since the
    last reset, in microseconds. This time corresponds to the TotalCount
    property.

    Parameters:
        index<int>: index of the frequency input channel

    Returns:
        The total elapsed time <long long>.

    Exceptions:
        RuntimeError - If the current platform is not supported or the
            phidget C dll cannot be found.
        PhidgetException - If this Phidget is not opened and attached, or if
            the index is out of range.
    """
    time = c_longlong()
    try:
        result = PhidgetLibrary.getDll().CPhidgetFrequencyCounter_getTotalTime(
            self.handle, c_int(index), byref(time))
    except RuntimeError:
        raise
    if result > 0:
        raise PhidgetException(result)
    else:
        return time.value
def bprop_pool(self, layer, I, O, argmax=None, alpha=1.0, beta=0.0):
    """
    Backward propagate pooling layer.

    Arguments:
        layer (PoolLayer): The pool layer object. Different backends have
                           different pool layers.
        I (Tensor): Input (error) tensor.
        O (Tensor): Output (delta) tensor.
        argmax (Tensor): Tensor to store the location of the maximum.
        alpha (float): Linear scaling (does not work for L2 pooling).
        beta (float): Accumulation value into grad_I.
    """
    assert layer.sizeI == O.size
    assert layer.sizeO == I.size
    if layer.op == "max":
        assert layer.sizeO == argmax.size
    J, T, R, S = layer.JTRS
    C, D, H, W, N = layer.dimI
    K, M, P, Q, N = layer.dimO
    pad_c, pad_d, pad_h, pad_w = layer.padding
    str_c, str_d, str_h, str_w = layer.strides
    # Unsupported shapes fall back to the CPU implementation.
    if J > 1 or T > 1 or D > 1:
        super(NervanaMKL, self).bprop_pool(layer, I, O, argmax, alpha, beta)
        return
    primitives = c_longlong(layer.dnnPrimitives.ctypes.data)
    self.mklEngine.MaxPooling_bprop(I.get_prim(), O.get_prim(), primitives,
                                    layer.initOk_b, c_float(beta))
    layer.initOk_b = 1
    O.shape5D = layer.dimI
def get_process_creation_time(self, process):
    '''
    Return the creation time of a given process.

    :param process: the process to check for creation time
    :type process: int
    :return: the process creation time from time stamp
    :rtype: int
    '''
    creationtime = ctypes.c_ulonglong()
    exittime = ctypes.c_ulonglong()
    kerneltime = ctypes.c_ulonglong()
    usertime = ctypes.c_ulonglong()
    rc = ctypes.windll.kernel32.GetProcessTimes(
        process, ctypes.byref(creationtime), ctypes.byref(exittime),
        ctypes.byref(kerneltime), ctypes.byref(usertime))
    # Convert Windows FILETIME (100 ns ticks since 1601-01-01) to a Unix
    # timestamp: subtract the epoch difference, then scale to seconds.
    creationtime.value -= ctypes.c_longlong(116444736000000000).value
    creationtime.value //= 10000000
    return creationtime.value
def seek(self, offset, whence=0):
    """ seek """
    if self.fdesc is None:
        return
    ret = self.conn.libcephfs.ceph_lseek(self.conn.cluster, self.fdesc,
                                         ctypes.c_longlong(offset),
                                         ctypes.c_int(whence))
    if ret < 0:
        raise cephfs.make_ex(ret, "error in seek")
def usleep(sec):
    if sys.platform == 'win32':
        # On Windows, time.sleep() doesn't work too well.
        import ctypes
        kernel32 = ctypes.windll.kernel32
        timer = kernel32.CreateWaitableTimerA(ctypes.c_void_p(), True,
                                              ctypes.c_void_p())
        # A negative due time means a relative delay, in 100 ns intervals.
        delay = ctypes.c_longlong(int(-1 * (10 * 1000000 * sec)))
        kernel32.SetWaitableTimer(timer, ctypes.byref(delay), 0,
                                  ctypes.c_void_p(), ctypes.c_void_p(), False)
        kernel32.WaitForSingleObject(timer, 0xffffffff)
    else:
        time.sleep(sec)
def ReadBytes(self, address, num_bytes):
    """Reads at most num_bytes starting from offset <address>."""
    pdata = ctypes.c_void_p(0)
    data_cnt = ctypes.c_uint32(0)
    ret = libc.mach_vm_read(self.task, ctypes.c_ulonglong(address),
                            ctypes.c_longlong(num_bytes),
                            ctypes.pointer(pdata), ctypes.pointer(data_cnt))
    if ret:
        raise process_error.ProcessError("Error in mach_vm_read, ret=%s" % ret)
    buf = ctypes.string_at(pdata.value, data_cnt.value)
    libc.vm_deallocate(self.mytask, pdata, data_cnt)
    return buf
def getEnumeratedString(handle, command):
    max_size = 100
    response = ctypes.c_wchar_p(' ' * max_size)
    if check(sdk3.AT_GetEnumStringByIndex(
                 handle,
                 ctypes.c_wchar_p(command),
                 ctypes.c_longlong(getEnumeratedIndex(handle, command)),
                 response,
                 ctypes.c_int(max_size)),
             "AT_GetEnumStringByIndex", command):
        return response.value
    else:
        return ''
def mytimer():
    kernel32 = ctypes.windll.kernel32
    # This sets the priority of the current thread to realtime--the same
    # priority as the mouse pointer.
    kernel32.SetThreadPriority(kernel32.GetCurrentThread(), 31)
    # This creates a timer. This only needs to be done once.
    timer = kernel32.CreateWaitableTimerA(ctypes.c_void_p(), True,
                                          ctypes.c_void_p())
    # The kernel measures in 100-nanosecond intervals, so multiply 1 ms by
    # 10000. The due time must be negative: for SetWaitableTimer a negative
    # value is a relative delay, while a positive one is an absolute time.
    delay = ctypes.c_longlong(-1 * 10000)
    kernel32.SetWaitableTimer(timer, ctypes.byref(delay), 0,
                              ctypes.c_void_p(), ctypes.c_void_p(), False)
    kernel32.WaitForSingleObject(timer, 0xffffffff)
def _win_perf_counter():
    out = ctypes.c_longlong()
    outref = ctypes.byref(out)
    if _win_perf_counter.freq is None:
        QueryPerformanceFrequency(outref)
        _win_perf_counter.freq = float(out.value)
        QueryPerformanceCounter(outref)
        _win_perf_counter.t0 = out.value
    QueryPerformanceCounter(outref)
    return (out.value - _win_perf_counter.t0) / _win_perf_counter.freq
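This helper keeps its state in function attributes, so it presupposes a one-time setup roughly like the sketch below; the kernel32 bindings and the attribute seeding are assumptions, not shown in the source.

import ctypes

QueryPerformanceCounter = ctypes.windll.kernel32.QueryPerformanceCounter
QueryPerformanceFrequency = ctypes.windll.kernel32.QueryPerformanceFrequency
_win_perf_counter.freq = None  # filled in lazily on the first call
_win_perf_counter.t0 = 0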
def fs_bsize(path):
    """ get optimal file system buffer size (in bytes) for I/O calls """
    path = fs_encode(path)
    if os.name == "nt":
        import ctypes
        drive = "%s\\" % os.path.splitdrive(path)[0]
        cluster_sectors = ctypes.c_longlong(0)
        sector_size = ctypes.c_longlong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceW(
            ctypes.c_wchar_p(drive),
            ctypes.pointer(cluster_sectors),
            ctypes.pointer(sector_size),
            None, None)
        return cluster_sectors.value * sector_size.value
    else:
        return os.statvfs(path).f_frsize
def _CQ_sendGroupMsg(authCode: int, groupid: int, msg: str) -> int:
    try:
        authCode = ctypes.c_int(authCode)
        groupid = ctypes.c_longlong(groupid)
        msg = ctypes.c_char_p(bytes(msg, 'gbk'))
        result = CQDll.CQ_sendGroupMsg(authCode, groupid, msg)
        return ctypes.c_int32(result).value
    except Exception:
        return -1
def find_ionbal(z, ion, lognH, logT):
    # Compared to the line emission files, the order of the nH, T indices in
    # the balance tables is switched.
    balance, logTK, lognHcm3 = findiontables(ion, z)
    NumPart = len(lognH)
    inbalance = np.zeros(NumPart, dtype=np.float32)

    if len(logT) != NumPart:
        print('lognH and logT should have the same length')
        return None

    # Need to compile with some extra options to get this to work:
    # make -f make_emission_only
    cfile = ol.c_interpfile
    acfile = ct.CDLL(cfile)
    # Just a linear interpolator; works for non-emission stuff too.
    interpfunction = acfile.interpolate_2d

    # Ion balance tables are density x temperature x redshift.
    interpfunction.argtypes = [
        np.ctypeslib.ndpointer(dtype=ct.c_float, shape=(NumPart,)),
        np.ctypeslib.ndpointer(dtype=ct.c_float, shape=(NumPart,)),
        ct.c_longlong,
        np.ctypeslib.ndpointer(dtype=ct.c_float,
                               shape=(len(logTK) * len(lognHcm3),)),
        np.ctypeslib.ndpointer(dtype=ct.c_float, shape=(len(lognHcm3),)),
        ct.c_int,
        np.ctypeslib.ndpointer(dtype=ct.c_float, shape=(len(logTK),)),
        ct.c_int,
        np.ctypeslib.ndpointer(dtype=ct.c_float, shape=(NumPart,))]

    res = interpfunction(lognH.astype(np.float32),
                         logT.astype(np.float32),
                         ct.c_longlong(NumPart),
                         np.ndarray.flatten(balance.astype(np.float32)),
                         lognHcm3.astype(np.float32),
                         ct.c_int(len(lognHcm3)),
                         logTK.astype(np.float32),
                         ct.c_int(len(logTK)),
                         inbalance)

    if res != 0:
        print('Something has gone wrong in the C function: output %s.'
              % str(res))
        return None
    return inbalance
def xprop_conv(self, I, F, O, X=None, bias=None, bsum=None, alpha=1.0,
               beta=0.0, relu=False, brelu=False, slope=0.0, backward=False,
               layer_op=None):
    if layer_op is None:
        layer_op = self
    if not layer_op.get_is_mklop():
        I.backend.convert(I)
        F.backend.convert(F)
        I.clean_mkl()
        F.clean_mkl()
        super(ConvLayerMKL, self).xprop_conv(
            I, F, O, X, bias, bsum, alpha, beta, relu, brelu, slope, backward)
        return
    # hack for dealing with dilated conv
    if self.dilated:
        self.xprop_conv_dilated(I, F, O, X, bias, bsum, alpha, beta, relu,
                                brelu, slope, backward)
        return
    if X is None:
        X = O
    # TODO: support bias
    C, D, H, W, N = self.dimI
    C, T, R, S, K = self.dimF
    K, M, P, Q, N = self.dimO
    pad_d, pad_h, pad_w = self.padding
    str_d, str_h, str_w = self.strides
    primitives = c_longlong(self.dnnPrimitives.ctypes.data)
    mkl_res = 0
    if not backward:
        mkl_res = I.backend.mklEngine.Conv_forward(
            I.get_prim(), O.get_prim(), F.get_prim(), primitives,
            self.init_f, N, C, H, W, R, S, str_h, str_w, pad_h, pad_w,
            K, P, Q)
        self.init_f = 1
        O.shape5D = self.dimO
    else:
        beta_ = c_float(beta)
        I.backend.mklEngine.Conv_bwdData(
            I.get_prim(), O.get_prim(), F.get_prim(), primitives,
            N, K, P, Q, self.init_bd, beta_)
        O.shape5D = self.dimI
        self.init_bd = 1
    if mkl_res != 0:
        super(ConvLayerMKL, self).xprop_conv(
            I, F, O, X, bias, bsum, alpha, beta, relu, brelu, slope, backward)
        I.clean_mkl()
        O.clean_mkl()
        layer_op.set_not_mklop()
    return
def _pin_buffer(self, buffer: Union[GeomBuffer, str]) -> Optional[np.ndarray]:
    if isinstance(buffer, str):
        buffer = GeomBuffer[buffer]

    c_buffer = c_longlong()
    c_shape = c_longlong()
    c_size = c_int()
    c_type = c_uint()
    if self._optix.pin_geometry_buffer(self._name, buffer.value,
                                       byref(c_buffer), byref(c_shape),
                                       byref(c_size), byref(c_type)):
        if c_type.value == 4:
            elem = c_float
        elif c_type.value == 3:
            elem = c_uint
        elif c_type.value == 2:
            elem = c_int
        elif c_type.value == 1:
            elem = c_ubyte
        else:
            msg = "Data type not supported."
            self._logger.error(msg)
            if self._raise_on_error:
                raise RuntimeError(msg)
            return None

        shape_buf = (c_int * c_size.value).from_address(c_shape.value)
        shape = np.ctypeslib.as_array(shape_buf)
        # Build the ctypes array type dimension by dimension, then map it
        # over the pinned buffer memory.
        for s in shape:
            elem = elem * s
        return elem.from_address(c_buffer.value)
    else:
        msg = "Buffer not pinned."
        raise RuntimeError(msg)
def getDiskFreeSpace(
    self,
    freeBytesAvailable,
    totalNumberOfBytes,
    totalNumberOfFreeBytes,
    dokanFileInfo,
):
    """Get the amount of free space on this volume.

    :param freeBytesAvailable: pointer for free bytes available
    :type freeBytesAvailable: ctypes.c_void_p
    :param totalNumberOfBytes: pointer for total number of bytes
    :type totalNumberOfBytes: ctypes.c_void_p
    :param totalNumberOfFreeBytes: pointer for total number of free bytes
    :type totalNumberOfFreeBytes: ctypes.c_void_p
    :param dokanFileInfo: used by Dokan
    :type dokanFileInfo: PDOKAN_FILE_INFO
    :return: error code
    :rtype: ctypes.c_int
    """
    ret = self.operations("getDiskFreeSpace")
    ctypes.memmove(
        freeBytesAvailable,
        ctypes.byref(ctypes.c_longlong(ret["freeBytesAvailable"])),
        ctypes.sizeof(ctypes.c_longlong),
    )
    ctypes.memmove(
        totalNumberOfBytes,
        ctypes.byref(ctypes.c_longlong(ret["totalNumberOfBytes"])),
        ctypes.sizeof(ctypes.c_longlong),
    )
    ctypes.memmove(
        totalNumberOfFreeBytes,
        ctypes.byref(ctypes.c_longlong(ret["totalNumberOfFreeBytes"])),
        ctypes.sizeof(ctypes.c_longlong),
    )
    return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS
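For reference, the registered operations handler only needs to return the three byte counts that the memmove calls copy into the Dokan out-parameters. A hypothetical handler (name and values invented for illustration):

def get_disk_free_space():
    # Report 1 GiB free out of 2 GiB total (made-up numbers).
    return {
        "freeBytesAvailable": 2**30,
        "totalNumberOfBytes": 2 * 2**30,
        "totalNumberOfFreeBytes": 2**30,
    }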
def get(n):
    dt = c_longlong(0)
    dr = c_longlong(0)
    ds = c_longlong(0)
    dd = c_longlong(0)
    libj.JGetM(c_void_p(jt), tob(n), byref(dt), byref(dr), byref(ds),
               byref(dd))
    t = dt.value
    if t == 0:
        raise AssertionError('get arg not a name')
    shape = np.fromstring(string_at(ds.value, dr.value * 8), dtype=np.int64)
    count = np.prod(shape)
    if t == 2:
        r = string_at(dd.value, count)
    elif t == 4:
        r = np.fromstring(string_at(dd.value, count * 8), dtype=np.int64)
        r.shape = shape
    elif t == 8:
        r = np.fromstring(string_at(dd.value, count * 8), dtype=np.float64)
        r.shape = shape
    else:
        raise AssertionError('get type not supported')
    return r
def _set_value(self, cfg_set_t_p, value):
    if isinstance(value, bool):
        return self._config_setting_set_bool(cfg_set_t_p, value)
    elif isinstance(value, int):
        return self._config_setting_set_int(cfg_set_t_p, c_long(value))
    elif isinstance(value, long):
        return self._config_setting_set_int64(cfg_set_t_p, c_longlong(value))
    elif isinstance(value, float):
        return self._config_setting_set_float(cfg_set_t_p, c_double(value))
    elif isinstance(value, str):
        return self._config_setting_set_string(cfg_set_t_p, value)
    elif isinstance(value, list):
        return self._set_list(cfg_set_t_p, value)
def do_write(state_name, data):
    state_name = ctypes.c_longlong(int(state_name, 16))
    data_buffer = ctypes.c_char_p(data)
    buffer_size = len(data)
    status = ZwUpdateWnfStateData(ctypes.byref(state_name), data_buffer,
                                  buffer_size, 0, 0, 0, 0)
    status = ctypes.c_ulong(status).value
    if status == 0:
        return True
    else:
        print('[Error] Could not write to this state name, status: '
              '0x{:x}'.format(status))
        return False
def _train_and_share_model(self):
    log_info('Training and sharing the model...')

    # Standardize the data.
    cols = self.FD.features
    scaler = StandardScaler()
    X_train = scaler.fit_transform(self.FD.X_train[cols])
    for i, feature in enumerate(cols):
        mean = scaler.mean_[i]
        std = scaler.scale_[i]
        log_info('Appending mean {} and std {} for feature {}'.format(
            mean, std, feature))
        mean *= self.FD.m_scale
        std *= self.FD.s_scale
        self.BM.ebpf['train_set_params'][i * 2] = ct.c_ulonglong(int(mean))
        self.BM.ebpf['train_set_params'][i * 2 + 1] = ct.c_ulonglong(int(std))
    c_scale = self.FD.m_scale / self.FD.s_scale

    # Train k-means.
    self.FD.train_model(x_train=X_train)

    # Share the (scaled) centroids with the eBPF programs.
    model = self.FD.model
    centroids = model.cluster_centers_
    centroids *= c_scale
    log_info('Scaled centroids:')
    log_info(centroids)

    thresholds = list()
    centroid_l1s = list()
    self.BM.ebpf['centroid_offset'][0] = ct.c_ulonglong(
        int(sum(scaler.mean_ / scaler.scale_) * c_scale))
    for k in range(0, len(centroids)):
        cluster_l1s = np.sum(X_train[model.labels_ == k], axis=1)
        precise_threshold = abs(cluster_l1s.mean()
                                + 5 * cluster_l1s.std()) * c_scale
        threshold = ct.c_ulonglong(int(precise_threshold))
        log_info('Scaled [{}] threshold: {}'.format(k, precise_threshold))
        log_info('Shared [{}] threshold: {}'.format(k, threshold))
        centroid_l1 = ct.c_longlong(int(sum(centroids[k])))
        log_info('Centroid l1: {}'.format(sum(centroids[k])))
        thresholds.append(threshold)
        centroid_l1s.append(centroid_l1)

    log_info('Setting centroid L1 & cluster thresholds')
    for k in range(len(centroids)):
        self.BM.ebpf['cluster_thresholds'][k] = thresholds[k]
        self.BM.ebpf['centroid_l1s'][k] = centroid_l1s[k]
def add_hook(hook_type, callback):
    GetModuleHandleW = kernel32.GetModuleHandleW
    GetModuleHandleW.restype = HMODULE
    GetModuleHandleW.argtypes = [LPCWSTR]
    # If we are running the program (Python interpreter) in 64-bit mode,
    # we need to handle 64-bit addresses.
    if bits == 64:
        handle = ctypes.c_longlong(GetModuleHandleW(None))
    # Otherwise, 32-bit addresses.
    else:
        handle = GetModuleHandleW(None)
    hook = user32.SetWindowsHookExA(hook_type, callback, handle, 0)
    atexit.register(user32.UnhookWindowsHookEx, hook)
    return hook
def _wlfalec(self, fieldname, fieldlength, dtype):
    returncode, length = ctypes.c_longlong(), ctypes.c_longlong()
    result = numpy.ndarray((fieldlength,), dtype=dtype, order='F')
    f = {numpy.float64: _solib.wlfalecr_,
         numpy.int64: _solib.wlfaleci_}[dtype]
    f.argtypes = [
        ctypes.POINTER(ctypes.c_longlong),  # status
        ctypes.POINTER(ctypes.c_longlong),  # logical unit
        ctypes.c_char_p,                    # article name
        ctypes.POINTER(ctypes.c_longlong),  # array physical size
        # Note: str() needed in Python 2 for an obscure unicode/str inner
        # incompatibility.
        numpy.ctypeslib.ndpointer(dtype=dtype, ndim=1,
                                  flags=str('F_CONTIGUOUS')),
        ctypes.POINTER(ctypes.c_longlong),  # array size
        ctypes.c_longlong]                  # article name string length
    f(ctypes.byref(returncode),
      ctypes.byref(ctypes.c_longlong(self._unit)),
      ctypes.create_string_buffer(fieldname.encode("utf-8")),
      ctypes.byref(ctypes.c_longlong(fieldlength)),
      result,
      ctypes.byref(length),
      ctypes.c_longlong(len(fieldname.encode("utf-8"))))
    assert returncode.value == 0, "Error reading article " + fieldname + \
        " on file " + self.filename
    return result, length.value
def to_js_args_val(self, es, val):
    # bool is checked before int because bool is a subclass of int in Python.
    if isinstance(val, str):
        val = self.mb.jsStringW(es, val)
    elif isinstance(val, bool):
        val = self.mb.jsBoolean(val)
    elif isinstance(val, int):
        val = self.mb.jsInt(val)
    elif isinstance(val, float):
        val = self.mb.jsFloat(val)
    elif isinstance(val, list):
        tmp_arr = self.mb.jsEmptyArray(es)
        for i in range(len(val)):
            if isinstance(val[i], int):
                tmp_val = self.mb.jsInt(val[i])
            elif isinstance(val[i], str):
                tmp_val = self.mb.jsStringW(es, val[i])
            elif isinstance(val[i], float):
                tmp_val = self.mb.jsFloat(c_float(val[i]))
            self.mb.jsSetAt(es, c_longlong(tmp_arr), i, c_longlong(tmp_val))
        val = tmp_arr
    elif isinstance(val, dict):
        tmp_obj = self.mb.jsEmptyObject(es)
        for k, v in val.items():
            if isinstance(v, int):
                v = self.mb.jsInt(v)
            elif isinstance(v, str):
                v = self.mb.jsStringW(es, v)
            elif isinstance(v, float):
                v = self.mb.jsFloat(c_float(v))
            self.mb.jsSet(es, c_longlong(tmp_obj), k.encode(), c_longlong(v))
        val = tmp_obj
    return val
def checkRemovableDrives(self):
    drives = {}
    bitmask = windll.kernel32.GetLogicalDrives()
    # Check possible drive letters, from A to Z.
    # Note: using ascii_uppercase because we do not want this to change with
    # the locale!
    for letter in string.ascii_uppercase:
        drive = "{0}:/".format(letter)
        # Do we really want to skip A and B?
        # GetDriveTypeA explicitly wants a byte array of type ascii. It will
        # accept a string, but this won't work.
        if bitmask & 1 and windll.kernel32.GetDriveTypeA(
                drive.encode("ascii")) == DRIVE_REMOVABLE:
            volume_name = ""
            name_buffer = ctypes.create_unicode_buffer(1024)
            filesystem_buffer = ctypes.create_unicode_buffer(1024)
            error = windll.kernel32.GetVolumeInformationW(
                ctypes.c_wchar_p(drive), name_buffer,
                ctypes.sizeof(name_buffer), None, None, None,
                filesystem_buffer, ctypes.sizeof(filesystem_buffer))
            if error != 0:
                volume_name = name_buffer.value
            if not volume_name:
                volume_name = catalog.i18nc("@item:intext", "Removable Drive")
            # Certain readers will report themselves as a volume even when
            # there is no card inserted, but will show a "No volume in drive"
            # warning when trying to call GetDiskFreeSpace. However, they will
            # not report a valid filesystem, so we can filter on that. In
            # addition, this excludes other things with filesystems Windows
            # does not support.
            if filesystem_buffer.value == "":
                continue
            # Check the free space. Some card readers show up as a drive with
            # 0 bytes free when there is no card inserted.
            free_bytes = ctypes.c_longlong(0)
            if windll.kernel32.GetDiskFreeSpaceExA(
                    drive.encode("ascii"), ctypes.byref(free_bytes),
                    None, None) == 0:
                continue
            if free_bytes.value < 1:
                continue
            drives[drive] = "{0} ({1}:)".format(volume_name, letter)
        bitmask >>= 1
    return drives
def seek(self, offset, whence=0):
    if whence == 0:
        npos = offset
    elif whence == 1:
        npos = self._pos + offset
    else:
        npos = self._pos - offset
    if self._pos == npos:
        return
    n = ctypes.c_longlong(offset)
    if 0xFFFFFFFF == self.SetFilePointerEx(self.handle, offset & 0xFFFFFFFF,
                                           ctypes.byref(n), whence):
        raise ctypes.WinError()
    self._pos = npos
def _unpack_device_array_argument(self, size, itemsize, buf, shape, strides,
                                  ndim, kernelargs):
    """
    Implements the unpacking logic for array arguments.

    Args:
        size: Total number of elements in the array.
        itemsize: Size in bytes of each element in the array.
        buf: The pointer to the memory.
        shape: The shape of the array.
        strides: The strides of the array.
        ndim: Number of dimensions.
        kernelargs: Array where the arguments of the kernel are stored.
    """
    # meminfo
    kernelargs.append(ctypes.c_size_t(0))
    # parent
    kernelargs.append(ctypes.c_size_t(0))
    kernelargs.append(ctypes.c_longlong(size))
    kernelargs.append(ctypes.c_longlong(itemsize))
    kernelargs.append(buf)
    for ax in range(ndim):
        kernelargs.append(ctypes.c_longlong(shape[ax]))
    for ax in range(ndim):
        kernelargs.append(ctypes.c_longlong(strides[ax]))
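The flattened layout is therefore 5 + 2*ndim entries per array argument: meminfo, parent, size, itemsize, the data pointer, then the shape and stride entries. A self-contained sketch of the same packing applied to a NumPy array (pack_array_arg is an illustrative name, not part of the source):

import ctypes
import numpy as np

def pack_array_arg(arr):
    kernelargs = []
    kernelargs.append(ctypes.c_size_t(0))                # meminfo
    kernelargs.append(ctypes.c_size_t(0))                # parent
    kernelargs.append(ctypes.c_longlong(arr.size))
    kernelargs.append(ctypes.c_longlong(arr.itemsize))
    kernelargs.append(ctypes.c_void_p(arr.ctypes.data))  # data pointer
    for ax in range(arr.ndim):
        kernelargs.append(ctypes.c_longlong(arr.shape[ax]))
    for ax in range(arr.ndim):
        kernelargs.append(ctypes.c_longlong(arr.strides[ax]))
    return kernelargs

args = pack_array_arg(np.zeros((3, 4)))
assert len(args) == 5 + 2 * 2  # 9 entries for a 2-D array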
def test_gsfStat_success(gsf_test_data_03_06):
    """
    Get the size in bytes of a GSF file.
    """
    # Arrange
    sz = c_longlong(0)

    # Act
    return_value = gsfpy.bindings.gsfStat(
        os.fsencode(str(gsf_test_data_03_06.path)), byref(sz)
    )

    # Assert
    assert_that(return_value).is_zero()
    assert_that(sz.value).is_equal_to(165292)
def SymGetModuleInfo64(hProcess, address):
    """
    Retrieves the module information of the specified module.
    """
    img = imagehlp_module64_t()
    img.SizeOfStruct = C.c_ulong(C.sizeof(imagehlp_module64_t))
    r = Symgetmoduleinfo64(C.c_void_p(hProcess), C.c_longlong(address),
                           imagehlp_module64_p(img))
    if r == 0:
        return None
    return img
def read_bytes(self, address, bytes=4):
    pdata = ctypes.c_void_p(0)
    data_cnt = ctypes.c_uint32(0)
    ret = libc.mach_vm_read(self.task, ctypes.c_ulonglong(address),
                            ctypes.c_longlong(bytes), ctypes.pointer(pdata),
                            ctypes.pointer(data_cnt))
    if ret != 0:
        raise ProcessException("mach_vm_read returned : %s" % ret)
    buf = ctypes.string_at(pdata.value, data_cnt.value)
    libc.vm_deallocate(self.mytask, pdata, data_cnt)
    return buf
def read_bytes(self, address, bytes=4):
    pdata = ctypes.c_void_p(0)
    data_cnt = ctypes.c_uint32(0)
    ret = libc.mach_vm_read(self.task, ctypes.c_ulonglong(address),
                            ctypes.c_longlong(bytes), ctypes.pointer(pdata),
                            ctypes.pointer(data_cnt))
    if ret != 0:
        raise CannotReadException("mach_vm_read returned: {}".format(ret))
    buf = ctypes.string_at(pdata.value, data_cnt.value)
    libc.vm_deallocate(self.my_task, pdata, data_cnt)
    return bytearray(buf)
def extract_entries(entries, flags=0):
    """Extracts the given archive entries into the current directory."""
    buff, size, offset = c_void_p(), c_size_t(), c_longlong()
    buff_p, size_p, offset_p = byref(buff), byref(size), byref(offset)
    with new_archive_write_disk(flags) as write_p:
        for entry in entries:
            write_header(write_p, entry._entry_p)
            read_p = entry._archive_p
            while 1:
                r = read_data_block(read_p, buff_p, size_p, offset_p)
                if r == ARCHIVE_EOF:
                    break
                write_data_block(write_p, buff, size, offset)
            write_finish_entry(write_p)
def xprop_conv(self, I, F, O, X=None, bias=None, bsum=None, alpha=1.0,
               beta=0.0, relu=False, brelu=False, slope=0.0, backward=False):
    # hack for dealing with dilated conv
    if self.dilated:
        self.xprop_conv_dilated(I, F, O, X, bias, bsum, alpha, beta, relu,
                                brelu, slope, backward)
        return
    if X is None:
        X = O
    # call MKL
    # TODO: consider concat and beta
    # TODO: support bias
    C, D, H, W, N = self.dimI
    C, T, R, S, K = self.dimF
    K, M, P, Q, N = self.dimO
    pad_d, pad_h, pad_w = self.padding
    str_d, str_h, str_w = self.strides
    primitives = c_longlong(self.dnnPrimitives.ctypes.data)
    if not backward:
        I.backend.mklEngine.Conv_forward(
            I.get_prim(), O.get_prim(), F.get_prim(), primitives,
            self.init_f, N, C, H, W, S, R, str_h, str_w, pad_w, pad_h,
            K, P, Q)
        self.init_f = 1
        O.shape5D = self.dimO
    else:
        beta_ = c_float(beta)
        I.backend.mklEngine.Conv_bwdData(
            I.get_prim(), O.get_prim(), F.get_prim(), primitives,
            N, K, P, Q, self.init_bd, beta_)
        O.shape5D = self.dimI
        self.init_bd = 1
def __init__(self, daten, seed):
    # Cast to c_longlong because x64 pointers are used.
    lib.createModell.restype = ctypes.c_longlong
    self.handle = ctypes.c_longlong(lib.createModell(daten, seed))
    self.gesund = 0
    self.krank = 0
    self.genesen = 0
    self.nb_gesund = 0
    self.nb_krank = 0
    self.nb_genesen = 0
    self.b_gesund = 0
    self.b_krank = 0
    self.b_genesen = 0
def test_cast(self):
    c_int = ctypes.c_int()
    c_uint = ctypes.c_uint()
    for v in (0, 1, sys.maxsize, sys.maxsize + 2, 1 << 31, -1, -10):
        c_int.value = v
        c_uint.value = v
        self.assertEqual(c_int.value, TestSupport.cast_int(v))
        self.assertEqual(c_uint.value, TestSupport.cast_uint(v))
    c_longlong = ctypes.c_longlong()
    c_ulonglong = ctypes.c_ulonglong()
    for v in (0, 1, sys.maxsize, sys.maxsize + 2, 1 << 63, -1, -10):
        c_longlong.value = v
        c_ulonglong.value = v
        self.assertEqual(c_longlong.value, TestSupport.cast_longlong(v))
        self.assertEqual(c_ulonglong.value, TestSupport.cast_ulonglong(v))
def StringToRecord(self, input_file, output_file):
    print('Start to convert {} to {}'.format(input_file, output_file))
    writer = tf.python_io.TFRecordWriter(output_file)
    for line in open(input_file, 'r'):
        tokens = line.split(' ')
        label = float(tokens[0])
        field2feature = {}
        for fea in tokens[1:]:
            fieldid, featureid, value = fea.split(':')
            if int(fieldid) not in field2feature:
                feature2value = {}
                feature2value[int(featureid)] = float(value)
                field2feature[int(fieldid)] = feature2value
            else:
                field2feature[int(fieldid)][int(featureid)] = float(value)
        feature = {}
        feature['label'] = tf.train.Feature(
            float_list=tf.train.FloatList(value=[label]))
        for fieldid in self.sparse_field:
            feature_id_list = []
            feature_val_list = []
            if fieldid in field2feature:
                for featureid in field2feature[fieldid]:
                    value = field2feature[fieldid][featureid]
                    feature_id_list.append(
                        ctypes.c_longlong(int(featureid)).value)
                    feature_val_list.append(value)
            else:
                feature_id_list.append(0)
                feature_val_list.append(0.0)
            feature['sparse_id_in_field_' + str(fieldid)] = tf.train.Feature(
                int64_list=tf.train.Int64List(value=feature_id_list))
            feature['sparse_val_in_field_' + str(fieldid)] = tf.train.Feature(
                float_list=tf.train.FloatList(value=feature_val_list))
        example = tf.train.Example(features=tf.train.Features(
            feature=feature))
        writer.write(example.SerializeToString())
    writer.close()
    print('Successfully converted {} to {}'.format(input_file, output_file))
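The ctypes.c_longlong(int(featureid)).value round-trip folds feature ids that overflow a signed 64-bit integer into the range that Int64List can store; a one-liner shows the wrap-around:

import ctypes
print(ctypes.c_longlong(2**63 + 1).value)  # prints -9223372036854775807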