def load_numpy(self, data, offset=0):
    """Copy the contents of a numpy array into this tensor.

    Parameters
    ----------
    data: numpy array whose underlying buffer is copied into the tensor
    offset: element offset at which loading starts (default 0)
    """
    # Hand the raw buffer pointer and element count over to the C library.
    src = data.ctypes.data_as(ctypes.c_void_p)
    check_call(LIB.HPPS_TensorLoadData(self.handle, offset, src, data.size))
def store(self, uri):
    """Persist this table to storage.

    Parameters
    ----------
    uri: The destination uri name
    """
    status = LIB.TableStore(self.handle, c_str(uri))
    check_call(status)
def get_tensor(self, name):
    """Look up a tensor in this batch by name.

    Parameters
    ----------
    name: the tensor's key string

    Returns
    -------
    A Tensor wrapping the handle returned by the C library.
    """
    tensor_handle = ctypes.c_void_p()
    check_call(LIB.HPPS_BatchGetTensorFromKey(
        self.handle, c_str(name), ctypes.byref(tensor_handle)))
    return Tensor(handle=tensor_handle, shape=None, type=None)
def wait(self, id):
    """Block until an async pull or push completes.

    Parameters
    ----------
    id: The id previously returned by an async pull or push call
    """
    status = LIB.TableWait(self.handle, id)
    check_call(status)
def load(self, uri):
    """Load this table from storage.

    Parameters
    ----------
    uri: The source uri name
    """
    status = LIB.TableLoad(self.handle, c_str(uri))
    check_call(status)
def get(self, value):
    """Pull parameters into *value* (synchronous).

    Parameters
    ----------
    value: The array-like params destination
    """
    status = LIB.ArrayTableGet(self.handle, value.handle)
    check_call(status)
def schedule(self, plan, max_queue_size=1):
    """Start executing *plan* and return an iterator over its batches.

    Parameters
    ----------
    plan: the Plan to schedule
    max_queue_size: maximum number of prefetched batches (default 1)

    Returns
    -------
    A BatchIterator over the scheduled plan's output.
    """
    iterator_handle = ctypes.c_void_p()
    check_call(LIB.HPPS_FeederSchedule(
        plan.handle, max_queue_size, ctypes.byref(iterator_handle)))
    return BatchIterator(iterator_handle)
def asnumpy(self):
    """Export the tensor contents into a freshly-allocated numpy array."""
    out = np.empty(self.shape, dtype=self.dtype)
    dst = out.ctypes.data_as(ctypes.c_void_p)
    check_call(LIB.HPPS_TensorExportData(self.handle, dst))
    return out
def get(self, key, value):
    """Pull parameters for the given keys (synchronous).

    Parameters
    ----------
    key: the uint32 or uint64 key tensor
    value: The params tensor to fill
    """
    status = LIB.KVTableGet(self.handle, key.handle, value.handle)
    check_call(status)
def set_epoch(self, epoch):
    """Set how many epochs the sample files will be read.

    Parameters
    ----------
    epoch: The epoch number
    """
    status = LIB.HPPS_PlanMakerSetEpoch(self.handle, epoch)
    check_call(status)
def shape(self):
    """Return the tensor's shape, queried from the C side, as a tuple."""
    ndim = ctypes.c_uint()
    dims = ctypes.POINTER(ctypes.c_uint)()
    check_call(LIB.HPPS_TensorShape(
        self.handle, ctypes.byref(ndim), ctypes.byref(dims)))
    return tuple(dims[i] for i in range(ndim.value))
def set_batch_size(self, batch_size):
    """Set the batch size for the reading plan.

    Parameters
    ----------
    batch_size: The batch size
    """
    status = LIB.HPPS_PlanMakerSetBatchSize(self.handle, batch_size)
    check_call(status)
def names(self):
    """Return a tuple holding the name of every tensor in this batch."""
    raw_names = ctypes.POINTER(ctypes.c_char_p)()
    count = ctypes.c_int()
    check_call(LIB.HPPS_BatchGetKeys(
        self.handle, ctypes.byref(count), ctypes.byref(raw_names)))
    return tuple(raw_names[i] for i in range(count.value))
def Num2Indices(num_tensor):
    """Convert a num tensor into an indices tensor.

    Parameters
    ----------
    num_tensor: The num tensor to convert

    Returns
    -------
    A Tensor owning the newly created indices handle.
    """
    result = ctypes.c_void_p()
    check_call(LIB.HPPS_Num2Indices(num_tensor.handle, ctypes.byref(result)))
    return Tensor(handle=result, own_handle=True, shape=None, type=None)
def zoo_set_log_file(log_file):
    """Direct HPPS logging to a file.

    Parameters
    ----------
    log_file: The log file path for HPPS.
    """
    status = LIB.HPPS_ZooSetLogFile(c_str(log_file))
    check_call(status)
def write_finalize(self):
    """Finalize record io writing (flushes/closes on the C side)."""
    status = LIB.HPPS_RecordIOWriteFinalize(self.handle)
    check_call(status)
def create_kv_table(capacity, value_len, key_type, value_type, solver="", ps_mode="async", kwargs=None):
    """Create a new kv table.

    Parameters
    ----------
    capacity: The key capacity
    value_len: The value length
    key_type: The key type (mapped through _NP_2_DTYPE)
    value_type: The value type (mapped through _NP_2_DTYPE)
    solver: The solver
    ps_mode: The ps mode (sync/async)
    kwargs: Optional dict of string k/w config. The available keys:
        algo : assign/uniform/gaussian
        assigned_value : 0.1
        mu : 0
        sigma : 0.01
        min : 0
        max : 1
        seed : 0

    Returns
    -------
    A KVTable wrapping the newly created handle.
    """
    # FIX: default was a shared mutable dict (kwargs={}); use None sentinel.
    # FIX: iteritems() is Python-2-only; items() works on both 2 and 3.
    if kwargs is None:
        kwargs = {}
    keys = []
    values = []
    for cfg_key, cfg_value in kwargs.items():
        keys.append(c_str(cfg_key))
        values.append(c_str(cfg_value))
    handle = ctypes.c_void_p()
    check_call(
        LIB.CreateKVTable(c_str(solver), c_str(ps_mode), capacity, value_len,
                          _NP_2_DTYPE[key_type], _NP_2_DTYPE[value_type],
                          len(keys),
                          c_array(ctypes.c_char_p, keys),
                          c_array(ctypes.c_char_p, values),
                          ctypes.byref(handle)))
    return KVTable(handle)
def zoo_set_log_level(log_level=2):
    """Set the HPPS log verbosity.

    Parameters
    ----------
    log_level:
        0 -> Debug
        1 -> Info
        2 -> Error
        3 -> Fatal
    """
    status = LIB.HPPS_ZooSetLogLevel(log_level)
    check_call(status)
def get_async(self, value):
    """Asynchronously pull parameters into *value*.

    Parameters
    ----------
    value: The array-like params destination

    Returns
    -------
    A c_int id; pass it to wait() to block until the pull completes.
    """
    async_id = ctypes.c_int()
    # BUG FIX: the original passed `self, handle` (two arguments, with
    # `handle` undefined) instead of the single `self.handle` attribute,
    # which raised NameError at call time.
    check_call(
        LIB.ArrayTableGetAsync(self.handle, value.handle,
                               ctypes.byref(async_id)))
    return async_id
def __init__(self, shape, type, handle=None, own_handle=False):
    """Create a new Tensor instance.

    Parameters
    ----------
    shape: iterable of int dims (used only when *handle* is not given)
    type: dtype key into _NP_2_DTYPE (used only when *handle* is not given)
    handle: optional existing C handle to wrap instead of allocating
    own_handle: whether this wrapper owns the passed-in *handle*
    """
    # FIX: compare against None with `is not None` (identity), not `!=`.
    if handle is not None:
        self.own_handle = own_handle
        self.handle = handle
    else:
        # Freshly created handles are always owned by this wrapper.
        self.own_handle = True
        self.handle = ctypes.c_void_p()
        check_call(
            LIB.HPPS_TensorCreate(len(shape), c_array(ctypes.c_int, shape),
                                  _NP_2_DTYPE[type],
                                  ctypes.byref(self.handle)))
def set_uri(self, files):
    """Register the sample files for hpps to read.

    Parameters
    ----------
    files: The files in array
    """
    c_files = [c_str(path) for path in files]
    check_call(LIB.HPPS_PlanMakerSetURI(
        self.handle, len(c_files), c_array(ctypes.c_char_p, c_files)))
def make(self):
    """Build the sample reading plan.

    Returns
    -------
    The constructed Plan.
    """
    plan_handle = ctypes.c_void_p()
    check_call(LIB.HPPS_PlanMakerMake(self.handle, ctypes.byref(plan_handle)))
    return Plan(plan_handle)
def Id2UniqId(id_tensor):
    """Convert an id tensor to the pair [local_id, uniq_id].

    Parameters
    ----------
    id_tensor: The id tensor

    Returns
    -------
    A two-element list [local_id_tensor, uniq_id_tensor]; both own their
    handles.
    """
    local_handle = ctypes.c_void_p()
    uniq_handle = ctypes.c_void_p()
    check_call(LIB.HPPS_Id2UniqId(id_tensor.handle,
                                  ctypes.byref(local_handle),
                                  ctypes.byref(uniq_handle)))
    local = Tensor(handle=local_handle, own_handle=True, shape=None, type=None)
    uniq = Tensor(handle=uniq_handle, own_handle=True, shape=None, type=None)
    return [local, uniq]
def add(self, grad, option=None):
    """Push grads (synchronous).

    Parameters
    ----------
    grad: The array-like grads
    option: optional dict of string push options
    """
    # FIX: default was a shared mutable dict (option={}); use None sentinel.
    # FIX: iteritems() is Python-2-only; items() works on both 2 and 3.
    if option is None:
        option = {}
    keys = []
    values = []
    for opt_key, opt_value in option.items():
        keys.append(c_str(opt_key))
        values.append(c_str(opt_value))
    check_call(
        LIB.ArrayTableAdd(self.handle, grad.handle, len(keys),
                          c_array(ctypes.c_char_p, keys),
                          c_array(ctypes.c_char_p, values)))
def write_sample(self, tensor_map):
    """Write one sample.

    Parameters
    ----------
    tensor_map: name -> tensor dict; each sample consists of multiple
        tensors
    """
    names = []
    tensors = []
    # FIX: iteritems() is Python-2-only; items() works on both 2 and 3.
    for name, tensor in tensor_map.items():
        names.append(c_str(name))
        tensors.append(tensor.handle)
    check_call(LIB.HPPS_RecordIOWriteSample(
        self.handle, len(names),
        c_array(ctypes.c_char_p, names),
        c_array(ctypes.c_void_p, tensors)))
def add(self, key, grad, option=None):
    """Push grads for the given keys (synchronous).

    Parameters
    ----------
    key: the uint32 or uint64 key tensor
    grad: The param grads
    option: optional dict of string push options
    """
    # FIX: default was a shared mutable dict (option={}); use None sentinel.
    if option is None:
        option = {}
    opt_keys = []
    opt_values = []
    # BUG FIX: the original loop reused the name `key` for the option-dict
    # key, clobbering the key-tensor parameter before `key.handle` was read
    # (AttributeError / wrong handle whenever option was non-empty).
    # Also: iteritems() is Python-2-only; items() works on both 2 and 3.
    for opt_key, opt_value in option.items():
        opt_keys.append(c_str(opt_key))
        opt_values.append(c_str(opt_value))
    # NOTE(review): unlike ArrayTableAdd, no len(opt_keys) count is passed
    # here; this mirrors the original call — verify against KVTableAdd's C
    # signature.
    check_call(
        LIB.KVTableAdd(self.handle, key.handle, grad.handle,
                       c_array(ctypes.c_char_p, opt_keys),
                       c_array(ctypes.c_char_p, opt_values)))
def add_async(self, grad, option=None):
    """Asynchronously push grads.

    Parameters
    ----------
    grad: The array-like grads
    option: optional dict of string push options

    Returns
    -------
    A c_int id; pass it to wait() to block until the push completes.
    """
    # FIX: default was a shared mutable dict (option={}); use None sentinel.
    # FIX: iteritems() is Python-2-only; items() works on both 2 and 3.
    if option is None:
        option = {}
    keys = []
    values = []
    for opt_key, opt_value in option.items():
        keys.append(c_str(opt_key))
        values.append(c_str(opt_value))
    async_id = ctypes.c_int()
    # BUG FIX: the original called ArrayTableGetAsync (the pull entry point,
    # which takes only 3 arguments elsewhere in this file) with these
    # push-style arguments; the add path should call ArrayTableAddAsync.
    check_call(
        LIB.ArrayTableAddAsync(self.handle, grad.handle,
                               ctypes.byref(async_id), len(keys),
                               c_array(ctypes.c_char_p, keys),
                               c_array(ctypes.c_char_p, values)))
    return async_id
def create_array_table(size, type, solver="", ps_mode="sync", kwargs=None):
    """Create a new array table.

    Parameters
    ----------
    size: The array length
    type: The array data type (mapped through _NP_2_DTYPE)
    solver: The user defined solver
    ps_mode: ps mode (sync/async)
    kwargs: Optional dict of string k/w config. The available keys:
        algo : assign/uniform/gaussian
        assigned_value : 0.1
        mu : 0
        sigma : 0.01
        min : 0
        max : 1
        seed : 0

    Returns
    -------
    An ArrayTable wrapping the newly created handle.
    """
    # FIX: default was a shared mutable dict (kwargs={}); use None sentinel.
    # FIX: iteritems() is Python-2-only; items() works on both 2 and 3.
    if kwargs is None:
        kwargs = {}
    keys = []
    values = []
    for cfg_key, cfg_value in kwargs.items():
        keys.append(c_str(cfg_key))
        values.append(c_str(cfg_value))
    handle = ctypes.c_void_p()
    check_call(
        LIB.CreateArrayTable(c_str(solver), c_str(ps_mode), size,
                             _NP_2_DTYPE[type], len(keys),
                             c_array(ctypes.c_char_p, keys),
                             c_array(ctypes.c_char_p, values),
                             ctypes.byref(handle)))
    return ArrayTable(handle)
def get_async(self, key, value, wait_id=None):
    """Pull parameters, either asynchronously or from local storage.

    Parameters
    ----------
    key: the uint32 or uint64 key tensor
    value: The params tensor to fill
    wait_id: if given, first wait on that id, then read synchronously from
        the local copy

    Returns
    -------
    A c_int wait id when a new async pull is issued; None when *wait_id*
    was supplied (the data is fetched synchronously instead).
    """
    if wait_id is None:
        pending = ctypes.c_int()
        check_call(
            LIB.KVTableGetAsync(self.handle, key.handle, value.handle,
                                ctypes.byref(pending)))
        return pending
    self.wait(wait_id)
    check_call(LIB.KVTableGetFromLocal(self.handle, key.handle, value.handle))
def write_header(self, name, type, level=None, is_aux_number=None):
    """Write the header of the record.

    Parameters
    ----------
    name: list of tensor names
    type: list of dtypes (mapped through _NP_2_DTYPE), parallel to *name*
    level: optional list of per-tensor levels; 0 is used for each when None
    is_aux_number: optional list of aux-number flags; 0 is used for each
        when None
    """
    names = []
    types = []
    levels = []
    is_aux_numbers = []
    # FIX: xrange() is Python-2-only; range() works on both 2 and 3.
    for index in range(len(name)):
        names.append(c_str(name[index]))
        types.append(_NP_2_DTYPE[type[index]])
        levels.append(0 if level is None else level[index])
        is_aux_numbers.append(
            0 if is_aux_number is None else is_aux_number[index])
    check_call(LIB.HPPS_RecordIOWriteHeader(
        self.handle, len(names),
        c_array(ctypes.c_char_p, names),
        c_array(ctypes.c_int, types),
        c_array(ctypes.c_int, levels),
        c_array(ctypes.c_int, is_aux_numbers)))