Example #1
    def __init__(self, data, labels, cv_percent, test_percent, batch_size, mode='sequential'):
        # Expand label vectors (1D, 1xN, or Nx1) into one-hot target matrices
        # and make sure they are float32, which the C backend expects.
        if len(labels.shape) == 1 or labels.shape[0] == 1 or labels.shape[1] == 1:
            labels = np.float32(u.create_t_matrix(labels))
        self.size = labels.shape[0]
        self.shapes = [data.shape[1], labels.shape[1]]

        # Normalize both shapes to the backend's 4D (batches, maps, rows, cols) layout.
        shape = u.handle_shape(data.shape)
        shape_label = u.handle_shape(labels.shape)

        self.arrays = {}
        self.mode = mode
        self.pinned_buffers = {}

        self.batch_size = batch_size
        self.cv_percent = cv_percent
        self.test_percent = test_percent
        self.current = None
        self.next_X = None
        self.offsize_X = []
        self.offsize_y = []

        self.set_type = 'train'
        self.set_type_prev = 'train'

        self.next_batch_idx = 0

        # Copy data and labels into page-locked (pinned) host memory so that
        # host-to-GPU batch transfers can run asynchronously later on.
        self.X = lib.funcs.fto_pinned(shape[0], shape[1], shape[2], shape[3],
                                      data.ctypes.data_as(ct.POINTER(ct.c_float)))
        self.y = lib.funcs.fto_pinned(shape_label[0], shape_label[1], shape_label[2], shape_label[3],
                                      labels.ctypes.data_as(ct.POINTER(ct.c_float)))

        self.set_batch_sizes()
        self.init_buffers()

        self.net = None
        self.sample_size = None
        self.relative_error = None
        self.start_batching = True
        self.peer_access_enabled = False

        self.batch_buffers = {}
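The important mechanism above is the ctypes handoff: data.ctypes.data_as(...) exposes the raw float* behind a numpy buffer, and fto_pinned copies that memory into a page-locked host buffer. A minimal, library-free sketch of the handoff (the array here is purely illustrative):

import ctypes as ct
import numpy as np

data = np.float32(np.random.rand(128, 784))          # must be float32 and contiguous
ptr = data.ctypes.data_as(ct.POINTER(ct.c_float))    # raw float* into the numpy buffer
print(ptr[0], data[0, 0])                            # both read the same memory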
Example #2
    def forward(self, data=None, target=None, inTrainingMode=True):
        if data is not None:
            # Root layer: the input data is activated directly.
            shape = u.handle_shape(data.shape)
            self.unitcount = shape[3]
            self.handle_input_size(shape[2])
            self.root.target = target
            self.funcs.activation(data, self.activation, self.out,
                                  inTrainingMode)
        else:
            # Hidden/output layer: activation = prev.out . prev.w_next + prev.b_next,
            # then the nonlinearity writes the result into self.out.
            gpu.dot(self.prev_layer.out, self.prev_layer.w_next,
                    self.activation)
            gpu.add(self.activation, self.prev_layer.b_next, self.activation)
            self.funcs.activation(self.activation, self.activation, self.out,
                                  inTrainingMode)

        # Recurse down the chain until the last layer is reached.
        if self.next_layer: self.next_layer.forward(None, None, inTrainingMode)
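The else-branch is the usual affine-then-nonlinearity recursion: each layer computes prev.out . prev.w_next + prev.b_next, applies its activation, and hands off to the next layer. A NumPy sketch of the same chain, with ReLU standing in for self.funcs.activation (an assumption; the real activation is configurable):

import numpy as np

def forward_chain(x, layers):
    # layers: list of (W, b) pairs; ReLU stands in for the configurable activation
    out = x
    for W, b in layers:
        out = np.maximum(np.dot(out, W) + b, 0)
    return out

x = np.random.rand(4, 8).astype(np.float32)
layers = [(np.random.rand(8, 16).astype(np.float32), np.zeros(16, np.float32)),
          (np.random.rand(16, 2).astype(np.float32), np.zeros(2, np.float32))]
print(forward_chain(x, layers).shape)                # (4, 2)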
Example #3
    def __init__(self,
                 npArray=None,
                 mat_pointer=None,
                 split_idx=-1,
                 dtype=np.float32):
        self.shape = None
        self.shape_tensor = None
        self.dummy = False
        self.id = uuid.uuid4()
        self.split_idx = split_idx

        if isinstance(npArray, np.ndarray):
            # Allocate (optionally device-split) GPU memory and copy the
            # host array into it.
            npArray = np.float32(npArray)
            shape = u.handle_shape(npArray.shape)

            mat_pointer = lib.funcs.fempty_split(shape[0], shape[1], shape[2],
                                                 shape[3], split_idx)
            lib.funcs.ftogpu_split(
                mat_pointer, npArray.ctypes.data_as(ct.POINTER(ct.c_float)),
                split_idx)
            self.shape = npArray.shape

        if mat_pointer:
            # Read the 4D tensor shape back out of the C struct.
            self.shape_tensor = (mat_pointer.contents.batches,
                                 mat_pointer.contents.maps,
                                 mat_pointer.contents.rows,
                                 mat_pointer.contents.cols)
            if not self.shape: self.shape = self.shape_tensor

        self.pt = mat_pointer
        self.npArray = npArray
        # Register with the memory manager so the buffer can be tracked and freed.
        mem.arrays[self.id] = [self.shape_tensor, self.pt, dtype]
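u.handle_shape itself is not shown in these examples; from its call sites it evidently maps a 1- to 4-dimensional shape onto the fixed (batches, maps, rows, cols) layout that every C function expects. A plausible stand-in, offered only as a guess at its semantics:

def handle_shape(shape):
    # Hypothetical re-implementation of u.handle_shape: left-pad with 1s so
    # that, e.g., (rows, cols) becomes (1, 1, rows, cols).
    if isinstance(shape, int): shape = (shape,)
    assert 1 <= len(shape) <= 4, 'expected a 1-4 dimensional shape'
    return (1,) * (4 - len(shape)) + tuple(shape)

print(handle_shape((128, 784)))                      # (1, 1, 128, 784)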
Example #4
def empty(shape, split_idx=-1):
    # Uninitialized GPU array; the shape is padded to the 4D tensor layout.
    shape = u.handle_shape(shape)
    out = array(
        None,
        lib.funcs.fempty_split(shape[0], shape[1], shape[2], shape[3],
                               split_idx), split_idx)
    return out
Example #5
def zeros(shape, split_idx=-1):
    # Zero-initialized GPU array; split_idx is forwarded to the wrapper as
    # in empty() above.
    shape = u.handle_shape(shape)
    out = array(
        None,
        lib.funcs.fzeros_split(shape[0], shape[1], shape[2], shape[3],
                               split_idx), split_idx)
    return out
Example #6
    def normal(self, loc=0.0, scale=1.0, size=None):
        assert size, "Size must be greater than zero!"
        d0, d1, d2, d3 = u.handle_shape(size)
        # The element count must be even, presumably because the underlying
        # generator produces normal samples in pairs.
        assert (d0 * d1 * d2 * d3) % 2 == 0, "Size must be a multiple of 2!"
        return array(
            None,
            lib.funcs.fnormal(self.p_gpupy, d0, d1, d2, d3, ct.c_float(loc),
                              ct.c_float(scale)))
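The even-count restriction suggests the backend draws normal samples in pairs, as Box-Muller-style and some cuRAND generators do. On the CPU side the equivalent of this call with a compliant shape would be:

import numpy as np

size = (784, 128)                                    # 784 * 128 elements, an even count
w = np.float32(np.random.normal(0.0, 0.01, size))    # CPU analogue of normal()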
Example #7
def to_col_major_pinned_pointer(x1, pt_out=None):
    # Reorder a row-major numpy array into a column-major pinned host buffer.
    if x1.dtype != np.float32: x1 = np.float32(x1)
    shape = u.handle_shape(x1.shape)
    if not pt_out: pt_out = empty_pinned_pointer(shape)
    lib.funcs.inp_to_col_major_pinned(
        x1.ctypes.data_as(ct.POINTER(ct.c_float)), pt_out, shape[0], shape[1],
        shape[2], shape[3])
    return pt_out
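The column-major reordering matters presumably because the BLAS-style kernels underneath expect Fortran layout, while numpy arrays are row-major by default. The pure-NumPy equivalent of what inp_to_col_major_pinned does to the element order:

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
col_major = np.asfortranarray(x)        # same values, column-major storage
print(x.ravel(order='K'))               # [0. 1. 2. 3. 4. 5.]
print(col_major.ravel(order='K'))       # [0. 3. 1. 4. 2. 5.]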
Example #8
def pointer2ndarray(pt, shape, dtype=np.float32):
    # Copy the floats behind a C pointer into a fresh numpy array.
    shape_tensor = u.handle_shape(shape)
    size = shape_tensor[0] * shape_tensor[1] * shape_tensor[2] * shape_tensor[3]
    str_buffer = ct.string_at(pt, ct.sizeof(ct.c_float) * size)
    # frombuffer replaces the deprecated fromstring; copy() keeps the result writable.
    return np.frombuffer(str_buffer, dtype=dtype).copy().reshape(shape)
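This is the inverse staging step: raw floats are copied back out of a C pointer into numpy. The same round trip is runnable with nothing but ctypes and NumPy:

import ctypes as ct
import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
pt = x.ctypes.data_as(ct.POINTER(ct.c_float))        # float* into x's buffer
buf = ct.string_at(pt, ct.sizeof(ct.c_float) * x.size)
y = np.frombuffer(buf, dtype=np.float32).reshape(x.shape)
assert (x == y).all()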
Example #9
def empty_pinned_pointer(shape):
    # The staging array must be float32; np.empty defaults to float64.
    out = np.empty(shape, dtype=np.float32)
    shape_tensor = u.handle_shape(shape)
    return lib.funcs.fto_pinned(shape_tensor[0], shape_tensor[1],
                                shape_tensor[2], shape_tensor[3],
                                out.ctypes.data_as(ct.POINTER(ct.c_float)))
Example #10
def to_pinned_pointer(x1):
    # Copy a numpy array into page-locked host memory for async GPU transfers.
    if x1.dtype != np.float32: x1 = np.float32(x1)
    shape = u.handle_shape(x1.shape)
    return lib.funcs.fto_pinned(shape[0], shape[1], shape[2], shape[3],
                                x1.ctypes.data_as(ct.POINTER(ct.c_float)))
Example #11
def ones(shape):
    # One-initialized GPU array; unlike empty() and zeros(), fones has no
    # split variant here.
    shape = u.handle_shape(shape)
    out = array(None, lib.funcs.fones(shape[0], shape[1], shape[2], shape[3]))
    return out
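Taken together, empty, zeros, ones, and normal mirror their NumPy namesakes but return GPU-resident array wrappers. A hedged usage sketch; the module name gpupy and the rng object are assumptions, while the functions are the ones defined above:

import gpupy as gpu                     # hypothetical module name

a = gpu.zeros((128, 784))               # zero-filled GPU buffer, shape padded to 4D
b = gpu.ones((128, 784))
c = gpu.empty((64, 10))                 # uninitialized GPU buffer
# w = rng.normal(0.0, 0.01, (784, 128)) # via an object exposing normal() above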