Example #1
def data_iter(x, y):
    batch = 10
    num_examples = len(x)
    idx = list(range(num_examples))
    random.shuffle(idx)
    for i in range(0, num_examples, batch):
        j = nd.array(idx[i:min(i + batch, num_examples)])
        yield nd.take(x, j), nd.take(y, j)
def data_iter():
    # generate a random index
    idx = list(range(num_examples))
    random.shuffle(idx)
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_examples)])
        yield nd.take(X, j), nd.take(y, j)
 def goodness_of_function_optimizer_data(self):
     self.__batch_size = 100
     idx = list(range(self.__num_examples))
     random.shuffle(idx)
     for i in range(0, self.__num_examples, self.__batch_size):
         j = nd.array(idx[i:min(i + self.__batch_size, self.__num_examples)])
         yield nd.take(self.__X, j).reshape((-1, 2)), nd.take(self.__y, j).reshape((-1, 1))
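The snippets above all share one pattern: shuffle an index list, then yield batches via nd.take. Below is a minimal self-contained usage sketch of that pattern; the synthetic X, y, and the chosen sizes are illustrative assumptions, not taken from any of the sources on this page.

import random
from mxnet import nd

num_examples = 100
batch_size = 10
X = nd.random_normal(shape=(num_examples, 2))      # synthetic features
y = 2 * X[:, 0] - 3.4 * X[:, 1] + 4.2              # synthetic labels

def data_iter():
    idx = list(range(num_examples))
    random.shuffle(idx)                            # randomize batch membership
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_examples)])
        yield nd.take(X, j), nd.take(y, j)

for data, label in data_iter():
    print(data.shape, label.shape)                 # (10, 2) (10,)
    break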
Example #4
def data_iter():
    # generate a random index
    idx = list(range(num_example))  # [0, 1, 2, ..., 999]
    random.shuffle(idx)  # shuffle idx
    for i in range(0, num_example, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_example)])
        yield nd.take(X, j), nd.take(Y, j)
def data_iter():
    # generate a random index
    idx = list(range(num_examples))
    random.shuffle(idx)  # shuffle
    for i in range(0, num_examples, batch_size):  # 0, 10, 20, ...
        j = nd.array(idx[i:min(i + batch_size, num_examples)])  # pick batch_size random examples
        yield nd.take(X, j), nd.take(y, j)  # examples and labels; Python's yield turns this into an iterator
def data_iter():
    idx = list(range(num_examples))
    random.shuffle(idx)
    data, label = mnist_train[:]
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_examples)])
        yield nd.take(data, j), nd.take(nd.array(label), j)
def data_generator(batch_size):
    index = list(range(config.training_size))
    random.shuffle(index)

    for i in range(0, config.training_size, batch_size):
        j = nd.array(index[i:min(i + batch_size, config.training_size)])
        yield nd.take(X, j), nd.take(y, j)
Example #8
    def forward(self, is_train, req, in_data, out_data, aux):

        # 1. flatten to 2-D: NCHW -> NHWC -> (N*H*W, C)
        feature = in_data[0].transpose((0, 2, 3, 1))
        self.diff_reshape = feature.shape
        dim = feature.shape[3]
        feature = feature.reshape((-1, dim))
        self.label = in_data[1].reshape((-1, ))
        self.batch_size = self.label.shape[0]

        # zero out the last center row
        self.center = aux[0]
        self.center[self.center.shape[0] - 1, :] = 0

        # 2.calculate diff
        hist = nd.array(np.bincount(self.label.asnumpy().astype(int)))
        centers_selected = nd.take(self.center, self.label)
        self.centers_count = nd.take(hist, self.label)

        label_valid = (self.label != 255).reshape((-1, 1))
        self.diff = label_valid * (
            feature - centers_selected) / self.centers_count.reshape((-1, 1))

        # 3.calculate output
        loss = mx.nd.sum(mx.nd.square(self.diff), axis=1)
        out_data[0][:] = loss

        # 4. reshape diff back to the transposed (N, H, W, C) feature shape
        self.diff = self.diff.reshape(self.diff_reshape)
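The gather in step 2 above is just nd.take with integer class labels as indices. A toy sketch (values are illustrative, not from the source) of how it selects one center row and one class count per sample:

import numpy as np
from mxnet import nd

centers = nd.array([[0., 0.], [10., 10.], [20., 20.]])      # one row per class
labels = nd.array([0, 2, 2, 1])

hist = nd.array(np.bincount(labels.asnumpy().astype(int)))  # class counts [1, 1, 2]
print(nd.take(centers, labels))  # rows 0, 2, 2, 1 of centers, one per sample
print(nd.take(hist, labels))     # per-sample class frequency: [1. 2. 2. 1.]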
Example #9
def data_iter(X, Y, batch_size=50):
    length = len(X)
    idx = list(range(length))
    random.shuffle(idx)
    for i in range(0, length, batch_size):
        ix_ = nd.array(idx[i:min(length, i + batch_size)], ctx=ctx)
        yield nd.take(X, ix_), nd.take(Y, ix_)
Example #10
def data_iter_old():
    idx = list(range(num_examples))
    random.shuffle(idx)  # random order
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_examples)])

        yield nd.take(X, j), nd.take(y, j)
def data_iter():
    # generate random indices
    idx = list(range(num_examples))
    random.shuffle(idx)  # shuffle randomly
    for i in range(0, num_examples, batch_size):  # 1000 examples, fetched 10 at a time
        j = nd.array(idx[i: min(i+batch_size, num_examples)])
        yield nd.take(X, j), nd.take(y, j)  # examples and labels
Example #12
def data_iter():
    # generate a random index
    idx = list(range(num_examples))
    random.shuffle(idx)  # shuffle
    for i in range(0, num_examples, batch_size):  # 0, 10, 20, ...
        j = nd.array(idx[i:min(i + batch_size, num_examples)])  # pick batch_size random examples
        yield nd.take(X, j), nd.take(y, j)  # examples and labels; Python's yield turns this into an iterator
Example #13
def data_iter(X_p, y_p, batch_size_p):
    # generate a random index
    idx = list(range(y_p.size))  # shuffle requires a list in Python 3
    random.shuffle(idx)
    for i in range(0, y_p.size, batch_size_p):
        j = nd.array(idx[i:min(i + batch_size_p, y_p.size)])
        yield nd.take(X_p, j), nd.take(y_p, j)
Example #14
def data_iter(x, y, batch_size):
    n = len(x)
    idx = list(range(n))
    np.random.shuffle(idx)
    for i in range(0, n, batch_size):
        j = nd.array(idx[i:min(i + batch_size, n)])
        yield nd.take(x, j), nd.take(y, j)
Example #15
def data_iter():
    idx = list(range(num_examples))
    random.shuffle(idx)  # shuffle the sequence randomly
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_examples)])
        # yield makes this function an iterator: it returns the taken batch
        # here, then resumes from the next statement on the following call
        yield nd.take(X, j), nd.take(y, j)
Example #16
def dataIter(x, y, step):
    idx = list(range(x.shape[0]))
    # print(x.shape[0])
    random.shuffle(idx)
    for i in range(0, x.shape[0], step):
        j = nd.array(idx[i:min(i + step, x.shape[0])])
        # print('j = ', j)
        yield nd.take(x, j), nd.take(y, j)
Example #17
def data_iter():
    idx = list(range(num_examples))
    random.shuffle(idx)
    for i in range(0, num_examples, batch_size):
        #print("i = " + str(i))
        j = nd.array(idx[i : min(i+batch_size, num_examples)])
        #print(j)
        yield nd.take(X, j), nd.take(y, j)
def data_iter():
    # create the index sequence
    idx = list(range(num_examples))
    # shuffle the sequence
    random.shuffle(idx)
    # read the data
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_examples)])
        yield nd.take(X, j), nd.take(y, j)
def data_iter(M, n):
    batch_size = 10
    num_examples = 1000
    # generate a random index
    idx = list(range(num_examples))
    random.shuffle(idx)  # shuffle randomly
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_examples)])
        yield nd.take(M, j), nd.take(n, j)
Example #20
def data_iter():
    # generate a random index list
    idx = list(range(num_examples))
    # shuffle the index list
    random.shuffle(idx)
    # range(start, end, step_size)
    for i in range(0, num_examples, batch_size):
        # the values in idx are indices into the origin_data array
        j = nd.array(idx[i:min(i + batch_size, num_examples)])
        # take one batch_size worth of data and label values at a time
        yield nd.take(origin_data, j), nd.take(y_hat, j)
Example #21
def data_iter(batch_size=100, kind='train'):
    if kind != 'train':
        idx = list(range(len(test_labels)))
        for i in range(0, len(test_labels), batch_size):
            j = nd.array(idx[i:min(i + batch_size, len(test_labels))])
            yield (nd.take(test_img_nd, j).as_in_context(ctx),
                   nd.take(test_lab_nd, j).as_in_context(ctx))
    else:
        idx = list(range(len(train_labels)))
        for i in range(0, len(train_labels), batch_size):
            j = nd.array(idx[i:min(i + batch_size, len(train_labels))])
            yield (nd.take(train_img_nd, j).as_in_context(ctx),
                   nd.take(train_lab_nd, j).as_in_context(ctx))
Example #22
    def forward(self, data, neighbor_data, neighbor_indices, neighbor_indptr,
                       node_type_mask=None, neighbor_type_mask=None, edge_type_mask=None, seg_indices=None):
        """Map the input features to hidden states + apply pooling + apply FC

        Parameters
        ----------
        F
        data : Symbol or NDArray
            Shape (batch_size, node_num, feat_dim)
        neighbor_data : Symbol or NDArray
            Shape (batch_size, neighbor_node_num, feat_dim)
        data_mask :  Symbol or NDArray
            Shape (batch_size, node_num, num_set, 1)
        neighbor_mask : Symbol or NDArray
            Shape (batch_size, neighbor_node_num, num_set, 1)
        neighbor_indices : Symbol or NDArray
            Shape (nnz, )
        neighbor_indptr : Symbol or NDArray
            Shape (node_num + 1, )
        edge_data : Symbol or NDArray or None
            Shape (batch_size, nnz, num_edge_num, 1)

        Returns
        -------

        """
        ## TODO does not consider node type
        if self._num_node_set is not None:
            #print("data", data.shape)
            #print("node_type_mask", node_type_mask.shape)
            data = self.data_map(data, node_type_mask)
            neighbor_data = self.neighbor_mid_map(neighbor_data, neighbor_type_mask)
        if self._num_edge_set is not None:
            neighbor_data = self.relation_W(neighbor_data)  ### (batch_size, neighbor_node_num, mid_units*num_edge_set)
            neighbor_data = nd.take(neighbor_data, indices=neighbor_indices, axis=-2) ## (batch_size, nnz, mid_units*num_edge_set)
            #print("neighbor_data", neighbor_data.shape)
            neighbor_data = nd.reshape(neighbor_data,
                                       shape=(0, 0, self._num_edge_set, self._mid_units)) ## (batch_size, nnz, num_edge_set, mid_units)
            #print("neighbor_data", neighbor_data.shape)
            #print("edge_data", edge_data.shape)
            neighbor_data = nd.reshape(nd.broadcast_mul(neighbor_data, edge_type_mask),
                                       shape=(0, 0, -1))
            #print("neighbor_data", neighbor_data.shape)


        pool_data = nd.contrib.seg_pool(data=neighbor_data,
                                       indices=seg_indices,
                                       indptr=neighbor_indptr,
                                       pool_type=self._pool_type)  # Shape(batch_size, node_num, mid_units*num_edge_set)
        if self._num_edge_set is not None:
            if self._accum_type == "stack":
                pool_data = self._out_act(pool_data)
            elif self._accum_type == "sum":
                pool_data = self._out_act(nd.sum(nd.reshape(pool_data, shape=(0, 0, self._num_edge_set, self._mid_units )), axis=2))

        #out = self.out_layer(nd.concat(pool_data, data, dim=-1))
        #out = self.out_layer(pool_data)
        return pool_data
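The axis=-2 call above gathers neighbor rows while leaving the batch dimension intact. A small standalone sketch of that gather with made-up shapes (the tensors and sizes below are assumptions for illustration, not from the source):

from mxnet import nd

feats = nd.arange(24).reshape((2, 4, 3))    # (batch_size, node_num=4, units=3)
neighbor_indices = nd.array([1, 3, 3])      # nnz = 3
out = nd.take(feats, indices=neighbor_indices, axis=-2)
print(out.shape)                            # (2, 3, 3): same rows taken in every batch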
Example #23
def gather_row(data, row_index):
    # MXNet workaround for empty row index
    if len(row_index) == 0:
        return data[0:0]

    if isinstance(row_index, nd.NDArray):
        return nd.take(data, row_index)
    else:
        return data[row_index, ]
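A quick usage sketch for gather_row, with illustrative values; it assumes MXNet's NumPy-style advanced indexing handles the non-NDArray branch:

data = nd.arange(12).reshape((4, 3))
print(gather_row(data, nd.array([2, 0])))   # rows 2 and 0 via nd.take
print(gather_row(data, [2, 0]))             # same rows via plain indexing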
 def __call__(self, idx, gpu_id=-1, trace=True):
     if self.emb.context != idx.context:
         idx = idx.as_in_context(self.emb.context)
     data = nd.take(self.emb, idx)
     if self.gpu >= 0:
         data = data.as_in_context(mx.gpu(self.gpu))
     data.attach_grad()
     if trace:
         self.trace.append((idx, data))
     return data
Example #25
    def forward(self):

        # 2-step
        diff = nd.subtract(nd.expand_dims(self.dataset, axis=0),
                           nd.expand_dims(self.centroid, axis=1))
        sqr = nd.square(diff)
        distance = nd.sum(sqr, axis=2)
        clustering = nd.argmin(distance, axis=0)
        # 3-step
        '''
        Because mxnet's nd.where does not return indices, numpy's np.where
        is used here instead.
        '''
        for j in range(self.centroid_numbers):
            members = nd.array(
                np.reshape(np.where(np.equal(clustering.asnumpy(), j)), (-1,)),
                ctx=self.ctx)
            self.centroid[j][:] = nd.mean(
                nd.take(self.dataset, members, axis=0), axis=0)
        return clustering , self.centroid
Example #26
    def __call__(self, idx, gpu_id=-1, trace=True):
        """ Return sliced tensor.

        Parameters
        ----------
        idx : th.tensor
            Slicing index
        gpu_id : int
            Which gpu to put sliced data in.
        trace : bool
            If True, trace the computation. This is required in training.
            If False, do not trace the computation.
            Default: True
        """
        if self.emb.context != idx.context:
            idx = idx.as_in_context(self.emb.context)
        data = nd.take(self.emb, idx)
        if gpu_id >= 0:
            data = data.as_in_context(mx.gpu(gpu_id))
        data.attach_grad()
        if trace:
            self.trace.append((idx, data))
        return data
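The class around this __call__ is not shown, but the pattern it implements can be sketched standalone: slice rows of a large embedding with nd.take, attach a gradient buffer to the slice, and write updates back to only the rows that were touched. The table size, indices, and learning rate below are illustrative assumptions.

from mxnet import nd, autograd

emb = nd.random_normal(shape=(1000, 16))    # full embedding table
idx = nd.array([3, 7, 42])

data = nd.take(emb, idx)                    # sliced rows (a copy, not a view)
data.attach_grad()
with autograd.record():
    loss = (data * data).sum()
loss.backward()

# scatter the gradient back to just the rows that were used
for k, i in enumerate(idx.asnumpy().astype(int)):
    emb[i] -= 0.01 * data.grad[k]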
Example #27
l = np.ones((2,1))
m = nd.array(l)  # numpy --> mxnet
n = m.asnumpy()  # mxnet --> numpy


# define the data producer
def data_iter(num_data, batch_size=4):
    """
    para: num_data: length of the dataset
    para: batch_size: number of examples per batch
    """
    idx = list(range(num_data))
    random.shuffle(idx)
    for i in range(0, num_data, batch_size):
        batch = nd.array(idx[i:min(i + batch_size, num_data)])
        yield nd.take(x, batch), nd.take(y, batch)

for data, label in data_iter(len(x)):
    print(data, label)

# initialize the model parameters
w = nd.random_normal(shape=(num_inputs, 1))
b = nd.zeros((1,))
params = [w, b]

for param in params:
    param.attach_grad()

# define the model

def net(x):
Example #28
def take(data, indices, dim):
    return nd.take(data, indices, dim)
batch_size = 256
train_data = gluon.data.DataLoader(mnist_train, batch_size, shuffle=True)
test_data = gluon.data.DataLoader(mnist_test, batch_size, shuffle=False)

# a different approach

num_examples = len(mnist_train)

data, label = mnist_train[:]
print(data.shape, label.shape)

test = data

label = nd.array(label)

print(nd.take(test, nd.array([1, 2])).shape)
print(nd.take(label[:], nd.array([1, 2])))

for data, label in train_data:
    print(data.shape, label.shape)
    break

# print(num_examples)
# for data, label in train_data:
# 	print(data.shape, label)
# 	break
import random


def data_iter():
    idx = list(range(num_examples))
Example #30
def gather_row(data, row_index):
    if isinstance(row_index, nd.NDArray):
        return nd.take(data, row_index)
    else:
        return data[row_index, ]
Example #31
def dataIter(x, y, batch):
    idx = list(range(x.shape[0]))
    random.shuffle(idx)
    for i in range(0, x.shape[0], batch):
        j = nd.array(idx[i:min(i + batch, x.shape[0])])
        yield nd.take(x, j), nd.take(y, j)
Example #32
def K_means_Algorithm(epoch=100,
                      point_numbers=2000,
                      centroid_numbers=5,
                      ctx=mx.gpu(0)):

    dataset = []
    centroid = []

    # data generation
    for i in range(point_numbers):

        if random.random() > 0.5:
            dataset.append([
                np.random.normal(loc=0, scale=0.9),
                np.random.normal(loc=0, scale=0.9)
            ])
        else:
            dataset.append([
                np.random.normal(loc=3, scale=0.5),
                np.random.normal(loc=0, scale=0.9)
            ])

    df = pd.DataFrame({
        "x": [d[0] for d in dataset],
        "y": [d[1] for d in dataset]
    })
    sns.lmplot("x", "y", data=df, fit_reg=False, size=10)
    plt.savefig("K means Algorithm init using mxnet.png")

    # 1-step
    random.shuffle(dataset)
    for i in range(centroid_numbers):
        centroid.append(random.choice(dataset))

    # using mxnet
    dataset = nd.array(dataset, ctx=ctx)
    centroid = nd.array(centroid, ctx=ctx)

    # data assignment , updating new center values
    for i in tqdm(range(epoch)):

        # 2-step
        diff = nd.subtract(nd.expand_dims(dataset, axis=0),
                           nd.expand_dims(centroid, axis=1))
        sqr = nd.square(diff)
        distance = nd.sum(sqr, axis=2)
        clustering = nd.argmin(distance, axis=0)

        # 3-step
        '''
        Because mxnet's nd.where does not return indices, numpy's np.where
        is used here instead.
        '''
        for j in range(centroid_numbers):
            members = nd.array(np.reshape(
                np.where(np.equal(clustering.asnumpy(), j)), (-1, )), ctx=ctx)
            centroid[j][:] = nd.mean(nd.take(dataset, members, axis=0), axis=0)
        print("epoch : {}".format(i + 1))

    for i in range(centroid_numbers):
        print("{}_center : Final center_value : {}".format(
            i + 1,
            centroid.asnumpy()[i]))

    # 4. show the result
    data = {"x": [], "y": [], "cluster": []}
    for i in range(len(clustering)):
        data["x"].append(dataset[i][0].asscalar())
        data["y"].append(dataset[i][1].asscalar())
        data["cluster"].append(clustering[i].asscalar())

    df = pd.DataFrame(data)
    sns.lmplot("x", "y", data=df, fit_reg=False, size=10, hue="cluster")
    plt.savefig("K means Algorithm completed using mxnet.png")
    plt.show()
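The assignment step (step 2) above relies on broadcasting to compute all point-to-center distances at once. A toy sketch with made-up points and centers (values are illustrative):

from mxnet import nd

points = nd.array([[0, 0], [1, 1], [5, 5]])          # (3, 2)
centers = nd.array([[0, 0], [5, 5]])                 # (2, 2)
diff = nd.subtract(nd.expand_dims(points, axis=0),   # (1, 3, 2)
                   nd.expand_dims(centers, axis=1))  # (2, 1, 2) -> broadcast (2, 3, 2)
dist = nd.sum(nd.square(diff), axis=2)               # (2, 3) squared distances
print(nd.argmin(dist, axis=0))                       # [0. 0. 1.]: nearest center per point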
def data_iter():
    idx = list(range(num_examples))  # generate a random index
    random.shuffle(idx)  # shuffle into random order
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i:min(i + batch_size, num_examples)])
        yield nd.take(X, j), nd.take(y, j)