Example 1
    def pack_weights(self, args):
        """Pack separate weight matrices into a single packed
        weight.

        Parameters
        ----------
        args : dict of str -> NDArray
            Dictionary containing unpacked weights.

        Returns
        -------
        args : dict of str -> NDArray
            Dictionary with packed weights associated with
            this cell.
        """
        args = args.copy()
        if not self._gate_names:
            return args
        for group_name in ['i2h', 'h2h']:
            weight = []
            bias = []
            for gate in self._gate_names:
                wname = '%s%s%s_weight' % (self._prefix, group_name, gate)
                weight.append(args.pop(wname))
                bname = '%s%s%s_bias' % (self._prefix, group_name, gate)
                bias.append(args.pop(bname))
            args['%s%s_weight' %
                 (self._prefix, group_name)] = ndarray.concatenate(weight)
            args['%s%s_bias' %
                 (self._prefix, group_name)] = ndarray.concatenate(bias)
        return args
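A minimal sketch of the packing pattern above, using a hypothetical prefix and two hypothetical gate names (the real cell supplies self._prefix and self._gate_names):

import mxnet.ndarray as ndarray

# Hypothetical unpacked per-gate weights for a two-gate cell.
args = {'rnn_i2h_i_weight': ndarray.ones((4, 8)),
        'rnn_i2h_f_weight': ndarray.ones((4, 8)) * 2}
# pack_weights concatenates the per-gate matrices along axis 0.
packed = ndarray.concatenate([args.pop('rnn_i2h_i_weight'),
                              args.pop('rnn_i2h_f_weight')])
print(packed.shape)  # (8, 8)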
Example 2
def divideTrainTest(x, y, rate, k=None):
    # With k=None, return a single (train, test) split at `rate`.
    # Otherwise return a generator over k folds. The k-fold logic lives in
    # an inner generator: if this body contained `yield` directly, the whole
    # function would become a generator and the `return` below would
    # silently hand the caller an empty generator instead of the tuple.
    if k is None:
        n = int(len(x) * rate)
        return (x[0:n], y[0:n]), (x[n:], y[n:])

    num = int(len(x) / k)

    def folds():
        for i in range(k):
            if i == 0:
                test_data = (x[0:num], y[0:num])
                train_data = (x[num:], y[num:])
            elif i == k - 1:
                test_data = (x[i * num:], y[i * num:])
                train_data = (x[0:i * num], y[0:i * num])
            else:
                start = i * num
                end = min((i + 1) * num, len(x))
                test_data = (x[start:end], y[start:end])
                train_data = (nd.concatenate([x[0:start], x[end:]]),
                              nd.concatenate([y[0:start], y[end:]]))
            yield train_data, test_data

    return folds()
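A usage sketch, assuming x and y are 1-D NDArrays (the data here is illustrative):

from mxnet import nd

x = nd.arange(10)
y = nd.arange(10) * 2
# Single 80/20 split.
(train_x, train_y), (test_x, test_y) = divideTrainTest(x, y, rate=0.8)
# 5-fold split: each iteration holds out a different fold for testing.
for (tr_x, tr_y), (te_x, te_y) in divideTrainTest(x, y, rate=0.8, k=5):
    print(tr_x.shape, te_x.shape)  # (8,) (2,) on every fold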
Example 3
def plot_img(losses_log):
    sw.add_image(tag='A', image=nd.clip(nd.concatenate([losses_log['real_A'][0][0:1],
                                                        losses_log['fake_B'][0][0:1],
                                                        losses_log['rec_A'][0][0:1],
                                                        losses_log['idt_A'][0][0:1]]) * 0.5 + 0.5, 0, 1))
    sw.add_image(tag='B', image=nd.clip(nd.concatenate([losses_log['real_B'][0][0:1],
                                                        losses_log['fake_A'][0][0:1],
                                                        losses_log['rec_B'][0][0:1],
                                                        losses_log['idt_B'][0][0:1]]) * 0.5 + 0.5, 0, 1))
Example 4
def plot_img(losses_log):
    sw.add_image(tag='lr_img',
                 image=nd.clip(
                     nd.concatenate(losses_log['lr_img'])[0:4], 0, 1))
    sw.add_image(tag='hr_img',
                 image=nd.clip(
                     nd.concatenate(losses_log['hr_img'])[0:4], 0, 1))
    sw.add_image(tag='hr_img_fake',
                 image=nd.clip(
                     nd.concatenate(losses_log['hr_img_fake'])[0:4], 0, 1))
Example 5
    def __next__(self):
        if self.cls_cnt >= self.cls_num:
            self.cls_cnt = 0
            raise StopIteration
        imgs_support = []
        imgs_query = []
        cls_ids_support = []
        cls_ids_query = []
        img_ids_support = []
        img_ids_query = []
        cnt = 0
        while cnt < self.nc:
            cur_cls = next(self.cls_seq)
            try:
                img, cls_id, img_id = next(self.ites[cur_cls])
            except StopIteration:
                self.ites[cur_cls] = iter(self.cls_loader[cur_cls])
                continue
            imgs_support.append(img[0:self.ns])
            imgs_query.append(img[self.ns:])
            cls_ids_support.append(cls_id[0:self.ns])
            cls_ids_query.append(cls_id[self.ns:])
            img_ids_support.append(img_id[0:self.ns])
            img_ids_query.append(img_id[self.ns:])

            cnt += 1
        self.cls_cnt += self.nc
        support = (nd.concatenate(imgs_support, 0), nd.concatenate(cls_ids_support, 0),
                   nd.concatenate(img_ids_support, 0))
        query = (nd.concatenate(imgs_query, 0), nd.concatenate(cls_ids_query, 0),
                 nd.concatenate(img_ids_query, 0))
        return (nd.concatenate([support[0], query[0]], 0),
                nd.concatenate([support[1], query[1]], 0),
                nd.concatenate([support[2], query[2]], 0))
Example 6
def forward(net, data, ctx):
    """Multiple xpu run support.

    `baxis` (batch axis) and `olen` (number of network outputs) are
    captured from the enclosing scope.
    """
    data = gluon.utils.split_and_load(
        data, ctx_list=ctx, batch_axis=baxis, even_split=False)
    outs = [net(d) for d in data]
    if olen == 1:
        outs = nd.concatenate(outs)
    else:
        outs = [nd.concatenate([outs[i][j] for i in range(len(outs))])
                for j in range(olen)]
    return outs
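The multi-output branch can be illustrated in isolation; a sketch with two devices and olen = 2 outputs per device:

from mxnet import nd

# outs[i][j] is output j produced on device i.
outs = [(nd.ones((2, 3)), nd.zeros((2, 5))),      # device 0
        (nd.ones((2, 3)) * 2, nd.zeros((2, 5)))]  # device 1
olen = 2
merged = [nd.concatenate([outs[i][j] for i in range(len(outs))])
          for j in range(olen)]
print([m.shape for m in merged])  # [(4, 3), (4, 5)]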
Example 7
def get_embedding_loader(data_loader,
                         model,
                         is_episode=False,
                         batch_size=32,
                         ctx=mx.gpu(1)):
    """
    Generating a data loader containing the embedding of the data in the given data_loader,
    produced by model
    """
    all_data = {}
    all_cls_id = {}

    all_data2 = []
    all_cls_id2 = []
    for data, label in data_loader:
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)

        data_batch = mx.io.DataBatch(data=(data, ))
        model.forward(data_batch, is_train=False)
        data2 = model.get_outputs()[0]
        data2 = data2.asnumpy()
        data2 = nd.array(data2, ctx=ctx)

        all_data2.append(data2)
        all_cls_id2.append(label)

        for i in range(data.shape[0]):
            cur_cls_id = label[i].asscalar()
            if all_data.get(cur_cls_id) is None:
                all_data[cur_cls_id] = []
                all_cls_id[cur_cls_id] = []
            # Append only the i-th embedding and label, not the whole batch.
            all_data[cur_cls_id].append(data2[i:i + 1])
            all_cls_id[cur_cls_id].append(label[i:i + 1])

    if is_episode:
        loader = {}
        for key in all_data.keys():
            data = nd.concatenate(all_data[key], 0)
            cls_id = nd.concatenate(all_cls_id[key], 0)
            loader[key] = DataLoader(MyDataset(data, cls_id),
                                     batch_size=batch_size,
                                     shuffle=True,
                                     last_batch='rollover')
    else:
        all_data2 = nd.concatenate(all_data2, 0)
        all_cls_id2 = nd.concatenate(all_cls_id2, 0)
        loader = DataLoader(MyDataset(all_data2, all_cls_id2),
                            batch_size=batch_size,
                            shuffle=True,
                            last_batch='rollover')
    return loader
Example 8
def fftfilt_nd(x, params):
    (b, m, nx, nb, L, nfft) = params

    B = nd.contrib.fft(data=nd.concatenate(
        [b.T, nd.zeros(shape=(1, (nfft - b.size)), ctx=ctx)], axis=1))
    if b.size == 1:
        B = B.T  # make sure fft of B is a column (might be a row if b is scalar)
    if b.shape[1] == 1:
        B = nd.repeat(data=B, repeats=x.shape[1],
                      axis=0)  # replicate the column B
        B_re = nd.slice(data=B, begin=(0, 0), end=(0, None), step=(1, 2))
        B_im = nd.slice(data=B, begin=(0, 1), end=(0, None), step=(1, 2))
    if x.shape[1] == 1:
        x = nd.repeat(data=x, repeats=b.shape[1],
                      axis=1)  # replicate the column x
    y = nd.zeros_like(x.T)

    istart = 1
    while istart <= nx:
        iend = min(istart + L - 1, nx)
        if (iend - istart) == 0:
            X = x[istart] * np.ones((nfft, 1))  # need to fft a scalar
        else:
            temp = nd.slice(x, begin=istart - 1, end=iend).T
            X = nd.contrib.fft(data=nd.concatenate(
                [temp,
                 nd.zeros(shape=(temp.shape[0], nfft - temp.shape[1]),
                          ctx=ctx)], axis=1))
            X_re = nd.slice(data=X, begin=(0, 0), end=(0, None), step=(1, 2))
            X_im = nd.slice(data=X, begin=(0, 1), end=(0, None), step=(1, 2))

        XprodB_re = (X_re * B_re - X_im * B_im)
        XprodB_im = (X_re * B_im + X_im * B_re)
        Ytemp = nd.zeros((X.shape[0], X.shape[1]), ctx=ctx)
        Ytemp[:, ::2] = XprodB_re
        Ytemp[:, 1::2] = XprodB_im
        Y = mx.contrib.ndarray.ifft(Ytemp / nfft)  # only the real part!!!!

        yend = min(nx, istart + nfft - 1)

        y[:, istart - 1:yend] = nd.slice(
            data=y, begin=(0, istart - 1), end=(0, yend),
            step=(1, 1)) + nd.slice(
                data=Y, begin=(0, 0), end=(0, yend - istart + 1), step=(1, 1))
        istart += L
#     y = real(y)

    return y
Example 9
def graph_func(data):
    data = gluon.utils.split_and_load(data,
                                      ctx_list=ctx,
                                      batch_axis=0,
                                      even_split=False)
    res = [net1.forward(d) for d in data]
    return nd.concatenate(res)
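A sketch of the split-then-merge round trip with an identity "network", using two CPU contexts for illustration:

import mxnet as mx
from mxnet import gluon, nd

ctx = [mx.cpu(0), mx.cpu(1)]
data = nd.arange(24).reshape((6, 4))
parts = gluon.utils.split_and_load(data, ctx_list=ctx, batch_axis=0,
                                   even_split=False)
# nd.concatenate copies the per-device results back into one array.
res = nd.concatenate(parts)
print(res.shape)  # (6, 4), in the original batch order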
Example 10
def plot_loss(losses_log, global_step, epoch, i):
    message = '(epoch: %d, iters: %d) ' % (epoch, i)
    for key, value in losses_log.losses.items():
        if 'loss_' in key:
            loss = nd.concatenate(value, axis=0).mean().asscalar()
            sw.add_scalar('loss', {key: loss}, global_step)
            message += '%s: %.3f ' % (key, loss)
    print(message)
Example 11
    def hybrid_forward(self, F, label, pred):
        # Build a two-channel one-hot label: background (label != 1) and
        # foreground. Note this uses `nd` and `pred.shape` directly, so the
        # loss only works in imperative mode, not when hybridized.
        label = nd.concatenate([label != 1, label], axis=1)

        axes = tuple(range(2, len(pred.shape)))
        intersect = nd.sum(pred * label, axis=axes)
        denom = nd.sum(pred + label, axis=axes)
        # Soft Dice coefficient per class, negated so lower is better.
        return - (2. * intersect / (denom + self.eps)).mean(axis=1)
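A sketch of the loss on toy data, assuming pred is a two-channel softmax output and label is a binary mask (the eps value is illustrative):

from mxnet import nd

pred = nd.softmax(nd.random.uniform(shape=(1, 2, 4, 4)), axis=1)
label = nd.ones((1, 1, 4, 4))
onehot = nd.concatenate([label != 1, label], axis=1)  # background, foreground
intersect = nd.sum(pred * onehot, axis=(2, 3))
denom = nd.sum(pred + onehot, axis=(2, 3))
print(-(2. * intersect / (denom + 1e-5)).mean(axis=1))  # the Dice loss value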
Example 12
def plot_loss(losses_log, global_step, epoch, i):
    message = '(epoch: %d, iters: %d) ' % (epoch, i)
    for key, value in losses_log.losses.items():
        if 'loss_' in key:
            loss = nd.concatenate(value, axis=0).mean().asscalar()
            sw.add_scalar('loss', {key: loss}, global_step)
            message += '%s: %.3f ' % (key, loss)
    print(message)
Example 13
def jacobian_autograd(x, y):
    jac = []
    for i in range(y.shape[1]):
        with autograd.record():
            yi = y[:, i]
        dyidx = autograd.grad(yi, [x], create_graph=True)[0]
        jac += [nd.expand_dims(dyidx, 1)]
    return nd.concatenate(jac, 1)
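A usage sketch: for y = x ** 2 the Jacobian is diagonal with entries 2x (x must have a gradient buffer attached and y must be computed under autograd.record):

from mxnet import autograd, nd

x = nd.array([[1., 2., 3.]])
x.attach_grad()
with autograd.record():
    y = x ** 2
print(jacobian_autograd(x, y))  # a (1, 3, 3) array holding diag(2, 4, 6)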
Example 14
def mobilenet(data, label):
    data = gluon.utils.split_and_load(data, ctx_list=ctx, batch_axis=0, even_split=False)
    res = [net1.forward(d) for d in data]
    res = nd.concatenate(res)
    acc_top1.update(label, res)
    _, top1 = acc_top1.get()
    acc_top5.update(label, res)
    _, top5 = acc_top5.get()
    return "top1={:6.2%} top5={:6.2%}".format(top1, top5)
Example 15
def shuffle_data_nd(data, peak_samppoint, peak_time, times):
    # Draw `times` random shift sizes, then build one shifted copy per size.
    shift_list = nd.random_uniform(
        0, peak_samppoint - round((peak_time - 0.2) * data.shape[-1]),
        shape=(times,), ctx=mx.cpu())
    base = forward_moving_wave_nd(data, int(shift_list.asnumpy()[0]))
    if times == 1:
        return base, shift_list
    for shift_size in shift_list[1:]:
        temp = forward_moving_wave_nd(data, int(shift_size.asscalar()))
        base = nd.concatenate([base, temp], axis=0)
    return base, shift_list
Example 16
def aggregate(imgs, recognizer):
    loader = DataLoader(imgs, 1)
    embeddings = []
    for data in loader:
        _, embedding = recognizer.predict(data)
        embeddings.append(embedding)

    embeddings = nd.concatenate(embeddings, axis=0)
    ret = nd.mean(embeddings, axis=0)
    return ret
Example 17
def cvm_quantize(data, label):
    data = sim.load_real_data(data, 'data', inputs_ext)
    data = gluon.utils.split_and_load(data, ctx_list=ctx, batch_axis=0, even_split=False)
    res = [net2.forward(d) for d in data]
    res = nd.concatenate(res)
    qacc_top1.update(label, res)
    _, top1 = qacc_top1.get()
    qacc_top5.update(label, res)
    _, top5 = qacc_top5.get()
    return "top1={:6.2%} top5={:6.2%}".format(top1, top5)
Example 18
File: model.py Project: munkim/east
    def forward(self, stem_out, *args):
        out = self.unpool1(stem_out[3])  # Branch 1
        out = F.concatenate([out, stem_out[2]], axis=1)
        out = self.conv1_1(out)
        out = self.conv1_2(out)

        out = self.unpool2(out)  # Branch 2
        out = F.concatenate([out, stem_out[1]], axis=1)
        out = self.conv2_1(out)
        out = self.conv2_2(out)

        out = self.unpool3(out)  # Branch 3
        out = F.concatenate([out, stem_out[0]], axis=1)
        out = self.conv3_1(out)
        out = self.conv3_2(out)

        out = self.output(out)  # Branch 4

        return out
Example 19
def predictProb(params, net, ctx, X):
    pred_probs = []
    num_examples = X.shape[0]
    for data, bs in dataIter(X, batch_size=params['bs'], shuffle=False):
        data = nd.array(data).as_in_context(ctx)
        pred_prob = net(data)
        pred_probs.append(pred_prob)

    pred_probs = nd.concatenate(pred_probs, axis=0).asnumpy()
    pred_probs = pred_probs.squeeze()
    return pred_probs
Example 20
def load_dir(dir_path, img_size, ctx):
    imgs = []
    file_names = []
    for f in os.listdir(dir_path):
        file_names.append(f)
        img_path = os.path.join(dir_path, f)
        img = cv2.imread(img_path)
        img = resize_pad(img, img_size)
        img = np2nd(img, ctx)
        imgs.append(img)
    return nd.concatenate(imgs, axis=0), file_names
Example 21
def model_func(data, label):
    data = sim.load_real_data(data, 'data', inputs_qext) \
           if inputs_qext else data
    data = gluon.utils.split_and_load(data, ctx_list=ctx,
                                      batch_axis=0, even_split=False)
    res = [net.forward(d) for d in data]
    res = nd.concatenate(res)
    acc_top1.update(label, res)
    _, top1 = acc_top1.get()
    acc_top5.update(label, res)
    _, top5 = acc_top5.get()
    return "top1={:6.2%} top5={:6.2%}".format(top1, top5)
Example 22
    def _getdata(self, data_source):
        if self.cursor + self.batch_size <= self.num_data:  # no pad
            return [
                x[1][self.cursor:self.cursor + self.batch_size]
                for x in data_source
            ]
        else:  # with pad
            pad = self.batch_size - self.num_data + self.cursor
            return [
                concatenate([x[1][self.cursor:], x[1][:pad]])
                for x in data_source
            ]
Example 24
def _merge_multi_context(outputs, major_axis):
    """Merge outputs that lives on multiple context into one, so that they look
    like living on one context.
    """
    rets = []
    for tensors, axis in zip(outputs, major_axis):
        if axis >= 0:
            rets.append(nd.concatenate(tensors, axis=axis, always_copy=False))
        else:
            # negative axis means the there is no batch_size axis, and all the
            # results should be the same on each device. We simply take the
            # first one, without checking they are actually the same
            rets.append(tensors[0])
    return rets
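A minimal sketch of the merge, with one output slot produced on two devices:

from mxnet import nd

# outputs[j] lists output slot j across devices; axis 0 is the batch axis.
outputs = [[nd.ones((2, 4)), nd.ones((2, 4)) * 2]]
merged = _merge_multi_context(outputs, major_axis=[0])
print(merged[0].shape)  # (4, 4): the per-device batches stacked together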
Example 25
def _mkanchors(ws, hs, x_ctr, y_ctr):
    """
    Given a vector of widths (ws) and heights (hs) around a center
    (x_ctr, y_ctr), output a set of anchors (windows).
    """

    ws = ws.reshape((-1, 1))
    hs = hs.reshape((-1, 1))
    anchors = nd.concatenate([
        x_ctr - 0.5 * (ws - 1),
        y_ctr - 0.5 * (hs - 1),
        x_ctr + 0.5 * (ws - 1),
        y_ctr + 0.5 * (hs - 1),
    ], axis=1)
    return anchors
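A quick check of the geometry, with illustrative widths and heights around the center (7.5, 7.5):

from mxnet import nd

ws = nd.array([8., 16.])
hs = nd.array([16., 8.])
print(_mkanchors(ws, hs, x_ctr=7.5, y_ctr=7.5))
# [[ 4.  0. 11. 15.]
#  [ 0.  4. 15. 11.]]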
Example 26
    def _getdata(self, data_source):
        """Load data from underlying arrays, internal use only."""
        assert (self.cursor < self.num_data), "DataIter needs reset."
        if self.cursor + self.batch_size <= self.num_data:
            return [
                # np.ndarray or NDArray case
                x[1][self.cursor:self.cursor +
                     self.batch_size] if isinstance(x[1],
                                                    (np.ndarray, NDArray)) else
                # h5py (only supports indices in increasing order)
                array(x[1][sorted(
                    self.idx[self.cursor:self.cursor + self.batch_size])][[
                        list(self.idx[self.cursor:self.cursor +
                                      self.batch_size]).index(i)
                        for i in sorted(self.idx[self.cursor:self.cursor +
                                                 self.batch_size])
                    ]]) for x in data_source
            ]
        else:
            pad = self.batch_size - self.num_data + self.cursor
            return [
                # np.ndarray or NDArray case
                concatenate([x[1][self.cursor:], x[1][:pad]]) if isinstance(
                    x[1], (np.ndarray, NDArray)) else
                # h5py (only supports indices in increasing order)
                concatenate([
                    array(x[1][sorted(self.idx[self.cursor:])][[
                        list(self.idx[self.cursor:]).index(i)
                        for i in sorted(self.idx[self.cursor:])
                    ]]),
                    array(x[1][sorted(self.idx[:pad])][[
                        list(self.idx[:pad]).index(i)
                        for i in sorted(self.idx[:pad])
                    ]])
                ]) for x in data_source
            ]
Example 27
File: model.py Project: munkim/east
    def forward(self, inputs, *args):
        stem_out = self.stem(inputs)
        branch_out = self.branch(stem_out)

        # Score Map
        F_score = self.scores(branch_out)

        # Geometric Map (RBOX)
        boxes = self.boxes(
            branch_out)  # 4 channels of axis-aligned bounding box
        rot_angles = (
            self.angles(branch_out) - 0.5
        ) * np.pi / 2  # 1 channel rotation angle, which is between [-45, 45]
        F_geo = F.concatenate([boxes, rot_angles], axis=1)

        return F_score, F_geo
Example 28
def ssd_generate_anchors(scale,
                         ratios=nd.array([0.5, 1, 2]),
                         append_scale=None):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, scale, scale) window.
    
    append_scale is used to generate an extra anchor whose scale is 
    sqrt{scale*append_scale}. Set append_scale=None to disenable this 
    extra anchor.
    """
    base_anchor = nd.array([1, 1, scale, scale])
    anchors = _ratio_enum(base_anchor, ratios)
    if append_scale is not None:
        ns = int(scale * append_scale)
        append_anchor = nd.round(nd.sqrt(nd.array([[1, 1, ns, ns]])))
        anchors = nd.concatenate([anchors, append_anchor], axis=0)
    return anchors
Example 29
def generate_anchors(base_size=16,
                     ratios=nd.array([0.5, 1, 2]),
                     scales=2**nd.arange(3, 6)):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, 15, 15) window.
    This implementation matches the original Faster-RCNN RPN generate_anchors().
    But all calculations are on mxnet.ndarray.NDArray.

    Refer to 
    https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/rpn/generate_anchors.py
    """

    base_anchor = nd.array([1, 1, base_size, base_size])
    ratio_anchors = _ratio_enum(base_anchor, ratios)
    anchors = nd.concatenate([
        _scale_enum(ratio_anchors[i, :], scales)
        for i in range(ratio_anchors.shape[0])
    ])
    return anchors
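Assuming the _ratio_enum and _scale_enum helpers from the same project are in scope, the defaults reproduce the nine standard Faster-RCNN anchors:

anchors = generate_anchors()  # base_size=16, 3 ratios x 3 scales
print(anchors.shape)  # (9, 4), matching the original py-faster-rcnn output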
Example 30
    def __next__(self):
        if self.cls_cnt >= self.cls_num:
            self.cls_cnt = 0
            raise StopIteration
        imgs_support = []
        imgs_query = []
        cls_ids_support = []
        cls_ids_query = []
        # img_ids_support = []
        # img_ids_query = []
        cnt = 0
        while cnt < self.nc:
            try:
                cur_cls = next(self.cls_seq)
            except StopIteration:
                temp = list(self.cls_loader.keys())
                random.shuffle(temp)
                self.cls_seq = iter(temp)
                cur_cls = next(self.cls_seq)
            try:
                img, cls_id = next(self.ites[cur_cls])
            except StopIteration:
                self.ites[cur_cls] = iter(self.cls_loader[cur_cls])
                continue
            imgs_support.append(img[0:self.ns])
            cls_ids_support.append(cls_id[0:self.ns])

            if self.nq != 0:
                imgs_query.append(img[self.ns:])
                cls_ids_query.append(cls_id[self.ns:])

            # img_ids_support.append(img_id[0:self.ns])
            # img_ids_query.append(img_id[self.ns:])

            cnt += 1
        self.cls_cnt += self.nc
        support = (nd.concatenate(imgs_support, 0),
                   nd.concatenate(cls_ids_support, 0))
        if self.nq != 0:
            query = (nd.concatenate(imgs_query, 0),
                     nd.concatenate(cls_ids_query, 0))
            return (nd.concatenate([support[0], query[0]], 0),
                    nd.concatenate([support[1], query[1]], 0))
        else:
            return support
Example 31
def objective_function(model: mx.gluon.HybridBlock,
                       training_data_iterator: mx.io.NDArrayIter,
                       loss: mx.gluon.loss.Loss,
                       gamma=AdaNetConfig.GAMMA.value) -> nd.NDArray:
    """
    :param model: Union[SuperCandidateHull, ModelTemplate]
    :param training_data_iterator:
    :param loss:
    :param gamma:
    :return:
    """
    training_data_iterator.reset()
    err_list = []
    for batch_i, batch in enumerate(training_data_iterator):
        pred = model(batch.data[0])[0][0]
        label = batch.label[0]
        error = loss(pred, label)
        err_list.append(error)
    err = concatenate(err_list)
    c_complexities = model.get_candidate_complexity()
    c_complexities = c_complexities * gamma
    objective = err.mean() + c_complexities.mean()

    return objective[0][0]
Example 32
    def predict_multi(self, imgs):
        loader = DataLoader(imgs.as_in_context(self.ctx),
                            self.batch_size,
                            last_batch='keep')
        max_sims = []
        labels = []
        features = []
        # Normalize the class centers once, guarding against None
        # (the loop below already handles the no-center case).
        cls_center = (nd.L2Normalization(self.cls_center)
                      if self.cls_center is not None else None)
        for data in loader:
            data_batch = mx.io.DataBatch(data=(data, ),
                                         pad=self.batch_size - data.shape[0])
            self.model.forward(data_batch, is_train=False)
            embeddings = self.model.get_outputs()[0]
            features.append(embeddings)
            embeddings = nd.L2Normalization(embeddings, mode='instance')

            if self.cls_center is not None:
                temp1 = embeddings.expand_dims(axis=1)
                temp2 = cls_center.expand_dims(axis=0)
                dis_mat = nd.sum(temp1 * temp2, axis=2)
                max_sim = nd.max(dis_mat, axis=1)
                label = nd.argmax(dis_mat, axis=1)

                labels += list(label.asnumpy())
                max_sims += list(max_sim.asnumpy())
            else:
                label = None

        features = nd.concatenate(features, axis=0)
        if self.label_map is not None:
            labels = [self.label_map[int(x)] for x in labels]

        return (max_sims, labels), features