Example #1
# Fragment: per-gate regularization cost, the sigmoid of the log-ratio of the
# gate limits (the commented-out block in Example #2 builds the same term).
cost = []
for decision in tree._routerlayer._children.values():
    gate = decision._gate
    cost.append(nd.sigmoid(
        nd.log(-1 * gate._limit_lo / gate._limit_hi)))
print(cost)

# %%

# The sequential string keys the children will be renumbered to, e.g. ['0', '1', ...].
[str(i) for i, _ in enumerate(tree._embeddlayer._children.items())]

# The current (possibly non-sequential) keys.
list(tree._embeddlayer._children.keys())

# Renumber the child Blocks with consecutive string keys; iterating over a
# snapshot of the keys makes it safe to pop entries inside the loop.
for i, key in enumerate(list(tree._embeddlayer._children.keys())):
    item = tree._embeddlayer._children.pop(key)
    tree._embeddlayer._children[str(i)] = item
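
# %%
# Hedged sketch of the same renumbering pattern on a plain dict (insertion
# order is preserved on Python >= 3.7), independent of the Tree internals.
d = {"node_a": 1, "node_c": 2, "node_b": 3}
for i, key in enumerate(list(d.keys())):
    d[str(i)] = d.pop(key)
assert list(d.keys()) == ["0", "1", "2"]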

# %%

# Grow the tree on successive batches of 2-D points.
tree._grow(nd.array([[0, 0], [3, 3], [7, 1]]))
tree._grow(nd.array([[10, 10]]))
tree._grow(nd.array([[6, 8]]))
tree._grow(nd.array([[9, 2], [1, 9]]))

# Forward pass on a few query points.
tree(nd.array([[1, 1], [2, 2], [-1, -1]]))
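
# %%
# Hedged: Tree exposes a _structure attribute (inspected in Example #3);
# checking it here would show how the _grow calls above changed the tree.
tree._structure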

# %%

# Query each node's decision gate, skipping nodes without a _decision (leaves).
[
    node._decision._gate() for node in tree._embeddlayer._children.values()
    if hasattr(node, "_decision")
]
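
# %%
# Hedged sketch: the per-gate cost term from Examples #1 and #2, applied to
# these decision gates; assumes each gate carries the _limit_lo / _limit_hi
# attributes used in those snippets.
[
    nd.sigmoid(nd.log(-1 * node._decision._gate._limit_lo /
                      node._decision._gate._limit_hi))
    for node in tree._embeddlayer._children.values()
    if hasattr(node, "_decision")
]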

# %%
Example #2
net.collect_params().initialize(mx.init.Normal(sigma=0.1),
                                force_reinit=True,
                                ctx=model_ctx)
error = gluon.loss.SoftmaxCrossEntropyLoss()

# %%

# Assumes X and y are defined in an earlier cell and that shuffle is
# sklearn.utils.shuffle.
X, y = shuffle(X, y)

for data, target in zip(np.split(X, 10), np.split(y, 10)):

    data = nd.array(data).as_in_context(model_ctx)
    target = nd.array(target).as_in_context(model_ctx)

    # prenet is presumably defined in an earlier cell; grow the tree on its
    # embeddings rather than on the raw data.
    tree._grow(prenet(data))
    # tree._grow(data)

    # Drop parameters that are still shapeless (deferred initialization) so
    # the Trainer only receives materialized ones.
    less = net.collect_params()
    for key in list(less._params.keys()):
        if less[key].shape is None:
            less._params.pop(key)

    trainer = gluon.Trainer(less, 'sgd', {'learning_rate': 2})

    with mx.autograd.record():
        loss = error(net(data), target)
        # cost = []
        # for decision in tree._routerlayer._children.values():
        #     gate = decision._gate
        #     cost.append(nd.sigmoid(
        #         nd.log(-1 * gate._limit_lo / gate._limit_hi)))
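
    # Hedged: the snippet is cut off here; the remainder of the step
    # presumably mirrors the training loop in Example #4 below.
    loss.backward()
    trainer.step(data.shape[0], ignore_stale_grad=True)
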
Example #3
import mxnet as mx
import numpy as np
from mxnet import gluon, nd

from Tree import Tree

tree = Tree(units=2)

tree.collect_params().initialize(force_reinit=True)

tree.collect_params()  # inspect the registered parameters

tree._structure  # inspect the current tree structure

tree(nd.array([[1, 3], [5, 6], [1, 2], [3, 0], [8, 8], [3.5, 3.5]]))

tree._grow(nd.array([[1, 1]]))

# %%

from Node import Node

node = Node()

node.collect_params()
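
# %%
# Hedged: same initialization idiom as the Tree above; a fresh Node's
# parameters can be initialized before use.
node.collect_params().initialize(force_reinit=True)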

# %%

a = nd.array([[1, 2], [3, 4], [-10, -10]])
a
# Per-dimension bounds of the batch.
upper = nd.max(a, axis=0)
lower = nd.min(a, axis=0)
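
# %%
# Hedged: for the batch above, upper is [3. 4.] and lower is [-10. -10.];
# bounds like these look like candidate values for a gate's _limit_lo /
# _limit_hi (an assumption based only on the attribute names used earlier).
print(lower, upper)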
Example #4
# Stack the tree with a single-unit dense head, trained with L2 loss below.
net = gluon.nn.Sequential()

with net.name_scope():
    net.add(tree)
    net.add(gluon.nn.Dense(1))

error = gluon.loss.L2Loss()
net.collect_params().initialize(mx.init.Normal(sigma=0.1), force_reinit=True)

# %%

# As in Example #2, shuffle is assumed to be sklearn.utils.shuffle.
X, y = shuffle(X, y)

for data, target in zip(np.split(X, 10), np.split(y, 10)):

    tree._grow(nd.array(data))

    # As in Example #2, drop still-shapeless (deferred) parameters before
    # building the Trainer.
    less = net.collect_params()
    for key in list(less._params.keys()):
        if less[key].shape is None:
            less._params.pop(key)

    trainer = gluon.Trainer(less, 'sgd', {'learning_rate': 0.5})

    with mx.autograd.record():
        loss = error(net(nd.array(data)), nd.array(target))

    loss.backward()
    trainer.step(data.shape[0], ignore_stale_grad=True)

    after = [