Example #1
0
    def test_auto_namespace_scope(self):
        """auto_namespace() scoped to this module should win over a
        blank-scope namespace("bleh") for classes defined afterwards.

        The resulting task_family is prefixed with the module name
        rather than "bleh".
        """
        auto_namespace(scope=__name__)
        namespace("bleh", scope="")

        class MyTask(dbnd.Task):
            pass

        # NOTE(review): self.this_module is presumably set to this test
        # module's name in the class setup — confirm against the fixture.
        assert MyTask.task_definition.task_family == self.this_module + ".MyTask"
Example #2
0
    def test_auto_namespace_not_matching(self):
        """auto_namespace() registered for a different scope must not apply.

        Since the auto namespace targets "incorrect_namespace" (not this
        module), the blank-scope namespace("bleh") is what takes effect
        for MyTask.
        """
        auto_namespace(scope="incorrect_namespace")
        namespace("bleh", scope="")

        class MyTask(dbnd.Task):
            pass

        # Re-register namespaces AFTER the class was created; the assertion
        # below still seeing "bleh" shows the family was captured at
        # class-definition time and is unaffected by later calls.
        namespace(scope="incorrect_namespace")
        namespace(scope="")
        assert MyTask.task_definition.task_family == "bleh.MyTask"
Example #3
0
from dbnd import PipelineTask, namespace, output, parameter
from test_dbnd.factories import FooConfig, TTask

# Register the "n_tv" namespace for the task classes defined below in
# this module (their task_family gets this prefix).
namespace("n_tv")


class FirstTask(TTask):
    """Base task with a FooConfig sub-config and an overridable string param."""

    # Nested config parameter; the string default "FooConfig" is presumably
    # resolved by dbnd to a FooConfig instance — confirm against dbnd docs.
    foo = parameter(default="FooConfig")[FooConfig]
    # String parameter; subclasses below override this default.
    param = parameter(default="from_first")[str]


class SecondATask(FirstTask):
    """Overrides the inherited `param` default via a plain class attribute."""

    param = "from_second"


class SecondBTask(FirstTask):
    """Inherits FirstTask unchanged; callers pass task_name/param explicitly
    (see InnerPipeTask.band in this file)."""

    pass


class InnerPipeTask(PipelineTask):
    """Pipeline that wires a single SecondBTask instance to its output."""

    # Assigned in band(); exposed as this pipeline's output.
    second_b = output

    def band(self):
        # Instantiate SecondBTask under a custom task name, overriding the
        # inherited param default.
        self.second_b = SecondBTask(task_name="innerB", param="from_pipe")


class BigPipeTask(PipelineTask):
    second_a = output
    second_b = output
    inner_second_b = output
Example #4
0
from mxnet import autograd, gluon, init
from mxnet.gluon import nn
from mxnet.gluon.data.dataset import ArrayDataset
from mxnet.gluon.data.vision import transforms

from dbnd import data, namespace, output, parameter
from dbnd.tasks import PipelineTask
from dbnd_examples.ml.tool_mxnet import (
    DownloadFile,
    MxNetGluonTask,
    fashion_data,
    read_minst,
)


# Register the "fashion" namespace for the tasks defined below in this module.
namespace("fashion")


def accuracy(output, label):
    """Return the fraction of rows where the argmax of `output` (axis=1)
    equals `label`, as a Python scalar.

    `output` and `label` are array-likes with mxnet NDArray-style methods
    (argmax/astype/mean/asscalar).
    """
    predictions = output.argmax(axis=1)
    # Compare in float32 so the elementwise equality matches label dtype.
    hits = predictions == label.astype("float32")
    return hits.mean().asscalar()


def build_lenet_classifier():
    net = nn.Sequential()
    net.add(
        nn.Conv2D(channels=6, kernel_size=5, activation="relu"),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Conv2D(channels=16, kernel_size=3, activation="relu"),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Flatten(),
        nn.Dense(120, activation="relu"),
Example #5
0
from dbnd import PipelineTask, PythonTask, data, namespace, output, parameter

# Register the "scenario_4_tasks" namespace, scoped to this module only.
namespace("scenario_4_tasks", scope=__name__)


class _F4Task(PythonTask):
    """Base task: writes a "done <t_param>" line to its output when run."""

    # Required string parameter; concrete subclasses supply defaults.
    t_param = parameter[str]
    o_output = output

    def run(self):
        self.o_output.write("done %s\n" % self.t_param)


class A1_F4Task(_F4Task):
    """_F4Task variant with t_param defaulting to "A1"."""

    t_param = parameter.value(default="A1")


class A2_F4Task(_F4Task):
    """_F4Task variant with t_param defaulting to "A2"."""

    t_param = parameter.value(default="A2")


class B_F4Task(PythonTask):
    """Task that declares an upstream data input and writes a "done B" line."""

    t_param = parameter.value(default="B")
    # Upstream data dependency — presumably wired to an A*_F4Task output by a
    # pipeline elsewhere; note run() below does not read it directly.
    a1_input = data

    o_output = output

    def run(self):
        self.o_output.write("done %s\n" % self.t_param)

Example #6
0
import mxnet as mx

from dbnd import PipelineTask, data, namespace, output, parameter
from dbnd_examples.data.tool_mxnet import digit_data
from dbnd_examples.src.tool_mxnet.mxnet_task import DownloadFile, MXNetTask

# Register the "digits" namespace for the tasks defined below in this module.
namespace("digits")


def create_mlp_digit_classifier():
    """Build a 3-layer MLP symbol graph (128 -> 64 -> 10) with ReLU hidden
    activations and a softmax (cross-entropy) output for 10-class MNIST
    digit classification. Returns the output symbol.
    """
    net = mx.sym.var("data")
    # Collapse the 4-D input (batch_size, channel, width, height) into a
    # 2-D (batch_size, features) matrix for the dense layers.
    net = mx.sym.flatten(data=net)

    # Hidden layer 1: 128 units + ReLU.
    net = mx.sym.FullyConnected(data=net, num_hidden=128)
    net = mx.sym.Activation(data=net, act_type="relu")

    # Hidden layer 2: 64 units + ReLU.
    net = mx.sym.FullyConnected(data=net, num_hidden=64)
    net = mx.sym.Activation(data=net, act_type="relu")

    # Output layer: one unit per digit class (MNIST has 10 classes),
    # trained with softmax cross-entropy loss.
    net = mx.sym.FullyConnected(data=net, num_hidden=10)
    return mx.sym.SoftmaxOutput(data=net, name="softmax")


def create_lenet_digit_classifier():
    data = mx.sym.var("data")
    # first conv layer