Example No. 1
    def add_task(self, task: Task) -> None:
        """Add a single task to the network.

        Parameters
        ----------
        task
            A ``Task`` to add
        """
        # Combine module_pool from all tasks
        for key in task.module_pool.keys():
            if key in self.module_pool.keys():
                if self.config.dataparallel:
                    task.module_pool[key] = nn.DataParallel(
                        self.module_pool[key])
                else:
                    task.module_pool[key] = self.module_pool[key]
            else:
                if self.config.dataparallel:
                    self.module_pool[key] = nn.DataParallel(
                        task.module_pool[key])
                else:
                    self.module_pool[key] = task.module_pool[key]
        self.task_names.add(task.name)
        self.op_sequences[task.name] = task.op_sequence
        self.loss_funcs[task.name] = task.loss_func
        self.output_funcs[task.name] = task.output_func
        self.scorers[task.name] = task.scorer

        # Move model to specified device
        self._move_to_device()
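
A quick usage sketch of the sharing behavior above (hypothetical task and module names; `Task`, `Operation`, and `MultitaskModel` are the classes used in the other examples on this page): when two tasks declare a module under the same pool key, the second `add_task` call makes them share one module instance.

import torch.nn as nn

task_a = Task(
    name="task_a",
    module_pool=nn.ModuleDict({
        "encoder": nn.Linear(4, 8),
        "head_a": nn.Linear(8, 2),
    }),
    op_sequence=[
        Operation("encoder", [("_input_", "data")]),
        Operation("head_a", ["encoder"]),
    ],
)

task_b = Task(
    name="task_b",
    module_pool=nn.ModuleDict({
        "encoder": nn.Linear(4, 8),  # same key as task_a's encoder
        "head_b": nn.Linear(8, 3),
    }),
    op_sequence=[
        Operation("encoder", [("_input_", "data")]),
        Operation("head_b", ["encoder"]),
    ],
)

model = MultitaskModel(tasks=[task_a])
model.add_task(task_b)
# "encoder" was already in model.module_pool, so task_b now reuses task_a's
# encoder and the two tasks train it jointly.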
Example No. 2
    def test_task_creation(self):
        module_pool = nn.ModuleDict({
            "linear1": nn.Sequential(nn.Linear(2, 10), nn.ReLU()),
            "linear2": nn.Linear(10, 1),
        })

        op_sequence = [
            Operation(name="the_first_layer",
                      module_name="linear1",
                      inputs=["_input_"]),
            Operation(
                name="the_second_layer",
                module_name="linear2",
                inputs=["the_first_layer"],
            ),
        ]

        task = Task(name=TASK_NAME,
                    module_pool=module_pool,
                    op_sequence=op_sequence)

        # Task has no functionality on its own
        # Here we only confirm that the object was initialized
        self.assertEqual(task.name, TASK_NAME)
Example No. 3
def create_task(task_name, module_suffixes=("", "")):
    module1_name = f"linear1{module_suffixes[0]}"
    module2_name = f"linear2{module_suffixes[1]}"

    linear1 = nn.Linear(2, 2)
    linear1.weight.data.copy_(torch.eye(2))
    linear1.bias.data.copy_(torch.zeros((2, )))

    linear2 = nn.Linear(2, 2)
    linear2.weight.data.copy_(torch.eye(2))
    linear2.bias.data.copy_(torch.zeros((2, )))

    module_pool = nn.ModuleDict({
        module1_name: nn.Sequential(linear1, nn.ReLU()),
        module2_name: linear2
    })

    op0 = Operation(module_name=module1_name,
                    inputs=[("_input_", "data")],
                    name="op0")
    op1 = Operation(module_name=module2_name, inputs=[op0.name], name="op1")

    op_sequence = [op0, op1]

    task = Task(name=task_name,
                module_pool=module_pool,
                op_sequence=op_sequence)

    return task
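
For example (hypothetical task names), the `module_suffixes` parameter controls whether two tasks built with this helper end up with the same module-pool keys, and therefore share modules once added to a single model:

# Hypothetical usage of the helper above:
task1 = create_task("task1")                # pool keys "linear1", "linear2"
task2 = create_task("task2", ("_2", "_2"))  # pool keys "linear1_2", "linear2_2"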
Example No. 4
    def test_no_input_spec(self):
        # Confirm the model doesn't break when a module does not specify its inputs
        dataset = create_dataloader("task", shuffle=False).dataset
        task = Task(
            name="task",
            module_pool=nn.ModuleDict({"identity": nn.Identity()}),
            op_sequence=[Operation("identity", [])],
        )
        model = MultitaskModel(tasks=[task], dataparallel=False)
        outputs = model.forward(dataset.X_dict, ["task"])
        self.assertIn("_input_", outputs)
Example No. 5
    def test_score_shuffled(self):
        # Test scoring with a shuffled dataset

        class SimpleVoter(nn.Module):
            def forward(self, x):
                """Set class 0 to -1 if x and 1 otherwise"""
                mask = x % 2 == 0
                out = torch.zeros(x.shape[0], 2)
                out[mask, 0] = 1  # class 0
                out[~mask, 1] = 1  # class 1
                return out

        # Create model
        task_name = "VotingTask"
        module_name = "simple_voter"
        module_pool = nn.ModuleDict({module_name: SimpleVoter()})
        op0 = Operation(module_name=module_name,
                        inputs=[("_input_", "data")],
                        name="op0")
        op_sequence = [op0]
        task = Task(name=task_name,
                    module_pool=module_pool,
                    op_sequence=op_sequence)
        model = MultitaskModel([task])

        # Create dataset
        y_list = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
        x_list = [i for i in range(len(y_list))]
        Y = torch.LongTensor(y_list * 100)
        X = torch.FloatTensor(x_list * 100)
        dataset = DictDataset(name="dataset",
                              split="train",
                              X_dict={"data": X},
                              Y_dict={task_name: Y})

        # Create dataloaders
        dataloader = DictDataLoader(dataset, batch_size=2, shuffle=False)
        scores = model.score([dataloader])

        self.assertEqual(scores["VotingTask/dataset/train/accuracy"], 0.6)

        dataloader_shuffled = DictDataLoader(dataset,
                                             batch_size=2,
                                             shuffle=True)
        scores_shuffled = model.score([dataloader_shuffled])
        self.assertEqual(scores_shuffled["VotingTask/dataset/train/accuracy"],
                         0.6)
Example No. 6
def create_task(task_name, module_suffixes=("", "")):
    module1_name = f"linear1{module_suffixes[0]}"
    module2_name = f"linear2{module_suffixes[1]}"

    module_pool = nn.ModuleDict({
        module1_name: nn.Sequential(nn.Linear(2, 10), nn.ReLU()),
        module2_name: nn.Linear(10, 2),
    })

    op1 = Operation(module_name=module1_name, inputs=[("_input_", "data")])
    op2 = Operation(module_name=module2_name, inputs=[op1.name])

    op_sequence = [op1, op2]

    task = Task(name=task_name,
                module_pool=module_pool,
                op_sequence=op_sequence)

    return task
Example No. 7
# The output of the final operation will then go into a loss function to calculate the loss (e.g., cross-entropy) during training or an output function (e.g., softmax) to convert the logits into a prediction.
#
# Each `Task` also specifies which metrics it supports, which are bundled together in a `Scorer` object. For this tutorial, we'll just look at accuracy.

# +
from functools import partial

import torch.nn.functional as F

from cerbero.metrics import Scorer
from cerbero.core import Task

class_task = Task(
    name="class_task",
    module_pool=module_pool,
    op_sequence=op_sequence,
    loss_func=F.cross_entropy,
    output_func=partial(F.softmax, dim=1),
    scorer=Scorer(metrics=["accuracy"]),
)

# -

# ### Again, for the RGB `Task`

# In this case, the RGB `Task` differs in that we'll train the model to estimate the average RGB colors in the image, which we model here as a regression task. Additionally, we'll define the RGB head as a two-layer module.


# +
class RGBHead(nn.Module):
    def __init__(self):
        super(RGBHead, self).__init__()
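        # The listing truncates the class here. A plausible completion,
        # assuming the two-layer regression head described above (the layer
        # widths 84 and 32 are assumptions, not from the original tutorial;
        # the 3 outputs are the average R, G, and B values):
        self.fc1 = nn.Linear(84, 32)
        self.fc2 = nn.Linear(32, 3)

    def forward(self, x):
        return self.fc2(F.relu(self.fc1(x)))
# -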
Example No. 8
# Each `Task` also specifies which metrics it supports, which are bundled together in a `Scorer` object. For this tutorial, we'll just look at accuracy.

# Putting this all together, we define the circle task:

# +
from functools import partial

import torch.nn.functional as F

from cerbero.metrics import Scorer
from cerbero.core import Task

circle_task = Task(
    name="circle_task",
    module_pool=module_pool,
    op_sequence=op_sequence,
    loss_func=F.cross_entropy,
    output_func=partial(F.softmax, dim=1),
    scorer=Scorer(metrics=["accuracy"]),
)
# -

# Note that `Task` objects are not dependent on a particular dataset; multiple datasets can be passed through the same modules for pre-training or co-training.
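
# As a sketch of that flexibility (hypothetical dataset names and tensor shapes; `DictDataset`, `DictDataLoader`, and `MultitaskModel` are the classes used in the test examples earlier on this page, with their imports omitted), two datasets with the same feature layout can be scored through the same task and modules in a single call:

# +
import torch

ds_a = DictDataset(name="ds_a", split="valid",
                   X_dict={"data": torch.randn(64, 2)},
                   Y_dict={"circle_task": torch.randint(0, 2, (64,))})
ds_b = DictDataset(name="ds_b", split="valid",
                   X_dict={"data": torch.randn(64, 2)},
                   Y_dict={"circle_task": torch.randint(0, 2, (64,))})

model = MultitaskModel([circle_task])
scores = model.score([DictDataLoader(ds_a, batch_size=32),
                      DictDataLoader(ds_b, batch_size=32)])
# -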

# ### Again, but faster

# We'll now define the square task, but more succinctly—for example, using the fact that the default name for an `Operation` is its `module_name` (since most tasks only use their modules once per forward pass).
#
# We'll also define the square task to share the first module in its task flow (`base_mlp`) with the circle task to demonstrate how to share modules. (Note that this is purely for illustrative purposes; for this toy task, it is quite possible that this is not the optimal arrangement of modules).
#
# Finally, the most common task definitions we see in practice are classification tasks with cross-entropy loss and softmax applied to the output of the last module, with accuracy as the primary metric of interest. Since these are all the default values, we can drop them here for brevity.
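
# Below is a minimal sketch of that succinct definition (the `square_head` module and its dimensions are illustrative assumptions; `base_mlp` is the shared module named above, and the loss function, output function, and scorer fall back to the defaults: cross-entropy, softmax, and accuracy).

# +
square_task = Task(
    name="square_task",
    module_pool=nn.ModuleDict({
        "base_mlp": circle_task.module_pool["base_mlp"],  # shared with the circle task
        "square_head": nn.Linear(4, 2),                   # dimensions are an assumption
    }),
    op_sequence=[
        Operation("base_mlp", [("_input_", "data")]),
        Operation("square_head", ["base_mlp"]),
    ],
)
# -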