Code example #1
    def test_reachable(self):
        # target = TypeShape(DFloat, Shape((DimNames.BATCH, 1),
        #                                  (DimNames.UNITS, 20)))
        input_shape = TypeShape(
            DFloat,
            Shape((DimNames.BATCH, 1), (DimNames.HEIGHT, 32),
                  (DimNames.WIDTH, 32), (DimNames.CHANNEL, 3)))
        depth = 8
        for i in range(1, 100):
            # input_shape = TypeShape(DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, i)))
            target = TypeShape(
                DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, i * 10)))
            print()
            print(input_shape)
            print(target)
            print(
                NeuralNetwork.reachable(input_nts=input_shape,
                                        target_nts=target,
                                        max_depth=depth,
                                        function_pool={Conv2D, Flatten}))
            print(
                list(
                    Dense.possible_output_shapes(
                        input_ntss={IOLabel.DEFAULT: input_shape},
                        target_output=target,
                        is_reachable=lambda x, y: NeuralNetwork.reachable(
                            x, y, depth - 1, {Dense, Merge}),
                    )))
        pass
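All snippets in this collection are methods of unittest.TestCase subclasses (hence the self.assert* calls and the TestSubFunctions.DummyDF helper used in later examples). To run one of them on its own, a minimal harness along the following lines is enough; the class name is a placeholder, and the framework types used above (NeuralNetwork, TypeShape, Shape, DimNames, DFloat, IOLabel, Conv2D, Flatten, Dense, Merge) still have to be imported from the framework's own modules, whose paths are not shown in these excerpts.

    import unittest

    # Placeholder harness: the framework imports are assumed, not shown here.
    class TestReachable(unittest.TestCase):  # placeholder class name

        def test_reachable(self):
            ...  # body as in code example #1 above

    if __name__ == '__main__':
        unittest.main()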
Code example #2
    def test_instantiation_Conv2D_Pool2D_Flatten_Dense(self):
        for i in range(10):
            batch = 1
            _data = TypeShape(
                DFloat,
                Shape((DimNames.BATCH, batch), (DimNames.HEIGHT, 32),
                      (DimNames.WIDTH, 32), (DimNames.CHANNEL, 3)))
            _target = TypeShape(
                DFloat, Shape(
                    (DimNames.BATCH, batch),
                    (DimNames.UNITS, 10),
                ))
            outputs = {'out0': _target}
            IOLabel.DS = 'DS'
            inputs = {IOLabel.DS: (IOLabel.DATA, _data, 'Dataset')}
            functions = [Conv2D, Pooling2D, Flatten, Dense]
            NN = NeuralNetwork(
                **{
                    NeuralNetwork.arg_INPUTS: inputs,
                    NeuralNetwork.arg_OUTPUT_TARGETS: outputs,
                    NeuralNetwork.arg_FUNCTIONS: functions
                })
            self.assertIsNotNone(NN)
            pb = NN.get_pb()
            state = NN.__getstate__()

            f_ids = dict([(_id, None) for _, _id in NN.inputs.values()])
            for _f in NN.functions:
                f_ids[_f.id_name] = _f

            for _f in NN.functions:
                for _f_input, (other_output, other_id) in _f.inputs.items():
                    if other_id not in f_ids:
                        self.assertTrue(False)

            stack = [f_id for _, f_id in NN.output_mapping.values()]
            required_ids = set()
            while stack:
                f_id = stack.pop()
                required_ids.add(f_id)
                f_ = f_ids.get(f_id)
                if f_ is not None:
                    stack.extend([f_id for _, f_id in f_.inputs.values()])
            self.assertSetEqual(required_ids, set(f_ids.keys()))

            NN_pb = NeuralNetwork.__new__(NeuralNetwork)
            NN_pb.__setstate__(pb)
            self.assertIsNot(NN, NN_pb)

            NN_state = NeuralNetwork.__new__(NeuralNetwork)
            NN_state.__setstate__(state)
            self.assertIsNot(NN, NN_state)

            NN_mut = NN.mutate(100)
            self.assertIsNot(NN, NN_mut)
            self.assertNotEqual(NN, NN_mut)
            NN_mut = NN.mutate(0)
            self.assertIsNot(NN, NN_mut)
            self.assertNotEqual(NN, NN_mut)
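Code examples #2, #12, and #13 all repeat the same connectivity check: every function input must reference a known producer, and a backwards walk from output_mapping must visit every function in the network. A helper in the same spirit could factor this out (a sketch only; it relies solely on the attributes already used above, namely inputs, functions, id_name and output_mapping, and the helper name is illustrative):

    def assert_network_connected(test, nn):
        # Known producers: the network inputs plus every function, keyed by id.
        f_ids = {f_id: None for _, f_id in nn.inputs.values()}
        for f in nn.functions:
            f_ids[f.id_name] = f
        # Every function input must reference one of the known producers.
        for f in nn.functions:
            for _, (_, other_id) in f.inputs.items():
                test.assertIn(other_id, f_ids)
        # Walk backwards from the outputs; every function must be required.
        stack = [f_id for _, f_id in nn.output_mapping.values()]
        required_ids = set()
        while stack:
            f_id = stack.pop()
            required_ids.add(f_id)
            f_ = f_ids.get(f_id)
            if f_ is not None:
                stack.extend(fid for _, fid in f_.inputs.values())
        test.assertSetEqual(required_ids, set(f_ids.keys()))

In the test above this would be called as assert_network_connected(self, NN), and in examples #12 and #13 as assert_network_connected(self, NN_mut) or assert_network_connected(self, NN_rec).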
Code example #3
    def test_Conv2D(self):
        IOLabel.DUMMY1 = 'DUMMY1'
        IOLabel.DUMMY2 = 'DUMMY2'
        _shape = Shape((DN.BATCH, -1), (DN.WIDTH, 64), (DN.HEIGHT, 64),
                       (DN.CHANNEL, 4))
        shape_ = Shape((DN.BATCH, -1), (DN.WIDTH, 32), (DN.HEIGHT, 32),
                       (DN.CHANNEL, 6))
        _input = {IOLabel.DUMMY1: TypeShape(DDouble, _shape)}
        _output = TypeShape(DDouble, shape_)
        _expected_output = TypeShape(DDouble, shape_)
        pos = list(
            Conv2D.possible_output_shapes(
                input_ntss=_input,
                target_output=_output,
                is_reachable=lambda x, y: NeuralNetwork.reachable(
                    x, y, 1, {Conv2D})))
        self.assertTrue(
            any([_expected_output in out.values() for _, out, _ in pos]))
        self.assertTrue(all([len(remaining) == 0 for remaining, _, _ in pos]))
        self.assertTrue(
            all([
                IOLabel.CONV2D_IN in mapping
                and mapping[IOLabel.CONV2D_IN] == IOLabel.DUMMY1
                for _, _, mapping in pos
            ]))

        dummyDF = TestSubFunctions.DummyDF()
        dummyDF._outputs = {IOLabel.DUMMY1: TypeShape(DDouble, _shape)}
        for _, out, _ in pos:
            for parameters in Conv2D.generateParameters(
                    input_dict={
                        IOLabel.CONV2D_IN:
                        (IOLabel.DUMMY1, dummyDF.outputs, dummyDF.id_name)
                    },
                    expected_outputs={IOLabel.CONV2D_OUT: _output},
                    variable_pool={},
            )[0]:
                _conv2D = Conv2D(**parameters)
                pb = _conv2D.get_pb()
                self.assertIsNotNone(pb)
                state = _conv2D.__getstate__()
                self.assertIsNotNone(state)

                new_conv2D = Conv2D.__new__(Conv2D)
                new_conv2D.__setstate__(pb)
                self.assertEqual(_conv2D, new_conv2D)
                self.assertIsNot(_conv2D, new_conv2D)

                new_conv2D = Conv2D.__new__(Conv2D)
                new_conv2D.__setstate__(state)
                self.assertEqual(_conv2D, new_conv2D)
                self.assertIsNot(_conv2D, new_conv2D)

                m_conv2D = _conv2D.mutate(100)
                self.assertNotEqual(_conv2D, m_conv2D)
                m_conv2D = _conv2D.mutate(0)
                self.assertEqual(_conv2D, m_conv2D)
                self.assertIsNot(_conv2D, m_conv2D)
        pass
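The sub-function tests (code examples #3, #4, #7, #9 and #10) all repeat the same serialization round trip: rebuild the object from get_pb() and from __getstate__(), then require value equality without identity. A small helper in the same spirit (a sketch; the name is illustrative and equality relies on the sub-functions' __eq__, exactly as in the tests):

    def assert_serialization_roundtrip(test, obj):
        # Rebuild from both the protobuf payload and the plain Python state.
        for payload in (obj.get_pb(), obj.__getstate__()):
            test.assertIsNotNone(payload)
            clone = type(obj).__new__(type(obj))
            clone.__setstate__(payload)
            # Equal by value, but a distinct instance.
            test.assertEqual(obj, clone)
            test.assertIsNot(obj, clone)

Inside the loop above it would be called as assert_serialization_roundtrip(self, _conv2D), and analogously for Dense, Flatten, Softmax and Merge.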
Code example #4
    def test_Dense(self):
        IOLabel.DUMMY = 'DUMMY'
        IOLabel.DUMMY2 = 'DUMMY2'
        _shape = Shape((DN.BATCH, -1), (DN.UNITS, 16))
        _input = {IOLabel.DUMMY: TypeShape(DDouble, _shape)}
        _output = TypeShape(DDouble, _shape)
        _expected_output = TypeShape(DDouble, _shape)
        pos = list(
            Dense.possible_output_shapes(
                input_ntss=_input,
                target_output=_output,
                is_reachable=lambda x, y: NeuralNetwork.reachable(
                    x, y, 1, {Dense})))
        self.assertTrue(
            any([_expected_output in out.values() for _, out, _ in pos]))
        self.assertTrue(all([len(remaining) == 0 for remaining, _, _ in pos]))
        self.assertTrue(
            all([
                IOLabel.DENSE_IN in mapping
                and mapping[IOLabel.DENSE_IN] == IOLabel.DUMMY
                for _, _, mapping in pos
            ]))

        dummyDF = TestSubFunctions.DummyDF()
        dummyDF._outputs = {IOLabel.DUMMY: TypeShape(DDouble, _shape)}
        for _, out, _ in pos:
            for parameters in Dense.generateParameters(
                    input_dict={
                        IOLabel.DENSE_IN:
                        (IOLabel.DUMMY, dummyDF.outputs, dummyDF.id_name)
                    },
                    expected_outputs={IOLabel.DUMMY2: _output},
                    variable_pool={},
            )[0]:
                # check if parameters are correct?
                _dense = Dense(**parameters)
                pb = _dense.get_pb()
                self.assertIsNotNone(pb)
                state = _dense.__getstate__()
                self.assertIsNotNone(state)

                new_dense = Dense.__new__(Dense)
                new_dense.__setstate__(pb)
                self.assertEqual(_dense, new_dense)
                self.assertIsNot(_dense, new_dense)

                new_dense = Dense.__new__(Dense)
                new_dense.__setstate__(state)
                self.assertEqual(_dense, new_dense)
                self.assertIsNot(_dense, new_dense)

                m_dense = _dense.mutate(100)
                self.assertNotEqual(_dense, m_dense)
                m_dense = _dense.mutate(0)
                self.assertEqual(_dense, m_dense)
        pass
Code example #5
    def test_simple_path(self):
        ntss = {
            IOLabel.DEFAULT:
            TypeShape(DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, 23)))
        }
        target = TypeShape(DFloat,
                           Shape((DimNames.BATCH, 1), (DimNames.UNITS, 154)))
        depth = 5
        debug_node = 'debug'

        before = time.time()
        for _ in range(1):
            # reachable() expects a TypeShape (cf. input_nts in code example #1),
            # so pass the dict value rather than the IOLabel key.
            NeuralNetwork.reachable(next(iter(ntss.values())), target, depth,
                                    {Dense, Merge})
        print('Time', time.time() - before)

        print(
            NeuralNetwork.reachable(next(iter(ntss.values())), target, depth,
                                    {Dense, Merge}))
        print(ntss)
        runs = 10000
        fails = 0
        for i in range(runs):
            blueprint = nx.DiGraph()
            blueprint.add_node(debug_node, ntss=ntss, DataFlowObj=None)
            out_node, nts_id, nodes = next(
                NeuralNetwork.simple_path(
                    input_node=debug_node,
                    input_ntss=ntss,
                    output_shape=target,
                    output_label=IOLabel.DEFAULT,
                    blueprint=blueprint,
                    min_depth=0,
                    max_depth=depth,
                    function_pool={Dense, Merge},
                ), (None, None, None))
            if out_node is None:
                # print(i, 'Error')
                fails += 1
            # else:
            #   print(i, 'Success')
        print('percentage failed:', fails / runs)
        pass
Code example #6
(Identical, line for line, to the __init__ of ClassifierIndividualACDG shown in full in Code example #11.)
Code example #7
    def test_Flatten(self):
        IOLabel.DUMMY1 = 'DUMMY1'
        IOLabel.DUMMY2 = 'DUMMY2'
        _shape = Shape((DN.BATCH, -1), (DN.WIDTH, 8), (DN.HEIGHT, 8),
                       (DN.CHANNEL, 32))
        shape_ = Shape((DN.BATCH, -1), (DN.UNITS, 2048))
        _input = {IOLabel.DUMMY1: TypeShape(DDouble, _shape)}
        _output = TypeShape(DDouble, shape_)
        _expected_output = TypeShape(DDouble, shape_)
        pos = list(
            Flatten.possible_output_shapes(
                input_ntss=_input,
                target_output=_output,
                is_reachable=lambda x, y: NeuralNetwork.reachable(
                    x, y, 1, {Flatten})))
        self.assertTrue(
            any([_expected_output in out.values() for _, out, _ in pos]))
        self.assertTrue(all([len(remaining) == 0 for remaining, _, _ in pos]))
        self.assertTrue(
            all([
                IOLabel.FLATTEN_IN in mapping
                and mapping[IOLabel.FLATTEN_IN] == IOLabel.DUMMY1
                for _, _, mapping in pos
            ]))

        dummyDF = TestSubFunctions.DummyDF()
        dummyDF._outputs = {IOLabel.DUMMY1: TypeShape(DDouble, _shape)}
        for _, out, _ in pos:
            for parameters in Flatten.generateParameters(
                    input_dict={
                        IOLabel.FLATTEN_IN:
                        (IOLabel.DUMMY1, dummyDF.outputs, dummyDF.id_name)
                    },
                    expected_outputs={IOLabel.DUMMY2: _output},
                    variable_pool={},
            )[0]:
                _flatten = Flatten(**parameters)
                pb = _flatten.get_pb()
                self.assertIsNotNone(pb)
                state = _flatten.__getstate__()
                self.assertIsNotNone(state)

                new_flatten = Flatten.__new__(Flatten)
                new_flatten.__setstate__(pb)
                self.assertEqual(_flatten, new_flatten)
                self.assertIsNot(_flatten, new_flatten)

                new_flatten = Flatten.__new__(Flatten)
                new_flatten.__setstate__(state)
                self.assertEqual(_flatten, new_flatten)
                self.assertIsNot(_flatten, new_flatten)
Code example #8
    def test_func_children(self):
        ntss = {
            IOLabel.DEFAULT:
            TypeShape(
                DFloat,
                Shape((DimNames.BATCH, 1), (DimNames.HEIGHT, 10),
                      (DimNames.WIDTH, 10), (DimNames.CHANNEL, 2)))
        }
        target = TypeShape(
            DFloat,
            Shape(
                (DimNames.BATCH, 1), (DimNames.UNITS, 200)
                # (DimNames.HEIGHT, 32),
                # (DimNames.WIDTH, 32),
                # (DimNames.CHANNEL, 3)
            ))

        _f = Flatten
        for _, out_nts, _ in _f.possible_output_shapes(
                ntss, target,
                lambda x, y: NeuralNetwork.reachable(x, y, 0, {Flatten}), 10):
            print(next(iter(out_nts.values())))

        pass
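Code example #8 only prints the candidate output shapes. Since Flatten collapses HEIGHT, WIDTH and CHANNEL into UNITS (10 * 10 * 2 = 200 here), one candidate should match target exactly; an assertion-style variant of the same check, in the spirit of test_Flatten in code example #7, could sit inside the same TestCase method (a sketch, not part of the original test):

        # Flatten collapses HEIGHT * WIDTH * CHANNEL into UNITS: 10 * 10 * 2 = 200,
        # so one of the candidates must equal the target shape.
        pos = list(
            _f.possible_output_shapes(
                ntss, target,
                lambda x, y: NeuralNetwork.reachable(x, y, 0, {Flatten}), 10))
        self.assertTrue(any(target in out_nts.values() for _, out_nts, _ in pos))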
Code example #9
    def test_Softmax(self):
        IOLabel.DUMMY1 = 'DUMMY1'
        IOLabel.DUMMY2 = 'DUMMY2'
        shape0 = Shape((DN.BATCH, -1), (DN.WIDTH, 8), (DN.HEIGHT, 8),
                       (DN.CHANNEL, 32))
        shape1 = Shape((DN.BATCH, -1), (DN.UNITS, 10))
        output0 = TypeShape(DDouble, shape0)
        output1 = TypeShape(DDouble, shape1)
        input0 = {IOLabel.DUMMY1: output0}
        input1 = {IOLabel.DUMMY1: output1}
        pos0 = list(
            Softmax.possible_output_shapes(
                input_ntss=input0,
                target_output=output0,
                is_reachable=lambda x, y: NeuralNetwork.reachable(
                    x, y, 1, {Softmax})))
        self.assertTrue(any([output0 in out.values() for _, out, _ in pos0]))
        self.assertTrue(all([len(remaining) == 0 for remaining, _, _ in pos0]))
        self.assertTrue(
            all([
                IOLabel.SOFTMAX_IN in mapping
                and mapping[IOLabel.SOFTMAX_IN] == IOLabel.DUMMY1
                for _, _, mapping in pos0
            ]))
        pos1 = list(
            Softmax.possible_output_shapes(
                input_ntss=input1,
                target_output=output1,
                is_reachable=lambda x, y: NeuralNetwork.reachable(
                    x, y, 1, {Softmax})))
        self.assertTrue(any([output1 in out.values() for _, out, _ in pos1]))
        self.assertTrue(all([len(remaining) == 0 for remaining, _, _ in pos1]))
        self.assertTrue(
            all([
                IOLabel.SOFTMAX_IN in mapping
                and mapping[IOLabel.SOFTMAX_IN] == IOLabel.DUMMY1
                for _, _, mapping in pos1
            ]))

        dummyDF = TestSubFunctions.DummyDF()
        dummyDF._outputs = {IOLabel.DUMMY1: TypeShape(DDouble, shape0)}
        for _, out, _ in pos0:
            for parameters in Softmax.generateParameters(
                    input_dict={
                        IOLabel.SOFTMAX_IN:
                        (IOLabel.DUMMY1, dummyDF.outputs, dummyDF.id_name)
                    },
                    expected_outputs={IOLabel.DUMMY2: output0},
                    variable_pool={},
            )[0]:
                _softmax = Softmax(**parameters)
                pb = _softmax.get_pb()
                self.assertIsNotNone(pb)
                state = _softmax.__getstate__()
                self.assertIsNotNone(state)

                new_softmax = Softmax.__new__(Softmax)
                new_softmax.__setstate__(pb)
                self.assertEqual(_softmax, new_softmax)
                self.assertIsNot(_softmax, new_softmax)

                new_softmax = Softmax.__new__(Softmax)
                new_softmax.__setstate__(state)
                self.assertEqual(_softmax, new_softmax)
                self.assertIsNot(_softmax, new_softmax)

        for _, out, _ in pos1:
            for parameters in Softmax.generateParameters(
                    input_dict={
                        IOLabel.SOFTMAX_IN:
                        (IOLabel.DUMMY1, dummyDF.outputs, dummyDF.id_name)
                    },
                    expected_outputs={IOLabel.DUMMY2: output1},
                    variable_pool={})[0]:
                _softmax = Softmax(**parameters)
                pb = _softmax.get_pb()
                self.assertIsNotNone(pb)
                state = _softmax.__getstate__()
                self.assertIsNotNone(state)

                new_softmax = Softmax.__new__(Softmax)
                new_softmax.__setstate__(pb)
                self.assertEqual(_softmax, new_softmax)
                self.assertIsNot(_softmax, new_softmax)

                new_softmax = Softmax.__new__(Softmax)
                new_softmax.__setstate__(state)
                self.assertEqual(_softmax, new_softmax)
                self.assertIsNot(_softmax, new_softmax)
Code example #10
    def test_Merge(self):
        IOLabel.DUMMY = 'DUMMY'
        IOLabel.DUMMY2 = 'DUMMY2'
        _input = {
            IOLabel.DUMMY:
            TypeShape(DDouble, Shape((DN.BATCH, -1), (DN.UNITS, 16)))
        }
        _output = TypeShape(DDouble, Shape((DN.BATCH, -1), (DN.UNITS, 24)))
        pos = list(
            Merge.possible_output_shapes(
                input_ntss=_input,
                target_output=_output,
                is_reachable=lambda x, y: NeuralNetwork.reachable(
                    x, y, 1, {Merge}),
            ))
        self.assertTrue(any([_output in out.values() for _, out, _ in pos]))
        self.assertTrue(
            all([
                len(remaining) == 1 and all([
                    nts_label == IOLabel.MERGE_OTHER
                    for nts_label in remaining.keys()
                ]) for remaining, _, _ in pos
            ]))
        self.assertTrue(
            all([
                IOLabel.MERGE_IN in mapping
                and mapping[IOLabel.MERGE_IN] == IOLabel.DUMMY
                for _, _, mapping in pos
            ]))
        dummyDF = TestSubFunctions.DummyDF()
        dummyDF._outputs = {
            IOLabel.DUMMY:
            TypeShape(DDouble, Shape((DN.BATCH, -1), (DN.UNITS, 16)))
        }
        for remaining, out, _ in pos:
            dummyDF2 = TestSubFunctions.DummyDF()
            nts = next(iter(remaining.values()))
            dummyDF2._outputs = {
                IOLabel.DUMMY2: TypeShape(nts.dtype, nts.shape)
            }
            for parameters in Merge.generateParameters(
                    input_dict={
                        IOLabel.MERGE_IN:
                        (IOLabel.DUMMY, dummyDF.outputs, dummyDF.id_name),
                        IOLabel.MERGE_OTHER:
                        (IOLabel.DUMMY2, dummyDF2.outputs, dummyDF2.id_name)
                    },
                    expected_outputs={_output},
                    variable_pool={},
            )[0]:
                _merge = Merge(**parameters)
                pb = _merge.get_pb()
                self.assertIsNotNone(pb)
                state = _merge.__getstate__()
                self.assertIsNotNone(state)

                new_merge = Merge.__new__(Merge)
                new_merge.__setstate__(pb)
                self.assertEqual(_merge, new_merge)
                self.assertIsNot(_merge, new_merge)

                new_merge = Merge.__new__(Merge)
                new_merge.__setstate__(state)
                self.assertEqual(_merge, new_merge)
                self.assertIsNot(_merge, new_merge)
        pass
Code example #11
class ClassifierIndividualACDG(NetworkIndividualInterface, Mutation.Interface,
                               Recombination.Interface):
    arg_MAX_NN_DEPTH = 'max_depth'
    arg_MIN_NN_DEPTH = 'min_depth'
    arg_MAX_NN_BRANCH = 'max_branch'
    arg_NN_FUNCTIONS = 'functions'

    def __init__(self, **kwargs):
        super(ClassifierIndividualACDG, self).__init__(**kwargs)
        if len(self._networks) > 1:
            raise Exception('Expected 1 or 0 networks got: ' +
                            str(len(self._networks)))
        elif len(self._networks) == 1:
            self.network = self._networks[0]
        else:
            _input = (IOLabel.DATA, *self._data_nts[IOLabel.DATA])
            _output = {IOLabel.TARGET: self._data_nts[IOLabel.TARGET][0]}
            _input = {'NN_DATA': _input}
            self.network = NeuralNetwork(
                **{
                    NeuralNetwork.arg_INPUTS:
                    _input,
                    NeuralNetwork.arg_OUTPUT_TARGETS:
                    _output,
                    NeuralNetwork.arg_FUNCTIONS:
                    kwargs.get(self.arg_NN_FUNCTIONS, [Dense]),
                    NeuralNetwork.arg_MAX_DEPTH:
                    kwargs.get(self.arg_MAX_NN_DEPTH, 7),
                    NeuralNetwork.arg_MIN_DEPTH:
                    kwargs.get(self.arg_MIN_NN_DEPTH, 2),
                    NeuralNetwork.arg_MAX_BRANCH:
                    kwargs.get(self.arg_MAX_NN_BRANCH, 1)
                })
            self._networks.append(self.network)

        if len(self._losses) != 0:
            raise Exception('Expected no loss!')
        _output = self._data_nts[IOLabel.TARGET][0]
        _output_units = _output.shape[DimNames.UNITS]
        if _output_units == 1:
            self.loss = MeanSquaredError(**{
                LossInterface.arg_REDUCE: Reduce.MEAN,
            })
        else:
            self.loss = SoftmaxCrossEntropyWithLogits(
                **{LossInterface.arg_REDUCE: Reduce.MEAN})
        self._losses.append(self.loss)

    def _cls_setstate(self, _individual):
        super(ClassifierIndividualACDG, self)._cls_setstate(_individual)

        if len(self._networks) != 1:
            raise Exception(
                'Restored individual has an invalid number of networks: ' +
                str(len(self._networks)))
        self.network = self._networks[0]
        if len(self._losses) != 1:
            raise Exception(
                'Restored individual has an invalid number of losses: ' +
                str(len(self._losses)))
        self.loss = self._losses[0]

    def __eq__(self, other):
        if (super(ClassifierIndividualACDG, self).__eq__(other)
                and self.loss == other.loss and self.network == other.network):
            return True
        return False

    def mutate(self, prob):
        result = ClassifierIndividualACDG.__new__(ClassifierIndividualACDG)
        pb = self.get_pb()
        result.__setstate__(pb)
        result.network = self.network.mutate(prob=prob)[0]
        result._networks = [result.network]
        result._id_name = self.getNewName()
        return [result]

    def recombine(self, other):
        result = ClassifierIndividualACDG.__new__(ClassifierIndividualACDG)
        pb = self.get_pb()
        result.__setstate__(pb)
        result.network = self.network.recombine(other.network)[0]
        result._networks = [result.network]
        result._id_name = self.getNewName()
        return [result]

    def norm(self, other):
        return self.network.norm(other.network)

    def update_state(self, *args, **kwargs):
        self.network.update_state(*args, **kwargs)

    def build_instance(self, nn_framework):
        nn_framework.init_model({IOLabel.DATA}, {IOLabel.TARGET})
        f_id2obj = dict()
        for f in self.network.functions:
            nn_framework.add_function(f)
            f_id2obj[f.id_name] = f
        nn_framework.set_train_parameters(
            **{
                nn_framework.arg_LOSS: self.loss.__class__,
            })
        softmax_out = list()
        for label, f_id in self.network.output_mapping.values():
            f_obj = f_id2obj[f_id]
            softmax = Softmax(**Softmax.generateParameters(
                input_dict={IOLabel.SOFTMAX_IN: (label, f_obj.outputs, f_id)},
                expected_outputs={IOLabel.SOFTMAX_OUT: f_obj.outputs[label]},
            )[0][0])
            nn_framework.add_function(softmax)
            softmax_out.append((IOLabel.SOFTMAX_OUT, softmax.id_name))
        nn_framework.finalize_model(output_ids=softmax_out)

    def train_instance(self, nn_framework):
        return nn_framework.train()
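mutate() and recombine() in code example #11 share the same sequence: clone the individual through its protobuf state, swap in a new network, and assign a fresh name. A hedged refactoring sketch (the method name is illustrative; everything it calls appears in the class above):

    def _clone_with_network(self, network):
        # Clone this individual via its protobuf state, then replace the
        # network and give the clone a fresh id, as mutate()/recombine() do.
        result = ClassifierIndividualACDG.__new__(ClassifierIndividualACDG)
        result.__setstate__(self.get_pb())
        result.network = network
        result._networks = [result.network]
        result._id_name = self.getNewName()
        return [result]

With this helper, mutate(prob) reduces to return self._clone_with_network(self.network.mutate(prob=prob)[0]), and recombine(other) to return self._clone_with_network(self.network.recombine(other.network)[0]).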
Code example #12
    def test_instantiation_USD_ONTS_Dense_Merge(self):
        for i in range(10):
            batch = 1
            _data = TypeShape(
                DFloat, Shape((DimNames.BATCH, batch), (DimNames.UNITS, 20)))

            IOLabel.DS1 = 'DS1'
            IOLabel.DS2 = 'DS2'
            inputs = {
                IOLabel.DS1: (IOLabel.DATA, _data, 'Dataset'),
                IOLabel.DS2: (IOLabel.DATA, _data, 'Dataset')
            }

            outShape = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 10))
            outShape1 = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 15))
            outputs = {
                'out0': TypeShape(DFloat, outShape),
                'out1': TypeShape(DFloat, outShape1)
            }
            functions = [Merge, Dense]
            NN = NeuralNetwork(
                **{
                    NeuralNetwork.arg_INPUTS: inputs,
                    NeuralNetwork.arg_OUTPUT_TARGETS: outputs,
                    NeuralNetwork.arg_FUNCTIONS: functions,
                    NeuralNetwork.arg_MAX_BRANCH: 2
                })
            self.assertIsNotNone(NN)

            pb = NN.get_pb()
            state = NN.__getstate__()

            NN_pb = NeuralNetwork.__new__(NeuralNetwork)
            NN_pb.__setstate__(pb)
            self.assertIsNot(NN, NN_pb)

            NN_state = NeuralNetwork.__new__(NeuralNetwork)
            NN_state.__setstate__(state)
            self.assertIsNot(NN, NN_state)

            NN_mut = NN.mutate(1)[0]
            self.assertEqual(pb, NN.get_pb())
            self.assertIsNot(NN, NN_mut)
            self.assertNotEqual(NN, NN_mut)

            f_ids = dict([(_id, None) for _, _id in NN_mut.inputs.values()])
            for _f in NN_mut.functions:
                f_ids[_f.id_name] = _f

            for _f in NN_mut.functions:
                for _f_input, (other_output, other_id) in _f.inputs.items():
                    if other_id not in f_ids:
                        self.assertTrue(False)

            stack = [f_id for _, f_id in NN_mut.output_mapping.values()]
            required_ids = set()
            while stack:
                f_id = stack.pop()
                required_ids.add(f_id)
                f_ = f_ids.get(f_id)
                if f_ is not None:
                    stack.extend([f_id for _, f_id in f_.inputs.values()])
            self.assertSetEqual(required_ids, set(f_ids.keys()))

            NN_mut = NN.mutate(1)[0]
            self.assertEqual(pb, NN.get_pb())
            self.assertIsNot(NN, NN_mut)
            self.assertNotEqual(NN, NN_mut)

            f_ids = dict([(_id, None) for _, _id in NN_mut.inputs.values()])
            for _f in NN_mut.functions:
                f_ids[_f.id_name] = _f

            for _f in NN_mut.functions:
                for _f_input, (other_output, other_id) in _f.inputs.items():
                    if other_id not in f_ids:
                        self.assertTrue(False)

            stack = [f_id for _, f_id in NN_mut.output_mapping.values()]
            required_ids = set()
            while stack:
                f_id = stack.pop()
                required_ids.add(f_id)
                f_ = f_ids.get(f_id)
                if f_ is not None:
                    stack.extend([f_id for _, f_id in f_.inputs.values()])
            self.assertSetEqual(required_ids, set(f_ids.keys()))

            NN_mut = NN.mutate(0)[0]
            NN_mut._id_name = NN._id_name
            self.assertNotEqual(NN, NN_mut)
Code example #13
    def test_recombination_Dense_Merge(self):
        for i in range(100):
            batch = 1
            _data = TypeShape(
                DFloat, Shape((DimNames.BATCH, batch), (DimNames.UNITS, 20)))

            IOLabel.DS1 = 'DS1'
            IOLabel.DS2 = 'DS2'
            inputs = {
                IOLabel.DS1: (IOLabel.DATA, _data, 'Dataset'),
                IOLabel.DS2: (IOLabel.DATA, _data, 'Dataset')
            }

            outShape = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 10))
            outShape1 = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 15))
            outputs = {
                'out0': TypeShape(DFloat, outShape),
                'out1': TypeShape(DFloat, outShape1)
            }
            functions = [Merge, Dense]
            NN1 = NeuralNetwork(
                **{
                    NeuralNetwork.arg_INPUTS: inputs,
                    NeuralNetwork.arg_OUTPUT_TARGETS: outputs,
                    NeuralNetwork.arg_FUNCTIONS: functions,
                    NeuralNetwork.arg_RECOMBINATION_PROBABILITY: 1.0
                })
            self.assertIsNotNone(NN1)
            NN2 = NeuralNetwork(
                **{
                    NeuralNetwork.arg_INPUTS: inputs,
                    NeuralNetwork.arg_OUTPUT_TARGETS: outputs,
                    NeuralNetwork.arg_FUNCTIONS: functions,
                    NeuralNetwork.arg_RECOMBINATION_PROBABILITY: 1.0
                })
            self.assertIsNotNone(NN2)
            NN_rec = NN1.recombine(NN2)[0]

            f_ids = dict([(_id, None) for _, _id in NN_rec.inputs.values()])
            for _f in NN_rec.functions:
                f_ids[_f.id_name] = _f

            for _f in NN_rec.functions:
                for _f_input, (other_output, other_id) in _f.inputs.items():
                    if other_id not in f_ids:
                        self.assertTrue(False)

            stack = [f_id for _, f_id in NN_rec.output_mapping.values()]
            required_ids = set()
            while stack:
                f_id = stack.pop()
                required_ids.add(f_id)
                f_ = f_ids.get(f_id)
                if f_ is not None:
                    stack.extend([f_id for _, f_id in f_.inputs.values()])
            self.assertSetEqual(required_ids, set(f_ids.keys()))
            for f in NN_rec.functions:
                if f.__class__ != Dense:
                    continue
                kernel = [
                    v for v in f.variables if v.name.endswith('|kernel')
                ][0]
                label, f_id = f.inputs['DATA_IN']
                _f = f_ids[f_id]
                if _f is not None:
                    self.assertEqual(kernel.shape,
                                     (_f.outputs[label].shape[DimNames.UNITS],
                                      f.attr[f.arg_OUT_NAMED_TYPE_SHAPES]
                                      ['DATA_OUT'].shape[DimNames.UNITS]))