def test_pb(self):
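        # Serializes the dataset to protobuf via get_pb(), restores it on a bare
        # instance through __setstate__, and checks that the data splits, index,
        # length and id survive the round trip.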
        train_samples = 10
        train_X = [np.random.rand(3, 4, 5) for _ in range(train_samples)]
        train_Y = [np.random.rand(2) for _ in range(train_samples)]

        batch = 3
        ds0 = UncorrelatedSupervised(train_X=train_X,
                                     train_Y=train_Y,
                                     batch=batch,
                                     typeShapes={
                                         IOLabel.DEFAULT:
                                         TypeShape(
                                             DFloat,
                                             Shape((DimNames.CHANNEL, 3),
                                                   (DimNames.HEIGHT, 4),
                                                   (DimNames.WIDTH, 5)))
                                     })
        pb = ds0.get_pb()
        self.assertIsNotNone(pb)
        ds1 = UncorrelatedSupervised.__new__(UncorrelatedSupervised)
        ds1.__setstate__(pb)

        for data0, data1 in [(ds0.train_X, ds1.train_X),
                             (ds0.train_Y, ds1.train_Y),
                             (ds0.test_X, ds1.test_X),
                             (ds0.test_Y, ds1.test_Y)]:
            self.assertTrue(
                (data0 is None and data1 is None)
                or (len(data0) == len(data1)
                    and all(np.array_equal(_d0, _d1)
                            for _d0, _d1 in zip(data0, data1))))
        self.assertEqual(ds0.idx, ds1.idx)
        self.assertEqual(ds0.len, ds1.len)
        self.assertEqual(ds0._id_name, ds1._id_name)
        self.assertIsNotNone(ds0)
        self.assertIsNotNone(ds1)
Example #2
    def test_Conv_Flatten_Pool_Dense_Merge(self):
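        # Builds a synthetic 32x32x3 image classification dataset, samples a
        # ClassifierIndividualACDG restricted to Conv2D, Flatten, Dense and Merge
        # functions, and evaluates accuracy, time and memory with NVIDIATensorFlow.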
        train_samples = 1000
        data_X, data_Y = make_classification(
            n_samples=train_samples,
            n_features=3072,
            n_classes=5,
            n_informative=4,
        )
        data_X = data_X.reshape((train_samples, 32, 32, 3))
        data_Y = tf.keras.utils.to_categorical(data_Y)
        data_X, data_Y = np.asarray(data_X), np.asarray(data_Y)
        split = int(train_samples * .9)
        train_X, test_X = data_X[:split, :], data_X[split:, :]
        train_Y, test_Y = data_Y[:split, :], data_Y[split:, :]

        batch = None
        dataset = UncorrelatedSupervised(
            train_X=train_X,
            train_Y=train_Y,
            test_X=test_X,
            test_Y=test_Y,
            batch=batch,
            typeShapes={
                IOLabel.DATA:
                TypeShape(
                    DFloat,
                    Shape((DimNames.HEIGHT, 32), (DimNames.WIDTH, 32),
                          (DimNames.CHANNEL, 3))),
                IOLabel.TARGET:
                TypeShape(DFloat, Shape((DimNames.UNITS, 5)))
            },
            name='Dataset')

        ci = ClassifierIndividualACDG(
            **{
                ClassifierIndividualACDG.arg_DATA_NTS:
                dict([(label, (nts, dataset.id_name))
                      for label, nts in dataset.outputs.items()]),
                ClassifierIndividualACDG.arg_NN_FUNCTIONS:
                [Conv2D, Flatten, Dense, Merge],
                ClassifierIndividualACDG.arg_MAX_NN_DEPTH:
                10,
            })

        framework = NVIDIATensorFlow(**{
            NVIDIATensorFlow.arg_DATA_SETS: [dataset],
        })

        ci.build_instance(framework)
        framework.accuracy(ci)
        framework.time()
        framework.memory()
        # framework.flops_per_sample()
        # framework.parameters()
        framework.reset()
Example #3
    def test_dataSamplingTrain(self):
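        # Iterates the training split using both positional ('train', 'Train', ...)
        # and keyword spellings and checks that every batch carries DATA/TARGET
        # entries of the configured batch size with the original sample shapes.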
        train_samples = 10
        train_X = [np.random.rand(3, 4, 5) for _ in range(train_samples)]
        train_Y = [np.random.rand(2) for _ in range(train_samples)]

        batch = 3
        dataset = UncorrelatedSupervised(train_X=train_X,
                                         train_Y=train_Y,
                                         batch=batch,
                                         typeShapes={
                                             IOLabel.DEFAULT:
                                             TypeShape(
                                                 DFloat,
                                                 Shape((DimNames.CHANNEL, 3),
                                                       (DimNames.HEIGHT, 4),
                                                       (DimNames.WIDTH, 5)))
                                         })
        for i in ['train', 'Train', 'TRAIN', 1, True]:
            for idx, d_set in enumerate(dataset(i)):
                self.assertEqual(len(d_set), 2)
                self.assertEqual(len(d_set[IOLabel.DATA]), batch)
                self.assertEqual(len(d_set[IOLabel.TARGET]), batch)
                self.assertTupleEqual(d_set[IOLabel.DATA][0].shape,
                                      train_X[0].shape)
                self.assertTupleEqual(d_set[IOLabel.TARGET][0].shape,
                                      train_Y[0].shape)
                if idx > 20:
                    break
        for i in [{'train': 1}, {'Train': 1}, {'TRAIN': 1},
                  {'train': True}, {'Train': True}, {'TRAIN': True}]:
            for idx, d_set in enumerate(dataset(**i)):
                self.assertEqual(len(d_set), 2)
                self.assertEqual(len(d_set[IOLabel.DATA]), batch)
                self.assertEqual(len(d_set[IOLabel.TARGET]), batch)
                self.assertTupleEqual(d_set[IOLabel.DATA][0].shape,
                                      train_X[0].shape)
                self.assertTupleEqual(d_set[IOLabel.TARGET][0].shape,
                                      train_Y[0].shape)
                if idx > 20:
                    break
Example #4
    def test_classifier_individualOPACDG(self):
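        # Round-trips a ClassifierIndividualOPACDG through both __getstate__/__setstate__
        # and its protobuf representation and checks that the restored copies compare
        # equal to the original without being the same object.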
        train_samples = 1
        train_X = [np.random.rand(20) for _ in range(train_samples)]
        train_Y = [np.random.rand(10) for _ in range(train_samples)]

        batch = 1
        dataset = UncorrelatedSupervised(
            train_X=train_X,
            train_Y=train_Y,
            batch=batch,
            typeShapes={
                IOLabel.DATA: TypeShape(DFloat, Shape((DimNames.UNITS, 20))),
                IOLabel.TARGET: TypeShape(DFloat, Shape((DimNames.UNITS, 10)))
            },
            name='Dataset')
        ci = ClassifierIndividualOPACDG(
            **{
                ClassifierIndividualOPACDG.arg_DATA_NTS:
                dict([(ts_label, (ts, dataset.id_name))
                      for ts_label, ts in dataset.outputs.items()])
            })
        self.assertIsNotNone(ci)
        ci.metrics['debug'] = .3

        pb = ci.get_pb()
        self.assertIsNotNone(pb)
        state = ci.__getstate__()
        self.assertIsNotNone(state)

        state_obj = ClassifierIndividualOPACDG.__new__(
            ClassifierIndividualOPACDG)
        state_obj.__setstate__(state)
        self.assertIsNotNone(state_obj)
        self.assertIsNot(ci, state_obj)
        self.assertEqual(ci, state_obj)

        pb_obj = ClassifierIndividualOPACDG.__new__(ClassifierIndividualOPACDG)
        pb_obj.__setstate__(pb)
        self.assertIsNotNone(pb_obj)
        self.assertIsNot(ci, pb_obj)
        self.assertEqual(ci, pb_obj)
Example #5
    def test_MLP_Dense_Merge_mutate(self):
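        # Mutates a Dense/Merge classifier once, verifies the mutated network is
        # well-formed (every input id resolves and all functions are reachable from
        # the outputs), then builds and evaluates it with NVIDIATensorFlow.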
        train_samples = 1000
        data_X, data_Y = make_classification(
            n_samples=train_samples,
            n_features=20,
            n_classes=5,
            n_informative=4,
        )
        data_Y = tf.keras.utils.to_categorical(data_Y)
        data_X, data_Y = np.asarray(data_X), np.asarray(data_Y)
        split = int(train_samples * .9)
        train_X, test_X = data_X[:split, :], data_X[split:, :]
        train_Y, test_Y = data_Y[:split, :], data_Y[split:, :]

        batch = None
        dataset = UncorrelatedSupervised(
            train_X=train_X,
            train_Y=train_Y,
            test_X=test_X,
            test_Y=test_Y,
            batch=batch,
            typeShapes={
                IOLabel.DATA: TypeShape(DFloat, Shape((DimNames.UNITS, 20))),
                IOLabel.TARGET: TypeShape(DFloat, Shape((DimNames.UNITS, 5)))
            },
            name='Dataset')

        ci = ClassifierIndividualACDG(
            **{
                ClassifierIndividualACDG.arg_DATA_NTS:
                dict([(label, (nts, dataset.id_name))
                      for label, nts in dataset.outputs.items()]),
                ClassifierIndividualACDG.arg_NN_FUNCTIONS: [Dense, Merge],
            })
        ci = ci.mutate(1)[0]
        NN = ci.network
        f_ids = dict([(_id, None) for _, _id in NN.inputs.values()])
        for _f in NN.functions:
            f_ids[_f.id_name] = _f

        for _f in NN.functions:
            for _f_input, (other_output, other_id) in _f.inputs.items():
                self.assertIn(other_id, f_ids)

        stack = [f_id for _, f_id in NN.output_mapping.values()]
        required_ids = set()
        while stack:
            f_id = stack.pop()
            required_ids.add(f_id)
            f_ = f_ids.get(f_id)
            if f_ is not None:
                stack.extend([f_id for _, f_id in f_.inputs.values()])
        self.assertSetEqual(required_ids, set(f_ids.keys()))

        framework = NVIDIATensorFlow(**{
            NVIDIATensorFlow.arg_DATA_SETS: [dataset],
        })

        ci.build_instance(framework)
        framework.accuracy(ci)
        framework.time()
        framework.memory()
        # framework.flops_per_sample()
        # framework.parameters()
        framework.reset()
Example #6
    def test_weightAgnosticIndividual(self):
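        # Checks WeightAgnosticIndividual serialization (state and protobuf round trips)
        # and that repeated mutate, recombine and step calls each return a
        # single-element list containing a WeightAgnosticIndividual.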
        train_samples = 1
        train_X = [np.random.rand(20) for _ in range(train_samples)]
        train_Y = [np.random.rand(10) for _ in range(train_samples)]

        batch = 1
        dataset = UncorrelatedSupervised(
            train_X=train_X,
            train_Y=train_Y,
            batch=batch,
            typeShapes={
                IOLabel.DATA:
                TypeShape(DFloat,
                          Shape((DimNames.UNITS, 5), (DimNames.UNITS, 5))),
                IOLabel.TARGET:
                TypeShape(DFloat, Shape((DimNames.UNITS, 10)))
            },
            name='Dataset')
        wann = WeightAgnosticIndividual(
            **{
                WeightAgnosticIndividual.arg_DATA_NTS: {
                    ts_label: (ts, dataset.id_name)
                    for ts_label, ts in dataset.outputs.items()
                },
                WeightAgnosticIndividual.arg_INITIAL_DEPTH: 5,
            })
        self.assertIsNotNone(wann)
        wann.metrics['debug'] = .3

        pb = wann.get_pb()
        self.assertIsNotNone(pb)
        state = wann.__getstate__()
        self.assertIsNotNone(state)

        state_obj = WeightAgnosticIndividual.__new__(WeightAgnosticIndividual)
        state_obj.__setstate__(state)
        self.assertIsNotNone(state_obj)
        self.assertIsNot(wann, state_obj)
        self.assertEqual(wann, state_obj)

        pb_obj = WeightAgnosticIndividual.__new__(WeightAgnosticIndividual)
        pb_obj.__setstate__(pb)
        self.assertIsNotNone(pb_obj)
        self.assertIsNot(wann, pb_obj)
        self.assertEqual(wann, pb_obj)

        m_obj = wann.mutate(1)
        self.assertIsInstance(m_obj, list)
        self.assertEqual(len(m_obj), 1)
        m_obj = m_obj[0]
        self.assertIsInstance(m_obj, WeightAgnosticIndividual)
        for _ in range(20):
            m_obj = m_obj.mutate(random())
            self.assertIsInstance(m_obj, list)
            self.assertEqual(len(m_obj), 1)
            m_obj = m_obj[0]
            self.assertIsInstance(m_obj, WeightAgnosticIndividual)

        wann_other = WeightAgnosticIndividual(
            **{
                WeightAgnosticIndividual.arg_DATA_NTS: {
                    ts_label: (ts, dataset.id_name)
                    for ts_label, ts in dataset.outputs.items()
                },
                WeightAgnosticIndividual.arg_INITIAL_DEPTH: 5,
            })

        rec_obj = wann.recombine(wann_other)
        self.assertIsInstance(rec_obj, list)
        self.assertEqual(len(rec_obj), 1)
        rec_A, rec_B = rec_obj[0], wann_other.recombine(wann)[0]
        self.assertIsInstance(rec_A, WeightAgnosticIndividual)
        self.assertIsInstance(rec_B, WeightAgnosticIndividual)
        for _ in range(20):
            rec_obj = rec_A.recombine(rec_B)
            self.assertIsInstance(rec_obj, list)
            self.assertEqual(len(rec_obj), 1)
            rec_A, rec_B = rec_obj[0], rec_B.recombine(rec_A)[0]
            self.assertIsInstance(rec_A, WeightAgnosticIndividual)
            self.assertIsInstance(rec_B, WeightAgnosticIndividual)

        m_step = wann.step(2)
        self.assertIsInstance(m_step, list)
        self.assertEqual(len(m_step), 1)
        m_step = m_step[0]
        self.assertIsInstance(m_step, WeightAgnosticIndividual)
        for _ in range(20):
            m_step = m_step.step(randint(1, 5))
            self.assertIsInstance(m_step, list)
            self.assertEqual(len(m_step), 1)
            m_step = m_step[0]
            self.assertIsInstance(m_step, WeightAgnosticIndividual)
Example #7
    def run_ga():
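        # Loads pickled CIFAR-100 splits, wraps them in an UncorrelatedSupervised
        # dataset, and assembles a GenerationalModel pipeline: random initialization,
        # local evaluation with NVIDIATensorFlow, accuracy/FLOPs/parameter metrics,
        # exponential ranking selection, recombination, mutation, elitism, a
        # no-progress stopping criterion, and state/history persistence, then runs it.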
        dataDir = '/path/to/experiment/folder/'
        classes = 100
        with open(dataDir + 'DataDumps/cifar-100_ga_train.p', 'rb') as f:
            data = pickle.load(f)
            train_X, data_Y = np.asarray(data['X']), data['Y']
            train_Y = np.zeros((len(data_Y), classes))
            train_Y[np.arange(len(data_Y)), data_Y] = 1
        with open(dataDir + 'DataDumps/cifar-100_ga_valid.p', 'rb') as f:
            data = pickle.load(f)
            valid_X, data_Y = np.asarray(data['X']), data['Y']
            valid_Y = np.zeros((len(data_Y), classes))
            valid_Y[np.arange(len(data_Y)), data_Y] = 1
        with open(dataDir + 'DataDumps/cifar-100_ga_test.p', 'rb') as f:
            data = pickle.load(f)
            test_X, data_Y = np.asarray(data['X']), data['Y']
            test_Y = np.zeros((len(data_Y), classes))
            test_Y[np.arange(len(data_Y)), data_Y] = 1

        dataset = UncorrelatedSupervised(
            **{
                UncorrelatedSupervised.arg_SHAPES: {
                    IOLabel.DATA:
                    TypeShape(
                        DFloat,
                        Shape((DimNames.HEIGHT, 32), (DimNames.WIDTH, 32),
                              (DimNames.CHANNEL, 3))),
                    IOLabel.TARGET:
                    TypeShape(DFloat, Shape((DimNames.UNITS, classes)))
                },
                UncorrelatedSupervised.arg_NAME: 'CIFAR-100',
                UncorrelatedSupervised.arg_TRAINX: train_X,
                UncorrelatedSupervised.arg_TRAINY: train_Y,
                UncorrelatedSupervised.arg_TESTX: test_X,
                UncorrelatedSupervised.arg_TESTY: test_Y,
                UncorrelatedSupervised.arg_VALIDX: valid_X,
                UncorrelatedSupervised.arg_VALIDY: valid_Y,
            })

        model = GenerationalModel()
        model.add([
            RandomInitializer(
                **{
                    RandomInitializer.arg_GEN_SIZE: 10,
                    RandomInitializer.arg_CLASS: ClassifierIndividualOPACDG,
                    RandomInitializer.arg_PARAM: {
                        ClassifierIndividualOPACDG.arg_NN_FUNCTIONS:
                        [QConv2D, QPooling2D, Flatten, Dense, Merge],
                        ClassifierIndividualOPACDG.arg_DATA_NTS:
                        dict([(ts_label, (ts, dataset.id_name))
                              for ts_label, ts in dataset.outputs.items()]),
                        ClassifierIndividualOPACDG.arg_MIN_NN_DEPTH:
                        2,
                        ClassifierIndividualOPACDG.arg_MAX_NN_DEPTH:
                        10,
                        ClassifierIndividualOPACDG.arg_MAX_NN_BRANCH:
                        1,
                    }
                }),
            LocalEH(
                **{
                    LocalEH.arg_NN_FRAMEWORK:
                    NVIDIATensorFlow(
                        **{
                            NVIDIATensorFlow.arg_DATA_SETS: [dataset],
                            NVIDIATensorFlow.arg_TMP_FILE: dataDir +
                            'CIFAR-100/LAMARCK_log/state.ckpt',
                            NVIDIATensorFlow.arg_BATCH_SIZE: 32,
                            NVIDIATensorFlow.arg_EPOCHS: 100,
                            NVIDIATensorFlow.arg_CMP: cmpClass
                        })
                }),
            Accuracy,
            FlOps,
            Parameters,
            ExponentialRankingSelection(
                **{
                    ExponentialRankingSelection.arg_LIMIT: 4,
                    ExponentialRankingSelection.arg_CMP: ind_cmp,
                }),
            Recombination(**{Recombination.arg_LIMIT: 10}),
            Mutation(),
            NElitism(**{
                NElitism.arg_N: 2,
                NElitism.arg_CMP: ind_cmp
            }),
            StopByNoProgress(
                **{
                    StopByNoProgress.arg_PATIENCE: 15,
                    StopByNoProgress.arg_CMP: ind_cmp
                }),
            ModelStateSaverLoader(
                **{
                    ModelStateSaverLoader.arg_PREPARATION:
                    True,
                    ModelStateSaverLoader.arg_REPRODUCTION:
                    False,
                    ModelStateSaverLoader.arg_SELECTION:
                    False,
                    ModelStateSaverLoader.arg_REPLACEMENT:
                    False,
                    ModelStateSaverLoader.arg_EVALUATION:
                    True,
                    ModelStateSaverLoader.arg_FILE:
                    dataDir + 'CIFAR-100/LAMARCK_log/model_state.pb',
                }),
            DSSqlite3(**{
                DSSqlite3.arg_FILE:
                dataDir + 'CIFAR-100/LAMARCK_log/history.db3'
            }),
            # TelegramNotifier(**{
            #   TelegramNotifier.arg_TOKEN: 'telegram-token',
            #   TelegramNotifier.arg_USER_IDS: {'chat ids as int'},
            #   TelegramNotifier.arg_SELECTION: True,
            #   TelegramNotifier.arg_NEA_DONE: True})
        ])

        if model.reset():
            print('Successfully initialized model!')
        model.run()
Example #8
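# Module-level script fragment: splits the data into test and validation subsets,
# wraps all splits in an UncorrelatedSupervised dataset, and begins assembling a
# GenerationalModel pipeline as in run_ga() above.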
test_split = int(train_samples * .9)
valid_split = int(train_samples * .75)
train_Y, test_Y = data_Y[:test_split, :], data_Y[test_split:, :]
train_X, valid_X = train_X[:valid_split, :], train_X[valid_split:, :]
train_Y, valid_Y = train_Y[:valid_split, :], train_Y[valid_split:, :]

batch = None
dataset = UncorrelatedSupervised(train_X=train_X,
                                 train_Y=train_Y,
                                 test_X=test_X,
                                 test_Y=test_Y,
                                 valid_X=valid_X,
                                 valid_Y=valid_Y,
                                 batch=batch,
                                 typeShapes={
                                     IOLabel.DATA:
                                     TypeShape(DFloat,
                                               Shape((DimNames.UNITS, 20))),
                                     IOLabel.TARGET:
                                     TypeShape(DFloat,
                                               Shape((DimNames.UNITS, 5)))
                                 },
                                 name='Dataset')

model = GenerationalModel()
model.add([
    RandomInitializer(
        **{
            RandomInitializer.arg_GEN_SIZE: 10,
            RandomInitializer.arg_CLASS: ClassifierIndividualOPACDG,
            RandomInitializer.arg_PARAM: {