def test_instantiate(self):
  for j in range(10):
    batch = 1
    _data = TypeShape(DFloat,
                      Shape((DimNames.BATCH, batch), (DimNames.UNITS, 20)))
    IOLabel.DS1 = 'DS1'
    IOLabel.DS2 = 'DS2'
    inputs = {
        IOLabel.DS1: (IOLabel.DATA, _data, 'Dataset0'),
        IOLabel.DS2: (IOLabel.DATA, _data, 'Dataset1')
    }
    outShape = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 10))
    outShape1 = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 15))
    outputs = {
        'out0': TypeShape(DFloat, outShape),
        'out1': TypeShape(DFloat, outShape1)
    }
    functions = [Merge, Dense]
    OPNN0 = OverParameterizedNeuralNetwork(
        **{
            OverParameterizedNeuralNetwork.arg_INPUTS: inputs,
            OverParameterizedNeuralNetwork.arg_OUTPUT_TARGETS: outputs,
            OverParameterizedNeuralNetwork.arg_FUNCTIONS: functions,
            OverParameterizedNeuralNetwork.arg_MAX_BRANCH: 2,
        })
    pb = OPNN0.get_pb()
    pb_OPNN = object.__new__(OverParameterizedNeuralNetwork)
    pb_OPNN.__setstate__(pb)
    self.assertEqual(OPNN0, pb_OPNN)
    OPNN1 = OverParameterizedNeuralNetwork(
        **{
            OverParameterizedNeuralNetwork.arg_INPUTS: inputs,
            OverParameterizedNeuralNetwork.arg_OUTPUT_TARGETS: outputs,
            OverParameterizedNeuralNetwork.arg_FUNCTIONS: functions,
            OverParameterizedNeuralNetwork.arg_MAX_BRANCH: 2,
        })
    for i in range(1):
      mutOPNN = OPNN0.recombine(OPNN1)[0]
      for f in mutOPNN.functions:
        self.assertTrue(f.id_name in mutOPNN.meta_functions)
      functions = {f.id_name for f in mutOPNN.functions}
      for out_id, (label, f_id) in mutOPNN.output_mapping.items():
        self.assertTrue(f_id in functions)
      for k in range(10):
        mutOPNN = mutOPNN.mutate(1)[0]
        for f in mutOPNN.functions:
          self.assertTrue(f.id_name in mutOPNN.meta_functions)
          if isinstance(f, Merge):
            self.assertEqual(len(f.inputs), 2)
        functions = {f.id_name for f in mutOPNN.functions}
        for out_id, (label, f_id) in mutOPNN.output_mapping.items():
          self.assertTrue(f_id in functions)
  pass
def test_reachable(self):
  # target = TypeShape(DFloat, Shape((DimNames.BATCH, 1),
  #                                  (DimNames.UNITS, 20)))
  input_shape = TypeShape(
      DFloat,
      Shape((DimNames.BATCH, 1), (DimNames.HEIGHT, 32),
            (DimNames.WIDTH, 32), (DimNames.CHANNEL, 3)))
  depth = 8
  for i in range(1, 100):
    # input_shape = TypeShape(DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, i)))
    target = TypeShape(
        DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, i * 10)))
    print()
    print(input_shape)
    print(target)
    print(
        NeuralNetwork.reachable(input_nts=input_shape,
                                target_nts=target,
                                max_depth=depth,
                                function_pool={Conv2D, Flatten}))
    print(
        list(
            Dense.possible_output_shapes(
                input_ntss={IOLabel.DEFAULT: input_shape},
                target_output=target,
                is_reachable=lambda x, y: NeuralNetwork.reachable(
                    x, y, depth - 1, {Dense, Merge}),
            )))
  pass
@classmethod
def generateParameters(cls,
                       input_dict: Dict[str, Tuple[str, Dict[str, TypeShape], str]],
                       expected_outputs: Set[TypeShape],
                       variable_pool: dict = None) -> \
    Tuple[List[Dict[str, object]], List[float]]:
  default_input_label, default_input_ntss, _ = input_dict.get(IOLabel.MERGE_IN)
  other_input_label, other_input_ntss, _ = input_dict.get(IOLabel.MERGE_OTHER)
  default_input_nts = default_input_ntss[default_input_label]
  other_input_nts = other_input_ntss[other_input_label]
  # merged inputs must share a dtype
  if default_input_nts.dtype != other_input_nts.dtype:
    return [], []
  out_shape = Shape()
  for _dim in default_input_nts.shape.dim:
    if _dim.name == DimNames.CHANNEL or \
        _dim.name == DimNames.UNITS:
      # concatenation axis: sizes add up
      out_shape.dim.append(
          Shape.Dim(_dim.name,
                    _dim.size + other_input_nts.shape[_dim.name]))
    elif _dim.size != other_input_nts.shape[_dim.name]:
      # all other dimensions have to match exactly
      return [], []
    else:
      out_shape.dim.append(Shape.Dim(_dim.name, _dim.size))
  input_mapping = dict([(l_in, (l_out, df_id_name))
                        for l_in, (l_out, _, df_id_name) in input_dict.items()])
  return [{
      cls.arg_INPUT_MAPPING: input_mapping,
      cls.arg_ATTRIBUTES: {
          cls.arg_OUT_NAMED_TYPE_SHAPES: {
              IOLabel.MERGE_OUT: TypeShape(default_input_nts.dtype, out_shape)
          }
      },
  }], [1.0]
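# A minimal standalone sketch of the concatenation rule implemented above,
# using plain dicts in place of Shape/TypeShape (the helper name and dict
# representation are illustrative, not part of the framework): sizes add up
# on the CHANNEL/UNITS axis, every other dimension must match exactly.
def _merge_shape_sketch(a, b, concat_axes=('CHANNEL', 'UNITS')):
  out = {}
  for name, size in a.items():
    if name in concat_axes:
      out[name] = size + b[name]  # concatenation axis: sizes add up
    elif size != b[name]:
      return None  # any other mismatch makes the merge invalid
    else:
      out[name] = size
  return out

assert _merge_shape_sketch({'BATCH': 1, 'UNITS': 20},
                           {'BATCH': 1, 'UNITS': 15}) == {'BATCH': 1,
                                                          'UNITS': 35}
assert _merge_shape_sketch({'BATCH': 1, 'UNITS': 20},
                           {'BATCH': 2, 'UNITS': 15}) is None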
def __init__(self, **kwargs):
  super(TestDBSqlite3.dummyModel, self).__init__(**kwargs)
  _data_nts = TypeShape(DFloat,
                        Shape((DimNames.BATCH, 1), (DimNames.UNITS, 20)))
  _target_nts = TypeShape(DFloat,
                          Shape((DimNames.BATCH, 1), (DimNames.UNITS, 10)))
  self.ci = ClassifierIndividualOPACDG(
      **{
          NetworkIndividualInterface.arg_DATA_NTS: {
              IOLabel.DATA: (_data_nts, 'Dataset'),
              IOLabel.TARGET: (_target_nts, 'Dataset')
          },
      })
  self.anc1 = ClassifierIndividualOPACDG(
      **{
          NetworkIndividualInterface.arg_DATA_NTS: {
              IOLabel.DATA: (_data_nts, 'Dataset'),
              IOLabel.TARGET: (_target_nts, 'Dataset')
          },
      })
  self.anc2 = ClassifierIndividualOPACDG(
      **{
          NetworkIndividualInterface.arg_DATA_NTS: {
              IOLabel.DATA: (_data_nts, 'Dataset'),
              IOLabel.TARGET: (_target_nts, 'Dataset')
          },
      })
  self._GENERATION = [self.ci]
  self._GENERATION_IDX = 1
def test_instantiation_Conv2D_Pool2D_Flatten_Dense(self):
  for i in range(10):
    batch = 1
    _data = TypeShape(
        DFloat,
        Shape((DimNames.BATCH, batch), (DimNames.HEIGHT, 32),
              (DimNames.WIDTH, 32), (DimNames.CHANNEL, 3)))
    _target = TypeShape(DFloat,
                        Shape((DimNames.BATCH, batch), (DimNames.UNITS, 10)))
    outputs = {'out0': _target}
    IOLabel.DS = 'DS'
    inputs = {IOLabel.DS: (IOLabel.DATA, _data, 'Dataset')}
    functions = [Conv2D, Pooling2D, Flatten, Dense]
    NN = NeuralNetwork(
        **{
            NeuralNetwork.arg_INPUTS: inputs,
            NeuralNetwork.arg_OUTPUT_TARGETS: outputs,
            NeuralNetwork.arg_FUNCTIONS: functions
        })
    self.assertIsNotNone(NN)
    pb = NN.get_pb()
    state = NN.__getstate__()
    f_ids = dict([(_id, None) for _, _id in NN.inputs.values()])
    for _f in NN.functions:
      f_ids[_f.id_name] = _f
    # every function input must be provided by a known function or data input
    for _f in NN.functions:
      for _f_input, (other_output, other_id) in _f.inputs.items():
        self.assertIn(other_id, f_ids)
    # walk back from the outputs: every registered function must be required
    stack = [f_id for _, f_id in NN.output_mapping.values()]
    required_ids = set()
    while stack:
      f_id = stack.pop()
      required_ids.add(f_id)
      f_ = f_ids.get(f_id)
      if f_ is not None:
        stack.extend([f_id for _, f_id in f_.inputs.values()])
    self.assertSetEqual(required_ids, set(f_ids.keys()))
    NN_pb = NeuralNetwork.__new__(NeuralNetwork)
    NN_pb.__setstate__(pb)
    self.assertIsNot(NN, NN_pb)
    NN_state = NeuralNetwork.__new__(NeuralNetwork)
    NN_state.__setstate__(state)
    self.assertIsNot(NN, NN_state)
    NN_mut = NN.mutate(100)
    self.assertIsNot(NN, NN_mut)
    self.assertNotEqual(NN, NN_mut)
    NN_mut = NN.mutate(0)
    self.assertIsNot(NN, NN_mut)
    self.assertNotEqual(NN, NN_mut)
def test_Conv2D(self):
  IOLabel.DUMMY1 = 'DUMMY1'
  IOLabel.DUMMY2 = 'DUMMY2'
  _shape = Shape((DN.BATCH, -1), (DN.WIDTH, 64), (DN.HEIGHT, 64),
                 (DN.CHANNEL, 4))
  shape_ = Shape((DN.BATCH, -1), (DN.WIDTH, 32), (DN.HEIGHT, 32),
                 (DN.CHANNEL, 6))
  _input = {IOLabel.DUMMY1: TypeShape(DDouble, _shape)}
  _output = TypeShape(DDouble, shape_)
  _expected_output = TypeShape(DDouble, shape_)
  pos = list(
      Conv2D.possible_output_shapes(
          input_ntss=_input,
          target_output=_output,
          is_reachable=lambda x, y: NeuralNetwork.reachable(
              x, y, 1, {Conv2D})))
  self.assertTrue(
      any([_expected_output in out.values() for _, out, _ in pos]))
  self.assertTrue(all([len(remaining) == 0 for remaining, _, _ in pos]))
  self.assertTrue(
      all([
          IOLabel.CONV2D_IN in mapping and
          mapping[IOLabel.CONV2D_IN] == IOLabel.DUMMY1
          for _, _, mapping in pos
      ]))
  dummyDF = TestSubFunctions.DummyDF()
  dummyDF._outputs = {IOLabel.DUMMY1: TypeShape(DDouble, _shape)}
  for _, out, _ in pos:
    for parameters in Conv2D.generateParameters(
        input_dict={
            IOLabel.CONV2D_IN: (IOLabel.DUMMY1, dummyDF.outputs,
                                dummyDF.id_name)
        },
        expected_outputs={IOLabel.CONV2D_OUT: _output},
        variable_pool={},
    )[0]:
      _conv2D = Conv2D(**parameters)
      pb = _conv2D.get_pb()
      self.assertIsNotNone(pb)
      state = _conv2D.__getstate__()
      self.assertIsNotNone(state)
      new_conv2D = Conv2D.__new__(Conv2D)
      new_conv2D.__setstate__(pb)
      self.assertEqual(_conv2D, new_conv2D)
      self.assertIsNot(_conv2D, new_conv2D)
      new_conv2D = Conv2D.__new__(Conv2D)
      new_conv2D.__setstate__(state)
      self.assertEqual(_conv2D, new_conv2D)
      self.assertIsNot(_conv2D, new_conv2D)
      m_conv2D = _conv2D.mutate(100)
      self.assertNotEqual(_conv2D, m_conv2D)
      m_conv2D = _conv2D.mutate(0)
      self.assertEqual(_conv2D, m_conv2D)
      self.assertIsNot(_conv2D, m_conv2D)
  pass
@classmethod
def possible_output_shapes(cls,
                           input_ntss: Dict[str, TypeShape],
                           target_output: TypeShape,
                           is_reachable,
                           max_possibilities: int = 10,
                           **kwargs) -> \
    List[Tuple[Dict[str, TypeShape], Dict[str, TypeShape], Dict[str, str]]]:
  target_shape = target_output.shape
  for label, nts in input_ntss.items():
    if nts.dtype not in cls.allowedTypes:
      continue
    possible_sizes = []
    names = []
    invalid_dim = False
    for _dim in nts.shape.dim:
      target_size = target_shape[_dim.name]
      if _dim.name == DimNames.WIDTH or \
          _dim.name == DimNames.HEIGHT:
        # candidate output sizes are bounded by the min/max scaling factors
        lower_border = max(math.floor(_dim.size * cls.__min_f_hw),
                           (min(2, target_size)
                            if target_size is not None else 2))
        upper_border = math.ceil(_dim.size * cls.__max_f_hw)
        pool = list(range(lower_border + 1, upper_border))
        border_pool = list({upper_border, lower_border})
        if target_size is None or \
            not (lower_border < target_size < upper_border):
          pool = sample(
              pool,
              k=min(max(max_possibilities - len(border_pool), 0), len(pool)))
        else:
          # keep the exact target size among the sampled candidates
          pool.remove(target_size)
          pool = sample(
              pool,
              k=min(max(max_possibilities - len(border_pool) - 1, 0),
                    len(pool))) + [target_size]
        pool = pool + border_pool
      elif _dim.name == DimNames.CHANNEL or \
          _dim.name == DimNames.BATCH:
        pool = [_dim.size]
      else:
        invalid_dim = True
        break
      possible_sizes.append(pool)
      names.append(_dim.name)
    if invalid_dim:
      continue
    for comb in Shape.random_dimension_product(possible_sizes):
      out_nts = TypeShape(nts.dtype, Shape(*zip(names, comb)))
      if is_reachable(out_nts, target_output):
        yield ({}, {IOLabel.POOLING2D_OUT: out_nts},
               {IOLabel.POOLING2D_IN: label})
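# Standalone sketch of the candidate-size sampling used above for a single
# WIDTH/HEIGHT dimension. The scaling factors are assumed values for
# illustration; the real bounds come from the private class attributes
# __min_f_hw/__max_f_hw, which are not visible in this excerpt.
import math
from random import sample

def _pool_candidates_sketch(dim_size, target_size, min_f=0.25, max_f=1.0,
                            max_possibilities=10):
  lower = max(math.floor(dim_size * min_f),
              min(2, target_size) if target_size is not None else 2)
  upper = math.ceil(dim_size * max_f)
  pool = list(range(lower + 1, upper))
  border_pool = list({lower, upper})  # the borders are always candidates
  if target_size is None or not (lower < target_size < upper):
    pool = sample(pool, k=min(max(max_possibilities - len(border_pool), 0),
                              len(pool)))
  else:
    pool.remove(target_size)  # re-added below so it is always included
    pool = sample(pool, k=min(max(max_possibilities - len(border_pool) - 1, 0),
                              len(pool))) + [target_size]
  return pool + border_pool

print(sorted(_pool_candidates_sketch(32, 16)))  # always contains 8, 16 and 32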
def test_Conv_Flatten_Pool_Dense_Merge(self):
  train_samples = 1000
  data_X, data_Y = make_classification(
      n_samples=train_samples,
      n_features=3072,
      n_classes=5,
      n_informative=4,
  )
  data_X = data_X.reshape((train_samples, 32, 32, 3))
  data_Y = tf.keras.utils.to_categorical(data_Y)
  data_X, data_Y = np.asarray(data_X), np.asarray(data_Y)
  train_X, test_X = data_X[:int(train_samples * .9), :], \
                    data_X[int(train_samples * .9):, :]
  train_Y, test_Y = data_Y[:int(train_samples * .9), :], \
                    data_Y[int(train_samples * .9):, :]
  batch = None
  dataset = UncorrelatedSupervised(
      train_X=train_X,
      train_Y=train_Y,
      test_X=test_X,
      test_Y=test_Y,
      batch=batch,
      typeShapes={
          IOLabel.DATA:
              TypeShape(
                  DFloat,
                  Shape((DimNames.HEIGHT, 32), (DimNames.WIDTH, 32),
                        (DimNames.CHANNEL, 3))),
          IOLabel.TARGET:
              TypeShape(DFloat, Shape((DimNames.UNITS, 5)))
      },
      name='Dataset')
  ci = ClassifierIndividualACDG(
      **{
          ClassifierIndividualACDG.arg_DATA_NTS:
              dict([(label, (nts, dataset.id_name))
                    for label, nts in dataset.outputs.items()]),
          ClassifierIndividualACDG.arg_NN_FUNCTIONS:
              [Conv2D, Flatten, Dense, Merge],
          ClassifierIndividualACDG.arg_MAX_NN_DEPTH: 10,
      })
  framework = NVIDIATensorFlow(**{
      NVIDIATensorFlow.arg_DATA_SETS: [dataset],
  })
  ci.build_instance(framework)
  framework.accuracy(ci)
  framework.time()
  framework.memory()
  # framework.flops_per_sample()
  # framework.parameters()
  framework.reset()
@classmethod
def possible_output_shapes(cls,
                           input_ntss: Dict[str, TypeShape],
                           target_output: TypeShape,
                           is_reachable,
                           max_possibilities: int = 10,
                           **kwargs) -> \
    List[Tuple[Dict[str, TypeShape], Dict[str, TypeShape], Dict[str, str]]]:
  target_shape = target_output.shape
  for label, nts in input_ntss.items():
    if nts.dtype not in cls.allowedTypes or \
        nts.dtype != target_output.dtype:
      continue
    possible_sizes = []
    names = []
    invalid_dim = False
    for _dim in nts.shape.dim:
      target_size = target_shape[_dim.name]
      if _dim.name == DimNames.WIDTH or \
          _dim.name == DimNames.HEIGHT or \
          _dim.name == DimNames.BATCH or \
          _dim.name == DimNames.TIME:
        # these dimensions are passed through unchanged
        pool = [_dim.size]
      elif _dim.name == DimNames.CHANNEL or \
          _dim.name == DimNames.UNITS:
        # concatenation axis: sample sizes for the second (remaining) input
        lower_border = max(math.floor(_dim.size * cls.__min_f),
                           (min(2, target_size)
                            if target_size is not None else 2))
        upper_border = math.ceil(_dim.size * cls.__max_f)
        pool = list(range(lower_border + 1, upper_border))
        border_pool = list({lower_border, upper_border})
        if target_size is None or \
            not (lower_border < (target_size - _dim.size) < upper_border):
          pool = sample(
              pool,
              k=min(max(max_possibilities - len(border_pool), 0), len(pool)))
        else:
          # keep the complement that hits the target size exactly
          pool.remove(target_size - _dim.size)
          pool = sample(
              pool,
              k=min(max(max_possibilities - len(border_pool) - 1, 0),
                    len(pool))) + [target_size - _dim.size]
        pool = pool + border_pool
      else:
        invalid_dim = True
        break
      possible_sizes.append(pool)
      names.append(_dim.name)
    if invalid_dim:
      continue
    for dim_combination in Shape.random_dimension_product(possible_sizes):
      remaining_shape = Shape(*zip(names, dim_combination))
      out_nts = TypeShape(
          nts.dtype,
          Shape(*zip(names, [
              d_.size + _d.size
              if _d.name == DimNames.CHANNEL or _d.name == DimNames.UNITS
              else _d.size
              for d_, _d in zip(remaining_shape.dim, nts.shape.dim)
          ])))
      if is_reachable(out_nts, target_output):
        yield ({IOLabel.MERGE_OTHER: TypeShape(nts.dtype, remaining_shape)},
               {IOLabel.MERGE_OUT: out_nts}, {IOLabel.MERGE_IN: label})
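# The "remaining" shape yielded above is the complement a second Merge input
# must provide to reach the target by concatenation. A toy illustration with
# a single UNITS axis (the helper name is illustrative only):
def _merge_complement_sketch(input_units, target_units):
  remaining = target_units - input_units
  return remaining if remaining > 0 else None

# an input with 20 units needs a 15-unit partner to hit a 35-unit target
assert _merge_complement_sketch(20, 35) == 15
assert _merge_complement_sketch(20, 20) is None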
def test_Flatten(self):
  IOLabel.DUMMY1 = 'DUMMY1'
  IOLabel.DUMMY2 = 'DUMMY2'
  _shape = Shape((DN.BATCH, -1), (DN.WIDTH, 8), (DN.HEIGHT, 8),
                 (DN.CHANNEL, 32))
  shape_ = Shape((DN.BATCH, -1), (DN.UNITS, 2048))
  _input = {IOLabel.DUMMY1: TypeShape(DDouble, _shape)}
  _output = TypeShape(DDouble, shape_)
  _expected_output = TypeShape(DDouble, shape_)
  pos = list(
      Flatten.possible_output_shapes(
          input_ntss=_input,
          target_output=_output,
          is_reachable=lambda x, y: NeuralNetwork.reachable(
              x, y, 1, {Flatten})))
  self.assertTrue(
      any([_expected_output in out.values() for _, out, _ in pos]))
  self.assertTrue(all([len(remaining) == 0 for remaining, _, _ in pos]))
  self.assertTrue(
      all([
          IOLabel.FLATTEN_IN in mapping and
          mapping[IOLabel.FLATTEN_IN] == IOLabel.DUMMY1
          for _, _, mapping in pos
      ]))
  dummyDF = TestSubFunctions.DummyDF()
  dummyDF._outputs = {IOLabel.DUMMY1: TypeShape(DDouble, _shape)}
  for _, out, _ in pos:
    for parameters in Flatten.generateParameters(
        input_dict={
            IOLabel.FLATTEN_IN: (IOLabel.DUMMY1, dummyDF.outputs,
                                 dummyDF.id_name)
        },
        expected_outputs={IOLabel.DUMMY2: _output},
        variable_pool={},
    )[0]:
      _flatten = Flatten(**parameters)
      pb = _flatten.get_pb()
      self.assertIsNotNone(pb)
      state = _flatten.__getstate__()
      self.assertIsNotNone(state)
      new_flatten = Flatten.__new__(Flatten)
      new_flatten.__setstate__(pb)
      self.assertEqual(_flatten, new_flatten)
      self.assertIsNot(_flatten, new_flatten)
      new_flatten = Flatten.__new__(Flatten)
      new_flatten.__setstate__(state)
      self.assertEqual(_flatten, new_flatten)
      self.assertIsNot(_flatten, new_flatten)
def test_recombination(self):
  batch = 1
  _data = TypeShape(DFloat,
                    Shape((DimNames.BATCH, batch), (DimNames.UNITS, 20)))
  IOLabel.DS1 = 'DS1'
  IOLabel.DS2 = 'DS2'
  inputs = {
      IOLabel.DS1: (IOLabel.DATA, _data, 'Dataset0'),
      IOLabel.DS2: (IOLabel.DATA, _data, 'Dataset1')
  }
  outShape = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 10))
  outShape1 = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 15))
  outputs = {
      'out0': TypeShape(DFloat, outShape),
      'out1': TypeShape(DFloat, outShape1)
  }
  functions = [Merge, Dense]
  OPNN0 = OverParameterizedNeuralNetwork(
      **{
          OverParameterizedNeuralNetwork.arg_INPUTS: inputs,
          OverParameterizedNeuralNetwork.arg_OUTPUT_TARGETS: outputs,
          OverParameterizedNeuralNetwork.arg_FUNCTIONS: functions,
          OverParameterizedNeuralNetwork.arg_MAX_BRANCH: 2,
      })
  pb = OPNN0.get_pb()
  pb_OPNN = object.__new__(OverParameterizedNeuralNetwork)
  pb_OPNN.__setstate__(pb)
  self.assertEqual(OPNN0, pb_OPNN)
  OPNN1 = OverParameterizedNeuralNetwork(
      **{
          OverParameterizedNeuralNetwork.arg_INPUTS: inputs,
          OverParameterizedNeuralNetwork.arg_OUTPUT_TARGETS: outputs,
          OverParameterizedNeuralNetwork.arg_FUNCTIONS: functions,
          OverParameterizedNeuralNetwork.arg_MAX_BRANCH: 2,
      })
  mut1 = OPNN0.recombine(OPNN1)[0]
  mut2 = OPNN1.recombine(OPNN0)[0]
  self.assertEqual(len(mut1.meta_functions), len(mut2.meta_functions))
  mut3 = mut1.recombine(mut2)[0]
  mut4 = mut2.recombine(mut1)[0]
  self.assertEqual(len(mut3.meta_functions), len(mut4.meta_functions))
  self.assertEqual(len(mut3.meta_functions), len(mut1.meta_functions))
  self.assertEqual(len(mut4.meta_functions), len(mut1.meta_functions))
  pass
def test_instantiation_USD_outputTypeShapes(self):
  batch = 3
  _data = TypeShape(
      DFloat,
      Shape((DimNames.BATCH, batch), (DimNames.CHANNEL, 3),
            (DimNames.HEIGHT, 4), (DimNames.WIDTH, 5)))
  outShape = Shape((DimNames.BATCH, batch), (DimNames.UNITS, 60))
  self.assertRaises(
      InvalidFunctionType, NeuralNetwork, **{
          NeuralNetwork.arg_INPUTS: {'data_in': (_data, 'Dataset')},
          NeuralNetwork.arg_OUTPUT_TARGETS: {
              'out0': TypeShape(DFloat, outShape)
          }
      })
def __init__(self, **kwargs):
  super(SupervisedData, self).__init__(**kwargs)
  self.batch = kwargs.get(self.arg_BATCH)
  self._namedOutputShapes = kwargs.get(self.arg_SHAPES)
  # prepend the batch dimension to every declared output shape
  for _shape in self._namedOutputShapes.values():
    _shape.shape.dim.insert(0, Shape.Dim(Shape.Dim.Names.BATCH, self.batch))
  self.train_X = kwargs.get(self.arg_TRAINX)
  self.train_Y = kwargs.get(self.arg_TRAINY)
  self.test_X = kwargs.get(self.arg_TESTX)
  self.test_Y = kwargs.get(self.arg_TESTY)
  self.valid_X = kwargs.get(self.arg_VALIDX)
  self.valid_Y = kwargs.get(self.arg_VALIDY)
  if self.train_X is None and self.test_X is None and self.valid_X is None:
    raise Exception('All input data is None!')
  if self.train_X is not None:
    dimension_sizes = next(iter(self.train_X)).shape
  elif self.test_X is not None:
    dimension_sizes = next(iter(self.test_X)).shape
  elif self.valid_X is not None:
    dimension_sizes = next(iter(self.valid_X)).shape
  if (self.train_X is not None and
      next(iter(self.train_X)).shape != dimension_sizes) or \
      (self.test_X is not None and
       next(iter(self.test_X)).shape != dimension_sizes) or \
      (self.valid_X is not None and
       next(iter(self.valid_X)).shape != dimension_sizes):
    raise Exception('Train, test and valid data must have the same size!')
  self.idx = 0
  self.len = 1
  self.data_X = self.test_X
  self.data_Y = self.test_Y
  self.state = ''
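# Sketch of the batch-dimension handling above: every declared output shape
# gets a leading BATCH dim with the configured batch size. Plain tuples stand
# in for Shape here; the helper is illustrative only.
def _with_batch_sketch(dims, batch):
  return (('BATCH', batch),) + tuple(dims)

assert _with_batch_sketch((('HEIGHT', 4), ('WIDTH', 5)), 3) == \
    (('BATCH', 3), ('HEIGHT', 4), ('WIDTH', 5))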
def pb2val(pb):
  whichone = pb.WhichOneof("v")
  if whichone == 'shape_val':
    shape_ = Shape.__new__(Shape)
    shape_.__setstate__(getattr(pb, whichone))
    return shape_
  elif whichone == 'type_val':
    return BaseType.pb2cls(getattr(pb, whichone))[0]
  elif whichone == 'list_val':
    _list = [pb2val(_pb) for _pb in pb.list_val.v]
    return np.asarray(_list) if getattr(pb.list_val, 'numpy', False) else _list
  elif whichone == 'set_val':
    return set([pb2val(_pb) for _pb in pb.set_val.v])
  elif whichone == 'tuple_val':
    return tuple([pb2val(_pb) for _pb in pb.tuple_val.v])
  elif whichone == 'nts_val':
    return TypeShape.from_pb(pb.nts_val)
  elif whichone == 'dict_val':
    # return dict([(elem.name, pb2val(elem.v)) for elem in pb.dict_val.vs])
    return dict([pb2val(elem) for elem in pb.dict_val.v])
  else:
    attr = str(whichone)
    if attr != 'None':
      return getattr(pb, attr)
    return None
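# pb2val dispatches on the name of the set protobuf oneof field and recurses
# into container kinds. The same pattern with a hand-rolled stand-in (the
# _ValueSketch class below is hypothetical and not part of the framework):
class _ValueSketch(object):
  def __init__(self, **kwargs):
    (self.kind, self.payload), = kwargs.items()  # exactly one field is set

def _value_to_py_sketch(v):
  if v.kind == 'list_val':
    return [_value_to_py_sketch(e) for e in v.payload]
  if v.kind == 'tuple_val':
    return tuple(_value_to_py_sketch(e) for e in v.payload)
  return v.payload  # scalar kinds pass through unchanged

assert _value_to_py_sketch(
    _ValueSketch(tuple_val=[_ValueSketch(int_val=1),
                            _ValueSketch(string_val='a')])) == (1, 'a')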
def test_step(self):
  node_ts = TypeShape(DFloat,
                      Shape((DimNames.BATCH, None), (DimNames.UNITS, 1)))
  inputs = {
      '%i:%i:%i' % (w, h, c):
      ('node', node_ts.__copy__(), 'data_src_%i:%i:%i' % (w, h, c))
      for w in range(5) for h in range(7) for c in range(3)
  }
  output_target = {'%02i' % (c): node_ts.__copy__() for c in range(11)}
  self.assertRaises(
      InvalidFunctionType, WeightAgnosticNeuralNetwork, **{
          WeightAgnosticNeuralNetwork.arg_INPUTS: inputs,
          WeightAgnosticNeuralNetwork.arg_OUTPUT_TARGETS: output_target,
      })
  objA = WeightAgnosticNeuralNetwork(
      **{
          WeightAgnosticNeuralNetwork.arg_INPUTS: inputs,
          WeightAgnosticNeuralNetwork.arg_OUTPUT_TARGETS: output_target,
          WeightAgnosticNeuralNetwork.arg_FUNCTIONS: [Perceptron],
      })
  self.assertIsNotNone(objA)
  mut_obj = objA.step(3)
  self.assertIsInstance(mut_obj, list)
  self.assertGreaterEqual(len(mut_obj), 1)
  mut_obj = mut_obj[0]
  self.assertIsInstance(mut_obj, WeightAgnosticNeuralNetwork)
  for _ in range(20):
    mut_obj = mut_obj.step(randint(1, 10))
    self.assertIsInstance(mut_obj, list)
    self.assertGreaterEqual(len(mut_obj), 1)
    mut_obj = mut_obj[0]
    self.assertIsInstance(mut_obj, WeightAgnosticNeuralNetwork)
def test_pb(self):
  train_samples = 10
  train_X = [np.random.rand(3, 4, 5) for _ in range(train_samples)]
  train_Y = [np.random.rand(2) for _ in range(train_samples)]
  batch = 3
  ds0 = UncorrelatedSupervised(
      train_X=train_X,
      train_Y=train_Y,
      batch=batch,
      typeShapes={
          IOLabel.DEFAULT:
              TypeShape(
                  DFloat,
                  Shape((DimNames.CHANNEL, 3), (DimNames.HEIGHT, 4),
                        (DimNames.WIDTH, 5)))
      })
  pb = ds0.get_pb()
  self.assertIsNotNone(pb)
  ds1 = UncorrelatedSupervised.__new__(UncorrelatedSupervised)
  ds1.__setstate__(pb)
  for data0, data1 in [(ds0.train_X, ds1.train_X),
                       (ds0.train_Y, ds1.train_Y),
                       (ds0.test_X, ds1.test_X),
                       (ds0.test_Y, ds1.test_Y)]:
    # either both sides are unset or they match element-wise
    self.assertTrue(data0 is None and data1 is None or
                    len(data0) == len(data1) and
                    all(np.array_equal(_d0, _d1)
                        for _d0, _d1 in zip(data0, data1)))
  self.assertEqual(ds0.idx, ds1.idx)
  self.assertEqual(ds0.len, ds1.len)
  self.assertEqual(ds0._id_name, ds1._id_name)
  self.assertIsNotNone(ds1)
@classmethod
def max_transform(cls, nts):
  if nts.dtype not in cls.allowedTypes:
    return None
  s = Shape()
  result = TypeShape(nts.dtype, s)
  for _dim in nts.shape.dim:
    if _dim.name == DimNames.BATCH or \
        _dim.name == DimNames.CHANNEL:
      s.dim.append(Shape.Dim(_dim.name, _dim.size))
    elif _dim.name == DimNames.WIDTH or \
        _dim.name == DimNames.HEIGHT:
      # spatial dimensions can grow at most by the maximum scaling factor
      s.dim.append(
          Shape.Dim(_dim.name, int(math.floor(_dim.size * cls.__max_f_hw))))
    else:
      return None
  return result
def test_Dense(self):
  IOLabel.DUMMY = 'DUMMY'
  IOLabel.DUMMY2 = 'DUMMY2'
  _shape = Shape((DN.BATCH, -1), (DN.UNITS, 16))
  _input = {IOLabel.DUMMY: TypeShape(DDouble, _shape)}
  _output = TypeShape(DDouble, _shape)
  _expected_output = TypeShape(DDouble, _shape)
  pos = list(
      Dense.possible_output_shapes(
          input_ntss=_input,
          target_output=_output,
          is_reachable=lambda x, y: NeuralNetwork.reachable(
              x, y, 1, {Dense})))
  self.assertTrue(
      any([_expected_output in out.values() for _, out, _ in pos]))
  self.assertTrue(all([len(remaining) == 0 for remaining, _, _ in pos]))
  self.assertTrue(
      all([
          IOLabel.DENSE_IN in mapping and
          mapping[IOLabel.DENSE_IN] == IOLabel.DUMMY
          for _, _, mapping in pos
      ]))
  dummyDF = TestSubFunctions.DummyDF()
  dummyDF._outputs = {IOLabel.DUMMY: TypeShape(DDouble, _shape)}
  for _, out, _ in pos:
    for parameters in Dense.generateParameters(
        input_dict={
            IOLabel.DENSE_IN: (IOLabel.DUMMY, dummyDF.outputs,
                               dummyDF.id_name)
        },
        expected_outputs={IOLabel.DUMMY2: _output},
        variable_pool={},
    )[0]:
      # check if parameters are correct?
      _dense = Dense(**parameters)
      pb = _dense.get_pb()
      self.assertIsNotNone(pb)
      state = _dense.__getstate__()
      self.assertIsNotNone(state)
      new_dense = Dense.__new__(Dense)
      new_dense.__setstate__(pb)
      self.assertEqual(_dense, new_dense)
      self.assertIsNot(_dense, new_dense)
      new_dense = Dense.__new__(Dense)
      new_dense.__setstate__(state)
      self.assertEqual(_dense, new_dense)
      self.assertIsNot(_dense, new_dense)
      m_dense = _dense.mutate(100)
      self.assertNotEqual(_dense, m_dense)
      m_dense = _dense.mutate(0)
      self.assertEqual(_dense, m_dense)
  pass
def test_simple_path(self):
  ntss = {
      IOLabel.DEFAULT:
          TypeShape(DFloat,
                    Shape((DimNames.BATCH, 1), (DimNames.UNITS, 23)))
  }
  target = TypeShape(DFloat,
                     Shape((DimNames.BATCH, 1), (DimNames.UNITS, 154)))
  depth = 5
  debug_node = 'debug'
  before = time.time()
  for _ in range(1):
    # reachable expects the TypeShape itself, not the label key
    NeuralNetwork.reachable(next(iter(ntss.values())), target, depth,
                            {Dense, Merge})
  print('Time', time.time() - before)
  print(
      NeuralNetwork.reachable(next(iter(ntss.values())), target, depth,
                              {Dense, Merge}))
  print(ntss)
  runs = 10000
  fails = 0
  for i in range(runs):
    blueprint = nx.DiGraph()
    blueprint.add_node(debug_node, ntss=ntss, DataFlowObj=None)
    out_node, nts_id, nodes = next(
        NeuralNetwork.simple_path(
            input_node=debug_node,
            input_ntss=ntss,
            output_shape=target,
            output_label=IOLabel.DEFAULT,
            blueprint=blueprint,
            min_depth=0,
            max_depth=depth,
            function_pool={Dense, Merge},
        ), (None, None, None))
    if out_node is None:
      # print(i, 'Error')
      fails += 1
    # else:
    #   print(i, 'Success')
  print('percentage failed:', fails / runs)
  pass
def __setstate__(self, state):
  # accept either serialized bytes or an already-parsed TypeShapeProto
  if isinstance(state, str) or isinstance(state, bytes):
    _proto_ob = TypeShapeProto()
    _proto_ob.ParseFromString(state)
  elif isinstance(state, TypeShapeProto):
    _proto_ob = state
  else:
    return
  self.dtype = BaseType.pb2cls(_proto_ob.dtype_val)
  self.shape = Shape.__new__(Shape)
  self.shape.__setstate__(_proto_ob.shape_val)
def test_classifier_individualOPACDG(self):
  train_samples = 1
  train_X = [np.random.rand(20) for _ in range(train_samples)]
  train_Y = [np.random.rand(10) for _ in range(train_samples)]
  batch = 1
  dataset = UncorrelatedSupervised(
      train_X=train_X,
      train_Y=train_Y,
      batch=batch,
      typeShapes={
          IOLabel.DATA: TypeShape(DFloat, Shape((DimNames.UNITS, 20))),
          IOLabel.TARGET: TypeShape(DFloat, Shape((DimNames.UNITS, 10)))
      },
      name='Dataset')
  ci = ClassifierIndividualOPACDG(
      **{
          ClassifierIndividualOPACDG.arg_DATA_NTS:
              dict([(ts_label, (ts, dataset.id_name))
                    for ts_label, ts in dataset.outputs.items()])
      })
  self.assertIsNotNone(ci)
  ci.metrics['debug'] = .3
  pb = ci.get_pb()
  self.assertIsNotNone(pb)
  state = ci.__getstate__()
  self.assertIsNotNone(state)
  state_obj = ClassifierIndividualOPACDG.__new__(ClassifierIndividualOPACDG)
  state_obj.__setstate__(state)
  self.assertIsNotNone(state_obj)
  self.assertIsNot(ci, state_obj)
  self.assertEqual(ci, state_obj)
  pb_obj = ClassifierIndividualOPACDG.__new__(ClassifierIndividualOPACDG)
  pb_obj.__setstate__(pb)
  self.assertIsNotNone(pb_obj)
  self.assertIsNot(ci, pb_obj)
  self.assertEqual(ci, pb_obj)
@classmethod
def max_transform(cls, nts):
  if nts.dtype not in cls.allowedTypes:
    return None
  s = Shape()
  image, vektor = False, False
  result = TypeShape(nts.dtype, s)
  for _dim in nts.shape.dim:
    if (_dim.name == DimNames.BATCH or
        _dim.name == DimNames.WIDTH or
        _dim.name == DimNames.HEIGHT):
      s.dim.append(Shape.Dim(_dim.name, _dim.size))
    elif _dim.name == DimNames.UNITS and not image:
      # pessimistic estimation
      s.dim.append(
          Shape.Dim(_dim.name,
                    int(math.ceil(_dim.size * (1 + cls.__max_f)))))
      vektor = True
    elif _dim.name == DimNames.CHANNEL and not vektor:
      # pessimistic estimation
      s.dim.append(
          Shape.Dim(_dim.name,
                    int(math.ceil(_dim.size * (1 + cls.__max_f)))))
      image = True
    else:
      return None
  return result
def test_dataSamplingTrain(self):
  train_samples = 10
  train_X = [np.random.rand(3, 4, 5) for _ in range(train_samples)]
  train_Y = [np.random.rand(2) for _ in range(train_samples)]
  batch = 3
  dataset = UncorrelatedSupervised(
      train_X=train_X,
      train_Y=train_Y,
      batch=batch,
      typeShapes={
          IOLabel.DEFAULT:
              TypeShape(
                  DFloat,
                  Shape((DimNames.CHANNEL, 3), (DimNames.HEIGHT, 4),
                        (DimNames.WIDTH, 5)))
      })
  for i in ['train', 'Train', 'TRAIN', 1, True]:
    for idx, d_set in enumerate(dataset(i)):
      self.assertEqual(len(d_set), 2)
      self.assertEqual(len(d_set[IOLabel.DATA]), batch)
      self.assertEqual(len(d_set[IOLabel.TARGET]), batch)
      self.assertTupleEqual(d_set[IOLabel.DATA][0].shape, train_X[0].shape)
      self.assertTupleEqual(d_set[IOLabel.TARGET][0].shape, train_Y[0].shape)
      if idx > 20:
        break
  for i in [{'train': 1}, {'Train': 1}, {'TRAIN': 1}, {'train': True},
            {'Train': True}, {'TRAIN': True}]:
    for idx, d_set in enumerate(dataset(**i)):
      self.assertEqual(len(d_set), 2)
      self.assertEqual(len(d_set[IOLabel.DATA]), batch)
      self.assertEqual(len(d_set[IOLabel.TARGET]), batch)
      self.assertTupleEqual(d_set[IOLabel.DATA][0].shape, train_X[0].shape)
      self.assertTupleEqual(d_set[IOLabel.TARGET][0].shape, train_Y[0].shape)
      if idx > 20:
        break
  pass
def test_recombine(self):
  node_ts = TypeShape(DFloat,
                      Shape((DimNames.BATCH, None), (DimNames.UNITS, 1)))
  inputs = {
      '%i:%i:%i' % (w, h, c):
      ('node', node_ts.__copy__(), 'data_src_%i:%i:%i' % (w, h, c))
      for w in range(5) for h in range(7) for c in range(3)
  }
  output_target = {'%02i' % (c): node_ts.__copy__() for c in range(10)}
  self.assertRaises(
      InvalidFunctionType, WeightAgnosticNeuralNetwork, **{
          WeightAgnosticNeuralNetwork.arg_INPUTS: inputs,
          WeightAgnosticNeuralNetwork.arg_OUTPUT_TARGETS: output_target,
      })
  objA = WeightAgnosticNeuralNetwork(
      **{
          WeightAgnosticNeuralNetwork.arg_INPUTS: inputs,
          WeightAgnosticNeuralNetwork.arg_OUTPUT_TARGETS: output_target,
          WeightAgnosticNeuralNetwork.arg_FUNCTIONS: [Perceptron],
      })
  self.assertIsNotNone(objA)
  objB = WeightAgnosticNeuralNetwork(
      **{
          WeightAgnosticNeuralNetwork.arg_INPUTS: inputs,
          WeightAgnosticNeuralNetwork.arg_OUTPUT_TARGETS: output_target,
          WeightAgnosticNeuralNetwork.arg_FUNCTIONS: [Perceptron],
      })
  self.assertIsNotNone(objB)
  rec_objA = objA.recombine(objB)
  self.assertIsInstance(rec_objA, list)
  self.assertGreaterEqual(len(rec_objA), 2)
  rec_objA = rec_objA[0]
  self.assertIsInstance(rec_objA, WeightAgnosticNeuralNetwork)
  rec_objB = objB.recombine(objA)
  self.assertIsInstance(rec_objB, list)
  self.assertGreaterEqual(len(rec_objB), 2)
  rec_objB = rec_objB[0]
  self.assertIsInstance(rec_objB, WeightAgnosticNeuralNetwork)
  for _ in range(20):
    rec_objA = rec_objA.recombine(rec_objB)
    self.assertIsInstance(rec_objA, list)
    self.assertGreaterEqual(len(rec_objA), 2)
    rec_objA = rec_objA[0]
    self.assertIsInstance(rec_objA, WeightAgnosticNeuralNetwork)
    rec_objB = rec_objB.recombine(rec_objA)
    self.assertIsInstance(rec_objB, list)
    self.assertGreaterEqual(len(rec_objB), 2)
    rec_objB = rec_objB[0]
    self.assertIsInstance(rec_objB, WeightAgnosticNeuralNetwork)
@classmethod
def min_transform(cls, nts):
  if nts.dtype not in cls.allowedTypes:
    return None
  s = Shape()
  result = TypeShape(nts.dtype, s)
  units = 1
  num_units = 0
  for _dim in nts.shape.dim:
    if _dim.name == DimNames.BATCH:
      s.dim.append(Shape.Dim(_dim.name, _dim.size))
    elif (_dim.name == DimNames.WIDTH or
          _dim.name == DimNames.HEIGHT or
          _dim.name == DimNames.CHANNEL):
      units *= _dim.size
    elif _dim.name == DimNames.UNITS:
      units *= _dim.size
      num_units += 1
    else:
      return None
  # reject inputs that already carry exactly one UNITS dimension
  if num_units == 1:
    return None
  s.dim.append(Shape.Dim(DimNames.UNITS, units))
  return result
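# Worked example for the transform above (assuming it is the Flatten-style
# minimum transform): an input of (BATCH 3, CHANNEL 3, HEIGHT 4, WIDTH 5)
# collapses to (BATCH 3, UNITS 60), since 3 * 4 * 5 = 60; this matches the
# 60-unit target used in test_instantiation_USD_outputTypeShapes above.
units = 1
for size in (3, 4, 5):  # CHANNEL, HEIGHT, WIDTH sizes
  units *= size
assert units == 60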
@classmethod
def possible_output_shapes(cls,
                           input_ntss: Dict[str, TypeShape],
                           target_output: TypeShape,
                           is_reachable,
                           max_possibilities: int = 10,
                           **kwargs) -> \
    List[Tuple[Dict[str, TypeShape], Dict[str, TypeShape], Dict[str, str]]]:
  allowed_in_dimensions = {DimNames.CHANNEL, DimNames.WIDTH, DimNames.HEIGHT}
  # target_shape = target_output.shape
  for label, nts in input_ntss.items():
    if nts.dtype not in cls.allowedTypes:
      continue
    units = 1
    invalid_dim = False
    batch = False
    batch_size = -1
    for _dim in nts.shape.dim:
      if _dim.name in allowed_in_dimensions:
        # all spatial/channel sizes are multiplied into one UNITS dimension
        units *= _dim.size
      elif _dim.name == DimNames.BATCH:
        batch = True
        batch_size = _dim.size
      else:
        invalid_dim = True
        break
    if invalid_dim:
      continue
    out_nts = TypeShape(nts.dtype,
                        Shape((DimNames.BATCH, batch_size),
                              (DimNames.UNITS, units))) if batch else \
        TypeShape(nts.dtype, Shape((DimNames.UNITS, units)))
    if is_reachable(out_nts, target_output):
      yield ({}, {IOLabel.FLATTEN_OUT: out_nts}, {IOLabel.FLATTEN_IN: label})
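# Minimal sketch of the Flatten rule above: all CHANNEL/WIDTH/HEIGHT sizes
# are multiplied into a single UNITS dimension while BATCH is kept. Plain
# tuples stand in for Shape; the helper is illustrative only.
def _flatten_shape_sketch(dims):
  units, has_batch, batch_size = 1, False, -1
  for name, size in dims:
    if name == 'BATCH':
      has_batch, batch_size = True, size
    elif name in ('CHANNEL', 'WIDTH', 'HEIGHT'):
      units *= size
    else:
      return None  # any other dimension makes the input un-flattenable
  if has_batch:
    return (('BATCH', batch_size), ('UNITS', units))
  return (('UNITS', units),)

# matches the shapes used in test_Flatten: 8 * 8 * 32 = 2048 units
assert _flatten_shape_sketch([('BATCH', -1), ('WIDTH', 8), ('HEIGHT', 8),
                              ('CHANNEL', 32)]) == (('BATCH', -1),
                                                    ('UNITS', 2048))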
def test_func_children(self):
  ntss = {
      IOLabel.DEFAULT:
          TypeShape(
              DFloat,
              Shape((DimNames.BATCH, 1), (DimNames.HEIGHT, 10),
                    (DimNames.WIDTH, 10), (DimNames.CHANNEL, 2)))
  }
  target = TypeShape(
      DFloat,
      Shape(
          (DimNames.BATCH, 1),
          (DimNames.UNITS, 200)
          # (DimNames.HEIGHT, 32),
          # (DimNames.WIDTH, 32),
          # (DimNames.CHANNEL, 3)
      ))
  _f = Flatten
  for _, out_nts, _ in _f.possible_output_shapes(
      ntss, target,
      lambda x, y: NeuralNetwork.reachable(x, y, 0, {Flatten}), 10):
    print(next(iter(out_nts.values())))
  pass
def subclassTest(self, cls):
  obj = cls()
  pb = obj.get_pb()
  self.assertIsNotNone(pb)
  new_obj = Regularisation.__new__(Regularisation)
  new_obj.__setstate__(pb)
  self.assertEqual(obj, new_obj)
  self.assertIsNot(obj, new_obj)
  state = obj.__getstate__()
  new_obj = Regularisation.__new__(Regularisation)
  new_obj.__setstate__(state)
  self.assertEqual(obj, new_obj)
  self.assertIsNot(obj, new_obj)
  obj.attr = {
      'int_val': random.randint(0, 100),
      'float_val': random.random(),
      'string_val': "asdfasdf",
      'bool_val': True,
      'bool_val2': False,
      'bytes_val': random.randint(0, 100).to_bytes(10, byteorder='big'),
      'shape_val': Shape((DN.BATCH, -1), (DN.WIDTH, 16), (DN.HEIGHT, 16),
                         (DN.CHANNEL, 3)),
      'dtype_val': DDouble,
      'list_val': [1, 2, 3, 4],
  }
  pb = obj.get_pb()
  self.assertIsNotNone(pb)
  new_obj = Regularisation.__new__(Regularisation)
  new_obj.__setstate__(pb)
  self.assertEqual(obj, new_obj)
  self.assertIsNot(obj, new_obj)
  state = obj.__getstate__()
  new_obj = Regularisation.__new__(Regularisation)
  new_obj.__setstate__(state)
  self.assertEqual(obj, new_obj)
  self.assertIsNot(obj, new_obj)
  # removing any attribute must break equality
  del obj.attr[next(iter(obj.attr))]
  self.assertNotEqual(obj, new_obj)
def test_instantiate(self):
  node_ts = TypeShape(DFloat,
                      Shape((DimNames.BATCH, None), (DimNames.UNITS, 1)))
  inputs = {
      '%i:%i:%i' % (w, h, c):
      ('node', node_ts.__copy__(), 'data_src_%i:%i:%i' % (w, h, c))
      for w in range(5) for h in range(7) for c in range(3)
  }
  output_target = {'%02i' % (c): node_ts.__copy__() for c in range(11)}
  self.assertRaises(
      InvalidFunctionType, WeightAgnosticNeuralNetwork, **{
          WeightAgnosticNeuralNetwork.arg_INPUTS: inputs,
          WeightAgnosticNeuralNetwork.arg_OUTPUT_TARGETS: output_target,
      })
  obj = WeightAgnosticNeuralNetwork(
      **{
          WeightAgnosticNeuralNetwork.arg_INPUTS: inputs,
          WeightAgnosticNeuralNetwork.arg_OUTPUT_TARGETS: output_target,
          WeightAgnosticNeuralNetwork.arg_FUNCTIONS: [Perceptron],
      })
  self.assertIsNotNone(obj)
  _pb = obj.get_pb()
  self.assertIsNotNone(_pb)
  pb_obj = object.__new__(WeightAgnosticNeuralNetwork)
  pb_obj.__setstate__(_pb)
  self.assertEqual(obj, pb_obj)
  self.assertIsNot(obj, pb_obj)
  _state = obj.__getstate__()
  self.assertIsNotNone(_state)
  state_obj = object.__new__(WeightAgnosticNeuralNetwork)
  state_obj.__setstate__(_state)
  self.assertEqual(obj, state_obj)
  self.assertIsNot(obj, state_obj)
def test_pb(self):
  dummy_in = TestFunction.dummySubClass(
      variables=[self.randomVariable() for _ in range(random.randint(1, 3))],
      input_mapping={},
      attributes={
          'int_val': random.randint(0, 100),
          'float_val': random.random(),
          'string_val': "asdfasdf",
          'bool_val': True,
      },
      dtype=DDouble)
  IOLabel.OTHER = 'OTHER'
  obj = TestFunction.dummySubClass(
      variables=[self.randomVariable() for _ in range(random.randint(1, 3))],
      input_mapping={
          IOLabel.DEFAULT: (IOLabel.DEFAULT, dummy_in),
          IOLabel.OTHER: (IOLabel.DEFAULT, dummy_in.id_name)
      },
      attributes={
          'int_val': random.randint(0, 100),
          'float_val': random.random(),
          'string_val': "asdfasdf",
          'bool_val': True,
          'bool_val2': False,
          'bytes_val': random.randint(0, 100).to_bytes(10, byteorder='big'),
          'shape_val': Shape((DN.BATCH, -1), (DN.WIDTH, 16), (DN.HEIGHT, 16),
                             (DN.CHANNEL, 3)),
          'dtype_val': DDouble,
          'list_val': [1, 2, 3, 4],
      },
      dtype=DDouble)
  pb = obj.get_pb()
  self.assertIsNotNone(pb)
  state = obj.__getstate__()
  self.assertIsNotNone(state)
  new_obj = Function.get_instance(state)
  self.assertIs(obj, new_obj)
  new_obj = Function.__new__(Function)
  new_obj.__setstate__(state)
  self.assertEqual(obj, new_obj)
  self.assertIsNot(obj, new_obj)
  new_obj = Function.__new__(Function)
  new_obj.__setstate__(pb)
  self.assertEqual(obj, new_obj)
  self.assertIsNot(obj, new_obj)
  new_obj2 = Function.get_instance(state)
  self.assertIs(new_obj, new_obj2)
  # a modified protobuf must no longer compare equal
  pb.attr[0].v.int_val = -1
  new_obj = Function.__new__(Function)
  new_obj.__setstate__(pb)
  self.assertNotEqual(obj, new_obj)