def test_image_blocks(tmp_path):
    """Smoke-test an image graph: Normalization -> Augmentation -> parallel
    ResNet-v2/Xception branches -> Merge -> ClassificationHead.

    Fix: the original generated regression targets (shape (1,)) but fed them
    to a ClassificationHead; the labels are now one-hot classification labels
    so the head and the targets agree.
    """
    num_instances = 10
    # Only the images are used from MNIST; targets are generated below.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train[:num_instances]
    # One-hot labels matching the ClassificationHead
    # (was: utils.generate_data(..., shape=(1,)) regression targets).
    y_classification = utils.generate_one_hot_labels(
        num_instances=num_instances, num_classes=10)

    input_node = ak.ImageInput()
    output = ak.Normalization()(input_node)
    output = ak.ImageAugmentation()(output)
    outputs1 = ak.ResNetBlock(version="v2")(output)
    outputs2 = ak.XceptionBlock()(output)
    output_node = ak.Merge()((outputs1, outputs2))
    output_node = ak.ClassificationHead()(output_node)

    automodel = ak.AutoModel(
        inputs=input_node,
        outputs=output_node,
        directory=tmp_path,
        max_trials=1,
        seed=utils.SEED,
    )
    automodel.fit(
        x_train,
        y_classification,
        validation_data=(x_train, y_classification),
        epochs=1,
    )
def test_image_blocks(tmp_path):
    """Fit a two-branch (ResNet-v2 + Xception) image classifier for one epoch
    on synthetic data as a smoke test."""
    n = 10
    # Synthetic 28x28 images with random integer class labels in [0, 10).
    images = test_utils.generate_data(num_instances=n, shape=(28, 28))
    labels = np.random.randint(0, 10, n)

    # Graph: normalize + augment once, then run two branches in parallel.
    source = ak.ImageInput()
    pipeline = ak.ImageAugmentation()(ak.Normalization()(source))
    branch_a = ak.ResNetBlock(version="v2")(pipeline)
    branch_b = ak.XceptionBlock()(pipeline)
    merged = ak.Merge()((branch_a, branch_b))
    head = ak.ClassificationHead()(merged)

    model = ak.AutoModel(
        inputs=source,
        outputs=head,
        directory=tmp_path,
        max_trials=1,
        seed=test_utils.SEED,
    )
    # Validate on the training data itself — this is only a smoke test.
    model.fit(images, labels, validation_data=(images, labels), epochs=1)
def train_ak():
    """Train an AutoKeras image classifier on the .jpg files under
    ``config.database_path``.

    Loads and decodes every image via ``utils.process_path``, splits the data
    into train/validation at index ``config.split``, runs an AutoModel search,
    evaluates the best model, and saves it to disk.

    Returns:
        0 on completion.
    """
    image_count = len(list(config.database_path.glob('**/*.jpg')))
    print("# of images found:", image_count)

    list_ds = tf.data.Dataset.list_files(str(config.database_path / '*/*.jpg'),
                                         shuffle=False)
    # Shuffle once; keep the order stable across epochs so the split below is fixed.
    list_ds = list_ds.shuffle(image_count, reshuffle_each_iteration=False)

    # Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    train_ds = list_ds.map(utils.process_path, num_parallel_calls=AUTOTUNE)

    # Materialize the dataset ONCE. The original iterated `list(train_ds)`
    # twice, decoding every image a second time just to collect the labels.
    examples = [(image.numpy(), label.numpy()) for image, label in train_ds]
    features = np.array([list(image) for image, _ in examples])
    labels = np.array([label for _, label in examples])

    input_node = ak.ImageInput()
    output_node = ak.Normalization()(input_node)
    # NOTE(review): rotation_factor/zoom_factor receive False where a float
    # factor is expected — presumably meant to disable those augmentations;
    # confirm False vs 0.0 against the AutoKeras ImageAugmentation API.
    output_node = ak.ImageAugmentation(horizontal_flip=False,
                                       vertical_flip=False,
                                       rotation_factor=False,
                                       zoom_factor=False)(output_node)
    output_node = ak.ClassificationHead()(output_node)
    clf = ak.AutoModel(inputs=input_node,
                       outputs=output_node,
                       overwrite=True,
                       max_trials=config.max_trials,
                       directory=config.outpath_mpii)

    # Train/validation split at a fixed index.
    split = config.split
    x_train, y_train = features[:split], labels[:split]
    x_val, y_val = features[split:], labels[split:]

    clf.fit(x_train, y_train, validation_data=(x_val, y_val),
            epochs=config.epochs)

    # Evaluate the best model on the held-out data.
    print(clf.evaluate(x_val, y_val))

    # Export the best pipeline as a plain Keras model and save it.
    model = clf.export_model()
    print(type(model))  # <class 'tensorflow.python.keras.engine.training.Model'>
    # NOTE(review): plain string concatenation — if config.output_path has no
    # trailing separator the filename fuses onto the directory name; consider
    # os.path.join / pathlib. Left as-is to preserve behavior.
    model.save(config.output_path + "model_ak_imgClsf.h5")
    return 0
def test_functional_api(tmp_dir):
    """End-to-end multi-modal (image + text + structured data) functional-API
    test with a regression head and a classification head.

    Fix: the ResNet/Xception branches were wired to the raw ``image_input``,
    which left the Normalization/ImageAugmentation pipeline stored in
    ``output`` as dead code; they now consume ``output``.
    """
    # Prepare the data.
    num_instances = 20
    (image_x, train_y), (test_x, test_y) = mnist.load_data()
    (text_x, train_y), (test_x, test_y) = common.imdb_raw()
    (structured_data_x, train_y), (test_x, test_y) = common.dataframe_numpy()
    image_x = image_x[:num_instances]
    text_x = text_x[:num_instances]
    structured_data_x = structured_data_x[:num_instances]
    classification_y = common.generate_one_hot_labels(
        num_instances=num_instances, num_classes=3)
    regression_y = common.generate_data(num_instances=num_instances,
                                        shape=(1, ))

    # Build model and train.
    image_input = ak.ImageInput()
    output = ak.Normalization()(image_input)
    output = ak.ImageAugmentation()(output)
    # Feed the normalized/augmented tensor (was: image_input, which bypassed
    # the preprocessing above).
    outputs1 = ak.ResNetBlock(version='next')(output)
    outputs2 = ak.XceptionBlock()(output)
    image_output = ak.Merge()((outputs1, outputs2))

    structured_data_input = ak.StructuredDataInput(
        column_names=common.COLUMN_NAMES_FROM_CSV,
        column_types=common.COLUMN_TYPES_FROM_CSV)
    structured_data_output = ak.FeatureEngineering()(structured_data_input)
    structured_data_output = ak.DenseBlock()(structured_data_output)

    text_input = ak.TextInput()
    outputs1 = ak.TextToIntSequence()(text_input)
    outputs1 = ak.EmbeddingBlock()(outputs1)
    outputs1 = ak.ConvBlock(separable=True)(outputs1)
    outputs1 = ak.SpatialReduction()(outputs1)
    outputs2 = ak.TextToNgramVector()(text_input)
    outputs2 = ak.DenseBlock()(outputs2)
    text_output = ak.Merge()((outputs1, outputs2))

    merged_outputs = ak.Merge()(
        (structured_data_output, image_output, text_output))
    regression_outputs = ak.RegressionHead()(merged_outputs)
    classification_outputs = ak.ClassificationHead()(merged_outputs)

    automodel = ak.GraphAutoModel(
        inputs=[image_input, text_input, structured_data_input],
        directory=tmp_dir,
        outputs=[regression_outputs, classification_outputs],
        max_trials=2,
        seed=common.SEED)
    automodel.fit((image_x, text_x, structured_data_x),
                  (regression_y, classification_y),
                  validation_split=0.2,
                  epochs=2)
def test_functional_api(tmp_path):
    """Train a multi-modal AutoModel (image + text + structured data) with one
    regression head and one classification head, using the Hyperband tuner."""
    # ---- Data -----------------------------------------------------------
    num_instances = 80
    (image_x, train_y), (test_x, test_y) = mnist.load_data()
    (text_x, train_y), (test_x, test_y) = utils.imdb_raw()
    (structured_data_x, train_y), (test_x, test_y) = utils.dataframe_numpy()
    image_x = image_x[:num_instances]
    text_x = text_x[:num_instances]
    structured_data_x = structured_data_x[:num_instances]
    classification_y = utils.generate_one_hot_labels(
        num_instances=num_instances, num_classes=3)
    regression_y = utils.generate_data(num_instances=num_instances,
                                       shape=(1, ))

    # ---- Image branch: preprocess, then ResNeXt and Xception in parallel.
    image_input = ak.ImageInput()
    output = ak.ImageAugmentation()(ak.Normalization()(image_input))
    image_output = ak.Merge()((ak.ResNetBlock(version='next')(output),
                               ak.XceptionBlock()(output)))

    # ---- Structured-data branch.
    structured_data_input = ak.StructuredDataInput()
    structured_data_output = ak.DenseBlock()(
        ak.CategoricalToNumerical()(structured_data_input))

    # ---- Text branch: a sequence path and an n-gram path, merged.
    text_input = ak.TextInput()
    seq_path = ak.TextToIntSequence()(text_input)
    seq_path = ak.Embedding()(seq_path)
    seq_path = ak.ConvBlock(separable=True)(seq_path)
    seq_path = ak.SpatialReduction()(seq_path)
    ngram_path = ak.DenseBlock()(ak.TextToNgramVector()(text_input))
    text_output = ak.Merge()((seq_path, ngram_path))

    # ---- Heads: both read from the merged representation.
    merged_outputs = ak.Merge()(
        (structured_data_output, image_output, text_output))
    regression_outputs = ak.RegressionHead()(merged_outputs)
    classification_outputs = ak.ClassificationHead()(merged_outputs)

    automodel = ak.AutoModel(
        inputs=[image_input, text_input, structured_data_input],
        directory=tmp_path,
        outputs=[regression_outputs, classification_outputs],
        max_trials=2,
        tuner=ak.Hyperband,
        seed=utils.SEED)
    automodel.fit((image_x, text_x, structured_data_x),
                  (regression_y, classification_y),
                  validation_split=0.2,
                  epochs=1)
def functional_api():
    """Search for a CIFAR-10 classifier over a fixed ResNeXt-based graph and
    return its evaluation on the test split."""
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    # Chain: normalize -> augment -> ResNeXt -> spatial reduce -> dense -> head.
    node = ak.ImageInput()
    graph = ak.Normalization()(node)
    graph = ak.ImageAugmentation()(graph)
    graph = ak.ResNetBlock(version='next')(graph)
    graph = ak.SpatialReduction()(graph)
    graph = ak.DenseBlock()(graph)
    graph = ak.ClassificationHead()(graph)

    clf = ak.AutoModel(node, graph, seed=5, max_trials=3)
    clf.fit(x_train, y_train, validation_split=0.2)
    return clf.evaluate(x_test, y_test)
id4 --> id6(Merge) id5 --> id6 id7(StructuredDataInput) --> id8(CategoricalToNumerical) id8 --> id9(DenseBlock) id6 --> id10(Merge) id9 --> id10 id10 --> id11(Classification Head) id10 --> id12(Regression Head) </div> """ import autokeras as ak input_node1 = ak.ImageInput() output_node = ak.Normalization()(input_node1) output_node = ak.ImageAugmentation()(output_node) output_node1 = ak.ConvBlock()(output_node) output_node2 = ak.ResNetBlock(version='v2')(output_node) output_node1 = ak.Merge()([output_node1, output_node2]) input_node2 = ak.StructuredDataInput() output_node = ak.CategoricalToNumerical()(input_node2) output_node2 = ak.DenseBlock()(output_node) output_node = ak.Merge()([output_node1, output_node2]) output_node1 = ak.ClassificationHead()(output_node) output_node2 = ak.RegressionHead()(output_node) auto_model = ak.AutoModel(inputs=[input_node1, input_node2], outputs=[output_node1, output_node2], overwrite=True,
output_node = ak.ClassificationHead()(output_node) clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=1) clf.fit(x_train, y_train, epochs=10) """ The usage of AutoModel is similar to the functional API of Keras. Basically, you are building a graph, whose edges are blocks and the nodes are intermediate outputs of blocks. To add an edge from input_node to output_node, use output_node = ak.[some_block]([block_args])(input_node). You can also use more fine-grained blocks to customize the search space even further. See the following example. """ input_node = ak.ImageInput() output_node = ak.Normalization()(input_node) output_node = ak.ImageAugmentation(horizontal_flip=False)(output_node) output_node = ak.ResNetBlock(version="v2")(output_node) output_node = ak.ClassificationHead()(output_node) clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=1) clf.fit(x_train, y_train, epochs=10) """ ## Data Format The AutoKeras ImageClassifier is quite flexible for the data format. For the image, it accepts data formats both with and without the channel dimension. The images in the MNIST dataset do not have the channel dimension. Each image is a matrix with shape (28, 28). AutoKeras also accepts images of three dimensions with the channel dimension at last, e.g., (32, 32, 3), (28, 28, 1). For the classification labels, AutoKeras accepts both plain labels, i.e. strings or integers, and one-hot encoded labels, i.e. vectors of 0s and 1s.
)(input_node) output_node = ak.RegressionHead()(output_node) reg = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=10) reg.fit(x_train, y_train, epochs=3) """ The usage of AutoModel is similar to the functional API of Keras. Basically, you are building a graph, whose edges are blocks and the nodes are intermediate outputs of blocks. To add an edge from input_node to output_node, use output_node = ak.some_block(input_node). You can also use more fine-grained blocks to customize the search space even further. See the following example. """ input_node = ak.ImageInput() output_node = ak.Normalization()(input_node) output_node = ak.ImageAugmentation(translation_factor=0.3)(output_node) output_node = ak.ResNetBlock(version="v2")(output_node) output_node = ak.RegressionHead()(output_node) clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=10) clf.fit(x_train, y_train, epochs=3) """ ### **Data Format** """ """ The AutoKeras ImageClassifier is quite flexible for the data format. For the image, it accepts data formats both with and without the channel dimension. The images in the IMDB-Wiki dataset do not have a channel dimension. Each image is a matrix with shape (128, 128). AutoKeras also accepts images with a channel dimension at last, e.g., (32, 32, 3), (28, 28, 1).
output_node = ak.ClassificationHead()(output_node) clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=10) clf.fit(x_train, y_train, epochs=3) """ The usage of AutoModel is similar to the functional API of Keras. Basically, you are building a graph, whose edges are blocks and the nodes are intermediate outputs of blocks. To add an edge from input_node to output_node, use output_node = ak.[some_block]([block_args])(input_node). You can also use more fine-grained blocks to customize the search space even further. See the following example. """ input_node = ak.ImageInput() output_node = ak.Normalization()(input_node) output_node = ak.ImageAugmentation(percentage=0.3)(output_node) output_node = ak.ResNetBlock(version="v2")(output_node) output_node = ak.ClassificationHead()(output_node) clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=10) clf.fit(x_train, y_train, epochs=3) """ ## Data Format """ """ The AutoKeras ImageClassifier is quite flexible for the data format. For the image, it accepts data formats both with and without the channel dimension. The images in the MNIST dataset do not have the channel dimension. Each image is a matrix with shape (28, 28). AutoKeras also accepts images of three dimensions with the channel dimension at last, e.g., (32, 32, 3), (28, 28, 1).