Exemplo n.º 1
0
    def test_basic(self):
        """Load the image dataset, turn it into a dataframe, read the image
        file names with the dummy reader, and verify names and metadata."""
        doc_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), 'data', 'datasets',
                         'image_dataset_1', 'datasetDoc.json'))

        dataset = container.Dataset.load(utils.path_to_uri(doc_path))

        # Dataset -> DataFrame, selecting resource '0'.
        df_hyperparams_cls = DatasetToDataFramePrimitive.metadata.get_hyperparams()
        df_primitive = DatasetToDataFramePrimitive(
            hyperparams=df_hyperparams_cls.defaults().replace(
                {'dataframe_resource': '0'}))
        dataframe = df_primitive.produce(inputs=dataset).value

        # The dummy reader replaces the filename column content, yielding the
        # file names themselves rather than decoded image data.
        img_hyperparams_cls = DummyImageReaderPrimitive.metadata.get_hyperparams()
        img_primitive = DummyImageReaderPrimitive(
            hyperparams=img_hyperparams_cls.defaults().replace(
                {'return_result': 'replace'}))
        images_names = img_primitive.produce(inputs=dataframe).value

        expected_names = [
            '001_HandPhoto_left_01.jpg',
            'cifar10_bird_1.png',
            'cifar10_bird_2.png',
            'mnist_0_2.png',
            'mnist_1_1.png',
        ]
        for row, expected in enumerate(expected_names):
            self.assertEqual(images_names.iloc[row]['filename'][0], expected)

        self._test_metadata(images_names.metadata)
Exemplo n.º 2
0
import os

from d3m.container.dataset import D3MDatasetLoader, Dataset, CSVLoader

from dsbox.datapreprocessing.cleaner import MeanImputation, MeanHyperparameter

from dsbox.datapreprocessing.cleaner.denormalize import Denormalize, DenormalizeHyperparams as hyper_DE
from common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive, Hyperparams as hyper_DD
from common_primitives.extract_columns_semantic_types import ExtractColumnsBySemanticTypesPrimitive

# Default hyperparameters for the denormalize and dataset-to-dataframe steps.
h_DE = hyper_DE.defaults()
h_DD = hyper_DD.defaults()

# NOTE(review): plain dicts are passed as hyperparams below, but d3m primitives
# normally expect a Hyperparams instance — confirm this works with the
# installed common_primitives version.
h_attr = {'semantic_types': ('https://metadata.datadrivendiscovery.org/types/Attribute',),'use_columns': (), 'exclude_columns': ()}
h_target = {'semantic_types': ('https://metadata.datadrivendiscovery.org/types/Target','https://metadata.datadrivendiscovery.org/types/SuggestedTarget',), 'use_columns': (), 'exclude_columns': ()}

primitive_0 = Denormalize(hyperparams=h_DE)
primitive_1 = DatasetToDataFramePrimitive(hyperparams=h_DD)


primitive_3 = ExtractColumnsBySemanticTypesPrimitive(hyperparams=h_attr)
primitive_4 = ExtractColumnsBySemanticTypesPrimitive(hyperparams=h_target)

# global variables
dataset_file_path = "dsbox/unit_tests/resources/38_sick_data/datasetDoc.json"

dataset = D3MDatasetLoader()
# Resolve the relative path to an absolute file:// URI before loading.
dataset = dataset.load('file://{dataset_doc_path}'.format(dataset_doc_path=os.path.abspath(dataset_file_path)))

# Denormalize -> dataframe -> extract attribute columns.
result0 = primitive_0.produce(inputs=dataset)
result1 = primitive_1.produce(inputs=result0.value)

X = primitive_3.produce(inputs=result1.value).value
Exemplo n.º 3
0
# Load the dataset referenced by `path` (defined earlier in the file).
dataset = container.Dataset.load('file://{uri}'.format(uri=path))

#==============================training dataset================================
# Mark the column at `target_index` as both the declared and true target so
# downstream primitives can locate it via semantic types.
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Target')
dataset.metadata = dataset.metadata.add_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/TrueTarget')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 15), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 1), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 10), 'https://metadata.datadrivendiscovery.org/types/Attribute')
# dataset.metadata = dataset.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, 13), 'https://metadata.datadrivendiscovery.org/types/Attribute')



print('\nDataset to Dataframe')
# Fetch the Hyperparams class from primitive metadata and run with defaults.
hyperparams_class = DatasetToDataFramePrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
primitive = DatasetToDataFramePrimitive(hyperparams=hyperparams_class.defaults())
call_metadata = primitive.produce(inputs=dataset)
dataframe = call_metadata.value

print('\n metadata generation')
# Restrict the profiler to the listed semantic types when inferring metadata.
hyperparams_class = SimpleProfilerPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
profile_primitive = SimpleProfilerPrimitive(hyperparams=hyperparams_class.defaults().replace({'detect_semantic_types': ['https://metadata.datadrivendiscovery.org/types/CategoricalData',
                'http://schema.org/Integer', 'http://schema.org/Float', 'http://schema.org/Text', 'https://metadata.datadrivendiscovery.org/types/Attribute','https://metadata.datadrivendiscovery.org/types/PrimaryKey']}))
profile_primitive.set_training_data(inputs = dataframe)
profile_primitive.fit()
call_metadata = profile_primitive.produce(inputs=dataframe)
dataframe = call_metadata.value

print('\n remove semantic type')
# dataframe.metadata = dataframe.metadata.remove_semantic_type(('learningData', metadata_base.ALL_ELEMENTS, target_index), 'https://metadata.datadrivendiscovery.org/types/Attribute')
hyperparams_class = RemoveSemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
Exemplo n.º 4
0
    def test_1(self):
        """End-to-end k-means text-clustering pipeline: load the sentiment
        dataset, denormalize, build bag-of-words features, fit k-means, then
        score against the SCORE split and report the misclassification rate."""
        print('\n')
        # NOTE(review): message says test-2 but the method is test_1 — confirm intent.
        print('running test-2..............')
        # Loading training dataset.
        base_path = "/ubc_primitives/datasets/seed_datasets_current/LL1_TXT_CLS_apple_products_sentiment"
        dataset_doc_path = os.path.join(base_path,\
                                        'TRAIN/dataset_TRAIN',\
                                        'datasetDoc.json')
        dataset = Dataset.load('file://{dataset_doc_path}'.format(
            dataset_doc_path=dataset_doc_path))

        # Step 0: Denormalize primitive
        denormalize_hyperparams_class = DenormalizePrimitive.metadata.get_hyperparams(
        )
        denormalize_primitive = DenormalizePrimitive(
            hyperparams=denormalize_hyperparams_class.defaults())
        denormalized_dataset = denormalize_primitive.produce(inputs=dataset)

        print(denormalized_dataset.value)
        print('------------------------')

        # Step 1: Dataset to DataFrame
        dataframe_hyperparams_class = DatasetToDataFramePrimitive.metadata.get_hyperparams(
        )
        dataframe_primitive = DatasetToDataFramePrimitive(
            hyperparams=dataframe_hyperparams_class.defaults())
        dataframe = dataframe_primitive.produce(
            inputs=denormalized_dataset.value)

        print(dataframe.value)
        print('------------------------')

        # Step 2: DataFrame to features
        bow_hyperparams_class = BagOfWords.metadata.get_hyperparams()
        bow_primitive = BagOfWords(
            hyperparams=bow_hyperparams_class.defaults())
        bow_primitive_out = bow_primitive.produce(inputs=dataframe.value)

        # Step 3: Fit k-means on the bag-of-words features (4 clusters to
        # match the 4 sentiment classes in this dataset).
        kmeans_hyperparams_class = KMeansClusteringPrimitive.metadata.query(
        )['primitive_code']['class_type_arguments']['Hyperparams']
        kmeans_hyperparams = kmeans_hyperparams_class.defaults().replace({
            'n_clusters':
            4,
            'n_init':
            10,
            'max_iter':
            1000
        })
        kmeans_primitive = KMeansClusteringPrimitive(
            hyperparams=kmeans_hyperparams)
        kmeans_primitive.set_training_data(inputs=bow_primitive_out.value)
        kmeans_primitive.fit()

        #-----------------------------------------------------------------------
        # Loading Testing dataset.
        dataset_doc_path2 = os.path.join(base_path,\
                                         'SCORE/dataset_SCORE',\
                                         'datasetDoc.json')
        dataset2 = Dataset.load('file://{dataset_doc_path}'.format(
            dataset_doc_path=dataset_doc_path2))

        # Step 0: Denormalize primitive
        score_denormalize_hyperparams_class = DenormalizePrimitive.metadata.get_hyperparams(
        )
        score_denormalize_primitive = DenormalizePrimitive(
            hyperparams=score_denormalize_hyperparams_class.defaults())
        score_denormalized_dataset = score_denormalize_primitive.produce(
            inputs=dataset2)

        print(denormalized_dataset.value)
        print('------------------------')

        # Step 1: Dataset to DataFrame
        score_dataframe_hyperparams_class = DatasetToDataFramePrimitive.metadata.get_hyperparams(
        )
        score_dataframe_primitive = DatasetToDataFramePrimitive(
            hyperparams=score_dataframe_hyperparams_class.defaults())
        score_dataframe = score_dataframe_primitive.produce(
            inputs=score_denormalized_dataset.value)

        print(score_dataframe.value)
        print('------------------------')

        # Step 2: Featurize the score split with the already-fitted BoW primitive.
        score_bow_dataframe = bow_primitive.produce(
            inputs=score_dataframe.value)

        print(score_bow_dataframe.value)
        print('------------------------')

        score = kmeans_primitive.produce(inputs=score_bow_dataframe.value)
        score = score.value

        print(score)
        print('------------------------')

        for col in range(score.shape[1]):
            col_dict = dict(
                score.metadata.query((metadata_base.ALL_ELEMENTS, col)))
            print('Meta-data - {}'.format(col), col_dict)

        # Compute error. np.float was removed in NumPy 1.24 — cast with the
        # builtin float (identical semantics: np.float was an alias for it).
        ground_truth = ((
            score_dataframe.value['sentiment']).to_numpy()).astype(float)
        predictions = ((score.iloc[:, -1]).to_numpy()).astype(float)
        print('------------------------')
        print('Predictions')
        print(predictions)
        print('------------------------')
        print('Ground Truth')
        print(ground_truth)
        print('------------------------')

        print('------------------------')
        print('MLP Test missclassification rate (lower better):  ',
              (100 * (1 - np.mean(ground_truth == predictions))))
        print('------------------------')
Exemplo n.º 5
0
    def test_1(self):
        """Deep Markov Model smoke test: load the stock-market SCORE split,
        profile and parse columns, split attributes from targets, then fit
        the DMM primitive on the extracted frames.
        """
        print('\n')
        print('########################')
        print('#--------TEST-1--------#')
        print('########################')

        # Loading dataset.
        path1 = 'file://{uri}'.format(uri=os.path.abspath(
            '/ubc_primitives/datasets/seed_datasets_current/LL1_736_stock_market/SCORE/dataset_SCORE/datasetDoc.json'
        ))
        dataset = Dataset.load(dataset_uri=path1)

        # # Step 0: Denormalize primitive
        # denormalize_hyperparams_class = DenormalizePrimitive.metadata.get_hyperparams()
        # denormalize_primitive = DenormalizePrimitive(hyperparams=denormalize_hyperparams_class.defaults())
        # denormalized_dataset  = denormalize_primitive.produce(inputs=dataset)
        # denormalized_dataset  = denormalized_dataset.value
        # print(denormalized_dataset)
        # print('------------------------')

        print('Loading Training Dataset....')
        # Step 0: Dataset to DataFrame
        dataframe_hyperparams_class = DatasetToDataFramePrimitive.metadata.get_hyperparams(
        )
        dataframe_primitive = DatasetToDataFramePrimitive(
            hyperparams=dataframe_hyperparams_class.defaults())
        dataframe = dataframe_primitive.produce(inputs=dataset)
        dataframe = dataframe.value
        print(dataframe)

        # Dump per-column metadata for debugging.
        for col in range(dataframe.shape[1]):
            col_dict = dict(
                dataframe.metadata.query((metadata_base.ALL_ELEMENTS, col)))
            print('Meta-data - {}'.format(col), col_dict)
        print('------------------------')

        # Step 1: Profiler — infers semantic types (fit then produce).
        print('Profiler')
        profiler_hyperparams_class = SimpleProfilerPrimitive.metadata.get_hyperparams(
        )
        profiler_primitive = SimpleProfilerPrimitive(
            hyperparams=profiler_hyperparams_class.defaults())
        profiler_primitive.set_training_data(inputs=dataframe)
        profiler_primitive.fit()
        profiler_dataframe = profiler_primitive.produce(inputs=dataframe)
        profiler_dataframe = profiler_dataframe.value
        print(profiler_dataframe)

        for col in range(profiler_dataframe.shape[1]):
            col_dict = dict(
                profiler_dataframe.metadata.query(
                    (metadata_base.ALL_ELEMENTS, col)))
            print('Meta-data - {}'.format(col), col_dict)
        print('------------------------')

        # Step 2: Column parser — convert string columns of the listed
        # semantic types into their structural (numeric/vector/date) types.
        print('Column parser')
        parser_hyperparams_class = ColumnParserPrimitive.metadata.get_hyperparams(
        )
        parser_hyperparams = parser_hyperparams_class.defaults().replace({
            'parse_semantic_types': [
                "http://schema.org/Boolean", "http://schema.org/Integer",
                "http://schema.org/Float",
                "https://metadata.datadrivendiscovery.org/types/FloatVector",
                "http://schema.org/DateTime"
            ]
        })
        parser_primitive = ColumnParserPrimitive(
            hyperparams=parser_hyperparams)
        parser_dataframe = parser_primitive.produce(inputs=profiler_dataframe)
        parser_dataframe = parser_dataframe.value
        print(parser_dataframe)
        print('------------------------')

        for col in range(parser_dataframe.shape[1]):
            col_dict = dict(
                parser_dataframe.metadata.query(
                    (metadata_base.ALL_ELEMENTS, col)))
            print('Meta-data - {}'.format(col), col_dict)

        # Step 4: Extract attribute columns as the model inputs.
        print('Extract dataframe')
        extract_hyperparams_class = ExtractColumnsBySemanticTypesPrimitive.metadata.get_hyperparams(
        )
        extract_hyperparams = extract_hyperparams_class.defaults().replace({
            'semantic_types':
            ['https://metadata.datadrivendiscovery.org/types/Attribute']
        })
        extract_primitive = ExtractColumnsBySemanticTypesPrimitive(
            hyperparams=extract_hyperparams)
        extract_dataframe = extract_primitive.produce(inputs=parser_dataframe)
        extract_dataframe = extract_dataframe.value
        print(extract_dataframe)
        print('------------------------')

        # Step 5: Extract the TrueTarget column as the model outputs.
        print('Extract target')
        extract_hyperparams_class = ExtractColumnsBySemanticTypesPrimitive.metadata.get_hyperparams(
        )
        extract_hyperparams = extract_hyperparams_class.defaults().replace({
            'semantic_types':
            ['https://metadata.datadrivendiscovery.org/types/TrueTarget']
        })
        extract_primitive = ExtractColumnsBySemanticTypesPrimitive(
            hyperparams=extract_hyperparams)
        extract_targets = extract_primitive.produce(inputs=parser_dataframe)
        extract_targets = extract_targets.value
        print(extract_targets)
        print('------------------------')

        print('DMM Primitive....')
        dmm_hyperparams_class = DeepMarkovModelPrimitive.metadata.query(
        )['primitive_code']['class_type_arguments']['Hyperparams']
        dmm_hyperparams = dmm_hyperparams_class.defaults()
        dmm_primitive = DeepMarkovModelPrimitive(hyperparams=dmm_hyperparams)
        dmm_primitive.set_training_data(inputs=extract_dataframe,
                                        outputs=extract_targets)
        # Inspecting a private attribute for debugging only.
        print(dmm_primitive._training_inputs)
        dmm_primitive.fit()
Exemplo n.º 6
0
import os

from sklearn.metrics import hamming_loss

from d3m.container.dataset import D3MDatasetLoader, Dataset, CSVLoader

from common_primitives.denormalize import DenormalizePrimitive, Hyperparams as hyper_Den
from common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive, Hyperparams as hyper_Dat
from common_primitives.extract_columns_semantic_types import ExtractColumnsBySemanticTypesPrimitive, Hyperparams as hyper_Ext

from dsbox.spen.application.MLPClassifier import MLCHyperparams, Params, MLClassifier
from dsbox.datapreprocessing.cleaner.to_numeric import ToNumeric, Hyperparams as hyper_Nu
from dsbox.datapreprocessing.cleaner.encoder import Encoder, EncHyperparameter as hyper_En

# Default hyperparameters for the denormalize and dataset-to-dataframe steps.
h0 = hyper_Den.defaults()
h1 = hyper_Dat.defaults()
primitive_0 = DenormalizePrimitive(hyperparams=h0)
primitive_1 = DatasetToDataFramePrimitive(hyperparams=h1)

dataset_train_file_path = 'bibtex_dataset/bibtex_dataset/datasetDoc.json'
dataset = D3MDatasetLoader()

# Resolve the relative path to an absolute file:// URI before loading.
dataset_train = dataset.load('file://{dataset_doc_path}'.format(
    dataset_doc_path=os.path.abspath(dataset_train_file_path)))
# Denormalize, then convert the dataset to a single dataframe.
dataset_org = primitive_0.produce(inputs=dataset_train)
res_df = primitive_1.produce(inputs=dataset_org.value)

h2 = hyper_Ext({
    'semantic_types': (
        'https://metadata.datadrivendiscovery.org/types/PrimaryKey',
        'https://metadata.datadrivendiscovery.org/types/Attribute',
    ),
    'use_columns': (),
Exemplo n.º 7
0
    def test_1(self):
        """
        Feature extraction only and Testing on seed dataset from D3M datasets
        """
        print('\n')
        print('########################')
        print('#--------TEST-1--------#')
        print('########################')

        # Get volumes: map each pretrained-weight file name to its path.
        all_weights = os.listdir('./static')
        all_weights = {w: os.path.join('./static', w) for w in all_weights}

        # Loading dataset.
        path1 = 'file://{uri}'.format(uri=os.path.abspath('/ubc_primitives/datasets/seed_datasets_current/22_handgeometry/TRAIN/dataset_TRAIN/datasetDoc.json'))
        dataset = Dataset.load(dataset_uri=path1)

        # Get dataset paths
        path2 = 'file://{uri}'.format(uri=os.path.abspath('/ubc_primitives/datasets/seed_datasets_current/22_handgeometry/SCORE/dataset_TEST/datasetDoc.json'))
        score_dataset = Dataset.load(dataset_uri=path2)

        # Step 0: Denormalize primitive
        denormalize_hyperparams_class = DenormalizePrimitive.metadata.get_hyperparams()
        denormalize_primitive = DenormalizePrimitive(hyperparams=denormalize_hyperparams_class.defaults())
        denormalized_dataset  = denormalize_primitive.produce(inputs=dataset)
        print(denormalized_dataset.value)
        print('------------------------')

        print('Loading Training Dataset....')
        # Step 1: Dataset to DataFrame
        dataframe_hyperparams_class = DatasetToDataFramePrimitive.metadata.get_hyperparams()
        dataframe_primitive = DatasetToDataFramePrimitive(hyperparams=dataframe_hyperparams_class.defaults())
        dataframe = dataframe_primitive.produce(inputs=denormalized_dataset.value)
        print(dataframe.value)
        print('------------------------')

        print('Loading Testing Dataset....')
        # Step 0: Denormalize primitive
        score_denormalize_hyperparams_class = DenormalizePrimitive.metadata.get_hyperparams()
        score_denormalize_primitive = DenormalizePrimitive(hyperparams=score_denormalize_hyperparams_class.defaults())
        score_denormalized_dataset  = score_denormalize_primitive.produce(inputs=score_dataset)
        print(score_denormalized_dataset.value)
        print('------------------------')

        score_hyperparams_class = DatasetToDataFramePrimitive.metadata.get_hyperparams()
        score_primitive = DatasetToDataFramePrimitive(hyperparams=score_hyperparams_class.defaults())
        score = score_primitive.produce(inputs=score_denormalized_dataset.value)
        print(score.value)
        print('------------------------')

        # Extract the image file-name column (model inputs).
        extractA_hyperparams_class = ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        extractA_hyperparams_class = extractA_hyperparams_class.defaults().replace(
                {
                'semantic_types': ('https://metadata.datadrivendiscovery.org/types/FileName',)
                }
        )
        extractA_primitive = ExtractColumnsBySemanticTypesPrimitive(hyperparams=extractA_hyperparams_class)
        extractA = extractA_primitive.produce(inputs=dataframe.value)
        print(extractA.value)
        print('------------------------')

        # Extract the suggested-target column (regression outputs).
        extractP_hyperparams_class = ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        extractP_hyperparams = extractP_hyperparams_class.defaults().replace(
                {
                'semantic_types': ('https://metadata.datadrivendiscovery.org/types/SuggestedTarget',)
                }
        )
        extractP_primitive = ExtractColumnsBySemanticTypesPrimitive(hyperparams=extractP_hyperparams)
        extractP = extractP_primitive.produce(inputs=dataframe.value)
        print(extractP.value)
        print('------------------------')

        # Call primitives: train the CNN end-to-end, then predict on the score split.
        hyperparams_class = ConvolutionalNeuralNetwork.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        hyperparams_class = hyperparams_class.defaults().replace(
                {
                'feature_extract_only': False,
                'cnn_type': 'mobilenet',
                'num_iterations': 150,
                'output_dim': 1
                }
        )
        primitive = ConvolutionalNeuralNetwork(hyperparams=hyperparams_class, volumes=all_weights)
        primitive.set_training_data(inputs = dataframe.value, outputs = extractP.value)
        # fit() returns a CallResult without predictions; no need to keep it.
        primitive.fit()
        test_out  = primitive.produce(inputs=score.value)
        test_out  = test_out.value

        print(test_out)
        print('------------------------')
        for col in range(test_out.shape[1]):
            col_dict = dict(test_out.metadata.query((metadata_base.ALL_ELEMENTS, col)))
            print('Meta-data - {}'.format(col), col_dict)

        # Compute error. np.float was removed in NumPy 1.24 — cast with the
        # builtin float (identical semantics: np.float was an alias for it).
        ground_truth = ((score.value['WRISTBREADTH']).to_numpy()).astype(float)
        predictions  = (test_out.iloc[:, -1]).to_numpy()

        print(ground_truth)
        print(predictions)
        print('------------------------')

        print('Mean squared error (lower better): ', (np.mean((predictions - ground_truth)**2)))
        print('------------------------')
Exemplo n.º 8
0
    def test_2(self):
        """
        Training and Testing on seed dataset from D3M datasets
        """
        print('\n')
        print('########################')
        print('#--------TEST-2--------#')
        print('########################')

        # Get volumes: map each pretrained-weight file name to its path.
        all_weights = os.listdir('./static')
        all_weights = {w: os.path.join('./static', w) for w in all_weights}

        # Loading dataset.
        path1 = 'file://{uri}'.format(uri=os.path.abspath('/ubc_primitives/datasets/seed_datasets_current/22_handgeometry/TRAIN/dataset_TRAIN/datasetDoc.json'))
        dataset = Dataset.load(dataset_uri=path1)

        # Get dataset paths
        path2 = 'file://{uri}'.format(uri=os.path.abspath('/ubc_primitives/datasets/seed_datasets_current/22_handgeometry/SCORE/dataset_TEST/datasetDoc.json'))
        score_dataset = Dataset.load(dataset_uri=path2)

        # Step 0: Denormalize primitive
        denormalize_hyperparams_class = DenormalizePrimitive.metadata.get_hyperparams()
        denormalize_primitive = DenormalizePrimitive(hyperparams=denormalize_hyperparams_class.defaults())
        denormalized_dataset  = denormalize_primitive.produce(inputs=dataset)
        print(denormalized_dataset.value)
        print('------------------------')

        print('Loading Training Dataset....')
        # Step 1: Dataset to DataFrame
        dataframe_hyperparams_class = DatasetToDataFramePrimitive.metadata.get_hyperparams()
        dataframe_primitive = DatasetToDataFramePrimitive(hyperparams=dataframe_hyperparams_class.defaults())
        dataframe = dataframe_primitive.produce(inputs=denormalized_dataset.value)
        print(dataframe.value)
        print('------------------------')

        print('Loading Testing Dataset....')
        # Step 0: Denormalize primitive
        score_denormalize_hyperparams_class = DenormalizePrimitive.metadata.get_hyperparams()
        score_denormalize_primitive = DenormalizePrimitive(hyperparams=score_denormalize_hyperparams_class.defaults())
        score_denormalized_dataset  = score_denormalize_primitive.produce(inputs=score_dataset)
        print(score_denormalized_dataset.value)
        print('------------------------')

        score_hyperparams_class = DatasetToDataFramePrimitive.metadata.get_hyperparams()
        score_primitive = DatasetToDataFramePrimitive(hyperparams=score_hyperparams_class.defaults())
        score = score_primitive.produce(inputs=score_denormalized_dataset.value)
        print(score.value)
        print('------------------------')

        # Call primitives: use the CNN as a fixed feature extractor (no top layer).
        hyperparams_class = ConvolutionalNeuralNetwork.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        hyperparams_class = hyperparams_class.defaults().replace(
                {
                'include_top': False,
                'cnn_type': 'mobilenet',
                'output_dim': 1,
                }
        )
        primitive = ConvolutionalNeuralNetwork(hyperparams=hyperparams_class, volumes=all_weights)
        test_out  = primitive.produce(inputs=dataframe.value)

        print(test_out)
        print('------------------------')

        # Extract attribute columns from the CNN features.
        extractA_hyperparams_class = ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        extractA_hyperparams_class = extractA_hyperparams_class.defaults().replace(
                {
                'semantic_types': ('https://metadata.datadrivendiscovery.org/types/Attribute',)
                }
        )
        extractA_primitive = ExtractColumnsBySemanticTypesPrimitive(hyperparams=extractA_hyperparams_class)
        extractA = extractA_primitive.produce(inputs=test_out.value)
        print(extractA.value)
        print('------------------------')

        # Extract the suggested-target column to use as regression outputs.
        extractP_hyperparams_class = ExtractColumnsBySemanticTypesPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        extractP_hyperparams = extractP_hyperparams_class.defaults().replace(
                {
                'semantic_types': ('https://metadata.datadrivendiscovery.org/types/SuggestedTarget',)
                }
        )
        extractP_primitive = ExtractColumnsBySemanticTypesPrimitive(hyperparams=extractP_hyperparams)
        extractP = extractP_primitive.produce(inputs=dataframe.value)
        extractP = extractP.value
        # Update Metadata from SuggestedTarget to TrueTarget
        for col in range((extractP).shape[1]):
            col_dict = dict(extractP.metadata.query((metadata_base.ALL_ELEMENTS, col)))
            col_dict['structural_type'] = type(1.0)
            col_dict['name']            = "WRISTBREADTH"
            col_dict["semantic_types"]  = ("http://schema.org/Float", "https://metadata.datadrivendiscovery.org/types/TrueTarget",)
            extractP.metadata           = extractP.metadata.update((metadata_base.ALL_ELEMENTS, col), col_dict)

        print(extractP)
        print('------------------------')

        # Call primitives: extract CNN features for the score split.
        score_out = primitive.produce(inputs=score.value)

        # Fit an XGBoost regressor on the CNN features, predict on the score features.
        XGB_hyperparams_class = XGBoostGBTreeRegressorPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        XGB_primitive = XGBoostGBTreeRegressorPrimitive(hyperparams=XGB_hyperparams_class.defaults())
        XGB_primitive.set_training_data(inputs=test_out.value, outputs=extractP)
        XGB_primitive.fit()
        test_out_xgb = XGB_primitive.produce(inputs=score_out.value)
        test_out_xgb = test_out_xgb.value

        print('Predictions')
        print(test_out_xgb)
        print('------------------------')

        # Compute error. np.float was removed in NumPy 1.24 — cast with the
        # builtin float (identical semantics: np.float was an alias for it).
        ground_truth = ((score.value['WRISTBREADTH']).to_numpy()).astype(float)
        predictions  = (test_out_xgb.iloc[:, -1]).to_numpy()

        print(ground_truth)
        print(predictions)
        print('------------------------')

        print('Mean squared error (lower better): ', (np.mean((predictions - ground_truth)**2)))
        print('------------------------')