Example #1
0
 def __init__(self):
     self.entities = []
     self.columns = []
     self.relationships = []
     self.synonyms_col = []
     self.synonyms_tab = []
     self.entity_graph = []
     self.loaded_entities = []
     self.config = Configuration()
     self.conn = pyodbc.connect(self.config.get_sql_connection_string())
     lookups = Lookups()
     self.lemmatizer = Lemmatizer(lookups)
     self.load_db_model()
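
This snippet presupposes pyodbc and spaCy's lookup-based lemmatizer. A minimal import header, assuming spaCy 2.2+ and a project-local Configuration module (the import path is a guess, not part of the original source), might be:

# Hypothetical imports for the snippet above; the Configuration import
# path is an assumption.
import pyodbc
from spacy.lookups import Lookups
from spacy.lemmatizer import Lemmatizer  # spaCy 2.2.x API: Lemmatizer(lookups)
from configuration import Configuration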
Example #2
0
 def __init__(self):
     self.entities = []
     self.columns = []
     self.relationships = []
     self.synonyms_col = []
     self.synonyms_tab = []
     self.entity_graph = []
     self.loaded_entities = []
     self.config = Configuration()
     self.conn = pyodbc.connect(self.config.get_sql_connection_string())
     #self.lemmatizer = Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)
     self.lemmatizer = English.Defaults.create_lemmatizer()
     self.load_db_model()
Example #3
0
 def __init__(self, entities, columns, db_model):
     self.columns = columns
     self.entities = entities
     self.db_model = db_model
     self.entity_column_mapping = []
     self.joins = []
     self.conditions = []
     self.select = []
     self.query = ""
     self.entities_parsed = []
     self.isMaxRequired = ""
     self.isMaxRequiredEntity = ""
     self.isMinRequired = ""
     self.isMinRequiredEntity = ""
     self.isAverage = ""
     self.isAverageEntity = ""
     self.isCount = ""
     self.isCountEntity = ""
     self.isSum = ""
     self.isSumEntity = ""
     self.config = Configuration()
     self.conn = pyodbc.connect(self.config.get_sql_connection_string())
Example #4
0
    def __init__(self):
        self.config = Configuration().get_config()
        self.transformer_dict = self.config['preprocessing']['transformers']
        self.shadow_train_size = self.config['models']['sample_sizes'][
            'shadow_train']
        self.probe_size = self.config['models']['sample_sizes']['probe_size']
        self.update_size = self.config['models']['sample_sizes']['update_size']
        self.num_train_update_samples = self.config['models']['sample_sizes'][
            'num_train_update_samples']
        self.num_test_update_samples = self.config['models']['sample_sizes'][
            'num_test_update_samples']

        self.data_file_path = 'data/raw_data_files/' + Utils().get_model_name(
            self.num_train_update_samples, self.num_test_update_samples,
            self.shadow_train_size)
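
For reference, the nested lookups above imply a config shape roughly like the following sketch (key names come from the code; the values are placeholders, not the project's real settings):

# Hypothetical config structure implied by the lookups above.
config = {
    'preprocessing': {'transformers': {}},
    'models': {
        'sample_sizes': {
            'shadow_train': 10000,
            'probe_size': 1000,
            'update_size': 100,
            'num_train_update_samples': 20,
            'num_test_update_samples': 20,
        },
    },
}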
Example #5
0
    def __init__(self, parameters):
        self.config = Configuration().get_config()
        self.device = Utils().get_device()
        self.log_dir = 'runs'
        self.model_dir = 'saved_models/'
        self.update_set_size = self.config['models']['sample_sizes'][
            'update_size']
        self.num_train_update_samples = self.config['models']['sample_sizes'][
            'num_train_update_samples']
        self.num_test_update_samples = self.config['models']['sample_sizes'][
            'num_test_update_samples']
        self.image_size = (3, 32, 32)
        self.time = datetime.now().strftime("%d-%m-%H-%M-%S")
        self.noise_dim = self.config['models']['GAN']['noise_dim']
        self.datasets_directory = 'data/datasets'
        self.shadow_num_epochs = self.config['models']['shadow_model'][
            'num_epochs']

        self.shadow_in_channel = parameters['shadow_in_channel']
        self.shadow_out_channels = parameters['shadow_out_channels']
        self.shadow_fc_size = self.shadow_out_channels[1] * 25
        self.shadow_learning_rate = parameters['shadow_learning_rate']

        self.update_learning_rate = parameters['update_learning_rate']
        self.update_shadow_epochs = parameters['update_shadow_epochs']

        self.encoder_leaky_relu = parameters['encoder_leaky_relu']
        self.encoder_dropout = parameters['encoder_dropout']
        self.dis_leaky = parameters['dis_leaky']
        self.dis_ndf = parameters['dis_ndf']
        self.generator_leaky = parameters['generator_leaky']
        self.generator_ngf = parameters['generator_ngf']
        self.gan_loss_weight = parameters['gan_loss_weight']
        self.discriminator_type = parameters['discriminator_type']
        self.generator_type = parameters['generator_type']
        self.noisy_label_change = parameters['noisy_label_change']
        self.discriminator_dropout = parameters['discriminator_dropout']
        self.lr_gamma = parameters['lr_gamma']

        self.cache_name = 'cache/' + Utils.get_folder_name(
            self.update_set_size, self.num_train_update_samples,
            self.num_test_update_samples)
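
The constructor consumes a flat parameters dict; a hypothetical instance covering every key it reads (values are illustrative placeholders) would look like:

# Hypothetical parameters dict matching the keys consumed above.
parameters = {
    'shadow_in_channel': 3,
    'shadow_out_channels': [64, 128],
    'shadow_learning_rate': 1e-3,
    'update_learning_rate': 1e-4,
    'update_shadow_epochs': 10,
    'encoder_leaky_relu': 0.2,
    'encoder_dropout': 0.3,
    'dis_leaky': 0.2,
    'dis_ndf': 64,
    'generator_leaky': 0.2,
    'generator_ngf': 64,
    'gan_loss_weight': 1.0,
    'discriminator_type': 'dcgan',
    'generator_type': 'dcgan',
    'noisy_label_change': 0.05,
    'discriminator_dropout': 0.3,
    'lr_gamma': 0.9,
}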
Example #6
0
class HandleParameterize:
    """
    Parameterization class
    """
    invested_user_pwd_pattern = r'{invest_user_pwd}'
    invested_user_id_pattern = r'{invest_user_id}'

    configuration = Configuration(CONFIG_USER_ACCOUNT_FILE_PATH)

    @classmethod
    def do_param(cls, data):

        # Replace the password
        if re.search(cls.invested_user_pwd_pattern, data):
            data = re.sub(cls.invested_user_pwd_pattern,
                          cls.configuration.getConfig('Invest', 'pwd'), data)

        # Replace the id
        if re.search(cls.invested_user_id_pattern, data):
            data = re.sub(cls.invested_user_id_pattern,
                          str(cls.configuration.getConfig('Invest', 'id')),
                          data)

        return data
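
A usage sketch for the class above (the JSON payload is made up; the placeholder tokens and the 'Invest' config section come from the class itself):

# Hypothetical call: replaces {invest_user_pwd} and {invest_user_id}
# with values from the 'Invest' section of the account config file.
payload = '{"userId": "{invest_user_id}", "password": "{invest_user_pwd}"}'
payload = HandleParameterize.do_param(payload)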
Example #7
0
    def __predict_output__():

        plt.interactive(False)
        cfg = Configuration()
        GPU = True

        if not GPU:
            os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
            os.environ["CUDA_VISIBLE_DEVICES"] = ""

        # Input Path

        root_dir = os.path.dirname(os.path.abspath(__file__))

        image_path = cfg.image_path

        json_path = os.path.join(root_dir, cfg.input_filename)

        testingset = os.path.join(root_dir, 'testingset')

        Preprocessor.__generate_kijiji_set__(root_dir, image_path, json_path,
                                             testingset, 'model')

        # ------------------generator to compile training data of kijiji dataset----------------------------------------

        image_path = os.path.join(root_dir, 'testingset')

        data_path = glob(image_path + "/*")

        # Image Segmentation Parameters

        model_path = os.path.expanduser(cfg.model_path)
        assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
        anchors_path = os.path.expanduser(cfg.anchors_path)
        classes_path = os.path.expanduser(cfg.classes_path)
        test_path = os.path.expanduser(cfg.test_path)
        output_path = os.path.expanduser(cfg.segmented_output_path)
        json_path = os.path.expanduser(cfg.json_output)

        if not os.path.exists(output_path):
            print('Creating output path {}'.format(output_path))
            os.mkdir(output_path)

        sess = K.get_session()

        class_names = Preprocessor.__return_class_names__(classes_path)

        anchors = Preprocessor.__return_anchors__(anchors_path)

        yolo_model = load_model(model_path)

        # Verify model, anchors, and classes are compatible

        num_classes = len(class_names)

        num_anchors = len(anchors)

        info = 'Mismatch between model and given anchor and class sizes. ' \
               'Specify matching anchors and classes with --anchors_path and --classes_path flags.'
        model_output_channels = yolo_model.layers[-1].output_shape[-1]
        assert model_output_channels == num_anchors * (num_classes + 5), info
        print('{} model, anchors, and classes loaded.'.format(model_path))

        # Check if model is fully convolutional, assuming channel last order.

        model_image_size = yolo_model.layers[0].input_shape[1:3]

        is_fixed_size = model_image_size != (None, None)

        # Generate Colors for drawing bounding boxes

        hsv_tuples, colors = Preprocessor.__generate_colors_for_bounding_boxes__(
            class_names)

        yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

        input_image_shape = K.placeholder(shape=(2, ))
        boxes, scores, classes = yolo_eval(yolo_outputs,
                                           input_image_shape,
                                           score_threshold=cfg.score_threshold,
                                           iou_threshold=cfg.iou_threshold)

        # Load Images from the root folder

        input_images_model_1, all_images, data_path, data_path_with_image_name = Preprocessor.__load_image_data_thumbnails__(
            data_path,
            cfg.compressed_image_height,
            cfg.compressed_image_width,
            cfg.compressed_channel,
            cfg.number_of_categories,
            cfg.number_of_images_per_category,
            root_dir,
            is_fixed_size,
            model_image_size,
            sess,
            yolo_model,
            input_image_shape,
            boxes,
            scores,
            classes,
            cfg.font_path,
            class_names,
            colors,
            output_path,
            json_path,
            test_path,
            True,  # Segmentation Flag
            False,  # Edge-detection Flag
            True,  # Extract object Flag
            False)  # Gray Scale Flag

        input_images_model_2, all_images, data_path, data_path_with_image_name = Preprocessor.__load_image_data_thumbnails__(
            data_path, cfg.compressed_image_height, cfg.compressed_image_width,
            cfg.compressed_channel, cfg.number_of_categories,
            cfg.number_of_images_per_category, root_dir, is_fixed_size,
            model_image_size, sess, yolo_model, input_image_shape, boxes,
            scores, classes, cfg.font_path, class_names, colors, output_path,
            json_path, test_path, False, True, False, False)

        input_images_model_3, all_images, data_path, data_path_with_image_name = Preprocessor.__load_image_data_thumbnails__(
            data_path, cfg.image_height, cfg.image_width, cfg.channel,
            cfg.number_of_categories, cfg.number_of_images_per_category,
            root_dir, is_fixed_size, model_image_size, sess, yolo_model,
            input_image_shape, boxes, scores, classes, cfg.font_path,
            class_names, colors, output_path, json_path, test_path, False,
            False, False, False)

        input_shape = [
            cfg.compressed_image_height, cfg.compressed_image_width,
            cfg.compressed_channel
        ]

        input_shape_3 = [cfg.image_height, cfg.image_width, cfg.channel]

        # load (pre-trained) weights for model_1

        print('-' * 30)
        print('Loading model weights...\n')
        weight_folder = cfg.model_1_save  # the path where the model weights are stored
        weight_file = 'model_1.h5'
        model_1 = Preprocessor.__load_model_weights__(weight_folder,
                                                      weight_file, input_shape,
                                                      input_shape_3, "Model_1")

        # load (pre-trained) weights for model_2

        print('-' * 30)
        print('Loading model weights...\n')
        weight_folder = cfg.model_2_save  # the path where the model weights are stored
        weight_file = 'model_2.h5'
        model_2 = Preprocessor.__load_model_weights__(weight_folder,
                                                      weight_file, input_shape,
                                                      input_shape_3, "Model_2")

        # load (pre-trained) weights for model_3

        print('-' * 30)
        print('Loading model weights...\n')
        weight_folder = cfg.model_3_save  # the path where the model weights are stored
        weight_file = 'model_3.h5'
        model_3 = Preprocessor.__load_model_weights__(weight_folder,
                                                      weight_file, input_shape,
                                                      input_shape_3, "Model_3")
        print(root_dir)
        print(os.path.join(root_dir, cfg.output_model_1))

        output_path_model_1 = os.path.join(root_dir, cfg.output_model_1)
        output_path_model_2 = os.path.join(root_dir, cfg.output_model_2)
        output_path_model_3 = os.path.join(root_dir, cfg.output_model_3)

        Preprocessor.__create_output_directories__(output_path_model_1)
        Preprocessor.__create_output_directories__(output_path_model_2)
        Preprocessor.__create_output_directories__(output_path_model_3)

        features_from_model_1 = Preprocessor.__get_score_model__(
            model_1, input_images_model_1, output_path_model_1)
        features_from_model_2 = Preprocessor.__get_score_model__(
            model_2, input_images_model_2, output_path_model_2)
        features_from_model_3 = Preprocessor.__get_score_model__(
            model_3, input_images_model_3, output_path_model_3)

        features_from_model_1 = Preprocessor.__flatten_img_data__(
            features_from_model_1)
        features_from_model_2 = Preprocessor.__flatten_img_data__(
            features_from_model_2)
        features_from_model_3 = Preprocessor.__flatten_img_data__(
            features_from_model_3)

        fused_features = np.concatenate([
            features_from_model_1, features_from_model_2, features_from_model_3
        ],
                                        axis=1)

        fused_features = [
            Preprocessor.__binarize__(features) for features in fused_features
        ]

        counter_for_predictions = 0

        sub_average_precision_make, sub_average_precision_color = [], []
        sub_average_precision_body, sub_average_precision_model = [], []

        cum_average_precision_make, cum_average_precision_color = [], []
        cum_average_precision_body, cum_average_precision_model = [], []

        precision_at_3_5_10_all = ''.join(cfg.precision_counter).split(',')

        while counter_for_predictions <= 2:

            test_image_idx = int(len(input_images_model_1) * random())

            if test_image_idx < len(data_path_with_image_name):

                idx_closest = Preprocessor.__get_closest_images__(
                    test_image_idx, fused_features, cfg.number_of_predictions)
                test_image = Preprocessor.__get_concatenated_images__(
                    data_path_with_image_name, [test_image_idx],
                    cfg.compressed_image_width)
                results_image = Preprocessor.__get_concatenated_images__(
                    data_path_with_image_name, idx_closest,
                    cfg.compressed_image_width)

                source_category = str(
                    data_path_with_image_name[test_image_idx]).split('/')
                similar_image = []
                similar_idx_closest = []

                for counter_for_recommendations in range(0, len(idx_closest)):

                    category = str(data_path_with_image_name[
                        idx_closest[counter_for_recommendations]]).split('/')

                    if str(source_category[-2]).strip() == str(
                            category[-2].strip()):
                        similar_image.append(data_path_with_image_name[
                            idx_closest[counter_for_recommendations]])
                        similar_idx_closest.append(
                            idx_closest[counter_for_recommendations])

                print("Test Image ID:", test_image_idx)
                print("\n")
                print("Closest Images ID:", idx_closest)
                print("\n")
                print("Similar Images ID", similar_idx_closest)
                print("\n")

                precision_per_make, precision_per_color = [], []
                precision_per_body_wise, precision_per_model_wise = [], []
                results_image_recommendations = []

                for i in range(0, len(precision_at_3_5_10_all)):

                    results_image_recommendations = Preprocessor.__get_concatenated_images__(
                        data_path_with_image_name, similar_idx_closest,
                        cfg.compressed_image_width)

                    list_of_similar_image_names = Preprocessor.__return_image_names__(
                        data_path_with_image_name, similar_idx_closest)

                    name_of_test_image = Preprocessor.__return_image_names__(
                        data_path_with_image_name, [test_image_idx])

                    dict_of_attributes_of_similar_images = Preprocessor.__get_attributes_list__(
                        list_of_similar_image_names,
                        os.path.join(root_dir, cfg.input_filename))

                    dict_of_attributes_of_test_image = Preprocessor.__get_attributes_list__(
                        name_of_test_image,
                        os.path.join(root_dir, cfg.input_filename))

                    similar_make_wise = Preprocessor.__get_similar__(
                        dict_of_attributes_of_test_image,
                        dict_of_attributes_of_similar_images[:int(
                            precision_at_3_5_10_all[i])], 'make')

                    similar_color_wise = Preprocessor.__get_similar__(
                        dict_of_attributes_of_test_image,
                        dict_of_attributes_of_similar_images[:int(
                            precision_at_3_5_10_all[i])], 'color')

                    similar_body_wise = Preprocessor.__get_similar__(
                        dict_of_attributes_of_test_image,
                        dict_of_attributes_of_similar_images[:int(
                            precision_at_3_5_10_all[i])], 'body')

                    similar_model_wise = Preprocessor.__get_similar__(
                        dict_of_attributes_of_test_image,
                        dict_of_attributes_of_similar_images[:int(
                            precision_at_3_5_10_all[i])], 'model')

                    precision_per_make.append(
                        float(
                            float(len(similar_make_wise)) /
                            int(precision_at_3_5_10_all[i])))
                    precision_per_color.append(
                        float(
                            float(len(similar_color_wise)) /
                            int(precision_at_3_5_10_all[i])))
                    precision_per_body_wise.append(
                        float(
                            float(len(similar_body_wise)) /
                            int(precision_at_3_5_10_all[i])))
                    precision_per_model_wise.append(
                        float(
                            float(len(similar_model_wise)) /
                            int(precision_at_3_5_10_all[i])))

                sub_average_precision_make.append(precision_per_make)
                sub_average_precision_color.append(precision_per_color)
                sub_average_precision_body.append(precision_per_body_wise)
                sub_average_precision_model.append(precision_per_model_wise)

                imsave('test.png', test_image)
                imsave('recommendations.png', results_image_recommendations)
                imsave('total_results.png', results_image)
                counter_for_predictions += 1
                time.sleep(1)

            else:

                print("Index is out of bound")

            cum_average_precision_make.append(
                list(map(Preprocessor.__mean__, zip(*sub_average_precision_make))))
            cum_average_precision_color.append(
                list(map(Preprocessor.__mean__, zip(*sub_average_precision_color))))
            cum_average_precision_body.append(
                list(map(Preprocessor.__mean__, zip(*sub_average_precision_body))))
            cum_average_precision_model.append(
                list(map(Preprocessor.__mean__, zip(*sub_average_precision_model))))

        print("\n \n \n")
        print(
            "-----------------------------------------------------------------------------------"
        )
        print("Average Precision Make-Wise", precision_at_3_5_10_all,
              map(Preprocessor.__mean__, zip(*cum_average_precision_make)))
        print("Average Precision Color-Wise", precision_at_3_5_10_all,
              map(Preprocessor.__mean__, zip(*cum_average_precision_color)))
        print("Average Precision Body-Wise", precision_at_3_5_10_all,
              map(Preprocessor.__mean__, zip(*cum_average_precision_body)))
        print("Average Precision Model-Wise", precision_at_3_5_10_all,
              map(Preprocessor.__mean__, zip(*cum_average_precision_model)))

        writer = csv.writer(
            open(os.path.join(root_dir, 'results.csv'), 'w', newline=''))

        writer.writerow([
            "Make-Wise: Precision at 3", "Make-Wise: Precision at 5",
            "Make-Wise: Precision at 10"
        ])
        for row in zip(*cum_average_precision_make):
            writer.writerow(row)

        writer.writerow([])

        writer.writerow([
            "Color-Wise: Precision at 3", "Color-Wise: Precision at 5",
            "Color-Wise: Precision at 10"
        ])

        for row in zip(*cum_average_precision_color):
            writer.writerow(row)

        writer.writerow([])

        writer.writerow([
            "Body-Wise: Precision at 3", "Body-Wise: Precision at 5",
            "Body-Wise: Precision at 10"
        ])

        for row in zip(*cum_average_precision_body):
            writer.writerow(row)

        writer.writerow([])

        writer.writerow([
            "Model-Wise: Precision at 3", "Model-Wise: Precision at 5",
            "Model-Wise: Precision at 10"
        ])

        for row in zip(*cum_average_precision_model):
            writer.writerow(row)

        writer.writerow([])
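
The precision bookkeeping in this routine reduces to precision@k per attribute: the number of top-k recommendations whose attribute matches the query image, divided by k. As a standalone sketch (names are illustrative, not from the source):

# Illustrative precision@k helper, equivalent to the per-attribute
# ratios computed above.
def precision_at_k(query_attr, recommended_attrs, k):
    top_k = recommended_attrs[:k]
    return sum(1 for attr in top_k if attr == query_attr) / float(k)

# e.g. precision_at_k('honda', ['honda', 'bmw', 'honda'], 3) == 2.0 / 3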
Example #8
0
class SQLGenerator(object):
    def __init__(self, entities, columns, db_model):
        self.columns = columns
        self.entities = entities
        self.db_model = db_model
        self.entity_column_mapping = []
        self.joins = []
        self.conditions = []
        self.select = []
        self.query = ""
        self.entities_parsed = []
        self.isMaxRequired = ""
        self.isMaxRequiredEntity = ""
        self.isMinRequired = ""
        self.isMinRequiredEntity = ""
        self.isAverage = ""
        self.isAverageEntity = ""
        self.isCount = ""
        self.isCountEntity = ""
        self.isSum = ""
        self.isSumEntity = ""
        self.config = Configuration()
        self.conn = pyodbc.connect(self.config.get_sql_connection_string())

    def run_query(self):
        cursor = self.conn.cursor()
        cursor.execute(self.query)
        result = []
        columns = []
        for row in cursor:
            result.append([col for col in row])

        columns = [column[0] for column in cursor.description]
        return [result, columns]


    # sort-key helper: despite the name, it sorts by the first element of a join tuple
    def sortSecond(self, join_comb):
        return join_comb[0]

    def get_from_clause(self, level):
        # build the from_clause
        from_clause = ""
        if len(self.entity_column_mapping) == 1:
            from_clause = self.entity_column_mapping[0][0] + " " + self.entity_column_mapping[0][0] + level
        elif len(self.entity_column_mapping) > 1:
            from_clause = ""
            join_index = 0
            entity_included_in_join = []
            for join in self.joins:
                if join_index == 0:
                    from_clause = from_clause + join[0] + " " + join[0] + level + " JOIN " + join[1] + " " + join[1] + level + " ON " + join[0] + level + "." + join[2] + "=" + join[1] + level + "." + join[3]
                    entity_included_in_join.append(join[0])
                    entity_included_in_join.append(join[1])
                else:
                    if join[0] in entity_included_in_join:
                        from_clause = from_clause + " " + " JOIN " + join[1] + " " + join[1] + level + " ON " + join[0]+ level + "." + join[2] + " = " + join[1] + level + "." + join[3]
                    else:
                        from_clause = from_clause + " JOIN " + join[0] + " " + join[0] + level + " ON " + join[0] + level + "." + join[2] + " = " + join[1] + level + "." + join[3]
                join_index = join_index + 1 
        return from_clause
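
To make the aliasing scheme concrete: every table is aliased as its own name plus the level suffix, so a single join at level "1" renders as below (table and column names are made up):

# Illustrative output of get_from_clause("1") for
# self.joins = [("orders", "customers", "customer_id", "id")]:
#   orders orders1 JOIN customers customers1 ON orders1.customer_id=customers1.id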

    def get_where_clause(self, level):
        return " and ".join([cond[0] + level + "." + cond[1] + " " + cond[2] + " " + cond[3] for cond in self.conditions])

    def get_select_clause(self, level):
        return ", ".join([col[0] + level + "." + col[1] for col in self.select])

    def correlated_sub_query_in_where(self, 
        column,
        entity,
        type_): # type = min, max
        # from clause
        from_clause = self.get_from_clause("1")
        # select clause
        select_clause = self.get_select_clause("1")
        # where clause
        where_clause = self.get_where_clause("1")

        type_sub_query_where_clause = self.get_where_clause("2")
        type_sub_query_from_clause = self.get_from_clause("2")

        if type_sub_query_where_clause != "":
            type_sub_query_where_clause = " Where " + type_sub_query_where_clause

        typeQuery = "SELECT " + \
            type_ + "(" + entity + "2." + column + ") " + \
            " From " + \
            type_sub_query_from_clause + \
            type_sub_query_where_clause
        if select_clause != "":
            select_clause = select_clause + ", "
        if where_clause != "":
            where_clause = where_clause + " and "
        self.query = "SELECT " + \
            select_clause + entity + "1." + column + " " + \
            " From " + \
            from_clause + \
            " Where " + \
            where_clause + \
            entity + "1." + column + " = (" + typeQuery + ")"

    def correlated_sub_query_in_select(self, 
        column,
        entity,
        type_): # type = avg, sum, count
        # from clause
        from_clause = self.get_from_clause("1")
        # select clause
        select_clause = self.get_select_clause("1")
        # where clause
        where_clause = self.get_where_clause("1")

        type_sub_query_where_clause = self.get_where_clause("2")
        type_sub_query_from_clause = self.get_from_clause("2")

        # find the identifier column of the entity in parameter
        db_model_ent = next(e for e in self.db_model.entities if e.name.lower() == entity.lower())
        # db_model_ent.primaryKey
        # correlation

        # find where this table is being referenced
        entity_relationships = [(rel.entity1, rel.column1) for rel in self.db_model.relationships if (rel.entity2 == entity)]

        correlation_entity = entity
        correlation_entity_column = db_model_ent.primaryKey
        parent_entity_exists = False
        parent_entries = []
        if len(self.entity_column_mapping) > 1:
            for ecm in self.entity_column_mapping:
                if ecm[0] != entity:
                    if ecm[0] in [ent[0] for ent in entity_relationships]:
                        parent_entry = next(ent for ent in entity_relationships if ent[0] == ecm[0])
                        parent_entity_exists = True
                        parent_entries.append(parent_entry)
        elif len(self.entity_column_mapping) == 1:
            # only one entity, use where filters
            correlation = " and ".join([cond[0] + "1" + "." + cond[1] + " = " + cond[0] + "2" + "." + cond[1] for cond in self.conditions])
        
        if len(self.entity_column_mapping) > 1:              
            if parent_entity_exists and len(parent_entries) > 0:
                correlations = []
                for parent_entry in parent_entries:
                    correlations.append(parent_entry[0] + "2." + parent_entry[1] + "=" + parent_entry[0] + "1." + parent_entry[1])
                correlation = " and ".join(correlations)
            else:
                correlation = entity + "2." + db_model_ent.primaryKey + "=" + entity + "1." + db_model_ent.primaryKey
        
        if type_sub_query_where_clause == "":
            type_sub_query_where_clause = correlation
        else:
            type_sub_query_where_clause = type_sub_query_where_clause + " and " + correlation

        if type_sub_query_where_clause != "":
            type_sub_query_where_clause = " Where " + type_sub_query_where_clause

        type_sub_query = "SELECT " + \
            type_ + "(" + entity + "2." + column + ") " + \
            " From " + \
            type_sub_query_from_clause + \
            type_sub_query_where_clause

        if select_clause != "":
            select_clause = select_clause + ", "

        if where_clause != "":
            where_clause = " Where " + where_clause

        self.query = "SELECT distinct " + \
            select_clause + "(" + type_sub_query + ") as " + type_ + "_" + column + " " + \
            " From " + \
            from_clause + \
            where_clause


    def build_query(self):

        # maximum case
        if self.isMaxRequired != "":
            self.correlated_sub_query_in_where(self.isMaxRequired, self.isMaxRequiredEntity,"max")
        # minimum case
        elif self.isMinRequired != "":
            self.correlated_sub_query_in_where(self.isMinRequired, self.isMinRequiredEntity,"min")
        # average case
        elif self.isAverage != "":
            self.correlated_sub_query_in_select(self.isAverage, self.isAverageEntity, "avg")
        # count
        elif self.isCount != "":
            self.correlated_sub_query_in_select(self.isCount, self.isCountEntity, "count")
        # sum
        elif self.isSum != "":
            self.correlated_sub_query_in_select(self.isSum, self.isSumEntity, "sum")
        # regular
        else:
            # from clause
            from_clause = self.get_from_clause("1")
            # select clause
            select_clause = self.get_select_clause("1")
            # where clause
            where_clause = self.get_where_clause("1")

            if where_clause != "":
                where_clause = " Where " + where_clause
            self.query = "SELECT distinct " + \
                select_clause + " " + \
                " From " + \
                from_clause + \
                where_clause

    def find_select(self):
        for ecm in self.entity_column_mapping:
            # column mapping within entity
            for cm in ecm[1]:
                # if cm.condition is None and cm.value_ is None:
                if cm.value_ is None or cm.value_ == "NoValue":
                    # entity, column name, [Avg, Min, Max, Sum, Count]
                    # add the where clause here for min, max and sum conditions
                    if cm.isMax:
                        self.isMaxRequired = cm.name.lower()
                        self.isMaxRequiredEntity = ecm[0]
                    elif cm.isMin:
                        self.isMinRequired = cm.name.lower()
                        self.isMinRequiredEntity = ecm[0]
                    elif cm.isAverage:
                        self.isAverage = cm.name.lower()
                        self.isAverageEntity = ecm[0]
                    elif cm.isCount:
                        self.isCount = cm.name.lower()
                        self.isCountEntity = ecm[0]
                    elif cm.isSum:
                        self.isSum = cm.name.lower()
                        self.isSumEntity = ecm[0]
                    else:
                        # check for duplicates
                        if len([sel for sel in self.select if sel[0].lower() == ecm[0].lower() and sel[1].lower() == cm.name.lower()]) == 0:
                            self.select.append((ecm[0], cm.name.lower(), None))
                    


        for ent in self.entities:
            # TODO... add max, min..etc case
            # get default column from db_model
            db_model_ent = next(e for e in self.db_model.entities if e.name.lower() == ent.name.lower())
            # check for duplicates
            if len([sel for sel in self.select if sel[0].lower() == ent.name.lower() and sel[1].lower() == db_model_ent.defaultColumn.lower()]) == 0:
                self.select.append((ent.name.lower(), db_model_ent.defaultColumn, None))

    def find_conditions(self):
        # entity column mapping
        for ecm in self.entity_column_mapping:
            # column mapping within entity
            for cm in ecm[1]:
                if cm.condition is not None and cm.value_ is not None and cm.value_ != "NoValue":
                    val = cm.value_
                    if cm.type_ == "string":
                        val = "'" + val + "'"
                    self.conditions.append((ecm[0], cm.name.lower(), cm.condition, str(val)))

    def find_relationships(self):
        i = 0
        j = 0
        while i < len(self.entity_column_mapping):
            j = i + 1
            base_entity = self.entity_column_mapping[i][0]
            while j < len(self.entity_column_mapping):
                join_entity = self.entity_column_mapping[j][0]
                if len([rel for rel in self.db_model.relationships if ((rel.entity1 == base_entity and rel.entity2 == join_entity) or (rel.entity2 == base_entity and rel.entity1 == join_entity))]) == 1:
                    rel = next(rel for rel in self.db_model.relationships if ((rel.entity1 == base_entity and rel.entity2 == join_entity) or (rel.entity2 == base_entity and rel.entity1 == join_entity)))

                    if rel.entity1 == base_entity:
                        self.joins.append((base_entity, join_entity, rel.column1, rel.column2))
                    else:
                        self.joins.append((join_entity, base_entity, rel.column1, rel.column2))
                j = j + 1
            i = i + 1
        
        if len(self.joins) == 0 and len(self.entity_column_mapping) > 1:
            # try to find the relationship using db model's entity_graph
            i = 0
            entities_mapped = []
            while i < (len(self.entity_column_mapping) - 1):
                base_entity = self.entity_column_mapping[i]
                join_entity = self.entity_column_mapping[i + 1]
                if base_entity[0] not in entities_mapped:
                    entities_mapped.append(base_entity[0])
                found, entities_mapped = self.find_entities_relationship(base_entity, join_entity, entities_mapped)
                i = i + 1

            i = 0
            j = 0
            while i < len(entities_mapped):
                j = i + 1
                base_entity = entities_mapped[i]
                while j < len(entities_mapped):
                    join_entity = entities_mapped[j]
                    if len([rel for rel in self.db_model.relationships if ((rel.entity1 == base_entity and rel.entity2 == join_entity) or (rel.entity2 == base_entity and rel.entity1 == join_entity))]) == 1:
                        rel = next(rel for rel in self.db_model.relationships if ((rel.entity1 == base_entity and rel.entity2 == join_entity) or (rel.entity2 == base_entity and rel.entity1 == join_entity)))

                        if rel.entity1 == base_entity:
                            self.joins.append((base_entity, join_entity, rel.column1, rel.column2))
                        else:
                            self.joins.append((join_entity, base_entity, rel.column1, rel.column2))
                    j = j + 1
                i = i + 1

    def find_entities_relationship(self, base_entity, join_entity, entities_mapped):
        
        entities_to_be_included = copy.copy(entities_mapped)
        found = False
        base_entity_graph = next(eg for eg in self.db_model.entity_graph if eg[0].lower() == base_entity[0].lower())
        for child_entity_in_graph in base_entity_graph[1]:
            if child_entity_in_graph == join_entity[0]:
                entities_to_be_included.append(child_entity_in_graph)
                found = True
                break
            child_entity_graph = next(eg for eg in self.db_model.entity_graph if eg[0].lower() == child_entity_in_graph.lower())
            entities_to_be_included_temp = copy.copy(entities_to_be_included)
            if child_entity_in_graph not in entities_to_be_included_temp:
                entities_to_be_included_temp.append(child_entity_in_graph)
                found, entities_to_be_included = self.find_entities_relationship(child_entity_graph, join_entity, entities_to_be_included_temp)
            if found:
                break
            
        if found:
            for entity_to_be_included in entities_to_be_included:
                if entity_to_be_included not in entities_mapped:
                    entities_mapped.append(entity_to_be_included)
        
        return (found, entities_to_be_included)


    def find_column(self, column, entityName):
        column_parent_entity_found = False
        # get the db model for entity
        db_model_entity = next(model_entity for model_entity in self.db_model.entities if model_entity.name == entityName.lower())

        # add entity into parsed collection
        self.entities_parsed.append(entityName)

        # check if the column exists in the db_model
        if column.name.lower() in [db_model_column.name for db_model_column in db_model_entity.columns]:
            # column parent found, break the loop
            column_parent_entity_found = True
            return (column_parent_entity_found, db_model_entity.name, column)

        # if column does not exists in db_model_entity
        # then look for the related entities
        if not column_parent_entity_found:
            # look for related entities
            for model_entity in [model_entity for model_entities in self.db_model.entity_graph if model_entities[0].lower() == entityName.lower() for model_entity in model_entities[1]]:

                # only process, if not processed before
                if len([ep for ep in self.entities_parsed if ep.lower() == model_entity.lower()]) == 0:
                    column_parent_entity_found, model_name, columnName = self.find_column(column, model_entity)
                    # column found, return entity with column
                    if column_parent_entity_found:
                        return (column_parent_entity_found, model_name, columnName)

        # column not found
        return (column_parent_entity_found, None, None)

    def find_entity(self, column):
        column_parent_entity_found = False
        for entity in self.entities:
            column_parent_entity_found, model_name, columnName =  self.find_column(column, entity.name)
            # column found, return entity with column
            if column_parent_entity_found:
                return (column_parent_entity_found, model_name, columnName)
                
        return (column_parent_entity_found, None, None)


    def get_sql(self):
        if len(self.entities) > 0:
            for column in self.columns:
                # reset the entities_parsed array for new column
                self.entities_parsed = []
                column_parent_entity_found, model_name, columnName = self.find_entity(column)

                if column_parent_entity_found:
                    if len([ecm for ecm in self.entity_column_mapping if ecm[0] == model_name]) == 1:
                        ecm = next(ecm for ecm in self.entity_column_mapping if ecm[0] == model_name)
                        ecm[1].append(columnName)
                    else:
                        self.entity_column_mapping.append((model_name, [columnName]))
                else:
                    print("Column " + column.name + " not found.. ignoring column")
            
            for entity in self.entities:
                if entity.condition is not None and entity.value_ is not None:
                    # reset the entities_parsed array for new column
                    model_name = entity.name

                    ent = next(en for en in self.db_model.entities if en.name.lower() == entity.name.lower())
                    default_column = next(col for col in ent.columns if col.name.lower() == ent.defaultColumn.lower())
                    copy_default_column = copy.copy(default_column)  
                    copy_default_column.condition = entity.condition
                    copy_default_column.value_ = entity.value_                    
                    copy_default_column.isSum = entity.isSum                    
                    copy_default_column.isAverage = entity.isAverage                    
                    copy_default_column.isCount = entity.isCount                    
                    copy_default_column.isMin = entity.isMin                    
                    copy_default_column.isMax = entity.isMax                    

                    if len([ecm for ecm in self.entity_column_mapping if ecm[0].lower() == model_name.lower()]) == 1:
                        ecm = next(ecm for ecm in self.entity_column_mapping if ecm[0].lower() == model_name.lower())
                        ecm[1].append(copy_default_column)
                    else:
                        self.entity_column_mapping.append((model_name.lower(), [copy_default_column]))
                else:
                    if len([ecm for ecm in self.entity_column_mapping if ecm[0].lower() == entity.name.lower()]) == 0:
                        self.entity_column_mapping.append((entity.name.lower(), []))
                    else:
                        ecm = next(ecm for ecm in self.entity_column_mapping if ecm[0].lower() == entity.name.lower())

                        ent = next(en for en in self.db_model.entities if en.name.lower() == entity.name.lower())
                        default_column = next(col for col in ent.columns if col.name.lower() == ent.defaultColumn.lower())
                        copy_default_column = copy.copy(default_column)  
                        copy_default_column.condition = entity.condition
                        copy_default_column.value_ = entity.value_                    
                        copy_default_column.isSum = entity.isSum                    
                        copy_default_column.isAverage = entity.isAverage                    
                        copy_default_column.isCount = entity.isCount                    
                        copy_default_column.isMin = entity.isMin                    
                        copy_default_column.isMax = entity.isMax 
                        ecm[1].append(copy_default_column)                   
                        
        elif len(self.columns) > 0:
            # No entities identified in the phrase;
            # find entities based on the columns identified in the phrase
            for col in self.columns:
                max_col_found_count = 0
                max_col_found_count_entity = ""
                # look for the entity (and related entities) with the most matching columns
                for entity in self.db_model.entities:
                    column_found_count = 0
                    if col.name.lower() in [c.name.lower() for c in entity.columns]:
                        column_found_count = column_found_count + 1
                    if max_col_found_count < column_found_count:
                        max_col_found_count = column_found_count
                        max_col_found_count_entity = entity.name

                if max_col_found_count_entity != "":
                    if len([ecm for ecm in self.entity_column_mapping if ecm[0] == max_col_found_count_entity]) == 1:
                        ecm = next(ecm for ecm in self.entity_column_mapping if ecm[0] == max_col_found_count_entity)
                        ecm[1].append(col)
                    else:
                        self.entity_column_mapping.append((max_col_found_count_entity, [col]))
        else:
            # no column and entity identified
            return []

        # print([(e[0], [ec.name for ec in e[1]]) for e in self.entity_column_mapping])
        # build the sql
        self.find_relationships()
        self.find_conditions()
        self.find_select()
        self.build_query()
        print(self.query)
        return self.run_query()
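
End to end, a hypothetical invocation (the entity and column objects would come from the project's NLP layer, sketched here only by name; db_model is a DBModel instance as in the next example):

# Hypothetical wiring; run_query returns [rows, column_names].
generator = SQLGenerator(entities, columns, db_model)
rows, column_names = generator.get_sql()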
Example #9
0
class DBModel(object):
    def __init__(self):
        self.entities = []
        self.columns = []
        self.relationships = []
        self.synonyms_col = []
        self.synonyms_tab = []
        self.entity_graph = []
        self.loaded_entities = []
        self.config = Configuration()
        self.conn = pyodbc.connect(self.config.get_sql_connection_string())
        lookups = Lookups()
        self.lemmatizer = Lemmatizer(lookups)
        self.load_db_model()

    def load_db_model(self):
        # loading the database from sql server
        cursor = self.conn.cursor()
        cursor.execute(self.config.get_tables_sql_query())
        for row in cursor:
            self.entities.append(
                Entities(row.table_name,
                         self.config.get_default_column(row.table_name)))

        cursor.execute(self.config.get_columns_sql_query())
        current_entity = None
        current_entity_name = ""
        for row in cursor:
            if current_entity_name != row.table_name:
                current_entity_name = row.table_name
                current_entity = next(en for en in self.entities
                                      if en.name == current_entity_name)

            col_type = row.type_name
            if col_type == "varchar" or col_type == "nvarchar":
                col_type = "string"
            current_entity.columns.append(Columns(row.column_name, col_type))

        current_entity = None
        current_entity_name = ""
        cursor.execute(self.config.get_FK_sql_query())
        for row in cursor:
            self.relationships.append(
                Relationship(row.parent_table, row.refrenced_table,
                             row.parent_table_col, row.referenced_table_col))
            if len([
                    en for en in self.entity_graph if en[0] == row.parent_table
            ]) > 0:
                current_entity = next(en for en in self.entity_graph
                                      if en[0] == row.parent_table)
                current_entity[1].append(row.refrenced_table)
            else:
                self.entity_graph.append(
                    (row.parent_table, [row.refrenced_table]))

            if len([
                    en
                    for en in self.entity_graph if en[0] == row.refrenced_table
            ]) > 0:
                current_entity = next(en for en in self.entity_graph
                                      if en[0] == row.refrenced_table)
                current_entity[1].append(row.parent_table)
            else:
                self.entity_graph.append(
                    (row.refrenced_table, [row.parent_table]))

        current_entity = None
        current_entity_name = ""
        cursor.execute(self.config.get_PK_sql_query())
        for row in cursor:
            if len([en for en in self.entity_graph
                    if en[0] == row.table_name]) == 1:
                current_entity = next(en for en in self.entities
                                      if en.name == row.table_name)
                current_entity.primaryKey = row.primary_key

        for entity_to_load in self.config.get_entitites_to_load():
            entity_load_query = "select distinct " + entity_to_load[
                "column"] + " from " + entity_to_load["entity"]
            cursor.execute(entity_load_query)
            entity_data = (entity_to_load["entity"], [])
            for row in cursor:
                entity_data[1].append(row[0])
                # add lemma strings
                lemmas = self.lemmatizer(str(row[0]), u'NOUN')
                for lemma in lemmas:
                    entity_data[1].append(str(lemma))
            self.loaded_entities.append(entity_data)

        # load synonyms from declarative file
        # table synonyms
        for table_synonym in self.config.get_synonyms()["table"]:
            original_val = table_synonym["original"]
            synonyms_vals = table_synonym["synonyms"]
            for synonyms_val in synonyms_vals:
                self.synonyms_tab.append(Synonyms(original_val, synonyms_val))

        # column synonyms
        for column_synonym in self.config.get_synonyms()["column"]:
            original_val = column_synonym["original"]
            synonyms_vals = column_synonym["synonyms"]
            for synonyms_val in synonyms_vals:
                self.synonyms_col.append(Synonyms(original_val, synonyms_val))

        # make a single array
        self.columns = [
            column for entity in self.entities for column in entity.columns
        ]

    # might have to write a custom matcher TODO
    # build the matcher based upon the original value and domain synonyms defined
    def get_matcher(self, matcher, nlp):
        for entity in self.entities:
            matcher.add(entity.name.upper() + "_TABLE", None,
                        nlp(entity.name.lower()))
            for column in entity.columns:
                matcher.add(column.name.upper() + "_COLUMN", None,
                            nlp(column.name.lower()))

        # add table synonyms to matcher
        for synonym in self.synonyms_tab:
            for entity in self.entities:
                if synonym.column.lower() == entity.name.lower():
                    matcher.add(entity.name.upper() + "_TABLE", None,
                                nlp(synonym.synonym.lower()))

        # add column synonyms to matcher
        for synonym in self.synonyms_col:
            for column in self.columns:
                if synonym.column.lower() == column.name.lower():
                    matcher.add(column.name.upper() + "_COLUMN", None,
                                nlp(synonym.synonym.lower()))

        return matcher

    def get_custom_matcher(self, matcher, nlp):
        for entity in self.entities:
            matcher.add(entity.name.upper() + "_TABLE",
                        nlp(entity.name.lower()))
            for column in entity.columns:
                matcher.add(column.name.upper() + "_COLUMN",
                            nlp(column.name.lower()))

        # add table synonyms to matcher
        for synonym in self.synonyms_tab:
            for entity in self.entities:
                if synonym.column.lower() == entity.name.lower():
                    matcher.add(entity.name.upper() + "_TABLE",
                                nlp(synonym.synonym.lower()))

        # add column synonyms to matcher
        for synonym in self.synonyms_col:
            for column in self.columns:
                if synonym.column.lower() == column.name.lower():
                    matcher.add(column.name.upper() + "_COLUMN",
                                nlp(synonym.synonym.lower()))

        return matcher
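
get_matcher targets the spaCy 2.x PhraseMatcher signature add(key, on_match, *docs); a hypothetical setup (the model name is an assumption):

# Hypothetical wiring for get_matcher, assuming spaCy 2.x's PhraseMatcher.
import spacy
from spacy.matcher import PhraseMatcher

nlp = spacy.load('en_core_web_sm')  # model name is an assumption
matcher = DBModel().get_matcher(PhraseMatcher(nlp.vocab), nlp)
matches = matcher(nlp("show me all customers"))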
Example #10
0
 def __init__(self):
     self.config = Configuration().get_config()
     self.experiment_parameters_list = Configuration().get_experiments_list()
     self.device = Utils().get_device()
Example #11
0
 def __init__(self):
     self.config = Configuration().get_config()
     self.device = torch.device(
         self.config['device']['gpu']
         if torch.cuda.is_available() else self.config['device']['cpu'])
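
The lookups above imply a 'device' config section like this hypothetical sketch:

# Hypothetical 'device' section; torch.device accepts strings like these.
config = {'device': {'gpu': 'cuda:0', 'cpu': 'cpu'}}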
Example #12
0
    def __run_training__():

        cfg = Configuration()

        # These variables would be parameterized

        GPU = True

        if not GPU:
            os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
            os.environ["CUDA_VISIBLE_DEVICES"] = ""

        # Input Path

        root_dir = os.path.dirname(os.path.abspath(__file__))

        image_path = cfg.image_path

        json_path = os.path.join(root_dir, cfg.input_filename)

        trainingset = os.path.join(root_dir, 'trainingset')

        Preprocessor.__generate_kijiji_set__(root_dir, image_path, json_path,
                                             trainingset, 'make')

        # --------------------------------------------------------------------------------------------------------------

        image_path = os.path.join(root_dir, 'trainingset')

        data_path = glob(image_path + "/*")

        # Image Segmentation Parameters

        model_path = os.path.expanduser(cfg.model_path)
        assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
        anchors_path = os.path.expanduser(cfg.anchors_path)
        classes_path = os.path.expanduser(cfg.classes_path)
        test_path = os.path.expanduser(cfg.test_path)
        output_path = os.path.expanduser(cfg.segmented_output_path)
        json_path = os.path.expanduser(cfg.json_output)

        if not os.path.exists(output_path):
            print('Creating output path {}'.format(output_path))
            os.mkdir(output_path)

        sess = K.get_session()

        class_names = Preprocessor.__return_class_names__(classes_path)

        anchors = Preprocessor.__return_anchors__(anchors_path)

        yolo_model = load_model(model_path)

        # Verify model, anchors, and classes are compatible

        num_classes = len(class_names)

        num_anchors = len(anchors)

        info = 'Mismatch between model and given anchor and class sizes. ' \
               'Specify matching anchors and classes with --anchors_path and --classes_path flags.'

        model_output_channels = yolo_model.layers[-1].output_shape[-1]
        assert model_output_channels == num_anchors * (num_classes + 5), info
        print('{} model, anchors, and classes loaded.'.format(model_path))

        # Check if model is fully convolutional, assuming channel last order.

        model_image_size = yolo_model.layers[0].input_shape[1:3]

        is_fixed_size = model_image_size != (None, None)

        # Generate Colors for drawing bounding boxes

        hsv_tuples, colors = Preprocessor.__generate_colors_for_bounding_boxes__(
            class_names)

        yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))

        input_image_shape = K.placeholder(shape=(2, ))
        boxes, scores, classes = yolo_eval(yolo_outputs,
                                           input_image_shape,
                                           score_threshold=cfg.score_threshold,
                                           iou_threshold=cfg.iou_threshold)

        # Load Images from the root folder

        input_images_model_1, all_images, data_path, data_path_with_image_name = Preprocessor.__load_image_data_thumbnails__(
            data_path,
            cfg.compressed_image_height,
            cfg.compressed_image_width,
            cfg.compressed_channel,
            cfg.number_of_categories,
            cfg.number_of_images_per_category,
            root_dir,
            is_fixed_size,
            model_image_size,
            sess,
            yolo_model,
            input_image_shape,
            boxes,
            scores,
            classes,
            cfg.font_path,
            class_names,
            colors,
            output_path,
            json_path,
            test_path,
            True,  # Segmentation Flag
            False,  # Edge-detection Flag
            True,  # Extract object Flag
            False)  # Gray Scale Flag

        input_images_model_2, all_images, data_path, data_path_with_image_name = Preprocessor.__load_image_data_thumbnails__(
            data_path,
            cfg.compressed_image_height,
            cfg.compressed_image_width,
            cfg.compressed_channel,
            cfg.number_of_categories,
            cfg.number_of_images_per_category,
            root_dir,
            is_fixed_size,
            model_image_size,
            sess,
            yolo_model,
            input_image_shape,
            boxes,
            scores,
            classes,
            cfg.font_path,
            class_names,
            colors,
            output_path,
            json_path,
            test_path,
            False,  # Segmentation Flag
            True,  # Edge-detection Flag
            False,  # Extract object Flag
            False)  # Gray Scale Flag

        input_images_model_3, all_images, data_path, data_path_with_image_name = Preprocessor.__load_image_data_thumbnails__(
            data_path,
            cfg.image_height,
            cfg.image_width,
            cfg.channel,
            cfg.number_of_categories,
            cfg.number_of_images_per_category,
            root_dir,
            is_fixed_size,
            model_image_size,
            sess,
            yolo_model,
            input_image_shape,
            boxes,
            scores,
            classes,
            cfg.font_path,
            class_names,
            colors,
            output_path,
            json_path,
            test_path,
            False,  # Segmentation Flag
            False,  # Edge-detection Flag
            False,  # Extract object Flag
            False)  # Gray Scale Flag

        input_shape = [
            cfg.compressed_image_height, cfg.compressed_image_width,
            cfg.compressed_channel
        ]

        input_shape_3 = [cfg.image_height, cfg.image_width, cfg.channel]

        # Model Save Paths

        model_1_save_path = os.path.join(root_dir, cfg.model_1_save)
        model_2_save_path = os.path.join(root_dir, cfg.model_2_save)
        model_3_save_path = os.path.join(root_dir, cfg.model_3_save)

        Preprocessor.__create_output_directories__(model_1_save_path)
        Preprocessor.__create_output_directories__(model_2_save_path)
        Preprocessor.__create_output_directories__(model_3_save_path)

        # Instantiating the training class

        train = Train(input_images_model_1, input_images_model_2,
                      input_images_model_3, input_shape, input_shape_3,
                      cfg.batch_size, cfg.epochs, model_1_save_path,
                      model_2_save_path, model_3_save_path)

        # Output Path

        output_path_model_1 = os.path.join(root_dir, cfg.output_model_1)
        output_path_model_2 = os.path.join(root_dir, cfg.output_model_2)
        output_path_model_3 = os.path.join(root_dir, cfg.output_model_3)

        Preprocessor.__create_output_directories__(output_path_model_1)
        Preprocessor.__create_output_directories__(output_path_model_2)
        Preprocessor.__create_output_directories__(output_path_model_3)

        # FCN Model

        model_1 = train.__train_model_1__()

        # VGG Model

        model_2 = train.__train_model_2__()

        # Inception-v3

        model_3 = train.__train_model_3__()

        features_from_model_1 = Preprocessor.__get_score_model__(
            model_1, input_images_model_1, output_path_model_1)
        features_from_model_2 = Preprocessor.__get_score_model__(
            model_2, input_images_model_2, output_path_model_2)
        features_from_model_3 = Preprocessor.__get_score_model__(
            model_3, input_images_model_3, output_path_model_3)

        print("Output FeatureMap For Model 1 \n")
        print(features_from_model_1.shape)
        print("\n")

        print("Output FeatureMap For Model 2 \n")
        print(features_from_model_2.shape)
        print("\n")

        print("Output FeatureMap For Model 3 \n")
        print(features_from_model_3.shape)
        print("\n")