Example #1
    def createOptimizer(self, typeOptimizer: str, model: Model,
                        *args) -> Optimizer:
        '''
        THIS FUNCTION IS USED TO CREATE INHERITED INSTANCES OF OPTIMIZERS, e.g. PSO or GA
        :param typeOptimizer: str --> type of optimizer the user wants (pso or ga)
        :param model: Model object --> model to associate with the optimizer
        :param args: list of data --> (number of individuals, iterations, dimensions of the problem)
        :return: Optimizer --> concrete optimizer instance (PSO or GA)
        '''

        try:

            if model is None:
                raise CustomError.ErrorCreationModel(config.ERROR_NO_MODEL)

            if typeOptimizer == config.PSO_OPTIMIZER:
                return PSO.PSO(model, *args)
            elif typeOptimizer == config.GA_OPTIMIZER:
                return GA.GA(model, *args)
            else:
                raise CustomError.ErrorCreationModel(
                    config.ERROR_INVALID_OPTIMIZER)

        except:
            raise
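A minimal usage sketch for this factory method (the `OptimizerFactory` class name and the numeric values are assumptions, not taken from the source; the argument order follows the docstring):

    factory = OptimizerFactory()  # hypothetical owner class of createOptimizer
    pso = factory.createOptimizer(config.PSO_OPTIMIZER, model,
                                  20, 10, 5)  # individuals, iterations, dimensions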
Example #2
def get_subsample_of_data(percentage, data):
    '''
    THIS FUNCTION IS USED TO GET A SUBSAMPLE OF THE DATA, ALWAYS KEEPING THE SAME PERCENTAGE OF SAMPLES PER CLASS
    e.g.: if I want 20% of the data, I get that same percentage of samples from every class:
        e.g.: 1000 initial values for class 0 in 2000 samples --> in the end I get 200 samples for class 0 (class 0 still holds 50% of the samples)
    :param percentage: float --> percentage between 0 and 1, used to subsample the data
    :param data: DataFrame --> object with all samples
    :return: subsample: DataFrame --> object containing only the subsampled data
    '''

    try:

        if percentage == 0.0:
            raise CustomError.ErrorCreationModel(config.WARNING_SUBSAMPLING)

        # new subsample
        subsample = pd.DataFrame()

        # get % of rows for each class
        for i in range(config.NUMBER_CLASSES):
            query = "{} == {} ".format(config.TARGET, i)
            sub = data.query(query).sample(frac=percentage,
                                           replace=False,
                                           random_state=config.RANDOM_STATE)
            subsample = pd.concat([subsample, sub], ignore_index=True)  # DataFrame.append was removed in pandas 2.0

        return subsample

    except:
        raise CustomError.ErrorCreationModel(config.ERROR_ON_SUBSAMPLING)
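A small worked sketch of the stratified behaviour (assuming config.TARGET == 'target' and config.NUMBER_CLASSES == 2; the column names are hypothetical):

    import pandas as pd
    demo = pd.DataFrame({'target': [0] * 1000 + [1] * 1000,
                         'feature': list(range(2000))})
    # get_subsample_of_data(0.2, demo) --> ~400 rows, ~200 per class,
    # so each class keeps its original 50% share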
Example #3
    def applyStrategy(self, data: Data.Data, **kwargs):
        '''
        THIS FUNCTION APPLIES AN OVERSAMPLING TECHNIQUE (RandomOverSampler) ON THE TRAINING DATA
        :param data: Data object --> holds the training data (X_train) and targets (y_train)
        :param kwargs: no keyword arguments are accepted by this strategy
        :return X_train: numpy array --> resampled training data
        :return y_train: numpy array --> resampled training targets
        '''

        try:

            if kwargs:  # THIS STRATEGY ACCEPTS NO KEYWORD ARGUMENTS
                raise CustomError.ErrorCreationModel(
                    config.ERROR_NO_ARGS_ACCEPTED)

            numberValues = np.array([np.argmax(data.y_train, axis=1)])
            numberValues = numberValues.reshape(numberValues.shape[0] *
                                                numberValues.shape[1])
            occurrences_counter = np.bincount(numberValues)
            #print("\nNumber samples Class 0: ", occurrences_counter[0])
            #print("\nNumber samples Class 1: ", occurrences_counter[1])

            overSampler = RandomOverSampler(
                random_state=0)  # ALLOWS REPRODUCIBILITY

            # I NEED TO RESHAPE TRAINING DATA TO 2D ARRAY (SAMPLES, FEATURES)
            X_train = data.reshape4D_to_2D()

            # DECODE TARGET DATA, NEEDED TO APPLY THE RESAMPLE
            decoded_ytrain = data.decodeYData()

            # APPLY RESAMPLE OF DATA
            X_train, decoded_ytrain = overSampler.fit_resample(
                X_train, decoded_ytrain)

            # RESHAPE THE RESAMPLED DATA BACK FROM 2D TO 4D (SAMPLES, WIDTH, HEIGHT, CHANNELS)
            X_train = X_train.reshape(X_train.shape[0], config.WIDTH,
                                      config.HEIGHT, config.CHANNELS)

            occurrences_counter = np.bincount(decoded_ytrain)
            #print("\nNumber samples Class 0: ", occurrences_counter[0])
            #print("\nNumber samples Class 1: ", occurrences_counter[1])

            # TRANSFORM Y_DECODED TO CATEGORICAL AGAIN
            decoded_ytrain = keras.utils.to_categorical(
                decoded_ytrain, config.NUMBER_CLASSES)

            # SHUFFLE DATA
            X_train, decoded_ytrain = shuffle(X_train,
                                              decoded_ytrain,
                                              random_state=config.RANDOM_STATE)

            return X_train, decoded_ytrain

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_UNDERSAMPLING)
Example #4
    def objectiveFunction(self, acc, *args):
        '''
        THIS FUNCTION REPRESENTS THE PARENT OBJECTIVE FUNCTION OF THE OPTIMIZER
            - THE USER CAN OVERRIDE THIS METHOD IN A CONCRETE OPTIMIZER CLASS
        :param acc: final score on train
        :param args: first argument is a Keras Model,
                    last argument is a classification report (dict)
        :return: cost: float
        '''

        try:

            # get report
            report = args[-1]
            #recall_idc = report['macro avg']['recall']
            #precision_idc = report['macro avg']['precision']
            f1_idc = report['macro avg']['f1-score']

            # get model
            model = args[0]
            trainable_count = np.sum(
                [K.count_params(w) for w in model.trainable_weights])

            return 1e-9 * trainable_count + 6.0 * (1.0 - f1_idc)

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_OPTIMIZATION)
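As a worked example of this cost (illustrative numbers, not from the source): a model with 1,000,000 trainable parameters and a macro F1-score of 0.9 yields 1e-9 * 1e6 + 6.0 * (1.0 - 0.9) = 0.001 + 0.6 = 0.601, so the parameter-count term mainly breaks ties between models with near-identical F1.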
Example #5
    def predict(self, model : Model):

        try:

            segmented_images = []
            for i in range(self.data.X_train.shape[0]):

                ## get a copy of the image, to avoid damaging its content
                image_copy = np.copy(self.data.X_train[i])

                ## reshape to the 4 dimensions expected as input (batch, width, height, channels)
                reshaped_image = image_copy.reshape(1, image_copy.shape[0],
                                                    image_copy.shape[1],
                                                    image_copy.shape[2])

                ## get predicted values for the pixels of the image from the unet predict
                predicted_mask_values = model.predict(reshaped_image)
                predicted_mask_values = predicted_mask_values.reshape(
                    predicted_mask_values.shape[0] * predicted_mask_values.shape[1],
                    predicted_mask_values.shape[2])

                ## create binary mask with predicted values of image
                mask = config_func.defineMask(predicted_mask_values)

                ## concatenate real image and mask
                concatenated_mask = config_func.concate_image_mask(image_copy, mask)

                ## append segmented image to the list of predicted images
                segmented_images.append(concatenated_mask)

            return segmented_images

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_UNET_STRATEGY)
Example #6
    def applyStrategy(self, data: Data.Data, **kwargs):
        '''
        THIS FUNCTION IS RESPONSIBLE FOR SETTING UP DATA AUGMENTATION ON THE TRAINING DATA
        :param data: Data object --> holds the training data (X_train) and targets (y_train)
        :param kwargs: no keyword arguments are used
        :return: train_generator --> Keras iterator yielding batches of (augmented X, y)
        '''

        try:

            image_gen = ImageDataGenerator(
                horizontal_flip=config.HORIZONTAL_FLIP,
                vertical_flip=config.VERTICAL_FLIP,
                width_shift_range=config.WIDTH_SHIFT_RANGE,
                height_shift_range=config.HEIGHT_SHIFT_RANGE,
            )

            image_gen.fit(data.X_train, augment=True)  #DATA AUGMENTATION

            train_generator = image_gen.flow(
                data.X_train,
                data.y_train,
                shuffle=True,
                batch_size=config.BATCH_SIZE_ALEX_AUG,
                seed=config.RANDOM_STATE)

            return train_generator

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_DATA_AUG)
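A hedged sketch of consuming the returned iterator (the `strategy` and `data` names are assumptions; image_gen.flow returns a Keras NumpyArrayIterator):

    train_generator = strategy.applyStrategy(data)
    X_batch, y_batch = next(train_generator)  # one augmented batch of size config.BATCH_SIZE_ALEX_AUG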
Example #7
    def optimize(self) -> Tuple[float, float, ps.general_optimizer.SwarmOptimizer]:

        '''
        THIS FUNCTION IS RESPONSIBLE FOR APPLYING ALL THE LOGIC OF THE PSO CNN NETWORK OPTIMIZATION
        :return: Tuple[float, numpy array, SwarmOptimizer] --> best cost, best particle position and the pso optimizer
        '''

        try:

            #DEFINITION OF BOUNDS
            bounds = self.boundsDefinition()

            optimizer = None
            if config.TOPOLOGY_FLAG == 0: #global best topology
                optimizer = ps.single.GlobalBestPSO(n_particles=self.indiv, dimensions=self.dims, bh_strategy='shrink', vh_strategy='invert',
                                                    options=config.gbestOptions, bounds=bounds)
            else: #local best topology
                optimizer = ps.single.LocalBestPSO(n_particles=self.indiv, dimensions=self.dims, bh_strategy='shrink', vh_strategy='invert',
                                                    options=config.lbestOptions, bounds=bounds)

            cost, pos = optimizer.optimize(objective_func=self.loopAllParticles, iters=self.iters, n_processes=2)

            return cost, pos, optimizer

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_OPTIMIZATION)
Example #8
    def plotPositionHistory(self, optimizer, xLimits, yLimits, filename, xLabel, yLabel):

        '''
        THIS FUNCTION PLOTS THE POSITION HISTORY OF THE PARTICLES
        :param optimizer: optimizer object returned by the application/definition of PSO
        :param xLimits: numpy array (minLimit, maxLimit) of the x axis
        :param yLimits: numpy array (minLimit, maxLimit) of the y axis
        :param filename: name of the file written with the plot_contour animation (html)
        :param xLabel: name of the x axis
        :param yLabel: name of the y axis
        '''

        try:

            ##code ref: https://www.tutorialfor.com/questions-83899.htm
            d = Designer(limits=[xLimits, yLimits], label=[xLabel, yLabel])
            pos = []
            for i in range(config.ITERATIONS):
                pos.append(optimizer.pos_history[i][:, 0:2])
            animation = plot_contour(pos_history=pos,
                                     designer=d)

            plt.close(animation._fig)
            html_file = animation.to_jshtml()
            with open(filename, 'w') as f:
                f.write(html_file)

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_PLOTTING)
Example #9
    def loopAllParticles(self, particles):

        '''
        THIS FUNCTION APPLIES PARTICLES ITERATION, EXECUTION CNN MODEL
        :param particles: numpy array of shape (nParticles, dimensions)
        :return: list: all losses returned along all particles iteration
        '''

        try:

            losses = []
            for i in range(particles.shape[0]):
                config_func.print_log_message()
                if isinstance(self.model, DenseNet.DenseNet):
                    int_converted_values = [math.trunc(j) for j in particles[i][:-2]]
                    int_converted_values.append(particles[i][-2])  # compression rate --> float
                    int_converted_values.append(math.trunc(particles[i][-1]))
                else:
                    int_converted_values = [math.trunc(j) for j in particles[i]]  # CONVERSION OF THE DIMENSION VALUES OF THE PARTICLE
                print(int_converted_values)
                model, predictions, history = self.model.template_method(*int_converted_values) #APPLY BUILD, TRAIN AND PREDICT MODEL OPERATIONS, FOR EACH PARTICLE AND ITERATION
                decoded_predictions = config_func.decode_array(predictions)
                decoded_y_true = config_func.decode_array(self.model.data.y_test)
                report, conf = config_func.getConfusionMatrix(decoded_predictions, decoded_y_true, dict=True)
                acc = report['accuracy']  # y_test and predictions can't be compared directly, because some classes may have been left unclassified
                # define args to pass to objective function
                obj_args = (model, report)
                losses.append(self.objectiveFunction(acc, *obj_args)) #ADD COST LOSS TO LIST
                K.clear_session()
                gc.collect()
                del model
            return losses

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_OPTIMIZATION)
Example #10
    def objectiveFunction(self, acc, *args):
        '''
        :param acc: final score on train
        :param args: first argument is a Keras Model,
                    last argument is a classification report (dict)
        :return: cost: float
        '''

        try:

            # get report
            report = args[-1]
            recall_idc = report['macro avg']['recall']
            precision_idc = report['macro avg']['precision']
            f1_idc = report['macro avg']['f1-score']

            # get model
            model = args[0]
            trainable_count = np.sum(
                [K.count_params(w) for w in model.trainable_weights])

            return 1e-9 * trainable_count + 4 * (1.0 - f1_idc) +\
                   3 * (1.0 - recall_idc) + 2 * (1.0 - precision_idc)

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_OPTIMIZATION)
Example #11
    def __init__(self, model: Model.Model, individuals, iterations,
                 dimensions):
        if model is None:
            raise CustomError.ErrorCreationModel(config.ERROR_NO_MODEL)
        self.model = model
        self.indiv = individuals
        self.iters = iterations
        self.dims = dimensions
Example #12
    def convolution_block(self, tensor_input, *args):
        '''
        THIS FUNCTION REPRESENTS THE CONCEPT OF A CONVOLUTION BLOCK IN RESNET, COMBINING THE MAIN PATH AND THE SHORTCUT
            paper: https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf
            residual block explanation (matching output shapes): https://www.youtube.com/watch?v=wqkc-sj5H94
        :param tensor_input: tensor resulting from the previous block of the cnn architecture (conv_block or identity_block)
        :param args: number of filters to populate the conv2d layers, and the stride of the first conv layer
        :return: tensor merging the path created by the convs and the final shortcut
        '''

        try:

            ## keep a copy of the input: tensor_input is modified below and the two tensors are merged at the end
            shortcut_path = tensor_input

            tensor_input = Conv2D(
                filters=args[0],
                padding=config.SAME_PADDING,
                kernel_size=(3, 3),
                strides=args[1],
                # in the paper the first conv layer of a conv_block uses stride=1; stride=2 is used here to reduce computational cost
                kernel_initializer=he_uniform(config.HE_SEED),
                kernel_regularizer=l2(config.DECAY))(tensor_input)
            tensor_input = BatchNormalization(axis=3)(
                tensor_input
            )  ## batch normalization along the channels axis [samples, width, height, channels]
            tensor_input = Activation(config.RELU_FUNCTION)(tensor_input)

            tensor_input = Conv2D(
                filters=args[0],
                padding=config.SAME_PADDING,
                kernel_size=(3, 3),
                strides=1,
                kernel_initializer=he_uniform(config.HE_SEED),
                kernel_regularizer=l2(config.DECAY))(tensor_input)
            tensor_input = BatchNormalization(axis=3)(
                tensor_input
            )  ## batch normalization along the channels axis [samples, width, height, channels]
            tensor_input = Activation(config.RELU_FUNCTION)(tensor_input)

            ## definition of shortcut path
            shortcut_path = Conv2D(
                filters=args[0],
                kernel_size=(1, 1),
                strides=args[1],
                padding=config.SAME_PADDING,
                kernel_initializer=he_uniform(config.HE_SEED),
                kernel_regularizer=l2(config.DECAY))(shortcut_path)
            shortcut_path = BatchNormalization(axis=3)(shortcut_path)

            ## merge the conv path and the shortcut path; the result is passed to the activation function
            tensor_input = Add()([tensor_input, shortcut_path])
            tensor_input = Activation(config.RELU_FUNCTION)(tensor_input)

            return tensor_input

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_CONV_BLOCK)
Example #13
    def build(self, *args, trainedModel=None) -> Sequential:
        '''
        THIS FUNCTION IS RESPONSIBLE FOR THE INITIALIZATION OF THE DENSENET MODEL
        Reference: https://arxiv.org/pdf/1608.06993.pdf --> Original Paper
        Reference: https://github.com/liuzhuang13/DenseNet/blob/master/models/densenet.lua --> Original Author of DenseNet Paper
        :param args: list of integers, in logical order --> to populate cnn (filters) and dense (neurons)
        :return: DenseNet model
        '''

        try:

            #IF THE USER ALREADY HAS A TRAINED MODEL AND DOES NOT WANT TO BUILD A NEW ONE
            if trainedModel is not None:
                return trainedModel

            input_shape = (config.WIDTH, config.HEIGHT, config.CHANNELS)
            input = Input(shape=(input_shape))

            x = Conv2D(args[0],
                       kernel_size=(5, 5),
                       use_bias=False,
                       kernel_initializer=he_uniform(config.HE_SEED),
                       strides=2,
                       padding=config.SAME_PADDING,
                       kernel_regularizer=regularizers.l2(1e-4))(input)
            x = BatchNormalization(axis=3)(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)

            nFilters = args[0]
            for i in range(args[1]):
                x = self.dense_block(
                    x, args[2], args[3], args[3]
                )  # the initial number of filters equals the growth rate, and all convs use the same number of filters: the growth rate
                if i < (args[1] - 1):
                    x = self.transition(
                        x, args[4]
                    )  ## the last dense block is not followed by a transition layer; global average pooling takes its place

            x = BatchNormalization(axis=3)(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = GlobalAveragePooling2D()(x)

            x = Dense(config.NUMBER_CLASSES,
                      kernel_initializer=he_uniform(config.HE_SEED),
                      kernel_regularizer=regularizers.l2(1e-4))(
                          x)  # Num Classes for CIFAR-10
            outputs = Activation(config.SOFTMAX_FUNCTION)(x)

            model = mp(input, outputs)

            if config.BUILD_SUMMARY == 1:
                model.summary()

            return model

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_BUILD)
Example #14
    def build(self, *args, trainedModel=None) -> Sequential:
        '''
        THIS FUNCTION IS RESPONSIBLE FOR THE INITIALIZATION OF THE SEQUENTIAL ALEXNET MODEL
        :param args: list of integers, in logical order --> to populate cnn (filters) and dense (neurons)
        :return: Sequential: AlexNet MODEL
        '''

        try:

            #IF THE USER ALREADY HAS A TRAINED MODEL AND DOES NOT WANT TO BUILD A NEW ONE
            if trainedModel is not None:
                return trainedModel

            # definition of input shape and Input Layer
            input_shape = (config.WIDTH, config.HEIGHT, config.CHANNELS)
            input = Input(input_shape)

            ## add stack conv layer to the model
            numberFilters = args[1]
            model = None
            for i in range(args[0]):
                if i == 0:
                    model = self.add_stack(
                        input, numberFilters, 0.25,
                        input_shape)  # first stack convolution layer
                else:
                    model = self.add_stack(model, numberFilters, 0.25)
                numberFilters += args[2]

            # flatten
            model = Flatten()(model)

            # Full Connected Layer(s)
            for i in range(args[3]):
                model = Dense(units=args[4],
                              kernel_regularizer=regularizers.l2(config.DECAY),
                              kernel_initializer='he_uniform')(model)
                model = Activation(config.RELU_FUNCTION)(model)
                model = BatchNormalization()(model)
                if i != (args[3] - 1):
                    model = Dropout(0.25)(
                        model
                    )  ## applies Dropout to all FCLs except the one preceding the output layer (softmax)

            # Output Layer
            model = Dense(units=config.NUMBER_CLASSES)(model)
            model = Activation(config.SOFTMAX_FUNCTION)(model)

            # build model
            model = mp(inputs=input, outputs=model)

            if config.BUILD_SUMMARY == 1:
                model.summary()

            return model

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_BUILD)
Example #15
    def train(self, model : Model):

        try:

            if model is None:
                raise CustomError.ErrorCreationModel(config.ERROR_NO_MODEL)

            ## get weights file
            file_unet_weights = os.path.join(os.getcwd(), config.UNET_WIGHTS_PATH)

            model.load_weights(file_unet_weights)

            return [], model

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_UNET_STRATEGY)
Example #16
    def plotCostHistory(self, optimizer):
        '''
        THIS FUNCTION PLOTS THE COST HISTORY OF THE OPTIMIZATION PROCESS
        :param optimizer: optimizer object returned by the application/definition of PSO
        '''

        try:

            plot_cost_history(cost_history=optimizer.cost_history)

            plt.show()

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_PLOTTING)
Example #17
    def optimize(self):
        '''
        THIS FUNCTION APPLIES ALL THE LOGIC OF THE GENETIC ALGORITHM OPTIMIZATION
        :return: best individual found --> bidimensional array (k=1, GENE_LENGTH)
        '''

        try:

            creator.create('FitnessMax', base.Fitness, weights=(-1.0, ))  # negative weight --> minimization problem
            creator.create('Individual', list, fitness=creator.FitnessMax)

            toolbox = base.Toolbox()
            toolbox.register('binary', bernoulli.rvs, 0.5)
            toolbox.register('individual',
                             tools.initRepeat,
                             creator.Individual,
                             toolbox.binary,
                             n=self.dims)  #REGISTER INDIVIDUAL
            toolbox.register('population', tools.initRepeat, list,
                             toolbox.individual)  #REGISTER POPULATION

            toolbox.register(
                'mate', tools.cxOrdered
            )  #CROSSOVER TECHNIQUE --> https://www.researchgate.net/figure/The-order-based-crossover-OX-a-and-the-insertion-mutation-b-operators_fig2_224330103
            toolbox.register(
                'mutate', tools.mutShuffleIndexes, indpb=config.INDPB
            )  #MUTATION TECHNIQUE --> https://www.mdpi.com/1999-4893/12/10/201/htm
            toolbox.register('select',
                             tools.selTournament,
                             tournsize=config.TOURNAMENT_SIZE
                             )  #IN MINIMIZATION PROBLEMS I CAN'T USE ROULETTE
            toolbox.register('evaluate',
                             self.objectiveFunction)  #EVALUATION FUNCTION

            population = toolbox.population(n=self.indiv)
            r = algorithms.eaSimple(population,
                                    toolbox,
                                    cxpb=config.CXPB,
                                    mutpb=config.MUTPB,
                                    ngen=self.iters,
                                    verbose=True)

            bestValue = tools.selBest(
                population, k=1
            )  #I ONLY NEED BEST INDIVIDUAL --> ARRAY BIDIMENSIONAL (K=1, GENE_LENGTH)

            return bestValue
        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_OPTIMIZATION)
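A hedged usage sketch (the `ga` instance name is an assumption; as the comment above notes, selBest returns a list with k=1 individuals):

    best = ga.optimize()
    best_individual = best[0]  # list of self.dims binary genes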
Example #18
    def addStrategy(self, strategy: Strategy.Strategy) -> bool:
        '''
        THIS FUNCTION ESTABLISHES THE TRAINING STRATEGIES (UNDERSAMPLING AND OVERSAMPLING ARE MUTUALLY EXCLUSIVE, THE USER SHOULD PICK ONLY ONE)
        IF THE USER WANTS TO USE STRATEGIES, THEY NEED TO BE ADDED BEFORE CALLING THE TEMPLATE FUNCTION (template_method)
        :param strategy: Strategy object --> object descendent from Strategy, e.g. UnderSampling, OverSampling or DataAugmentation
        :return: boolean --> True if no errors occurred, False --> problem in the definition of any strategy
        '''

        try:

            self.StrategyList.append(strategy)

            return True
        except:
            raise CustomError.ErrorCreationModel(config.ERROR_APPEND_STRATEGY)
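A hedged sketch of the intended call order (constructor arguments and the `net` / `args_values` names are assumptions):

    net.addStrategy(OverSampling.OverSampling())  # strategies first
    net.addStrategy(DataAugmentation.DataAugmentation())
    model, predictions, history = net.template_method(*args_values)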
Example #19
    def identity_block(self, tensor_input, *args):
        '''
        THIS FUNCTION MODELS THE CONCEPT OF AN IDENTITY BLOCK
            paper: https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf
        :param tensor_input: tensor resulting from the previous block of the cnn architecture (conv_block or identity_block)
        :param args: number of filters to populate the conv2d layers
        :return: tensor merging the input and the identity conv path
        '''

        try:

            ## keep a copy of the input: tensor_input is modified below and the two tensors are merged at the end
            shortcut = tensor_input

            tensor_input = Conv2D(
                filters=args[0],
                padding=config.SAME_PADDING,
                kernel_size=(3, 3),
                strides=1,
                kernel_initializer=he_uniform(config.HE_SEED),
                kernel_regularizer=l2(config.DECAY))(tensor_input)
            tensor_input = BatchNormalization(axis=3)(
                tensor_input
            )  ## batch normalization along the channels axis [samples, width, height, channels]
            tensor_input = Activation(config.RELU_FUNCTION)(tensor_input)

            tensor_input = Conv2D(
                filters=args[0],
                padding=config.SAME_PADDING,
                kernel_size=(3, 3),
                strides=1,
                kernel_initializer=he_uniform(config.HE_SEED),
                kernel_regularizer=l2(config.DECAY))(tensor_input)
            tensor_input = BatchNormalization(axis=3)(
                tensor_input
            )  ## batch normalization along the channels axis [samples, width, height, channels]
            #tensor_input = Activation(config.RELU_FUNCTION)(tensor_input)

            ## merge the initial input and the identity path just created; the result is passed to the activation function
            tensor_input = Add()([tensor_input, shortcut])
            tensor_input = Activation(config.RELU_FUNCTION)(tensor_input)

            return tensor_input

        except:
            raise CustomError.ErrorCreationModel(
                config.ERROR_ON_IDENTITY_BLOCK)
Example #20
def input_w_or_consumption(state, space):
    input_temp = input(f'Which property of the {state} heat-transfer fluid '
                       f'is known: velocity or flow rate? ').lower()
    if input_temp not in ('velocity', 'flow rate'):
        message = f'An invalid property of the {state} heat-transfer ' \
                  f'fluid was entered.'
        raise InputError(message)
    if input_temp == 'velocity':
        w = input(f'Enter the velocity of the {state} heat-transfer fluid, m/s: ')
        try:
            w = float(w)
        except ValueError:
            message = f'An invalid value was entered for the velocity of ' \
                      f'the {state} heat-transfer fluid.'
            raise InputError(message)
        if w <= 0:
            message = f'A negative or zero value was entered for the ' \
                      f'velocity of the {state} heat-transfer fluid.'
            raise InputError(message)
        consumption = None
        print('-' * 69)
        return w, consumption
    elif input_temp == 'flow rate':
        if space == 'in':
            consumption = input(f'Enter the flow rate of the {state} '
                                f'heat-transfer fluid, kg/s: ')
            try:
                consumption = float(consumption)
            except ValueError:
                message = f'An invalid value was entered for the flow rate ' \
                          f'of the {state} heat-transfer fluid.'
                raise InputError(message)
            if consumption <= 0:
                message = f'A negative or zero value was entered for the ' \
                          f'flow rate of the {state} heat-transfer fluid.'
                raise InputError(message)
            w = None
            print('-' * 69)
            return w, consumption
        elif space == 'out':
            message = 'Calculating the flow rate without a known value of ' \
                      'the shell diameter of the heat exchanger is not supported.'
            raise CustomError(message)
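A hedged usage sketch (the 'hot' and 'in' argument values are assumptions about the caller):

    # exactly one of the two returned values is set; the other is None
    w, consumption = input_w_or_consumption('hot', 'in')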
Example #21
    def template_method(self, *args) -> Tuple[Sequential, np.array, History]:
        '''
        https://refactoring.guru/design-patterns/template-method/python/example
        THIS FUNCTION REPRESENTS A TEMPLATE METHOD PATTERN THAT EXECUTES THE WHOLE SEQUENCE OF JOBS TO DO
        :param args: list of integers in logical order to populate the cnn and dense layers (filters, neurons; the last value is the batch size)
        :return: Sequential: trained model
        :return: numpy array: model predictions on test data
        :return: History.history: history of the trained model
        '''

        try:

            model = self.build(*args)
            history, model = self.train(model, args[-1])
            predictions = self.predict(model)

            return model, predictions, history
        except:
            raise CustomError.ErrorCreationModel(config.ERROR_MODEL_EXECUTION)
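A hedged call sketch (hypothetical argument values, laid out to match the AlexNet-style build of Example #14; the final value is the batch size consumed by train):

    # (stacks, initial filters, filter increment, FCLs, units, batch size)
    model, predictions, history = net.template_method(2, 16, 16, 1, 128, 64)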
Example #22
def el_lam_count(length: float, d: float) -> float:
    """Computes the correction factor for the flow-stabilization section
    in laminar flow.
    """
    el_m = [[1, 1.9], [4, 1.7], [5, 1.44], [10, 1.28], [15, 1.18], [20, 1.13],
            [30, 1.05], [40, 1.02], [50, 1]]
    ratio = length / d
    if ratio >= 50:
        return 1
    if ratio < 1:
        raise CustomError('The diameter exceeds the length of the pipe!')
    for point in el_m:  # exact table match
        if point[0] == ratio:
            return point[1]
    # no exact match: interpolate between the bracketing table entries
    # using the interpolation function mentioned above
    for j in range(len(el_m) - 1):
        if el_m[j + 1][0] > ratio > el_m[j][0]:
            return interpolation(el_m[j + 1][1], el_m[j][1],
                                 el_m[j + 1][0], el_m[j][0],
                                 ratio)
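A worked check of the table logic (illustrative values, assuming interpolation is linear): for length=2.0 and d=0.25 the ratio is 8, which falls between the (5, 1.44) and (10, 1.28) entries, so 1.44 + (8 - 5) / (10 - 5) * (1.28 - 1.44) ≈ 1.34.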
Example #23
    def train(self, model: Sequential, *args) -> Tuple[History, Sequential]:
        '''
        THIS FUNCTION IS RESPONSIBLE FOR TRAINING THE MODEL
        :param model: Sequential model built before, or passed in (already trained model)
        :param args: only one value, the batch size
        :return: History.history --> train and validation loss and metrics variation along the epochs
        :return: Sequential model --> trained model
        '''

        try:

            if model is None:
                raise CustomError.ErrorCreationModel(config.ERROR_NO_MODEL)

            # OPTIMIZER
            #opt = SGD(learning_rate=config.LEARNING_RATE, decay=config.DECAY, nesterov=True, momentum=0.9)
            opt = Adam(learning_rate=config.LEARNING_RATE, decay=config.DECAY)

            # COMPILE
            model.compile(optimizer=opt,
                          loss=config.LOSS_CATEGORICAL,
                          metrics=[config.ACCURACY_METRIC])

            #GET STRATEGIES RETURN DATA, AND IF DATA_AUGMENTATION IS APPLIED TRAIN GENERATOR
            train_generator = None

            # get data
            X_train = self.data.X_train
            y_train = self.data.y_train

            if self.StrategyList:  # if strategylist is not empty
                for i, j in zip(self.StrategyList,
                                range(len(self.StrategyList))):
                    if isinstance(i, DataAugmentation.DataAugmentation):
                        train_generator = self.StrategyList[j].applyStrategy(
                            self.data)
                    else:
                        X_train, y_train = self.StrategyList[j].applyStrategy(
                            self.data)

            es_callback = EarlyStopping(monitor=config.VALIDATION_LOSS,
                                        patience=5,
                                        restore_best_weights=True)
            decrease_callback = ReduceLROnPlateau(monitor=config.LOSS,
                                                  patience=1,
                                                  factor=0.7,
                                                  mode='min',
                                                  verbose=1,
                                                  min_lr=0.000001)
            decrease_callback2 = ReduceLROnPlateau(
                monitor=config.VALIDATION_LOSS,
                patience=1,
                factor=0.7,
                mode='min',
                verbose=1,
                min_lr=0.000001)

            if train_generator is None:  #NO DATA AUGMENTATION

                history = model.fit(
                    x=X_train,
                    y=y_train,
                    batch_size=args[0],
                    epochs=config.EPOCHS,
                    validation_data=(self.data.X_val, self.data.y_val),
                    shuffle=True,
                    callbacks=[
                        es_callback, decrease_callback, decrease_callback2
                    ],
                    #class_weight=config.class_weights
                    verbose=config.TRAIN_VERBOSE)

                return history, model

            #ELSE APPLY DATA AUGMENTATION
            history = model.fit_generator(
                generator=train_generator,
                validation_data=(self.data.X_val, self.data.y_val),
                epochs=config.EPOCHS,
                steps_per_epoch=X_train.shape[0] // args[0],
                shuffle=True,
                #class_weight=config.class_weights,
                verbose=config.TRAIN_VERBOSE,
                callbacks=[es_callback, decrease_callback, decrease_callback2])

            return history, model

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_TRAINING)
Example #24
    def build(self, *args, trainedModel=None) -> Sequential:

        #resnet v1, based on: https://keras.io/examples/cifar10_resnet/
        #----------PAPER------------- https://arxiv.org/pdf/1512.03385.pdf
        #---------RESNET 18 AND 34 ARCHITECTURE: https://datascience.stackexchange.com/questions/33022/how-to-interpert-resnet50-layer-types/47489
        #---------VERY GOOD EXPLANATION: http://ethen8181.github.io/machine-learning/keras/resnet_cam/resnet_cam.html#Identity-Block
        ## model based on the resnet-18 approach, as described in the paper cited in the identity_block and convolution_block functions
        try:

            # IF THE USER ALREADY HAS A TRAINED MODEL AND DOES NOT WANT TO BUILD A NEW ONE
            if trainedModel is not None:
                return trainedModel

            input_shape = (config.HEIGHT, config.WIDTH, config.CHANNELS)
            input_shape = Input(input_shape)

            X = ZeroPadding2D((3, 3))(input_shape)

            ## normal convolution layer --> first entry
            X = Conv2D(filters=args[0],
                       kernel_size=(5, 5),
                       strides=2,
                       padding=config.SAME_PADDING,
                       kernel_initializer=he_uniform(config.HE_SEED),
                       kernel_regularizer=l2(config.DECAY))(X)
            X = BatchNormalization(axis=3)(X)
            X = Activation(config.RELU_FUNCTION)(X)
            X = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(X)

            ## loop of convolution and identity blocks
            numberFilters = args[0]
            for i in range(args[1]):
                if i == 0:
                    X = self.convolution_block(
                        X, *(numberFilters,
                             1))  #first set of building blocks, stride is 1
                else:
                    X = self.convolution_block(
                        X, *(numberFilters,
                             2))  #next sets of building blocks, stride is 2
                for j in range(args[2]):
                    X = self.identity_block(X, *(numberFilters, ))
                numberFilters += args[3]

            X = GlobalAveragePooling2D()(X)

            X = Dense(units=config.NUMBER_CLASSES,
                      kernel_initializer=he_uniform(config.HE_SEED),
                      kernel_regularizer=l2(config.DECAY))(X)
            X = Activation(config.SOFTMAX_FUNCTION)(X)

            ## finally model creation
            model = mp(inputs=input_shape, outputs=X)

            if config.BUILD_SUMMARY == 1:
                model.summary()
            #plot_model(model, show_shapes=True, to_file='residual_module.png')

            return model

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_BUILD)
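A hedged sketch of calling this build (argument semantics inferred from the loop above: initial filters, number of stages, identity blocks per stage, filter increment; the values are hypothetical):

    resnet_model = resnet.build(64, 3, 2, 64)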
Example #25
def getDataFromImages(dataframe: pandas.DataFrame, size):
    '''
    THIS FUNCTION IS USED TO RETRIEVE THE X AND Y DATA FROM ALL IMAGES
    :param dataframe: pandas DataFrame --> with all image paths and corresponding targets
    :param size: integer --> this value is <= the total number of images
            e.g. total images equals 10000 and the user only wants 5000
            note: stratification is used to keep the respective percentage of samples per class
    :return: X: numpy array --> data from images (pixels of the images)
    :return: Y: numpy array --> targets for each image
    '''

    try:

        ##GET TOTAL IMAGES
        number_images = dataframe.shape[0]
        X = []
        Y = []

        d = dict(enumerate(dataframe.dx.cat.categories))
        #print(d)
        numeric_targets = dataframe.dx.cat.codes.values
        #unique, counts = numpy.unique(numeric_targets, return_counts=True)
        #samples_per_label = dict(zip(unique, counts))
        #print(samples_per_label)

        if size > number_images:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_GET_DATA)

        elif size < number_images:
            ## GET PERCENTAGE OF IMAGES BY CLASS
            images_by_class = [
                int(
                    round(
                        ((dataframe.loc[dataframe.dx == config.DICT_TARGETS[i],
                                        config.DX].count()) / number_images) *
                        size)) for i in range(len(dataframe.dx.unique()))
            ]

            counter_by_class = [
                config.DICT_TARGETS[i]
                for i in range(len(dataframe.dx.unique()))
            ]
            for i in range(dataframe.shape[0]):
                target = dataframe.at[i, config.DX]  # GET TARGET OF THIS IMAGE
                index_target_counter = counter_by_class.index(
                    target
                )  # CORRESPONDING INDEX BETWEEN CLASS AND NUMBER OF PERMITTED IMAGES FOR THIS CLASS
                if images_by_class[
                        index_target_counter] != 0:  ## IF THIS CLASS STILL ALLOWS MORE IMAGES
                    X.append(getX_Y_Image(dataframe.at[i, config.PATH]))
                    Y.append(numeric_targets[i])
                    images_by_class[index_target_counter] = images_by_class[
                        index_target_counter] - 1  # DECREASE THE NUMBER OF IMAGES ALLOWED FOR THIS CLASS
                else:
                    continue
                if all(
                        images_by_class[k] == 0
                        for k in range(len(images_by_class))
                ):  ## JOB FINISHED --> STRATIFIED IMAGES ALREADY COLLECTED FOR ALL CLASSES
                    break

            return np.array(X), np.array(Y)

        else:  ## size == number_images, the user wants all images

            for i in range(dataframe.shape[0]):
                X.append(getX_Y_Image(dataframe.at[i, config.PATH]))
                Y.append(numeric_targets[i])

            return np.array(X), np.array(Y)
    except:
        raise CustomError.ErrorCreationModel(config.ERROR_ON_GET_DATA)
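A hedged usage sketch (the `metadata_df` name is an assumption; the DataFrame must hold the categorical 'dx' column and the image paths the function expects):

    X, Y = getDataFromImages(metadata_df, size=5000)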
Example #26
    def train(self, model : Sequential, *args) -> Tuple[History, Sequential]:

        try:

            if model is None:
                raise CustomError.ErrorCreationModel(config.ERROR_NO_MODEL)

            # OPTIMIZER
            opt = Adam(learning_rate=config.LEARNING_RATE, decay=config.DECAY)
            #opt = SGD(learning_rate=0.001, decay=config.DECAY, momentum=0.9, nesterov=True)

            # COMPILE
            model.compile(optimizer=opt, loss=config.LOSS_CATEGORICAL, metrics=[config.ACCURACY_METRIC])

            #GET STRATEGIES RETURN DATA, AND IF DATA_AUGMENTATION IS APPLIED TRAIN GENERATOR
            train_generator = None

            # get data
            X_train = self.data.X_train
            y_train = self.data.y_train

            if self.StrategyList: # if strategylist is not empty
                for i, j in zip(self.StrategyList, range(len(self.StrategyList))):
                    if isinstance(i, DataAugmentation.DataAugmentation):
                        train_generator = self.StrategyList[j].applyStrategy(self.data)
                    if isinstance(i, OverSampling.OverSampling):
                        X_train, y_train = self.StrategyList[j].applyStrategy(self.data)
                    if isinstance(i, UnderSampling.UnderSampling):
                        X_train, y_train = self.StrategyList[j].applyStrategy(self.data)

            es_callback = EarlyStopping(monitor='val_loss', patience=2)
            decrease_callback = ReduceLROnPlateau(monitor='loss',
                                                        patience=1,
                                                        factor=0.7,
                                                        mode='min',
                                                        verbose=1,
                                                        min_lr=0.000001)

            decrease_callback2 = ReduceLROnPlateau(monitor='val_loss',
                                                        patience=1,
                                                        factor=0.7,
                                                        mode='min',
                                                        verbose=1,
                                                        min_lr=0.000001)

            #CLASS WEIGHTS
            weights_y_train = config_func.decode_array(y_train)
            class_weights = dict(enumerate(class_weight.compute_class_weight(
                'balanced', numpy.unique(weights_y_train),
                weights_y_train)))  # Keras expects a dict {class index: weight}

            if train_generator is None: #NO DATA AUGMENTATION

                history = model.fit(
                    x=X_train,
                    y=y_train,
                    batch_size=args[0],
                    epochs=config.EPOCHS,
                    validation_data=(self.data.X_val, self.data.y_val),
                    shuffle=True,
                    callbacks=[es_callback, decrease_callback, decrease_callback2],
                    verbose=config.TRAIN_VERBOSE,
                    class_weight=class_weights
                )

                return history, model

            #ELSE APPLY DATA AUGMENTATION

            history = model.fit_generator(
                generator=train_generator,
                validation_data=(self.data.X_val, self.data.y_val),
                epochs=config.EPOCHS,
                steps_per_epoch=X_train.shape[0] // args[0],
                shuffle=True,
                class_weight=class_weights,
                verbose=config.TRAIN_VERBOSE,
                callbacks= [es_callback, decrease_callback, decrease_callback2]
            )

            return history, model

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_TRAINING)
Example #27
    def build(self, *args, trainedModel=None):

        try:

            input_shape = (config.HEIGHT, config.WIDTH, config.CHANNELS)
            img_input = Input(input_shape)
            x = Conv2D(64, (3, 3), padding=config.SAME_PADDING, name='conv1', strides=(1, 1))(img_input)
            x = BatchNormalization(name='bn1')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(64, (3, 3), padding=config.SAME_PADDING, name='conv2')(x)
            x = BatchNormalization(name='bn2')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D()(x)

            x = Conv2D(128, (3, 3), padding=config.SAME_PADDING, name='conv3')(x)
            x = BatchNormalization(name='bn3')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(128, (3, 3), padding=config.SAME_PADDING, name='conv4')(x)
            x = BatchNormalization(name='bn4')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D()(x)

            x = Conv2D(256, (3, 3), padding=config.SAME_PADDING, name='conv5')(x)
            x = BatchNormalization(name='bn5')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(256, (3, 3), padding=config.SAME_PADDING, name='conv6')(x)
            x = BatchNormalization(name='bn6')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(256, (3, 3), padding=config.SAME_PADDING, name='conv7')(x)
            x = BatchNormalization(name='bn7')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D()(x)

            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv8')(x)
            x = BatchNormalization(name='bn8')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv9')(x)
            x = BatchNormalization(name='bn9')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv10')(x)
            x = BatchNormalization(name='bn10')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D()(x)

            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv11')(x)
            x = BatchNormalization(name='bn11')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv12')(x)
            x = BatchNormalization(name='bn12')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv13')(x)
            x = BatchNormalization(name='bn13')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D()(x)

            x = Dense(1024, activation=config.RELU_FUNCTION, name='fc1')(x)
            x = Dense(1024, activation=config.RELU_FUNCTION, name='fc2')(x)

            # Decoding Layer
            x = UpSampling2D()(x)
            x = Conv2DTranspose(512, (3, 3), padding=config.SAME_PADDING, name='deconv1')(x)
            x = BatchNormalization(name='bn14')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(512, (3, 3), padding=config.SAME_PADDING, name='deconv2')(x)
            x = BatchNormalization(name='bn15')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(512, (3, 3), padding=config.SAME_PADDING, name='deconv3')(x)
            x = BatchNormalization(name='bn16')(x)
            x = Activation(config.RELU_FUNCTION)(x)

            x = UpSampling2D()(x)
            x = Conv2DTranspose(512, (3, 3), padding=config.SAME_PADDING, name='deconv4')(x)
            x = BatchNormalization(name='bn17')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(512, (3, 3), padding=config.SAME_PADDING, name='deconv5')(x)
            x = BatchNormalization(name='bn18')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(256, (3, 3), padding=config.SAME_PADDING, name='deconv6')(x)
            x = BatchNormalization(name='bn19')(x)
            x = Activation(config.RELU_FUNCTION)(x)

            x = UpSampling2D()(x)
            x = Conv2DTranspose(256, (3, 3), padding=config.SAME_PADDING, name='deconv7')(x)
            x = BatchNormalization(name='bn20')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(256, (3, 3), padding=config.SAME_PADDING, name='deconv8')(x)
            x = BatchNormalization(name='bn21')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(128, (3, 3), padding=config.SAME_PADDING, name='deconv9')(x)
            x = BatchNormalization(name='bn22')(x)
            x = Activation(config.RELU_FUNCTION)(x)

            x = UpSampling2D()(x)
            x = Conv2DTranspose(128, (3, 3), padding=config.SAME_PADDING, name='deconv10')(x)
            x = BatchNormalization(name='bn23')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(64, (3, 3), padding=config.SAME_PADDING, name='deconv11')(x)
            x = BatchNormalization(name='bn24')(x)
            x = Activation(config.RELU_FUNCTION)(x)

            x = UpSampling2D()(x)
            x = Conv2DTranspose(64, (3, 3), padding=config.SAME_PADDING, name='deconv12')(x)
            x = BatchNormalization(name='bn25')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(1, (3, 3), padding=config.SAME_PADDING, name='deconv13')(x)
            x = BatchNormalization(name='bn26')(x)
            x = Activation(config.SIGMOID_FUNCTION)(x)

            pred = Reshape((config.HEIGHT, config.WIDTH))(x) #reshape to single channel
            model = mp(inputs=img_input, outputs=pred)

            model.summary()
            return model

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_UNET_STRATEGY)
Example #28
    def train(self, model: Sequential, *args) -> Tuple[History, Sequential]:
        '''
        THIS FUNCTION IS RESPONSIBLE FOR TRAINING THE MODEL
        :param model: Sequential model built before, or passed in (already trained model)
        :return: History.history --> train and validation loss and metrics variation along the epochs
        :return: Sequential model --> trained model
        '''

        try:

            if model is None:
                raise CustomError.ErrorCreationModel(config.ERROR_NO_MODEL)

            # OPTIMIZER
            opt = Adam(learning_rate=config.LEARNING_RATE, decay=config.DECAY)

            # COMPILE
            model.compile(optimizer=opt,
                          loss=config.LOSS_CATEGORICAL,
                          metrics=[config.ACCURACY_METRIC])

            #GET STRATEGIES RETURN DATA, AND IF DATA_AUGMENTATION IS APPLIED TRAIN GENERATOR
            train_generator = None

            # get data
            X_train = self.data.X_train
            y_train = self.data.y_train

            if self.StrategyList:  # if strategylist is not empty
                for i, j in zip(self.StrategyList,
                                range(len(self.StrategyList))):
                    if isinstance(i, DataAugmentation.DataAugmentation):
                        train_generator = self.StrategyList[j].applyStrategy(
                            self.data)
                    if isinstance(i, OverSampling.OverSampling):
                        X_train, y_train = self.StrategyList[j].applyStrategy(
                            self.data)
                    if isinstance(i, UnderSampling.UnderSampling):
                        X_train, y_train = self.StrategyList[j].applyStrategy(
                            self.data)

            #reduce_lr = LearningRateScheduler(config_func.lr_scheduler)
            es_callback = EarlyStopping(monitor='val_loss',
                                        patience=5,
                                        restore_best_weights=True)
            decrease_callback = ReduceLROnPlateau(monitor='val_loss',
                                                  patience=1,
                                                  factor=0.7,
                                                  mode='min',
                                                  verbose=0,
                                                  min_lr=0.000001)

            decrease_callback2 = ReduceLROnPlateau(monitor='loss',
                                                   patience=1,
                                                   factor=0.7,
                                                   mode='min',
                                                   verbose=0,
                                                   min_lr=0.000001)

            weights_y_train = config_func.decode_array(y_train)
            class_weights = dict(enumerate(class_weight.compute_class_weight(
                'balanced', np.unique(weights_y_train),
                weights_y_train)))  # Keras expects a dict {class index: weight}

            if train_generator is None:  #NO DATA AUGMENTATION

                history = model.fit(
                    x=X_train,
                    y=y_train,
                    batch_size=args[0],
                    epochs=config.EPOCHS,
                    validation_data=(self.data.X_val, self.data.y_val),
                    shuffle=True,
                    #use_multiprocessing=config.MULTIPROCESSING,
                    callbacks=[
                        decrease_callback2, es_callback, decrease_callback
                    ],
                    class_weight=class_weights,
                    verbose=config.TRAIN_VERBOSE)

                return history, model

            #ELSE APPLY DATA AUGMENTATION

            history = model.fit_generator(
                generator=train_generator,
                validation_data=(self.data.X_val, self.data.y_val),
                epochs=config.EPOCHS,
                steps_per_epoch=X_train.shape[0] // args[0],
                shuffle=True,
                callbacks=[decrease_callback2, es_callback, decrease_callback],
                verbose=config.TRAIN_VERBOSE,
                class_weight=class_weights)

            return history, model

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_TRAINING)