Example #1
    def _enable_float32(self):
        dtype = 'float32'

        K.set_floatx(dtype)
        K.set_epsilon(1e-7)

        return dtype
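
A quick aside (not from any of the listed projects): K in these snippets is assumed to be tensorflow.keras.backend, and the two setters can be verified directly, as in this minimal sketch.

# Minimal sketch (hypothetical): confirm the backend state _enable_float32 sets up.
from tensorflow.keras import backend as K

K.set_floatx('float32')
K.set_epsilon(1e-7)   # Keras' default fuzz factor for float32
print(K.floatx())     # 'float32'
print(K.epsilon())    # 1e-07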
Example #2
def create_model(layers, activation, initializer, epochs, lr=0.001):
    model = Sequential()
    opt = optimizers.Adam(learning_rate=lr, decay=lr / epochs)

    for i in range(len(layers)):
        if i == 0:
            model.add(
                Dense(
                    layers[i],
                    name='Input',
                    input_dim=numinputs,
                    activation=activation,
                    kernel_initializer=initializer,
                ))
        else:
            model.add(
                Dense(layers[i],
                      activation=activation,
                      kernel_initializer=initializer))

    model.add(
        Dense(numoutput,
              name='Output',
              activation=activation,
              kernel_initializer=initializer))

    backend.set_epsilon(1)
    print("COMPILE!")
    model.compile(optimizer=opt, loss='mse', metrics=['mse', 'mae', 'mape'])
    model.summary()

    return model
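
A note on backend.set_epsilon(1) above: Keras computes the 'mape' metric roughly as 100 * mean(|y_true - y_pred| / max(|y_true|, epsilon)), so targets at or near zero get divided by the default epsilon of 1e-7 and the metric explodes; raising epsilon to 1 clips that denominator at 1. A minimal sketch of the effect, with made-up values rather than this project's data:

# Hypothetical illustration of why set_epsilon(1) tames MAPE on near-zero targets.
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.losses import mean_absolute_percentage_error

y_true = np.array([[0.0, 0.5]], dtype='float32')
y_pred = np.array([[0.1, 0.4]], dtype='float32')

K.set_epsilon(1e-7)  # default: the zero target is divided by 1e-7
print(mean_absolute_percentage_error(y_true, y_pred).numpy())  # ~[5.e+07]

K.set_epsilon(1)     # the trick used in the snippet above
print(mean_absolute_percentage_error(y_true, y_pred).numpy())  # ~[10.]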
Example #3
    def _enable_float16(self):
        dtype = 'float16'

        K.set_floatx(dtype)
        K.set_epsilon(1e-4)

        return dtype
Example #4
def BuildModel(dataShape, modelName, learningRate):

    K.set_floatx('float16')
    K.set_epsilon(1e-4)

    input0 = tf.keras.Input(shape=(dataShape, dataShape, 3),
                            name='input_0',
                            dtype='float16')  #Scene color
    input1 = tf.keras.Input(shape=(dataShape, dataShape, 1),
                            name='input_1',
                            dtype='float16')  #Depth 0
    input2 = tf.keras.Input(shape=(dataShape, dataShape, 1),
                            name='input_2',
                            dtype='float16')  #Depth -1
    input3 = tf.keras.Input(shape=(dataShape, dataShape, 1),
                            name='input_3',
                            dtype='float16')  #Depth -2

    modelFunc = importlib.import_module('Models.' + modelName)

    model = modelFunc.MakeModel([input0, input1, input2, input3], dataShape,
                                modelName)
    print("Loaded model from disk")

    model.compile(loss=Loss,
                  optimizer=LossScaleOptimizer(
                      RMSprop(lr=learningRate, epsilon=1e-4), 1000))

    model.summary()

    return model
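
Example #4 pairs a float16 backend with a loss-scale-wrapped optimizer: the fixed factor of 1000 multiplies the loss before backpropagation so small gradients do not underflow in half precision, and is divided back out before the weight update. The snippet relies on its own LossScaleOptimizer import; under current TF 2.x the same idea would look roughly like this sketch:

# Sketch (assumes the TF 2.4+ API), not the wrapper imported by the example above.
import tensorflow as tf

base_opt = tf.keras.optimizers.RMSprop(learning_rate=1e-4, epsilon=1e-4)
opt = tf.keras.mixed_precision.LossScaleOptimizer(base_opt,
                                                  dynamic=False,
                                                  initial_scale=1000)
# model.compile(loss=..., optimizer=opt) then trains as usual.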
Example #5
def precision(y_true, y_pred):
    K.set_epsilon(1e-05)
    #y_pred = tf.convert_to_tensor(y_pred, np.float32)
    #y_true = tf.convert_to_tensor(y_true, np.float32)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example #6
def recall(y_true, y_pred):
    K.set_epsilon(1e-05)
    #y_pred = tf.convert_to_tensor(y_pred, np.float32)
    #y_true = tf.convert_to_tensor(y_true, np.float32)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
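
The precision and recall functions in Examples #5 and #6 take (y_true, y_pred) tensors, so they can be passed straight to compile() as custom metrics. A hypothetical usage sketch on a toy binary classifier (it assumes the same tensorflow.keras.backend import those snippets rely on):

# Hypothetical usage of the custom metrics defined above.
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

model = Sequential([Dense(1, activation='sigmoid', input_shape=(4,))])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[precision, recall])    # functions from Examples #5 and #6

X = np.random.rand(32, 4).astype('float32')
y = (np.random.rand(32, 1) > 0.5).astype('float32')
model.fit(X, y, epochs=1, verbose=0)
print(model.evaluate(X, y, verbose=0))        # [loss, precision, recall], batch-wise averages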
Example #7
    def _set_precision(calculation_dtype, calculation_epsilon):
        # enable single/half/double precision
        K.set_floatx(calculation_dtype)
        K.set_epsilon(calculation_epsilon)

        # enable mixed precision
        if "float16" in calculation_dtype:

            mixed_precision.set_global_policy("mixed_float16")
Example #8
def main(use_mixed_precision=False,
         training_batch_size=TRAINING_BATCH_SIZE,
         generation_batch_size=generation_batch_size,
         generator_optimizer=generator_optimizer,
         discriminator_optimizer=discriminator_optimizer):
    if use_mixed_precision:
        print('Using Mixed Precision')
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_policy(policy)
    else:
        policy = mixed_precision.Policy('float32')
        mixed_precision.set_policy(policy)
    epsilon = 1e-7
    dtype = 'float32'
    K.set_epsilon(epsilon)
    K.set_floatx(dtype)
    print(K.floatx(), K.epsilon(), training_batch_size)
    print('Compute dtype: %s' % policy.compute_dtype)
    print('Variable dtype: %s' % policy.variable_dtype)
    tf.autograph.set_verbosity(0, False)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    # if start_from_scratch:
    #     initializeSnakeIdentifier(
    #         train_datagen,
    #         test_datagen
    #     )
    snek_generator = createSnekMaker()
    snek_discriminator = initializeSnakeIdentifier()
    # snek_discriminator.predict([baby_noise, tf.constant([0]*32)])
    gan = make_gan(snek_discriminator, snek_generator)
    snek_discriminator.compile(optimizer=discriminator_optimizer,
                               loss='binary_crossentropy')

    train_df = pd.read_csv('./classes_train.csv')

    checkpoint_dir = './snek_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    print(checkpoint_prefix)
    checkpoint = tf.train.Checkpoint(
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer,
        snek_checker=snek_discriminator,
        snek_generator=snek_generator,
        gan=gan)
    checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
    # checkpoint.save(file_prefix=checkpoint_prefix)
    # trainSnekMaker(
    #     train_datagen,
    #     check_model=snek_checker,
    #     gen_model=snek_generator,
    #     train_model=train_model
    # )
    train(train_df, EPOCHS, snek_generator, snek_discriminator, gan,
          checkpoint, checkpoint_prefix)
Example #9
File: GAN.py  Project: eladshabi/Acgan
    def __init__(self, rows, cols, channels, classes, latent, tpu=False):

        if tpu:
            set_floatx('float16')
            set_epsilon(1e-4)

        # Input shape
        self.img_rows = rows
        self.img_cols = cols
        self.channels = channels
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        self.num_of_classes = classes

        # size of the vector to feed the generator (z)
        self.latent_dim = latent

        #optimizer = Adam(0.0002, 0.5)

        optimizer = tf.train.AdamOptimizer(0.0002, 0.5)

        loss_scale_manager = FixedLossScaleManager(5000)

        loss_scale_optimizer = LossScaleOptimizer(optimizer,
                                                  loss_scale_manager)

        losses = ['binary_crossentropy', 'sparse_categorical_crossentropy']

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()

        self.discriminator.compile(loss=losses,
                                   optimizer=loss_scale_optimizer,
                                   metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise and the target label as input
        # and generates the corresponding digit of that label
        noise = Input(shape=(self.latent_dim, ))
        label = Input(shape=(1, ))
        img = self.generator([noise, label])

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated image as input and determines validity
        # and the label of that image
        valid, target_label = self.discriminator(img)

        # The combined model  (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model([noise, label], [valid, target_label])
        self.combined.compile(loss=losses, optimizer=loss_scale_optimizer)
Example #10
    def _set_precision(calculation_dtype, calculation_epsilon):
        # enable single/half/double precision
        import tensorflow.keras.backend as K
        K.set_floatx(calculation_dtype)
        K.set_epsilon(calculation_epsilon)

        # enable mixed precision
        if "float16" in calculation_dtype:
            import tensorflow.keras.mixed_precision as mixed_precision
            policy = mixed_precision.Policy("mixed_float16")
            mixed_precision.set_global_policy(policy)
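
Under the mixed_float16 policy that Examples #7 and #10 enable, layers compute in float16 while keeping their variables in float32. A short sketch (standard TF 2.4+ API, not part of the snippet above) to confirm:

# Sketch: inspect layer dtypes after setting the global mixed-precision policy.
import tensorflow as tf
from tensorflow.keras import mixed_precision

mixed_precision.set_global_policy('mixed_float16')

layer = tf.keras.layers.Dense(8)
layer.build(input_shape=(None, 4))
print(layer.compute_dtype)   # 'float16'
print(layer.variable_dtype)  # 'float32'
print(layer.kernel.dtype)    # float32 variables for numerical stability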
Example #11
    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        K.set_epsilon(1e-05)
        #y_pred = tf.convert_to_tensor(y_pred, np.float32)
        #y_true = tf.convert_to_tensor(y_true, np.float32)
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision
Example #12
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        K.set_epsilon(1e-05)
        #y_pred = tf.convert_to_tensor(y_pred, np.float32)
        #y_true = tf.convert_to_tensor(y_true, np.float32)
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall
Example #13
def set_precision(precision):
    if precision == 'float16':
        dtype = 'float16'
        K.set_floatx(dtype)
        # default is 1e-7 which is too small for float16.  Without adjusting the epsilon, we will get NaN predictions because of divide by zero problems
        K.set_epsilon(1e-4)
        print_debug('Compute dtype: %s' % 'float16')
        print_debug('Variable dtype: %s' % 'float16')
    elif precision == 'mixed':
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_policy(policy)
        print_debug('Compute dtype: %s' % policy.compute_dtype)
        print_debug('Variable dtype: %s' % policy.variable_dtype)
    else:
        policy = mixed_precision.Policy('float32')
        mixed_precision.set_policy(policy)
        print_debug('Compute dtype: %s' % policy.compute_dtype)
        print_debug('Variable dtype: %s' % policy.variable_dtype)
Example #14
File: NN.py  Project: philgun/coolstuff
    def training_NN(self,
                    prefix_res,
                    input_dim,
                    output_dim,
                    network_layout,
                    learning_rate,
                    epochs,
                    batch_size,
                    count,
                    activation,
                    initializer,
                    ES=False,
                    max_epochs=2000,
                    verbose=1,
                    with_validation=False):
        self.prefix_res = prefix_res
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.network_layout = network_layout
        self.learning_rate = learning_rate
        self.opt = optimizers.Adam(learning_rate=learning_rate)
        self.epochs = epochs
        self.batch_size = batch_size
        self.activation = activation
        self.kernel_initializer = initializer

        self.initiate_check_point()

        if not os.path.exists(self.prefix_res):
            os.makedirs(self.prefix_res)

        stagnant = True
        self.loss = 200

        ii = 0
        while stagnant:  # If the model stagnates due to a bad weight initialization, build a new model with the same configuration
            print(
                "ITERATION.......................................................",
                ii)
            ######################  BUILD MODEL ############################
            #Building the neural network
            self.model = Sequential()

            #Adding input layer and first hidden layer
            self.model.add(
                Dense(network_layout[0],
                      name="Input",
                      input_dim=self.input_dim,
                      kernel_initializer=self.kernel_initializer,
                      use_bias=True,
                      activation=self.activation))

            #Adding the rest of hidden layer
            for numneurons in self.network_layout[1:]:
                self.model.add(
                    Dense(numneurons,
                          kernel_initializer=self.kernel_initializer,
                          activation=self.activation))

            #Adding the output layer
            self.model.add(
                Dense(self.output_dim,
                      name="Output",
                      kernel_initializer=self.kernel_initializer,
                      activation=self.activation))
            '''
            fixing the problem of super high MAPE 
            https://stackoverflow.com/questions/49729522/why-is-the-mean-average-percentage-errormape-extremely-high
            '''
            backend.set_epsilon(1)

            #Training the model
            '''
            More into epoch and batch size
            https://machinelearningmastery.com/difference-between-a-batch-and-an-epoch/
            '''

            #discard previous weight and start from fresh when the model got stuck
            print("COMPILE!")
            self.model.compile(optimizer=self.opt,
                               loss='mse',
                               metrics=['mse', 'mae', 'mape'])

            self.model.summary()

            if ES:  #Use early stopping
                if with_validation:
                    self.initiate_early_stop()
                    history = self.model.fit(x=self.Xtrain,
                                             y=self.ytrain,
                                             validation_data=(self.Xtest,
                                                              self.ytest),
                                             batch_size=self.batch_size,
                                             epochs=self.epochs,
                                             verbose=verbose,
                                             callbacks=[self.es])
                else:
                    self.initiate_early_stop()
                    history = self.model.fit(x=self.Xtrain,
                                             y=self.ytrain,
                                             batch_size=self.batch_size,
                                             epochs=self.epochs,
                                             verbose=verbose,
                                             callbacks=[self.es])

            else:
                history = self.model.fit(x=self.Xtrain,
                                         y=self.ytrain,
                                         batch_size=self.batch_size,
                                         verbose=verbose,
                                         epochs=self.epochs)

            losses = history.history['loss'][:]
            self.loss = min(losses)

            delta = losses[-1] - losses[-2]
            print("DELTA: ", delta)

            def nearly_equal(a, b, sig_fig=5):
                return (a == b or int(a * 10**sig_fig) == int(b * 10**sig_fig))

            stagnant = nearly_equal(abs(delta), 0, 7)

            if stagnant:
                print("Stagnant Model! Re-initialize")
            else:
                print("Not Stagnant!")

        #Save model
        self.fn_res = self.prefix_res + 'surrogate_model_%s' % (count)

        print(self.fn_res)
        print("Save model to disk..................")
        self.model.save(self.fn_res)  # ===> save model in SavedModel format
        print("Done..................")
        return self.model
Example #15
File: lib.py  Project: philgun/coolstuff
    def training_NN(self,
                    prefix_res,
                    input_dim,
                    output_dim,
                    network_layout,
                    learning_rate,
                    epochs,
                    batch_size,
                    count,
                    activation,
                    initializer,
                    ES=False,
                    verbose=1,
                    with_validation=False,
                    with_decay=False):
        self.prefix_res = prefix_res
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.network_layout = network_layout
        self.learning_rate = learning_rate
        if with_decay:
            self.opt = optimizers.Adam(learning_rate=learning_rate,
                                       decay=0.001 / epochs)
        else:
            self.opt = optimizers.Adam(learning_rate=learning_rate)
        self.epochs = epochs
        self.batch_size = batch_size
        self.activation = activation
        self.kernel_initializer = initializer

        if not os.path.exists(self.prefix_res):
            os.makedirs(self.prefix_res)

        ######################  BUILD MODEL ############################
        #Building the neural network
        self.model = Sequential()

        #Adding input layer and first hidden layer
        self.model.add(
            Dense(network_layout[0],
                  name="Input",
                  input_dim=self.input_dim,
                  kernel_initializer=self.kernel_initializer,
                  use_bias=True,
                  activation=self.activation))

        #Adding the rest of hidden layer
        for numneurons in self.network_layout[1:]:
            self.model.add(
                Dense(numneurons,
                      kernel_initializer=self.kernel_initializer,
                      use_bias=True,
                      activation=self.activation))

        #Adding the output layer
        self.model.add(
            Dense(self.output_dim,
                  name="Output",
                  kernel_initializer=self.kernel_initializer,
                  use_bias=True,
                  activation=self.activation))
        '''
        fixing the problem of super high MAPE 
        https://stackoverflow.com/questions/49729522/why-is-the-mean-average-percentage-errormape-extremely-high
        '''
        backend.set_epsilon(1)

        #Training the model
        '''
        More into epoch and batch size
        https://machinelearningmastery.com/difference-between-a-batch-and-an-epoch/
        '''

        #discard previous weight and start from fresh when the model got stuck
        print("COMPILE!")
        self.model.compile(optimizer=self.opt,
                           loss='mse',
                           metrics=['mse', 'mae', 'mape'])

        self.model.summary()

        if ES:  #Use early stopping
            if with_validation:
                self.initiate_early_stop()
                self.history = self.model.fit(x=self.Xtrain,
                                              y=self.ytrain,
                                              validation_data=(self.Xvalid,
                                                               self.yvalid),
                                              batch_size=self.batch_size,
                                              epochs=self.epochs,
                                              verbose=verbose,
                                              callbacks=[self.es])
            else:
                self.initiate_early_stop()
                self.history = self.model.fit(x=self.Xtrain,
                                              y=self.ytrain,
                                              batch_size=self.batch_size,
                                              epochs=self.epochs,
                                              verbose=verbose,
                                              callbacks=[self.es])

        else:
            self.history = self.model.fit(x=self.Xtrain,
                                          y=self.ytrain,
                                          validation_data=(self.Xvalid,
                                                           self.yvalid),
                                          batch_size=self.batch_size,
                                          epochs=self.epochs,
                                          verbose=verbose)

        losses = self.history.history['loss'][:]

        #Save model
        self.fn_res = self.prefix_res + 'surrogate_model_%s' % (count)

        print(self.fn_res)
        print("Save model to disk..................")
        self.model.save(self.fn_res)  # ===> save model in SavedModel format
        print("Done..................")
        return self.model
Example #16
def eval_net(arr,wd,verbose,scaling_method,batch_size,loss_function,nPercent,nShrink,lr,dropout):
    #****************************** Generate Model
    model = generate_model(arr,dropout,nPercent,nShrink)

    #****************************** Compile model and callbacks
    model.compile(
        loss="mse",
        optimizer=optimizers.Adam(lr=lr),
        metrics = ['mse','mae','mape']
    )

    monitor = EarlyStopping(
        monitor="val_mape",
        mode='min', 
        min_delta=0.5,
        patience=500,
        restore_best_weights=True
    )
    
    #****************************** Retrieve ANN set-up from array
    Xtrain = arr[0]
    ytrain = arr[1]

    Xvalid = arr[2]
    yvalid = arr[3]
    
    Xtest = arr[4]
    y_test_raw = arr[5]

    mmx = arr[6]
    mmy = arr[7]

    backend.set_epsilon(1)

    #****************************** Train
    model.fit(
        x=Xtrain,
        y=ytrain,
        validation_data=(Xvalid,yvalid),
        batch_size=int(batch_size),
        epochs=int(5000),
        callbacks=[monitor],
        verbose=verbose
    )

    #****************************** Predict test data
    pred = mmy.inverse_transform(model.predict(Xtest))
    
    #*************************** Get the score
    if loss_function.lower() == "mse":
        score = mean_squared_error(y_test_raw,pred,squared=True)
        score = score * -1
    elif loss_function.lower() == "rmse":
        score = mean_squared_error(y_test_raw,pred,squared=False)
        score = score * -1
    elif loss_function.lower() == "mae":
        score = mean_absolute_error(y_test_raw,pred)
        score = score * -1
    elif loss_function.lower() == "r2":
        score = r2_score(y_test_raw,pred)
    else:
        raise ValueError(
            "Valid loss function to be optimised are: mse, mae, rmse and r2 (case insensitive). Your loss function: %s"%(loss_function)
        )

    #*************************** Create epochs file if it is not there
    fnepoch = "%s/epochs.csv"%(wd)
    if not os.path.exists(fnepoch):
        f = open(fnepoch,"w")
        f.write("Epochs_needed\n")
        f.close()
    f = open(fnepoch,"a")
    f.write("%s\n"%(monitor.stopped_epoch))
    f.close()

    #*************************** Create score file if it is not there
    fnscore = "%s/score.csv"%(wd)
    if not os.path.exists(fnscore):
        f = open(fnscore,"w")
        f.write("Scores\n")
        f.write("%s\n"%(-10000)) #initial score
        f.close()

    #*************************** Get the best score (RMSE)
    dfscores = pd.read_csv(fnscore)
    bestScore = dfscores.Scores.max()
    
    #****************************** If score at this iteration is better than historical score then:
    if score > bestScore:
        #****************************** Save model 
        model.save("%s/surrogate_model"%(wd)) 

        #****************************** Save Prediction vs True data
        PRED = pred[:,0]
        TEST = y_test_raw[:,0]

        df = pd.DataFrame(zip(PRED,TEST),columns=['Prediction','Test'])
        df.to_csv("%s/prediction_vs_test.csv"%(wd),index=False)

        #****************************** Plot and save
        if loss_function.lower() == "r2":
            s = score
        else:
            s = score * -1
            
        fig,ax = plt.subplots()
        ax.scatter(
            TEST,PRED,c="black",s=2
        )
        ax.set_title("Test vs. Pred value of the best model. %s: %.6f"%(loss_function.upper(),s))
        ax.set_xlabel("Test data")
        ax.set_ylabel("Prediction")
        ax.set_xlim(
            TEST.min(),TEST.max()
        )
        ax.set_ylim(
            TEST.min(),TEST.max()
        )
        fig.savefig("%s/45DegPlot.png"%(wd))

        #****************************** Save the boundaries of the better model
        if scaling_method == "MinMax":
            maxdata = "%s/max.txt"%(wd)
            mindata = "%s/min.txt"%(wd)

            Xmax = mmx.data_max_

            try:
                ymax = mmy.data_max_
            except:
                ymax=[-1000000]

            write=""

            for i in range(len(Xmax)):
                write+="%s,"%(Xmax[i])
            write+="%s"%(ymax[0])
            f = open(maxdata,"w")
            f.write(write)
            f.close()

            Xmin = mmx.data_min_

            try:
                ymin = mmy.data_min_
            except:
                ymin = [-1000000]

            write=""
            
            for i in range(len(Xmin)):
                write+="%s,"%(Xmin[i])
            write+="%s"%(ymin[0])
            f = open(mindata,"w")
            f.write(write)
            f.close()
        else:
            maxdata = "%s/mean.txt"%(wd)
            mindata = "%s/std.txt"%(wd)

            Xmax = mmx.mean_

            try:
                ymax = mmy.mean_
            except:
                ymax = [-100000]

            write=""

            for i in range(len(Xmax)):
                write+="%s,"%(Xmax[i])
            write+="%s"%(ymax[0])
            f = open(maxdata,"w")
            f.write(write)
            f.close()

            Xmin = mmx.var_

            try:
                ymin = mmy.var_
            except:
                ymin = [-100000]

            write=""
            
            for i in range(len(Xmin)):
                write+="%s,"%(Xmin[i]**0.5)
            write+="%s"%(ymin[0]**0.5)
            f = open(mindata,"w")
            f.write(write)
            f.close()

    #*********************** Append the score anyway
    f = open(fnscore,"a")
    f.write("%s\n"%(score))
    f.close()

    #****************************** Clear session to release memory
    tf.keras.backend.clear_session()

    return score
Example #17
def eval_net(wd, verbose, scaling_method, fntrain, fntest, inputsize,
             outputsize, batch_size, ub, lb, weight, nPercent, nShrink, lr,
             dropout):
    #****************************** Regenerate the dataset
    arr = preprocessing(wd, fntrain, fntest, inputsize, outputsize,
                        scaling_method)

    #****************************** Retrieve ANN set-up from array
    Xtrain = arr[0]
    ytrain = arr[1]

    Xvalid = arr[2]
    yvalid = arr[3]

    Xtest = arr[4]
    y_test_raw = arr[5]

    mmx = arr[6]
    mmy = arr[7]

    #****************************** Generate Model
    model = generate_model(arr, dropout, nPercent, nShrink)

    #****************************** Compile model and callbacks
    model.compile(
        optimizer=optimizers.Adam(lr=lr),
        loss=WeightedMSE(mmy, ub, lb, weight)  #Custom losses
    )

    monitor = EarlyStopping(monitor="val_loss",
                            mode='min',
                            min_delta=1e-3,
                            patience=50,
                            restore_best_weights=True)

    backend.set_epsilon(1)

    #****************************** Train
    model.fit(x=Xtrain,
              y=ytrain,
              validation_data=(Xvalid, yvalid),
              batch_size=batch_size,
              epochs=int(5000),
              callbacks=[monitor],
              verbose=verbose)

    #****************************** Save prediction of training data vs true training data
    #Scalled
    pred_train = model.predict(Xtrain)
    df = pd.DataFrame(
        zip(pred_train[:, 0], ytrain[:, 0]),
        columns=['Prediction_Training_Scalled', 'YTrain_Scalled'])
    df.to_csv("%s/bias_scalled.csv" % (wd), index=False)

    #Unscalled
    pred_train_unscalled = mmy.inverse_transform(pred_train)
    ytrain_unscalled = mmy.inverse_transform(ytrain)
    df = pd.DataFrame(zip(pred_train_unscalled[:, 0], ytrain_unscalled[:, 0]),
                      columns=['Prediction_Training', 'YTrain'])
    df.to_csv("%s/bias_unscalled.csv" % (wd), index=False)

    #****************************** Predict test data
    pred = model.predict(Xtest)

    #****************************** Save prediction of test data vs true test data
    #Scalled
    df = pd.DataFrame(zip(pred[:, 0],
                          mmy.transform(y_test_raw)[:, 0]),
                      columns=['Prediction_Scalled', 'Test_Scalled'])
    df.to_csv("%s/prediction_vs_test_Scalled.csv" % (wd), index=False)

    #Unscalled
    pred = mmy.inverse_transform(pred)
    df = pd.DataFrame(zip(pred[:, 0], y_test_raw[:, 0]),
                      columns=['Prediction', 'Test'])
    df.to_csv("%s/prediction_vs_test_Unscalled.csv" % (wd), index=False)

    #*************************** Get the weight vector
    N_common = np.count_nonzero((y_test_raw > lb) & (y_test_raw < ub))
    N_rare = y_test_raw.shape[0] - N_common

    weight_vector = []

    for i in range(y_test_raw.shape[0]):
        v = y_test_raw[i, 0]
        if v > lb and v < ub:
            weight_vector.append(
                1
            )  #y_test_raw.shape[0] / N_common  #The majority  --> less penalised
        else:
            weight_vector.append(
                weight
            )  #y_test_raw.shape[0] / N_rare #The minority --> more penalised

    #*************************** Get the weighted-MSE
    weighted_squared_error = sum(
        (y_test_raw[:, 0] - pred[:, 0])**2 * weight_vector)
    score = weighted_squared_error / (
        y_test_raw.shape[0])  #Calculate the mean weighted squared error

    score = score * -1  #Multiply the score to make it as a minimisation problem

    #*************************** Create epochs file if it is not there
    fnepoch = "%s/epochs.csv" % (wd)
    if not os.path.exists(fnepoch):
        f = open(fnepoch, "w")
        f.write("Epochs_needed\n")
        f.close()
    f = open(fnepoch, "a")
    f.write("%s\n" % (monitor.stopped_epoch))
    f.close()

    #*************************** Create score file if it is not there
    fnscore = "%s/score.csv" % (wd)
    if not os.path.exists(fnscore):
        f = open(fnscore, "w")
        f.write("Scores\n")
        f.write("%s\n" % (-10000))
        f.close()

    #*************************** Get the best score (RMSE)
    dfscores = pd.read_csv(fnscore)
    bestScore = dfscores.Scores.max()

    #****************************** If score at this iteration is better than historical score then:
    if score > bestScore:
        #****************************** Save model --> sc = joblib.load('std_scaler.bin') --> when load later
        model.save("%s/surrogate_model" % (wd))
        dump(mmx, "%s/mmx.bin" % (wd))
        dump(mmy, "%s/mmy.bin" % (wd))

        #****************************** Save Prediction vs test data
        df = pd.DataFrame(zip(pred[:, 0], y_test_raw[:, 0]),
                          columns=['Prediction', 'Test'])
        df.to_csv("%s/prediction_vs_test_best.csv" % (wd), index=False)

        #****************************** Save the boundaries of the better model
        if scaling_method == "MinMax":
            maxdata = "%s/max.txt" % (wd)
            mindata = "%s/min.txt" % (wd)

            Xmax = mmx.data_max_

            try:
                ymax = mmy.data_max_
            except:
                ymax = [-100000000]

            write = ""

            for i in range(len(Xmax)):
                write += "%s," % (Xmax[i])
            write += "%s" % (ymax[0])
            f = open(maxdata, "w")
            f.write(write)
            f.close()

            Xmin = mmx.data_min_

            try:
                ymin = mmy.data_min_
            except:
                ymin = [-100000000]

            write = ""

            for i in range(len(Xmin)):
                write += "%s," % (Xmin[i])
            write += "%s" % (ymin[0])
            f = open(mindata, "w")
            f.write(write)
            f.close()

        else:
            maxdata = "%s/mean.txt" % (wd)
            mindata = "%s/std.txt" % (wd)

            Xmax = mmx.mean_

            try:
                ymax = mmy.mean_
            except:
                ymax = [-10000000]

            write = ""

            for i in range(len(Xmax)):
                write += "%s," % (Xmax[i])
            write += "%s" % (ymax[0])
            f = open(maxdata, "w")
            f.write(write)
            f.close()

            Xmin = mmx.var_

            try:
                ymin = mmy.var_
            except:
                ymin = [-10000000]

            write = ""

            for i in range(len(Xmin)):
                write += "%s," % (Xmin[i]**0.5)
            write += "%s" % (ymin[0]**0.5)
            f = open(mindata, "w")
            f.write(write)
            f.close()

    #*********************** Append the score anyway
    f = open(fnscore, "a")
    f.write("%s\n" % (score))
    f.close()

    #****************************** Clear Session to release memory
    tf.keras.backend.clear_session()

    return score
Example #18
BATCHES = 160 // FACTOR
POINTS = 32
DAT_SHP = (POINTS, 2, 1)
LAT_DIM = 100
PAR_DIM = 3
DEPTH = 32
LEARN_RATE = 0.0002
DTYPE = 'float32'

################################################################################
# %% KERAS/TF SETTINGS
################################################################################

##### SET PRECISION TYPE
K.set_floatx(DTYPE)
K.set_epsilon(1e-8)

##### INTEL MKL
os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
os.environ["KMP_BLOCKTIME"] = "0"
os.environ["OMP_NUM_THREADS"] = "10"
os.environ["KMP_SETTINGS"] = "1"

##### ALLOW GPU MEMORY GROWTH
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    #tf.config.experimental.set_memory_growth(gpu, True)
    tf.config.experimental.set_virtual_device_configuration(
        gpu,
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2500)])
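
Example #18 caps each GPU at a 2500 MB virtual device rather than using the commented-out memory-growth call; the two approaches are alternatives for the same physical GPU. For comparison, a sketch of the growth variant (same experimental API family as above):

# Sketch: the on-demand memory-growth alternative left commented out in Example #18.
import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    # Allocate GPU memory on demand instead of reserving a fixed slice up front.
    tf.config.experimental.set_memory_growth(gpu, True)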
Example #19
from tensorflow.keras.regularizers import l2
from tensorflow.compat.v1.graph_util import convert_variables_to_constants
from google.protobuf.internal.encoder import _VarintBytes
from google.protobuf.internal.decoder import _DecodeVarint32
from maximum.industries.loader import NUM_INPUT_CHANNELS, DTYPE
from maximum.industries.normalization import FixedNormalization

# initialize TF default session to use GPU growth so all memory is not immediately reserved
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
K.clear_session()  # this is what clears the default graph, not setting a session.
K.set_floatx(DTYPE)
if DTYPE == 'float16':
    K.set_epsilon(1e-4)  # use a larger epsilon for float16


def get_conv(filters, kernel_size=3, activation=tf.nn.relu):
    return Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  padding='same',
                  activation=activation,
                  data_format='channels_first',
                  kernel_initializer=initializers.glorot_normal(),
                  bias_initializer=initializers.zeros(),
                  kernel_regularizer=l2(0.001),
                  bias_regularizer=l2(0.001))


def get_dense(units, regu=0.001, activation=tf.nn.relu):
Example #20
import os
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, TimeDistributed, concatenate, RepeatVector, Reshape, BatchNormalization
from tensorflow.keras.layers import Activation, Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras import backend
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, Callback, TensorBoard
from sklearn.metrics import roc_auc_score
import configparser
import sys
from utils_data import *

# setting the float data type
backend.set_floatx('float32')
backend.set_epsilon(1e-7)
os.environ["CUDA_VISIBLE_DEVICES"] = "2"

cfgParse = configparser.ConfigParser()
cfgParse.read(sys.argv[1])

train_data_path = cfgParse.get("data", "train_data")
output_dir = cfgParse.get("output", "model_dir")

data = pd.read_csv(train_data_path).rename(columns={
    "i": "Customer_id",
    "j": "Product_id",
    "t": "week_no"
})
data['is_purchased'] = 1
weeks = list(range(49))
Example #21
# log setup
current_script_name = os.path.basename(__file__).split('.')[0]
log_path_filename = ''.join([local_script_settings['log_path'], current_script_name, '.log'])
logging.basicConfig(filename=log_path_filename, level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(__name__)
logHandler = handlers.RotatingFileHandler(log_path_filename, maxBytes=10485760, backupCount=5)
logger.addHandler(logHandler)

# keras session and random seed reset/fix, set epsilon keras backend
kb.clear_session()
np.random.seed(1)
tf.random.set_seed(2)
kb.set_epsilon(1)  # needed while using "mape" as one of the metric at training model

# classes definitions


# functions definitions


def cof_zeros(array, local_cof_settings):
    if local_cof_settings['zeros_control'] == "True":
        local_max = np.amax(array) + 1
        array[array <= 0] = local_max
        local_min = np.amin(array)
        array[array == local_max] = local_min
    return array
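
cof_zeros replaces non-positive entries with the smallest positive entry of the array (when the 'zeros_control' flag is the string "True"), mutating the array in place; presumably this keeps downstream percentage-style metrics finite. A hypothetical check of the behaviour:

# Hypothetical check: non-positive entries become the smallest positive entry.
import numpy as np

sales = np.array([0.0, 3.0, -1.0, 7.0, 2.0])
print(cof_zeros(sales, {'zeros_control': "True"}))   # [2. 3. 2. 7. 2.]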
Example #22
    model = CustomModel(architectures, mm_der, mmx_state)

    #Build the model
    input_arr = tf.random.uniform((1, 14))
    outputs = model(input_arr)
    model.model().summary()

    #initialise the metrics and optimizer and compile the model
    metrics = keras.metrics.MeanAbsolutePercentageError(name="MAPE")
    optimizer = optimizers.Adam(lr=1e-3)

    #Compile the model
    model.compile(optimizer=optimizer, loss=keras.losses.MeanSquaredError())

    #Set tf keras backend epsilon to 1 so MAPE is scaled to the normal scale
    backend.set_epsilon(1)

    #Training with custom-fit
    model.fit(
        X_train,
        y_train,
        validation_data=(X_valid, y_valid),
        batch_size=int(X_train.shape[0] / 600),  #turn data into 600 batches
        epochs=500)

    #Save the trained model
    model.save(fsave)

    #Load the trained-model, df_test, mmx and mmy
    trained_model = keras.models.load_model(fsave)
Example #23
from tensorflow.keras.models import Model
from tqdm import tqdm
import os
import pandas as pd
import numpy as np
import time
# import skimage.io
import pickle

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

dtype = 'float16'
K.set_floatx(dtype)
K.set_epsilon(1e-4)


"""
TO-DO
1) Write own generator. Currently using ImageDataGenerator in Keras
"""

# def feature_extraction(exp_name, patch_dir, target_size=(299, 299)):
#
# 	# Load imagenet pre-trained InceptionV3
# 	base_model = InceptionV3(weights='imagenet')
# 	model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
# 	patch_list = os.listdir(patch_dir)
# 	image_features_df = pd.DataFrame(index=patch_list, columns=list(range(2048)), dtype='float16')
#