Example #1
    def objective_func(data, hyperpars):
        data.load_data(normalize=True, shuffle=False)

        activation = LeakyReLU(0.2)

        # n_end (number of endmembers) is assumed to be defined in the enclosing scope.
        unmixer = Autoencoder(n_end=n_end, data=data, activation=activation,
                              optimizer=hyperpars['optimizer'], l2=hyperpars['l2'], l1=hyperpars['l1'], plot_every_n=0)

        unmixer.create_model(SAD)
        data.make_patches(1, num_patches=hyperpars['num_patches'], use_orig=True)
        history = unmixer.fit(epochs=100, batch_size=hyperpars['batch_size'])

        endmembers = unmixer.get_endmembers().transpose()
        abundances = unmixer.get_abundances()
        Y = np.transpose(data.orig_data)
        GT = np.transpose(data.GT)
        sad, idx_org, idx_hat, sad_k_m, s0 = calc_SAD_2(GT, endmembers)
        MSE = mse(Y, endmembers, np.transpose(abundances))
        abundances = abundances.reshape(data.n_rows, data.n_cols, endmembers.shape[1]).transpose((1, 0, 2))
        resdict = {'endmembers': endmembers,
                   'abundances': abundances,
                   'loss': history.history['loss'],
                   'SAD': sad,
                   'MSE': MSE}

        del unmixer
        K.clear_session()

        return {'loss': sad, 'status': STATUS_OK, 'attachments': resdict}
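
The returned dict follows hyperopt's objective protocol ('loss', 'status', optional 'attachments'), so a function like this can be dropped straight into fmin. A minimal driver sketch, assuming hyperopt is installed and my_data is the dataset object from the surrounding script; the search-space bounds are illustrative, not taken from the example:

    # Sketch: driving objective_func with hyperopt's fmin.
    # The search-space bounds below are illustrative assumptions.
    from hyperopt import fmin, tpe, hp, Trials

    space = {
        'optimizer': hp.choice('optimizer', ['adam', 'rmsprop']),
        'l1': hp.loguniform('l1', -10, -2),
        'l2': hp.loguniform('l2', -10, -2),
        'num_patches': hp.choice('num_patches', [250, 500, 1000]),
        'batch_size': hp.choice('batch_size', [16, 32, 64]),
    }

    trials = Trials()
    best = fmin(fn=lambda pars: objective_func(my_data, pars),
                space=space, algo=tpe.suggest, max_evals=20, trials=trials)
    print(best)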
Example #2
    def test_load_model_broadcast(self):
        def create_model():
            opt = keras.optimizers.SGD(lr=0.01 * hvd.size(), momentum=0.9)
            opt = hvd.DistributedOptimizer(opt)

            model = keras.models.Sequential()
            model.add(keras.layers.Dense(2, input_shape=(3, )))
            model.add(keras.layers.RepeatVector(3))
            model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
            model.compile(loss=keras.losses.MSE,
                          optimizer=opt,
                          metrics=[keras.metrics.categorical_accuracy],
                          sample_weight_mode='temporal')

            return model

        with temppath() as fname:
            with self.session(config=self.config) as sess:
                K.set_session(sess)

                model = create_model()

                x = np.random.random((1, 3))
                y = np.random.random((1, 3, 3))
                model.train_on_batch(x, y)

                if hvd.rank() == 0:
                    model.save(fname)

            K.clear_session()
            with self.session(config=self.config) as sess:
                K.set_session(sess)

                weight = np.random.random((1, 3))

                if hvd.rank() == 0:
                    model = hvd.load_model(fname)
                else:
                    model = create_model()

                def generator():
                    while 1:
                        yield (x, y, weight)

                if hvd.rank() == 0:
                    self.assertEqual(len(model.optimizer.weights), 5)
                else:
                    self.assertEqual(len(model.optimizer.weights), 0)

                # No assertions, we just need to verify that it doesn't hang
                callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]
                model.fit_generator(generator(),
                                    steps_per_epoch=1,
                                    callbacks=callbacks,
                                    epochs=1,
                                    verbose=0,
                                    workers=4,
                                    initial_epoch=0)

                self.assertEqual(len(model.optimizer.weights), 5)
Example #3
  def testTwoWayCompatibility(self):
    save_model_dir = os.path.join(self.get_temp_dir(), 'model_dir')
    save_est, input_fn = self._make_estimator(save_model_dir)

    save_est.train(input_fn, steps=3)

    model = SubclassedModel()
    optimizer = adam.Adam(0.01)
    checkpoint = util.Checkpoint(
        step=tf.Variable(0, dtype=tf.dtypes.int64),
        optimizer=optimizer,
        model=model)
    status = checkpoint.restore(tf.train.latest_checkpoint(save_model_dir))
    self.assertEqual(3, self.evaluate(checkpoint.step))
    with tf.GradientTape() as tape:
      output = model(tf.constant([[1.]]))
      loss = tf.math.reduce_sum(output)
    variables = model.trainable_variables
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    status.assert_consumed()

    # The optimizer uses this for some reason...
    backend.clear_session()

    load_model_dir = os.path.join(self.get_temp_dir(), 'load_model_dir/')
    checkpoint.step.assign(40)
    checkpoint.model.dense_two.bias.assign([13.])
    checkpoint.save(load_model_dir)
    load_est, input_fn = self._make_estimator(load_model_dir)
    predictions = load_est.predict(input_fn)
    predictions = next(predictions)
    self.assertAllClose([13.], predictions['bias'])
    self.assertEqual(40, predictions['step'])
Example #4
    def generate_uni_directional_RNN_model(self):
        """
        Initalize a model in keras. Here only a uni-directional RNN. 
        """
        tf.reset_default_graph()
        try:
            del model
        except:
            pass
        K.clear_session()

        FEATURES = self.X_test.shape[2]
        LOOKBACK = self.X_test.shape[1]

        model = Sequential()

        for layer_number in range(self.INPUT_LAYERS):
            model.add(LSTM(self.HIDDEN_LAYER_SIZE, input_shape=(LOOKBACK, FEATURES), kernel_initializer="he_normal",
                                     return_sequences=True))
            model.add(Dropout(self.DROPOUT))
        model.add(TimeDistributed(Dense(1, kernel_initializer="he_normal")))

        model.add(Activation('sigmoid'))
        adm = Adam(lr=0.001)  # defaults: lr=0.001, epsilon=1e-8
        model.compile(loss='binary_crossentropy', optimizer=adm, metrics=['acc'])
        return model
Example #5
    def train_model(model: tf.keras.Model,
                    train_data,
                    validation_data,
                    optimizer,
                    loss='categorical_crossentropy',
                    epochs=3,
                    verbose=1,
                    batch_size=None,
                    callbacks=None):
        # init
        K.clear_session()
        tf.random.set_seed(51)
        np.random.seed(51)

        # optimizer
        opt = tf.keras.optimizers.Adam() if optimizer is None else optimizer

        # compile
        model.compile(opt, loss=loss, metrics=["acc"])

        # fit
        history = model.fit(train_data,
                            validation_data=validation_data,
                            epochs=epochs,
                            verbose=verbose,
                            callbacks=callbacks,
                            batch_size=batch_size)
        return history
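
A usage sketch for train_model follows. The toy dataset and architecture are invented for illustration, and passing optimizer=None exercises the Adam fallback:

    # Usage sketch with a toy dataset (illustrative, not from the source).
    import numpy as np
    import tensorflow as tf

    x = np.random.random((100, 20)).astype('float32')
    y = tf.keras.utils.to_categorical(np.random.randint(0, 5, 100), 5)
    train_ds = tf.data.Dataset.from_tensor_slices((x[:80], y[:80])).batch(16)
    val_ds = tf.data.Dataset.from_tensor_slices((x[80:], y[80:])).batch(16)

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(32, activation='relu', input_shape=(20,)),
        tf.keras.layers.Dense(5, activation='softmax'),
    ])
    history = train_model(model, train_ds, val_ds, optimizer=None)  # None -> Adam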
Example #6
def fitness(num_dense_layers, num_dense_nodes, activation, adam_decay):

    model = create_model(num_dense_layers=num_dense_layers,
                         num_dense_nodes=num_dense_nodes,
                         activation=activation,
                         adam_decay=adam_decay)

    # named blackbox because it represents the structure
    blackbox = model.fit(
        x=x_train,
        y=y_train_one_hot,
        epochs=15,
        batch_size=512,
        validation_split=0.15,
    )
    # return the validation accuracy for the last epoch.
    accuracy = blackbox.history['val_accuracy'][-1]

    # Print the classification accuracy.
    print()
    print("Accuracy: {0:.2%}".format(accuracy))
    print()

    # Delete the Keras model with these hyper-parameters from memory.
    del model

    # Clear the Keras session, otherwise it will keep adding new
    # models to the same TensorFlow graph each time we create
    # a model with a different set of hyper-parameters.
    K.clear_session()
    tf.compat.v1.reset_default_graph()

    # the optimizer aims for the lowest score, so we return our negative accuracy
    return -accuracy
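
Because fitness returns the negative validation accuracy, it plugs directly into a minimizer such as scikit-optimize's gp_minimize. A sketch under that assumption; the dimension ranges are illustrative:

    # Sketch: Bayesian optimization of fitness with scikit-optimize.
    # Dimension ranges are illustrative assumptions.
    from skopt import gp_minimize
    from skopt.space import Integer, Categorical, Real
    from skopt.utils import use_named_args

    dimensions = [Integer(1, 5, name='num_dense_layers'),
                  Integer(8, 512, name='num_dense_nodes'),
                  Categorical(['relu', 'sigmoid'], name='activation'),
                  Real(1e-6, 1e-2, prior='log-uniform', name='adam_decay')]

    @use_named_args(dimensions=dimensions)
    def objective(**params):
        return fitness(**params)

    result = gp_minimize(func=objective, dimensions=dimensions,
                         acq_func='EI', n_calls=12)
    print('best params:', result.x, 'best score:', result.fun)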
Example #7
	def onBeginTraining(self):
		ue.log("starting MnistTutorial training")

		#reset the session each time we get training calls
		K.clear_session()

		#load mnist data set
		mnist = tf.keras.datasets.mnist
		(x_train, y_train), (x_test, y_test) = mnist.load_data()

		#rescale 0-255 -> 0-1.0
		x_train, x_test = x_train / 255.0, x_test / 255.0

		#define model
		model = tf.keras.models.Sequential([
			tf.keras.layers.Flatten(),
			tf.keras.layers.Dense(512, activation=tf.nn.relu),
			tf.keras.layers.Dropout(0.2),
			tf.keras.layers.Dense(10, activation=tf.nn.softmax)
		])

		model.compile(	optimizer='adam',
						loss='sparse_categorical_crossentropy',
						metrics=['accuracy'])

		#this will do the actual training
		model.fit(x_train, y_train, epochs=1)
		model.evaluate(x_test, y_test)

		ue.log("Training complete.")

		#store our model and graph for prediction
		self.graph = tf.get_default_graph()
		self.model = model
Example #8
def model_pred(modelfile, test_Feature_path, test_Label_path,
               SARS_Feature_path, SARS_Label_path):
    K.clear_session()
    tf.reset_default_graph()
    model = load_model(modelfile)

    X_test = np.load(test_Feature_path)
    Y_test = np.load(test_Label_path)
    X_test = X_test.reshape([len(X_test), 20, PCA_num + 1, 2])

    test_CM, accuracy1, precision1, recall1, f11, MCC1, fpr1, tpr1, roc_auc1 = computing_result(
        X_test, Y_test, model)

    X_SARS = np.load(SARS_Feature_path)
    Y_SARS = np.load(SARS_Label_path)
    X_SARS = X_SARS.reshape([len(X_SARS), 20, PCA_num + 1, 2])

    SARS_CM, accuracy2, precision2, recall2, f12, MCC2, fpr2, tpr2, roc_auc2 = computing_result(
        X_SARS, Y_SARS, model)

    test_row = [
        model_number, 'TEST', test_CM[0][0], test_CM[0][1], test_CM[1][0],
        test_CM[1][1], accuracy1, precision1, recall1, f11, MCC1, roc_auc1
    ]

    SARS_CoV_2_row = [
        model_number, 'SARS-CoV-2', SARS_CM[0][0], SARS_CM[0][1],
        SARS_CM[1][0], SARS_CM[1][1], accuracy2, precision2, recall2, f12,
        MCC2, roc_auc2
    ]

    del model

    return test_row, SARS_CoV_2_row
Example #9
    def test_application_variable_input_channels(self, app, last_dim):
        if backend.image_data_format() == 'channels_first':
            input_shape = (1, None, None)
        else:
            input_shape = (None, None, 1)
        output_shape = _get_output_shape(lambda: app(
            weights=None, include_top=False, input_shape=input_shape))
        self.assertShapeEqual(output_shape, (None, None, None, last_dim))
        backend.clear_session()

        if backend.image_data_format() == 'channels_first':
            input_shape = (4, None, None)
        else:
            input_shape = (None, None, 4)
        output_shape = _get_output_shape(lambda: app(
            weights=None, include_top=False, input_shape=input_shape))
        self.assertShapeEqual(output_shape, (None, None, None, last_dim))
        backend.clear_session()

    @parameterized.parameters(MODEL_LIST)
    def test_application_custom_input_shape_imagenet(self, app, _):
        custom_input_shape = (42, 42, 3)
        input_shape = _get_input_shape(
            lambda: app(input_shape=custom_input_shape,
                        include_top=False,
                        pooling='avg'))
        self.assertShapeEqual(input_shape, (None, *custom_input_shape))
Example #10
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
            weights_path='model_data/tiny_yolo_weights.h5'):
    '''create the training model, for Tiny YOLOv3'''
    K.clear_session() # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
        num_anchors//2, num_classes+5)) for l in range(2)]

    model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
    print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze the darknet body or freeze all but 2 output layers.
            num = (20, len(model_body.layers)-2)[freeze_body-1]
            for i in range(num): model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model
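
Since the Lambda layer computes the YOLO loss inside the graph, the returned model's output is the loss itself. In the keras-yolo3 training scripts this pattern is typically compiled with an identity loss; a sketch, assuming anchors and num_classes are already defined:

    # Sketch (keras-yolo3 training pattern): the model's output is the loss,
    # so compile with an identity loss that just returns the prediction.
    from keras.optimizers import Adam

    model = create_tiny_model((416, 416), anchors, num_classes)
    model.compile(optimizer=Adam(lr=1e-3),
                  loss={'yolo_loss': lambda y_true, y_pred: y_pred})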
Example #11
def computing_result(Feature_array, Label_array, model):

    X_TEST = Feature_array
    Y_TEST = Label_array

    K.clear_session()
    tf.reset_default_graph()

    model1 = model
    Y_PRED = model1.predict(X_TEST)

    Y_pred2 = np.argmin(Y_PRED, axis=-1)  # when using PCA features, labels need np.argmin instead of np.argmax
    Y_test2 = np.argmin(Y_TEST, axis=-1)
    #     print(Y_pred2)
    #     print(Y_test2)
    confusion_matrix1 = confusion_matrix(Y_test2, Y_pred2)

    new_confusion_matrix1 = [[
        confusion_matrix1[1, 1], confusion_matrix1[1, 0]
    ], [confusion_matrix1[0, 1], confusion_matrix1[0, 0]]]
    accuracy = accuracy_score(Y_test2, Y_pred2)  # accuracy
    precision = precision_score(Y_test2, Y_pred2)  # precision
    recall = recall_score(Y_test2, Y_pred2)  # recall
    f1 = f1_score(Y_test2, Y_pred2)  # F1
    MCC = matthews_corrcoef(Y_test2, Y_pred2)  # MCC

    fpr, tpr, thresholds = metrics.roc_curve(Y_TEST[:, 1], Y_PRED[:, 1])
    roc_auc = auc(fpr, tpr)

    return new_confusion_matrix1, accuracy, precision, recall, f1, MCC, fpr, tpr, roc_auc
Example #12
def create_model(input_shape, anchors, num_classes, load_pretrained=False, freeze_body=2,
            # weights_path='f:/1_code/python/keras-YOLOv3-mobilenet/voc07/model_data/yolo_weights.h5'):
            weights_path='f:/1_code/python/keras-YOLOv3-mobilenet/coco2017/model_data/yolo_weights.h5'):
    '''create the training model'''
    K.clear_session() # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
        num_anchors//3, num_classes+5)) for l in range(3)]

    model_body = yolo_body(image_input, num_anchors//3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers)-3)[freeze_body-1]
            for i in range(num): model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    print(model_body.output)
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model
Example #13
def loadImg(path):
  #k.clear_session()
  K.clear_session()
  path = "C:/Users/Dell/Desktop/InsurePlus - Copy/static/"+path
  model0 = loadIsCar()
  model1 = loadIsDamaged()
  model2 = loadDmgLoc ()
  model3 = loadDmgSev ()

  print("All models loaded sucessfully")

  #path = 'car.jpg'
  image = mpimg.imread(path)
  #plt.imshow(image)
  #plt.show()
  cost = 1 # 0 cheap; 1 moderate; 2 costly
  car = (isCar(path,model0))
  dmg = (isDamaged(path,model1))
  loc = (dmgLoc(path,model2))
  sev = (dmgSev(path,model3))
  a=  "is car:"+str(car)+"  "+"cost:"+str(cost)+"  "+"is damaged:"+str(dmg)+"  "+"damage loc:"+str(loc)+"  "+"sev:"+str(sev)
  est = calclaim(cost,dmg,loc,sev)*1000
  b =  str("₹"+str(est))
  # b =  str("Estimated cost to repair your car is ₹"+str(est))


  K.clear_session()
  return b
Example #14
 def _session_config(self):
     K.clear_session()
     config = tf.ConfigProto()
     # config.gpu_options.per_process_gpu_memory_fraction = 0.01
     config.gpu_options.allow_growth = True
     self.sess = tf.Session(config=config)
     K.set_session(self.sess)
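
This allow_growth setup is TF1-style (ConfigProto/Session). Under TF2, the equivalent uses the tf.config API instead of sessions; a sketch:

    # Sketch: TF2 equivalent of allow_growth (no sessions involved).
    import tensorflow as tf

    for gpu in tf.config.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)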
Example #15
def build_train_cnn(x_train,
                    x_test,
                    y_train,
                    y_test,
                    epochs=250,
                    batch_size=64):
    clear_session()
    classifier = tf.keras.Sequential()
    classifier.add(
        tf.keras.layers.Conv1D(
            filters=16,
            kernel_size=(3, ),
            input_shape=(x_train.shape[1:]),
            kernel_initializer='random_uniform',
            kernel_regularizer=tf.keras.regularizers.l2(l=1e-5)))
    classifier.add(tf.keras.layers.BatchNormalization())
    classifier.add(tf.keras.layers.MaxPooling1D(pool_size=2))

    classifier.add(
        tf.keras.layers.Conv1D(
            filters=16,
            kernel_size=(3, ),
            kernel_initializer='random_uniform',
            kernel_regularizer=tf.keras.regularizers.l2(l=1e-5)))
    classifier.add(tf.keras.layers.BatchNormalization())
    classifier.add(tf.keras.layers.MaxPooling1D(pool_size=2))

    classifier.add(
        tf.keras.layers.Conv1D(
            filters=16,
            kernel_size=(3, ),
            kernel_initializer='random_uniform',
            kernel_regularizer=tf.keras.regularizers.l2(l=1e-5)))
    classifier.add(tf.keras.layers.BatchNormalization())
    classifier.add(tf.keras.layers.MaxPooling1D(pool_size=2))

    classifier.add(tf.keras.layers.Flatten())

    classifier.add(
        (tf.keras.layers.Dense(units=128,
                               activation='relu',
                               kernel_initializer='random_uniform')))
    classifier.add(tf.keras.layers.Dropout(rate=0.2))
    classifier.add(
        (tf.keras.layers.Dense(units=y_train.shape[1],
                               activation='softmax',
                               kernel_initializer='random_uniform')))

    adam = tf.keras.optimizers.Adam(lr=1e-4, decay=1e-7)
    classifier.compile(optimizer=adam,
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])
    history = classifier.fit(x=x_train,
                             y=y_train,
                             validation_data=(x_test, y_test),
                             epochs=epochs,
                             batch_size=batch_size)
    get_model_size(classifier)

    return history
Example #16
def fitness(filter1, filter2, filter3, learning_rate):
    n_input = 51 # Number of gold nanocluster classes 
    n_classes = 751 # Total wavelengths in UV-VIS pattern
    EPOCHS = 300
    BATCH_SIZE = 10
    
    # wrap model with KerasRegressor in order to include epochs and batch size
    model = KerasRegressor(build_fn = lambda: cnn_model(n_input, n_classes, filter1, filter2, filter3, learning_rate),
                          epochs = EPOCHS, 
                           batch_size = BATCH_SIZE, 
                           verbose = False)
    
    print("")
    print("Current hyperparams:")
    print(f"filter1: {filter1}")
    print(f"filter2: {filter2}")
    print(f"filter3: {filter3}")
    print(f"learning_rate: {learning_rate}")
    
    # 5 sets of train validation splits 
    kfold = KFold(n_splits = 5, shuffle = True, random_state = 42)
    results = cross_val_score(model, X_train, Y_train, 
                             cv = kfold, 
                             scoring = 'neg_mean_absolute_error',
                             verbose = 1)
    print(results)
    mean_neg_MAE = results.mean()

    K2.clear_session()
    return -mean_neg_MAE
Example #17
def ffnn_predict(queue, welllog):
    features_columns = ['GR', 'RHOB', 'NPHI', 'RESI']
    target_column = 'DT'
    try:
        from joblib import dump, load
    except ImportError:
        print('Joblib not available. Install joblib before generating logs')

    try:
        from sklearn.preprocessing import MinMaxScaler
    except ImportError:
        print('Sklearn not available. Install sklearn before generating logs')

    ffnn_model = load_model('models/ffnn_model.h5')
    ffnn_X_scaler = load('models/ffnn_model.X_scaler')
    ffnn_y_scaler = load('models/ffnn_model.y_scaler')
    ffnn_model.summary()

    X_test_norm = ffnn_X_scaler.transform(welllog[features_columns].values)
    y_predict_norm = ffnn_model.predict(X_test_norm)
    y_predict = ffnn_y_scaler.inverse_transform(y_predict_norm).flatten()

    welllog_dept = pd.DataFrame(data=welllog['DEPT'].values, columns=['DEPT'])
    welllog_ffnn = pd.DataFrame(data=y_predict, columns=[target_column])
    welllog_ffnn = pd.concat([welllog_dept, welllog_ffnn], axis=1)

    K.clear_session()
    y_true = welllog[target_column].values
    # y_true_norm = ffnn_y_scaler.transform(y_true.reshape(1,-1))

    result = {}
    result['log'] = welllog_ffnn
    result['error'] = MAPE(y_true, y_predict)
    queue.put(result)
Example #18
def build_train_ann(x_train,
                    x_test,
                    y_train,
                    y_test,
                    epochs=250,
                    batch_size=64):
    clear_session()

    classifier = tf.keras.Sequential()
    classifier.add(tf.keras.layers.Flatten(input_shape=(x_train.shape[1:])))
    classifier.add(
        tf.keras.layers.Dense(units=128, kernel_initializer='random_uniform'))
    classifier.add(tf.keras.layers.Dropout(rate=0.2))

    classifier.add(
        (tf.keras.layers.Dense(units=128,
                               activation='relu',
                               kernel_initializer='random_uniform')))
    classifier.add(tf.keras.layers.Dropout(rate=0.2))
    classifier.add(
        (tf.keras.layers.Dense(units=y_train.shape[1],
                               activation='softmax',
                               kernel_initializer='random_uniform')))

    adam = tf.keras.optimizers.Adam(lr=1e-4, decay=1e-7)
    classifier.compile(optimizer=adam,
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])
    history = classifier.fit(x=x_train,
                             y=y_train,
                             validation_data=(x_test, y_test),
                             epochs=epochs,
                             batch_size=batch_size)
    return history
Example #19
 def run(self):
     print("Prediction thread started")
     global PredictionResult
     global PredictionModelPath
     global PredictionSpeed
     prediction = ImagePrediction()
     PredictionResult.set('')
     if PredictionModel.get() == 'SqueezeNet':
         print('Prediction model selected: SqueezeNet')
         prediction.setModelTypeAsSqueezeNet()
     elif PredictionModel.get() == 'ResNet50':
         print('Prediction model selected: ResNet50')
         prediction.setModelTypeAsResNet()
     elif PredictionModel.get() == 'InceptionV3':
         print('Prediction model selected: InceptionV3')
         prediction.setModelTypeAsInceptionV3()
     elif PredictionModel.get() == 'DenseNet121':
         print('Prediction model selected: DenseNet121')
         prediction.setModelTypeAsDenseNet()
     PredictionModelPath = prediction_model()
     print('Model path: ' + PredictionModelPath)
     prediction.setModelPath(PredictionModelPath)
     speedindex = SpeedSelector.get()
     print('Prediction speed: ' + PredictionSpeed[speedindex - 1])
     bk.clear_session()
     prediction.loadModel(prediction_speed=PredictionSpeed[speedindex - 1])
     predictions, probabilities = prediction.predictImage(
         imagePath, result_count=CountSelector.get())
     for eachPrediction, eachProbability in zip(predictions, probabilities):
         PredictionResult.set(PredictionResult.get() + "\n" +
                              str(eachPrediction) +
                              zh_cn(str(eachPrediction)) + " : " +
                              str(eachProbability))
     print("预测线程结束")
Example #20
def reset_keras():
    sess = K.get_session()
    K.clear_session()
    sess.close()
    sess = K.get_session()
    seed(123)
    tensorflow.random.set_seed(1)
    np.random.seed(123)
    SEED = 123
    os.environ['PYTHONHASHSEED'] = str(SEED)
    os.environ['TF_DETERMINISTIC_OPS'] = '1'
    try:
        del model  # this is from global space - change this as you need
    except NameError:
        pass
    try:
        del history  # this is from global space - change this as you need
    except NameError:
        pass
    print('gc.collect() : ')
    print(gc.collect())  # if it's done something you should see a number being outputted
    # use the same config as you used to create the session
    # config = tf.compat.v1.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 1
    # config.gpu_options.visible_device_list = "0"
    # config.gpu_options.allow_growth = True
    # K.set_session(tf.Session(config=config))
    tf.keras.backend.clear_session()
    print("reset keras")
Example #21
def Funcion_analisis(Vector):
    Vector_ensayo = r'C:\Users\Juatarto\Desktop\TFM\Imagenes\Test\TRAIN_RGB_256x256_Filtradas\Entrenamiento\Pista\*.jpg'
    Neuronas = int(Vector[0])
    Capas = int(Vector[1])
    Color = int(Vector[2])
    Filtros = int(Vector[3])
    Fila_pooling = int(Vector[4])
    Columna_pooling = int(Vector[5])
    Fila_filtros = int(Vector[6])
    Columna_filtros = int(Vector[7])
    Valor_LeakyReLu = Vector[8] / 100
    Epocas = int(Vector[9])
    K.clear_session()
    try:
        Direccion_modelo, Direccion_pesos = Entrenar(
            Red_Neuronal(Neuronas, Capas, Color, Filtros, Fila_pooling,
                         Columna_pooling, Fila_filtros, Columna_filtros,
                         Valor_LeakyReLu), Color, Epocas)
        # Both branches loaded the same path, so the RGB/grayscale split is collapsed here.
        Imagenes = glob.glob(Vector_ensayo)
        Vector = Ensayo_2(Imagenes, Color)
        Peso_modelo = os.path.getsize(
            Direccion_modelo)  # model file size in bytes
        Peso_pesos = os.path.getsize(
            Direccion_pesos)  # weights file size in bytes
        Peso_total = Peso_modelo + Peso_pesos
        os.remove(Direccion_modelo)  # delete the model file once measured
        os.remove(Direccion_pesos)  # delete the weights file once measured
        Vector.append(Peso_total)
    except Exception:
        # On any failure, return a fixed penalty vector.
        Vector = np.array([1000, 1000, 1000, 1000000])
    return Vector
Example #22
 def wrapper(*args, **kwargs):
     for data_format in {'channels_first', 'channels_last'}:
         K.set_image_data_format(data_format)
         func(*args, **kwargs)
         if K.backend() == 'tensorflow':
             K.clear_session()
             tf.reset_default_graph()
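
This wrapper is the inner half of a decorator that runs a test under both image data formats. A sketch of the enclosing decorator, where the decorator name is an invented placeholder:

    # Sketch of the enclosing decorator (the wrapper above is its inner function).
    import functools

    def run_in_both_data_formats(func):  # hypothetical name
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for data_format in {'channels_first', 'channels_last'}:
                K.set_image_data_format(data_format)
                func(*args, **kwargs)
                if K.backend() == 'tensorflow':
                    K.clear_session()
                    tf.reset_default_graph()
        return wrapper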
Example #23
def reset_session():
    """ Clear keras and tensorflow sessions.
    """
    if _is_tf_1():
        K.clear_session()
    else:
        tf.keras.backend.clear_session()
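
The _is_tf_1 helper is not shown; a plausible one-liner, assuming it keys off the installed version string:

    # Hypothetical helper, assumed to check the major TensorFlow version.
    import tensorflow as tf

    def _is_tf_1():
        return tf.__version__.startswith('1.')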
Example #24
def generate_uni_directional_RNN_model():
    tf.reset_default_graph()
    try:
        del model
    except NameError:
        pass
    K.clear_session()

    # X_test, INPUT_LAYERS and HIDDEN_LAYER_SIZE are assumed module-level globals
    # (this is a standalone variant of the method in Example #4).
    FEATURES = X_test.shape[2]
    LOOKBACK = X_test.shape[1]

    model = Sequential()

    for layer_number in range(INPUT_LAYERS):
        model.add(
            LSTM(HIDDEN_LAYER_SIZE,
                 input_shape=(LOOKBACK, FEATURES),
                 kernel_initializer="he_normal",
                 return_sequences=True))
        model.add(Dropout(0.2))
    model.add(TimeDistributed(Dense(1, kernel_initializer="he_normal")))

    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])
    return model
Example #25
    def __init__(self, hps,
                 batch_size=16,
                 encoder_frames_no=30):
        self.hps = hps
        self.encoder_frames_no = encoder_frames_no

        K.clear_session()

        self.encoder = NVAEEncoder128(hps,
                                      batch_size=batch_size,
                                      frames_no=encoder_frames_no)
        self.encoder_model = self.encoder.model

        self.mask_encoder = MaskEncoder128(hps,
                                           batch_size=batch_size)
        self.mask_encoder_model = self.mask_encoder.model

        self.decoder = NVAEDecoder128(hps,
                                      batch_size=batch_size)
        self.decoder_model = self.decoder.model

        super(NVAEAutoEncoder128, self).__init__(input_shape=(128, 128, 3),
                                                 batch_size=batch_size,
                                                 latent_size=1024,
                                                 frames_no=encoder_frames_no)
        # Loss Function
        self.kl_weight = K.variable(self.hps['kl_weight_start'], name='kl_weight', dtype=np.float32)
        self.mask_kl_weight = K.variable(self.hps['mask_kl_weight_start'], name='mask_kl_weight', dtype=np.float32)
        self.face_metric = FaceMetric(None, gamma=self.hps['gamma']).get_loss_from_batch
        self.loss_func = self.model_loss()
Example #26
    def load_class(self, dir_path, clear_session=True, engine=None):
        savename = dir_path.split("/")
        if len(savename[-1]) == 0:
            savename = savename[-2]
        else:
            savename = savename[-1]
        if clear_session:
            K.clear_session()

        with open('%s/%s.pkl' % (dir_path, savename), 'rb') as Input:
            save_dict = pickle.load(Input)
        self.board_height = save_dict["board_height"]
        self.board_width = save_dict["board_width"]
        self.n_feature_plane = save_dict["n_feature_plane"]
        self.n_filter = save_dict["n_filter"]
        self.kernel_size_conv = save_dict["kernel_size_conv"]
        self.kernel_size_res = save_dict["kernel_size_res"]
        self.n_res_blocks = save_dict["n_res_blocks"]
        self.l2_regularization = save_dict["l2_regularization"]
        self.bn_axis = save_dict["bn_axis"]

        try:
            self.model = load_model('%s/%s.h5' % (dir_path, savename))
        except Exception:
            print(
                "Failed to load the model directly. Creating a new model and loading weights instead ..."
            )
            self.model = self.build_model()
            self.model.load_weights("%s/%s_weight.h5" % (dir_path, savename))

        if engine == "tpu":
            self.turn_to_tpu_model()
Example #27
 def create_model(self):
     K.clear_session()
     input0 = Input(shape=(self.c['sentencepad'], self.c['wordvectdim']))
     input1 = Input(shape=(self.c['sentencepad'], self.c['wordvectdim']))
     Convolt_Layer = []
     MaxPool_Layer = []
     Flatten_Layer = []
     for kernel_size, filters in self.c['cnnfilters'].items():
         Convolt_Layer.append(
             Convolution1D(filters=filters,
                           kernel_size=kernel_size,
                           padding='valid',
                           activation=self.c['cnnactivate'],
                           kernel_initializer=self.c['cnninitial']))
         MaxPool_Layer.append(
             MaxPooling1D(pool_size=int(self.c['sentencepad'] -
                                        kernel_size + 1)))
         Flatten_Layer.append(Flatten())
     Convolted_tensor0 = []
     Convolted_tensor1 = []
     for channel in range(len(self.c['cnnfilters'])):
         Convolted_tensor0.append(Convolt_Layer[channel](input0))
         Convolted_tensor1.append(Convolt_Layer[channel](input1))
     MaxPooled_tensor0 = []
     MaxPooled_tensor1 = []
     for channel in range(len(self.c['cnnfilters'])):
         MaxPooled_tensor0.append(MaxPool_Layer[channel](
             Convolted_tensor0[channel]))
         MaxPooled_tensor1.append(MaxPool_Layer[channel](
             Convolted_tensor1[channel]))
     Flattened_tensor0 = []
     Flattened_tensor1 = []
     for channel in range(len(self.c['cnnfilters'])):
         Flattened_tensor0.append(Flatten_Layer[channel](
             MaxPooled_tensor0[channel]))
         Flattened_tensor1.append(Flatten_Layer[channel](
             MaxPooled_tensor1[channel]))
     if len(self.c['cnnfilters']) > 1:
         Flattened_tensor0 = concatenate(Flattened_tensor0)
         Flattened_tensor1 = concatenate(Flattened_tensor1)
     else:
         Flattened_tensor0 = Flattened_tensor0[0]
         Flattened_tensor1 = Flattened_tensor1[0]
     absDifference = Lambda(lambda X: K.abs(X[0] - X[1]))(
         [Flattened_tensor0, Flattened_tensor1])
     mulDifference = multiply([Flattened_tensor0, Flattened_tensor1])
     allDifference = concatenate([absDifference, mulDifference])
     for ilayer, densedimension in enumerate(self.c['densedimension']):
         allDifference = Dense(
             units=int(densedimension),
             activation=self.c['denseactivate'],
             kernel_initializer=self.c['denseinitial'])(allDifference)
     output = Dense(
         name='output',
         units=self.c['num_classes'],
         activation='softmax',
         kernel_initializer=self.c['denseinitial'])(allDifference)
     self.model = Model(inputs=[input0, input1], outputs=output)
     self.model.compile(loss='mean_squared_error',
                        optimizer=self.c['optimizer'])
Example #28
def model_body(input_shape,
               anchors,
               num_classes,
               update_callback,
               load_pretrained=True,
               freeze_body=0,
               init_weights='model_data/yolo_weights.h5',
               last_save='model_data/tfmodel'):
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    model_body = yolo_body(image_input, num_anchors // 3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(
        num_anchors, num_classes))
    if os.path.exists(last_save):
        # Note: the loaded SavedModel is not captured; assign the return value if it is needed.
        tf.saved_model.load(last_save)
    elif load_pretrained:
        model_body.load_weights(init_weights, by_name=True)
        print('Load weights {}.'.format(init_weights))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers) - 3)[freeze_body - 1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(
                num, len(model_body.layers)))

    return model_body
Example #29
def run_experiment(repeats=1,
                   model_type=None,
                   train_data=None,
                   test_data=None,
                   tb_log_dir=None):

    f1s = []
    precisions = []
    recalls = []
    accs = []

    model = None
    for r in range(repeats):
        K.clear_session()
        _log_dir = os.path.join(tb_log_dir, "run_{}".format(r))
        model = get_model(name=model_type,
                          log_dir=_log_dir,
                          train_data=train_data,
                          test_data=test_data)

        precision, recall, f1, acc = model.evaluate(log_dir=_log_dir)
        print(
            '>>>>> repeat #{}--> Precision: {:.5f}, Recall: {:.5f}, F1: {:.5f}, ACC: {:.5f}'
            .format(r + 1, precision, recall, f1, acc))
        f1s.append(f1)
        precisions.append(precision)
        recalls.append(recall)
        accs.append(acc)
    # summarize results
    p, r, f1, a = summarize_results(precisions, recalls, f1s, accs)
    return p, r, f1, a, model
Example #30
 def fit_with(units, dropout, learning_rate, epochs, batch_size):
     model = get_model_opt(units, dropout)
     
     model.compile(
         optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
         loss=keras.losses.BinaryCrossentropy(),
         metrics=[
             keras.metrics.BinaryAccuracy(name='accuracy'),
             keras.metrics.AUC(name='auc')
             ])
     
     initial_weights = os.path.join(tempfile.mkdtemp(), 'initial_weights')
     model.save_weights(initial_weights)
     model.load_weights(initial_weights)
 
     history = model.fit(x=train_features, 
                         y=train_labels,
                         epochs=int(epochs), 
                         batch_size=int(batch_size),
                         validation_data=(val_features,val_labels),
                         callbacks=[early_stopping],
                         class_weight=class_weight)
     # Evaluate the model with the eval dataset.
     accuracy = history.history['accuracy'][-1]
     
     K.clear_session()
     tf.compat.v1.reset_default_graph()
     
     # Return the accuracy.
     return accuracy
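
fit_with takes every hyperparameter as a float and returns the final accuracy, which matches the interface of the bayes_opt package's BayesianOptimization (a maximizer). A driver sketch under that assumption, with illustrative bounds; the surrounding globals (train_features, early_stopping, class_weight, etc.) are assumed defined:

    # Sketch: maximizing fit_with using the bayes_opt package.
    # Parameter bounds are illustrative assumptions.
    from bayes_opt import BayesianOptimization

    pbounds = {'units': (16, 256),
               'dropout': (0.0, 0.5),
               'learning_rate': (1e-4, 1e-2),
               'epochs': (5, 50),
               'batch_size': (16, 128)}

    optimizer = BayesianOptimization(f=fit_with, pbounds=pbounds, random_state=1)
    optimizer.maximize(init_points=2, n_iter=10)
    print(optimizer.max)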