Example #1
    
writer.writeheader()
loss_val = []
    
for epoch in range(num_epochs):
    print("EPOCH ", epoch, "/", num_epochs)
    temp_loss_list = []
    label_list = []
    pred_list = []
    
    temp_loss_val = []
    label_val = []
    pred_val = []
    
    # Update weights on the training set
    for i in range(training_generator.__len__()):
        x_train, y_train = training_generator.__getitem__(i)
        with tf.GradientTape() as tape:
            logits = model(x_train, training=True)
            loss_values = loss(y_train, logits)
            
        # Follow training by printing the loss and prediction for each image
        #print("Batch ", i, "/",training_generator.__len__(), 
        #    ", Loss: ", loss_values.numpy()[0]) 
        #print("Prediction: ", logits.numpy()[0,:], ", Label: ", y_train[0])

        temp_loss_list.append(loss_values.numpy().mean())
        label_list.append(y_train[0])
        pred_list.append(logits.numpy()[0,1])
        
        grads = tape.gradient(loss_values, model.trainable_variables[-4:])
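The snippet ends before the computed gradients are applied. A minimal continuation, assuming an optimizer such as tf.keras.optimizers.Adam was created alongside loss (mirroring Example #2):

        # Apply the gradients to the four fine-tuned variables
        optimizer.apply_gradients(zip(grads, model.trainable_variables[-4:]))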
Example #2
 print("EPOCH ", epoch, "/", num_epochs)
 
 "Reset all metrics"
 train_loss.reset_states()
 train_accuracy.reset_states()
 test_loss.reset_states()
 test_accuracy.reset_states()
 
 label_list = []
 pred_list = []
 label_val = []
 pred_val = []
 pred_val_ema = []
 
 "Iterate over all images of training set"
 for i in range(training_generator.__len__()):
     print("Batch ", i, "/",training_generator.__len__() - 1)
     x_train, y_train = training_generator.__getitem__(i)
     "Update model"
     with tf.GradientTape() as tape:
         predictions = model(x_train, training = True)
         loss = loss_object(y_train, predictions)
     gradients = tape.gradient(loss, 
         model.trainable_variables[-nb_train_layers:])
     optimizer.apply_gradients(zip(gradients, 
         model.trainable_variables[-nb_train_layers:]))
         
     "Collect labels and predictions"
     train_loss(loss)
     train_accuracy(y_train[0], predictions)
     label_list.append(y_train[0])
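This snippet is also truncated mid-loop. After the last batch, the accumulated tf.keras.metrics objects would typically be read out with result(); a minimal sketch:

 # After the epoch, report the accumulated metric values
 print("Train loss:", train_loss.result().numpy(),
       "- train accuracy:", train_accuracy.result().numpy())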
Example #3
    mean_cm = cnf_matrix_frame
else:
    mean_cm += cnf_matrix_frame

mean_train.append(history.history['acc'][-1])
mean_test.append(history.history['val_acc'][-1])
mean_train_loss.append(history.history['loss'][-1])
mean_test_loss.append(history.history['val_loss'][-1])
mean_test_3.append(history.history['val_top_3_accuracy'][-1])
mean_test_5.append(history.history['val_top_5_accuracy'][-1])

if config_params['logging']['enable']:
    with open(LOGGING_FILE, 'a') as f:
        f.write('{},{},{},{},{},{},{},{},{}\n'.format(
            SUBJECT,
            train_generator.__len__() * PARAMS_TRAIN_GENERATOR['batch_size'],
            valid_generator.__len__(), mean_train_loss[-1], mean_train[-1],
            mean_test_loss[-1], mean_test[-1], mean_test_3[-1],
            mean_test_5[-1]))

metrics_dict = {
    'mean_cm': mean_cm,
    'mean_test': mean_test,
    'mean_test_3': mean_test_3,
    'mean_test_5': mean_test_5,
    'mean_train': mean_train,
    'mean_train_loss': mean_train_loss,
    'mean_test_loss': mean_test_loss
}
scipy.io.savemat(METRICS_SAVE_FILE.format(SUBJECT), metrics_dict)
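All five examples drive a project-specific DataGenerator through __len__ and __getitem__, the interface defined by tf.keras.utils.Sequence. A minimal, hypothetical skeleton of such a generator (the real implementations behind these examples differ):

import numpy as np
import tensorflow as tf

class DataGenerator(tf.keras.utils.Sequence):
    """Hypothetical sketch: serves (x, y) batches from in-memory arrays."""

    def __init__(self, x, y, batch_size=10):
        self.x, self.y = x, y
        self.batch_size = batch_size

    def __len__(self):
        # Number of batches per epoch
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, index):
        # Return the batch at position index
        sl = slice(index * self.batch_size, (index + 1) * self.batch_size)
        return self.x[sl], self.y[sl]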
Example #4
def train(hires=False,
          filters=16,
          max_depth=2,
          kernel_size=2,
          pool_size=2,
          doubling=False,
          algorithm='adam'):
    '''
        *** Set the training parameters ***
    '''
    input_length = 552
    batch_size = 10
    epochs = 10
    learn_rate = 1.0e-3
    '''
        *** Choose the data for training and evaluation ***
    '''
    if hires:
        # 80-dimensional data; the more detailed option
        input_dim = 80
        train_data = 'train_hires.h5'
        test_data = 'test_hires.h5'
    else:
        # 40-dimensional data; the default
        input_dim = 40
        train_data = './train.h5'
        test_data = './test.h5'
    '''
        *** Select the algorithm (optimizer) used for training ***
    '''
    if algorithm == 'sgd':
        optimizer = tf.keras.optimizers.SGD()
    elif algorithm == 'adadelta':
        optimizer = tf.keras.optimizers.Adadelta()
    elif algorithm == 'rmsprop':
        optimizer = tf.keras.optimizers.RMSprop()
    else:
        optimizer = tf.keras.optimizers.Adam()
    '''
        *** Specify the shape (dimensions) of the input vector ***
        (feature dim, time/length dim, filter dim) = numpy shape
    '''
    inputs = tf.keras.layers.Input(shape=(input_dim, input_length, 1))
    '''
        *** Build the neural network ***
        See cnn.py for how it is constructed
    '''
    output = CNNClassifier()(inputs)
    '''
        *** Configure the model ***
        Specify the network's inputs and outputs so it can be trained and evaluated
    '''
    model = tf.keras.models.Model(inputs, output)
    # Create a trainable computation graph
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])
    '''
        *** Prepare the training data ***
    '''
    training_generator = DataGenerator(train_data,
                                       dim=(input_dim, input_length),
                                       batch_size=batch_size)
    # Normalize: remove bias so the input data follow a normal distribution
    training_generator.compute_norm()
    mean, var = training_generator.get_norm()
    '''
        *** Prepare the evaluation data ***
    '''
    validation_generator = DataGenerator(test_data,
                                         dim=(input_dim, input_length),
                                         batch_size=batch_size)
    validation_generator.set_norm(mean, var)
    '''
        *** Save logs of the training progress with TensorBoard ***
    '''
    try:
        shutil.rmtree('./logs')
    except FileNotFoundError:
        # No previous logs to remove
        pass
    os.makedirs('./logs')
    tensorboard = tf.keras.callbacks.TensorBoard(log_dir='./logs')
    '''
        *** Training ***
        Done by calling the TensorFlow function below.
    '''
    model.fit(training_generator,
              validation_data=validation_generator,
              epochs=epochs,
              callbacks=[tensorboard],
              shuffle=True)
    '''
        *** Evaluation ***
        Evaluate using the trained neural network
    '''
    # Initialize the confusion matrix
    mat = np.zeros((ASC_CLASS, ASC_CLASS), dtype=int)
    # Initialize the count of correct answers
    acc = 0

    for bt in range(validation_generator.__len__()):
        # Fetch one batch of 10 samples
        # x ... mel-spectrum features, y ... labels
        x, y = validation_generator.__getitem__(bt)
        # Predict with the trained model
        # pred is 10 vectors, each holding the probabilities of the 5 call classes
        pred = model.predict_on_batch(x)

        # Find the label index with the highest probability
        y_pred = np.argmax(pred, axis=1)
        y_true = np.argmax(y, axis=1)

        # Accumulate the confusion matrix
        mat += confusion_matrix(y_true, y_pred, labels=[0, 1, 2, 3, 4])

        # Compare predicted labels with the ground truth and count the correct ones
        acc += np.sum(y_pred == y_true)

    # Divide by the total number of evaluation samples
    acc = float(acc) / validation_generator.__num_samples__()

    return acc, mat
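A hypothetical call to this function, assuming ASC_CLASS, CNNClassifier, and DataGenerator are defined in the surrounding module:

if __name__ == '__main__':
    # Train with the default 40-dimensional features and the Adam optimizer
    accuracy, confusion = train(hires=False, algorithm='adam')
    print('Validation accuracy: {:.3f}'.format(accuracy))
    print(confusion)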
Example #5
              metrics=['accuracy'])

tbCallBack = keras.callbacks.TensorBoard(log_dir='../Graph',
                                         histogram_freq=0,
                                         write_graph=True,
                                         write_images=True)

training_generator = DataGenerator(IMAGES_PER_CLASS,
                                   CLASSES,
                                   RATIO,
                                   data_path,
                                   batch_size=batch_size,
                                   dim=(SIZE, SIZE))
validation_generator = DataGenerator(IMAGES_PER_CLASS,
                                     CLASSES,
                                     RATIO,
                                     data_path,
                                     batch_size=batch_size,
                                     dim=(SIZE, SIZE),
                                     mode="validation")

model.fit_generator(training_generator,
                    epochs=epochs,
                    steps_per_epoch=training_generator.__len__(),
                    validation_data=validation_generator,
                    validation_steps=validation_generator.__len__(),
                    callbacks=[tbCallBack])

model.save('../models/keras/model_{}.h5'.format(
    len(next(os.walk("../models/keras"))[2])))
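Note that fit_generator was deprecated in TensorFlow 2.1; there, model.fit accepts Sequence generators directly and infers the step counts, so an equivalent call would be:

model.fit(training_generator,
          epochs=epochs,
          validation_data=validation_generator,
          callbacks=[tbCallBack])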