# Example no. 1 (score: 0)
    elif MODEL == 'LSTM':

        class_nums = 2
        lstm = LSTM(class_nums, embeddings_matrix, maxlen=maxlen)
        _ = lstm(inputs=inputs)
        adam = tf.keras.optimizers.Adam(lr=0.001,
                                        beta_1=0.9,
                                        beta_2=0.999,
                                        epsilon=1e-08)
        rmsprop = tf.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)
        lstm.compile(loss='categorical_crossentropy', optimizer=adam)
        if MODE == 'train':
            lstm.fit(input_train,
                     tr_one_hot,
                     batch_size=32,
                     epochs=20,
                     verbose=1,
                     shuffle=True)

            # Save model
            lstm.save_weights(r'./saved_model/LSTM/lstm_20.HDF5')
            print('model saved.')
        else:
            # Load model
            lstm.load_weights(r'./saved_model/LSTM/lstm_20.HDF5')
            print('model loaded.')

        pred = lstm.predict(input_validation)
        pred = np.argmax(pred, axis=1)
        print('On validate data: ')
        metrics(y_test, pred)
model = LSTM()
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])

cp_path = "training_2/cp-{epoch:04d}.h5py"
cp_dir = os.path.dirname(cp_path)
# Fix: the checkpoint directory was derived but never created, so the first
# checkpoint save would fail if "training_2/" did not already exist.
os.makedirs(cp_dir, exist_ok=True)

# Create a callback that saves the model's weights after every epoch.
# Fix: `period` is deprecated; `save_freq='epoch'` is the supported spelling.
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=cp_path,
                                                 verbose=1,
                                                 save_weights_only=True,
                                                 save_freq='epoch')
log_dir = "./runs"
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)

print("Initiating Training.")
model.fit(train_ds,
          epochs=10,
          validation_data=val_ds,
          steps_per_epoch=train_ct,
          validation_steps=val_ct,
          shuffle=True,
          callbacks=[cp_callback, tensorboard_callback])
# NOTE(review): final weights go to ./training_1/ while per-epoch checkpoints
# go to training_2/ — confirm the split between the two directories is intended.
model.save_weights("./training_1/model.h5py")
print("Finished Training.")

# export PATH=/usr/local/cuda-10.0/bin:/usr/local/cuda-10.0/NsightCompute-1.0${PATH:+:${PATH}}
# export LD_LIBRARY_PATH=/usr/local/cuda-10.0/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
# Example no. 3 (score: 0)
def main(args):
    """Train an audio classifier on class-labelled .wav files.

    Args:
        args: Parsed CLI arguments providing `config` (path to a JSON config
            file), `data_dir` (root directory whose subdirectories name the
            classes and contain .wav files) and `output_dir` (root for
            checkpoints and the per-epoch CSV log).

    Raises:
        ValueError: If the config requests an unsupported optimizer or model.
    """
    with open(args.config, 'r') as f:
        config = json.load(f)

    if config['optimizer'] == 'SGD':
        # Fix: `lr` is deprecated in tf.keras; use `learning_rate`.
        # NOTE(review): `decay` is also deprecated in newer Keras versions in
        # favour of learning-rate schedules — confirm the target TF version.
        optimizer = SGD(learning_rate=config['learning_rate'],
                        decay=config['learning_rate'] / config['epochs'],
                        momentum=config['momentum'])
    else:
        # ValueError is more precise than bare Exception and still caught by
        # any caller handling Exception.
        raise ValueError('Unsupported optimizer: {}.'.format(
            config['optimizer']))

    model_name = config['model'].lower()
    # Dispatch table instead of an if/elif chain; all builders share the
    # (input_length, n_classes) signature, with 2 classes (binary task).
    model_builders = {'lstm': LSTM, 'conv1d': Conv1D, 'conv2d': Conv2D}
    if model_name not in model_builders:
        raise ValueError('Unsupported model: {}.'.format(config['model']))
    model = model_builders[model_name](config['input_length'], 2)

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    wav_paths = glob('{}/**'.format(args.data_dir), recursive=True)
    # Fix: the original substring test ('.wav' in x) also matched names like
    # "clip.wav.bak"; match the extension explicitly instead.
    wav_paths = [x for x in wav_paths if x.endswith('.wav')]
    classes = sorted(os.listdir(args.data_dir))
    le = LabelEncoder()
    le.fit(classes)
    labels = [get_class(x, args.data_dir) for x in wav_paths]
    labels = le.transform(labels)

    print('CLASSES: ', list(le.classes_))
    print(le.transform(list(le.classes_)))

    # Fixed random_state keeps the train/validation split reproducible.
    wav_train, wav_val, label_train, label_val = train_test_split(
        wav_paths,
        labels,
        test_size=config['validation_split'],
        random_state=0)
    tg = DataGenerator(wav_train,
                       label_train,
                       config['input_length'],
                       len(set(label_train)),
                       batch_size=config['batch_size'])
    vg = DataGenerator(wav_val,
                       label_val,
                       config['input_length'],
                       len(set(label_val)),
                       batch_size=config['batch_size'])

    # Timestamped run directory so repeated runs never overwrite each other.
    output_sub_dir = os.path.join(args.output_dir, model_name,
                                  datetime.now().strftime('%Y%m%d_%H%M%S'))
    os.makedirs(output_sub_dir)

    callbacks = [
        # Stop when val_loss stalls and roll back to the best epoch's weights.
        EarlyStopping(monitor='val_loss',
                      patience=config['patience'],
                      restore_best_weights=True,
                      verbose=1),
        # Keep only checkpoints that improve val_loss.
        ModelCheckpoint(filepath=os.path.join(
            output_sub_dir, 'model.{epoch:02d}-{val_loss:.4f}.h5'),
                        monitor='val_loss',
                        save_best_only=True,
                        verbose=1),
        CSVLogger(os.path.join(output_sub_dir, 'epochs.csv'))
    ]

    model.fit(tg,
              validation_data=vg,
              epochs=config['epochs'],
              verbose=1,
              callbacks=callbacks)