def main(data_dir):
    BATCH_SIZE = 16
    N_SAMPLES_IN = 40  # 10 samples per hour, so 40 samples cover 4 hours
    N_SAMPLES_OUT = 5

    generator = MyData(
        data_dir,
        BATCH_SIZE,
        N_SAMPLES_IN,
        N_SAMPLES_OUT
    )

    lstm = LSTM(N_SAMPLES_IN, N_SAMPLES_OUT, 1, 1)
    lstm.build()
    lstm.compile()

    lstm.model.fit(generator,
                   epochs=6,
                   shuffle=True,
                   verbose=1,
                   steps_per_epoch=len(generator))

    lstm.model.save('models/lstm_%d_%d_%d.h5' % (
        BATCH_SIZE, N_SAMPLES_IN, N_SAMPLES_OUT
    ))
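
MyData and the LSTM wrapper above are project-local classes not defined in this excerpt. A minimal sketch of what such a generator could look like, assuming it is a tf.keras.utils.Sequence that slices one long 1-D series (stored here, hypothetically, as series.npy) into input/output windows:

import numpy as np
import tensorflow as tf

class MyData(tf.keras.utils.Sequence):
    """Hypothetical sketch: serves (past window -> future window) batches."""

    def __init__(self, data_dir, batch_size, n_in, n_out):
        # Assumed layout: a single 1-D series saved as series.npy in data_dir.
        self.series = np.load(f'{data_dir}/series.npy')
        self.batch_size, self.n_in, self.n_out = batch_size, n_in, n_out
        self.n_windows = len(self.series) - n_in - n_out + 1

    def __len__(self):
        # Number of full batches per epoch; this is what steps_per_epoch uses.
        return self.n_windows // self.batch_size

    def __getitem__(self, idx):
        starts = range(idx * self.batch_size, (idx + 1) * self.batch_size)
        x = np.stack([self.series[s:s + self.n_in] for s in starts])
        y = np.stack([self.series[s + self.n_in:s + self.n_in + self.n_out]
                      for s in starts])
        return x[..., None], y  # trailing feature axis for the LSTM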
# Count batches in each split so fit()/evaluate() know how many steps one pass takes.
val_ct = 0
for audio, lbl in val_ds:
    val_ct += 1

test_ct = 0
for audio, lbl in test_ds:
    test_ct += 1

# Dataset.repeat() returns a new dataset; reassign it, or the call is a no-op.
train_ds = train_ds.repeat()
val_ds = val_ds.repeat()
test_ds = test_ds.repeat()

model = LSTM()
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])

cp_path = "training_2/cp-{epoch:04d}.h5"
cp_dir = os.path.dirname(cp_path)
os.makedirs(cp_dir, exist_ok=True)  # make sure the checkpoint directory exists

# Create a callback that saves the model's weights after every epoch
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=cp_path,
                                                 verbose=1,
                                                 save_weights_only=True,
                                                 save_freq='epoch')
log_dir = "./runs"
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)

print("Initiating Training.")
# model.fit(train_ds, epochs = 10, validation_data = val_ds, steps_per_epoch = train_ct, validation_steps = val_ct, shuffle = True, callbacks = [cp_callback])
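
The batch-counting loops above make one full pass per dataset; tf.data can usually report the count directly. A minimal equivalent (it must run before the repeat() calls, which make the datasets infinite):

import tensorflow as tf

def count_batches(ds):
    # cardinality() is known statically for most pipelines;
    # UNKNOWN (-2) and INFINITE (-1) still require a manual pass.
    n = tf.data.experimental.cardinality(ds).numpy()
    return int(n) if n >= 0 else sum(1 for _ in ds)

val_ct = count_batches(val_ds)
test_ct = count_batches(test_ds)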
Example #3
        ones_tr = np.ones(shape=(len(train_seg), num_actions))
        pred_tr = predict(actor_q_model, (input_train, ones_tr))
        print('On train data: ')
        metrics(y_train, pred_tr)

    elif MODEL == 'LSTM':

        class_nums = 2
        lstm = LSTM(class_nums, embeddings_matrix, maxlen=maxlen)
        _ = lstm(inputs=inputs)
        adam = tf.keras.optimizers.Adam(learning_rate=0.001,
                                        beta_1=0.9,
                                        beta_2=0.999,
                                        epsilon=1e-08)
        # Alternative optimizer; defined here but unused below.
        rmsprop = tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9, epsilon=1e-06)
        lstm.compile(loss='categorical_crossentropy', optimizer=adam)
        if MODE == 'train':
            lstm.fit(input_train,
                     tr_one_hot,
                     batch_size=32,
                     epochs=20,
                     verbose=1,
                     shuffle=True)

            # Save model
            lstm.save_weights(r'./saved_model/LSTM/lstm_20.hdf5')  # lowercase .hdf5 so Keras infers the HDF5 format
            print('model saved.')
        else:
            # Load model
            lstm.load_weights(r'./saved_model/LSTM/lstm_20.hdf5')
            print('model loaded.')
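
tr_one_hot is built outside this excerpt. Assuming the integer class labels live in y_train (as in the first branch), it would typically come from Keras' one-hot helper:

import tensorflow as tf

# Hypothetical: y_train holds integer ids in {0, ..., class_nums - 1}.
tr_one_hot = tf.keras.utils.to_categorical(y_train, num_classes=class_nums)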
Example #4
import json
import os
from datetime import datetime
from glob import glob

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import SGD

# LSTM, Conv1D, Conv2D, DataGenerator and get_class are project-local
# modules not shown in this excerpt.

def main(args):
    with open(args.config, 'r') as f:
        config = json.load(f)

    if config['optimizer'] == 'SGD':
        optimizer = SGD(learning_rate=config['learning_rate'],
                        decay=config['learning_rate'] / config['epochs'],
                        momentum=config['momentum'])
    else:
        raise ValueError('Unsupported optimizer: {}.'.format(
            config['optimizer']))

    model_name = config['model'].lower()
    if model_name == 'lstm':
        model = LSTM(config['input_length'], 2)
    elif model_name == 'conv1d':
        model = Conv1D(config['input_length'], 2)
    elif model_name == 'conv2d':
        model = Conv2D(config['input_length'], 2)
    else:
        raise ValueError('Unsupported model: {}.'.format(config['model']))

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    wav_paths = glob('{}/**'.format(args.data_dir), recursive=True)
    wav_paths = [x for x in wav_paths if x.endswith('.wav')]
    classes = sorted(os.listdir(args.data_dir))
    le = LabelEncoder()
    le.fit(classes)
    labels = [get_class(x, args.data_dir) for x in wav_paths]
    labels = le.transform(labels)

    print('CLASSES: ', list(le.classes_))
    print(le.transform(list(le.classes_)))

    wav_train, wav_val, label_train, label_val = train_test_split(
        wav_paths,
        labels,
        test_size=config['validation_split'],
        random_state=0)
    tg = DataGenerator(wav_train,
                       label_train,
                       config['input_length'],
                       len(set(label_train)),
                       batch_size=config['batch_size'])
    vg = DataGenerator(wav_val,
                       label_val,
                       config['input_length'],
                       len(set(label_val)),
                       batch_size=config['batch_size'])

    output_sub_dir = os.path.join(args.output_dir, model_name,
                                  datetime.now().strftime('%Y%m%d_%H%M%S'))
    os.makedirs(output_sub_dir)

    callbacks = [
        EarlyStopping(monitor='val_loss',
                      patience=config['patience'],
                      restore_best_weights=True,
                      verbose=1),
        ModelCheckpoint(filepath=os.path.join(
            output_sub_dir, 'model.{epoch:02d}-{val_loss:.4f}.h5'),
                        monitor='val_loss',
                        save_best_only=True,
                        verbose=1),
        CSVLogger(os.path.join(output_sub_dir, 'epochs.csv'))
    ]

    model.fit(tg,
              validation_data=vg,
              epochs=config['epochs'],
              verbose=1,
              callbacks=callbacks)
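
get_class, used to derive a label from each path, is project-local and not shown. Since classes are the directory names under data_dir, a plausible (hypothetical) implementation:

import os

def get_class(wav_path, data_dir):
    # Assumes a <data_dir>/<class_name>/.../file.wav layout, matching the
    # classes list built from os.listdir(data_dir) above.
    return os.path.relpath(wav_path, data_dir).split(os.sep)[0]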