# Example #1

# Generate synthetic training and evaluation batches.
gendata = generatorData()

train_datas, train_labels = gendata.gen_Data(batch_size=60)
test_datas, test_labels = gendata.gen_Data(batch_size=40)

train_datas_shapes = np.shape(train_datas)

# Labels come back flat; regroup as 4 labels per sample before one-hot
# encoding (assumes each sample carries exactly 4 characters -- TODO confirm
# against generatorData).
train_labels_s = np.reshape(train_labels, (int(len(train_labels) / 4), 4))
train_hot = to_categorical(train_labels_s)

# Per-sample input shape: drop the leading batch dimension.
input_shapes = np.shape(train_datas)[1:]

model = CRNN(input_shapes).CRNN_model

model.summary()

# The 'ctc' model output already IS the CTC loss value, so the Keras loss
# simply passes it through.  FIX: the first lambda parameter previously
# shadowed the outer `train_hot` variable even though it is ignored; name it
# y_true per Keras convention to avoid confusion.
model.compile(loss={
    'ctc': lambda y_true, y_pred: y_pred
},
              optimizer=RMSprop())

# Model inputs: [images, one-hot labels, input seq length, label length];
# the np.ones(1) target is a dummy consumed by the pass-through loss.
# NOTE(review): the length tensors have shape (1,) while the batch has 60
# samples -- confirm the model broadcasts these rather than needing one
# entry per sample.
history = model.fit(
    [train_datas, train_hot,
     np.ones(1) * 15, np.ones(1) * 37],
    np.ones(1),
    batch_size=10,
    epochs=100,
    verbose=2)
# Example #2
    # Dataset statistics over the full MJSynth annotation set.
    print('Num. of images:', len(mj_synth.all_image_paths))
    print('All Train {} / All Val {} / All Test {}'.format(
        len(mj_synth.annotation_train), len(mj_synth.annotation_val),
        len(mj_synth.annotation_test)))

    # Subsample 0.5% of each split to keep training tractable.
    X_train, y_train, X_val, y_val, X_test, y_test = mj_synth.random_choice(
        random_choice_rate=0.005)
    print('Train {} / Val {} / Test {}'.format(len(y_train), len(y_val),
                                               len(y_test)))

    # BUG FIX: the test split was previously built with y_val, pairing the
    # test images with validation labels; pass y_test instead.
    train_ds, val_ds, test_ds = mj_synth.create_datasets(
        X_train, y_train, X_val, y_val, X_test, y_test)

    # Model definition
    crnn = CRNN()
    crnn.compile(mj_synth.max_label_len)

    # Checkpoint only the weights with the best validation loss.
    ckpt = ModelCheckpoint(filepath=args.save_model_path,
                           monitor='val_loss',
                           verbose=1,
                           save_best_only=True,
                           mode='auto')
    callbacks_list = [ckpt]
    crnn.training_model.fit(x=[*train_ds],
                            y=np.zeros(len(train_ds[0])),
                            batch_size=args.batch_size,
                            epochs=args.epochs,
                            validation_data=([*val_ds],
                                             [np.zeros(len(val_ds[0]))]),
                            verbose=1,
# Example #3
def train_model(spectr_dir,
                train_csv,
                test_csv,
                model_type,
                use_cache=False,
                resume=False):
    """Train a spectrogram classifier and print a test-set report.

    Args:
        spectr_dir: Directory containing the spectrogram images.
        train_csv: CSV listing the training samples.
        test_csv: CSV listing the test samples.
        model_type: One of "resnet18", "resnet34", "CRNN"; any other value
            falls back to the simple CNN.
        use_cache: If True, load previously saved .npy arrays instead of
            re-reading the spectrogram directory.
        resume: If True, load saved weights from ./model/ before training.
    """

    print('Loading data... ')

    if not use_cache:
        x_train, y_train = load_data_from_spectrogram_dir(
            spectr_dir, ['h', 'p'], train_csv)
        x_test, y_test_ori = load_data_from_spectrogram_dir(
            spectr_dir, ['h', 'p'], test_csv)

        # Cache the extracted arrays so later runs can skip the slow read.
        print('Saving data to cache...')
        np.save('train_data.npy', x_train)
        np.save('train_label.npy', y_train)
        np.save('val_data.npy', x_test)
        np.save('val_label.npy', y_test_ori)
    else:
        print('Loading data from cache...')
        x_train = np.load('train_data.npy')
        y_train = np.load('train_label.npy')
        x_test = np.load('val_data.npy')
        y_test_ori = np.load('val_label.npy')
        print(y_train.shape)

    label_count = np.bincount(y_train)
    print('Train label count: ', label_count)

    # Compensate for class imbalance.  FIX: use keyword arguments --
    # positional calls to compute_class_weight were removed in sklearn >= 1.0
    # and the keywords are accepted by older versions as well.
    from sklearn.utils import class_weight
    y_train = list(y_train)
    class_weights = class_weight.compute_class_weight(
        class_weight='balanced', classes=np.unique(y_train), y=y_train)
    # BUG FIX: Keras expects class_weight as a {class_index: weight} dict,
    # not a bare ndarray.
    class_weight_dict = dict(enumerate(class_weights))

    print('sample weight per class', class_weights)

    print('-' * 130)
    print('Model train')
    print('-' * 130)

    input_shape = (rows, cols, channels)

    from model import ResnetBuilder, CRNN, simple_CNN

    if model_type == "resnet18":
        model = ResnetBuilder.build_resnet_18(input_shape, num_classes)
    elif model_type == "resnet34":
        # BUG FIX: this branch previously built a ResNet-18.
        model = ResnetBuilder.build_resnet_34(input_shape, num_classes)
    elif model_type == "CRNN":
        model = CRNN(input_shape, num_classes)
    else:
        model = simple_CNN(input_shape, num_classes)

    optimizer = Adadelta(0.1, rho=0.7)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    # Reshape flat samples into (rows, cols, channels) images and one-hot
    # encode the labels; keep the raw test labels for the final report.
    x_train = x_train.reshape(x_train.shape[0], rows, cols, channels)
    x_test = x_test.reshape(x_test.shape[0], rows, cols, channels)
    y_train = to_categorical(y_train, num_classes)
    y_test = to_categorical(y_test_ori, num_classes)

    ### Load weights
    if resume:
        model.load_weights('./model/' + model_name)
    #
    checkpoint = ModelCheckpoint('./model/' + model_name,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    early_stop = EarlyStopping(monitor='val_acc', patience=10, mode='max')
    callbacks_list = [checkpoint, early_stop]

    print('epochs', epochs)

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              verbose=1,
              shuffle=True,
              callbacks=callbacks_list,
              class_weight=class_weight_dict)

    # Evaluate on the held-out test set using the raw (integer) labels.
    print('Report')
    y_prob = model.predict(x_test)
    y_pred = y_prob.argmax(axis=-1)
    print('test_y', np.bincount(y_test_ori))
    print(classification_report(y_test_ori, y_pred, target_names=CLASSES))

    model.save(model_name)