Example #1
def train_beats(**kwargs):
    # ============================================================= #
    # ======================== TRAINING BEATS ===================== #
    # ============================================================= #

    extraction = kwargs.get('extraction', 'pixel')

    max_normalize = kwargs.get('max_normalize', 255)  # read but not used below; the ranges are hard-coded per extraction

    learn_rate = kwargs.get('learning_rate', 0.05)
    n_epochs = kwargs.get('max_epoch', 100)
    n_codebooks = kwargs.get('n_codebooks', 3)

    print("learning rate: " + str(learn_rate))
    print("epoch: " + str(n_epochs))
    print("class: " + str(n_codebooks))
    print()

    train_beats = LVQ()
    train_beats.set_n_codebooks(n_codebooks)

    # load and prepare data train
    filename = 'train_beats.csv'
    train_beats.load_csv(filename, 'train')
    if extraction == 'pixel':
        for i in range(len(train_beats.data_train[0]) - 1):
            train_beats.min_max_normalize(train_beats.data_train, i, 0, 255)
    else:
        for i in range(len(train_beats.data_train[0]) - 1):
            train_beats.min_max_normalize(train_beats.data_train, i, 0, 50)

    # Training process
    start_time = time.time()
    train_beats.train_codebooks(learn_rate, n_epochs)
    duration = time.time() - start_time

    print("\nclass codebooks: ", end="")
    print([row[-1] for row in train_beats.codebooks])

    score, wrong_data, actual, predictions = train_beats.accuracy_metric(
        'train')

    print("===============train beats==============")
    print("Waktu proses pembelajaran: %s detik ---" % (duration))
    print("score: " + str(round(score, 3)) + "%\n")
    print("wrong data: ", end="")
    print(wrong_data)

    train_beats.export_codebooks("beats_codebooks")

    beats, beats_test, dataset_path = create_dataset.group_data_beats()

    # Show wrong data image
    # helper.show_wrong_data(wrong_data, predictions, beats, dataset_path)
    # exit()

    return score, duration
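
The normalization loops above scale every feature column (all columns except the trailing class label) into [0, 1] against a fixed range: 0..255 for pixel features, 0..50 otherwise. The LVQ class itself is not shown on this page; the standalone sketch below only illustrates the assumed behavior of min_max_normalize and is not the project's implementation.

# Hypothetical sketch of what the min_max_normalize calls above appear to do.
def min_max_normalize(dataset, col, min_value, max_value):
    # Scale column `col` of every row into [0, 1] against a fixed range,
    # leaving other columns (including the class label) untouched.
    span = float(max_value - min_value)
    for row in dataset:
        row[col] = (row[col] - min_value) / span

rows = [[0, 200, 'beat_a'], [255, 50, 'beat_b']]
for i in range(len(rows[0]) - 1):      # skip the trailing label column
    min_max_normalize(rows, i, 0, 255)
print(rows)  # [[0.0, 0.784..., 'beat_a'], [1.0, 0.196..., 'beat_b']]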
Example #2
# load and prepare data test
filename = 'test_three.csv'
train_beats.load_csv(filename, 'test')
for i in range(len(train_beats.data_test[0]) - 1):
    if i != 5:
        train_beats.min_max_normalize(train_beats.data_test, i, 0, 50)
    else:
        train_beats.min_max_normalize(train_beats.data_test, i, 0, 30)

train_beats.train_codebooks(learn_rate, n_epochs)

print("class codebooks: ", end="")
print([row[-1] for row in train_beats.codebooks])

score, wrong_data, actual, predictions = train_beats.accuracy_metric('train')
score_test, wrong_data_test, actual_test, predictions_test = train_beats.accuracy_metric(
    'test')

print("===============train==============")
print("score: " + str(round(score, 3)) + "%")
print("\n")
print("wrong data: ", end="")
print(wrong_data)

print("\n===============test===============")
print("score test: " + str(round(score_test, 3)) + "%")
print("\n")
print("wrong data test: ", end="")
print(wrong_data_test)
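
This example scores the same trained codebooks on both the train and the test split. Judging by how the return values are used (a percentage score plus a list of misclassified rows), the scoring step of accuracy_metric likely amounts to the following hypothetical sketch; the real method takes a 'train'/'test' selector and computes the predictions internally, which is omitted here.

# Hypothetical sketch of the score/wrong_data bookkeeping (an assumption about
# LVQ.accuracy_metric, not the project's code): the score is the percentage of
# correctly classified rows and wrong_data holds the indices of the misses.
def accuracy_metric(actual, predictions):
    wrong_data = [i for i, (a, p) in enumerate(zip(actual, predictions)) if a != p]
    score = (len(actual) - len(wrong_data)) / float(len(actual)) * 100
    return score, wrong_data, actual, predictions

score, wrong, _, _ = accuracy_metric(['whole', 'half', 'quarter'],
                                     ['whole', 'quarter', 'quarter'])
print(round(score, 3), wrong)  # 66.667 [1]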
Example #3
learn_rate = 0.05  # assumed learning rate; the other examples default to 0.05
n_epochs = 400
n_codebooks = 3

train_beats = LVQ()
train_beats.set_n_codebooks(n_codebooks)

# load and prepare data
filename = 'train_beats.csv'
train_beats.load_csv(filename, 'train')

train_beats.train_codebooks(learn_rate, n_epochs)

for c in train_beats.codebooks:
    print(c[-1])

score, wrong_data = train_beats.accuracy_metric('train')

print(score)
print(wrong_data)
# print(len(wrong_data))
with open("codebooks.csv", "w") as f:
    for codebook in train_beats.codebooks:
        for data in codebook:
            f.write(str(data) + ", ")
        f.write("\n")
os.rename('codebooks.csv', 'codebooks(' + str(score) + ').csv')
exit()
img_data = list()

# print(len(beats[0]))
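
This example writes the trained codebooks to CSV by hand and then renames the file so the achieved score appears in its name. Later examples delegate the export to export_codebooks; a hypothetical equivalent built on the csv module could look like the sketch below (the real method's exact format may differ).

# Hypothetical helper mirroring the manual export loop above; not the
# project's LVQ.export_codebooks implementation.
import csv

def export_codebooks(codebooks, name):
    # One codebook vector (features followed by the class label) per CSV row.
    with open(name + ".csv", "w", newline="") as f:
        writer = csv.writer(f)
        for codebook in codebooks:
            writer.writerow(codebook)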
Example #4
# load and prepare data train
filename = 'train_three.csv'
train_beats.load_csv(filename, 'train')

# load and prepare data test
filename = 'test_three.csv'
train_beats.load_csv(filename, 'test')

start_time = time.time()
train_beats.train_codebooks(learn_rate, n_epochs)
print("--- %s seconds ---" % (time.time() - start_time))

print("class codebooks: ", end="")
print([row[-1] for row in train_beats.codebooks])

score, wrong_data = train_beats.accuracy_metric('train')
score_test, wrong_data_test = train_beats.accuracy_metric('test')

print("===============train==============")
print("score: " + str(score) + "%")
print("\n")
print("wrong data: ", end="")
print(wrong_data)

print("\n===============test===============")
print("score test: " + str(score_test) + "%")
print("\n")
print("wrong data test: ", end="")
print(wrong_data_test)

img_data = list()
Example #5
def train_pitch(**kwargs):
    # ============================================================= #
    # ======================== TRAINING PITCH ===================== #
    # ============================================================= #

    identifier = kwargs.get('identifier', 'quarter')

    extraction = kwargs.get('extraction', 'paranada')

    max_normalize = kwargs.get('max_normalize', 255)  # read but not used below; the ranges are hard-coded per extraction

    learn_rate = kwargs.get('learning_rate', 0.05)
    n_epochs = kwargs.get('max_epoch', 100)
    n_codebooks = kwargs.get('n_codebooks', 9)

    show_wrong_data = kwargs.get('show_wrong_data', False)

    print("learning rate: " + str(learn_rate))
    print("epoch: " + str(n_epochs))
    print("class: " + str(n_codebooks))
    print()

    train_pitch = LVQ()
    train_pitch.set_n_codebooks(n_codebooks)

    # load and prepare data train
    filename = 'train_' + identifier + '.csv'
    train_pitch.load_csv(filename, 'train')
    if extraction == 'paranada':
        for i in range(len(train_pitch.data_train[0]) - 1):
            if i != 5:  # the average-value feature (column 5) uses a different range
                train_pitch.min_max_normalize(train_pitch.data_train, i, 0, 50)
            else:
                train_pitch.min_max_normalize(train_pitch.data_train, i, 0, 30)
    elif extraction == 'pixel':
        for i in range(len(train_pitch.data_train[0]) - 1):
            train_pitch.min_max_normalize(train_pitch.data_train, i, 0, 255)
    else:
        for i in range(len(train_pitch.data_train[0]) - 1):
            train_pitch.min_max_normalize(train_pitch.data_train, i, 0, 30)

    # load and prepare data test
    # filename = 'test_whole.csv'
    # train_pitch.load_csv(filename, 'test')
    # for i in range(len(train_pitch.data_test[0])-1):
    #     if i != 5:
    #         train_pitch.min_max_normalize(train_pitch.data_test, i, 0, 50)
    #     else:
    #         train_pitch.min_max_normalize(train_pitch.data_test, i, 0, 30)

    # Training process
    start_time = time.time()
    train_pitch.train_codebooks(learn_rate, n_epochs)
    duration = time.time() - start_time

    print("class codebooks: ", end="")
    print([row[-1] for row in train_pitch.codebooks])

    score, wrong_data, actual, predictions = train_pitch.accuracy_metric(
        'train')
    # score_test, wrong_data_test, actual_test, predictions_test = train_pitch.accuracy_metric('test')

    print("===============train " + identifier + "==============")
    print("score: " + str(round(score, 3)) + "%")
    print("\n")
    print("wrong data: ", end="")
    print(wrong_data)

    # print("\n===============test===============")
    # print("score test: " + str(round(score_test, 3)) + "%")
    # print("\n")
    # print("wrong data test: ", end="")
    # print(wrong_data_test)

    train_pitch.export_codebooks(identifier + "_codebooks")

    pitch, pitch_test, dataset_path = helper.get_dataset_info(
        identifier, "train")
    # print(pitch)
    # print()
    # print(pitch_test)
    # print()
    # print(dataset_path)
    # exit()

    # Show wrong data train image
    if show_wrong_data:
        helper.show_wrong_data(wrong_data, predictions, pitch, dataset_path)
    # exit()

    # Show wrong data test image
    # helper.show_wrong_data(wrong_data_test, predictions_test, whole_test, dataset_path)
    # exit()

    return score, duration
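
Since train_pitch is driven entirely by keyword arguments, a call mirroring the defaults read via kwargs.get above might look like the sketch below; the argument values are simply those defaults, not tuned settings, and the surrounding module/import path is assumed.

# Hypothetical call site for train_pitch, using the function's own defaults.
score, duration = train_pitch(
    identifier='quarter',
    extraction='paranada',
    learning_rate=0.05,
    max_epoch=100,
    n_codebooks=9,
    show_wrong_data=False,
)
print("pitch training: %.3f%% in %.2f s" % (score, duration))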
Example #6
# load and prepare data test
# filename = 'test_whole.csv'
# train_whole.load_csv(filename, 'test')
# for i in range(len(train_whole.data_test[0])-1):
#     if i != 5:
#         train_whole.min_max_normalize(train_whole.data_test, i, 0, 50)
#     else:
#         train_whole.min_max_normalize(train_whole.data_test, i, 0, 30)

train_whole.train_codebooks(learn_rate, n_epochs)

print("class codebooks: ", end="")
print([row[-1] for row in train_whole.codebooks])

score, wrong_data, actual, predictions = train_whole.accuracy_metric('train')
# score_test, wrong_data_test, actual_test, predictions_test = train_whole.accuracy_metric('test')

print("===============train whole==============")
print("score: " + str(round(score, 3)) + "%")
print("\n")
print("wrong data: ", end="")
print(wrong_data)

# print("\n===============test===============")
# print("score test: " + str(round(score_test, 3)) + "%")
# print("\n")
# print("wrong data test: ", end="")
# print(wrong_data_test)

train_whole.export_codebooks("whole_codebooks")
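
The exported whole_codebooks.csv is read back in a later example through import_codebooks. A minimal reader matching the CSV layout sketched earlier, assuming one codebook vector per row with numeric features and an optional textual class label, could be the following hypothetical sketch (not the project's LVQ.import_codebooks).

import csv

def import_codebooks(filename):
    # One codebook per CSV row; numeric cells are parsed as floats and any
    # other cell (e.g. a textual class label) is kept as a string.
    codebooks = []
    with open(filename, newline="") as f:
        for row in csv.reader(f):
            codebook = []
            for cell in row:
                cell = cell.strip()
                if not cell:
                    continue
                try:
                    codebook.append(float(cell))
                except ValueError:
                    codebook.append(cell)
            codebooks.append(codebook)
    return codebooks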
Example #7
# load and prepare data test
# filename = 'test_whole.csv'
# train_beats.load_csv(filename, 'test')
# for i in range(len(train_beats.data_test[0])-1):
#     if i != 5:
#         train_beats.min_max_normalize(train_beats.data_test, i, 0, 50)
#     else:
#         train_beats.min_max_normalize(train_beats.data_test, i, 0, 30)

train_beats.train_codebooks(learn_rate, n_epochs)

print("class codebooks: ", end="")
print([row[-1] for row in train_beats.codebooks])

score, wrong_data, actual, predictions = train_beats.accuracy_metric('train')
# score_test, wrong_data_test, actual_test, predictions_test = train_beats.accuracy_metric('test')

print("===============train==============")
print("score: " + str(round(score, 3)) + "%")
print("\n")
print("wrong data: ", end="")
print(wrong_data)

# print("\n===============test===============")
# print("score test: " + str(round(score_test, 3)) + "%")
# print("\n")
# print("wrong data test: ", end="")
# print(wrong_data_test)

img_data = list()
Example #8
max_epoh = 150
n_codebooks = 3

test_beats = LVQ()
test_beats.set_n_codebooks(n_codebooks)

# load and prepare data test
filename = 'beats_codebooks.csv'
test_beats.import_codebooks(filename)
test_beats.load_csv("test_histogram.csv", "test")
print([item[-1] for item in test_beats.codebooks])

# for i in range(len(test_beats.data_test[0])-1):
#     test_beats.min_max_normalize(test_beats.data_test, i, 0, 255)

score_beats, wrong_data_beats, actual_beats, predictions_beats = test_beats.accuracy_metric(
    'test')

# ==========
# TEST PITCH
# ==========
create_dataset.create_csv_test(max_num_class=0, length_area=7)

learning_rate = 0.05
max_epoh = 4000
n_codebooks = 9

test_pitch = LVQ()
test_pitch.set_n_codebooks(n_codebooks)

# load and prepare data test
filename = 'all_codebooks.csv'