batch_size=32,
                seed=42,
                shuffle=True,
                class_mode='categorical')
        # Validation split drawn from the same directory/augmentation config
        # as the training generator above (same seed keeps the split stable).
        valid_generator = datagen.flow_from_directory(
                'mel/',
                subset="validation",
                target_size=(64, 64),
                batch_size=32,
                seed=42,
                shuffle=True,
                class_mode='categorical')

        # Fitting the Keras model. Steps per epoch = floor(n / batch_size),
        # so any final partial batch is dropped each epoch.
        STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
        STEP_SIZE_VALID=valid_generator.n//valid_generator.batch_size
        # NOTE(review): fit_generator/evaluate_generator are deprecated in
        # modern Keras (use model.fit/model.evaluate, which accept
        # generators) — confirm the installed Keras version before migrating.
        model.fit_generator(generator=train_generator,
                        steps_per_epoch=STEP_SIZE_TRAIN,
                        validation_data=valid_generator,
                        validation_steps=STEP_SIZE_VALID,
                        epochs=150
                        )
        # Final evaluation on the validation generator; `res` is whatever
        # evaluate_generator returns (loss, or [loss, metrics...]).
        res = model.evaluate_generator(generator=valid_generator, steps=STEP_SIZE_VALID)

        # write results — append-mode so repeated runs accumulate in one file
        with open('res_uniform.txt', 'a') as file:
                file.write(str(res)+'\n')

if __name__ == '__main__':
        # Entry point: build the network (the argument 20 is forwarded to
        # model_construction; its meaning is defined there) and hand it to
        # the training/evaluation pipeline.
        network = model_construction(20)
        classification_task(network)
# Derive the per-timestep input width from the flattened feature length.
param.x_length = len(dataset.x[0])
param.input_size = param.x_length // (param.window // 2)

# Shuffle once, then split: the first `proportion` fraction becomes the
# training set, the remainder the held-out test set.
length_of_data = len(dataset.y)
x_temp, y_temp = dataset.data_shuffle(x=dataset.x, y=dataset.y)
split_point = int(length_of_data * param.proportion)
dataset.x_test = x_temp[split_point:]
dataset.y_test = y_temp[split_point:]
dataset.x = x_temp[:split_point]
dataset.y = y_temp[:split_point]

print('=============================================')
print('data dividing')

# Cap this process's GPU memory allocation at 10% of the device so the
# card can be shared with other processes (TF1-style session config).
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.1
with tf.Session(config=config) as sess:
    # Build the model graph from hyperparameters held on `param`.
    # NOTE(review): y_length=2 suggests a two-class output — confirm in
    # model.model_construction.
    model_on = model.model_construction(layer_info = param.layer_info, acti_function = param.acti_function, \
                                        x_length = param.x_length, y_length = 2, name = 'global', \
                                        batch_size = param.batch_size)
    # LSTM shape: timestep_size = window//2, matching the input_size
    # derivation above (x_length // (window // 2)).
    model_on.lstm_param_setting(input_size = param.input_size, timestep_size = param.window//2, hidden_size = param.hiddensize, \
                                layer_num = param.layer_num)
    model_on.estimation()
    # Adam optimizer on the model's loss; global_step counts how many
    # gradient updates have been applied.
    global_step = tf.Variable(0, trainable=False)
    optimizer = tf.train.AdamOptimizer(1e-3)
    grads_and_vars = optimizer.compute_gradients(model_on.loss)
    train_step = optimizer.apply_gradients(grads_and_vars,
                                           global_step=global_step)
    sess.run(tf.global_variables_initializer())

    # Training loop: reset per-epoch confusion counters (true positive /
    # true negative / false positive) at the start of each epoch.
    for i in range(0, param.num_epoch):
        train_tp = 0
        train_tn = 0
        train_fp = 0