Example #1
def main():
    # Get a batch of training images and labels
    train, test = inputs.get_data()
    train_images, train_labels = inputs.train_batch(train)
    # Define the operations for inference, loss, and training
    train_logits = cnn.inference(train_images)
    losses = loss(train_labels, train_logits)
    train_op = training(losses)

    test_images, test_labels = inputs.test_batch(test)
    test_logits = cnn.inference(test_images, reuse=True)
    correct_prediction = tf.equal(tf.argmax(test_logits, 1), tf.to_int64(test_labels))
    accuracy = tf.reduce_mean(tf.to_float(correct_prediction))

    with tf.Session() as sess:
        # Prepare the threads that pull data from the batch queues
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        sess.run(tf.global_variables_initializer())

        # Run the training loop
        for i in range(300):
            _, loss_value, accuracy_value = sess.run([train_op, losses, accuracy])
            print("step {:3d} : {:5f} ({:3f})".format(i + 1, loss_value, accuracy_value * 100.0))

        coord.request_stop()
        coord.join(threads)
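Example #1 shares weights between the training and test graphs via reuse=True. The cnn module itself is not shown; a minimal sketch of how such an inference() is typically written with TF1 variable scopes (the layer sizes here are hypothetical, not from the original module):

import tensorflow as tf

def inference(images, reuse=False):
    # With reuse=True, get_variable() inside the scope returns the weights
    # created on the first call instead of allocating new ones, so the
    # train and test graphs share parameters.
    with tf.variable_scope('cnn', reuse=reuse):
        net = tf.layers.conv2d(images, 32, 3, activation=tf.nn.relu)
        net = tf.layers.max_pooling2d(net, 2, 2)
        net = tf.layers.flatten(net)
        logits = tf.layers.dense(net, 10)
    return logits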
Example #2
def main():
    train, valid, test = inputs.get_data()
    train_images, train_labels = inputs.make_batch(train)
    valid_images, valid_labels = inputs.make_batch(valid)
    test_images, test_labels = inputs.make_batch(test)

    num_classes = 47
    train_y = keras.utils.to_categorical(train_labels, num_classes)
    valid_y = keras.utils.to_categorical(valid_labels, num_classes)
    test_y = keras.utils.to_categorical(test_labels, num_classes)

    model = network(num_classes)
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='Adam',
                  metrics=['accuracy'])

    # verbose: 0 = silent, 1 = progress bar
    batch_size = 32
    history = model.fit(train_images, train_y,
                        batch_size=batch_size,
                        epochs=10,
                        verbose=1,
                        validation_data=(valid_images, valid_y))
    score = model.evaluate(test_images, test_y, verbose=0)

    # score[0]: loss, score[1]: accuracy
    print('Loss:', score[0])
    print('Accuracy:', score[1])

    backend.clear_session()
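network() is not shown in Example #2. A minimal sketch of a model consistent with this 47-class setup (the CNN architecture and the 28x28x1 input shape are assumptions; 47 classes matches datasets such as EMNIST Balanced):

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

def network(num_classes):
    # Hypothetical architecture; the original network() is not part of
    # this example.
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(128, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ])
    return model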
Example #3
def get_all_data(data_path, smell):
    print("reading data...")

    train_data, train_labels, eval_data, eval_labels, max_input_length = \
        inputs.get_data(data_path, OUT_FOLDER, "rq1_rnn_" + smell,
                        train_validate_ratio=TRAIN_VALIDATE_RATIO,
                        max_training_samples=5000)

    train_data = train_data.reshape((len(train_labels), max_input_length))
    eval_data = eval_data.reshape((len(eval_labels), max_input_length))
    print("reading data... done.")
    return input_data.Input_data(train_data, train_labels, eval_data,
                                 eval_labels, max_input_length)
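Examples #3 through #5 all return an input_data.Input_data object, which is not shown. A minimal container consistent with these constructor calls (an assumption, not the project's actual class):

class Input_data:
    # Hypothetical container matching the constructor calls in these
    # examples; the project's real class may carry more behavior.
    def __init__(self, train_data, train_labels, eval_data,
                 eval_labels, max_input_length):
        self.train_data = train_data
        self.train_labels = train_labels
        self.eval_data = eval_data
        self.eval_labels = eval_labels
        self.max_input_length = max_input_length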
Example #4
def get_all_data(data_path, smell):
    print("reading data...")

    # Load training and eval data
    train_data, train_labels, eval_data, eval_labels, max_input_length = \
        inputs.get_data(data_path, OUT_FOLDER, "rq1_cnn1d_" + smell,
                        train_validate_ratio=TRAIN_VALIDATE_RATIO,
                        max_training_samples=5000)

    # train_data = train_data.reshape((len(train_labels), max_input_length))
    # eval_data = eval_data.reshape((len(eval_labels), max_input_length))
    print("train_data: " + str(len(train_data)))
    print("train_labels: " + str(len(train_labels)))
    print("eval_data: " + str(len(eval_data)))
    print("eval_labels: " + str(len(eval_labels)))
    print("reading data... done.")
    return input_data.Input_data(train_data, train_labels, eval_data,
                                 eval_labels, max_input_length)
Example #5
def get_all_data(data_path, smell):
    print("reading data...")

    if smell in ["ComplexConditional", "ComplexMethod"]:
        max_eval_samples = 150000  # for impl smells (methods)
    else:
        max_eval_samples = 50000  # for design smells (classes)

    train_data, train_labels, eval_data, eval_labels, max_input_length = \
        inputs.get_data(data_path,
                        train_validate_ratio=TRAIN_VALIDATE_RATIO,
                        max_training_samples=5000,
                        max_eval_samples=max_eval_samples,
                        is_c2v=C2V)

    train_data = train_data.reshape((len(train_labels), max_input_length))
    eval_data = eval_data.reshape((len(eval_labels), max_input_length))
    print("reading data... done.")
    return input_data.Input_data(train_data, train_labels, eval_data,
                                 eval_labels, max_input_length)
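A hypothetical call site for these loaders (the path and smell name below are placeholders, not from the original project):

data = get_all_data("/path/to/tokenized_samples", "ComplexMethod")
print(data.train_data.shape)  # (num_train_samples, max_input_length)
print(data.eval_data.shape)   # (num_eval_samples, max_input_length)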
Example #6
"""

# just deleting variables quickly while I play with file
for name in dir():
    if not name.startswith('_'):
        del globals()[name]

import numpy as np
import pylab as pl
from model import steps
from likelihood import lnlike, model_outcomes
from inputs import get_data, get_params, get_initial_conditions
from mcmc import mcmc

pops = ['fertile_women', '0-1', '1-6', '6-12', '12-24', '24-72']
D = get_data()
P = get_params()
C0 = get_initial_conditions()

t_steps = 1000  # model run time (months)

# Run the model
y = steps(P, C0, t_steps)

LL = lnlike(C0, P, D, t_steps, pops)

# Parameters to calibrate
meta_prefit = dict()
meta_prefit['mortality, 0-1, non-stunted'] = P['mortality']['0-1']['non-stunted']
meta_prefit['mortality, 0-1, stunted'] = P['mortality']['0-1']['stunted']
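lnlike() is imported above but not shown. A minimal sketch of a Gaussian log-likelihood with the same signature (the internal structure of D and of the model output y are assumptions):

import numpy as np
from model import steps  # same import as in the example

def lnlike(C0, P, D, t_steps, pops):
    # Run the model, then score each population's trajectory against the
    # observed mean/std in D (the field names here are hypothetical).
    y = steps(P, C0, t_steps)
    ll = 0.0
    for pop in pops:
        resid = y[pop] - D[pop]['mean']
        sigma = D[pop]['std']
        ll += -0.5 * np.sum((resid / sigma) ** 2
                            + np.log(2.0 * np.pi * sigma ** 2))
    return ll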
Example #7
])  # end of a model definition truncated in this excerpt

if LOAD_OLD:
    model = load_model(MODEL_PATH)

checkpoint = ModelCheckpoint(MODEL_PATH,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True,
                             mode='max')
adam = Adam(lr=LR, decay=0.0)
model.compile(loss='mean_squared_error', optimizer=adam, metrics=['accuracy'])

if __name__ == '__main__':
    print('Loading data...')
    X, Y = get_data()
    print('Fitting model...')

    history = model.fit(X,
                        Y,
                        validation_split=VAL_SPLIT,
                        epochs=EPOCHS,
                        batch_size=BATCH_SIZE,
                        shuffle=True,
                        callbacks=[checkpoint])

    # summarize history for accuracy
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model Accuracy')
    plt.ylabel('accuracy')
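The excerpt ends mid-plot. The usual completion of this standard Keras history plot (assumed, not from the original file) is:

    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()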