Code Example #1
File: main.py Project: LouCHANCRIN/Happy-House
import numpy as np
import tensorflow as tf  # TensorFlow 1.x API (tf.placeholder / tf.Session)

# `data`, `mod`, and `my_weight_and_bias` are defined elsewhere in this project
def main():
    alpha = 0.008
    num_epoch = 500
    nb_class = np.shape(data.train_label)[1]
    batch_size = 32
    nb_image, line, col, nb_chanel = np.shape(data.train_data)
    # reshape into explicit 4-D image tensors (N, H, W, C)
    data.train_data = np.reshape(data.train_data,
                                 [nb_image, line, col, nb_chanel])
    data.test_data = np.reshape(
        data.test_data, [np.shape(data.test_data)[0], line, col, nb_chanel])
    x = tf.placeholder("float", [None, line, col, nb_chanel])
    y = tf.placeholder("float", [None, nb_class])

    weight, bias = my_weight_and_bias(nb_class, nb_chanel, batch_size)

    pred = mod.MyModel(x, weight, bias)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=alpha).minimize(cost)
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        train_loss = []
        test_loss = []
        train_accuracy = []
        test_accuracy = []
        best_accuracy = 0
        for e in range(0, num_epoch):
            for i in range(0, int(nb_image / batch_size)):
                batch_x = data.train_data[i * batch_size:min(
                    (i + 1) * batch_size, len(data.train_data))]
                batch_y = data.train_label[i * batch_size:min(
                    (i + 1) * batch_size, len(data.train_label))]
                sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
            TmpLoss, TmpAcc = sess.run([cost, accuracy],
                                       feed_dict={
                                           x: data.train_data,
                                           y: data.train_label
                                       })
            TmpTestLoss, TmpTestAcc = sess.run([cost, accuracy],
                                               feed_dict={
                                                   x: data.test_data,
                                                   y: data.test_label
                                               })
            print("Epoch :", e, ", Loss :", TmpLoss, "test lost :",
                  TmpTestLoss, ", Train acc :", TmpAcc, ", Test acc :",
                  TmpTestAcc)
            if TmpTestAcc > best_accuracy:
                best_accuracy = TmpTestAcc
                print("\nBest acc :", best_accuracy, ", index :", e, "\n")
            train_loss.append(TmpLoss)
            test_loss.append(TmpTestLoss)
            train_accuracy.append(TmpAcc)
            test_accuracy.append(TmpTestAcc)
    plot(train_loss, test_loss, train_accuracy, test_accuracy)
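The per-epoch metric evaluations above feed the entire train and test sets through a single sess.run call, which can exhaust memory on larger datasets. A minimal sketch (not from the project) of computing the same metrics in mini-batches, reusing the cost, accuracy, x, and y tensors defined above:

def batched_metrics(sess, cost, accuracy, x, y, data_x, data_y, batch_size=32):
    # Accumulate size-weighted loss/accuracy over mini-batches
    losses, accs = [], []
    for i in range(0, len(data_x), batch_size):
        batch_x = data_x[i:i + batch_size]
        batch_y = data_y[i:i + batch_size]
        l, a = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y})
        losses.append(l * len(batch_x))
        accs.append(a * len(batch_x))
    return sum(losses) / len(data_x), sum(accs) / len(data_x)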
Code Example #2
def my_learning_rate(epoch_index, step):
    if epoch_index != 0:
        return 0.001 * (0.7 ** (epoch_index - 1)) / (1 + step * 0.000001)
        # return 0.001
    else:
        return 0.000001


def cal_loss(logits, lab_batch):
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=lab_batch, logits=logits)
    loss = tf.reduce_mean(cross_entropy)
    return loss


the_model = model.MyModel(num_class=num_class)
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
# optimizer = tf.keras.optimizers.RMSprop(lr=0.001)

step = 1
try:  # catch the exception input_data raises when the data feed is exhausted
    while True:
        batch_x, batch_y, epoch_index = fuckdata.next_batch(batch_size=batch_size, epoch=epoch)
        learning_rate = my_learning_rate(epoch_index, step)
        if epoch_index != 0:
            is_training = True
        else:
            is_training = False

        with tf.GradientTape() as tape:
            logits = the_model.call(batch_x, is_training=is_training)
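The snippet above is cut off inside the tf.GradientTape() block. A hedged sketch of how such a step is typically completed in TF2 eager style, computing the loss with the cal_loss helper above, then taking and applying gradients; the tf.keras optimizer here is an assumption, swapped in for tf.train.AdamOptimizer:

optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)

def train_step(model, batch_x, batch_y, is_training):
    with tf.GradientTape() as tape:
        logits = model.call(batch_x, is_training=is_training)
        loss = cal_loss(logits, batch_y)
    # Differentiate the loss w.r.t. the model parameters and apply the update
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss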
Code Example #3
import dash
import dash_core_components as dcc
import dash_html_components as html

import plotly.plotly as py  # moved to chart_studio.plotly in plotly 4+
import plotly.graph_objs as go
import plotly
import plotly.figure_factory as ff

import numpy as np
import pandas as pd

import model as mm

M = mm.MyModel()

df = pd.read_csv('processed.cleveland.data')
df.columns = [
    'age', 'sex', 'chest_pain_type', 'resting_blood_pressure', 'cholesterol',
    'fasting_blood_sugar', 'rest_ecg', 'max_heart_rate_achieved',
    'exercise_induced_angina', 'st_depression', 'st_slope',
    'num_major_vessels', 'thalassemia', 'target'
]

df['target'] = df['target'].apply(lambda x: 0 if x == 0 else 1)
df = df.applymap(lambda x: 0 if x == '?' else x)

# just for testing
fig_2 = {
    'data': [
        {
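The fig_2 definition is truncated in the source. A hypothetical completion showing how such a figure dict is typically wired into a Dash layout; the histogram column and widget ids below are assumptions:

fig_2_example = {
    'data': [
        go.Histogram(x=df['age'], name='age distribution'),
    ],
    'layout': go.Layout(title='Age distribution (test figure)'),
}

app = dash.Dash(__name__)
app.layout = html.Div([
    html.H3('Heart-disease dataset'),
    dcc.Graph(id='fig-2', figure=fig_2_example),
])

# if __name__ == '__main__':
#     app.run_server(debug=True)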
Code Example #4
    data_transforms = {
        'train': transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }

    datasets = {
        'train': cuzDataset.MyDataSet(X_train, Y_train, patterns, data_transforms['train']),
        'val': cuzDataset.MyDataSet(X_test, Y_test, patterns, data_transforms['val'])
    }

    dataloaders = {
        x: torch.utils.data.DataLoader(datasets[x], batch_size=3, shuffle=True, num_workers=4) for x in ['train', 'val']
    }
 
    model = cuzModel.MyModel(patterns)
    model.show_moel_info()
    model.trainMyModel(20, dataloaders)
    # torch.save(model.state_dict(), 'model_manualAndGlass150_20.pkl')
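cuzDataset.MyDataSet is project code not shown here. A hypothetical, minimal dataset compatible with the calls above (images X, labels Y, a per-item transform; the patterns argument is assumed to hold the class labels):

import torch
from torch.utils.data import Dataset

class MyDataSetSketch(Dataset):
    def __init__(self, X, Y, patterns, transform=None):
        self.X, self.Y = X, Y
        self.patterns = patterns
        self.transform = transform

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        img = self.X[idx]
        if self.transform is not None:
            img = self.transform(img)  # ToPILImage expects an HxWxC array or tensor
        return img, self.Y[idx]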


Code Example #5
    # __init__ of a controller-style class (the class definition is not shown in the source)
    def __init__(self, parent):
        self.parent = parent
        self.model = model.MyModel(self)
        self.view = view.MyView(parent, self)
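A minimal usage sketch, assuming the method above belongs to a Tkinter-style controller class (here called Controller, a hypothetical name) and that model.MyModel and view.MyView take the arguments shown:

import tkinter as tk

root = tk.Tk()
controller = Controller(root)  # hypothetical class wrapping the __init__ above
root.mainloop()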
Code Example #6
# Imports inferred from the usage below; `data` and `labels` are loaded earlier in the file
import torch as th
import torch.nn as nn

import model
import opendata

print(data.shape)
print(type(labels), len(labels))
batched_tensor_data = opendata.make_tensors_data(data, 50)
batched_tensor_label = opendata.make_tensors_label(labels, 50)

print(len(batched_tensor_data))
# first batch
# print(batched_tensor_data[0])
# first batch, first image
# print(batched_tensor_data[0][0])

#a = Perceptron(labels)
#a.update(data[:10],labels[:10])
#print ("Loss rate : ", str(a.score(data[:10],labels[:10])*100)+"%")

model = model.MyModel(3072, 10)  # note: this rebinds the imported module name 'model'
learning_rate = 1e-1
loss_fn = nn.NLLLoss()

if th.cuda.is_available():
    model.cuda()
    loss_fn.cuda()

optimizer = th.optim.Adam(model.parameters(), lr=learning_rate)

EPOCH = 10

# Training loop
for i in range(EPOCH):
    model.train()
    total_loss = 0
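The loop body is truncated in the source. A hedged sketch of a typical completion, assuming MyModel ends in log_softmax (which NLLLoss expects) and that the batched data and label tensors pair up index-by-index:

for i in range(EPOCH):
    model.train()
    total_loss = 0
    for bx, by in zip(batched_tensor_data, batched_tensor_label):
        if th.cuda.is_available():
            bx, by = bx.cuda(), by.cuda()
        optimizer.zero_grad()
        out = model(bx)           # expected shape: (batch, 10) log-probabilities
        loss = loss_fn(out, by)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print("epoch", i, ": mean loss", total_loss / len(batched_tensor_data))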
Code Example #7
import pickle
import timeit

import torch
import torch.nn as nn

# `m` (the model definition) and `dl` (data loading) are modules from this project
def main(hParam):

	# Used to record various pieces of information during training
	# A logger or TensorBoard is also commonly used for this
	log = {'loss': list(), 'time': list()}

	# Check whether a GPU is available
	device = 'cuda' if torch.cuda.is_available() else 'cpu'

	model = m.MyModel(inDim = hParam['inDim'], hiddenDim = hParam['hiddenDim'],
					outDim = hParam['outDim'], numLayers = hParam['numLayers'])
	model = model.to(device)

	# Train / test data often need to be loaded separately, but here a single dataset is used
	# While the GPU computes, the CPU loads the data needed for the next iteration;
	# num_workers is the number of CPU workers used to prepare that data
	# Both too few and too many workers hurt performance
	data = dl.ExampleData(hParam['data_path'], hParam['option'])
	loader = torch.utils.data.DataLoader(data, batch_size = hParam['batch'],
											num_workers = 4, pin_memory = True, shuffle = True)

	# NLLLoss is the loss function that matches F.log_softmax
	nll = nn.NLLLoss()
	# Use the Adam optimizer
	# The optimizer must be told which parameters to optimize; pass list(model.parameters())
	# To optimize only part of the model (e.g. finetuning), adjust the contents of that list
	# To apply a different learning rate per parameter group, do something like the following
	optimizer = torch.optim.Adam(list(model.parameters()), lr = hParam['lr'])
	"""
	optimizer.add_param_group(
		{
			'params': <a subset of model.parameters()>,
			'lr': 0.01,
			...
		}
	)  # configure as needed
	"""
	# A learning rate scheduler can also be used, e.g.:
	# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, ...)

	# Put the model in training mode
	# batch norm, dropout, etc. behave differently at train / test time, so always set this
	model.train()

	for epoch in range(hParam['epoch']):

		# Measure the time per epoch
		timeNow = timeit.default_timer()

		# A library like tqdm can also be used here
		for idx, (txt, label) in enumerate(loader):

			txt, label = txt.to(device), label.to(device)

			pred = model(txt)
			loss = nll(pred, label)

			# In PyTorch, the gradients computed from the loss keep accumulating
			# This is deliberate: if you split a batch into several pieces, compute
			# gradients for each piece, and accumulate them, you get full-batch
			# gradient descent
			# For mini-batch gradient descent (or at a new epoch), the accumulated
			# gradients must be cleared first
			# optimizer.zero_grad() resets the accumulated gradients to zero
			optimizer.zero_grad()
			# Differentiate the loss
			loss.backward()
			# Update the model parameters using the gradients computed so far
			optimizer.step()
			# With several losses, either sum them or call backward on each one
			# Example 1)
			# optimizer.zero_grad()
			# total_loss = loss1 + loss2 + loss3
			# total_loss.backward()
			# optimizer.step()
			# Example 2)
			# optimizer.zero_grad()
			# loss1.backward()
			# loss2.backward()
			# loss3.backward()
			# optimizer.step()

		# scheduler.step()  # call this here if the scheduler above is enabled

		log['loss'].append(loss.item())
		log['time'].append(timeit.default_timer() - timeNow)

		if hParam['verbose']:

			# Print progress
			print('[info] Epoch : [{}/{}], Loss : {}'.format(epoch + 1, hParam['epoch'], log['loss'][-1]))
			print('[info] Time : {}'.format(log['time'][-1]))

	# Put the model in evaluation mode
	model.eval()

	# Gradients are not needed at test time, so disable their computation
	with torch.no_grad():

		for epoch in range(hParam['epoch']):

			for idx, (txt, label) in enumerate(loader):

				txt, label = txt.to(device), label.to(device)

				pred = model(txt)

				# Do whatever is needed with pred
				# Example 1) convert the predictions to a numpy array
				# pred = pred.cpu().numpy()
				# Example 2) count matches with the labels -> label = [1, 1, 0, 1], pred = [0, 1, 0, 0]
				# (label == pred).sum() ----> [False, True, True, False] ----> 2

	# Training / testing finished; save the results
	print('Train finished')

	# Save the model parameters
	torch.save(model.state_dict(), hParam['model_path'])

	# Save the log (as a pickle)
	with open(hParam['log_path'], 'wb') as fs:

		pickle.dump(log, fs)
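A concrete sketch of the per-parameter-group learning rates described in the comments above, using the documented optimizer.add_param_group API; the split into encoder / decoder attributes is a hypothetical example:

optimizer = torch.optim.Adam(model.encoder.parameters(), lr=hParam['lr'])
optimizer.add_param_group({
    'params': model.decoder.parameters(),  # a different subset of the parameters
    'lr': 0.01,                            # with its own learning rate
})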
Code Example #8
from flask import Flask, render_template, jsonify, request
import model
app = Flask(__name__)


@app.route('/')
@app.route('/index')
def chat():
    return render_template('chat.html')


@app.route('/predict', methods=['POST', 'GET'])
def predict_emotion():
    if request.method == 'POST':
        msg = request.form['user_msg']
        print('[app.py] predict(POST), msg=', msg)
    else:
        msg = request.args.get('user_msg')
        print('[app.py] predict(GET), msg=', msg)
    # predict_result = mymodel.predict('엘사는 예전보다 더 강한 여자가 되어 있었다.')
    # (sample Korean input: "Elsa had become a stronger woman than before.")
    predict_result = mymodel.predict(msg)
    print("[app.py] 예측값: ", float(predict_result[0]), ", 판단: ",
          predict_result[1])
    data = {'result': float(predict_result[0]), 'answer': predict_result[1]}
    return jsonify(data)


mymodel = model.MyModel('./../model/model_file.h5')
# app.run(debug = True)
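A minimal way to exercise the /predict route without a browser, using Flask's built-in test client (standard Flask API; the message text is arbitrary):

with app.test_client() as client:
    resp = client.post('/predict', data={'user_msg': 'some input text'})
    print(resp.get_json())  # {'result': ..., 'answer': ...}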
Code Example #9
    print("Creating model...")
    model.create_model(multi_gpu=False)

    print("Now training...")
    history = model.training(x_train, y_train, x_test, y_test)
    accuracy = history.history["accuracy"]
    loss = history.history["loss"]
    eval_result = model.evaluate(x_test, y_test)

    print("accuracy = " + str(eval_result))
    model.save('./model.h5')

    if not os.path.exists('./result_keras'):
        os.mkdir('./result_keras')
    for i in range(TEST_DATA_SIZE):
        ret = model.predict(x_test[i, :, :, 0].reshape([1, IMG_SIZE, IMG_SIZE, 1]), 1)
        np.savetxt('./result_keras/' + str(i) + '.txt', ret[0, :, :, 0])
    
    with open("training_log.txt", "w") as f:
        for i in range(training_epochs):
            f.write(str(loss[i]) + "," + str(accuracy[i]) + "\n")
    ax1 = plt.subplot()
    ax1.plot(loss, color="blue")
    ax2 = ax1.twinx()
    ax2.plot(accuracy, color="orange")
    plt.show()

if __name__ == '__main__':
    data = data.MyLoadData(IMG_SIZE, OUTPUT_SIZE)
    model = model.MyModel((IMG_SIZE, IMG_SIZE, 1), batch_size, training_epochs)
    main(data, model)
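Assuming model.save('./model.h5') delegates to a Keras model (as the .h5 extension suggests), the saved file can later be reloaded for inference with the standard Keras API; the zero array is only a shape placeholder:

import numpy as np
import tensorflow as tf

reloaded = tf.keras.models.load_model('./model.h5')
sample = np.zeros((1, IMG_SIZE, IMG_SIZE, 1), dtype=np.float32)
print(reloaded.predict(sample).shape)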
Code Example #10
File: train.py Project: heefe92/PyTorch_Tutorial
def run_train(train_verbose=False):
    dataset = Dataset()
    dataloader = data_.DataLoader(dataset,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  # pin_memory=True,
                                  num_workers=opt.num_workers)

    testset = Dataset()
    test_dataloader = data_.DataLoader(
        testset,
        batch_size=opt.batch_size,
        num_workers=opt.num_workers,
        shuffle=False,
        # pin_memory=True,
    )

    my_model = model.MyModel()
    my_model = my_model.cuda()

    optimizer = optim.Adam(my_model.parameters(), lr=opt.lr)

    loss_hist = collections.deque(maxlen=500)
    epoch_loss_hist = []
    my_trainer = Trainer(my_model, optimizer, model_name='MyModel')

    best_loss = 10
    best_loss_epoch_num = 0
    num_bad_epochs = 0
    max_bad_epochs = 5

    for epoch_num in range(opt.epoch):
        my_trainer.train_mode()
        train_start_time = time.time()
        train_epoch_loss = []
        start = time.time()
        for iter_num, data in enumerate(dataloader):
            curr_loss = my_trainer.train_step(data)
            loss_hist.append(float(curr_loss))
            train_epoch_loss.append(float(curr_loss))

            if train_verbose:
                print(
                    'Epoch: {} | Iteration: {} | loss: {:1.5f} | Running loss: {:1.5f} | Iter time: {:1.5f} | Train'
                    ' time: {:1.5f}'.format(epoch_num, iter_num,
                                            float(curr_loss),
                                            np.mean(loss_hist),
                                            time.time() - start,
                                            time.time() - train_start_time))
                start = time.time()

            del curr_loss
        print('train epoch time :', time.time() - train_start_time)
        print('Epoch: {} | epoch train loss: {:1.5f}'.format(
            epoch_num, np.mean(train_epoch_loss)))
        epoch_loss_hist.append(np.mean(train_epoch_loss))

        vali_start_time = time.time()
        vali_epoch_loss = []
        my_trainer.eval_mode()

        for iter_num, data in enumerate(test_dataloader):
            curr_loss = my_trainer.get_loss(data)
            vali_epoch_loss.append(float(curr_loss))

            del curr_loss

        print('vali epoch time :', time.time() - vali_start_time)
        print('Epoch: {} | epoch vali loss: {:1.5f}'.format(
            epoch_num, np.mean(vali_epoch_loss)))

        if best_loss < np.mean(vali_epoch_loss):
            num_bad_epochs += 1
        else:
            best_loss = np.mean(vali_epoch_loss)
            best_loss_epoch_num = epoch_num
            num_bad_epochs = 0
            my_trainer.model_save(epoch_num)
        if num_bad_epochs > max_bad_epochs:
            num_bad_epochs = 0
            my_trainer.model_load(best_loss_epoch_num)
            my_trainer.reduce_lr(factor=0.1, verbose=True)

        print('best epoch num', best_loss_epoch_num)
        print('----------------------------------------')

    print(epoch_loss_hist)
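The manual bad-epoch counting and reduce_lr call above mirror what PyTorch's built-in ReduceLROnPlateau scheduler provides; a hedged sketch of the equivalent setup, with the patience value chosen to match max_bad_epochs:

import torch.optim as optim

optimizer = optim.Adam(my_model.parameters(), lr=opt.lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='min', factor=0.1, patience=5)

# inside the epoch loop, after computing the validation loss:
# scheduler.step(np.mean(vali_epoch_loss))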