Example no. 1
def classify_wine_type(wines_df):
    x = wines_df.iloc[:, 0:11]
    y = np.ravel(wines_df.type)
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.33,
                                                        random_state=42)

    scaler = StandardScaler().fit(x_train)
    x_train = scaler.transform(x_train)
    x_test = scaler.transform(x_test)

    model = Sequential()
    model.add(Dense(12, activation='relu', input_shape=(11, )))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # print_model(model)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit(x_train, y_train, epochs=5, batch_size=30, verbose=2)
    # Return the predicted probabilities
    # y_pred = model.predict(x_test, verbose=1)
    # Return the predicted class labels
    y_pred = model.predict_classes(x_test, verbose=1)

    # The score is a list containing the loss and the accuracy
    score = model.evaluate(x_test, y_test, verbose=1)
    print(f'score is: {score}')
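Note: predict_classes was removed from tf.keras in newer releases (it was last available in TensorFlow 2.5). If this snippet is run on a newer version, a minimal equivalent for the single-unit sigmoid output above (an assumption based on the model defined in this example) is to threshold the probabilities returned by model.predict:

y_pred = (model.predict(x_test) > 0.5).astype('int32').ravel()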
Example no. 2
    callbacks=[tsb]
    # callback functions; use the line below to also vary the learning rate
    #callbacks=[tsb, reduce_lr]
)

# Display the results
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])

# Save the model
FILE_PATH = "my_model.h5"
model.save(FILE_PATH)

# Prediction
predict_classes = model.predict_classes(x_test, batch_size=32)
true_classes = np.argmax(y_test, 1)
info = "-" * 10 + "info" + "-" * 10 + "\n"
print("-" * 10 + "info" + "-" * 10)
info += f"{LABEL}\n"
print(LABEL)
info += f"train : {y_train.shape[0]*(1-SPLIT)}\n"
print("train :", y_train.shape[0] * (1 - SPLIT))
info += f"validation : {y_train.shape[0]*SPLIT}\n"
print("validation :", y_train.shape[0] * SPLIT)
info += f"test : {y_test.shape[0]} \n"
print("test :", y_test.shape[0])
info += f"epochs : {EPOCHS} \n"
print("epochs :", EPOCHS)
info += f"保存フォルダー先: {folder}\n"
print("保存フォルダー先:", folder)
Example no. 3
sgd = tf.keras.optimizers.SGD(lr=learning_rate)

model.compile(optimizer=sgd, loss='binary_crossentropy')

model.fit(x=x_data, y=y_data, epochs=2000, verbose=0)

x_result = model.predict(x_data)
print(x_result)
print('-' * 30)

# 0 : dog, 1 : cat
x_test = [[2, 1], [6, 5], [11, 6]]


def getCategory(mydata):
    mylist = ['dog', 'cat']
    print('prediction : %s, %s' % (mydata, mylist[mydata[0]]))


# flatten() : collapses the array to one dimension
for item in x_test:
    H = model.predict(np.array([item]))
    print(H.flatten())
    print('*' * 30)

    pred = model.predict_classes(np.array([item]))
    print('test data :', np.array([item]))
    getCategory(pred.flatten())
    print('-' * 30)

print('finished')

Example no. 4
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))  # flatten the 28x28 image into a one-dimensional vector
model.add(Dense(128, activation='relu'))  # first hidden layer: 128 units with ReLU activation (sigmoid or another function would also work)
# see section 3.6 of Andrew Ng's deep learning course if this is unclear
model.add(Dense(10, activation='softmax'))  # output layer: 10 classes, softmax activation

print("查看自己写的代码的总体参数 " , model.summary())#查看自己写的代码的总体参数


# compile the model
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])  # define the loss function

# the optimizer is Adam and the loss is sparse categorical cross-entropy
model.fit(X_train, y_train, epochs=10)  # train; epochs is the number of passes over the data

test_loss, test_acc = model.evaluate(X_test, y_test)  # measure the trained model's accuracy on the test set
print(test_acc)

# prediction accuracy of the model
from sklearn.metrics import accuracy_score
y_pred = model.predict_classes(X_test)

print(accuracy_score(y_test, y_pred))

#print(tf.test.is_gpu_available())
print(tf.config.list_physical_devices('GPU'))
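For a softmax output like the one above, predict_classes is equivalent to taking the argmax over the predicted probabilities, so on newer TensorFlow versions (where predict_classes no longer exists) the same accuracy check could be written as the following sketch, reusing model, X_test and y_test from this example:

y_pred = np.argmax(model.predict(X_test), axis=-1)
print(accuracy_score(y_test, y_pred))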


Example no. 5
plt.plot(history.history["loss"], color="g", label="Train")
plt.plot(history.history["val_loss"], color="b", label="Validation")
plt.legend(loc="best")
plt.tight_layout()
plt.show()

for i in range(5):
    idx = np.random.randint(len(Xtest))
    xtest = Xtest[idx].reshape(1, 40)
    ylabel = ytest[idx]
    ypred = model.predict(xtest)[0][0]
    sent = " ".join([index2word[x] for x in xtest[0].tolist() if x != 0])
    print("Predicted\tActual\t\tsentence")
    print("%.0f\t\t%d\t%s" % (ypred, ylabel, sent))

text = "Funny that harvard was almost more stressful than MY AUDITION!"

words = nltk.word_tokenize(text.lower())
seqs_test = []

for word in words:
    if word in word2index:
        seqs_test.append(word2index[word])
    else:
        seqs_test.append(word2index["UNK"])

seqs_test = pad_sequences([seqs_test], maxlen=MAX_SENTENCE_LENGTH)
test = seqs_test.reshape(1, 40)

print("Real Time :", model.predict_classes(test)[0][0])
Example no. 6
model.summary()

# Train the model each generation and show predictions against the validation
# dataset.
for iteration in range(1, 20):
    print()
    print('-' * 50)
    print('Iteration', iteration)
    model.fit(x_train,
              y_train,
              batch_size=BATCH_SIZE,
              epochs=1,
              validation_data=(x_val, y_val))
    # Select 10 samples from the validation set at random so we can visualize
    # errors.
    for i in range(10):
        ind = np.random.randint(0, len(x_val))
        rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]
        preds = model.predict_classes(rowx, verbose=0)
        q = ctable.decode(rowx[0])
        correct = ctable.decode(rowy[0])
        guess = ctable.decode(preds[0], calc_argmax=False)
        print('Q', q[::-1] if INVERT else q, end=' ')
        print('T', correct, end=' ')
        if correct == guess:
            print(colors.ok + '☑' + colors.close, end=' ')
        else:
            print(colors.fail + '☒' + colors.close, end=' ')
        print(guess)
Example no. 7
x_train = data[:, 0:x_column]
y_train = data[:, x_column:]

x_test = [[5], [11]]
model = Sequential()
'''
A perceptron is a neural unit whose output takes the values 0 and 1, like a light bulb that is on or off.
To make a perceptron behave as linear regression, logistic regression, or another kind of model,
a function has to be specified; this function is called the activation function.
linear = 'linear', logistic = 'sigmoid'
'''
model.add(Dense(input_dim=x_column, units=y_column, activation='sigmoid'))

# binary classification, so the loss function is 'binary_crossentropy'
# the optimizer can be given as a string, but it can also be created as an object.
learning_rate = 0.01  # learning rate
import tensorflow
sgd = tensorflow.keras.optimizers.SGD(lr=learning_rate)
model.compile(loss='binary_crossentropy', optimizer=sgd)
model.fit(x_train, y_train, epochs=5000, batch_size=100, verbose=0)
# predict probability values on the training data with the fitted model
H2 = model.predict(x_train)
# print(H2)
for i in x_test:
    # predict_classes : returns the predicted class label directly.
    pred = model.predict_classes(np.array([i]))
    print(f'test data : {np.array([i])}')
    print(pred)
    H = model.predict(np.array([i]))
    # print(H)
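As the docstring above explains, the activation chosen for the single Dense unit decides whether it behaves as a linear or a logistic model. A minimal standalone sketch of the logistic variant, with made-up toy data (swap activation='sigmoid' for activation='linear' and an 'mse' loss to get plain linear regression):

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

x = np.array([[1.0], [2.0], [3.0], [4.0]])
y = np.array([0, 0, 1, 1])

model = Sequential([Dense(1, input_dim=1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer='sgd')
model.fit(x, y, epochs=200, verbose=0)
print(model.predict(x).ravel())  # probabilities between 0 and 1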
Example no. 8
        print("Xu ly anh")
        #test_image = cv2.imread('left.jpg', 0);
        #print(test_image.shape)
        #print(test_image.size)
        test_image = cv2.resize(test_image, (128, 128))
        #cv2.imshow('asdggg', test_image)
        #cv2.waitKey(2)
        test_image = np.array(test_image)
        test_image = test_image.astype('float32')
        test_image /= 255

        test_image = test_image.reshape(1, 128, 128, 1)
        #val = model.predict(test_image)
        #print(val)
        #print(model.predict_classes(test_image))
        value = model.predict_classes(test_image)[0]
        print(value)

        for direction in directions:
            #print(dataset)
            if labels_name[direction] == value:
                print("This is: " + direction)
                #playsound('./mp3/'+ dataset + '.mp3')
                filename = dirr + "newdata/" + direction + "/" + str(
                    idx[value]) + ".jpg"
                idx[value] += 1
                print("ABC")
                cv2.imwrite(filename, saved_image)  #save image
                print("Luu anh")

                break
Example no. 9
    train_score = model.evaluate(x_train, y_train, verbose=0)
    print('Train loss: {}, Train accuracy: {}'.format(train_score[0],
                                                      train_score[1]))
    test_score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss: {}, Test accuracy: {}'.format(test_score[0],
                                                    test_score[1]))
    ACC.append(test_score[1])
    LOSS.append(test_score[0])

print(LOSS)
print(ACC)
print('Acc:', sum(ACC) / repeat, '+/-', (max(ACC) - min(ACC)) / 2)
print('Loss:', sum(LOSS) / repeat, '+/-', (max(LOSS) - min(LOSS)) / 2)

predictions = model.predict_classes(x_test)
confusion_matrix = metrics.confusion_matrix(y_true=testy_norm,
                                            y_pred=predictions)
print(confusion_matrix)
normalised_confusion_matrix = np.array(
    confusion_matrix, dtype=np.float32) / np.sum(confusion_matrix) * 100
print("")
print("Confusion matrix (normalised to % of total test data):")
print(normalised_confusion_matrix)
print(metrics.classification_report(testy_norm, predictions))

width = 12
height = 12
# fig, ax = plt.subplots()
plt.figure(figsize=(width, height))
plt.imshow(normalised_confusion_matrix,
Example no. 10
              metrics=['accuracy'])
hist = model.fit(x_train,
                 y_train,
                 batch_size=batch_size,
                 epochs=epochs,
                 verbose=1,
                 validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

n = 0
plt.imshow(x_test[n].reshape(28, 28), cmap='Greys', interpolation='nearest')
plt.show()

print('The Answer is ', model.predict_classes(x_test[n].reshape(
    (1, 28, 28, 1))))

import random

predicted_result = model.predict(x_test)
predicted_labels = np.argmax(predicted_result, axis=1)

test_labels = np.argmax(y_test, axis=1)

wrong_result = []

for n in range(0, len(test_labels)):
    if predicted_labels[n] != test_labels[n]:
        wrong_result.append(n)

#samples = np.random.choice(1, 1)
Example no. 11
def train_and_save(path,
                   epochs=25,
                   split=0.75,
                   save_path='C:\\',
                   m_name="model",
                   save=False,
                   ret_transfer=False):
    if split > 1.0 or split < 0.0:
        print("Split out of range")
    train_data, number_of_categories, CATEGORY = process_images_cache(path)
    random.shuffle(train_data)
    m_name = m_name + ".model"

    save_path = os.path.join(save_path, m_name)

    x = list()
    y = list()

    for i, j in train_data:
        x.append(i)
        y.append(j)

    x = np.asarray(x, dtype=np.float16)
    y = np.asarray(y, dtype=np.float16)
    #print(len(transfer_values[0]

    #input_size = Input(shape = ( len(transfer_values[0]), ))
    #output_size = Dense(16)(input_size)

    #model = Model(inputs=input_size,outputs = output_size)

    #model.compile(optimizer = "")

    print('number of categories:', number_of_categories)

    model = Sequential()
    model.add(Flatten())
    model.add(Dense(2048, activation='sigmoid'))
    model.add(Dense(512, activation='sigmoid'))
    model.add(Dense(512, activation='sigmoid'))
    model.add(Dense(number_of_categories, activation='softmax'))  # softmax so the class probabilities sum to 1
    model.compile(optimizer='Adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['sparse_categorical_accuracy'])
    #model.summary()
    #model.plot_model('model.jpg')
    print('Compiled')

    try:
        history = model.fit(x[:int(len(x) * split)],
                            y[:int(len(y) * split)],
                            epochs=epochs,
                            shuffle=True)
        eval_score = model.evaluate(x[int(len(x) * split):],
                                    y[int(len(y) * split):])
    except Exception as e:
        print('Error while training/internal testing. Cause:', e)

    try:
        print('\n\n\n Full Description')
        print(CATEGORY)
        y_test = y[int(len(y) * split):]
        y_pred = model.predict_classes(x[int(len(x) * split):])
        print(classification_report(y_test, y_pred))
    except Exception as e:
        print('Cannot evaluate individual classes:', e)

    # use history / eval_score for accuracy checking
    plot_scatter(x, y, number_of_categories)
    plt.plot(history.history['loss'])
    #plt.plot(eval_score[0])
    plt.show()
    plt.plot(history.history['sparse_categorical_accuracy'])
    plt.plot(eval_score[1])  # accuracy from model.evaluate on the held-out split
    #plt.plot(eval_score)
    plt.show()
    if save:
        try:
            save_model(model, save_path)
        except Exception as e:
            print("Error while saving", e, sep='\n')
    if not ret_transfer: return None
    else: return train_data
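A hypothetical call to train_and_save (the dataset path, save folder and model name below are made-up placeholders, not values from the original project):

train_data = train_and_save('C:\\datasets\\flowers',
                            epochs=10,
                            split=0.8,
                            save_path='C:\\models',
                            m_name='flowers',
                            save=True,
                            ret_transfer=True)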
Example no. 12
def evaluate_model(trainX, trainy, testX, testy, testy_norm):
    """
    Create, fit and evaluate a model
    :param trainX: (array)
    :param trainy: (array)
    :param testX: (array)
    :param testy: (array)
    :param testy_norm: (array)
    :return:
        accuracy (float)
        loss (float)
    """
    verbose, epochs, batch_size = 1, 60, 16  # 16
    trainX, testX = scale_data(trainX, testX)
    #    trainX, testX = Magnitude(trainX,testX)
    #    trainX, testX = AutoCorallation(trainX, testX)
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[
        2], trainy.shape[1]
    print(testX.shape)
    print(testy.shape)
    model = Sequential()

    # Small structure
    model.add(
        Conv1D(32,
               5,
               activation='relu',
               padding='same',
               input_shape=(n_timesteps, n_features)))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(64, 5, activation='relu', padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(128, 5, activation='relu', padding='same'))
    model.add(SpatialDropout1D(0.5))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='relu'))
    model.add(Dense(n_outputs, activation='softmax'))
    model.summary()
    plot_model(model, 'model_info.png', show_shapes=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit network
    tensorboard = TensorBoard(log_dir="logs_3xconv/{}".format(time()),
                              histogram_freq=1,
                              write_images=True)
    history = model.fit(trainX,
                        trainy,
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=verbose,
                        validation_split=0.15,
                        shuffle=True,
                        callbacks=[tensorboard])
    # evaluate model
    loss, accuracy = model.evaluate(testX,
                                    testy,
                                    batch_size=batch_size,
                                    verbose=0)
    export_model(model)
    predictions = model.predict_classes(testX)
    print(metrics.classification_report(testy_norm, predictions))
    confusion_matrix = metrics.confusion_matrix(y_true=testy_norm,
                                                y_pred=predictions)
    print(confusion_matrix)
    normalised_confusion_matrix = np.array(
        confusion_matrix, dtype=np.float32) / np.sum(confusion_matrix) * 100
    print("")
    print("Confusion matrix (normalised to % of total test data):")
    print(normalised_confusion_matrix)

    width = 12
    height = 12
    # fig, ax = plt.subplots()
    plt.figure(figsize=(width, height))
    plt.imshow(normalised_confusion_matrix,
               interpolation='nearest',
               cmap=plt.cm.rainbow)
    plt.title("Confusion matrix \n(normalized to the entire test set [%])")
    plt.colorbar()
    tick_marks = np.arange(2)
    LABELS = ["Dynamic", "Static"]
    plt.xticks(tick_marks, LABELS, rotation=90)
    plt.yticks(tick_marks, LABELS)
    plt.tight_layout()
    plt.ylabel('Real value')
    plt.xlabel('Prediction value')

    plt.figure()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Validation'], loc='upper left')
    plt.figure()
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Validation'], loc='upper left')
    plt.show()
    return accuracy, loss
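A hypothetical driver for evaluate_model, following the repeated-run aggregation shown in Example no. 9 (trainX, trainy, testX, testy and testy_norm are assumed to be prepared elsewhere):

repeat = 5
ACC, LOSS = [], []
for _ in range(repeat):
    acc, loss = evaluate_model(trainX, trainy, testX, testy, testy_norm)
    ACC.append(acc)
    LOSS.append(loss)
print('Acc:', sum(ACC) / repeat, '+/-', (max(ACC) - min(ACC)) / 2)
print('Loss:', sum(LOSS) / repeat, '+/-', (max(LOSS) - min(LOSS)) / 2)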
Example no. 13
# %% PARTITION TRAINING AND TESTING DATA

X = np.expand_dims(np.stack(df.mfcc.values, axis=0), -1)
y = k.utils.to_categorical(df.digit, NUM_CLASSES)

X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.1,
                                                    random_state=1)

# %% TRAIN MODEL
keras_callback = k.callbacks.TensorBoard(log_dir='./data/tf_log',
                                         histogram_freq=1,
                                         write_graph=True,
                                         write_images=True)

model.fit(X_train,
          y_train,
          batch_size=64,
          epochs=200,
          verbose=2,
          validation_split=0.1,
          callbacks=[keras_callback])

# %% EVALUATE ON TEST SET
score = model.evaluate(X_test, y_test, verbose=0)
model.predict_classes(X_test)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Example no. 14
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

model = Sequential()
# hidden layer
# the input_dim param only needs to be given on the first layer.
model.add(Dense(input_dim=x_column, units=512, activation='relu'))
# output layer
model.add(Dense(units=y_column, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')
model.fit(x_train, y_train, epochs=5000, batch_size=100, verbose=0)
print(x_test.shape)

total, hit = 0, 0
for i in range(len(x_test)):
    # pred = np.argmax(model.predict(np.array([i])), axis=-1)
    pred = model.predict_classes(np.array([x_test[i]]))
    print('test data : %s' % x_test[i])
    if pred == np.array([0]):
        print(f'answer : {y_test[i]}, saury')
    else:
        print(f'answer : {y_test[i]}, mackerel')
    print('predicted value : %s' % str(pred.flatten()))

    total += 1

    # add 1 when the prediction matches the answer
    hit += int(y_test[i] == pred.flatten())
    print('-' * 30)
# end for

accuracy = hit / total * 100
Example no. 15
def neuralnet(no_model, dnafull, dna0, dna1, dna2, dna3, dna4, dna5, dna6):
    
    """
    dna_temp[0] hid_layer_num INT 1~5
    dna_temp[1] hid_layer_node INT 16~128
    dna_temp[2] epoch INT 100~500
    dna_temp[3] dropout FLOAT 0.00~0.20
    dna_temp[4] maxlen INT 9~19
    dna_temp[5] time_bias INT 1~9
    dna_temp[6] layer INT 1~3
    """
    
    """
    パラメーター設定
    """
    #入力層次元
    n_in = 20
    #中間層次元、層数
    n_hiddens = list()
    for i in range(dna0):
        n_hiddens.append(dna1)
    n_centors = dna0
    #出力層次元、層数
    n_out = 5
    #活性化関数
    activation = 'relu'
    #ドロップアウト率
    p_keep = dna3
    #計算回数
    epochs = dna2
    #EarlyStoppingするか
    isEs= False
    #EarlyStoppingをするまでの回数
    es_patience= 60
    #ミニバッチ処理のサイズ
    batch_size = 1000
    #最適化アルゴリズム
    opt='rmsprop'
    # learning rate (not used in this program; the default value is used)
#    learning_rate=0.001
    # Adam parameters (only used when the optimizer is Adam; not used in this program)
#    beta_1=0.9
#    beta_2=0.999
    # number of timesteps the recurrent layers look back over
    maxlen = dna4
    # how many seconds to shift Y (=0 looks at the past maxlen seconds,
    # =maxlen/2 looks maxlen/2 seconds before and after, =maxlen looks at the future maxlen seconds)
    time_bias = dna5

    # RNN type (SimpleRNN, LSTM, GRU)
    layer_int = dna6

    # whether to use bidirectional layers
    BiDir = False

    # whether even-numbered RNN layers run backwards
    back = False

    # fixed random seed
#    ranseed= 12345
    
#    weight1 = 1
#    weight2 = 1
#    
    print('No_%d' % no_model)
    print(dna0,dna1,dna2,dna3,dna4,dna5,dna6)
    
    # fix the random numbers
    
    import os
    os.environ['PYTHONHASHSEED']='0'
#    np.random.seed(ranseed)
#    rn.seed(ranseed)
    
    # fix the number of threads to 1 (needed for reproducibility)
    session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    
    from tensorflow.python.keras import backend as K
#    tf.compat.v1.set_random_seed()
    
     
    sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(),config=session_conf)
    K.set_session(sess)
    
    
    # weight initialization
    init=initializers.TruncatedNormal()
    
    # file name
    name = 'linear_data_FIR8_comAngle&AveStd_coor_s1_ntd2_26'
#    number = '-2'
    # read the file
    csv_input = pd.read_csv(filepath_or_buffer= name+".csv", encoding="ms932", sep=",")
    array = csv_input.values
    
    # read the provisional input/output values
    
    X=array[:,1:n_in+1].astype(np.float32)
    
    Y = array[:, n_in+1].astype(int)
    
    
    # read the timestamps
    TIME=array[:,0]
    
    leng = len(Y)
    data = []
    target = []
    
    i = 0

    for i in range(maxlen, leng):
        # group the input data into windows of maxlen timesteps
        data.append(X[i-maxlen+1:i+1, :])
        # convert the output data into the task label every N seconds
        target.append(Y[i-time_bias])
    # adjust the shape of the input/output data
    X = np.array(data).reshape(len(data), maxlen, n_in)
    Y = np.array(target)

    # synchronize the timestamps with the input/output data
    TIME = TIME[maxlen-time_bias:leng-time_bias]
    
    # split into training and test data
    x_train, x_test, y_train0, y_test0, time_train, time_test = train_test_split(X, Y, TIME, train_size=0.85, shuffle=False)

    # split the training data into train and validation sets
    x_train, x_validation, y_train0, y_validation0 = train_test_split(x_train, y_train0, train_size=0.9, shuffle=False)

    # convert y to 1-of-K (one-hot) encoding (train, val, test)
    ntr = y_train0.size
    y_train = np.zeros(n_out*ntr).reshape(ntr, n_out).astype(np.float32)
    for i in range(ntr):
        y_train[i, y_train0[i]] = 1.0

    nte = y_test0.size
    y_test = np.zeros(n_out*nte).reshape(nte, n_out).astype(np.float32)
    for i in range(nte):
        y_test[i, y_test0[i]] = 1.0

    y_validation = np.eye(n_out)[(y_validation0.reshape(y_validation0.size))]
        
#    nrow=y_test0.size
   
    # model configuration
    
    model = Sequential()
        
    for i in range(n_centors):
    	if(i==n_centors-1):
    		retSeq=False
    	else:
    		retSeq=True
    	if(i%2==1 and back):
    		gBack=True
    	else:
    		gBack=False
    	if(i==0):
    		in_dir=n_in
    	else:
    		in_dir=n_hiddens[i-1]
        
    	if (layer_int==1):
    		if(BiDir):
    			model.add(Bidirectional(SimpleRNN(n_hiddens[i],activation=activation,kernel_initializer=init,recurrent_initializer=init,dropout=p_keep,recurrent_dropout=p_keep, return_sequences=retSeq,go_backwards=gBack,  input_shape=(maxlen, in_dir) )))
    		else:
    #			model.add(SimpleRNN(n_hiddens[i],activation=activation,kernel_initializer=init,recurrent_initializer=init, return_sequences=retSeq,go_backwards=gBack,  input_shape=(maxlen, in_dir) ))
    			model.add(SimpleRNN(n_hiddens[i],activation=activation,kernel_initializer=init,recurrent_initializer=init,dropout=p_keep,recurrent_dropout=p_keep, return_sequences=retSeq,go_backwards=gBack,  input_shape=(maxlen, in_dir) ))
    
    	elif(layer_int==2):
    		if(BiDir):
    			model.add(Bidirectional(LSTM(n_hiddens[0],activation=activation,kernel_initializer=init,recurrent_initializer=init,dropout=p_keep,recurrent_dropout=p_keep, return_sequences=retSeq,go_backwards=gBack,  input_shape=(maxlen, in_dir) )))
    		else:
    			model.add(LSTM(n_hiddens[0],activation=activation,kernel_initializer=init,recurrent_initializer=init,dropout=p_keep,recurrent_dropout=p_keep, return_sequences=retSeq,go_backwards=gBack,  input_shape=(maxlen, in_dir) ))
    	
    	elif(layer_int==3):
    		if(BiDir):
    			model.add(Bidirectional(GRU(n_hiddens[0],activation=activation,kernel_initializer=init,recurrent_initializer=init,dropout=p_keep,recurrent_dropout=p_keep, return_sequences=retSeq,go_backwards=gBack,  input_shape=(maxlen, in_dir) )))
    		else:
    			model.add(GRU(n_hiddens[0],activation=activation,kernel_initializer=init,recurrent_initializer=init,dropout=p_keep,recurrent_dropout=p_keep, return_sequences=retSeq,go_backwards=gBack,  input_shape=(maxlen, in_dir) ))	
    
    model.add(Dense(n_out,kernel_initializer=init))
    model.add(Activation('softmax'))
    
    # categorical_crossentropy: the targets are one-hot vectors over n_out classes
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    
    early_stopping =EarlyStopping(monitor='val_loss', patience=es_patience, verbose=1)
    
#    now = datetime.now().strftime('%Y%m%d%H%M')
#    flog = name+number+'.log1.csv'
#    
#    csv_logger=CSVLogger(flog)
    
    if (isEs):
    	caBacks=[early_stopping]#,csv_logger]
    
    else:
    	caBacks=[]#csv_logger]
    
    # train the model
    
#    start = time.time()
    
    model.fit(x_train,y_train, epochs=epochs, batch_size=batch_size,validation_data=(x_validation,y_validation),callbacks=caBacks)
    #hist = model.fit(x_train,y_train, epochs=epochs, batch_size=batch_size,callbacks=caBacks)
    #,callbacks=[early_stopping]
    
#    slapsed_time=time.time() - start
#    
#    
#    val_acc = hist.history['val_acc']
#    acc = hist.history['acc']
#    val_loss = hist.history['val_loss']
#    loss = hist.history['loss']
#

#now = datetime.now().strftime('%Y%m%d%H%M')
#
#plt.rc('font',family='serif')
#fig = plt.figure()
#plt.plot(range(len(loss)), loss, label='loss', color='r')
#plt.plot(range(len(val_loss)), val_loss, label='val_loss', color='b')
#plt.xlabel('epochs')
#plt.legend()
#plt.show()
#plt.savefig(name+number+'.loss.png')
#
##plt.rc('font',family='serif')
##fig = plt.figure()
##plt.plot(range(len(val_acc)), val_acc, label='acc', color='b')
##plt.xlabel('epochs')
##plt.show()
##plt.savefig(name+number+'.val_acc.png')
       
    classes = model.predict_classes(x_test, batch_size=1)
#prob = model.predict_proba(x_test, batch_size=1)

# output the weights
#L1 = model.get_weights()
#W1 = np.dot(L1[0],L1[1])+L1[2]
#W2 = np.dot(W1,L1[3])
#W3 = np.dot(W2,L1[4])
#W4 = W3+L1[5]
#weight1 = np.dot(W4,L1[6])+L1[7]
#weight = weight1.transpose()

# output the results
    im = [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
    ip = [0,0,0,0]
    it = [0,0,0,0]
    f = [0,0,0,0]

    ia = [0,0,0,0]
    ib = [0,0,0,0]

    j = 0
    for i in range(4):
        for j in range(y_test0.size):
            if y_test0[j]==i+1:
                it[i] += 1
                       
                if classes[j] == 1:
                    im[i][0] += 1
                if classes[j] == 2:
                    im[i][1] += 1
                if classes[j] == 3:
                    im[i][2] += 1
                if classes[j] == 4:
                    im[i][3] += 1
            else:
                pass
    
    for i in range(4):        
        for k in range(y_test0.size):
            if classes[k]==i+1:
                ip[i]+=1
            else:
                pass

    # compute recall
    for i in range(4):
        if it[i]==0:
            ia[i] = 0
        else:
            ia[i] = im[i][i]/it[i]
    
    # compute precision
    for i in range(4):
        if ip[i]==0:
            ib[i] = 0
        else:
            ib[i] = im[i][i]/ip[i]
    
    # compute the F1 score
    for i in range(4):
        if ia[i]+ib[i]==0:
            f[i] = 0
        else:
            f[i] = 2*ia[i]*ib[i]/(ia[i]+ib[i])
    
#    it_sum = sum(it)
#    ip_sum = sum(ip)
#    ii = im[0][0]+im[1][1]+im[2][2]+im[3][3]#+i5
    
    if_ave = sum(f)/4
    
    model.save(name+'_'+str(no_model)+".h5")
#    model.save("kanno_"+str(no_model)+".model")
   
# =============================================================================
    backend.clear_session()
# =============================================================================
    
    
    return if_ave
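A hypothetical call to neuralnet, with made-up genome values chosen inside the ranges listed in the docstring (2 hidden layers of 64 units, 200 epochs, dropout 0.1, a 12-step window shifted by 3 steps, layer type 2 = LSTM):

dna = [2, 64, 200, 0.1, 12, 3, 2]
mean_f1 = neuralnet(0, dna, *dna)
print('mean F1 over the classes:', mean_f1)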
Example no. 16
plt.xlabel('num of Epochs')
plt.ylabel('accuracy')
plt.title('train_acc vs val_acc')
plt.grid(True)
plt.legend(['train', 'val'], loc=4)
plt.style.use(['classic'])

# Evaluating the model
print('\nEvaluating the model')
score = model.evaluate(X_test, y_test, verbose=0)  # show_accuracy=True,
print('\nTest Loss:', score[0])
print('Test accuracy:', score[1])

print('Predicting an image from the trained dataset:')
test_image = X_test[0:1]
pred = model.predict_classes(test_image)  # predict once and reuse the result
if pred == 0:
    print(pred, '--> Maze with solution path')
else:
    print(pred, '--> Maze without solution path')

# Predicting images from the test dataset
test_data_path = 'D:/FIT Study Material/Semester 2/Artificial Intelligence/Project - Maze Problem/Dataset/Testing'

test_img_list = os.listdir(test_data_path)
for test_image in test_img_list:
    print('\nPredicting the images from the test dataset: ' + test_image)
    test_image = cv2.imread(test_data_path + '/' + test_image)
    test_image = cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)
    test_image = cv2.resize(test_image, (128, 128))
    test_image = np.array(test_image)
    test_image = test_image.astype('float32')
Example no. 17
train_size = 30000
train_file = "D:/Darse ha/kaggle/Digit Recognizer/train.csv"
raw_data = pd.read_csv(train_file)

x, y = data_prep(raw_data)

model = Sequential()
model.add(
    Conv2D(30,
           kernel_size=(3, 3),
           strides=2,
           activation='relu',
           input_shape=(img_rows, img_cols, 1)))
model.add(Dropout(0.5))
model.add(Conv2D(30, kernel_size=(3, 3), strides=2, activation='relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x, y, batch_size=128, epochs=2, validation_split=0.2)

test_file = "D:/Darse ha/kaggle/Digit Recognizer/test.csv"
raw_test = pd.read_csv(test_file)
raw_test = raw_test.values
test_shaped_array = raw_test.reshape(28000, img_rows, img_cols, 1)
preds = model.predict_classes(test_shaped_array)
Example no. 18
def predict(model: Sequential, sample):
    prediction = model.predict_classes(sample)
    return prediction
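A hypothetical use of this helper (clf and x_new are placeholders for a trained Sequential model and a 2-D NumPy batch of samples; they are not defined in the original snippet):

labels = predict(clf, x_new)
print(labels.ravel())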
Example no. 19
def _train(mutated, module_name):
    mutated = mutated[mutated['mod_keys_found_string'] == module_name]
    train_set, val_set, test_set = np.split(
        mutated.sample(frac=1),
        [int(.6 * len(mutated)),
         int(.8 * len(mutated))])
    tasks_sent_train = [row for row in train_set['task_complete']]
    model_tasks3 = Word2Vec(tasks_sent_train,
                            sg=0,
                            size=100,
                            window=6,
                            min_count=1,
                            workers=4,
                            iter=1000)

    train_set['task_complete_one_string'] = train_set['task_complete'].apply(
        lambda x: list_to_string(x))
    test_set['task_complete_one_string'] = test_set['task_complete'].apply(
        lambda x: list_to_string(x))
    val_set['task_complete_one_string'] = val_set['task_complete'].apply(
        lambda x: list_to_string(x))

    y_train = train_set['consistent'].astype(int)
    print(y_train.value_counts(), y_train.shape)

    y_test = test_set['consistent'].astype(int)
    print(y_test.value_counts(), y_test.shape)

    y_val = val_set['consistent'].astype(int)

    tokenizer_train = Tokenizer(lower=False)
    tokenizer_train.fit_on_texts(train_set['task_complete'])
    print(tokenizer_train)

    tokenizer_test = Tokenizer(lower=False)
    tokenizer_test.fit_on_texts(test_set['task_complete'])
    print(tokenizer_test)

    tokenizer_val = Tokenizer(lower=False)
    tokenizer_val.fit_on_texts(val_set['task_complete'])

    tasks_train_tokens = tokenizer_train.texts_to_sequences(
        train_set['task_complete_one_string'])
    tasks_test_tokens = tokenizer_test.texts_to_sequences(
        test_set['task_complete_one_string'])
    tasks_val_tokens = tokenizer_val.texts_to_sequences(
        val_set['task_complete_one_string'])

    num_tokens = [len(tokens) for tokens in tasks_train_tokens]
    num_tokens = np.array(num_tokens)
    np.max(num_tokens)
    np.argmax(num_tokens)
    max_tokens = np.mean(num_tokens) + 2 * np.std(num_tokens)
    max_tokens = int(max_tokens)
    tasks_train_pad = pad_sequences(tasks_train_tokens,
                                    maxlen=max_tokens,
                                    padding='post')
    tasks_test_pad = pad_sequences(tasks_test_tokens,
                                   maxlen=max_tokens,
                                   padding='post')
    tasks_val_pad = pad_sequences(tasks_val_tokens,
                                  maxlen=max_tokens,
                                  padding='post')

    embedding_size = 100
    num_words = len(list(tokenizer_train.word_index)) + 1

    embedding_matrix = np.random.uniform(-1, 1, (num_words, embedding_size))
    for word, i in tokenizer_train.word_index.items():
        if i < num_words:
            embedding_vector = model_tasks3[word]
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector

    sequence_length = max_tokens
    batch_size = 256

    tensorflow.compat.v1.disable_eager_execution()

    # CNN architecture

    num_classes = 2

    # Training params
    num_epochs = 20

    # Model parameters
    num_filters = 64
    weight_decay = 1e-4

    print("training CNN ...")
    model = Sequential()

    # Model add word2vec embedding

    model.add(
        Embedding(
            input_dim=num_words,
            output_dim=embedding_size,
            weights=[embedding_matrix],
            input_length=max_tokens,
            trainable=True,  # the layer is trained
            name='embedding_layer'))
    model.add(
        layers.Conv1D(filters=num_filters,
                      kernel_size=max_tokens,
                      activation='relu',
                      padding='same',
                      kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(layers.MaxPooling1D(2))
    model.add(Dropout(0.25))

    model.add(
        layers.Conv1D(filters=num_filters + num_filters,
                      kernel_size=max_tokens,
                      activation='relu',
                      padding='same',
                      kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(layers.GlobalMaxPooling1D())
    model.add(Dropout(0.25))

    model.add(layers.Flatten())
    model.add(
        layers.Dense(128,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(layers.Dense(num_classes, activation='softmax'))

    sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss=tensorflow.keras.losses.MeanAbsoluteError(),
                  optimizer=sgd,
                  metrics=['accuracy'])
    model.summary()

    model.fit(tasks_train_pad,
              to_categorical(y_train),
              batch_size=batch_size,
              epochs=num_epochs,
              validation_data=(tasks_test_pad, to_categorical(y_test)),
              shuffle=True,
              verbose=2)

    score = model.evaluate(tasks_val_pad, to_categorical(y_val), verbose=0)
    print('loss:', score[0])
    print('Validation accuracy:', score[1])
    y_pred = model.predict_classes(tasks_val_pad)

    cm = confusion_matrix(y_val, y_pred)
    tp = cm[1][1]
    fp = cm[0][1]
    fn = cm[1][0]
    tn = cm[0][0]
    precision = round(tp / (tp + fp), 2)
    print('Consistent: precision=%.3f' % (precision))
    recall = round(tp / (tp + fn), 2)
    print('Consistent: recall=%.3f' % (recall))
    f1_score = (2 * precision * recall) / (precision + recall)
    print('Consistent: f1_score=%.3f' % (f1_score))
    precision_neg = round(tn / (tn + fn), 2)
    print('Inconsistent: precision=%.3f' % (precision_neg))
    recall_neg = round(tn / (tn + fp), 2)
    print('Inconsistent: recall=%.3f' % (recall_neg))
    f1_score_neg = (2 * precision_neg * recall_neg) / (precision_neg +
                                                       recall_neg)
    print('Inconsistent: f1_score=%.3f' % (f1_score_neg))
    ns_probs = [0 for _ in range(len(y_val))]
    ns_auc = roc_auc_score(y_val, ns_probs)
    lr_auc = roc_auc_score(y_val, y_pred)
    mcc = matthews_corrcoef(y_val, y_pred)
    print(precision)
    print('No Skill: ROC AUC=%.3f' % (ns_auc))
    print('Our model: ROC AUC=%.3f' % (lr_auc))
    print('Our model: MCC=%.3f' % (mcc))

    json_out = {"module": module_name, "MCC": mcc, "AUC": lr_auc}
    model.save('models/' + module_name)
    return json_out
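The per-class precision, recall and F1 computed by hand above can be cross-checked with scikit-learn's report; a minimal sketch, assuming y_val and y_pred from this function and that label 1 means 'consistent':

from sklearn.metrics import classification_report
print(classification_report(y_val, y_pred, target_names=['inconsistent', 'consistent']))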
Example no. 20
plot_model(model, to_file='model.png')
SVG(model_to_dot(model).create(prog='dot', format='svg'))

train_model = model.fit(X_train, y_train,
                        batch_size=BATCH_SIZE,
                        epochs=NO_EPOCHS,
                        verbose=1,
                        validation_data=(X_val, y_val))

plot_accuracy_and_loss(train_model)

score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

#get the predictions for the test data
predicted_classes = model.predict_classes(X_test)
#get the indices to be plotted
y_true = test_data.iloc[:, 0]

p = predicted_classes[:10000]
y = y_true[:10000]
correct = np.nonzero(p==y)[0]
incorrect = np.nonzero(p!=y)[0]

print("Correct predicted classes:",correct.shape[0])
print("Incorrect predicted classes:",incorrect.shape[0])

target_names = ["Class {} ({}) :".format(i,labels[i]) for i in range(NUM_CLASSES)]
print(classification_report(y_true, predicted_classes, target_names=target_names))
Example no. 21
# sgd = SGD(lr=0.01, decay=1e-6,  momentum=0.9, nesterov=False)
# model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Fit
model.fit(x=X_train,
          y=Y_train,
          validation_data=(X_val, Y_val),
          epochs=500,
          batch_size=256,
          verbose=1,
          callbacks=[tensorboard])

# Evaluation
result = model.evaluate(x=X_test, y=Y_test)

for name, value in zip(model.metrics_names, result):
    print(name, value)

model.save("src/Models/Myo_Model_" + model_name + ".h5")

# Confusion Matrix
rounded_predictions = model.predict_classes(X_test, batch_size=1, verbose=0)
conf_matrix = confusion_matrix(label_test, rounded_predictions)

cm_plot_labels = ['ex1', 'ex2', 'ex3', 'ex4', 'ex5']

plot_confusion_matrix(conf_matrix, cm_plot_labels, title='Confusion Matrix')
Example no. 22
model.load_weights("best_model_ever_adam0")

#files = os.listdir("songs\\")
#for file in files:
#    _createSpectrogram("songs\\" + file, mode="test")

test_Spect = os.listdir(spectrogramsDir + "test\\")

genres = getAllGenres()

for spect in test_Spect:

    data = crop(spectrogramsDir + "test\\" + spect)

    testX = numpy.asarray(data)
    testX = testX.reshape([-1, 128, 128, 1])

    predictions = model.predict_classes(testX)

    class_counts = [0] * 10
    for c in predictions:
        class_counts[c] += 1

    if max(class_counts) > 0.35 * len(predictions):
        print(
            os.path.splitext(os.path.basename(spect))[0] + ' is ' +
            genres[class_counts.index(max(class_counts))])
    else:
        print("I'm not sure what genre is " +
              os.path.splitext(os.path.basename(spect))[0])
Example no. 23
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=30, batch_size=10, verbose=0)

# model attributes
# information about the input tensors
print(model.inputs)
print('-' * 30)

# information about the output tensors
print(model.outputs)
print('-' * 30)

# references to the layers added with model.add()
print(model.layers)
print('-' * 30)

# metrics : performance indicators
# the loss function is always included by default.
# to add more, pass a list to the metrics parameter of compile().
print(model.metrics_names)
print('-' * 30)

pred = model.predict_classes(x_test)

for idx in range(len(pred)):
    label = y_test[idx]
    print(f'real : {label}, prediction : {pred[idx]}')
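A small standalone sketch of what those attributes typically report once metrics=['accuracy'] is passed to compile (the toy model below is illustrative, not part of the original snippet):

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

m = Sequential([Dense(1, input_dim=4, activation='sigmoid')])
m.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
m.evaluate(np.zeros((2, 4)), np.zeros(2), verbose=0)  # run once so the metrics get built
print(m.metrics_names)  # expected: ['loss', 'accuracy']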
Example no. 24
model.add(Dense(1, input_dim=3, activation="sigmoid"))

# Hidden Layer, 4 neurons
model.add(Dense(4, activation="sigmoid"))

# Output Layer
model.add(Dense(1, activation="sigmoid"))

# Configuring the learning process
model.compile(loss='mean_squared_error',
              optimizer='sgd',
              metrics=['accuracy'])

# Training the machine with class based binary fitting, and sgd optimizer with
# our training input arrays and single output values
model.fit(training_set_inputs, training_set_outputs, epochs=10, batch_size=7)

test = array([[0, 1, 0]])
print("Considering brand new data", test)

# Calculating a prediction of a 0 or 1 binary classifier based on the input
prediction = model.predict_classes(test)

# !!! Should print 0, or whichever value (0 or 1) is the XOR of the first two
# values in the validating test array !!!
print("AI predicts this new data classifies as", prediction[0][0])

expected = test[0][0] ^ test[0][1]
if prediction[0][0] == expected:
    print("The predicted classification of", prediction[0][0], "is correct")
else:
    print("The predicted classification of", prediction[0][0], "is incorrect")
Example no. 25
# output layer
model.add(Dense(units=y_column, activation='sigmoid'))

import tensorflow as tf
learning_rate = 0.01
sgd = tf.keras.optimizers.SGD(lr=learning_rate)

model.compile(loss='binary_crossentropy', optimizer=sgd)

model.fit(x=x_train, y=y_train, epochs=200, verbose=1)

total, hit = 0, 0  # total count, number of correct predictions

for idx in range(len(x_test)):
    result = model.predict_classes(np.array([x_test[idx]]))
    print('test data : %s' % x_test[idx])
    print('answer : %s' % y_test[idx], end=' ')
    print('predicted value : %s' % str(result.flatten()))

    total += 1

    # add 1 when the prediction matches the answer
    hit += int(y_test[idx] == result.flatten())
    print('-' * 30)
# end for

accuracy = hit / total
print('accuracy = %.4f' % (accuracy))

print('finished')
Example no. 26
print(r.history.keys())

plt.plot(r.history['accuracy'])
plt.plot(r.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

plt.plot(r.history['loss'])
plt.plot(r.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'])
plt.show()
y_pred = classifier.predict(x_test)
y_pred = y_pred[:, 0]
y_classes = classifier.predict_classes(x_test, verbose=0)
y_classes = y_classes[:, 0]
precision = precision_score(y_test, y_classes)

recall = recall_score(y_test, y_classes)

score = f1_score(y_classes, y_test)

cm = confusion_matrix(y_test, y_classes)
ac = accuracy_score(y_test, y_classes)