Code Example #1
print "loading data"

f = pydub.AudioSegment.from_mp3('../ml-music/07_-_Brad_Sucks_-_Total_Breakdown.mp3')
data = np.frombuffer(f._data, np.int16)
data = data.astype(np.float64).reshape((-1,2))
print(data.shape)
data = data[:,0]+data[:,1]
#data = data[:,:subsample*int(len(data)/subsample)-1,:]
data -= data.min()
data /= data.max() / 2.
data -= 1.
print(data.shape)

print "Setting up decoder"
decoder = Sequential()
decoder.add(Dense(2048, input_dim=32768, activation='relu'))
decoder.add(Dropout(0.5))
decoder.add(Dense(1024, activation='relu'))
decoder.add(Dropout(0.5))
decoder.add(Dense(1, activation='sigmoid'))

sgd = SGD(lr=0.01, momentum=0.1)
decoder.compile(loss='binary_crossentropy', optimizer=sgd)

print "Setting up generator"
generator = Sequential()
generator.add(Dense(2048*2, input_dim=2048, activation='relu'))
generator.add(Dense(1024*8, activation='relu'))
generator.add(Dense(32768, activation='linear'))
Code Example #2
def run():

    train_e = getParticleSet('/home/drozd/analysis/data_train_elecs.npy')
    train_p = getParticleSet('/home/drozd/analysis/data_train_prots.npy')
    train = np.concatenate((train_e, train_p))
    np.random.shuffle(train)

    X_train = train[:, 0:-1]
    Y_train = train[:, -1]
    del train_e, train_p, train

    val_e = np.concatenate(
        (getParticleSet(
            '/home/drozd/analysis/fraction1/data_validate_elecs_1.npy'),
         getParticleSet('/home/drozd/analysis/fraction1/data_test_elecs_1.npy')
         ))
    val_p = np.concatenate(
        (getParticleSet(
            '/home/drozd/analysis/fraction1/data_validate_prots_1.npy'),
         getParticleSet('/home/drozd/analysis/fraction1/data_test_prots_1.npy')
         ))

    val = np.concatenate((val_e, val_p))
    np.random.shuffle(val)

    X_val = val[:, 0:-1]
    Y_val = val[:, -1]

    val_imba = np.concatenate((val_e[0:int(val_p.shape[0] / 100)], val_p))
    np.random.shuffle(val_imba)
    X_val_imba = val_imba[:, 0:-1]
    Y_val_imba = val_imba[:, -1]

    del val_e, val_p, val, val_imba
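
    # fully connected electron/proton classifier: three relu hidden layers with dropout, sigmoid output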

    model = Sequential()
    model.add(
        Dense(300,
              input_shape=(X_train.shape[1], ),
              kernel_initializer='he_uniform',
              activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(150, kernel_initializer='he_uniform', activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(70, kernel_initializer='he_uniform', activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(1, kernel_initializer='he_uniform', activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['binary_accuracy'])

    rdlronplt = ReduceLROnPlateau(monitor='loss', patience=3, min_lr=0.001)
    earl = EarlyStopping(monitor='loss', min_delta=0.0001, patience=5)
    callbacks = [rdlronplt, earl]

    history = model.fit(X_train,
                        Y_train,
                        batch_size=150,
                        epochs=40,
                        verbose=0,
                        callbacks=callbacks,
                        validation_data=(X_val, Y_val))

    # --------------------------------
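    # score the training set and both validation sets, then build PR and ROC curves for each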

    predictions_balanced = model.predict(X_val)
    predictions_imba = model.predict(X_val_imba)
    predictions_train = model.predict(X_train)

    del X_val, X_val_imba, X_train

    sk_l_precision_b, sk_l_recall_b, sk_l_thresholds_b = precision_recall_curve(
        Y_val, predictions_balanced)
    sk_l_precision_i, sk_l_recall_i, sk_l_thresholds_i = precision_recall_curve(
        Y_val_imba, predictions_imba)
    sk_l_precision_t, sk_l_recall_t, sk_l_thresholds_t = precision_recall_curve(
        Y_train, predictions_train)

    sk_l_fpr_b, sk_l_tpr_b, sk_l_roc_thresholds_b = roc_curve(
        Y_val, predictions_balanced)
    sk_l_fpr_i, sk_l_tpr_i, sk_l_roc_thresholds_i = roc_curve(
        Y_val_imba, predictions_imba)
    sk_l_fpr_t, sk_l_tpr_t, sk_l_roc_thresholds_t = roc_curve(
        Y_train, predictions_train)

    man_l_precision_b, man_l_recall_b, man_l_thresholds_b = getPR(
        Y_val, predictions_balanced, 100)
    man_l_precision_i, man_l_recall_i, man_l_thresholds_i = getPR(
        Y_val_imba, predictions_imba, 100)

    man_l_fpr_b, man_l_tpr_b, man_l_roc_thresholds_b = getROC(
        Y_val, predictions_balanced, 100)
    man_l_fpr_i, man_l_tpr_i, man_l_roc_thresholds_i = getROC(
        Y_val_imba, predictions_imba, 100)

    print("----- AUC -----")
    print("Train:", average_precision_score(Y_train, predictions_train))
    print("Validate:", average_precision_score(Y_val, predictions_balanced))
    print("----- F1 -----")
    print("Train:", f1_score(Y_train, np.around(predictions_train)))
    print("Validate:", f1_score(Y_val, np.around(predictions_balanced)))
    print("----- Precision/Recall -----")
    print("Train:", precision_score(Y_train, np.around(predictions_train)),
          " / ", recall_score(Y_train, np.around(predictions_train)))
    print("Validate:", precision_score(Y_val, np.around(predictions_balanced)),
          " / ", recall_score(Y_val, np.around(predictions_balanced)))

    fig1 = plt.figure()
    plt.plot(sk_l_precision_b, sk_l_recall_b, label='balanced')
    plt.plot(sk_l_precision_i, sk_l_recall_i, label='imbalanced')
    #~ plt.plot(sk_l_precision_t, sk_l_recall_t,label='training set')
    #~ plt.plot(man_l_precision_b, man_l_recall_b,'o',label='balanced, hand')
    #~ plt.plot(man_l_precision_i, man_l_recall_i,'o',label='imbalanced, hand')
    plt.xlabel('Precision')
    plt.ylabel('Recall')
    plt.legend(loc='best')
    plt.savefig('PR')

    fig1b = plt.figure()
    plt.plot(sk_l_precision_b, sk_l_recall_b, label='validation set')
    plt.plot(sk_l_precision_t, sk_l_recall_t, label='training set')
    plt.xlabel('Precision')
    plt.ylabel('Recall')
    plt.legend(loc='best')
    plt.savefig('PRb')

    fig2 = plt.figure()
    plt.plot(sk_l_fpr_b, sk_l_tpr_b, label='balanced')
    plt.plot(sk_l_fpr_i, sk_l_tpr_i, label='imbalanced')
    #~ plt.plot(man_l_fpr_b, man_l_tpr_b,'o',label='balanced, hand')
    #~ plt.plot(man_l_fpr_i, man_l_tpr_i,'o',label='imbalanced, hand')
    plt.xlabel('False Positive')
    plt.ylabel('True Positive')
    plt.legend(loc='best')
    plt.savefig('ROC')

    fig2b = plt.figure()
    plt.plot(sk_l_fpr_b, sk_l_tpr_b, label='validation set')
    plt.plot(sk_l_fpr_t, sk_l_tpr_t, label='training set')
    plt.xlabel('False Positive')
    plt.ylabel('True Positive')
    plt.legend(loc='best')
    plt.savefig('ROCb')

    Nbins = 50
    binList = [x / Nbins for x in range(0, Nbins + 1)]
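
    # histogram the classifier scores separately for true electrons and true protons (raw and normalised)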

    elecs_t, prots_t = getClassifierScore(Y_train, predictions_train)
    fig3 = plt.figure()
    plt.hist(elecs_t,
             bins=binList,
             label='e',
             alpha=0.7,
             histtype='step',
             color='green')
    plt.hist(prots_t,
             bins=binList,
             label='p',
             alpha=0.7,
             histtype='step',
             color='red')
    plt.xlabel('Classifier score')
    plt.ylabel('Number of events')
    plt.title('Training set')
    plt.legend(loc='best')
    plt.yscale('log')
    plt.savefig('predHisto_train')

    fig3b = plt.figure()
    plt.hist(elecs_t,
             bins=binList,
             label='e',
             alpha=0.7,
             histtype='step',
             color='green',
             density=True)
    plt.hist(prots_t,
             bins=binList,
             label='p',
             alpha=0.7,
             histtype='step',
             color='red',
             density=True)
    plt.xlabel('Classifier score')
    plt.ylabel('Fraction of events')
    plt.title('Training set - normalised')
    plt.legend(loc='best')
    plt.yscale('log')
    plt.savefig('predHisto_train_n')
    del elecs_t, prots_t, Y_train, predictions_train

    elecs_b, prots_b = getClassifierScore(Y_val, predictions_balanced)
    fig4 = plt.figure()
    plt.hist(elecs_b,
             bins=binList,
             label='e',
             alpha=0.7,
             histtype='step',
             color='green')
    plt.hist(prots_b,
             bins=binList,
             label='p',
             alpha=0.7,
             histtype='step',
             color='red')
    plt.xlabel('Classifier score')
    plt.ylabel('Number of events')
    plt.title('Balanced validation set')
    plt.legend(loc='best')
    plt.yscale('log')
    plt.savefig('predHisto_bal')

    fig4b = plt.figure()
    plt.hist(elecs_b,
             bins=binList,
             label='e',
             alpha=0.7,
             histtype='step',
             color='green',
             density=True)
    plt.hist(prots_b,
             bins=binList,
             label='p',
             alpha=0.7,
             histtype='step',
             color='red',
             density=True)
    plt.xlabel('Classifier score')
    plt.ylabel('Fraction of events')
    plt.title('Balanced validation set - normalised')
    plt.legend(loc='best')
    plt.yscale('log')
    plt.savefig('predHisto_bal_n')
    del elecs_b, prots_b, Y_val, predictions_balanced

    elecs_i, prots_i = getClassifierScore(Y_val_imba, predictions_imba)
    fig5 = plt.figure()
    plt.hist(elecs_i,
             bins=binList,
             label='e',
             alpha=0.7,
             histtype='step',
             color='green')
    plt.hist(prots_i,
             bins=binList,
             label='p',
             alpha=0.7,
             histtype='step',
             color='red')
    plt.xlabel('Classifier score')
    plt.ylabel('Number of events')
    plt.legend(loc='best')
    plt.title('Imbalanced validation set')
    plt.yscale('log')
    plt.savefig('predHisto_imba')

    fig5b = plt.figure()
    plt.hist(elecs_i,
             bins=binList,
             label='e',
             alpha=0.7,
             histtype='step',
             color='green',
             density=True)
    plt.hist(prots_i,
             bins=binList,
             label='p',
             alpha=0.7,
             histtype='step',
             color='red',
             density=True)
    plt.xlabel('Classifier score')
    plt.ylabel('Fraction of events')
    plt.title('Imbalanced validation set - normalised')
    plt.legend(loc='best')
    plt.yscale('log')
    plt.savefig('predHisto_imba_n')
Code Example #3
def training_code():
    #titanic-complete3.arff is the same file we were given, except:
    #the attributes have been normalised between 0 and 1,
    #an ordinal-to-numeric filter has been applied,
    #and the cabin and embarked attributes have been removed

    #Convert the arff file into numpy arrays.
    raw_data = loadarff('titanic-complete3.arff')
    df_data = pd.DataFrame(raw_data[0])
    arr = df_data.to_numpy()

    #The survival attribute was read in as a string; fix that here
    numbers = []
    x = 0
    for i in range(arr.size//6):
        for word in arr[x,5].split():
            if word.isdigit():
                numbers.append(int(word))
                x += 1
    y=0
    for i in range(len(numbers)):
        arr[y,5]= numbers[y]
        y+=1

    arr = arr.astype("float32")

    # X_input holds the 5 input attributes
    # Y_output holds the corresponding class labels
    X_input = arr[:,0:5]
    Y_output = arr[:,5]

    num_folds = 10
    acc_per_fold = []
    loss_per_fold = []

    # initialise the weights (fix numpy's random seed)
    np.random.seed(42)

    skf = StratifiedKFold(n_splits=num_folds, shuffle=True)
    fold_no = 1
    visualo = []
    visuaacc = []
    num_of_epochs = 2000
    #run cross-validation and train the model
    for train_index, test_index in skf.split(X_input, Y_output):

        # One hot encoding
        X_train, X_test = X_input[train_index], X_input[test_index]
        Y_train, Y_test = np_utils.to_categorical(Y_output[train_index], 2), np_utils.to_categorical(Y_output[test_index], 2)

        #build my MLP
        model = Sequential()
        model.add(Dense(4,input_dim = (5), activation="relu"))
        model.add(Dense(2, activation="sigmoid"))

        #Adam is a variant of SGD
        model.compile(optimizer="Adam", loss="binary_crossentropy", metrics = ["accuracy"])
        print('------------------------------------------------------------------------')
        print(f'Training for fold {fold_no} ...')

        csv_logger = CSVLogger("training_mlp.log", append=True, separator=";")

        history = model.fit(x=X_train, y=Y_train, epochs=num_of_epochs, callbacks=[csv_logger], verbose=0)

        model.save("Titanic_mlp.h5")
        scores = model.evaluate(X_test, Y_test, verbose=0)
        print(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%')
        acc_per_fold.append(scores[1] * 100)
        loss_per_fold.append(scores[0])
        fold_no += 1
        visualo.append(history.history["loss"])
        visuaacc.append(history.history["accuracy"])

    print('------------------------------------------------------------------------')
    print('Score per fold')
    for i in range(0, len(acc_per_fold)):
        print('------------------------------------------------------------------------')
        print(f'> Fold {i+1} - Loss: {loss_per_fold[i]} - Accuracy: {acc_per_fold[i]}%')
    print('------------------------------------------------------------------------')
    print('Average scores for all folds:')
    print(f'> Accuracy: {np.mean(acc_per_fold)} (+- {np.std(acc_per_fold)})')
    print(f'> Loss: {np.mean(loss_per_fold)}')
    print('------------------------------------------------------------------------')

    #plot the training curves: the per-epoch average across the k folds
    herek = np.asarray(visualo, dtype=np.float32)
    loss_visu = np.sum(herek, axis=0)
    plt.plot(loss_visu/num_folds, label='Binary Crossentropy (loss)')
    herek2 = np.asarray(visuaacc, dtype=np.float32)
    acc_visu2 = np.sum(herek2, axis=0)
    plt.plot(acc_visu2/num_folds, label='accuracy/100')
    plt.title('training progress for titanic_mlp')
    plt.ylabel('value')
    plt.xlabel('No. epoch')
    plt.legend(loc="upper left")
    plt.show()
Code Example #4
def build_part1_RNN(window_size):
    model = Sequential()
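    # a single LSTM layer reads the (window_size, 1) series; Dense(1) gives the regression output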
    model.add(LSTM(5, input_shape = (window_size,1)))
    model.add(Dense(1))
    return model
Code Example #5
import pickle
import pandas as pd
import numpy as np
from keras.models import Sequential
from Libraries import data_preprocessing as pp
from Libraries import data_evaluation as eval
from Libraries import model_evaluation as m_Eval
from sklearn.model_selection import train_test_split
from Libraries import model_setup

RunName = 'bla'
file = open('Data.p', 'rb')
Data = pickle.load(file)

x = Data[0]
m = Sequential()
dropChannels = [
    'time', 'stopId', 'trg1', 'n1', 'trot1', 'tlin1', 'tlin2', 'tamb1'
]

InputDataSet = pp.shape_Data_to_LSTM_format(Data[0][0], dropChannels)
input_shape = (None, InputDataSet.shape[2])
m = model_setup.distributed_into_one(input_shape)
test_data = list()
histories = list()
epochs = 1

for currData in Data:
    seed = 0
    X = pp.shape_Data_to_LSTM_format(currData[0], dropChannels)
    #y = pp.shape_Labels_to_LSTM_format(currData[1])
Code Example #6
def run(gParameters):

    print ('Params:', gParameters)

    file_train = gParameters['train_data']
    file_test = gParameters['test_data']
    url = gParameters['data_url']

    train_file = candle.get_file(file_train, url+file_train, cache_subdir='Pilot1')
    test_file = candle.get_file(file_test, url+file_test, cache_subdir='Pilot1')

    X_train, Y_train, X_test, Y_test = load_data(train_file, test_file, gParameters)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Y_train shape:', Y_train.shape)
    print('Y_test shape:', Y_test.shape)

    x_train_len = X_train.shape[1]

    # this reshaping is critical for the Conv1D to work

    X_train = np.expand_dims(X_train, axis=2)
    X_test = np.expand_dims(X_test, axis=2)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    model = Sequential()

    layer_list = list(range(0, len(gParameters['conv']), 3))
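    # gParameters['conv'] is a flat list of (filters, filter_length, stride) triples; walk it three at a time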
    for l, i in enumerate(layer_list):
        filters = gParameters['conv'][i]
        filter_len = gParameters['conv'][i+1]
        stride = gParameters['conv'][i+2]
        print(int(i/3), filters, filter_len, stride)
        if gParameters['pool']:
            pool_list=gParameters['pool']
            if type(pool_list) != list:
                pool_list=list(pool_list)

        if filters <= 0 or filter_len <= 0 or stride <= 0:
                break
        if 'locally_connected' in gParameters:
                model.add(LocallyConnected1D(filters, filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1)))
        else:
            #input layer
            if i == 0:
                model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid', input_shape=(x_train_len, 1)))
            else:
                model.add(Conv1D(filters=filters, kernel_size=filter_len, strides=stride, padding='valid'))
        model.add(Activation(gParameters['activation']))
        if gParameters['pool']:
                model.add(MaxPooling1D(pool_size=pool_list[int(i/3)]))

    model.add(Flatten())

    for layer in gParameters['dense']:
        if layer:
            model.add(Dense(layer))
            model.add(Activation(gParameters['activation']))
            if gParameters['drop']:
                    model.add(Dropout(gParameters['drop']))
    model.add(Dense(gParameters['classes']))
    model.add(Activation(gParameters['out_act']))

#Reference case
#model.add(Conv1D(filters=128, kernel_size=20, strides=1, padding='valid', input_shape=(P, 1)))
#model.add(Activation('relu'))
#model.add(MaxPooling1D(pool_size=1))
#model.add(Conv1D(filters=128, kernel_size=10, strides=1, padding='valid'))
#model.add(Activation('relu'))
#model.add(MaxPooling1D(pool_size=10))
#model.add(Flatten())
#model.add(Dense(200))
#model.add(Activation('relu'))
#model.add(Dropout(0.1))
#model.add(Dense(20))
#model.add(Activation('relu'))
#model.add(Dropout(0.1))
#model.add(Dense(CLASSES))
#model.add(Activation('softmax'))

    kerasDefaults = candle.keras_default_config()

    # Define optimizer
    optimizer = candle.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    model.summary()
    model.compile(loss=gParameters['loss'],
                  optimizer=optimizer,
                  metrics=[gParameters['metrics']])

    output_dir = gParameters['save']

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # calculate trainable and non-trainable params
    gParameters.update(candle.compute_trainable_params(model))

    # set up a bunch of callbacks to do work during model training..
    model_name = gParameters['model_name']
    path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)
    # checkpointer = ModelCheckpoint(filepath=path, verbose=1, save_weights_only=False, save_best_only=True)
    csv_logger = CSVLogger('{}/training.log'.format(output_dir))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
    candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters)
    timeoutMonitor = candle.TerminateOnTimeOut(gParameters['timeout'])
    history = model.fit(X_train, Y_train,
                    batch_size=gParameters['batch_size'],
                    epochs=gParameters['epochs'],
                    verbose=1,
                    validation_data=(X_test, Y_test),
                    callbacks = [csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])

    score = model.evaluate(X_test, Y_test, verbose=0)

    if False:
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
        # serialize model to JSON
        model_json = model.to_json()
        with open("{}/{}.model.json".format(output_dir, model_name), "w") as json_file:
            json_file.write(model_json)

        # serialize model to YAML
        model_yaml = model.to_yaml()
        with open("{}/{}.model.yaml".format(output_dir, model_name), "w") as yaml_file:
            yaml_file.write(model_yaml)

        # serialize weights to HDF5
        model.save_weights("{}/{}.weights.h5".format(output_dir, model_name))
        print("Saved model to disk")

        # load json and create model
        json_file = open('{}/{}.model.json'.format(output_dir, model_name), 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model_json = model_from_json(loaded_model_json)


        # load yaml and create model
        yaml_file = open('{}/{}.model.yaml'.format(output_dir, model_name), 'r')
        loaded_model_yaml = yaml_file.read()
        yaml_file.close()
        loaded_model_yaml = model_from_yaml(loaded_model_yaml)


        # load weights into new model
        loaded_model_json.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
        print("Loaded json model from disk")

        # evaluate json loaded model on test data
        loaded_model_json.compile(loss=gParameters['loss'],
            optimizer=gParameters['optimizer'],
            metrics=[gParameters['metrics']])
        score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)

        print('json Test score:', score_json[0])
        print('json Test accuracy:', score_json[1])

        print("json %s: %.2f%%" % (loaded_model_json.metrics_names[1], score_json[1]*100))

        # load weights into new model
        loaded_model_yaml.load_weights('{}/{}.weights.h5'.format(output_dir, model_name))
        print("Loaded yaml model from disk")

        # evaluate loaded model on test data
        loaded_model_yaml.compile(loss=gParameters['loss'],
            optimizer=gParameters['optimizer'],
            metrics=[gParameters['metrics']])
        score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)

        print('yaml Test score:', score_yaml[0])
        print('yaml Test accuracy:', score_yaml[1])

        print("yaml %s: %.2f%%" % (loaded_model_yaml.metrics_names[1], score_yaml[1]*100))

    return history
Code Example #7
#values from range 0-255 change to range 0-1
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
#print(X_train[0], )

#preprocessing test data
classes = 10
y_train = to_categorical(y_train, classes)
y_test = to_categorical(y_test, classes)

#building model
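#a deep MLP: nine 1000-unit relu layers on the flattened 784-pixel input, 10-way softmax output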

nn5 = Sequential()
nn5.add(Dense(1000, activation='relu', input_shape=(784,)))
nn5.add(Dense(1000, activation ='relu'))
nn5.add(Dense(1000, activation ='relu'))
nn5.add(Dense(1000, activation ='relu'))
nn5.add(Dense(1000, activation ='relu'))
nn5.add(Dense(1000, activation ='relu'))
nn5.add(Dense(1000, activation ='relu'))
nn5.add(Dense(1000, activation ='relu'))
nn5.add(Dense(1000, activation ='relu'))
nn5.add(Dense(classes, activation='softmax'))
nn5.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

print(nn5.summary())

csv_logger = CSVLogger('training2.log', separator=',', append=False)
Code Example #8
###############################################################################
## Finished model
METRICS = [
    keras.metrics.MeanSquaredError(name='MSE'),
    keras.metrics.RootMeanSquaredError(name='RMSE'),
    keras.metrics.MeanAbsoluteError(name='MAE'),
]
NUMBER_OF_EPOCHS = 25
BATCH_SIZE = 5000
LEARNING_RATE = 0.001

print('\nCreating learning model.')
clf = Sequential()
clf.add(
    Dense(X_train.shape[1],
          activation='relu',
          input_shape=(X_train.shape[1], )))
clf.add(Dense(32, activation='relu'))
clf.add(Dense(8, activation='relu'))
clf.add(Dense(32, activation='relu'))
clf.add(Dense(X_train.shape[1], activation=None))

###############################################################################
## Compile the network
###############################################################################
print('\nCompiling the network.')
clf.compile(loss='mean_squared_error',
            optimizer=Adam(lr=LEARNING_RATE),
Code Example #9
train_x = np.array(train_data[x_label])      # train_x holds the feature columns of every item in train.csv
test_x = np.array(test_data[x_label])        # test_x holds the feature columns of every item in test.csv



# ------ Convert train_y into one-hot vectors (9 classes) ------ #
train_y = np.zeros([len(train_y_raw), 9])    # build the train_y matrix (49502 x 9)
for i in range(len(train_y_raw)):
    lable_data = int(train_y_raw[i][-1])     # lable_data is the class number of each of the 49502 items
    train_y[i, lable_data-1] = 1             # one-hot encode the class (class 7 = 000000100)
##print(train_x.shape, train_y.shape, test_x.shape)# (49502, 93) (49502, 9) (12376, 93)



# ------ Build and train the model: a 93-128-64-32-16-9 network ------ #
model = Sequential()                         # Sequential model (a linear stack of layers)
model.add(Dense(128, input_shape=(93,), activation="relu"))  
model.add(Dense(64, activation="relu"))
model.add(Dense(32, activation="relu"))
model.add(Dense(16, activation="relu"))
model.add(Dense(9))
model.add(Activation('softmax'))             # relu for the hidden layers, softmax for the output layer
#model.summary()
model.compile(loss='mean_squared_logarithmic_error',
              optimizer='adadelta', metrics=['accuracy'])
model.fit(x = train_x, y = train_y, batch_size = 2048, epochs = 250)  # train the model, iterating over the samples in batches



# ------ Predict on the test set ------ #
test_y = model.predict(test_x)
Code Example #10
def save_bottlebeck_features():
    np.random.seed(2929)

    vgg_model = applications.VGG16(weights='imagenet',
                                   include_top=False,
                                   input_shape=(150, 150, 3))
    print('Model loaded.')

    #initialise top model
    top_model = Sequential()
    top_model.add(Flatten(input_shape=vgg_model.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))

    model = Model(inputs=vgg_model.input, outputs=top_model(vgg_model.output))

    model.trainable = True
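    # all layers are trainable by default; the loop below freezes the first 7 layers of the combined model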

    model.summary()

    #Total of 20 layers. The classification is considered as one layer
    #Therefore, intermediate is 19 layers
    #0, 4[:4], 3[:7], 4[:11], 4[:15], 4[:19] (Group 0, 1, 2, 3, 4, 5)
    #0 -> All trainable
    #5 -> All non-trainable except classification layer
    #Always keep layer 20 trainable because it is classification layer
    #layer_count = 1
    for layer in model.layers[:7]:
        layer.trainable = False
    #print("NO-Top: Layer is %d trainable" %layer_count)
    #layer_count = layer_count + 1

    model.summary()

    train_datagen = ImageDataGenerator(rescale=1. / 255)
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')

    sgd = optimizers.Adam(
        lr=1e-6
    )  #optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss="binary_crossentropy",
                  optimizer=sgd,
                  metrics=['accuracy'])

    #        model.compile(optimizer='rmsprop',
    #            loss='binary_crossentropy', metrics=['accuracy'])

    history = model.fit_generator(
        train_generator,
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=nb_validation_samples // batch_size,
        verbose=1)

    history_dict = history.history

    #Plotting the training and validation loss
    loss_values = history_dict['loss']
    val_loss_values = history_dict['val_loss']
    epochs_0 = range(1, len(history_dict['acc']) + 1)
    plt.plot(epochs_0, loss_values, 'bo', label='Training loss')
    plt.plot(epochs_0, val_loss_values, 'b', label='Validation loss')
    plt.title(
        'ADvsMC_64_VGG16_Freeze_data2_group2 - Training and validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    #plt.show()
    plt.savefig('ADvsMC_64_VGG16_Freeze_data2_group2_loss.png')
    plt.close()

    #Plotting the training and validation accuracy
    acc_values = history_dict['acc']
    val_acc_values = history_dict['val_acc']
    plt.plot(epochs_0, acc_values, 'bo', label='Training acc')
    plt.plot(epochs_0, val_acc_values, 'b', label='Validation acc')
    plt.title(
        'ADvsMC_64_VGG16_Freeze_data2_group2 - Training and validation accuracy'
    )
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    #plt.show()
    plt.savefig('ADvsMC_64_VGG16_Freeze_data2_group2_acc.png')
    plt.close()
Code Example #11
def SegNet():
    model = Sequential()
    #encoder   256, 256
    model.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               input_shape=(img_w, img_h, 3),
               padding='same',
               activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #(128,128)

    model.add(
        Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #(64,64)

    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #(32,32)

    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #(16,16)

    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #(8,8)

    #decoder
    model.add(UpSampling2D(size=(2, 2)))

    #(16,16)
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    #(32,32)
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    #(64,64)
    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    #(128,128)
    model.add(
        Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))
    #(256,256)
    model.add(
        Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(
        Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(n_label, (1, 1), strides=(1, 1), padding='same'))
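    # the 1x1 convolution outputs n_label channels; the Reshape/Permute/softmax below turn them into per-pixel class scores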

    model.add(Reshape((n_label, img_w * img_h)))
    #swap axis 1 and axis 2; equivalent to np.swapaxes(layer, 1, 2)
    model.add(Permute((2, 1)))
    model.add(Activation('softmax'))
    sgd = optimizers.SGD(lr=0.005, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    model.summary()
    return model
Code Example #12
from sklearn.model_selection import train_test_split
import numpy as np
import keras
from keras.layers import Merge
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, PReLU, BatchNormalization
from keras.optimizers import SGD

DOC_VEC_DIM=10
NUMERICAL_FEATURE_DIM=5
y_train=[0.8,0.9]

# semantic
semantic_input=np.random.standard_normal(size=(2,10))

semantic_model= Sequential()
semantic_model.add(Dense(100,input_dim=DOC_VEC_DIM))
semantic_model.add(PReLU())
semantic_model.add(Dropout(0.2))
semantic_model.add(BatchNormalization())

# numerical
numerical_input=np.random.standard_normal(size=(2,NUMERICAL_FEATURE_DIM))

numerical_model= Sequential()
numerical_model.add(Dense(100, input_dim=NUMERICAL_FEATURE_DIM))
numerical_model.add(PReLU())
numerical_model.add(Dropout(0.2))
numerical_model.add(BatchNormalization())

#merge
Code Example #13
File: trainning.py Project: DavidDavilaRTF/fake_news
    def mesure_1(self):
        for i in range(self.cv):
            self.split_train_test()
            k_y = 1
            y_pred = None
            for col_y in self.y_train:
                cor_xy = []
                for c in self.x_train:
                    if numpy.var(self.x_train[c]) > 0:
                        self.x_train[c] = (numpy.array(self.x_train[c]) - numpy.mean(self.x_train[c])) / numpy.var(self.x_train[c])
                model = linear_model.LinearRegression()
                model.fit(self.x_train,self.y_train[col_y])
                cor_xy = numpy.abs(model.coef_)

                # for c in self.x_train:
                #     cor_xy.append(abs(self.y_train[col_y].corr(self.x_train[c])))
                # cor_xy = numpy.array(cor_xy)
                # sel = cor_xy.astype(str) == 'nan'
                # cor_xy[sel] = 0

                cor_xy = pandas.DataFrame(cor_xy)
                cor_xy.columns = ['corr']
                cor_xy = cor_xy.sort_values(['corr'],ascending = False)
                k = 1
                col_x = self.x_train.columns
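                # at step k, keep the top k/nb_split fraction of columns, ranked by absolute regression coefficient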
                
                while k <= self.nb_split:
                    x_an_train = self.x_train[col_x[cor_xy.index[0:int(k / self.nb_split * len(col_x))]]]
                    x_an_test = self.x_test[col_x[cor_xy.index[0:int(k / self.nb_split * len(col_x))]]]
                    
                    if self.model_type == 'logistic':
                        model = linear_model.LogisticRegression(penalty='none',solver='newton-cg')
                    elif self.model_type == 'tree':
                        model = tree.DecisionTreeClassifier()
                    elif self.model_type == 'rf':
                        model = ensemble.RandomForestClassifier()
                    elif self.model_type == 'svm':
                        model = svm.SVC(probability = True)
                    elif self.model_type == 'xgboost':
                        model = xgb.XGBClassifier(objective="binary:logistic")
                    
                    if  self.model_type != 'deeplearning':
                        model.fit(x_an_train,self.y_train[col_y])
                        y_pred = model.predict_proba(x_an_test)[:,1]

                    if self.model_type == 'deeplearning':
                        model = Sequential()
                        model.add(Dense(128, activation='relu'))
                        # model.add(Dense(128, activation='softplus'))
                        # model.add(Dense(128, activation='tanh'))
                        # model.add(Dropout(0.5))
                        model.add(Dense(1, activation='sigmoid'))
                        model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
                        model.fit(numpy.array(x_an_train), numpy.array(self.y_train[col_y]),batch_size=32, nb_epoch=10, verbose=0)
                        y_pred = model.predict_proba(x_an_test)[:,0]

                    fpr, tpr, thresholds = metrics.roc_curve(self.y_test[:,0], y_pred, pos_label=1)
                    auc_res = metrics.auc(fpr, tpr)
                    sel_11 = (y_pred >= 0.5) *  (self.y_test[:,0] == 1)
                    sel_10 = (y_pred >= 0.5) *  (self.y_test[:,0] == 0)
                    sel_01 = (y_pred < 0.5) * (self.y_test[:,0] == 1)
                    sel_00 = (y_pred < 0.5) * (self.y_test[:,0] == 0)
                    self.mes['11'].iloc[k-1] += sum(sel_11)
                    self.mes['10'].iloc[k-1] += sum(sel_10)
                    self.mes['01'].iloc[k-1] += sum(sel_01)
                    self.mes['00'].iloc[k-1] += sum(sel_00)
                    self.mes['auc'].iloc[k-1] += auc_res
                    print(str(k_y) + ' - ' + str(k) + ' - ' + str(i))
                    k += 1
                k_y += 1
        self.mes.to_csv('C:/netflix/model_outecomes_imdb.csv',sep = ';',index = False)
Code Example #14
File: trainning.py Project: DavidDavilaRTF/fake_news
    def mesure(self):
        for i in range(self.cv):
            self.split_train_test()
            k_y = 1
            y_pred = None
            col_y_i = 0
            for col_y in self.y_train:
                cor_xy = []
                # for c in self.x_train:
                #     if numpy.var(self.x_train[c]) > 0:
                #         self.x_train[c] = (numpy.array(self.x_train[c]) - numpy.mean(self.x_train[c])) / numpy.var(self.x_train[c])
                # model = linear_model.LinearRegression()
                # model.fit(self.x_train,self.y_train[col_y])
                # cor_xy = numpy.abs(model.coef_)

                for c in self.x_train:
                    cor_xy.append(abs(self.y_train[col_y].corr(self.x_train[c])))
                cor_xy = numpy.array(cor_xy)
                sel = cor_xy.astype(str) == 'nan'
                cor_xy[sel] = 0

                cor_xy = pandas.DataFrame(cor_xy)
                cor_xy.columns = ['corr']
                cor_xy = cor_xy.sort_values(['corr'],ascending = False)
                k = 1
                col_x = self.x_train.columns
                
                x_an_train = self.x_train[col_x[cor_xy.index[0:int(self.prop_db * len(col_x))]]]
                x_an_test = self.x_test[col_x[cor_xy.index[0:int(self.prop_db * len(col_x))]]]
                
                if self.model_type == 'logistic':
                    model = linear_model.LogisticRegression(penalty='none',solver='newton-cg')
                elif self.model_type == 'tree':
                    model = tree.DecisionTreeClassifier()
                elif self.model_type == 'rf':
                    model = ensemble.RandomForestClassifier()
                elif self.model_type == 'svm':
                    model = svm.SVC(probability = True)
                elif self.model_type == 'xgboost':
                    model = xgb.XGBClassifier(objective="binary:logistic")
                
                if  self.model_type != 'deeplearning':
                    model.fit(x_an_train,self.y_train[col_y])
                    try:
                        y_pred = numpy.c_[y_pred,model.predict_proba(x_an_test)[:,1]]
                    except:
                        y_pred = model.predict_proba(x_an_test)[:,1]
                
                if self.model_type == 'deeplearning':
                    model = Sequential()
                    model.add(Dense(128, activation='relu'))
                    # model.add(Dense(128, activation='softplus'))
                    # model.add(Dense(128, activation='tanh'))
                    # model.add(Dropout(0.5))
                    model.add(Dense(1, activation='sigmoid'))
                    model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
                    model.fit(numpy.array(x_an_train), numpy.array(self.y_train[col_y]),batch_size=32, nb_epoch=10, verbose=0)

                    try:
                        y_pred = numpy.c_[y_pred,model.predict_proba(x_an_test)[:,0]]
                    except:
                        y_pred = model.predict_proba(x_an_test)[:,0]
                try:
                    fpr, tpr, thresholds = metrics.roc_curve(self.y_test[:,col_y_i], y_pred, pos_label=1)
                    auc_res = metrics.auc(fpr, tpr)
                except:
                    fpr, tpr, thresholds = metrics.roc_curve(self.y_test[:,col_y_i], y_pred[:,y_pred.shape[1] - 1], pos_label=1)
                    auc_res = metrics.auc(fpr, tpr)

                self.mes[str(col_y_i) + '_auc'].iloc[0] += auc_res

                col_y_i += 1

                print(str(k_y) + ' - ' + str(self.prop_db) + ' - ' + str(i))
                k_y += 1

            y_dec = numpy.apply_along_axis(numpy.argmax,1,y_pred)
            kyd = 0
            for d in y_dec:
                self.mes['all_1'].iloc[0] += self.y_test[kyd,d]
                self.mes['all_0'].iloc[0] += 1 - self.y_test[kyd,d]
                self.mes[str(d) + '_1'] += self.y_test[kyd,d]
                self.mes[str(d) + '_0'].iloc[0] += 1 - self.y_test[kyd,d]

                kyd += 1

        self.mes.to_csv('C:/fake_news/model_outecomes_' + str(self.model_type) + '_' + str(self.prop_db) + '.csv',sep = ';',index = False)
Code Example #15
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
# truncate and pad input sequences
max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)
print("")


# -+-+-+-+-+-+-+- BUILDING MODEL -+-+-+-+-+-+-+-

print("BUILDING MODEL")
embedding_vecor_length = 32

input_layer = Embedding(top_words, embedding_vecor_length, input_length=max_review_length)
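# the same Embedding layer instance is shared by the three branches defined below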

branch_1 = Sequential()
branch_1.add(input_layer)
branch_1.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))

branch_3 = Sequential()
branch_3.add(input_layer)
branch_3.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
branch_3.add(MaxPooling1D(pool_size=2))
branch_3.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))

branch_4 = Sequential()
branch_4.add(input_layer)
branch_4.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
branch_4.add(MaxPooling1D(pool_size=2))
branch_4.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
Code Example #16
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)

labels = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
labels_to_idx = dict((name, idx) for idx, name in enumerate(labels))

all_image_labels = [labels_to_idx[pathlib.Path(path).parent.name] for path in all_image_paths]

train_val_imgs, test_imgs, train_val_labels, test_labels = train_test_split(all_image_paths, all_image_labels, test_size=0.10, random_state=21)
train_imgs, val_imgs, train_labels, val_labels = train_test_split(train_val_imgs, train_val_labels, test_size=0.1111, random_state=21)

base_model = keras.applications.vgg19.VGG19(weights=None, include_top=False, input_shape=(224,224,5))

inter = Sequential()
inter.add(base_model)
#inter.add(MaxPooling2D(pool_size=(7,7)))
inter.add(Flatten())
inter.add(Dropout(0.3))
inter.add(Dense(1024, kernel_regularizer=regularizers.l2(0.01)))
inter.add(Dense(512, kernel_regularizer=regularizers.l2(0.01)))
inter.add(Dense(50, activation='softmax'))
# x = MaxPooling2D(pool_size=(2,2))(x)
# x = Flatten()(x)
# x = Dense(1000)(x)
# x = Dense(500)(x)
# preds = Dense(50, activation='softmax')(x)
#
# final_model = Model(inputs=noise.input, outputs=preds)

# NOTE: part of this snippet is missing from the source; the opening of this augmentation
# generator is reconstructed from its use in flow_from_directory below
optimize=ImageDataGenerator(
        zoom_range=0.2,
        height_shift_range=0.2,
        width_shift_range=0.2,
        rescale=1./255)

optimize_test=ImageDataGenerator(
        rescale=1./255)

train_batches=optimize.flow_from_directory(train_path,target_size=(img_width,img_height),classes=['Normal','Pneumonia'],batch_size=batch_size,shuffle=True)
valid_batches=optimize.flow_from_directory(valid_path,target_size=(img_width,img_height),classes=['Normal','Pneumonia'],batch_size=12,shuffle=True)
test_batches=optimize_test.flow_from_directory(test_path,target_size=(img_width,img_height),classes=['Normal','Pneumonia'],batch_size=1170,shuffle=False)



#create model
model = Sequential()#add model layers

model.add(Conv2D(32,(3,3),activation='relu',padding='same',input_shape=(img_width,img_height,3)))
model.add(Conv2D(32,(3,3),activation='relu',padding='same'))
model.add(MaxPooling2D(pool_size=(2,2)))

model.add(Conv2D(64,(3,3),activation='relu',padding='same'))
model.add(MaxPooling2D(pool_size=(2,2)))
 
model.add(Flatten())
model.add(Dense(2, activation='softmax'))

model.summary() 
 
adam = optimizers.Adam(lr=0.001)
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
Code Example #18
for i in range(0, df.shape[0]):
    features.append(df['pixels'].values[i])

features = np.array(features)
features = features.reshape(features.shape[0], 224, 224, 3)
features.shape

features /= 255  #normalize in [0, 1]

train_x, test_x, train_y, test_y = train_test_split(
    features, target_classes,
    test_size=0.30)  #, random_state=42), stratify=target_classes)

#VGG-Face model
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
Code Example #19
File: rnn29.py Project: leopiel/mastersthesis
################################################################################################

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.7,
                              patience=2, min_lr=0.0001, verbose=1)


kernel_regularizer = regularizers.l2(0.0001)
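# model_29: a stack of strided convolutions over the (1107, 20, 1) input feeding a bidirectional LSTM with attention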

model_29 = Sequential([
    Conv2D(128, (3, 20), activation='relu', kernel_regularizer=kernel_regularizer,  border_mode='valid', input_shape=(1107, 20, 1)),
    Conv2D(128, (5, 1), strides=(3,1), activation='relu', kernel_regularizer=kernel_regularizer, border_mode='valid'),
    Conv2D(128, (5, 1), strides=(3,1), activation='relu',  kernel_regularizer=kernel_regularizer, border_mode='valid'),
    Conv2D(128, (5, 1), strides=(3,1), activation='relu',  kernel_regularizer=kernel_regularizer, border_mode='valid'),
    Conv2D(128, (5, 1), strides=(3,1), activation='relu',  kernel_regularizer=kernel_regularizer, border_mode='valid'),
    Conv2D(128, (5, 1), strides=(3,1), activation='relu',  kernel_regularizer=kernel_regularizer, border_mode='valid'),

    Reshape((-1, 128)),
    Bidirectional(LSTM(64, return_sequences=True)),
    AttentionWithContext(),
    Dense(3, activation='softmax')
])

model_29.summary()
print ("model_29 BUILT")

model_29.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
print ("model_29 COMPILED")


checkpoint = ModelCheckpoint(filepath='/models/model_29.hdf5', monitor='val_loss', save_best_only=True)
Code Example #20
y_train = to_categorical(y_train,
                         num_classes=num_classes)  ###_YOUR_CODE_GOES_HERE_###)
y_test = to_categorical(y_test,
                        num_classes=num_classes)  ###_YOUR_CODE_GOES_HERE_###)

# ## 2. Exercise part: model definition
#
# Next, initialise a Keras *Sequential* model and add three layers to it:
#
#     Layer: Add a *Dense* layer with in input_shape=(max_words,), 512 output units and "relu" activation.
#     Layer: Add a *Dropout* layer with dropout rate of 50%.
#     Layer: Add a *Dense* layer with num_classes output units and "softmax" activation.

# In[18]:

model = Sequential(
)  ###_YOUR_CODE_GOES_HERE_###  # Instantiate sequential model
model.add(
    Dense(512, input_dim=max_words, activation="relu")
)  ###_YOUR_CODE_GOES_HERE_###) # Add first layer. Make sure to specify input shape
model.add(Dropout(0.5))  ###_YOUR_CODE_GOES_HERE_###) # Add second layer
model.add(Dense(
    num_classes,
    activation="softmax"))  ###_YOUR_CODE_GOES_HERE_###) # Add third layer

# ## 3. Exercise part: model compilation
#
# As the next step, we need to compile our Keras model with a training configuration. Compile your model with "categorical_crossentropy" as loss function, "adam" as optimizer and specify "accuracy" as evaluation metric. NOTE: In case you get an error regarding h5py, just restart the kernel and start from scratch

# In[19]:

model.compile(optimizer="adam",
Code Example #21
BATCH_SIZE = 64
NUM_EPOCHS = 100

# lookup tables
print("building lookup tables...")
word2id = collections.defaultdict(lambda: 1)
word2id["PAD"] = 0
word2id["UNK"] = 1
for v, (k, _) in enumerate(word_freqs.most_common(VOCAB_SIZE - 2)):
    word2id[k] = v + 2
id2word = {v:k for k, v in word2id.items()}

# define autoencoder
print("defining autoencoder...")
autoencoder = Sequential()
autoencoder.add(Embedding(VOCAB_SIZE, EMBED_SIZE, input_length=SEQUENCE_LEN,
                          init="glorot_uniform",
                          name="encoder_word2emb"))
autoencoder.add(LSTM(LATENT_SIZE, name="encoder_lstm"))
autoencoder.add(RepeatVector(SEQUENCE_LEN, name="decoder_repeat"))
autoencoder.add(LSTM(EMBED_SIZE, return_sequences=True, name="decoder_lstm"))
autoencoder.add(TimeDistributed(Dense(1, activation="softmax"), 
                          name="decoder_emb2word"))
autoencoder.add(Reshape((SEQUENCE_LEN,), name="decoder_reshape"))

# display autoencoder model summary
for layer in autoencoder.layers:
    print(layer.name, layer.input_shape, layer.output_shape)
    
autoencoder.compile(optimizer="adam", loss="categorical_crossentropy")

# NOTE: the start of this snippet is missing from the source; only the tail of the
# training-data augmentation generator survives, so its opening is reconstructed here
datagen = ImageDataGenerator(
    horizontal_flip=True)

validgen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)
	
datagen.fit(x_train)
validgen.fit(x_test)

#Define the NN architecture
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten
#Two hidden layers
nn = Sequential()
nn.add(Conv2D(32, 3, 3, activation='relu', input_shape=input_shape))
nn.add(Conv2D(32, 3, 3, activation='relu'))
nn.add(MaxPooling2D(pool_size=(2, 2)))
nn.add(Conv2D(64, 3, 3, activation='relu'))
nn.add(Conv2D(64, 3, 3, activation='relu'))
nn.add(MaxPooling2D(pool_size=(2, 2)))
nn.add(Conv2D(128, 3, 3, activation='relu'))
nn.add(Flatten())
nn.add(Dense(256, activation='relu'))
nn.add(Dense(10, activation='softmax'))

#Model visualization
#We can plot the model by using the ```plot_model``` function. We need to install *pydot, graphviz and pydot-ng*.
#from keras.utils import plot_model
#plot_model(nn, to_file='nn.png', show_shapes=True)
Code Example #23
def build_part2_RNN(window_size, num_chars):
    model = Sequential()
    model.add(LSTM(200, input_shape=(window_size, num_chars)))
    model.add(Dense(num_chars))
    model.add(Activation('softmax'))
    return model
Code Example #24
File: NN2.py Project: sheda24/astro_github
def f_NN2(X_train, Y_train, X_test, Y_test, reuse):

    # LOADS THE SAMPLES UNPROCESSED
    # CNN ARE USED FOR VISUAL FEATURES DETECTION SO 
    # PROCESSING WOULD HARM THE APPEARANCE OF THE SPECTRA
    X_train = np.load('Data_files/X_train.npy', mmap_mode='r')
    X_test = np.load('Data_files/X_test.npy', mmap_mode='r')
    
    # CONVERTS THE TARGETS TO ONE HOT ENCODING VECTORS
    Y_train_ = keras.utils.to_categorical(Y_train)
    Y_test_ = keras.utils.to_categorical(Y_test)

    # RESHAPES THE INPUT TO BE 1 CHANNEL SAMPLES
    train_shape = (X_train.shape[0], X_train.shape[1], 1)
    test_shape = (X_test.shape[0], X_test.shape[1], 1)
    X_train_ = X_train.reshape(train_shape)
    X_test_ = X_test.reshape(test_shape)

    del X_train, X_test

    X_train_ = keras.utils.normalize(X_train_, axis=1)
    X_test_ = keras.utils.normalize(X_test_, axis=1)

    if reuse:
        path = 'Algos/NN_folder/NN2'
        with open(path+'_history.pkl', 'rb') as filehandler:
            history = pickle.load(filehandler)
        model = load_model(path+'.h5')
    else:
		# MODEL CONSTRUCTION
        model = Sequential()
        model.add(AveragePooling1D(pool_size=5))
        model.add(Conv1D(16, 3, activation='relu'))
        # model.add(BatchNormalization())
        model.add(Conv1D(32, 3, activation='relu'))
        model.add(Flatten())
        model.add(Dense(64, activation='relu'))
        # model.add(Dropout(0.25))
        # model.add(BatchNormalization())
        model.add(Dense(5, activation='softmax'))

        model.compile(optimizer='SGD',
                      loss='categorical_crossentropy',
                      metrics=['acc'])

        call_back = ModelCheckpoint('Algos/NN_folder/weights.{epoch:02d}'
                                    '-{val_loss:.2f}.h5',
                                    monitor='val_loss')
        # FIT THE MODEL TO THE TRAINING DATA FOR 20 EPOCHS
        # BY BATCHES OF 30 SAMPLES 
        # VERBOSE IS THE DEGREE OF INFORMATIONS OUTPUT IN
        # THE TERMINAL DURING TRAINING
        # CALLBACKS (MODELCHECKPOINT HERE) SAVES THE INTERMEDIATE 
        # STATES OF THE NETWORK TO BE ABLE TO RESTART TRAINING 
        # FROM LAST STATE
        model_history = model.fit(X_train_, Y_train_,
                                  epochs=20, batch_size=30,
                                  validation_data=(X_test_, Y_test_),
                                  verbose=1, callbacks=[call_back])
        history = model_history.history
    # PLOTS THE MODEL HISTORY
    plot_history(history)

    # PREDICTS ON THE TEST SET AND COMPUTES THE CONFUSION MATRIX
    expected = np.argmax(model.predict(X_test_), axis=1)
    conf_matrix = confusion_matrix(expected, Y_test)
    normalisation = np.sum(conf_matrix, axis=1, keepdims=True)
    conf_matrix = conf_matrix/normalisation
    print(conf_matrix)

    # SAVES THE MODEL AND ITS HISTORY
    path = 'Algos/NN_folder/NN2'
    model.save(path+'.h5')
    # keras.utils.plot_model(model, to_file='Images/architecture_NN2.png')
    with open(path+'_history.pkl', 'wb') as filehandler:
        pickle.dump(history, filehandler)
    print('Model saved')

    return conf_matrix
Code Example #25
# y_test[0:594]=0
# y_test[594:1188]=1
# y_test[1188:1782]=2
# y_test[1782:2376]=3
# y_test[2376:2970]=4
# y_test[2970:3564]=5


# convert class vectors to binary class matrices - this is for use in the
# categorical_crossentropy loss below
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
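# model_adam: an MLP with two relu hidden layers (128, 256) on the 39-dimensional inputs and a 6-way softmax output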


model_adam = Sequential()
model_adam.add(Dense(128, input_dim=39))
model_adam.add(Activation('relu'))
model_adam.add(Dense(256))
model_adam.add(Activation('relu'))
model_adam.add(Dense(6))
model_adam.add(Activation('softmax'))

''' Set up the optimizer '''
from keras.optimizers import SGD, Adam, RMSprop, Adagrad
# sgd = SGD(lr=0.01,momentum=0.0,decay=0.0,nesterov=False)

''' Compile model with specified loss and optimizer '''
model_adam.compile(loss='categorical_crossentropy',
				optimizer='Adam',
				metrics=['accuracy'])
Code Example #26
        For two or more inputs, we can use the Keras functional Model API.
    """
    """
        Layers:
        *Dense - regular densely connected neural network layer
        *Dropout - regularisation; randomly drops nodes with a given probability during training
        *Conv1D, Conv2D, Conv3D - convolution layers
        *LSTM - recurrent layer (Long Short-Term Memory)
        *GRU - recurrent layer (Gated Recurrent Unit)
        *MaxPooling2D - max pooling for a convolutional neural network
        *Flatten - reshapes the tensor into a 1-D vector with as many elements as the input tensor

    """

    # Training model from one input (CT scan picture)
    model_CT_lungs = Sequential()
    model_CT_lungs.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=(224, 224, 3)))
    model_CT_lungs.add(Conv2D(128, (3, 3), activation='relu'))
    model_CT_lungs.add(MaxPooling2D(pool_size=(2, 2)))
    model_CT_lungs.add(Dropout(0.25))

    model_CT_lungs.add(Conv2D(64, (3, 3), activation='relu'))
    model_CT_lungs.add(MaxPooling2D(pool_size=(2, 2)))
    model_CT_lungs.add(Dropout(0.25))

    model_CT_lungs.add(Conv2D(128, (3, 3), activation='relu'))
    model_CT_lungs.add(MaxPooling2D(pool_size=(2, 2)))
Code Example #27
onehotencoder = OneHotEncoder(sparse=False)
X = onehotencoder.fit_transform(X)
y = pd.get_dummies(y)

import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, PReLU

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.30,
                                                    random_state=42)
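
# MLP on the one-hot encoded inputs: two PReLU hidden blocks with dropout, then a 74-way softmax output layer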

classifier = Sequential()
classifier.add(
    Dense(units=250, kernel_initializer="random_normal", input_dim=47))
classifier.add(Dropout(0.2))
classifier.add(keras.layers.PReLU())
classifier.add(Dense(units=150, kernel_initializer="random_normal"))
classifier.add(Dropout(0.2))
classifier.add(keras.layers.PReLU())

classifier.add(
    Dense(units=74, kernel_initializer="random_normal", activation='softmax'))

classifier.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
classifier.fit(X_train, y_train, batch_size=32, epochs=250)
Code Example #28
    X_train.append(training_set_scaled[i-60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)

# Reshaping
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

# Part 2 - Building the RNN

from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout

# Initialising the RNN
regressor = Sequential()

# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(0.2))

# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))

# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))

# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50))
Code Example #29
    for i in range(len(X)):
        axes = ax[i]
        axes.imshow(X[i], aspect='auto')
        #axes.set_title("Angle" + str(y[i]))
        axes.axis('off')
    plt.show()


"""
 Setup the model architecture
"""

input_shape = (X_batch.shape[1], X_batch.shape[2], X_batch.shape[3])
pool_size = (2, 2)

model = Sequential()
model.add(
    Lambda(lambda x: x / 127.5 - 1.,
           input_shape=input_shape,
           output_shape=input_shape))

model.add(
    Convolution2D(24,
                  5,
                  5,
                  subsample=(2, 2),
                  border_mode="valid",
                  init="he_normal"))
model.add(ELU())

model.add(
Code Example #30
File: views.py Project: ImUjjwalMittal/collaboration
def index(request):

    back_period = 30 

    stock = yf.Ticker("AAPL")
    hist = stock.history(period='max', interval='1d')
    data = hist.filter(items=['Close'])
    dataset1=data.values

    stock = yf.Ticker("CIPLA.NS")
    hist = stock.history(period='max', interval='1d')
    data = hist.filter(items=['Close'])
    dataset2=data.values

    dataset=np.concatenate((dataset1,dataset2))

    scaler=MinMaxScaler()
    scaled_dataset=scaler.fit_transform(dataset)
    scaled_dataset1= scaled_dataset[0:dataset1.shape[0]]
    scaled_dataset2= scaled_dataset[dataset1.shape[0]:]


    data_x=[]
    data_y=[]
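
    # sliding windows: each sample is back_period consecutive scaled closes, the target is the next close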

    for i in range(back_period,scaled_dataset1.shape[0]):
        data_x.append(scaled_dataset1[i-back_period:i,:])
        data_y.append(scaled_dataset1[i,0])

    for i in range(back_period,scaled_dataset2.shape[0]):
        data_x.append(scaled_dataset2[i-back_period:i,:])
        data_y.append(scaled_dataset2[i,0])


    data_x=np.array(data_x)
    data_y=np.array(data_y)

    data_y =data_y.reshape(-1,1)

    training_size = math.ceil(data_x.shape[0]*0.7)

    data_x,data_y= shuffle(data_x,data_y,random_state=1)

    train_x= data_x[0:training_size,:]
    train_y= data_y[0:training_size,:]

    model = Sequential()
    model.add(LSTM(40, input_shape=(train_x.shape[1],1),return_sequences=True))
    model.add(LSTM(25))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(train_x, train_y, epochs=0, batch_size=1)

    test_x=data_x[training_size:,:]
    test_y=data_y[training_size:,:]
    test_predict= model.predict(test_x)
    test_predict= scaler.inverse_transform(test_predict)
    test_y= scaler.inverse_transform(test_y)

    error= np.sqrt(np.mean(((test_predict- test_y)**2)))


    stock = yf.Ticker("GOOGL")
    hist = stock.history(period='max', interval='1d')

    vol_data= hist['Volume']
    data = hist.filter(items=['Close'])
    dataset=data.values

    scaled_dataset= scaler.transform(dataset)

    training_size = math.ceil(dataset.shape[0]*0.7)
    train_data= data[0:training_size+back_period]
    test_data= data[training_size+back_period:]

    test_scaled_data = scaled_dataset[training_size:]
    test_x=[]

    for i in range(back_period,test_scaled_data.shape[0]):
        test_x.append(test_scaled_data[i-back_period:i,:])

    test_x=np.array(test_x)
    test_predict= model.predict(test_x)
    # test_predict = np.reshape(test_predict,(test_predict.shape[0],test_predict.shape[1]))

    test_predict= scaler.inverse_transform(test_predict)


    test_data['Predictions'] = test_predict

    fig = plt.figure(figsize=(15,12))
    plt.plot(train_data['Close'])
    plt.plot(test_data['Predictions'])
    plt.plot(test_data['Close'])
    buf = io.BytesIO()
    fig.savefig(buf,format='png')
    buf.seek(0)
    stri = base64.b64encode(buf.read())
    uri =  urllib.parse.quote(stri)
    return Response({'context' : uri})