Example No. 1
def create_model():
    model = Sequential()
    model.add(Conv2D(LAYER1_SIZE, activation="relu", kernel_size=(3, 3),
                     input_shape=(2, BOARD_SIZE, BOARD_SIZE),
                     data_format="channels_first",
                     kernel_regularizer=l2(L2_REGULARISATION),
                     padding='same'))
    model.add(Conv2D(LAYER1_SIZE, activation="relu", kernel_size=(3, 3),
                     data_format="channels_first",
                     kernel_regularizer=l2(L2_REGULARISATION),
                     padding='same'))
    model.add(MaxPooling2D((2, 2), data_format="channels_first"))
    model.add(Conv2D(LAYER1_SIZE * 2, activation="relu", kernel_size=(3, 3),
                     data_format="channels_first",
                     kernel_regularizer=l2(L2_REGULARISATION),
                     padding='same'))
    model.add(Conv2D(LAYER1_SIZE * 2, activation="relu", kernel_size=(3, 3),
                     data_format="channels_first",
                     kernel_regularizer=l2(L2_REGULARISATION),
                     padding='same'))
    model.add(MaxPooling2D((2, 2), data_format="channels_first"))
    model.add(Flatten())
    model.add(Dense(LAYER2_SIZE, activation='relu', kernel_regularizer=l2(L2_REGULARISATION)))
    model.add(Dense(1, activation='tanh'))

    optimizer = Adam(decay=DECAY, lr=LR)
    model.compile(loss='mse', optimizer=optimizer, metrics=['accuracy', 'mae'])
    model.summary()

    return model
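The snippet above relies on imports and hyperparameters defined elsewhere in the project. A minimal sketch of that context, with placeholder values (the constants are assumptions, not the original settings):

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.regularizers import l2
from keras.optimizers import Adam

BOARD_SIZE = 8            # e.g. an 8x8 game board
LAYER1_SIZE = 64          # filters in the first convolutional block
LAYER2_SIZE = 256         # units in the dense layer
L2_REGULARISATION = 1e-4  # weight-decay strength
LR = 1e-3                 # learning rate
DECAY = 1e-6              # learning-rate decay

model = create_model()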
Example No. 2
    def create(cls, tokenizer: Tokenizer, hidden: int, dropout: float) -> "LanguageModel":
        from keras import Sequential
        from keras.layers import LSTM, Dropout, Dense

        if tokenizer.vocabulary_size == 0:
            logging.warning("Creating a model using a codec with an empty vocabulary.")
        model = Sequential()
        model.add(LSTM(hidden, input_shape=(tokenizer.context_size, 1)))
        model.add(Dropout(dropout))
        model.add(Dense(tokenizer.vocabulary_size, activation="softmax"))
        model.compile(loss="categorical_crossentropy", optimizer="adam")
        return cls(model, tokenizer)
Example No. 3
def train_model(train_test_path):
    """
    Creates a model and performs training.
    """
    # Load train/test data
    train_test_data = np.load(train_test_path)
    x_train = train_test_data['X_train']
    y_train = train_test_data['y_train']

    print("x_train:", x_train.shape)
    print("y_train:", y_train.shape)

    del train_test_data

    x_train = np.expand_dims(x_train, axis=3)

    # Create network
    model = Sequential()
    model.add(Conv1D(128, 5, input_shape=x_train.shape[1:], padding='same', activation='relu'))
    model.add(MaxPooling1D(5))
    model.add(Conv1D(128, 5, padding='same', activation='relu'))
    model.add(MaxPooling1D(5))
    model.add(Dropout(0.5))

    model.add(Flatten())

    model.add(Dense(1024, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(len(language_codes), kernel_initializer='glorot_uniform', activation='softmax'))

    model_optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(loss='categorical_crossentropy', optimizer=model_optimizer, metrics=['accuracy'])

    # Train
    model.fit(x_train, y_train,
              epochs=10,
              validation_split=0.10,
              batch_size=64,
              verbose=2,
              shuffle=True)

    model.save(model_path)
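For reference, the train/test archive that train_model() loads can be produced with numpy.savez; a minimal sketch, with placeholder shapes and class count:

import numpy as np

X_train = np.random.rand(1000, 400).astype('float32')   # e.g. 1000 feature sequences of length 400
y_train = np.eye(8)[np.random.randint(0, 8, size=1000)]  # one-hot labels for 8 classes (placeholder)
np.savez('train_test.npz', X_train=X_train, y_train=y_train)
# train_model('train_test.npz')  # model_path and language_codes must also be defined in scope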
def build_model(spec, X_train):
    model = Sequential()
    # create first layer
    layer = spec[0]
    num_posts, bow_dim = X_train[0].shape
    model.add(InputLayer(input_shape=(num_posts, bow_dim)))
    model.add(Flatten())
    if 'none' in layer:
        model.add(Dense(  # input_dim=1,
            units=int(layer.split('none')[1]),
            activation=None
        ))
    elif 'relu' in layer:
        model.add(Dense(  # input_shape=train_X[0].shape,
            units=int(layer.split('relu')[1]),
            activation='relu'
        ))
    elif 'sig' in layer:
        model.add(Dense(  # input_shape=train_X[0].shape,
            units=int(layer.split('sig')[1]),
            activation='sigmoid'
        ))
    else:
        return None

    for layer in spec[1:]:
        if 'none' in layer:
            model.add(Dense(int(layer.split('none')[1]), activation=None))
        elif 'relu' in layer:
            model.add(Dense(int(layer.split('relu')[1]), activation='relu'))
        elif 'sig' in layer:
            model.add(Dense(int(layer.split('sig')[1]), activation='sigmoid'))
        elif 'drop' in layer:
            model.add(Dropout(float(layer.split('drop')[1]), seed=None))
        elif 'l1' in layer:
            model.add(ActivityRegularization(l1=float(layer.split('l1')[1])))
        elif 'l2' in layer:
            model.add(ActivityRegularization(l2=float(layer.split('l2')[1])))
        else:
            return None

    # add sigmoid output layer
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    return model
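The spec argument is a list of strings that encode layer type and size. A hypothetical call (the layer spec is made up for illustration):

spec = ['relu128', 'drop0.5', 'relu64']   # two hidden layers plus dropout; build_model adds the sigmoid output
model = build_model(spec, X_train)        # X_train is the bag-of-words array; X_train[0].shape gives (num_posts, bow_dim)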
Example No. 5
def create_lstm_model(num_features):
    model = Sequential()
    # "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE.
    # Note: In a situation where your input sequences have a variable length,
    # use input_shape=(None, num_features).
    # By setting return_sequences to True, return not only the last output but
    # all the outputs so far in the form of (num_samples, timesteps,
    # output_dim). This is necessary as TimeDistributed in the below expects
    # the first dimension to be the timesteps.
    model.add(RNN(HIDDEN_SIZE, input_shape=(None, num_features), return_sequences=True))

    # Apply a dense layer to every temporal slice of the input. For each step
    # of the output sequence, decide which character should be chosen.
    model.add(layers.TimeDistributed(layers.Dense(len(LABEL_CLASS_MAPPING) + 1)))
    model.add(layers.Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model
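create_lstm_model() depends on a few names defined elsewhere; plausible stand-ins (these are assumptions, not the original definitions):

from keras import layers
from keras.models import Sequential

RNN = layers.LSTM                                  # the recurrent cell the snippet calls RNN
HIDDEN_SIZE = 128                                  # size of the recurrent state
LABEL_CLASS_MAPPING = {'B': 0, 'I': 1, 'O': 2}     # label -> index; the +1 in the Dense layer leaves room for padding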
Example No. 6
import numpy as np
import tensorflow as tf

from keras import Sequential
from keras.layers import Dense

print("Tensorflow version", tf.__version__)

model1 = Sequential()
model1.add(Dense(10, input_shape=(1000,)))

model1.add(Dense(3, activation='relu'))
model1.compile('sgd', 'mse')


def gen():
    while True:
        yield np.zeros([10, 1000]), np.ones([10, 3])


import os
import psutil

process = psutil.Process(os.getpid())
g = gen()
while True:
    print(process.memory_info().rss / float(2 ** 20))
    model1.fit_generator(g, 100, 2, use_multiprocessing=True, verbose=0)
    model1.evaluate_generator(gen(), 100, use_multiprocessing=True, verbose=0)

from keras.layers import Dense, Flatten, Convolution2D, MaxPooling2D
from keras import Sequential
from keras_preprocessing.image import ImageDataGenerator

classifier = Sequential()
classifier.add(
    Convolution2D(32, 3, 3, input_shape=(32, 32, 3),
                  activation='relu'))  # relu = rectified linear unit
classifier.add(MaxPooling2D(pool_size=(2, 2)))  # pooling layer
classifier.add(Flatten())  # flatten the feature maps
classifier.add(Dense(64, activation='relu')
               )  # dense layer: the flattened features are the inputs to the fully connected network
classifier.add(Dense(1, activation='sigmoid'))  # outputs the probability of the positive class
classifier.compile(
    optimizer='adam', metrics=['accuracy'], loss='binary_crossentropy'
)  # Adam optimizer; with more than two classes use loss='categorical_crossentropy'
# the network layers are finished here
# to increase accuracy, add more layers or increase the dense units to 64, 128, or more
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_set = train_datagen.flow_from_directory(
    'C:/Users/hrith/Downloads/cat-and-dog/training_set/training_set',
    target_size=(32, 32),
    batch_size=25,
    class_mode='binary')
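The excerpt stops after building the training generator. A sketch of how training could continue (the test-set path and epoch count are assumptions):

test_set = test_datagen.flow_from_directory(
    'C:/Users/hrith/Downloads/cat-and-dog/test_set/test_set',  # assumed path
    target_size=(32, 32),
    batch_size=25,
    class_mode='binary')

classifier.fit_generator(train_set,
                         epochs=10,
                         validation_data=test_set)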
Example No. 8
    model.add(MaxPooling2D((3, 3)))
    model.add(Dropout(0.3))

    model.add(Flatten())

    model.add(Dense(128, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(Dense(64, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))

    model.add(Dense(10, activation='softmax'))

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=Adam(lr=0.002, epsilon=None),
                  metrics=['acc'])

    learning_history = model.fit_generator(train_generator,
                                           epochs=2000,
                                           validation_data=valid_generator,
                                           callbacks=[es, mc, reLR])

    # predict
    model.load_weights('best_cvision.h5')
    result += model.predict_generator(test_generator, verbose=True) / 40

    # save val_loss
    hist = pd.DataFrame(learning_history.history)
    val_loss_min.append(hist['val_loss'].min())
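The snippet references the callbacks es, mc and reLR without showing them; plausible definitions (hyperparameters are placeholders):

from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

es = EarlyStopping(monitor='val_loss', patience=30)
mc = ModelCheckpoint('best_cvision.h5', monitor='val_loss',
                     save_best_only=True, save_weights_only=True)
reLR = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10)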
Example No. 9
    def fitting(self):   
   
        timesteps = self.lags   # time steps
        features = 1    # features or channels (Volume)
        num_classes = 3  # 3 for categorical
        
        
        #data = np.random.random((1000, dim_row, dim_col))
        #clas = np.random.randint(3, size=(1000, 1))
        ##print(clas)
        #clas = to_categorical(clas)
        ##print(clas)
        data = self.X_train
        data_test = self.X_test
        print(data)
                
        data = data.values.reshape(len(data), timesteps, 1)
        data_test = data_test.values.reshape(len(data_test), timesteps, 1)
        print(data)
        
        clas = self.y_train
        clas_test = self.y_test 
        clas = to_categorical(clas)
        clas_test = to_categorical(clas_test)

        cat0 = self.y_train.tolist().count(0)
        cat1 = self.y_train.tolist().count(1)
        cat2 = self.y_train.tolist().count(2)
        
        print("may: ", cat1, "  ", "menor: ", cat2, " ", "neutro: ", cat0)
        
        n_samples_0 = cat0
        n_samples_1 = (cat1 + cat2)/2.0
        n_samples_2 = (cat1 + cat2)/2.0

        class_weight={
                0: 1.0,
                1: n_samples_0/n_samples_1,
                2: n_samples_0/n_samples_2}            
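        # Worked example of this weighting (illustrative numbers only): with cat0 = 600,
        # cat1 = 300 and cat2 = 100, n_samples_1 = n_samples_2 = 200, so classes 1 and 2
        # each get weight 600/200 = 3.0 while the majority class 0 keeps weight 1.0.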
        
        def class_1_accuracy(y_true, y_pred):
            # taken from: http://www.deepideas.net/unbalanced-classes-machine-learning/
            class_id_true = K.argmax(y_true, axis=-1)
            class_id_preds = K.argmax(y_pred, axis=-1)
            
            accuracy_mask = K.cast(K.equal(class_id_preds, 1), 'int32')
            class_acc_tensor = K.cast(K.equal(class_id_true, class_id_preds), 'int32') * accuracy_mask
            
            class_acc = K.sum(class_acc_tensor) / K.maximum(K.sum(accuracy_mask), 1)
            return class_acc
        
        
        class SecondOpinion(Callback):
            def __init__(self, model, x_test, y_test, N):
                self.model = model
                self.x_test = x_test
                self.y_test = y_test
                self.N = N
                self.epoch = 1
        
            def on_epoch_end(self, epoch, logs={}):
                if self.epoch % self.N == 0:
                    y_pred = self.model.predict(self.x_test)
                    pred_T = 0
                    pred_F = 0
                    for i in range(len(y_pred)):
                        if np.argmax(y_pred[i]) == 1 and np.argmax(self.y_test[i]) == 1:
                            pred_T += 1
                        if np.argmax(y_pred[i]) == 1 and np.argmax(self.y_test[i]) != 1:
                            pred_F += 1
                    if pred_T + pred_F > 0:
                        Pr_pos = pred_T/(pred_T + pred_F)
                        print("Yoe: epoch, Probabilidad pos: ", self.epoch, Pr_pos)
                    else:
                        print("Yoe Probabilidad pos: 0")
                self.epoch += 1
        
        
        
        
        
#################################################################################################################        
        model = Sequential()
        if self.nConv == 0:
            model.add(LSTM(units=self.lstm_nodes, return_sequences=True, activation='tanh', input_shape=(timesteps, features),
                           bias_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)))
        for i in range(self.nLSTM - 2):
            model.add(LSTM(units=self.lstm_nodes, return_sequences=True, activation='tanh'))
        model.add(LSTM(units=self.lstm_nodes, activation='tanh'))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes, activation='softmax')) # the dimension of index one will be considered to be the temporal dimension
        #model.add(Activation('sigmoid'))  # for loss = 'binary_crossentropy'
        
        # taking x[:, -1, :] removes the second dimension, keeping only the
        # LAST elements (-1) along that dimension:
        # Try this to see:
        # data = np.random.random((5, 3, 4))
        # print(data)
        # print(data[:, -1, :])  
        
#        model.add(Lambda(lambda x: x[:, -1, :], output_shape = [output_dim]))
        print(model.summary())
        
        tensorboard_active = False
        val_loss = False
        second_opinion = True
        callbacks = []
        if tensorboard_active:
            callbacks.append(TensorBoard(
                log_dir=self.putmodel + "Tensor_board_data",
                histogram_freq=0,
                write_graph=True,
                write_images=True))
        if val_loss:
            callbacks.append(EarlyStopping(
                monitor='val_loss', 
                patience=5))
        if second_opinion:
            callbacks.append(SecondOpinion(model, data_test, clas_test, 10))
        #model.compile(loss = 'categorical_crossentropy', optimizer='Adam', metrics = ['categorical_accuracy'])
        #model.compile(loss = 'binary_crossentropy', optimizer=Adam(lr=self.learning), metrics = ['categorical_accuracy'])
        model.compile(loss = 'categorical_crossentropy', optimizer='Adam', metrics = [class_1_accuracy])
                
        model.fit(x=data, 
                  y=clas,
                  batch_size=self.batch_size, epochs=800, verbose=2, 
                  callbacks = callbacks,
                  class_weight = class_weight)
                  #validation_data=(data_test, clas_test))
        
#####################################################################################################################
        
        # serialize model to YAML
        model_yaml = model.to_yaml()
        with open("model.yaml", "w") as yaml_file:
            yaml_file.write(model_yaml)
        # serialize weights to HDF5
        model.save_weights("model.h5")
        print("Saved model to disk")
        
#        # load YAML and create model
#        yaml_file = open('model.yaml', 'r')
#        loaded_model_yaml = yaml_file.read()
#        yaml_file.close()
#        loaded_model = model_from_yaml(loaded_model_yaml)
#        # load weights into new model
#        loaded_model.load_weights("model.h5")
#        print("Loaded model from disk")
#        loaded_model.compile(loss = 'categorical_crossentropy', optimizer='Adam', metrics = [class_1_accuracy])
#        
        print("Computing prediction ...")
        y_pred = model.predict_proba(data_test)
        
        model.reset_states()
        print("Computing train evaluation ...")
        score_train = model.evaluate(data, clas, verbose=2)
        print('Train loss:', score_train[0])
        print('Train accuracy:', score_train[1])

        model.reset_states()
#        score_train_loaded = loaded_model.evaluate(data, clas, verbose=2)
#        loaded_model.reset_states()
#        print('Train loss loaded:', score_train[0])
#        print('Train accuracy loaded:', score_train[1])

        print("Computing test evaluation ...")
        score_test = model.evaluate(data_test, clas_test, verbose=2)
        print('Test loss:', score_test[0])
        print('Test accuracy:', score_test[1])

        model.reset_states()
#        score_test_loaded = loaded_model.evaluate(data_test, clas_test, verbose=2)
#        loaded_model.reset_states()
#        print('Test loss loaded:', score_test[0])
#        print('Test accuracy loaded:', score_test[1])

        
        pred_T = 0
        pred_F = 0        
        for i in range(len(y_pred)):
            if np.argmax(y_pred[i]) == 1 and np.argmax(clas_test[i]) == 1:
                pred_T += 1
#                print(y_pred[i])
            if np.argmax(y_pred[i]) == 1 and np.argmax(clas_test[i]) != 1:
                pred_F += 1
        if pred_T + pred_F > 0:
            Pr_pos = pred_T/(pred_T + pred_F)
            print("Yoe Probabilidad pos: ", Pr_pos)
        else:
            print("Yoe Probabilidad pos: 0")
        
        history = DataFrame([[self.skip, self.nConv, self.nLSTM, 
                    self.learning, self.batch_size, 
                    self.conv_nodes, self.lstm_nodes, 
                    score_train[0], score_train[1], 
                    score_test[0], score_test[1]]], columns = ('Skip', 'cConv', 'nLSTM', 'learning', 
                                 'batch_size', 'conv_nodes', 'lstm_nodes', 
                                 'loss_train', 'acc_train', 'loss_test', 'acc_test'))
        self.history = self.history.append(history)
Example No. 10
class RLAgent(AgentInterface):
    """description of class"""
    def __init__(self,
                 actionSelector,
                 memoryFileName=None,
                 discountFactor=0.95):

        self.discountFactor = discountFactor
        self.model = Sequential()

        self.model.add(
            Dense(100,
                  input_shape=(42, ),
                  activation='relu',
                  kernel_initializer='zeros'))
        self.model.add(
            Dense(100, activation='relu', kernel_initializer='zeros'))
        self.model.add(
            Dense(100, activation='relu', kernel_initializer='zeros'))
        self.model.add(
            Dense(100, activation='relu', kernel_initializer='zeros'))
        self.model.add(Dense(7, kernel_initializer='zeros'))
        self.model.compile(sgd(lr=.2), 'mse')
        #self.model.compile( loss = 'mse', optimizer = 'adam', metrics = [ 'mae' ] )

        if memoryFileName:
            inputs, targets = self._prepare_memory(memoryFileName)
            self.model.train_on_batch(inputs, targets)

        self.reset()

        self.actionSelector = actionSelector
        self.memory = []

    @staticmethod
    def _prepare_memory(memoryFileName):
        memory = pickle.load(open(memoryFileName, 'rb'))
        memory_size = len(memory)
        inputs = np.zeros((memory_size, 42))
        targets = np.zeros((memory_size, 7))
        for i in range(memory_size):
            state, action, reward = memory[i]
            inputs[i] = state.board.reshape(1, -1)
            targets[i, action] = reward
        return inputs, targets

    def __del__(self):
        #self.save()
        pass

    def getAction(self):
        action = self.actionSelector.getAction(
            self.model.predict(self.board.asVector()))
        self.lastAction = action
        return action

    def _remember(self, state, action, reward):
        self.memory.append((state, action, reward))

    def save(self):
        t = datetime.now()
        file = open(
            'c:\\work\\memories_{}_{}.pkl'.format(t.date(),
                                                  t.time().microsecond), 'wb')
        pickle.dump(self.memory, file)

    def update(self, nextState, reward):
        return  # early return: the update logic below is currently skipped
        if self.lastAction is None:
            self.board = nextState.make_copy()
            return

        if reward == 0:  ## a reward of zero means this is not the last move, so just copy the board; a non-zero reward is the final move of the game and there is no discounting of future rewards
            #target                    = self.discountFactor * np.max( self.model.predict( nextState.asVector() ) )
            self.board = nextState.make_copy()
            return
        else:
            self._remember(self.board, self.lastAction, reward)

        #    target                    = reward
        #target_vec                    = self.model.predict( self.board.asVector() )[ 0 ]
        #target_vec[ self.lastAction ] = target
        #target_vec                    = target_vec.reshape( -1, 7 )

        #self.model.fit( self.board.asVector(), target_vec, epochs = 1, verbose = 0 )
        #return
        #if reward == 0:
        #    self.board = nextState.make_copy()

    def reset(self):
        self.board = Board()
        self.lastAction = None
Example No. 11
def neural_network():
	dataset = read_dataset('dataset','csv')
	df = normalize(dataset)
	y = np.array(df['DO%'])
	df.drop(columns='DO%',inplace=True)
	df.head()
	x = df.values
	folds = 2
	rmse_avg = []
	r2_avg = []
	nrmse_avg = []
	loss_avg = []
	folds = 1
	for i in range(folds):

		X_train,X_test,Y_train,Y_test = train_test_split(x,y,test_size = 0.2)
		print(X_train.shape,X_test.shape,Y_train.shape,Y_test.shape)
		model = Sequential()
		model.add(layers.Dense(256,activation = 'relu',input_shape = (X_train.shape[1],)))
		#model.add(layers.Dropout(0.3))
		model.add(layers.Dense(128,activation='relu'))
		model.add(layers.Dense(64,activation = 'relu'))
		#model.add(layers.Dropout(0.3))
		model.add(layers.Dense(32,activation = 'relu'))
		#model.add(layers.Dropout(0.3))
		model.add(layers.Dense(16, activation = 'relu'))
		model.add(layers.Dense(8, activation = 'relu'))


		model.add(layers.Dense(1,activation = 'linear'))

		sgd = keras.optimizers.SGD(lr =0.001,decay=1e-6,momentum = 0.9,nesterov = True)
		model.compile(optimizer = 'Adam',loss = 'mean_squared_error',metrics = ['mae', 'mse'])

		print(model.summary())

		count = 0
		history = model.fit(X_train,Y_train,epochs = 100,batch_size=64,validation_data = (X_test,Y_test))
		preds = model.predict(X_test)
		preds1 = model.predict(x)
		result = model.evaluate(X_test,Y_test) 
		loss = result[0]
		print(result)
		rmse_test = mean_squared_error(Y_test, preds)
		r2_test = r2_score(Y_test, preds)
		print("MSE of test set is {}".format(rmse_test))
		print("R score of test set is {}".format(r2_test))
		nrmse = cal_nrmse(Y_test,preds)
		print("nrmse of test set is {}".format(nrmse))
		loss_avg.append(loss)
		rmse_avg.append(rmse_test)
		r2_avg.append(r2_test)
		nrmse_avg.append(nrmse)
		with open('res_nn_paper.txt','a') as f:
			count = count + 1
			f.write("fold=%s\n" % str(count))
			f.write("rmse=%s\n" % str(rmse_test))
			f.write("loss=%s\n" % str(loss))
			f.write("r score=%s\n" % str(r2_test))
			f.write("nrmse=%s\n" % str(nrmse))
			f.write("-------------------------------------------------\n")
		#print("Mean_absolute_percentage_error of test set is",mean_absolute_percentage_error(Y_test,preds))

		plt.plot(history.history['mae'])
		plt.plot(history.history['val_mae'])
		plt.title('model mae')
		plt.ylabel('mae')
		plt.xlabel('epoch')
		plt.legend(['train', 'val'], loc='upper left')
		plt.show(block=False)
		plt.pause(1)
		plt.close()

		plt.plot(history.history['mse'])
		plt.plot(history.history['val_mse'])
		plt.title('model mse')
		plt.ylabel('mse')
		plt.xlabel('epoch')
		plt.legend(['train', 'val'], loc='upper left')
		plt.show(block=False)
		plt.pause(1)
		plt.close()

		plt.plot(history.history['loss'])
		plt.plot(history.history['val_loss'])
		plt.title('model loss')
		plt.ylabel('loss')
		plt.xlabel('epoch')
		plt.legend(['train', 'val'], loc='upper left')
		plt.show(block=False)
		plt.pause(1)
		plt.close()

		print('True 10 samples',Y_test[0:10])
		print('Predicted 10 samples',preds[0:10])
		arra = []
		arrak = []


		from tqdm import tqdm

		'''
		for i in tqdm(range(0,39988)):
		  arrak.append(i)  

		plt.gca().set_prop_cycle("color", ['blue','green'])
		plt.title('For DO% NN')
		plt.plot(np.array(arrak),y)
		plt.plot(np.array(arrak),np.array(preds1))
		plt.xlabel('Index')
		plt.ylabel('Values')
		plt.legend(['Normal data', ' Predicted Output'], loc='upper left')
		plt.show(block=False)
		plt.pause()
		plt.close()
		'''

	nrmse_avg = np.array(nrmse_avg)
	r2_avg = np.array(r2_avg)
	rmse_avg = np.array(rmse_avg)
	loss_avg = np.array(loss_avg)

	print("RMSE is {}".format(rmse_avg.mean()))
	print("R score is {}".format(r2_avg.mean()))
	print("loss_avgis{}".format(loss_avg.mean()))
	print("nrmse is {}".format(nrmse_avg.mean()))
Example No. 12
classifier.add(Conv2D(128, kernel_size=3, padding="same", activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Dropout(0.25))

classifier.add(Conv2D(256, kernel_size=3, padding="same", activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Dropout(0.25))

classifier.add(Flatten())
classifier.add(Dense(256, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(10, activation='softmax'))

# Compiling the ANN
classifier.compile(loss='sparse_categorical_crossentropy',
                   optimizer="Adam",
                   metrics=['accuracy'])

# Fitting the ANN to the Training set
classifier.fit(train_feature, train_y, batch_size=1, epochs=100)

# Part 3 - Making predictions and evaluating the classifier

# Predicting the Test set results
y_pred = classifier.predict(test_feature)
results = np.argmax(y_pred, axis=1)

data_out = pd.DataFrame({'id': range(1, 10001), 'label': results})
data_out.to_csv('submission.csv', index=None)
Example No. 13
    cls = Sequential()

    cls.add(
        Dense(100,
              input_dim=SIZE,
              activation='relu',
              kernel_initializer='random_uniform'))
    cls.add(
        Dense(len(y_train[0]),
              activation='softmax',
              kernel_initializer='random_uniform'))

    opt = Adam(lr=INIT_LR)

    cls.compile(loss="categorical_crossentropy",
                optimizer=opt,
                metrics=["accuracy"])

    cls.fit(X_train, y_train, epochs=EPOCHS, steps_per_epoch=10, verbose=0)

    inferenceTest = cls.predict(X_test)
    gt = y_test

    correct = 0

    for t in range(len(gt)):
        if (get_highest(inferenceTest[t]) == gt[t]).all():
            correct += 1

    #print correct
    score = correct / (len(gt) * 1.0)
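get_highest() is not shown; since its result is compared element-wise against a one-hot ground-truth row, it plausibly converts a probability vector into a one-hot argmax vector. A sketch under that assumption:

import numpy as np

def get_highest(probabilities):
    # one-hot vector with a 1 at the argmax position (assumed helper)
    one_hot = np.zeros_like(probabilities)
    one_hot[np.argmax(probabilities)] = 1
    return one_hot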
Example No. 14
def lstm():
	dataset = read_dataset('dataset','csv')
	df = normalize(dataset)
	y = np.array(df['DO%'])
	df.drop(columns='DO%',inplace=True)
	df.head()
	x = df.values
	count = 0
	rmse_avg = []
	r2_avg = []
	nrmse_avg = []
	loss_avg = []
	folds = 5
	my_learning_rate = 0.01
	for i in range(folds):
		model = Sequential()
		X_train,X_test,Y_train,Y_test = train_test_split(x,y,test_size = 0.2)
		print(X_train.shape,X_test.shape,Y_train.shape,Y_test.shape)

		es = keras.callbacks.EarlyStopping(monitor='val_loss',patience=10, verbose=1, mode='auto',restore_best_weights = True)
		X_train=X_train.reshape(X_train.shape[0],X_train.shape[1],1)
		X_test = X_test.reshape(X_test.shape[0],X_test.shape[1],1)
		model.add(layers.LSTM(1024,input_shape = (X_train.shape[1],1)))
		#model.add(layers.Dense(2048,activation = 'relu'))
		#model.add(layers.Dropout(0.3))
		#model.add(layers.Dense(1024,activation = 'relu'))
		#model.add(layers.Dropout(0.3))
		model.add(layers.Dense(512, activation = 'relu'))
		model.add(layers.Dense(256,activation='relu'))
		model.add(layers.Dense(128,activation='relu'))
		#model.add(layers.Dropout(0.3))
		model.add(layers.Dense(64,activation = 'relu'))
		#model.add(layers.Dropout(0.3))
		model.add(layers.Dense(32,activation = 'relu'))
		#model.add(layers.Dropout(0.3))
		model.add(layers.Dense(16, activation = 'relu'))
		model.add(layers.Dense(8, activation = 'relu'))


		model.add(layers.Dense(1,activation = 'linear'))

		sgd = keras.optimizers.SGD(lr =0.001,decay=1e-6,momentum = 0.9,nesterov = True)
		model.compile(optimizer=tf.keras.optimizers.Adam(lr=my_learning_rate),loss="mean_squared_error",metrics=[tf.keras.metrics.MeanSquaredError()])

		print(model.summary())


		history = model.fit(X_train,Y_train,epochs = 150,batch_size =64,validation_data = (X_test,Y_test),callbacks=[es])
		preds = model.predict(X_test)
		result = model.evaluate(X_test,Y_test) 
		loss = result[0]
		print(result)
		rmse_test = mean_squared_error(Y_test, preds)
		r2_test = r2_score(Y_test, preds)
		print("MSE of test set is {}".format(rmse_test))
		print("R score of test set is {}".format(r2_test))
		nrmse = cal_nrmse(Y_test,preds)
		print("nrmse of test set is {}".format(nrmse))
		loss_avg.append(loss)
		rmse_avg.append(rmse_test)
		r2_avg.append(r2_test)
		nrmse_avg.append(nrmse)
		with open('res_lstm_paper.txt','a') as f:
			count = count + 1
			f.write("fold=%s\n" % str(count))
			f.write("rmse=%s\n" % str(rmse_test))
			f.write("loss=%s\n" % str(loss))
			f.write("r score=%s\n" % str(r2_test))
			f.write("nrmse=%s\n" % str(nrmse))
			f.write("-------------------------------------------------\n")
		#print("Mean_absolute_percentage_error of test set is",mean_absolute_percentage_error(Y_test,preds))

		# plt.plot(history.history['mae'])
		# plt.plot(history.history['val_mae'])
		# plt.title('model mae')
		# plt.ylabel('mae')
		# plt.xlabel('epoch')
		# plt.legend(['train', 'val'], loc='upper left')
		# plt.show(block=False)
		# plt.pause(3)
		# plt.close()

		plt.plot(history.history["mean_squared_error"])
		plt.plot(history.history["val_mean_squared_error"])
		plt.title('model mse')
		plt.ylabel('mse')
		plt.xlabel('epoch')
		plt.legend(['train', 'val'], loc='upper left')
		plt.show(block=False)
		plt.pause(3)
		plt.close()

		plt.plot(history.history['loss'])
		plt.plot(history.history['val_loss'])
		plt.title('model loss')
		plt.ylabel('loss')
		plt.xlabel('epoch')
		plt.legend(['train', 'val'], loc='upper left')
		plt.show(block=False)
		plt.pause(3)
		plt.close()

	nrmse_avg = np.array(nrmse_avg)
	r2_avg = np.array(r2_avg)
	rmse_avg = np.array(rmse_avg)
	loss_avg = np.array(loss_avg)

	print("RMSE is {}".format(rmse_avg.mean()))
	print("R score is {}".format(r2_avg.mean()))
	print("loss_avgis{}".format(loss_avg.mean()))
	print("nrmse is {}".format(nrmse_avg.mean()))
Example No. 15
trainDATA = trainDS[:,1:]
trainLABLE = trainDS[:,0]

print("Start training!")
print("{} secondes have passed. ".format(str(time.time() - startTime)))


np.random.seed(10)

myModel = Sequential()

myModel.add(Dense(units=64,input_dim=39, activation='relu'))
myModel.add(Dense(units=8, activation='relu'))
myModel.add(Dense(1, activation='sigmoid'))

myModel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

myModel.fit(x=trainDATA, y=trainLABLE, epochs=NUM_EPOCH, batch_size=64)

# Save the trained model
myModel_json = myModel.to_json()
with open(modelDIR + '/' + modelFILE, 'w') as json_file:
    json_file.write(myModel_json)
myModel.save_weights(modelDIR + '/' + weightFILE)
print("The trained model has been saved.")


print("Start testing!")
print("Time point 3 is " + str(time.time() - startTime))

# Load the model
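The excerpt is cut off after the "# Load the model" comment. A sketch of a loading step that mirrors the JSON/weights save above (not necessarily the original code):

from keras.models import model_from_json

with open(modelDIR + '/' + modelFILE, 'r') as json_file:
    loadedModel = model_from_json(json_file.read())
loadedModel.load_weights(modelDIR + '/' + weightFILE)
loadedModel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])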
Example No. 16
def train(datapath,layer):
    train_cycle,c_c,threshold,activate,pool,imgpath,row,col,bands,value,dtype,dtypevalue=extract()
    values = []
    c_l={}
    path=datapath
    c=0
    for add in path:
        c=int(c)+1
        print("{} class {} ".format(add,c))
        c_l[add]=c
    clicks={}

    for address in path:
        with open(address, "rb") as f:
            k = len(f.read())
            clicks[address] = (k // 2 // bands) if (k // 2 // bands) < 400 else (k // 2 // bands) // 4
            print('{} ==> {}'.format(address, clicks[address]))

    for address in path:
        with open(address, "rb") as f:
            b = array.array("H")
            b.fromfile(f, clicks[address]*bands)
            if sys.byteorder == "little":
                b.byteswap()
            for v in b:
                values.append(v)

    ll = (len(values))
    rex = ll // bands
    print(ll, rex)

    f_in = np.zeros([ll], dtype)
    x = 0
    for i in range(ll):
        f_in[x] = values[i]
        x += 1

    y_train = np.zeros([rex], dtype)

    mark = 0
    for add in path:
        for i in range(clicks[add]):
            y_train[mark+i] = c_l[add]
        mark = mark + clicks[add]


    x_train = f_in.reshape(rex, bands)

    seed = 6
    np.random.seed(seed)

    x_train = x_train / (2**(dtypevalue-1))
    num_pixels = bands

    for v in y_train:
        print(v, end=" ")

    y_train = np_utils.to_categorical(y_train)
    n_classes = c_c
    print(x_train)
    print(20*'#')
    print(y_train)

    print(x_train.shape)
    print(y_train.shape)

    X = x_train.reshape(x_train.shape[0], bands, 1)

    n_units=128
    n_classes=c_c
    batch_size=50
    j=3
    t=int(len(layer))-1
    model = Sequential()
    for i in range(0,len(layer)):
        if(layer[i]=="Convolution"):
            if(i==0):
                model.add(Conv1D(2 ** j, 2, activation=activate, padding='same', input_shape=[bands, 1]))
            else:
                model.add(Conv1D(2 ** j, 2, activation="relu", padding='same'))
            j=j+1
        elif(layer[i]=="MaxPooling"):
            model.add(MaxPooling1D(2))
        elif(layer[i]=="AveragePooling"):
            model.add(AveragePooling1D(2))
        elif(layer[i]=="LSTM"):
            if(i==0):
                model.add(LSTM(2**j,return_sequences=False, input_shape=(bands ,1)))
            elif(i==t):
                model.add(LSTM(2**(j-1)))
            else:
                model.add(LSTM(2**j, return_sequences=True))
            j=j+1

    model.add(Dense(n_classes, activation='sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    #model.fit(X, y_train, batch_size=10, epochs=10)
    model.fit(X, y_train, batch_size=50, epochs=train_cycle)

    return model,c_c,activate,threshold
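A hypothetical call to train(): the raw data files and the layer spec are made up for illustration, and ending the spec with an LSTM (no return_sequences) gives the 2-D output the final Dense layer expects:

model, n_classes, activate, threshold = train(
    ['data/class_water.raw', 'data/class_forest.raw'],   # assumed raw sample files, one per class
    ['Convolution', 'MaxPooling', 'LSTM', 'LSTM'])       # Conv1D -> pooling -> stacked LSTMs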
Example No. 17
test_x = data[:10000]
test_y = targets[:10000]
train_x = data[10000:]
train_y = targets[10000:]

model = Sequential()
model.add(layers.Dense(50, activation="relu", input_shape=(idim,)))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(50, activation="relu"))
model.add(layers.Dropout(0.45))
model.add(layers.Dense(50, activation="relu"))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(1, activation="sigmoid"))

model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
history = model.fit(train_x, train_y, epochs=5, batch_size=3000, validation_data=(test_x, test_y))

plot_single_history(history.history)
plot.show()

results = model.evaluate(test_x, test_y)
print(results)

goodFiles = [
    "good/1.txt"
]
badFiles = [
    "bad/1.txt"
]
Example No. 18
    return sample_X, sample_Y


#this_x, this_y = gen_sample(0)

model = Sequential()
#model.add(LSTM(276, input_shape = (6,69), return_sequences = True))
#model.add(LSTM(276, input_shape = (6,69), return_sequences = True))
model.add(LSTM(276, input_shape=(6, 69), return_sequences=True))
#model.add(LSTM(138, input_shape = (6,69), return_sequences = True))
model.add(LSTM(207, input_shape=(6, 69)))
model.add(tf.keras.layers.Dense(138, activation='relu'))
#model.add(tf.keras.layers.Dense(69, activation='swish'))
model.add(tf.keras.layers.Dense(69, activation='softmax'))

model.compile(optimizer=keras.optimizers.Adagrad(lr=.0001),
              loss="categorical_crossentropy")
model.summary()

test_1 = [9, 36, 49, 56, 62, 9]
#june 27
test_2 = [15, 28, 52, 53, 63, 18]
#july 1
test_3 = [16, 21, 27, 60, 61, 6]
#july4
test_4 = [3, 10, 34, 36, 62, 5]
#july 8
test_5 = [14, 19, 61, 62, 64, 4]
#july11
test_6 = [27, 47, 61, 62, 69, 4]
#july15
test_7 = [13, 16, 32, 58, 59, 9]
def getModel():
    print("Using", __name__)
    input("Press enter to continue...")

    from keras.layers import Conv2D, Dense, MaxPool2D, Activation, Flatten, Reshape, BatchNormalization, Dropout
    from keras.initializers import VarianceScaling
    from keras.optimizers import Adam
    from keras import Sequential

    model = Sequential()

    model.add(
        Conv2D(filters=32,
               kernel_size=(5, 5),
               strides=(5, 5),
               kernel_initializer=VarianceScaling(),
               input_shape=(INPUT_WIDTH, INPUT_HEIGHT, INPUT_CHANNELS)))

    model.add(Activation('relu'))

    model.add(
        Conv2D(filters=64,
               kernel_size=(2, 2),
               kernel_initializer=VarianceScaling()))

    model.add(Activation('relu'))

    model.add(
        Conv2D(filters=256,
               kernel_size=(2, 2),
               strides=(2, 2),
               kernel_initializer=VarianceScaling()))

    model.add(Activation('relu'))

    model.add(Flatten())

    model.add(
        Dense(
            units=128,
            activation='relu',
            kernel_initializer=VarianceScaling(),
        ))

    model.add(Dropout(0.3))

    model.add(
        Dense(
            units=64,
            activation='relu',
            kernel_initializer=VarianceScaling(),
        ))

    model.add(Dropout(0.3))

    model.add(
        Dense(
            units=OUTPUT_LENGTH,
            activation='softmax',
            kernel_initializer=VarianceScaling(),
        ))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.00001),
                  metrics=['accuracy'])
    model.summary()
    return model
Example No. 20
model.add(
    Conv2D(64, (3, 3),
           activation='relu',
           padding='same',
           input_shape=(32, 32, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
filepath = "best.hd5f"
callback = ModelCheckpoint(filepath,
                           monitor='val_loss',
                           verbose=0,
                           save_best_only=True)
history_callback = model.fit(X_train,
                             Y_train,
                             batch_size=128,
                             epochs=30,
                             validation_split=0.1,
                             callbacks=[callback])
model.load_weights("best.hd5f")
Y_test = np.argmax(model.predict(X_test), axis=1)
with open(outputfilename, "w") as w:
Example No. 21
    def createRegularizedModel(self, inputs, outputs, hiddenLayers,
                               activationType, learningRate):
        bias = True
        dropout = 0
        regularizationFactor = 0.01
        model = Sequential()
        if len(hiddenLayers) == 0:
            model.add(
                Dense(self.output_size,
                      input_shape=(self.input_size, ),
                      kernel_initializer='lecun_uniform',
                      use_bias=bias))
            model.add(Activation("linear"))
        else:
            if regularizationFactor > 0:
                model.add(
                    Dense(hiddenLayers[0],
                          input_shape=(self.input_size, ),
                          kernel_initializer='lecun_uniform',
                          kernel_regularizer=l2(regularizationFactor),
                          use_bias=bias))
            else:
                model.add(
                    Dense(hiddenLayers[0],
                          input_shape=(self.input_size, ),
                          kernel_initializer='lecun_uniform',
                          use_bias=bias))

            if (activationType == "LeakyReLU"):
                model.add(LeakyReLU(alpha=0.01))
            else:
                model.add(Activation(activationType))

            for index in range(1, len(hiddenLayers)):
                layerSize = hiddenLayers[index]
                if regularizationFactor > 0:
                    model.add(
                        Dense(layerSize,
                              kernel_initializer='lecun_uniform',
                              kernel_regularizer=l2(regularizationFactor),
                              use_bias=bias))
                else:
                    model.add(
                        Dense(layerSize,
                              kernel_initializer='lecun_uniform',
                              use_bias=bias))
                if (activationType == "LeakyReLU"):
                    model.add(LeakyReLU(alpha=0.01))
                else:
                    model.add(Activation(activationType))
                if dropout > 0:
                    model.add(Dropout(dropout))
            model.add(
                Dense(self.output_size,
                      kernel_initializer='lecun_uniform',
                      use_bias=bias))
            model.add(Activation("linear"))
        optimizer = optimizers.RMSprop(lr=learningRate, rho=0.9, epsilon=1e-06)
        model.compile(loss="mse", optimizer=optimizer)
        model.summary()
        return model
Example No. 22
                        Conv1D(name="conv0",
                               filters=4,
                               kernel_size=k,
                               strides=s,
                               padding=p,
                               data_format=f,
                               dilation_rate=d,
                               activation="sigmoid",
                               use_bias=True,
                               input_shape=input_shape))
                    #model.add(Activation(name="out", activation='sigmoid'))

                    opt = keras.optimizers.RMSprop(learning_rate=0.0001,
                                                   decay=1e-6)
                    model.compile(loss='binary_crossentropy',
                                  optimizer=opt,
                                  metrics=['accuracy'])

                    print('Saving model details')

                    save_model_details(model, prefix=name, out_dir=OUT_DIR)

                    exp_out = model.predict(features)

                    print("Input = ", str(features.shape), ", Out = ",
                          str(exp_out.shape), " case - ", name)

                    print('Saving model outputs')
                    save_model_output(model,
                                      features,
                                      exp_out,
Example No. 23
"""### **Treina a Rede Neural**"""

# training

#Gradient Descent (SGD) optimization
#otimizador = SGD(lr=0.001, decay=1e-6, momentum=0.9) #decay=1e-6,
#otimizador = RMSprop()
#otimizador = SGD()
#otimizador = Adam()
otimizador = Nadam()

#Configure the model for training
#RN.compile(optimizer = sgd, loss = 'mean_squared_error', metrics = ['accuracy', 'mean_squared_error'])
RN.compile(optimizer = otimizador, loss = 'mean_absolute_percentage_error', #tf.keras.losses.MeanAbsolutePercentageError(), #tf.keras.losses.MeanAbsoluteError(), #tf.keras.losses.MeanSquaredError(), 
           metrics = ['mean_absolute_error',  #tf.keras.metrics.RootMeanSquaredError(), 
                      'mean_squared_error', 'mean_absolute_percentage_error','accuracy' ])


# Train the model for a certain number of epochs
trainedRN = RN.fit(X_train_normalized,y_train, epochs = 350, verbose = 0)

"""# **Métricas de Avaliação**

### **Métricas Calculadas Automaticamente pelo Keras**
"""

trainedRN.model.metrics_names

import math
score = trainedRN.model.evaluate(X_test_normalized, y_test, verbose = 0)
    # embedded = Embedding(input_dim=max_features, output_dim=num_features, input_length=maxlen, weights=[W], trainable=False) (sequence)
    model.add(Dropout(0.25))

    # bi-directional LSTM
    # hidden = Bidirectional(LSTM(hidden_dim//2, recurrent_dropout=0.25)) (embedded)

    # bi-directional GRU
    model.add(Bidirectional(GRU(hidden_dim // 2, recurrent_dropout=0.25)))

    # A bidirectional recurrent neural network (Bi-RNN) is made up of two recurrent layers
    # that receive the same input but pass information in opposite directions

    model.add(Dense(2, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])
    print('plot model...')

    plot_model(model,
               to_file='imdb_bidirectional_lstm.png',
               show_shapes=True,
               show_layer_names=True)  # visualize the network

    history = model.fit(X_train,
                        y_train,
                        validation_data=[X_dev, y_dev],
                        batch_size=batch_size,
                        epochs=nb_epoch,
                        verbose=2)
    y_pred = model.predict(X_test, batch_size=batch_size)
Example No. 25
class RNN():
    def __init__(self, model=None):

        print("Setting up model...\n")

        if model is None:

            # setting up a sequential model
            self.model = Sequential()

            # set up a long short-term memory (LSTM) layer with 32 units; its input shape is
            # the substring length by the number of unique characters
            self.model.add(
                LSTM(32,
                     return_sequences=False,
                     input_shape=(length, len(chars))))

            # a 10% dropout layer helps keep the model from overfitting in the long run
            self.model.add(Dropout(0.1))

            self.model.add(Dense(512))
            # self.model.add(Dropout(0.3))

            # this final layer has one unit per unique character;
            # it outputs a probability for what the next character should be,
            # using a softmax activation to normalize the vector into probabilities that sum to 1
            self.model.add(Dense(len(chars), activation='softmax'))

            # we are going to use Adam optimizer on the model, with a learning rate of 0.01
            optimizer = Adam(learning_rate=0.01)
            self.model.compile(loss='categorical_crossentropy',
                               optimizer=optimizer)

        else:
            # in case the user wants to load a previous model
            print("Loading model...\n")
            self.model = load_model(model)
            print("\nModel loaded!\n")

        self.model.summary()

    def train(self):

        print("\nSetting up the data...\n")

        # first, shuffle the files
        random.shuffle(data)

        # the model name will always be unique because we attach the current time to it
        # each model is named based on the substring length, batch size, and epoch size
        # this is used for setting up the program later when loading the model
        NAME = "Model_" + str(int(time.time())) + "x" + str(
            length) + "x" + str(batch_size) + "x" + str(epoch_size) + ".h5"

        # Although we could pass the full epoch count when fitting the model,
        # we loop over the files manually. Extracting every file's data into one
        # array at once could run out of memory; looping lets us discard each file's
        # data after it has been used, which also keeps the runtime down.
        for eps in range(epoch_size):
            for filename in data:

                subtext = []
                target = []

                print("\nExtracting text from " + filename + "...\n")

                data_file = codecs.open("./Dataset/Converted/" + filename, "r",
                                        "utf-8")

                lines = ""
                # this flag indicates that we have reached the line where the compressed data starts
                start_reading = 0

                for line in data_file:
                    if ("START READING" in line):
                        start_reading = 1
                    elif (start_reading == 1):
                        lines = lines + line

                # break the text into substrings; the target character the model
                # tries to predict is the character immediately after each substring

                for i in range(0, len(lines) - length, 1):
                    subtext.append(lines[i:i + length])
                    target.append(lines[i + length])

                # LSTM cell requires a 3D matrix as an input shape
                x = np.zeros((len(subtext), length, len(chars)), dtype=bool)
                y = np.zeros((len(subtext), len(chars)), dtype=bool)
                for i, text in enumerate(subtext):
                    for t, char in enumerate(text):
                        if char in chars:
                            x[i, t, chars.index(char)] = 1
                    if target[i] in chars:
                        y[i, chars.index(target[i])] = 1

                # TensorBoard callback is used to visuals the process of our model training,
                # we can use this to better understand and debug our model
                tb_callback = TensorBoard(log_dir='logs/' +
                                          NAME[:len(NAME) - 3])

                # checkpoint call back is used for backup
                cp_callback = ModelCheckpoint(filepath='./Models/CP_' + NAME,
                                              monitor='loss',
                                              save_best_only=True,
                                              save_weights_only=False,
                                              mode='auto',
                                              save_freq='epoch')

                # it is worth stopping training early if the loss is not improving at all
                es_callback = EarlyStopping(monitor='loss',
                                            min_delta=0,
                                            patience=3,
                                            verbose=0,
                                            mode='auto')

                self.model.fit(
                    x,
                    y,
                    epochs=1,
                    batch_size=batch_size,
                    callbacks=[cp_callback, tb_callback, es_callback])
                save_model(self.model, './Models/' + NAME)

    def generate(self, size, diversity):

        print("\nGenerating music...\n")

        # this is a general text that will be added at the start of the text file
        # the trained model would generate a compressed text of the music data ...
        # but not the arbitrary information such as the title or music instrument type

        generated = "0, 0, Header, 1, 2, 480\n"
        generated = generated + "1, 0, Start_track\n"
        generated = generated + "1, 0, Title_t, Generated Music\n"
        generated = generated + "1, 0, Text_t, Sample for MIDIcsv Distribution\n"
        generated = generated + "1, 0, Copyright_t, This file is in the public domain\n"
        generated = generated + "1, 0, Time_signature, 4, 2, 24, 8\n"
        generated = generated + "1, 0, Tempo, 500000\n"
        generated = generated + "1, 0, Instrument_name_t, A-01\n"
        generated = generated + "1, 0, End_track\n"
        generated = generated + "2, 0, Start_track\n"
        generated = generated + "START READING\n"
        '''sentence = ''
        for i in range(length-1):
            sentence = sentence + " "
        sentence = sentence + random.choice(chars)'''

        # shuffle the data and pick a random text file as our starting point
        random.shuffle(data)
        filename = data[0]

        data_file = codecs.open("./Dataset/Converted/" + filename, "r",
                                "utf-8")

        lines = ""
        start_reading = 0

        for line in data_file:
            if ("START READING" in line):
                start_reading = 1
            elif (start_reading == 1):
                lines = lines + line

        # choose a random index in the text file as the starting point of the substring
        rnd_index = random.randint(0, len(lines) - length - 1)
        sentence = lines[rnd_index:rnd_index + length]
        generated += sentence

        for i in range(size):
            # creating a prediction matrix
            x_pred = np.zeros((1, length, len(chars)))

            # set up the prediction matrix based on the random substring that we got
            for t, char in enumerate(sentence):
                x_pred[0, t, chars.index(char)] = 1

            # let the model predict a vector of unique characters
            preds = self.model.predict(x_pred, verbose=0)[0]

            # sample the next character
            next_index = sample(preds, diversity)
            next_char = chars[next_index]

            # add the predicted character to the string and shift the input substring by one ...
            # while including the next predicted character
            generated += next_char
            sentence = sentence[1:] + next_char

        file = codecs.open(
            "./Generated/Text/generatedx" + str(size) + "x" + str(diversity) +
            ".txt", "w", "utf-8")
        file.write(generated)
        file.close()

        # automatically generate a MIDI file using our decompressor program
        Convert.run().create_mid("./Generated/Text/generatedx" + str(size) +
                                 "x" + str(diversity) + ".txt")

        print("\nGenerated midi file can be found at " +
              "/Generated/Midis/decomprssed_generatedx" + str(size) + "x" +
              str(diversity) + ".MID \n")
Example No. 26
train_data = train_data.astype('float')
test_data = test_data.astype('float')

#scale data
#train_data /=255.0
#test_data /=255.0

#change the labels from integer to one-hot encoding
train_labels_one_hot = to_categorical(train_labels)
test_labels_one_hot = to_categorical(test_labels)

#creating network
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(dimData, )))
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_data,
                    train_labels_one_hot,
                    batch_size=256,
                    epochs=20,
                    verbose=1,
                    validation_data=(test_data, test_labels_one_hot))

[test_loss, test_acc] = model.evaluate(test_data, test_labels_one_hot)
print("Evaluation result on Test Data : Loss = {}, accuracy = {}".format(
    test_loss, test_acc))
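The snippet assumes the images have already been flattened into vectors of length dimData; a sketch of that preparation (MNIST is an assumption, any flat feature vectors would work):

from keras.datasets import mnist

(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
dimData = train_images.shape[1] * train_images.shape[2]   # 28 * 28 = 784
train_data = train_images.reshape(-1, dimData)
test_data = test_images.reshape(-1, dimData)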
Example No. 27
model.add(Dropout(0.25))
model.add(Conv1D(1024, 5, activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling1D(2))
model.add(Dropout(0.25))
model.add(Conv1D(1024, 5, activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(GlobalMaxPooling1D())
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(len(labels_index), activation='softmax'))

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])

print('model summary: ')
model.summary()
# save model summary in model folder so we can reference it later when comparing models
with open(MODEL_PATH + '/summary.txt', 'w') as handle:
    model.summary(print_fn=lambda x: handle.write(x + '\n'))

# make sure we only keep the weights from the epoch with the best accuracy, rather than the last set of weights
checkpointer = ModelCheckpoint(filepath=MODEL_PATH + '/model.h5',
                               verbose=1,
                               save_best_only=True)
history_checkpointer = util.SaveHistoryCheckpoint(model_path=MODEL_PATH)

util.print_memory()
Example No. 28
for units in results.keys():
    for train_index, test_index in kf.split(df2_data):
        # print('Train')
        # print(train_index)
        # print('Test')
        # print(test_index)
        X_train = df2_data.iloc[train_index]
        X_test =  df2_data.iloc[test_index]
        y_train = to_categorical([df2_labels[i] for i in train_index],num_classes=64)
        y_test =  to_categorical([df2_labels[i] for i in test_index],num_classes=64)
        #
        classifier = Sequential()
        classifier.add(Dense(units, activation='relu', kernel_initializer='random_normal', input_dim=X_train.shape[1]))
        classifier.add(Dense(64, activation='softmax', kernel_initializer='random_normal'))
        classifier.compile(optimizer ='adam',loss='categorical_crossentropy', metrics =['accuracy'])
        #
        #Fitting the data to the training dataset
        classifier.fit(X_train,y_train, batch_size=50, epochs=50,use_multiprocessing=True,verbose=0)
        eval_model = classifier.evaluate(X_train, y_train, use_multiprocessing=True, verbose=0)
        # eval_model holds [loss, accuracy] on the training folds
        y_pred=classifier.predict(X_test)
        # y_pred =(y_pred>0.5)
        cm = confusion_matrix(y_test.argmax(axis=1), y_pred.argmax(axis=1))
        # print(cm)
        # np.sum(cm.diagonal())
        # np.sum(cm)
        #
        print('Units:',units)
        results[units]['accuracy'].append(accuracy_score(y_test.argmax(axis=1), y_pred.argmax(axis=1)))
        results[units]['recall'].append(recall_score(y_test.argmax(axis=1), y_pred.argmax(axis=1),average='weighted'))
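After the cross-validation loop, the per-fold scores gathered in results can be summarized; a minimal sketch, assuming results maps each units value to lists of 'accuracy' and 'recall' scores as built above:

import numpy as np

# report the mean and spread of the per-fold scores for each hidden-unit setting
for units, scores in results.items():
    print('Units: %s  accuracy: %.3f +/- %.3f  recall: %.3f +/- %.3f' % (
        units,
        np.mean(scores['accuracy']), np.std(scores['accuracy']),
        np.mean(scores['recall']), np.std(scores['recall'])))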
from sklearn.model_selection import train_test_split
# import matplotlib.pyplot as plt
train_data = pd.read_csv(r'D:\sufe\A\contest_basic_train.tsv', sep='\t')
train_data = train_data.drop(['REPORT_ID', "ID_CARD", 'LOAN_DATE'], axis=1)
train_data = train_data.dropna()
# print(train_data.info())
X = train_data.drop(['Y'], axis=1).values  # 7 feature columns
y = train_data['Y'].values  # target column
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

model = Sequential()
model.add(Dense(14, input_shape=(7,)))
model.add(Activation('relu'))
model.add(Dropout(0.3))  # dropout belongs before the output layer, not after it
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()

model.fit(X_train, y_train, epochs=10000, batch_size=16)
t = model.predict(X_test)

# the raw predictions are floats, so round them before comparing with the labels
rate = 0

for i in range(len(t)):
    if round(float(t[i][0])) == y_test[i]:
        rate += 1
rate = 1.0 * rate / len(t)

print(rate)
Ejemplo n.º 30
0
def model_train_and_fit(samples_num=80000,
                        n_in=100,
                        epochs=25,
                        batch_size=128):
    values = ndata.iloc[:samples_num, :].values
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled = scaler.fit_transform(values)
    scaled_y = scaled[:, 0:1]
    scaled_exc_y = scaled[:, 1:]
    scaled_columns = [
        'speed_wind_30s_avr', 'temp_de', 'speed_generator', 'temp_nde',
        'speed_rotor', 'speed_high_shaft', 'temp_ambient', 'temp_main_bearing'
    ]

    # convert the sequence data into supervised-learning data
    # series_to_supervised:
    #     Frame a time series as a supervised learning dataset.
    #     Arguments:
    #         data: Sequence of observations as a list or NumPy array.
    #         n_in: Number of lag observations as input (X).
    #         n_out: Number of observations as output (y).
    #         dropnan: Boolean whether or not to drop rows with NaN values.
    #     Returns:
    #         Pandas DataFrame of series framed for supervised learning.
    # adjust n_in to change the look-back window (e.g. n_in = 10)
    reframed = series_to_supervised(scaled_exc_y, scaled_columns, n_in, 0)

    reframed.drop(reframed.columns[range(8, reframed.shape[1])],
                  axis=1,
                  inplace=True)
    print(reframed.columns)

    # align power(t) with the lagged inputs
    scaled_y = scaled[n_in:, 0:1]
    reframed['power(t)'] = scaled_y

    values = reframed.values

    # split into training, validation and test sets
    train_size = round(len(values) * 0.6)
    val_size = round(len(values) * 0.2)
    train = values[:train_size, :]
    val = values[train_size:val_size + train_size, :]
    test = values[val_size + train_size:, :]
    train_x, train_y = train[:, :-1], train[:, -1]
    test_x, test_y = test[:, :-1], test[:, -1]
    val_x, val_y = val[:, :-1], val[:, -1]

    # to feed the data into the LSTM, reshape it to 3D format: [samples, timesteps, features]
    train_X = train_x.reshape((train_x.shape[0], 1, train_x.shape[1]))
    test_X = test_x.reshape((test_x.shape[0], 1, test_x.shape[1]))
    val_X = val_x.reshape((val_x.shape[0], 1, val_x.shape[1]))

    model = Sequential()
    model.add(LSTM(16, input_shape=(train_X.shape[1], train_X.shape[2])))
    # model.add(LSTM(10, input_shape=(train_X.shape[1], train_X.shape[2]), return_sequences=True))
    # model.add(Dropout(0.3))
    # model.add(LSTM(10,return_sequences=True))
    # model.add(Dropout(0.3))
    # model.add(GRU(10))
    # model.add(Dropout(0.3))
    model.add(Dense(1))
    model.compile(loss='mae', optimizer='adam')
    # pass epochs and batch_size as keyword arguments; positionally they would
    # land in fit()'s batch_size and epochs slots in the wrong order
    history = model.fit(train_X,
                        train_y,
                        epochs=epochs,
                        batch_size=batch_size,
                        validation_data=(val_X, val_y))
    # , validation_data=(test_X, test_y)

    # plot the training and validation loss curves
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='val')
    plt.legend()
    plt.show()

    # make the prediction; to measure the error on the original scale,
    # invert the scaling before computing the loss
    yHat = model.predict(test_X)
    # note: the concatenated array must keep the same number of columns
    # that the scaler was originally fitted on
    inv_yHat = concatenate((yHat, test_x[:, :8]), axis=1)  # concatenate prediction with the input features
    inv_yHat = scaler.inverse_transform(inv_yHat)
    inv_yHat = inv_yHat[:, 0]

    test_y = test_y.reshape((len(test_y), 1))
    inv_y = concatenate((test_y, test_x[:, :8]), axis=1)
    inv_y = scaler.inverse_transform(inv_y)  # transform the scaled data back to the original range
    inv_y = inv_y[:, 0]

    rmse = sqrt(mean_squared_error(inv_yHat, inv_y))
    print('Test RMSE: %.3f' % rmse)

    ahead_second = n_in * 30
    ahead_hour = round(n_in / 120, 2)

    plt.figure(12)
    plt.suptitle("%s samples,%s h ahead,Test RMSE:%s" %
                 (samples_num, ahead_hour, rmse))
    plt.subplot(221), plt.plot(inv_yHat, label='predict')
    plt.legend()
    plt.subplot(223), plt.plot(inv_y, label='raw')
    plt.legend()
    plt.subplot(122), plt.plot(inv_y, label='raw'), plt.plot(inv_yHat,
                                                             label='predict')
    plt.legend()
    plt.show()
    print("samples_num:", samples_num, "ahead_hour:", ahead_hour, "rmse", rmse)
    return inv_yHat
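The series_to_supervised helper called above is not included in this snippet; the comment block inside the function describes its behavior, and a minimal sketch consistent with that description (the exact signature and column naming are assumptions) could look like:

import pandas as pd

def series_to_supervised(data, columns, n_in=1, n_out=1, dropnan=True):
    # build lagged copies of every column: var(t-n_in) ... var(t-1), then var(t) ... var(t+n_out-1)
    df = pd.DataFrame(data, columns=columns)
    cols, names = [], []
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += ['%s(t-%d)' % (col, i) for col in columns]
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        names += ['%s(t)' % col if i == 0 else '%s(t+%d)' % (col, i) for col in columns]
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    if dropnan:
        agg.dropna(inplace=True)
    return agg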
Ejemplo n.º 31
0
class NeuralNetwork(object):
    def __init__(self, input_nodes, hidden_nodes, output_nodes, lr=None):
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        self.lr = lr
        self.scales_x = []
        self.scales_y = []

        input_kernel_range = np.sqrt(6) / (np.sqrt(input_nodes) + np.sqrt(hidden_nodes))
        input_kernel_initializer = RandomUniform(minval=-input_kernel_range, maxval=input_kernel_range)
        input_layer = Dense(input_nodes,
                            kernel_initializer=input_kernel_initializer,
                            name='input')

        hidden_kernel_range = np.sqrt(6) / (np.sqrt(hidden_nodes) + np.sqrt(output_nodes))
        hidden_kernel_initializer = RandomUniform(minval=-hidden_kernel_range, maxval=hidden_kernel_range)
        hidden_layer = Dense(hidden_nodes,
                             kernel_initializer=hidden_kernel_initializer,
                             name='hidden')

        output_layer = Dense(output_nodes,
                             name='output')

        self.model = Sequential()
        self.model.add(input_layer)
        self.model.add(hidden_layer)
        self.model.add(output_layer)

    def train(self, x_train, y_train):
        self.set_normalize_scales(x_train, y_train)
        x_train = self.normalize(x_train, self.scales_x)
        y_train = self.normalize(y_train, self.scales_y)

        optimizer = SGD(lr=self.lr)
        self.model.compile(loss='mse', optimizer=optimizer)
        self.model.fit(x_train, y_train, batch_size=20, epochs=500)

    def evaluate(self, x_test, y_test):
        x_test = self.normalize(x_test, self.scales_x)
        y_test = self.normalize(y_test, self.scales_y)
        return self.model.evaluate(x_test, y_test)

    def predict(self, x):
        x = self.normalize(x, self.scales_x)
        y = self.model.predict(x)
        return self.unnormalize(y, self.scales_y)

    def set_normalize_scales(self, x, y):
        for i in range(x.shape[1]):
            mean, std = x[:, i].mean(), x[:, i].std()
            self.scales_x.append([mean, std])
        for i in range(y.shape[1]):
            mean, std = y[:, i].mean(), y[:, i].std()
            self.scales_y.append([mean, std])

    @staticmethod
    def normalize(data, scales):
        for i in range(0, len(scales)):
            mean, std = scales[i]
            data[:, i] = (data[:, i] - mean) / std
        return data

    @staticmethod
    def unnormalize(data, scales):
        for i in range(0, len(scales)):
            mean, std = scales[i]
            data[:, i] = data[:, i] * std + mean
        return data
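A short usage sketch of the class above on synthetic data (the data shapes and hyperparameters are illustrative assumptions, not taken from the original project):

import numpy as np

# hypothetical toy data: 3 input features, 1 target
x = np.random.rand(200, 3)
y = x.sum(axis=1, keepdims=True) + 0.1 * np.random.rand(200, 1)

net = NeuralNetwork(input_nodes=3, hidden_nodes=8, output_nodes=1, lr=0.01)
# note: normalize() modifies its argument in place, hence the .copy() calls below
net.train(x[:160].copy(), y[:160].copy())               # fits the normalization scales, then trains
loss = net.evaluate(x[160:].copy(), y[160:].copy())     # MSE on normalized held-out data
preds = net.predict(x[160:].copy())                     # predictions mapped back to the original scale
print(loss, preds[:3])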
Ejemplo n.º 32
0
# run the frozen CNN on every frame, then add an LSTM over the per-frame features
model = Sequential()
model.add(
    TimeDistributed(cnn, input_shape=(seq_length, 72, 72, 3), trainable=False))
model.add(
    LSTM(128, return_sequences=True))  # input shape is inferred from the TimeDistributed CNN output
model.add(TimeDistributed(Dropout(0.5)))

# add FC
model.add(TimeDistributed(Dense(64)))
model.add(TimeDistributed(Dropout(0.5)))
model.add(TimeDistributed(Dense(class_num, activation='linear')))

# train
print(model.summary())
model.compile(optimizer=adam, loss='mse', metrics=[util.ccc, 'mse'])

# save the model
if not os.path.exists("./model"):
    os.makedirs("./model")

filepath = "./model/model.h5"
checkpoint = ModelCheckpoint(filepath, save_best_only=True)
callbacks_list = [checkpoint]

hist = model.fit(x=x_train,
                 y=y_train,
                 validation_data=(x_val, y_val),
                 epochs=epoch,
                 batch_size=batch_size,
                 callbacks=callbacks_list)
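util.ccc is not shown in this snippet; assuming it is the concordance correlation coefficient (CCC) commonly used for continuous emotion targets, a Keras-backend sketch might look like the following (this is an assumption about the project's metric, not its actual code):

from keras import backend as K

def ccc(y_true, y_pred):
    # concordance correlation coefficient: 2*cov / (var_x + var_y + (mean_x - mean_y)^2)
    x_mean = K.mean(y_true)
    y_mean = K.mean(y_pred)
    x_var = K.var(y_true)
    y_var = K.var(y_pred)
    cov = K.mean((y_true - x_mean) * (y_pred - y_mean))
    return (2.0 * cov) / (x_var + y_var + K.square(x_mean - y_mean) + K.epsilon())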
Ejemplo n.º 33
0
model = Sequential()
print(base_model.output)
model.add(Flatten(input_shape=base_model.output_shape[1:]))
# map the flattened 7*7*512 VGG16 features to num_classes
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# take the VGG16 convolutional base's output and pass it through the fully connected head to num_classes
model = Model(inputs=base_model.input, outputs=model(base_model.output))
# freeze the first 15 layers of VGG16 so their weights are not updated during training
for layer in model.layers[:15]:
    layer.trainable = False
# optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# compile the model
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

#---------------------------------------------------------- training -----------------------------------------------
# if data augmentation is not used
if not data_augmentation:
    print('Not using data augmentation')
    history = model.fit(x_train,
                        y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(x_test, y_test),
                        shuffle=True)
else:
    # use data augmentation
    print('Using real-time data augmentation.')
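The augmentation branch is cut off here; in the standard Keras real-time augmentation recipe this else branch typically continues with an ImageDataGenerator, roughly as sketched below (the augmentation parameters are illustrative, not the project's actual settings):

    from keras.preprocessing.image import ImageDataGenerator

    # illustrative augmentation settings
    datagen = ImageDataGenerator(rotation_range=15,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 horizontal_flip=True)
    history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                  steps_per_epoch=len(x_train) // batch_size,
                                  epochs=epochs,
                                  validation_data=(x_test, y_test))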
Ejemplo n.º 34
0
File: mlp.py  Project: will4906/srep
import keras
import numpy as np

from keras import Sequential
from keras.layers import Dense, Convolution2D, Flatten, Convolution1D
from keras.optimizers import SGD, Adam

from util import load_single_train_data

model = Sequential()

model.add(Convolution1D(input_shape=[16, 8], filters=64, kernel_size=3, activation='relu'))
model.add(Flatten())
model.add(Dense(units=8, activation='softmax'))
adam = Adam(lr=0.1, decay=0.0)

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=adam, metrics=['accuracy'])
train_x, train_y, test_x, test_y = load_single_train_data('.cache/dba/data', 1)
train_x = train_x.reshape(train_x.shape[0], 16, 8)
test_x = test_x.reshape(test_x.shape[0], 16, 8)  # reshape using the test set's own sample count
train_y = keras.utils.to_categorical(train_y - 1, 8)
test_y = keras.utils.to_categorical(test_y - 1, 8)

model.fit(train_x, train_y, batch_size=1000, validation_data=(test_x, test_y), epochs=28)
def SegNet():
    model = Sequential()

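    # Encoder: five Conv-BN-ReLU stages, each ending in 2x2 max pooling (128x128 -> 4x4)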
    model.add(Conv2D(8, (3, 3), input_shape=(128, 128, 1), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(8, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(16, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(16, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

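    # Decoder: five upsampling stages mirroring the encoder (4x4 -> 128x128)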
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))

    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))

    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))

    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(16, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(16, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))

    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(8, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))
    model.add(Conv2D(8, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=3))
    model.add(Activation('relu'))

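    # 1x1 convolution producing a per-pixel probability map for binary segmentation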
    model.add(Conv2D(1, (1, 1), activation='sigmoid'))
    model.compile(optimizer=Adam(lr=0.01),
                  loss='binary_crossentropy',
                  metrics=[processor.mean_iou])

    return model
Ejemplo n.º 36
0
import numpy as np
from keras import Sequential
from keras.layers import Dense

data = np.random.random((1000, 32))
label = np.random.random((1000, 10))

model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(32, )))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.compile('adam', 'categorical_crossentropy')

model.fit(data, label, epochs=100)

model.save('my_model.h5')

Ejemplo n.º 37
0
class lstm:
    def __init__(self, market: Market = Market.US):
        self.model = None
        self.market = market
        self.sp500 = self.load_stock_data('^GSPC')

    def add_features(self, ohlcv: pd.DataFrame):
        # add SMA 5, 30, 90, 180
        for feature in [StockPriceField.Close.value]:
            for sma in [5, 30, 90, 180]:
                ohlcv['%s_sma_%s' % (feature, sma)] = talib.abstract.SMA(
                    ohlcv, timeperiod=sma, price=feature)
        # normalize to pct_change
        ohlcv = ohlcv.pct_change()
        # drop na
        ohlcv.dropna(inplace=True)
        return ohlcv

    def load_stock_data(self, symbol: str) -> pd.DataFrame:
        data = Utility.load_stock_price(self.market, symbol)
        del data[StockPriceField.Symbol.value]
        return self.add_features(data)

    def prepare_data(self,
                     symbol: str,
                     timesteps,
                     validate_pct: float = 0.2,
                     test_pct: float = 0.2):
        base_data = self.load_stock_data(symbol)
        # combine with sp500
        base_data = pd.concat([base_data, self.sp500], axis=1,
                              join='inner').dropna()
        training_steps = [base_data]
        for step in range(timesteps, 0, -1):
            one_step = base_data.shift(step)
            one_step.columns = [
                '%s(-%d)' % (col, step) for col in base_data.columns
            ]
            training_steps.append(one_step)
        featured_data = pd.concat(training_steps, axis=1,
                                  join='inner').dropna()
        input = featured_data.iloc[:, base_data.shape[1]:].values
        output = featured_data.iloc[:, 3].values  # close price

        # scaler = MinMaxScaler(feature_range=(0, 1))
        scaled_input = input.reshape(-1, timesteps, base_data.shape[1])
        scaled_output = output

        training_ends = math.floor(scaled_input.shape[0] *
                                   (1 - validate_pct - test_pct))
        validate_ends = math.floor(scaled_input.shape[0] * (1 - test_pct))

        training_data = scaled_input[:training_ends]
        training_label = scaled_output[:training_ends]
        validate_data = scaled_input[training_ends:validate_ends]
        validate_label = scaled_output[training_ends:validate_ends]
        test_data = scaled_input[validate_ends:]
        test_label = scaled_output[validate_ends:]
        return training_data, training_label, validate_data, validate_label, test_data, test_label

    def create_model(self, timesteps, feature_size, outputs=1):
        self.model = Sequential()
        self.model.add(
            GRU(2048, input_shape=(timesteps, feature_size), dropout=0.5))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(outputs))
        self.model.compile(loss='mae', optimizer='adam')

    def train_model(self, train_data, train_label, validation_data,
                    validation_label, epochs, batch_size):
        return self.model.fit(train_data,
                              train_label,
                              validation_data=(validation_data,
                                               validation_label),
                              epochs=epochs,
                              batch_size=batch_size,
                              shuffle=False,
                              verbose=2)
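A short usage sketch of the class above (the ticker symbol, look-back window, and training hyperparameters are illustrative assumptions):

m = lstm(Market.US)
timesteps = 30  # assumed look-back window
(train_x, train_y,
 val_x, val_y,
 test_x, test_y) = m.prepare_data('AAPL', timesteps)  # 'AAPL' is an illustrative symbol
m.create_model(timesteps, feature_size=train_x.shape[2])
m.train_model(train_x, train_y, val_x, val_y, epochs=20, batch_size=64)
print(m.model.evaluate(test_x, test_y))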
Ejemplo n.º 38
0
print('Minimum review length: {}'.format(len(min((X_train + X_test), key=len))))


from keras.preprocessing import sequence
max_words = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_words)
X_test = sequence.pad_sequences(X_test, maxlen=max_words)

from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout
embedding_size=32
model=Sequential()
model.add(Embedding(vocabulary_size, embedding_size, input_length=max_words))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())


model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

batch_size = 64
num_epochs = 3
X_valid, y_valid = X_train[:batch_size], y_train[:batch_size]
X_train2, y_train2 = X_train[batch_size:], y_train[batch_size:]
model.fit(X_train2, y_train2, validation_data=(X_valid, y_valid), batch_size=batch_size, epochs=num_epochs)

scores = model.evaluate(X_test, y_test, verbose=0)
print('Test accuracy:', scores[1])
Ejemplo n.º 39
0
def phase_estimator_100k(time_sequence):
    seq = Sequential()
    seq.add(
        ConvLSTM2D(filters=50,
                   kernel_size=(2, 2),
                   input_shape=(None, 10, 10, time_sequence),
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())
    #seq.add(Dropout(0.2))

    seq.add(
        ConvLSTM2D(filters=75,
                   kernel_size=(2, 2),
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())
    #seq.add(Dropout(0.2))

    seq.add(
        ConvLSTM2D(filters=100,
                   kernel_size=(2, 2),
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())
    #seq.add(Dropout(0.1))

    seq.add(
        ConvLSTM2D(filters=75,
                   kernel_size=(2, 2),
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())

    seq.add(
        ConvLSTM2D(filters=50,
                   kernel_size=(2, 2),
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())

    seq.add(
        Conv3D(filters=time_sequence,
               kernel_size=(2, 2, 2),
               activation='relu',
               padding='same',
               data_format='channels_last'))
    seq.add(BatchNormalization())
    seq.add(Dropout(.15))

    seq.add(
        Conv3D(filters=time_sequence,
               kernel_size=(2, 2, 2),
               activation='softmax',
               padding='same',
               data_format='channels_last'))
    seq.compile(
        loss='categorical_crossentropy',
        optimizer='adadelta',
        metrics=[metrics.categorical_accuracy, metrics.binary_accuracy])
    return seq
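A quick sanity check of the architecture above (time_sequence=4 is an arbitrary choice for illustration):

model = phase_estimator_100k(time_sequence=4)
model.summary()
# with 'same' padding throughout, the output keeps the input's shape:
# input (batch, frames, 10, 10, 4) -> output (batch, frames, 10, 10, 4)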