Code Example #1
File: tsc_mainR.py  Project: yangyiqwer/vue-sp
import numpy as np
import tensorflow as tf  #TF 1.1.0rc1

tf.logging.set_verbosity(tf.logging.ERROR)
#import matplotlib.pyplot as plt
from tsc_model import Model, sample_batch, load_data
import os
#Set these directories
#direc = '/home/rob/Dropbox/ml_projects/LSTM/UCR_TS_Archive_2015'
#summaries_dir = '/home/rob/Dropbox/ml_projects/LSTM_TSC/log_tb'
"""Load the data"""
#ratio = np.array([0.8,0.9]) #Ratios where to split the training and validation set
X_train, X_test, y_train, y_test = load_data()
N, sl = X_train.shape
num_classes = len(np.unique(y_train))
"""Hyperparamaters"""
batch_size = 100
max_iterations = 20000
dropout = 0.5
config = {
    'num_layers': 3,  #number of layers of stacked RNNs
    'hidden_size': 16,  #memory cells in a layer
    'max_grad_norm': 5,  #maximum gradient norm during training
    'batch_size': batch_size,
    'learning_rate': .005,
    'sl': sl,
    'num_classes': num_classes
}

epochs = np.floor(batch_size * max_iterations / N)
#print('Train %.0f samples in approximately %d epochs' %(N,epochs))
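
Example #1 stops after computing the epoch count and never builds or trains the model. Below is a minimal sketch of how the script presumably continues, assuming the Model class exposes `input`, `labels`, and `keep_prob` placeholders plus `train_op` and `cost` ops (these attribute names are assumptions, not confirmed by the listing), and that `sample_batch(X_train, y_train, batch_size)` returns one mini-batch:

#Hypothetical continuation of Example #1: build the model and run a
#training loop. The attribute names on Model are assumptions.
model = Model(config)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(max_iterations):
        X_batch, y_batch = sample_batch(X_train, y_train, batch_size)
        feed = {model.input: X_batch,
                model.labels: y_batch,
                model.keep_prob: dropout}  #apply dropout only during training
        _, cost = sess.run([model.train_op, model.cost], feed_dict=feed)
        if i % 100 == 0:
            print('Iteration %5d: training cost %.3f' % (i, cost))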
Code Example #2
"""Hyperparamaters"""

config = {}                             #Put all configuration information into the dict
config['num_layers'] = 80               #number of layers of stacked RNNs
config['hidden_size'] = 40              #memory cells in a layer
config['max_grad_norm'] = 100           #maximum gradient norm during training
config['batch_size'] = batch_size = 30  
config['learning_rate'] = .0001

max_iterations = 5000
dropout = 0.8
ratio = np.array([0.8,0.9]) #Ratios where to split the training and validation set

"""Load the data"""
direc = '/home/kyle/dvlp/ml/LSTM_tsc/UCR_TS_Archive_2015'
X_train,X_val,X_test,y_train,y_val,y_test = load_data(direc,ratio,dataset='IXIC')
N,sl = X_train.shape
config['sl'] = sl = X_train.shape[1]
config['num_classes'] = num_classes = len(np.unique(y_train))

# Collect the performance metrics in a NumPy array
epochs = np.floor(batch_size*max_iterations / N)
print('Train %.0f samples in approximately %d epochs' %(N,epochs))
perf_collect = np.zeros((4,int(np.floor(max_iterations /100))))

#Instantiate a model
model = Model(config)



"""Session time"""
Code Example #3
File: tsc_main.py  Project: zzd837649498/LSTM_tsc
"""
generates an output to be classified with Softmax
"""

import numpy as np
import tensorflow as tf  #TF 1.1.0rc1
tf.logging.set_verbosity(tf.logging.ERROR)
import matplotlib.pyplot as plt
from tsc_model import Model, sample_batch, load_data, check_test

#Set these directories
direc = '/home/rob/Dropbox/ml_projects/LSTM/UCR_TS_Archive_2015'
summaries_dir = '/home/rob/Dropbox/ml_projects/LSTM_TSC/log_tb'
"""Load the data"""
ratio = np.array([0.8,
                  0.9])  #Ratios where to split the training and validation set
X_train, X_val, X_test, y_train, y_val, y_test = load_data(
    direc, ratio, dataset='ChlorineConcentration')
N, sl = X_train.shape
num_classes = len(np.unique(y_train))
"""Hyperparamaters"""
batch_size = 30
max_iterations = 3000
dropout = 0.8
config = {
    'num_layers': 3,  #number of layers of stacked RNNs
    'hidden_size': 120,  #memory cells in a layer
    'max_grad_norm': 5,  #maximum gradient norm during training
    'batch_size': batch_size,
    'learning_rate': .005,
    'sl': sl,
    'num_classes': num_classes
}
Code Example #4
File: tsc_main.py  Project: Dhalwani/TouchAnalyze
import numpy as np
import tensorflow as tf  #TF 1.1.0rc1
tf.logging.set_verbosity(tf.logging.ERROR)
import matplotlib.pyplot as plt
from tsc_model import Model,sample_batch,load_data


ratio = np.array([0.8,0.9]) #Ratios where to split the training and validation set
X_train,X_val,X_test,y_train,y_val,y_test = load_data(ratio,dataset='new_features.csv')
N,sl = X_train.shape
num_classes = len(np.unique(y_train))


batch_size = 30
max_iterations = 3000
dropout = 0.8
config = {
    'num_layers': 3,  #number of layers of stacked RNNs
    'hidden_size': 120,  #memory cells in a layer
    'max_grad_norm': 5,  #maximum gradient norm during training
    'batch_size': batch_size,
    'learning_rate': .005,
    'sl': sl,
    'num_classes': num_classes
}



epochs = np.floor(batch_size*max_iterations / N)
print('Train %.0f samples in approximately %d epochs' %(N,epochs))

#Instantiate a model
model = Model(config)
Code Example #5
File: tsc_alt_main.py  Project: jjsullivan5196/flex
"""
generates an output to be classified with Softmax
"""

import numpy as np
import tensorflow as tf  #TF 1.1.0rc1
tf.logging.set_verbosity(tf.logging.ERROR)
import matplotlib.pyplot as plt
from tsc_model import Model, sample_batch, load_data

#Set these directories
direc = 'data_half'
summaries_dir = 'log_tb'
"""Load the data"""
ratio = np.array([0.8,
                  0.9])  #Ratios where to split the training and validation set
X_train, X_val, X_test, y_train, y_val, y_test = load_data(
    direc, ratio, dataset='ArmSensor_s1')
N, sl = X_train.shape
num_classes = len(np.unique(y_train))
"""Hyperparamaters"""
batch_size = 30
max_iterations = 5000
dropout = 0.8
config = {
    'num_layers': 3,  #number of layers of stacked RNNs
    'hidden_size': 120,  #memory cells in a layer
    'max_grad_norm': 5,  #maximum gradient norm during training
    'batch_size': batch_size,
    'learning_rate': .005,
    'sl': sl,
    'num_classes': num_classes
}
Code Example #6
File: tsc_main.py  Project: RobRomijnders/LSTM_tsc
"""
generates an output to be classified with Softmax
"""

import numpy as np
import tensorflow as tf  #TF 1.1.0rc1
tf.logging.set_verbosity(tf.logging.ERROR)
import matplotlib.pyplot as plt
from tsc_model import Model,sample_batch,load_data,check_test

#Set these directories
direc = '/home/rob/Dropbox/ml_projects/LSTM/UCR_TS_Archive_2015'
summaries_dir = '/home/rob/Dropbox/ml_projects/LSTM_TSC/log_tb'

"""Load the data"""
ratio = np.array([0.8,0.9]) #Ratios where to split the training and validation set
X_train,X_val,X_test,y_train,y_val,y_test = load_data(direc,ratio,dataset='ChlorineConcentration')
N,sl = X_train.shape
num_classes = len(np.unique(y_train))

"""Hyperparamaters"""
batch_size = 30
max_iterations = 3000
dropout = 0.8
config = {
    'num_layers': 3,  #number of layers of stacked RNNs
    'hidden_size': 120,  #memory cells in a layer
    'max_grad_norm': 5,  #maximum gradient norm during training
    'batch_size': batch_size,
    'learning_rate': .005,
    'sl': sl,
    'num_classes': num_classes
}
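
Examples #3 and #6 additionally import check_test from tsc_model, which presumably scores the trained model on the held-out test split. Its actual signature is not shown; the following is a generic batched test-evaluation sketch in the same spirit, reusing the assumed Model attributes from the sketches above:

#Hypothetical stand-in for check_test: average accuracy over the test set
#in mini-batches, with dropout disabled. All Model attribute names are
#assumptions, not the library's confirmed API.
def evaluate_test(sess, model, X_test, y_test, batch_size):
    accuracies = []
    for start in range(0, len(X_test), batch_size):
        feed = {model.input: X_test[start:start + batch_size],
                model.labels: y_test[start:start + batch_size],
                model.keep_prob: 1.0}
        accuracies.append(sess.run(model.accuracy, feed_dict=feed))
    return float(np.mean(accuracies))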