def __call__(self, shape, dtype=None):
     map_sparse = csr_matrix(self.map)
     # init = np.random.rand(*map_sparse.data.shape)
     init = np.random.normal(10.0, 1., *map_sparse.data.shape)
     print('connection map data shape {}'.format(map_sparse.data.shape))
     # init = np.random.randn(*map_sparse.data.shape).astype(np.float32) * np.sqrt(2.0 / (map_sparse.data.shape[0]))
     # init = initializers.glorot_uniform()(map_sparse.data.shape)  # alternative initializer (the original called it with no shape, which would raise)
     map_sparse.data = init
     return K.variable(map_sparse.toarray())
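For orientation, a hedged usage sketch: assuming the __call__ above belongs to an initializer class (here called SparseMapInit, a hypothetical name) whose constructor stores the 0/1 connectivity matrix as self.map, it can be attached to a layer like any other Keras initializer.

# hypothetical usage; SparseMapInit is an assumed wrapper class whose constructor sets self.map
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

connection_map = np.random.binomial(1, 0.1, size=(61, 8))  # toy 0/1 mask shaped like the Dense kernel (input_dim, units)
model = Sequential()
model.add(Dense(8, input_dim=61, kernel_initializer=SparseMapInit(connection_map)))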
 def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8,
              rate_dropout=0.2, loss='risk_estimation_buy',
              act_last='relu_buy'):
     print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." %(
             lr, n_layers, n_hidden, rate_dropout))
     self.model = Sequential()
     self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
     for i in range(0, n_layers - 1):
         self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                             recurrent_activation='hard_sigmoid',
                             kernel_initializer='glorot_uniform',
                             recurrent_initializer='orthogonal',
                             bias_initializer='zeros',
                             dropout=rate_dropout,
                             recurrent_dropout=rate_dropout))
     self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                             recurrent_activation='hard_sigmoid',
                             kernel_initializer='glorot_uniform',
                             recurrent_initializer='orthogonal',
                             bias_initializer='zeros',
                             dropout=rate_dropout,
                             recurrent_dropout=rate_dropout))
     self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
     # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
     #               moving_variance_initializer=Constant(value=0.25)))
     self.model.add(BatchRenormalization(axis=-1, beta_init=Constant(value=0.5)))
     self.model.add(Activation(act_last))
     opt = RMSprop(lr=lr)
     self.model.compile(loss=loss,
                   optimizer=opt,
                   metrics=['accuracy'])
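A minimal, hedged usage sketch of the class above. The class name ModelWrapper, the (30, 61) input window, and the training arrays are assumptions, and the custom loss 'risk_estimation_buy' and activation 'relu_buy' would have to be defined and registered with Keras before this runs.

# hypothetical usage; ModelWrapper is a placeholder for the class this __init__ belongs to,
# train_x/train_y/test_x are assumed arrays shaped (samples, 30, 61), (samples, 1), (samples, 30, 61)
wp = ModelWrapper(input_shape=(30, 61), lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2)
wp.model.fit(train_x, train_y, batch_size=32, epochs=10, validation_split=0.2)
predictions = wp.model.predict(test_x)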
Example #3
    def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss=risk_estimation): # risk_estimation, risk_estimation_bhs

        print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." %(lr, n_layers, n_hidden, rate_dropout))

        # build a model with Sequential()
        self.model = Sequential()

        # todo: ask why dropout on input layer?
        self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))

        # build a number of LSTM layers
        for i in range(0, n_layers - 1):
            self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        # add another LSTM layer
        self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))


        self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
        self.model.add(Activation('sigmoid'))


        # compile model
        opt = RMSprop(lr=lr)
        self.model.compile(loss=loss,
                      optimizer=opt,
                      metrics=['accuracy'])
Example #4
    def __init__(self,
                 input_shape,
                 lr=0.01,
                 n_layers=2,
                 n_hidden=8,
                 rate_dropout=0.2,
                 loss='mse'):  # risk_estimation, risk_estimation_bhs

        print(
            "initializing..., learning rate %s, n_layers %s, n_hidden %s, dropout rate %s."
            % (lr, n_layers, n_hidden, rate_dropout))

        # build the model with Sequential()
        self.model = Sequential()

        # dropout layer: drop 20% of the units on each training pass; the input dimensions are (30, 61)
        self.model.add(
            Dropout(rate=rate_dropout,
                    input_shape=(input_shape[0], input_shape[1])))

        # build a stack of LSTM layers
        for i in range(0, n_layers - 1):  # how many layers to stack

            # one LSTM layer per iteration
            self.model.add(
                LSTM(
                    n_hidden * 4,  # 8 * 4 = 32 units
                    return_sequences=True,  # every time step (30 in total) emits an output, so the output shape is (None, 30, 32); None is the sample dimension
                    activation='tanh',
                    recurrent_activation='hard_sigmoid',
                    kernel_initializer='glorot_uniform',
                    recurrent_initializer='orthogonal',
                    bias_initializer='zeros',
                    dropout=rate_dropout,  # fraction of input units to drop
                    recurrent_dropout=rate_dropout  # fraction of recurrent units to drop
                ))

        # add a final LSTM layer
        self.model.add(
            LSTM(
                n_hidden,  # 8 units
                return_sequences=False,  # only the final time step is returned, so the output shape is (None, 8)
                activation='tanh',
                recurrent_activation='hard_sigmoid',
                kernel_initializer='glorot_uniform',
                recurrent_initializer='orthogonal',
                bias_initializer='zeros',
                dropout=rate_dropout,
                recurrent_dropout=rate_dropout))

        # a simple output layer that produces a single value
        self.model.add(
            Dense(
                1,  # a single unit, i.e. one output value
                kernel_initializer=initializers.glorot_uniform()))
Example #5
    def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss='mse'):

        print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." %(lr, n_layers, n_hidden, rate_dropout))

        # build the model with Sequential()
        self.model = Sequential()

        # dropout layer: drop 20% of the units on each training pass; the input dimensions are (30, 61)
        self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))

        # build a stack of LSTM layers
        for i in range(0, n_layers - 1): # how many layers to stack

            # one LSTM layer per iteration
            self.model.add(LSTM(n_hidden * 4, # 8 * 4 = 32 units
                                return_sequences=True, # every time step (30 in total) emits an output, so the output shape is (None, 30, 32); None is the sample dimension
                                activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, # fraction of input units to drop
                                recurrent_dropout=rate_dropout # fraction of recurrent units to drop
                                ))
        # add a final LSTM layer
        self.model.add(LSTM(n_hidden, # 8 units
                                return_sequences=False, # only the final time step is returned, so the output shape is (None, 8)
                                activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout
                                ))

        # a simple output layer that produces a single value
        self.model.add(Dense(1, # a single unit, i.e. one output value
                        kernel_initializer=initializers.glorot_uniform()))
        # self.model.add(Activation('sigmoid')) # no more sigmoid as last layer needed


        # choose an optimizer
        opt = RMSprop(lr=lr)

        # configure the model: loss function, optimizer, and evaluation metric (accuracy)
        self.model.compile(loss=loss,
                      optimizer=opt,
                      metrics=['accuracy'])
Example #6
    def __init__(self,
                 input_shape,
                 modelType=0,
                 lr=0.01,
                 n_layers=2,
                 n_hidden=8,
                 rate_dropout=0.2,
                 loss='risk_estimation'):

        print(
            "initializing..., learning rate %s, n_layers %s, n_hidden %s, dropout rate %s."
            % (lr, n_layers, n_hidden, rate_dropout))
        '''
            Orig Model from deep trader keras
        '''
        if (modelType == 0):
            self.model = Sequential()
            # issue : maybe don't drop out input data
            self.model.add(
                Dropout(rate=0.2,
                        input_shape=(input_shape[0], input_shape[1])))
            #self.model.add(Input(shape=(input_shape[0], input_shape[1]),name='lstm_imput' ) )

            for i in range(0, n_layers - 1):
                self.model.add(
                    LSTM(n_hidden * 4,
                         return_sequences=True,
                         activation='tanh',
                         recurrent_activation='hard_sigmoid',
                         kernel_initializer='glorot_uniform',
                         recurrent_initializer='orthogonal',
                         bias_initializer='zeros',
                         dropout=rate_dropout,
                         recurrent_dropout=rate_dropout))
            self.model.add(
                LSTM(n_hidden,
                     return_sequences=False,
                     activation='tanh',
                     recurrent_activation='hard_sigmoid',
                     kernel_initializer='glorot_uniform',
                     recurrent_initializer='orthogonal',
                     bias_initializer='zeros',
                     dropout=rate_dropout,
                     recurrent_dropout=rate_dropout))
            self.model.add(
                Dense(1,
                      kernel_initializer=initializers.glorot_uniform(),
                      activity_regularizer=regularizers.l2(0.01)))
            # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
            #               moving_variance_initializer=Constant(value=0.25)))
            self.model.add(
                BatchRenormalization(axis=-1, beta_init=Constant(value=0.5)))
            self.model.add(Activation('relu_limited'))  #relu_limited
            #opt = RMSprop(lr=lr)
            opt = Nadam(lr=lr)
            self.model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])

        # model copied from type 0, but it outputs a negative signal only
        elif (modelType == 1):
            self.model = Sequential()
            # issue : maybe don't drop out input data
            self.model.add(
                Dropout(rate=0.2,
                        input_shape=(input_shape[0], input_shape[1])))
            for i in range(0, n_layers - 1):
                self.model.add(
                    LSTM(n_hidden * 4,
                         return_sequences=True,
                         activation='tanh',
                         recurrent_activation='tanh',
                         kernel_initializer='glorot_uniform',
                         recurrent_initializer='orthogonal',
                         bias_initializer='zeros',
                         dropout=rate_dropout,
                         recurrent_dropout=rate_dropout))
            self.model.add(
                LSTM(n_hidden,
                     return_sequences=False,
                     activation='tanh',
                     recurrent_activation='tanh',
                     kernel_initializer='glorot_uniform',
                     recurrent_initializer='orthogonal',
                     bias_initializer='zeros',
                     dropout=rate_dropout,
                     recurrent_dropout=rate_dropout))
            self.model.add(
                Dense(1,
                      kernel_initializer=initializers.glorot_uniform(),
                      activity_regularizer=regularizers.l2(0.01)))
            # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
            #               moving_variance_initializer=Constant(value=0.25)))
            self.model.add(
                BatchRenormalization(axis=-1, beta_init=Constant(value=0.5)))
            self.model.add(Activation('relu_inverse'))
            #opt = RMSprop(lr=lr)
            opt = Nadam(lr=lr)
            self.model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])

        elif (modelType == 2):  # model that only outputs a signal predicting positive/negative
            import tensorflow as tf

            def atan(x):
                return tf.atan(x)

            self.model = Sequential()
            self.model.add(
                LSTM(input_dim=input_shape[0],
                     output_dim=input_shape[1],
                     return_sequences=True,
                     activation=atan))  # ,output_dim=input_shape[1]
            self.model.add(Dropout(0.2))
            self.model.add(BatchNormalization())
            self.model.add(
                Dense(output_dim=int(input_shape[1] / 2),
                      activity_regularizer=regularizers.l2(0.01)))
            self.model.add(Activation(atan))
            self.model.add(
                Dense(output_dim=1,
                      activity_regularizer=regularizers.l2(0.01)))
            self.model.add(Activation(atan))
            self.model.compile(optimizer='adam', loss='mse', metrics=['mse'])

        elif (modelType == 3):  # model used by raj, with some added experiments

            self.model = Sequential()
            self.model.add(
                LSTM(input_dim=input_shape[0],
                     output_dim=input_shape[1],
                     return_sequences=True))
            self.model.add(Dropout(0.2))
            self.model.add(BatchNormalization())

            self.model.add(LSTM(input_shape[2], return_sequences=False))
            self.model.add(Dropout(0.2))
            self.model.add(
                Dense(output_dim=input_shape[3],
                      activity_regularizer=regularizers.l2(0.01)))
            self.model.add(Activation("linear"))
            opt = Nadam(lr=lr)
            #self.model.compile(optimizer=opt , loss='risk_estimation', metrics=['accuracy'])
            self.model.compile(loss="mse", optimizer="rmsprop")
train_gen = train.flow_from_directory(flower_path, target_size=(img_size, img_size), batch_size=batch_size,
                                      class_mode='categorical', subset='training')
valid_gen = train.flow_from_directory(flower_path, target_size=(img_size, img_size), batch_size=batch_size,
                                      class_mode='categorical', subset='validation')

# Model

model = models.Sequential()

# use model.add() to add any layers you like
# read Keras documentation to find which layers you can use:
#           https://keras.io/layers/core/
#           https://keras.io/layers/convolutional/
#           https://keras.io/layers/pooling/
#
init = initializers.glorot_uniform(seed=1)
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Dropout(0.5))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
# last layer should be with softmax activation function - do not change!!!
model.add(layers.Dense(classes, activation='softmax'))

# fill optimizer argument using one of keras.optimizers.
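A minimal sketch of the missing compile/train step the comment above asks for, assuming categorical cross-entropy (the generators use class_mode='categorical') and the Keras 2 generator API; the optimizer choice and epoch count here are assumptions, not prescribed by the assignment.

from keras import optimizers

model.compile(optimizer=optimizers.RMSprop(lr=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# train with the directory generators defined above
history = model.fit_generator(train_gen,
                              steps_per_epoch=train_gen.samples // batch_size,
                              epochs=10,
                              validation_data=valid_gen,
                              validation_steps=valid_gen.samples // batch_size)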
Example #8
    LSTM(
        n_hidden,  # 8 units
        return_sequences=False,  # only the final time step is returned, so the output shape is (None, 8)
        activation='tanh',
        recurrent_activation='hard_sigmoid',
        kernel_initializer='glorot_uniform',
        recurrent_initializer='orthogonal',
        bias_initializer='zeros',
        dropout=rate_dropout,
        recurrent_dropout=rate_dropout))

# a simple output layer that produces a single value
model.add(
    Dense(
        1,  # a single unit, i.e. one output value
        kernel_initializer=initializers.glorot_uniform()))
# model.add(Activation('sigmoid')) # no more sigmoid as last layer needed

# compile model
opt = RMSprop(lr=lr)
model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])

# train the model
model.fit(
    train_features,  # x: training features
    train_targets,  # y: training targets
    batch_size=32,  # how many samples to process per gradient update
    epochs=1,  # number of training epochs
    verbose=1,  # print loss and accuracy during training
    # callbacks=None, # optional callbacks
    # validation_split=0.0, # fraction of the training data held out for validation; 0.2 keeps the last 20% for validation
Example #9
    def __init__(self,
                 input_shape,
                 lr=0.01,
                 n_layers=2,
                 n_hidden=8,
                 rate_dropout=0.2,
                 loss=risk_estimation):  # risk_estimation, risk_estimation_bhs

        print(
            "initializing..., learning rate %s, n_layers %s, n_hidden %s, dropout rate %s."
            % (lr, n_layers, n_hidden, rate_dropout))

        # build a model with Sequential()
        self.model = Sequential()

        # todo: ask why dropout on input layer?
        self.model.add(
            Dropout(rate=rate_dropout,
                    input_shape=(input_shape[0], input_shape[1])))

        # build a number of LSTM layers
        for i in range(0, n_layers - 1):
            self.model.add(
                LSTM(n_hidden * 4,
                     return_sequences=True,
                     activation='tanh',
                     recurrent_activation='hard_sigmoid',
                     kernel_initializer='glorot_uniform',
                     recurrent_initializer='orthogonal',
                     bias_initializer='zeros',
                     dropout=rate_dropout,
                     recurrent_dropout=rate_dropout))
        # add another LSTM layer
        self.model.add(
            LSTM(n_hidden,
                 return_sequences=False,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 dropout=rate_dropout,
                 recurrent_dropout=rate_dropout))

        #######################
        # original deep trader
        #######################
        # # add a dense layer, with BatchRenormalization, relu_limited
        # self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))

        # self.model.add(BatchRenormalization(axis=-1, beta_init=Constant(value=0.5)))
        # self.model.add(Activation(relu_limited))

        #######################
        # revised version 1
        #######################
        self.model.add(
            Dense(1, kernel_initializer=initializers.glorot_uniform()))
        self.model.add(Activation('sigmoid'))

        #######################
        # revised 2 for classification style solution
        #######################
        # self.model.add(Dense(5, kernel_initializer=initializers.glorot_uniform()))
        # self.model.add(Activation('softmax'))

        #######################
        # revised 1.5 for buy_hold_sell activation function
        #######################
        # self.model.add(Activation(buy_hold_sell))

        # compile model
        opt = RMSprop(lr=lr)
        self.model.compile(loss=loss, optimizer=opt, metrics=['accuracy'])