Example #1
    def generate(
            self, V: Variat, input_features_layer: DenseFeatures
    ) -> Generator[Model, None, None]:

        model = tf.keras.Sequential([
            input_features_layer,
            layers.Dense(158,
                         activation='selu',
                         kernel_initializer=initializers.lecun_normal()),
            layers.Dropout(0.2),
            layers.Dense(168,
                         activation='swish',
                         kernel_initializer=initializers.GlorotNormal()),
            layers.Dropout(0.2),
            layers.Dense(178,
                         activation='swish',
                         kernel_initializer=initializers.GlorotNormal()),
            layers.Dropout(0.2),
            layers.Dense(188,
                         activation='selu',
                         kernel_initializer=initializers.lecun_normal()),
            layers.Dropout(0.2),
            layers.Dense(1,
                         activation="sigmoid",
                         kernel_initializer=initializers.lecun_normal())
        ])

        yield model
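
For context (not part of the original snippet), the generator could be consumed like this; `builder`, `variant`, `feature_layer`, `train_ds` and `val_ds` are hypothetical placeholders for an instance of the surrounding class, its `Variat` / `DenseFeatures` arguments, and the datasets:

# Hypothetical usage sketch; the sigmoid output suggests a binary-classification loss.
for model in builder.generate(variant, feature_layer):
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(train_ds, validation_data=val_ds, epochs=10)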
Example #2
    def begin_insert_layer(self, layer_dim):
        # `self.layers[0].get_weights()` -> [weights, bias]
        next_units = self.layers[0].get_weights()[0].shape[0]
        layer = Dense(
            units=next_units,
            activation=tf.nn.relu,
            kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),
            kernel_initializer=initializers.GlorotNormal(seed=self.seed),
            bias_initializer=initializers.Zeros())
        layer.build(input_shape=(None, layer_dim))
        self.layers.insert(0, layer)
Example #3
def build_kernel_initializer(args):
    if args is None:
        return None
    kernel_initializer = None
    if args.name == 'RandomUniform':
        kernel_initializer = initializers.RandomUniform(
            args.minval, args.maxval)
    elif args.name == 'GlorotNormal':
        kernel_initializer = initializers.GlorotNormal()

    return kernel_initializer
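
A minimal usage sketch, assuming only that `args` exposes `name`, `minval` and `maxval` (a `SimpleNamespace` is used here purely for illustration):

from types import SimpleNamespace
import tensorflow as tf

# Hypothetical call; any object with .name / .minval / .maxval attributes would do.
args = SimpleNamespace(name='RandomUniform', minval=-0.05, maxval=0.05)
init = build_kernel_initializer(args)
layer = tf.keras.layers.Dense(64, kernel_initializer=init)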
Example #4
    def __init__(self,
                 input_dim,
                 output_dim=2,
                 hidden_dims=None,
                 l1=0.01,
                 l2=0.01,
                 seed=6):
        super(PartCoder, self).__init__()
        self.l1 = l1
        self.l2 = l2
        self.seed = seed
        # self.layers = NoDependency([])
        # self.__dict__['layers'] = []
        self.layers = []

        _input_dim = input_dim
        for dim in (hidden_dims or []):  # tolerate the hidden_dims=None default
            layer = Dense(
                units=dim,
                activation=tf.nn.relu,
                kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),
                kernel_initializer=initializers.GlorotNormal(seed=self.seed),
                bias_initializer=initializers.Zeros())
            layer.build(input_shape=(None, _input_dim))
            _input_dim = dim
            self.layers.append(layer)

        # Final, adding output_layer (latent/reconstruction layer)
        layer = Dense(units=output_dim,
                      activation=tf.nn.sigmoid,
                      kernel_regularizer=regularizers.l1_l2(l1=self.l1,
                                                            l2=self.l2),
                      kernel_initializer=initializers.GlorotNormal(seed=self.seed),
                      bias_initializer=initializers.Zeros())
        layer.build(input_shape=(None, _input_dim))
        self.layers.append(layer)
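
The constructor only builds and stores the layers; the forward pass is not part of the snippet. A minimal sketch of what a `call` method could look like, assuming the layers are simply applied in order:

    def call(self, inputs):
        # Assumed forward pass: chain the dynamically built Dense layers.
        x = inputs
        for layer in self.layers:
            x = layer(x)
        return x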
Example #5
    def _create_bottom_mlp(self):
        self._create_bottom_mlp_padding()
        self.bottom_mlp_layers = []
        for dim in self.bottom_mlp_dims:
            kernel_initializer = initializers.GlorotNormal()
            bias_initializer = initializers.RandomNormal(stddev=math.sqrt(1. / dim))

            if self.data_parallel_bottom_mlp:
                kernel_initializer = BroadcastingInitializer(kernel_initializer)
                bias_initializer = BroadcastingInitializer(bias_initializer)

            layer = tf.keras.layers.Dense(dim, activation='relu',
                                          kernel_initializer=kernel_initializer,
                                          bias_initializer=bias_initializer)
            self.bottom_mlp_layers.append(layer)
Example #6
    def _create_top_mlp(self):
        self._create_top_mlp_padding()
        self.top_mlp = []
        for i, dim in enumerate(self.top_mlp_dims):
            if i == len(self.top_mlp_dims) - 1:
                # final layer
                activation = 'linear'
            else:
                activation = 'relu'

            kernel_initializer = BroadcastingInitializer(initializers.GlorotNormal())
            bias_initializer = BroadcastingInitializer(initializers.RandomNormal(stddev=math.sqrt(1. / dim)))

            layer = tf.keras.layers.Dense(dim, activation=activation,
                                          kernel_initializer=kernel_initializer,
                                          bias_initializer=bias_initializer)
            self.top_mlp.append(layer)
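
Neither helper runs the MLPs; a hedged sketch of how the bottom stack might be applied in the forward pass (the method name and input are assumptions, and the same pattern would apply to `self.top_mlp`):

    def _run_bottom_mlp(self, dense_features):
        # Assumed: feed the numerical features through the bottom MLP stack.
        x = dense_features
        for layer in self.bottom_mlp_layers:
            x = layer(x)
        return x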
Example #7
    def last_insert_layer(self, layer_dim):
        prev_weights, prev_bias = self.layers[-1].get_weights()
        prev_units = prev_weights.shape[1]

        replace_prev_layer = Dense(
            units=prev_units,
            activation=tf.nn.relu,
            kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),
        )
        replace_prev_layer.build(input_shape=(None, prev_weights.shape[0]))
        replace_prev_layer.set_weights([prev_weights, prev_bias])

        added_layer = Dense(
            units=layer_dim,
            activation=tf.nn.sigmoid,
            kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),
            kernel_initializer=initializers.GlorotNormal(seed=self.seed),
            bias_initializer=initializers.Zeros())
        added_layer.build(input_shape=(None, prev_units))

        del self.layers[-1]
        self.layers.append(replace_prev_layer)
        self.layers.append(added_layer)
Example #8
    def build_1CNNBase(self, action_space=6, dueling=True):
        self.network_size = 256
        X_input = Input(shape=(self.REM_STEP * 90,))  # shape must be a tuple
        # X_input = Input(shape=(self.REM_STEP*7,))
        input_reshape = (self.REM_STEP, 90)
        X = X_input
        truncatedn_init = initializers.TruncatedNormal(0, 1e-2)
        x_init = initializers.GlorotNormal()
        y_init = initializers.glorot_uniform()
        const_init = initializers.constant(1e-2)
        X_reshaped = Reshape(input_reshape)(X_input)

        # slice2 = Permute((2,1))(slice2)
        # normalisation for each layer
        normlayer_0 = LayerNormalization(
            axis=-1, trainable=True, epsilon=0.0001, center=False,
            scale=True,
            beta_initializer="zeros",
            gamma_initializer="ones")
        normlayer_1 = LayerNormalization(
            axis=-1, trainable=True, epsilon=0.001, center=True,
            scale=True,
            beta_initializer="zeros",
            gamma_initializer="ones")
        normlayer_2 = LayerNormalization(
            axis=-1, trainable=True, epsilon=0.001, center=True,
            scale=True,
            beta_initializer="zeros",
            gamma_initializer="ones")
        normlayer_3 = LayerNormalization(
            axis=-1, trainable=True, epsilon=0.001, center=True,
            scale=True,
            beta_initializer="zeros",
            gamma_initializer="ones")

        X = normlayer_0(X_reshaped)
        # X = TimeDistributed(Dense(128, activation ="relu", kernel_initializer='he_uniform',))(X)
        # 
     

        # cnn2 = TimeDistributed(Dense(64, kernel_initializer='he_uniform',))(X) 
        # cnn2 = LeakyReLU(.4)(cnn2)
        # cnn2 = MaxPooling1D(2)(cnn2) 
        # cnn2 = Flatten()(cnn2) 

        # cnn2 = LocallyConnected1D(filters=64, kernel_initializer='he_uniform', kernel_size=2)(X)
        # cnn2 = LeakyReLU(0.3)(cnn2)
        # cnn2 = LocallyConnected1D(filters=64, kernel_initializer='he_uniform', kernel_size=2)(X)
        # cnn2 = Dense(128,activation="relu", kernel_initializer='he_uniform', )(cnn2)
        # cnn2 = Flatten()(cnn2)


        # X = Dense(32, 
        #           kernel_initializer='he_uniform',activation ="relu")(X) 
        # X = normlayer_3(X)
        # X = Dense(64, 
        #           kernel_initializer='he_uniform',activation =LeakyReLU(.3))(X)
        X = Flatten()(X)
        X = Dense(512, activation=LeakyReLU(.1), kernel_initializer=x_init)(X)
        X = Dense(256, activation=LeakyReLU(.1), kernel_initializer=x_init)(X)
        if dueling:
            # State value is a single scalar, so keep it linear;
            # a softmax over one unit would always output 1.
            state_value = Dense(
                1, kernel_initializer=x_init, activation="linear")(X)
            state_value = Lambda(lambda s: K.expand_dims(
                s[:, 0], -1), output_shape=(action_space,))(state_value)

            action_advantage = Dense(
                action_space, kernel_initializer=x_init, activation="linear")(X)
            action_advantage = Lambda(lambda a: a[:, :] - K.mean(
                a[:, :], keepdims=True), output_shape=(action_space,))(action_advantage)

            X = Add()([state_value, action_advantage])
        else:
            # Output Layer with # of actions: 2 nodes (left, right)
            X = Dense(action_space, activation="relu",
                      kernel_initializer='he_uniform')(X)

        model = Model(inputs=X_input, outputs=X, name='build_TMaxpoolin_3')
        model.compile(loss=tf.keras.losses.Huber(delta=2),
                      metrics=['mean_absolute_error', 'accuracy'],
                      optimizer=Adam(learning_rate=self.learning_rate))

        # model.compile(loss="mean_squared_error", optimizer=Adam(lr=0.00025,epsilon=0.01), metrics=["accuracy"])
        model.summary()
        return model
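
The dueling head above follows the standard aggregation Q(s, a) = V(s) + (A(s, a) - mean(A(s, ·))); the two Lambda layers handle the broadcasting and mean subtraction before the Add. A standalone sketch of the same idea on plain tensors (shapes are assumptions, and the mean is taken per sample over the action axis, which is the textbook convention):

import tensorflow as tf

def dueling_q(state_value, advantages):
    # state_value: (batch, 1), advantages: (batch, n_actions) -- assumed shapes
    return state_value + (advantages - tf.reduce_mean(advantages, axis=1, keepdims=True))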
Example #9
    def build_modelPar1(self, dueling=True):
        self.network_size = 256
        X_input = Input(shape=(self.REM_STEP * 46,))  # shape must be a tuple
        # X_input = Input(shape=(self.REM_STEP*7,))
        input_reshape = (self.REM_STEP, 1, 46)
        X = X_input
        truncatedn_init = initializers.TruncatedNormal(0, 1e-2)
        x_init = initializers.GlorotNormal()
        y_init = initializers.glorot_uniform()
        const_init = initializers.constant(1e-2)

        X_reshaped = Reshape((2,-1))(X_input)
        
        normlayer_0 = LayerNormalization(
            axis=-1, trainable=True, epsilon=0.0001, center=True,
            scale=True,
            beta_initializer="zeros")

        normlayer_1 = LayerNormalization(
            axis=-1, trainable=True, epsilon=0.0001, center=True,
            scale=True,
            beta_initializer="zeros", gamma_initializer="ones")

        normlayer_2 = LayerNormalization(
            axis=-1, trainable=True, epsilon=0.0001, center=True,
            scale=True,
            beta_initializer="zeros", gamma_initializer="ones")


        const_init = initializers.constant(1e-2)
        
        t = Reshape(input_reshape)(X)
        cnn2 = Conv2D(filters=32, activation="relu", kernel_initializer=x_init,
                      kernel_size=1, strides=(2, 1), padding="valid")(t)
        cnn2 = normlayer_0(cnn2)
        cnn2 = Reshape((1, -1))(cnn2)
        cnn2 = LocallyConnected1D(filters=64, activation="relu", kernel_initializer=x_init,
                                  kernel_size=1, strides=1, padding="valid")(cnn2)
        # cnn2 = Flatten()(cnn2)

        cnn1 = TimeDistributed(Dense(64, activation="tanh", kernel_initializer=x_init))(X_reshaped)
        cnn1 = normlayer_1(cnn1)
        cnn1 = TimeDistributed(Dense(32, activation="tanh", kernel_initializer=x_init))(cnn1)
        # cnn1 = Flatten()(cnn1)
        cnn1 = Reshape((1, -1))(cnn1)

        conc = concatenate([cnn1, cnn2])
        f = Flatten()(conc)
        w = Dense(512, activation="relu", kernel_initializer=x_init)(f)
        w = Dense(256, activation="relu", kernel_initializer=x_init)(w)

        state_value = Dense(1, kernel_initializer=x_init)(w)
        state_value = Lambda(lambda s: K.expand_dims(
            s[:, 0], -1), output_shape=(self.action_space,))(state_value)

        action_advantage = Dense(
            self.action_space, activation='linear', kernel_initializer=x_init)(w)
        action_advantage = Lambda(lambda a: a[:, :] - K.mean(
            a[:, :], keepdims=True), output_shape=(self.action_space,))(action_advantage)

        out = Add()([state_value, action_advantage])
  
        model = Model([X_input], out, name='TCnn-model_1')

        if self.optimizer_model == 'Adam':
            optimizer = Adam(learning_rate=self.learning_rate, clipnorm=2.)
        elif self.optimizer_model == 'RMSProp':
            optimizer = RMSprop(self.learning_rate, 0.99, 0.0, 1e-6)
        else:
            # Fail loudly instead of hitting a NameError on `optimizer` below.
            raise ValueError(f'Invalid optimizer: {self.optimizer_model}')

        model.compile(loss=tf.keras.losses.Huber(delta=10),
                      metrics=['mean_absolute_error', 'accuracy'],
                      optimizer=optimizer)
        model.summary()
        return model
Example #10
# Define some variables for ease of change in the model
Stock = 'NVDA'  # Stock to predict
checkpoint_path = f"V4models/{Stock} Check point.ckpt"
model_path = f'V4models/{Stock}.tf'
load_path = model_path  # CHANGE THIS
checkpoint_dir = os.path.dirname(checkpoint_path)
tolerant_rate = 2.0  # Threshold for holding stock instead of buy/sell
ACTIVATION = 'tanh'
RECURRENT_ACTIVATION = 'sigmoid'
DROPOUT = [0.003, 0.003, 0.003, 0.003]
UNITS = [128, 256, 256, 256]
BATCH_SIZE = 30
EPOCH = 100  # number of training epochs
SEQ_LEN = 40  # how many days back used to predict, optimal 30/40 for NVDA
initializer = initializers.GlorotNormal()  # Xavier Normal initializer
LEARNING_RATE = 0.0007  # not using for now
LEARNING_DECAY = 1e-6  # not using for now
train_data_pct = 0.80  # percentage of data used for training
# opt = tf.keras.optimizers.Adam(lr=LEARNING_RATE, decay=LEARNING_DECAY)
opt = tf.keras.optimizers.Adam()
TODAY = pd.to_datetime('today').to_pydatetime()
offset = max(1, (TODAY.weekday() + 6) % 7 - 3)
timedelta = datetime.timedelta(offset)
START_DATE = '2010-1-1'
LAST_WORKING_DAY = TODAY - timedelta
print('All variables initialized.')

print(
    f'Stock to predict: {Stock}. The last working day is {LAST_WORKING_DAY}, getting data from {START_DATE} to {LAST_WORKING_DAY}.'
)
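
The snippet stops at the hyper-parameters; the model itself is not shown. A hedged sketch of a stacked-LSTM builder that is consistent with UNITS, DROPOUT, ACTIVATION, RECURRENT_ACTIVATION and initializer above (the feature count, the single-output regression head and the loss are assumptions):

def build_model(n_features):
    # Assumed architecture: one LSTM block per entry in UNITS, Xavier-initialized.
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Input(shape=(SEQ_LEN, n_features)))
    for i, (units, dropout) in enumerate(zip(UNITS, DROPOUT)):
        model.add(tf.keras.layers.LSTM(
            units,
            activation=ACTIVATION,
            recurrent_activation=RECURRENT_ACTIVATION,
            kernel_initializer=initializer,
            dropout=dropout,
            return_sequences=(i < len(UNITS) - 1)))
    model.add(tf.keras.layers.Dense(1))
    model.compile(optimizer=opt, loss='mse')
    return model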
Example #11
epochs = 100
batch_size = 75
steps_per_epoch = 18

# Model

model = Sequential()

# Input Layer
model.add(Input(shape=(height, width, 3)))

# Conv Layers
model.add(
    layers.Conv2D(4, (5, 5),
                  activation='relu',
                  kernel_initializer=initializers.GlorotNormal()))
model.add(layers.Conv2D(4, (5, 5), activation='relu'))
model.add(layers.Conv2D(4, (5, 5), activation='relu'))
model.add(layers.Conv2D(4, (5, 5), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))

model.add(layers.Conv2D(8, (5, 5), activation='relu'))
model.add(layers.Conv2D(8, (5, 5), activation='relu'))
model.add(layers.Conv2D(8, (5, 5), activation='relu'))
model.add(layers.Conv2D(8, (5, 5), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))

model.add(layers.Conv2D(32, (5, 5), activation='relu'))
model.add(layers.Conv2D(32, (5, 5), activation='relu'))
model.add(layers.Conv2D(32, (5, 5), activation='relu'))
model.add(layers.Conv2D(32, (5, 5), activation='relu'))
Example #12
def model(batch_size, k, alpha, lamda):

    initializer = initializers.GlorotNormal()

    ## Encoder
    inputs = Layers.Input(shape=[28, 28])
    reshape = Layers.Reshape([28, 28, 1])(inputs)
    flatten = Layers.Flatten()(inputs)
    conv_1 = Layers.Conv2D(6,
                           kernel_size=5,
                           padding='valid',
                           activation='relu',
                           kernel_initializer=initializer)(reshape)
    max_1 = Layers.MaxPool2D(pool_size=2)(conv_1)
    conv_2 = Layers.Conv2D(16,
                           kernel_size=5,
                           padding='valid',
                           activation='relu',
                           kernel_initializer=initializer)(max_1)
    max_2 = Layers.MaxPool2D(pool_size=2)(conv_2)
    conv_3 = Layers.Conv2D(60,
                           kernel_size=4,
                           padding='valid',
                           activation='relu',
                           kernel_initializer=initializer)(max_2)
    latent = Layers.Flatten()(conv_3)
    [non_anchor,
     anchor] = Discriminative(alpha=alpha,
                              batch_size=batch_size,
                              k=k,
                              name='discriminative')([flatten, latent])
    encoder = Model(inputs=[inputs],
                    outputs=[non_anchor, anchor, conv_3, latent])

    ## Decoder
    decoder_inputs = Layers.Input(shape=[1, 1, 60])
    recon_1 = Layers.Conv2DTranspose(
        16,
        kernel_size=4,
        strides=1,
        padding="valid",
        activation="relu",
        kernel_initializer=initializer)(decoder_inputs)
    recon_2 = Layers.Conv2DTranspose(16,
                                     kernel_size=2,
                                     strides=2,
                                     padding="valid",
                                     activation="relu",
                                     kernel_initializer=initializer)(recon_1)
    recon_3 = Layers.Conv2DTranspose(6,
                                     kernel_size=5,
                                     strides=1,
                                     padding="valid",
                                     activation="relu",
                                     kernel_initializer=initializer)(recon_2)
    recon_4 = Layers.Conv2DTranspose(6,
                                     kernel_size=2,
                                     strides=2,
                                     padding="valid",
                                     activation="relu",
                                     kernel_initializer=initializer)(recon_3)
    recon_5 = Layers.Conv2DTranspose(1,
                                     kernel_size=5,
                                     strides=1,
                                     padding="valid",
                                     activation="relu",
                                     kernel_initializer=initializer)(recon_4)
    output = Layers.Reshape([28, 28])(recon_5)
    decoder = Model(inputs=[decoder_inputs], outputs=[output])

    ## Autoencoder
    _, _, codings, _ = encoder(inputs)
    reconstructions = decoder(codings)
    autoencoder = Model(inputs=[inputs], outputs=[reconstructions])

    ## Custom Loss
    def custom_loss(lamda):
        """" Wrapper function which calculates the loss function.
         Returns a *function* which calculates the complete loss given only the input and target output """
        # Latent loss
        latent_loss = tf.reduce_sum(non_anchor - anchor)

        def overall_loss(y_true, y_pred):
            """ Final loss calculation function to be passed to optimizer"""
            # Reconstruction loss
            reconstruction_loss = lamda * tf.norm(tf.subtract(y_true, y_pred))
            # Overall loss
            model_loss = reconstruction_loss + latent_loss
            return model_loss

        return overall_loss

    ## Compilation, adding loss and optimiser
    optimiser = Adam(learning_rate=0.002)
    autoencoder.compile(loss=custom_loss(lamda=lamda), optimizer=optimiser)
    return autoencoder, encoder, decoder
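
A hedged usage sketch; the hyper-parameter values and `x_train` (MNIST-style 28x28 arrays) are illustrative only. Because the latent term is baked into the loss closure, the autoencoder is trained with the input as its own target:

# Illustrative values; batch_size should match what the Discriminative layer expects.
autoencoder, encoder, decoder = model(batch_size=32, k=5, alpha=0.1, lamda=0.5)
autoencoder.fit(x_train, x_train, batch_size=32, epochs=10)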
Example #13
class Demucs(keras.Model):
    """
    Demucs speech enhancement model.
    Args:
        - chin (int): number of input channels.
        - chout (int): number of output channels.
        - hidden (int): number of initial hidden channels.
        - depth (int): number of layers.
        - kernel_size (int): kernel size for each layer.
        - stride (int): stride for each layer.
        - causal (bool): if false, uses BiLSTM instead of LSTM.
        - resample (int): amount of resampling to apply to the input/output.
            Can be one of 1, 2 or 4.
        - growth (float): number of channels is multiplied by this for every layer.
        - max_hidden (int): maximum number of channels. Can be useful to
            control the size/speed of the model.
        - normalize (bool): if true, normalize the input.
        - glu (bool): if true uses GLU instead of ReLU in 1x1 convolutions.
        - rescale (float): controls custom weight initialization.
            See https://arxiv.org/abs/1911.13254.
        - floor (float): stability flooring when normalizing.

    """
    @capture_init
    def __init__(self,
                 chin=1,
                 chout=1,
                 hidden=48,
                 depth=5,
                 kernel_size=8,
                 stride=4,
                 causal=True,
                 resample=4,
                 growth=2,
                 max_hidden=10_000,
                 normalize=True,
                 glu=True,
                 rescale=0.1,
                 floor=1e-3,
                 name='demucs',
                 **kwargs):

        super().__init__(name=name, **kwargs)

        if resample not in [1, 2, 4]:
            raise ValueError("Resample should be 1, 2 or 4.")

        self.chin = chin
        self.chout = chout
        self.hidden = hidden
        self.depth = depth
        self.kernel_size = kernel_size
        self.stride = stride
        self.causal = causal
        self.resample = resample
        self.length_calc = LengthCalc(depth, kernel_size, stride)

        if resample == 2:
            self.upsample = Upsample2()
            self.downsample = Downsample2()
        elif resample == 4:
            self.upsample = keras.Sequential([Upsample2(), Upsample2()], name="upsample4")
            self.downsample = keras.Sequential([Downsample2(), Downsample2()], name="downsample4")
        else:
            self.upsample = None
            self.downsample = None

        if normalize:
            self.normalize = Normalize(floor)
        else:
            # Keep the attribute defined so the forward pass can branch on it.
            self.normalize = None

        self.encoder = []
        self.decoder = []

        if rescale:
            initializer = initializers.RandomNormal(stddev=rescale)
        else:
            initializer = initializers.GlorotNormal()

        for index in range(depth):
            # Encode layer: chin -> hidden
            encode = keras.Sequential(name=f"Encode_{index+1}")
            encode.add(layers.Conv1D(hidden, kernel_size, strides=stride, activation='relu', input_shape=(None,chin), kernel_initializer=initializer))
            if glu:
                encode.add(layers.Conv1D(hidden*2, 1, kernel_initializer=initializer))
                encode.add(GLU())
            else:
                encode.add(layers.Conv1D(hidden, 1, activation='relu', kernel_initializer=initializer))
            self.encoder.append(encode)

            # Decode layer: hidden -> chout
            decode = keras.Sequential(name=f"Decode_{index+1}")
            if glu:
                decode.add(layers.Conv1D(hidden*2, 1, kernel_initializer=initializer))
                decode.add(GLU())
            else:
                decode.add(layers.Conv1D(hidden, 1, activation='relu', kernel_initializer=initializer))
            decode.add(layers.Conv1DTranspose(chout, kernel_size, strides=stride, kernel_initializer=initializer))
            self.decoder.insert(0, decode)

            # Update chin, chout, hidden for next depth, growing hidden by growth
            chout = hidden
            chin = hidden
            hidden = min(int(growth * hidden), max_hidden)

        self.lstm = BLSTM(bi=not causal)
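
A hedged instantiation example; the forward pass is defined elsewhere in the original class, and the argument values below simply spell out the defaults for clarity:

# Causal (streaming-friendly) variant with an LSTM bottleneck.
model = Demucs(chin=1, chout=1, hidden=48, depth=5, causal=True)

# Non-causal variant; per the docstring this switches the bottleneck to a BiLSTM.
offline_model = Demucs(causal=False)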