Example #1
    def build_model(self, input_shape, nb_classes):

        input_layer = keras.layers.Input(input_shape)
        #num_feat=30
        nb_filters = 8
        kernel_size = 12
        dilations = [2**i for i in range(2)]
        padding = 'causal'
        nb_stacks = 1
        #max_len=X_train[0:1].shape[1]
        use_skip_connections = True
        use_batch_norm = True
        dropout_rate = 0.05
        kernel_initializer = 'he_normal'
        #lr=0.00
        activation = 'relu'
        use_layer_norm = True

        return_sequences = True
        #name='tcn_1'
        enc = TCN(nb_filters, kernel_size, nb_stacks, dilations, padding,
                  use_skip_connections, dropout_rate, return_sequences,
                  activation, kernel_initializer, use_batch_norm,
                  use_layer_norm)(input_layer)
        #output_layer = keras.layers.Dense(nb_classes,
        emb = keras.layers.Dense(16, activation='relu')(enc)
        #emb_rep = keras.layers.RepeatVector(input_shape[0])(emb)
        return_sequences = True
        nb_filters = 8
        decod = TCN(nb_filters, kernel_size, nb_stacks, dilations, padding,
                    use_skip_connections, dropout_rate, return_sequences,
                    activation, kernel_initializer, use_batch_norm,
                    use_layer_norm)(emb)

        # reconstruct the input features from the decoder TCN output
        output = keras.layers.Dense(input_shape[1],
                                    activation='sigmoid')(decod)

        model = keras.models.Model(inputs=input_layer, outputs=output)

        model.compile(loss='mse', optimizer=Adam())

        self.encoder = keras.models.Model(inputs=input_layer, outputs=enc)
        self.decoder = keras.models.Model(inputs=input_layer, outputs=decod)
        self.emb = keras.models.Model(inputs=input_layer, outputs=emb)
        print(model.summary())

        reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                      factor=0.5,
                                                      patience=10,
                                                      min_lr=0.0001)

        file_path = self.output_directory + 'best_model'

        model_checkpoint = keras.callbacks.ModelCheckpoint(filepath=file_path,
                                                           monitor='loss',
                                                           save_best_only=True)

        self.callbacks = [reduce_lr, model_checkpoint]

        return model
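The TCN arguments in this example are passed positionally, which is fragile across keras-tcn versions. A hedged sketch of the same encoder call with explicit keyword arguments (assuming the keras-tcn 3.x parameter names) makes the mapping clear:

# Equivalent encoder call with keyword arguments (keras-tcn 3.x names assumed).
enc = TCN(nb_filters=8, kernel_size=12, nb_stacks=1, dilations=[1, 2],
          padding='causal', use_skip_connections=True, dropout_rate=0.05,
          return_sequences=True, activation='relu',
          kernel_initializer='he_normal', use_batch_norm=True,
          use_layer_norm=True)(input_layer)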
Example #2
def build_discriminator(time_steps, feature_size):
    print(backend.image_data_format())

    inputs = Input(batch_shape=(16, time_steps, feature_size))

    output1 = TCN(nb_filters=16,
                  dilations=[1, 2, 4, 8, 16, 32, 64],
                  kernel_size=3,
                  dropout_rate=0.1,
                  return_sequences=True)(inputs)
    output2 = TCN(nb_filters=32,
                  dilations=[1, 2, 4, 8, 16, 32, 64],
                  kernel_size=3,
                  dropout_rate=0.1,
                  return_sequences=True)(output1)
    output3 = TCN(nb_filters=32,
                  dilations=[1, 2, 4, 8, 16, 32, 64],
                  kernel_size=3,
                  dropout_rate=0.1,
                  return_sequences=False)(output2)

    output = Dense(8, activation='softmax')(output3)

    model = Model(inputs=[inputs], outputs=[output])
    model.summary()

    return model
Example #3
def define_models(n_input, n_output):
    # encoder part of the training model  32 16 128
    encoder_inputs = Input(shape=(None, n_input))  #500  32
    encoder = TCN(64, activation='relu', return_sequences=True)(encoder_inputs)
    encoder2 = TCN(32, activation='relu', return_sequences=True)(encoder)
    encoder3 = TCN(16, activation='relu')(encoder2)
    decoder_dense = Dense(n_output, activation='linear')(encoder3)
    model = Model(encoder_inputs, decoder_dense)
    return model
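A hedged usage sketch (not part of the original) showing how this sequence-to-vector encoder might be exercised with dummy data; it assumes the Input/Dense/Model/TCN imports used above are in scope.

import numpy as np

m = define_models(n_input=32, n_output=16)
m.compile(optimizer='adam', loss='mse')
X = np.random.rand(8, 500, 32).astype('float32')   # (samples, timesteps, n_input)
y = np.random.rand(8, 16).astype('float32')        # (samples, n_output)
m.fit(X, y, epochs=1, batch_size=4, verbose=0)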
Example #4
    def model(self):
        #build model around TCN
        #As a general rule, keep kernel_size fixed at 2 and let the dilations increase by a factor of 2
        #The equation to find the ideal sizes is: receptive field = nb_stacks_of_residual_blocks (nb_stacks) * kernel_size * last_dilation
        #Each layer adds linearly to the receptive field
        self.built = True
        i = Input(batch_shape=(self.batch_size, self.moments-1, self.input_dim))
        #Model 1: Simple TCN for the lower layer and LSTM for the upper, set to handle a receptive field of around 64
        #for the TCN, compressed down for the LSTM; built for self.moments between 40-80, net worth and accuracy can be used for testing
        #########################################################################
        #x1 = TCN(return_sequences=True, nb_filters=32, nb_stacks = 1, dropout_rate=.0, kernel_size=2)(i)
        #x1 = Dense(4, activation='linear')(x1)
        #o = LSTM(4, dropout=.3)(x1)
        #########################################################################

        # Model 2: 1*10^-6 error, average net worth change per tick = 23/(774-60) = poor; built for self.moments between 40-80, net worth and accuracy can be used for testing
        #########################################################################
        #i = LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, n_features))(i) #optional addition to try stacked LSTM not added yet
        #x = LSTM(50, dropout=.3, activation='relu')(i)
        #o = Dense(4, activation='softmax')(x)
        #########################################################################

        #Model 3: TCN with an LSTM on top and a dual bottom layer, built for self.moments between 40-80, net worth and accuracy can be used for testing
        #########################################################################
        x1 = TCN(return_sequences=True, nb_filters=64, dilations = [1, 2, 4, 8, 16, 32], nb_stacks=1, dropout_rate=.1, kernel_size=2)(i)
        x2 = Lambda(lambda z: backend.reverse(z, axes=0))(i)
        x2 = TCN(return_sequences=True, nb_filters=64, dilations = [1, 2, 4, 8, 16, 32], nb_stacks=1, dropout_rate=.1, kernel_size=2)(x2)
        x = add([x1, x2])
        o = LSTM(4, dropout=.1)(x)
        #########################################################################

        # Model 4: Layered TCN with an LSTM on top, built for self.moments between 40-80, net worth and accuracy can be used for testing
        #########################################################################
        #x1 = TCN(return_sequences=True, nb_filters = 64, dilations = [1, 2, 4, 8, 16, 32], nb_stacks = 1, dropout_rate=.1, kernel_size=2)(i)
        #x1 = TCN(return_sequences=True, nb_filters = 64, dilations = [1, 2, 4, 8, 16, 32], nb_stacks = 1, dropout_rate=.1, kernel_size=2)(x1)
        #x1 = Dense(4, activation='linear')(x1)
        #x2 = LSTM(4, dropout=.3)(i)
        #x = add([x1, x2])
        #o = concatenate([GlobalMaxPooling1D()(x), GlobalAveragePooling1D()(x)])
        #o = Dense(4, activation='linear')(o)
        #########################################################################

        # Model 5: Dual layered TCN, built for self.moments between 40-80, net worth and accuracy can be used for testing
        #########################################################################
        #x1 = TCN(return_sequences=True, nb_filters=64, dilations =[1, 2, 4, 8, 16, 32], nb_stacks=1, dropout_rate=.1, kernel_size=1)(i)
        #x2 = Lambda(lambda z: backend.reverse(z, axes=0))(i)
        #x2 = TCN(return_sequences=True, nb_filters=64, dilations =[1, 2, 4, 8, 16, 32], nb_stacks=1, dropout_rate=.1, kernel_size=1)(x2)
        #x = add([x1, x2])
        #x1 = TCN(return_sequences=True, nb_filters=64, dilations =[1, 2, 4, 8], nb_stacks=1, dropout_rate=.1, kernel_size=1)(x)
        #x2 = Lambda(lambda z: backend.reverse(z, axes=0))(x)
        #x2 = TCN(return_sequences=True, nb_filters=64, dilations =[1, 2, 4, 8], nb_stacks=1, dropout_rate=.1, kernel_size=1)(x2)
        #x = add([x1, x2])
        #o = concatenate([GlobalMaxPooling1D()(x), GlobalAveragePooling1D()(x)])
        #o = Dense(4, activation='linear')(o)
        self.m = Model(inputs=i, outputs=o)
        self.m.compile(optimizer='adam', loss=custom_loss) #optimizer and loss can be changed to what we want
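The receptive-field rule of thumb quoted in the comments above can be cross-checked against the value keras-tcn reports itself; a minimal sketch, assuming keras-tcn 3.x where the TCN layer exposes a receptive_field property:

from tcn import TCN

probe = TCN(nb_filters=64, kernel_size=2, nb_stacks=1, dilations=[1, 2, 4, 8, 16, 32])
# keras-tcn computes the exact receptive field from kernel_size, nb_stacks and dilations;
# the nb_stacks * kernel_size * last_dilation formula above is only an approximation.
print('receptive field:', probe.receptive_field)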
Example #5
def TCN_model(X_Train, Y_train, batch_size, activation, dilations, nbfilters,
              kernelsize, nbstacks, batch_norm, layer_norm, weight_norm,
              dropout_rate, lookback_window):
    from tcn import TCN, tcn_full_summary
    from tensorflow.keras.layers import Dense
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.callbacks import EarlyStopping
    from tensorflow.keras.optimizers.schedules import ExponentialDecay
    from tensorflow.keras.optimizers import Adam
    import tensorflow as tf
    from tensorflow.keras.regularizers import l2

    batch_size, time_steps, input_dim = batch_size, 10, X_Train.shape[2]
    lr_schedule = ExponentialDecay(initial_learning_rate=1e-2,
                                   decay_steps=1000,
                                   decay_rate=0.9)
    tcn_layer = TCN(input_shape=(lookback_window, X_Train.shape[2]),
                    activation=activation,
                    padding='causal',
                    dilations=dilations,
                    nb_filters=nbfilters,
                    kernel_size=kernelsize,
                    nb_stacks=nbstacks,
                    use_batch_norm=batch_norm,
                    use_layer_norm=layer_norm,
                    use_weight_norm=weight_norm,
                    dropout_rate=dropout_rate)
    model_all_data = Sequential([
        TCN(input_shape=(lookback_window, X_Train.shape[2]),
            activation=activation,
            padding='causal',
            dilations=dilations,
            nb_filters=nbfilters,
            kernel_size=kernelsize,
            nb_stacks=nbstacks,
            use_batch_norm=batch_norm,
            use_layer_norm=layer_norm,
            use_weight_norm=weight_norm,
            dropout_rate=dropout_rate),
        Dense(
            Y_train.shape[1],
            activation=activation,
            kernel_regularizer=l2(0.01),
        )
    ])
    # The receptive field tells you how far the model can see in terms of timesteps.
    print('Receptive field size =', tcn_layer.receptive_field)
    model_all_data.summary()
    adam = Adam(learning_rate=lr_schedule)
    model_all_data.compile(optimizer=adam,
                           loss=root_mean_squared_error,
                           metrics=['mse', 'mae', 'mape'])

    # tcn_full_summary(model_all_data, expand_residual_blocks=False)
    return model_all_data
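The compile call above references root_mean_squared_error, which is not defined in this snippet; a minimal sketch of what such a loss is assumed to look like:

import tensorflow.keras.backend as K

def root_mean_squared_error(y_true, y_pred):
    # RMSE over the batch, complementing the 'mse'/'mae' metrics used alongside it
    return K.sqrt(K.mean(K.square(y_pred - y_true)))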
Example #6
    def model(self):
        #build the model around a TCN
        self.built = True
        i = Input(batch_shape=(self.batch_size, self.moments - 1,
                               self.input_dim))
        o = TCN(return_sequences=True)(i)
        o = TCN(return_sequences=False)(o)
        o = Dense(1)(o)
        self.m = Model(inputs=i, outputs=o)
        self.m.compile(
            optimizer='adam',
            loss='mse')  #optimizer and loss can be changed to what we want
Example #7
def regression_model(input_dim = 31,tcn = False):
    if not tcn:
        from tensorflow.keras import layers, models
        import tensorflow as tf

        model = models.Sequential()
        model.add(layers.GRU(128, return_sequences=True, input_shape=(None, input_dim)))
        model.add(layers.TimeDistributed(layers.Dense(13, activation='linear')))
        model.summary()
        model.compile(loss='mse',
                      optimizer=tf.keras.optimizers.Adam(lr=0.01))
    else:
        import tcn
        import keras
        from keras.layers import Dense,TimeDistributed
        from keras.models import Input, Model
        from tcn import TCN
        i = Input(batch_shape=(None, None, input_dim))

        o = TCN(return_sequences=True)(i)  # The TCN layers are here.
        o = TimeDistributed(Dense(13, activation='linear'))(o)

        model = Model(inputs=[i], outputs=[o])
        model.summary()
        model.compile(loss='mse',
                      optimizer= keras.optimizers.Adam(lr=0.01))

    return model
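A hedged usage sketch (not from the original): both branches return a sequence-to-sequence model with 13 outputs per timestep, so the targets must be 3-D as well.

import numpy as np

m = regression_model(input_dim=31, tcn=True)
X = np.random.rand(4, 50, 31).astype('float32')   # (batch, timesteps, input_dim)
y = np.random.rand(4, 50, 13).astype('float32')   # one 13-dim target per timestep
m.fit(X, y, epochs=1, verbose=0)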
Example #8
def gen_stcn(seq_num, n_features, dropout, training_t):
    tcn_inputs = Input(batch_shape=(None, seq_num, n_features))
    tcn = TCN(nb_filters=64,
              kernel_size=2,
              activation='relu',
              dropout_rate=0.1,
              return_sequences=True)(tcn_inputs,
                                     training=training_t)  # o = 6,64
    tcn = TCN(nb_filters=64,
              kernel_size=2,
              activation='relu',
              dropout_rate=0.1,
              return_sequences=False)(tcn, training=training_t)  # o = 6,64
    op = Dense(seq_num * n_features)(tcn)
    model = Model(inputs=tcn_inputs, outputs=op)
    return model
Example #9
def create_model(optimizer="Adam", init="he_normal"):

    # Input layer:
    i = Input(shape=input_shape)

    # Temporal convolutional layer
    o = TCN(nb_filters=nb_filters,
            kernel_size=filter_size,
            dilations=dilation_list,
            padding=padding,
            use_skip_connections=use_skip_connections,
            dropout_rate=dropout_rate,
            activation=activation,
            kernel_initializer=kernel_initializer,
            use_batch_norm=use_batch_norm,
            use_layer_norm=use_layer_norm,
            return_sequences=False)(i)

    # Output Layer: sigmoid for a single-unit binary output
    # (softmax over one unit would always emit 1.0)
    o = Dense(1, activation='sigmoid')(o)

    model = Model(inputs=[i], outputs=[o])
    # Compile model with the optimizer argument
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    # history = model.fit(x_train, y_train,
    #                     validation_data=[x_val, y_val],
    #                     batch_size=params['batch_size'],
    #                     epochs=params['epochs'],
    #                     verbose=0)

    # finally we have to make sure that history object and model are returned
    return model  #, history
Example #10
    def defineNetwork(self):
        numFeatsFlow = int(self.cnnModel.output.shape[1])
        # [ b, f, t ]
        flowInp = Input(shape=(self.flowSteps, numFeatsFlow))
        imuModel = self.imuBlock(self.imuShape)
        merge = concatenate([flowInp, imuModel.outputs[0]])
        # [ b, t, f ]
        y = Permute((2, 1))(merge)
        y = TCN(nb_filters=128,
                nb_stacks=3,
                kernel_size=3,
                use_skip_connections=True,
                return_sequences=False,
                dropout_rate=0.3,
                dilations=[1, 2, 4, 8])(y)  # apply the TCN to the permuted [b, t, f] tensor
        y = Dense(self.classes,
                  kernel_regularizer=regularizers.l2(0.01),
                  activation='softmax')(y)

        model = Model([flowInp, imuModel.inputs[0]], y)
        optimizer = SGD(lr=1e-2,
                        momentum=0.9,
                        decay=1e-4,
                        clipnorm=1.,
                        clipvalue=0.5)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'])
        return model
Example #11
def decode(raw, event_id, tmin, tmax):
    epochs = helper.getEpochs(raw, event_id, tmin=tmin, tmax=tmax)
    X = epochs.get_data()

    #swapping features and time points
    X = np.transpose(
        X, (0, 2, 1)
    )  #prepare X shape to be compatible with LSTM (not sure this is the best way)
    y = epochs.events[:, -1]

    cv = StratifiedShuffleSplit(n_splits=10, test_size=0.2, random_state=42)

    for train, test in cv.split(X, y):
        X_train, X_test = X[train], X[test]
        y_train, y_test = y[train], y[test]
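    # Note: the loop above overwrites X_train/X_test each iteration, so only the
    # split from the final CV fold is actually used for training below.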

    batch_size, timesteps, input_dim = None, 376, 8

    i = Input(batch_shape=(batch_size, timesteps, input_dim))
    o = TCN(return_sequences=False)(i)
    o = Dense(1)(o)
    m = Model(inputs=[i], outputs=[o])
    m.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

    history = m.fit(X_train, y_train, validation_split=0.2, epochs=5)
    score = m.evaluate(X_test, y_test, batch_size=16)

    print("Accuracy: %.2f%%" % (score[1] * 100))
Example #12
def get_tcn(seq_len,
            feature_dim,
            output_dim=2,
            nb_filters=8,
            nb_stacks=3,
            use_skip_connections=True,
            return_sequences=True,
            use_batch_norm=True,
            dilation_stages=5):

    dilations = [2**x for x in range(dilation_stages)]
    i = Input(shape=(seq_len, feature_dim))
    x = Reshape(target_shape=(1, seq_len, feature_dim))(i)
    x = MyConv1D(filters=nb_filters,
                 kernel_size=3,
                 name='initial_conv',
                 kernel_initializer='glorot_normal')(x)
    x = TCN(nb_filters=nb_filters,
            kernel_size=3,
            dilations=dilations,
            nb_stacks=nb_stacks,
            use_skip_connections=use_skip_connections,
            return_sequences=return_sequences,
            use_batch_norm=use_batch_norm,
            kernel_initializer='glorot_normal')(x)
    if return_sequences:
        x = Reshape(target_shape=(seq_len, nb_filters))(x)
    else:
        x = Reshape(target_shape=(nb_filters, ))(x)
    x = Dense(output_dim)(x)
    x = Softmax()(x)
    model = Model(inputs=[i], outputs=[x])

    return model
Example #13
def train():
    # Good exercise: https://www.crcv.ucf.edu/data/UCF101.php
    # replace data() by this dataset.
    # Useful links:
    # - https://www.pyimagesearch.com/2019/07/15/video-classification-with-keras-and-deep-learning/
    # - https://github.com/sujiongming/UCF-101_video_classification
    x_train, y_train = data()

    inputs = Input(shape=(num_frames, h, w, c))
    # push num_frames in batch_dim to process all the frames independently of their orders (CNN features).
    x = Lambda(lambda y: K.reshape(y, (-1, h, w, c)))(inputs)
    # apply convolutions to each image of each video.
    x = Conv2D(16, 5)(x)
    x = MaxPool2D()(x)
    # re-creates the videos by reshaping.
    # 3D input shape (batch, timesteps, input_dim)
    num_features_cnn = np.prod(K.int_shape(x)[1:])
    x = Lambda(lambda y: K.reshape(y, (-1, num_frames, num_features_cnn)))(x)
    # apply the RNN on the time dimension (num_frames dim).
    x = TCN(16)(x)
    x = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=[inputs], outputs=[x])
    model.summary()
    model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
    print('Train...')
    model.fit(x_train, y_train, validation_split=0.2, epochs=5)
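data(), num_frames, h, w and c are defined elsewhere in the source; a hypothetical stand-in (clearly not the original data pipeline) so the function can be smoke-tested:

import numpy as np

num_frames, h, w, c = 8, 32, 32, 3   # assumed toy dimensions

def data():
    # random "videos" and binary labels, for shape-checking only
    x = np.random.rand(16, num_frames, h, w, c).astype('float32')
    y = np.random.randint(0, 2, size=(16, 1)).astype('float32')
    return x, y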
Example #14
def build_model(
        num_feat,  # type: int
        nb_filters,  # type: int
        kernel_size,  # type: int
        dilations,  # type: List[int]
        nb_stacks,  # type: int
        max_len,  # type: int
        output_len=1,  # type: int
        padding='causal',  # type: str
        use_skip_connections=False,  # type: bool
        return_sequences=True,
        regression=False,  # type: bool
        dropout_rate=0.2,  # type: float
        name='tcn',  # type: str,
        kernel_initializer='he_normal',  # type: str,
        activation='relu',
        lr=0.001,
        use_batch_norm=False,
        use_layer_norm=False,
        use_weight_norm=False):

    input_layer = Input(shape=(max_len, num_feat))

    x = TCN(nb_filters,
            kernel_size,
            nb_stacks,
            dilations,
            padding,
            use_skip_connections,
            dropout_rate,
            return_sequences,
            activation,
            kernel_initializer,
            use_batch_norm,
            use_layer_norm,
            use_weight_norm,
            name=name)(input_layer)

    print('x.shape=', x.shape)

    x = MaxPooling1D(3)(x)

    x = Flatten()(x)
    x = Dropout(dropout_rate)(x)

    x = Dense(2 * Num_classes, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Dense(Num_classes, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('softmax')(x)

    output_layer = x
    model = Model(input_layer, output_layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(lr=lr),
                  metrics=['accuracy'])

    return model
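Num_classes is a module-level constant in the source; a hedged call sketch (the values below are illustrative only, and the excerpt's own imports are assumed to be in scope):

Num_classes = 5   # assumed; defined at module level in the original

model = build_model(num_feat=12, nb_filters=32, kernel_size=3,
                    dilations=[1, 2, 4, 8], nb_stacks=1, max_len=128)
model.summary()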
Example #15
    def __init__(self, tcn_out_channel=64, c3d_path='', tcn_path=''):
        super(C3D_TCN, self).__init__()

        self.c3d = C3D(in_channels=3) 
        self.tcn = TCN(245760, [128,128,64,tcn_out_channel]) # 245760 == 128, 983040 == 256, 384000 == 160

        self.load_models(c3d_path, tcn_path)
Example #16
def TCN_model(filters, kernel_size, dilations, dropout, a, b):
    import numpy as np
    from tensorflow.keras import Sequential
    from tensorflow.keras.callbacks import Callback
    from tensorflow.keras.datasets import imdb
    from tensorflow.keras.layers import Dense, Dropout, Embedding
    from tensorflow.keras.preprocessing import sequence
    from tensorflow.keras import optimizers
    from tensorflow.keras.layers import MaxPooling1D

    from tcn import TCN

    model = Sequential()
    model.add(
        TCN(nb_filters=filters,
            kernel_size=kernel_size,
            dilations=dilations,
            input_shape=(a, b),
            return_sequences=False))
    #dilations=[1, 2, 4, 8, 16, 32, 64]))
    model.add(Dropout(dropout))
    #model.add(MaxPooling1D(pool_size=2, strides=None, padding='valid', data_format='channels_last'))
    #model.add(TCN(nb_filters=filters,
    #          kernel_size=kernel_size,
    #          dilations=dilations,
    #             return_sequences=False))
    #model.add(Dropout(dropout))
    model.add(Dense(1))
    #adm= optimizers.Adam(beta_1=0.9, beta_2=0.999, amsgrad=False, decay= 0.005)
    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=['accuracy'])

    return model
Example #17
def build_model(X_train, passbands=('g', 'r'), reframe=False, probabilistic=False, nunits=100, bayesian=False,
                dropout_rate=0.0):
    if bayesian:
        mc_dropout = True
    else:
        mc_dropout = None
    npb = len(passbands)

    inputs = Input(shape=(X_train.shape[1], X_train.shape[2]))
    hidden = Masking(mask_value=0.)(inputs)

    hidden = TCN(nunits, return_sequences=True, kernel_size=2, nb_stacks=1, dilations=[1, 2, 4, 8],
                 padding='causal', use_skip_connections=True, dropout_rate=dropout_rate, activation='sigmoid')(hidden, training=mc_dropout)
    hidden = Dropout(dropout_rate)(hidden, training=mc_dropout)

    if reframe is True:
        hidden = LSTM(nunits)(hidden)
        hidden = Dense(npb)(hidden)
    else:
        if probabilistic:
            hidden = TimeDistributed(Dense(npb * 2))(hidden)
        else:
            hidden = TimeDistributed(Dense(npb * 1))(hidden)

    if probabilistic:
        outputs = tfp.layers.DistributionLambda(
            lambda t: tfd.Normal(loc=t[..., :npb], scale=1e-3 + tf.math.softplus(0.01 * t[..., npb:])))(hidden)
    else:
        outputs = hidden

    model = Model(inputs, outputs)

    return model
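When probabilistic=True the model ends in a tfp DistributionLambda, so it would typically be compiled with a negative log-likelihood rather than a pointwise loss; a hedged sketch:

def negloglik(y_true, y_pred_dist):
    # y_pred_dist is the tfd.Normal produced by the DistributionLambda layer above
    return -y_pred_dist.log_prob(y_true)

# model.compile(optimizer='adam', loss=negloglik)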
Example #18
def create_tcn_model(num_features,
                     num_classes,
                     loss_func,
                     optimizer='adam',
                     nb_filters=64,
                     kernel_size=2,
                     nb_stacks=1,
                     dilations=[1, 2, 4],
                     dropout_rate=0.2,
                     use_skip_connections=True,
                     use_batch_norm=False,
                     activation='linear'):

    inp = Input(shape=(num_features, 1), name='input')

    c = TCN(nb_filters=nb_filters,
            kernel_size=kernel_size,
            nb_stacks=nb_stacks,
            dilations=dilations,
            use_skip_connections=use_skip_connections,
            dropout_rate=dropout_rate,
            activation=activation,
            use_batch_norm=use_batch_norm)(inp)
    c = layers.Dense(num_classes)(c)

    model = Model(inputs=inp, outputs=c)
    model.compile(optimizer=optimizer, loss=loss_func, metrics=['accuracy'])

    return model
Example #19
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 dropout=0,
                 residual=True):
        super(ST_GCN, self).__init__()

        assert len(kernel_size) == 2
        assert kernel_size[0] % 2 == 1
        padding = ((kernel_size[0] - 1) // 2, 0)

        self.gcn = ConvTemporalGraphical(in_channels, out_channels,
                                         kernel_size[1])

        self.tcn = TCN(out_channels, kernel_size, stride, padding, dropout)

        if not residual:
            self.residual = zero

        elif (in_channels == out_channels) and (stride == 1):
            self.residual = x1

        else:
            self.residual = nn.Sequential(
                nn.Conv2d(
                    in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=(stride, 1)),
                nn.BatchNorm2d(out_channels),
            )

        self.relu = nn.ReLU(inplace=True)
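zero and x1 are not defined in this excerpt; in ST-GCN style code they are presumably the zero map and the identity map used as residual branches. A hedged sketch of what they are assumed to be:

def zero(x):
    # residual branch disabled
    return 0

def x1(x):
    # identity residual branch
    return x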
Example #20
def tcn1(input_shape,
         logger,
         nb_filters=128,
         filter_size=2,
         optimizer="Adam",
         init="he_normal"):

    # Calculate number of blocks:
    def calc_dilations(filter_size, field):
        import math
        max_dil = field / filter_size
        max_dil = math.ceil(math.log(max_dil) / math.log(2))
        dil_list = [2**i for i in range(0, max_dil + 1)]
        return (dil_list)

    # TCN params
    nb_filters = nb_filters
    filter_size = filter_size
    dilation_list = calc_dilations(filter_size=filter_size,
                                   field=input_shape[1])

    padding = "same"
    use_skip_connections = True
    dropout_rate = 0.0
    activation = "relu"
    kernel_initializer = "he_normal"
    use_batch_norm = True
    use_layer_norm = True
    nb_stacks = 1

    logger.info(f"Dilation list: {dilation_list}")

    # Input layer:
    i = Input(shape=input_shape)

    # Temporal convolutional layer
    o = TCN(nb_filters=nb_filters,
            kernel_size=filter_size,
            dilations=dilation_list,
            padding=padding,
            use_skip_connections=use_skip_connections,
            dropout_rate=dropout_rate,
            activation=activation,
            kernel_initializer=kernel_initializer,
            use_batch_norm=use_batch_norm,
            use_layer_norm=use_layer_norm,
            return_sequences=False,
            nb_stacks=nb_stacks)(i)

    # Output Layer
    o = Dense(1, activation="sigmoid")(o)

    model = Model(inputs=[i], outputs=[o])
    # Compile model
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,  # use the optimizer argument instead of a hard-coded string
                  metrics=['accuracy'])

    return model
Example #21
def modelo_CNN3(X_train, y_train, X_test, y_test, individual, epocas):
    """
    Cria um modelo CNN3
    :parametro X_train: dados para treinamento
    :parametro y_train: rótulo dos dados de treinamento
    :parametro individual: dicionário com os hiperparâmetros do modelo
    :return: o modelo
    """
    warnings.filterwarnings('ignore')
    call = [
        EarlyStopping(monitor='loss', mode='min', patience=15, verbose=1),
    ]
    if individual['filters'] == 0:
        filters = 16
    elif individual['filters'] == 1:
        filters = 32
    else:
        filters = 64

    if individual['norm'] == 0:
        norm = False
    else:
        norm = True

    if individual['kernel_size'] == 0:
        kernel_size = 2
    elif individual['kernel_size'] == 1:
        kernel_size = 3
    elif individual['kernel_size'] == 2:
        kernel_size = 5
    else:
        kernel_size = 11

    d = []
    for i in range(individual['num_conv']):
        d.append(2**i)
    i = Input(batch_shape=(None, X_train.shape[1], 1))
    o = TCN(nb_filters=filters,
            kernel_size=kernel_size,
            nb_stacks=individual['pilhas'],
            dilations=d,
            padding='causal',
            use_skip_connections=False,
            dropout_rate=individual['dropout'],
            return_sequences=False,
            name='tcn')(i)

    o = Dense(1)(o)
    model = Model(inputs=[i], outputs=[o])
    model.compile(optimizer='Adam', loss='mse')
    history = model.fit(X_train,
                        y_train,
                        epochs=epocas,
                        verbose=0,
                        batch_size=filters,
                        validation_data=(X_test, y_test),
                        callbacks=call)

    return model, history
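A hedged illustration (not from the original) of the hyperparameter dictionary this function expects; note that the norm flag is decoded but never passed to the TCN layer above.

individual = {
    'filters': 1,       # -> 32 filters
    'norm': 0,          # decoded to False, but unused in the model above
    'kernel_size': 1,   # -> kernel size 3
    'num_conv': 4,      # -> dilations [1, 2, 4, 8]
    'pilhas': 1,        # nb_stacks
    'dropout': 0.1,
}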
Example #22
def create_tcn(list_n_filters=[8],
               kernel_size=4,
               dilations=[1, 2],
               nb_stacks=1,
               activation='norm_relu',
               n_layers=1,
               dropout_rate=0.05,
               use_skip_connections=True,
               bidirectional=True):
    if bidirectional:
        padding = 'same'
    else:
        padding = 'causal'

    dilations = process_dilations(dilations)

    input_layer = Input(shape=(None, config.N_MELS))

    for i in range(n_layers):
        if i == 0:
            x = TCN(list_n_filters[i],
                    kernel_size,
                    nb_stacks,
                    dilations,
                    activation,
                    padding,
                    use_skip_connections,
                    dropout_rate,
                    return_sequences=True)(input_layer)
        else:
            x = TCN(list_n_filters[i],
                    kernel_size,
                    nb_stacks,
                    dilations,
                    activation,
                    padding,
                    use_skip_connections,
                    dropout_rate,
                    return_sequences=True,
                    name="tcn" + str(i))(x)

    x = Dense(config.CLASSES)(x)
    x = Activation('sigmoid')(x)
    output_layer = x

    return Model(input_layer, output_layer)
Example #23
    def model_tcn(self):
        i = Input(shape=(FEED_LEN, 2))
        o = TCN(return_sequences=False, activation='relu', nb_filters=128)(i)
        o = Dense(WINDOW_LEN)(o)
        mdl = Model(inputs=[i], outputs=[o])
        mdl.compile(optimizer='adam', loss='mse', metrics=['mae', 'mape'])
        mdl.summary()
        return mdl
Example #24
    def __init__(self, moments, model = 1, data_path = None, batch_size = None, input_dims = 6, trainset = 100, loadModel = None, reproducibility = False):
        if reproducibility:
            np.random.seed(2020)

        self.batch_size = batch_size

        save = f"model_{model}+moments_{moments}+batch_size{batch_size}.h5"
        self.save = ModelCheckpoint(save, save_best_only=True, monitor='val_loss', mode='min')
        self.moments = moments;
        self.stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
        self.x, self.y = self.get_data(data_path)

        if loadModel == None:
            '''make the model here'''
            i = Input(batch_shape=(self.batch_size, self.moments, 4))
            if model == 1:
                x1 = TCN(return_sequences=False, nb_filters=(self.moments)*2, dilations=[2**i for i in range(int(np.log2(moments)))], nb_stacks=2, dropout_rate=.3,
                         kernel_size=2)(i)
                x2 = Lambda(lambda z: backend.reverse(z, axes=-1))(i)
                x2 = TCN(return_sequences=False, nb_filters=(self.moments)*2, dilations=[2**i for i in range(int(np.log2(moments)))], nb_stacks=2, dropout_rate=.1,
                         kernel_size=2)(x2)
                x = add([x1, x2])
                o = Dense(1, activation='linear')(x)

            elif model == 2:
                x1 = TCN(return_sequences=True, nb_filters=(self.moments) * 2, dilations=[2**i for i in range(int(np.log2(moments)))], nb_stacks=2,
                     dropout_rate=.3,
                     kernel_size=2)(i)
                x2 = Lambda(lambda z: backend.reverse(z, axes=-1))(i)
                x2 = TCN(return_sequences=True, nb_filters=(self.moments) * 2, dilations=[2**i for i in range(int(np.log2(moments)))], nb_stacks=2,
                         dropout_rate=.1,
                         kernel_size=2)(x2)
                x = add([x1, x2])
                x1 = LSTM(5, return_sequences=False, dropout=.3)(x)
                x2 = Lambda(lambda z: backend.reverse(z, axes=-1))(x)
                x2 = LSTM(5, return_sequences=False, dropout=.3)(x2)
                x = add([x1, x2])
                o = Dense(1, activation='linear')(x)

            elif model == 3:
                # print([2**i for i in range(int(np.log2(moments) - 1))])
                x = TCN(return_sequences=True, nb_filters=32, dilations=[2**i for i in range(int(np.log2(moments)))], nb_stacks=2, dropout_rate=.3,
                     kernel_size=4)(i)
                x1 = TCN(return_sequences=True, nb_filters = 16, dilations = [2**i for i in range(int(np.log2(moments)))], nb_stacks = 2, dropout_rate=.3, kernel_size=4)(x)
                x2 = LSTM(32, return_sequences=True, dropout=.3)(i)
                x2 = LSTM(16, return_sequences=True, dropout=.3)(x2)
                x = add([x1, x2])
                x = Dense(8, activation='linear')(x)
                x = TCN(return_sequences=True, nb_filters=4, dilations=[1, 2, 4], nb_stacks=1, dropout_rate=.3,
                        kernel_size=2, activation=wave_net_activation)(x)
                x = concatenate([GlobalMaxPooling1D()(x), GlobalAveragePooling1D()(x)])
                o = Dense(1, activation='linear')(x)

            self.m = Model(inputs=i, outputs=o)

        else:
            self.m = load_model(loadModel, custom_objects = {'TCN': TCN, 'wave_net_activation': wave_net_activation})

        self.m.summary();
        self.m.compile(optimizer='adam', loss='mse')
Example #25
    def build_model(self, input_shape, nb_classes):
        input_layer = keras.layers.Input(input_shape)

        num_feat = 30
        num_classes = 2
        nb_filters = 32
        kernel_size = 5
        dilations = [2**i for i in range(4)]
        padding = 'causal'
        nb_stacks = 1
        #max_len=X_train[0:1].shape[1]
        use_skip_connections = True
        use_batch_norm = True
        dropout_rate = 0.05
        kernel_initializer = 'he_normal'
        #lr=0.00
        activation = 'linear'
        use_layer_norm = True

        return_sequences = True
        #name='tcn_1'
        x = TCN(nb_filters, kernel_size, nb_stacks, dilations, padding,
                use_skip_connections, dropout_rate, return_sequences,
                activation, kernel_initializer, use_batch_norm,
                use_layer_norm)(input_layer)
        """
		return_sequences=False
		#name='tcn_1'
		x = TCN(nb_filters, kernel_size, nb_stacks, dilations, padding,
		    use_skip_connections, dropout_rate, return_sequences,
		    activation, kernel_initializer, use_batch_norm, use_layer_norm)(input_layer)
        
		"""
        output_layer = keras.layers.Dense(nb_classes, activation='sigmoid')(x)

        model = keras.models.Model(inputs=input_layer, outputs=output_layer)

        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(),
                      metrics=['accuracy'])

        print(model.summary())

        reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                      factor=0.5,
                                                      patience=50,
                                                      min_lr=0.0001)

        file_path = self.output_directory + 'best_model'

        model_checkpoint = keras.callbacks.ModelCheckpoint(filepath=file_path,
                                                           monitor='loss',
                                                           save_best_only=True)

        self.callbacks = [reduce_lr, model_checkpoint]

        return model
Example #26
    def generator():
        with tf.compat.v1.variable_scope("generator",
                                         reuse=tf.compat.v1.AUTO_REUSE):

            i = Input(batch_shape=(batch_size, 24, 9))

            o = TCN(return_sequences=True)(i)  # The TCN layers are here.
            o = Dense(hidden_dim)(o)
        return o
Example #27
    def vanilla_TCN(self, save_dir,model_name):
        i = Input(shape=(self.sw_width, self.features))
        # m = TCN()(i)
        m = TCN(nb_filters=128, return_sequences=False)(i)
        # o = TCN(nb_filters=128,return_sequences=False)(i)
        # m = TCN(nb_filters=64,return_sequences=True)(o)
        # m = TCN(nb_filters=32,return_sequences=False)(o)
        m = Dense(1, activation='sigmoid')(m)
        model = Model(inputs=[i], outputs=[m])
        adam = keras.optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(optimizer=adam, loss='mse', metrics=['mean_absolute_error'])
        print(model.summary())
        # filepath = "model_{epoch:02d}-{val_loss:.2f}-{val_trend_accuracy:.2f}.hdf5"
        filepath = "model_{epoch:02d}-{val_loss:.7f}-{val_mean_absolute_error:.7f}.hdf5"
        checkpoint = ModelCheckpoint(os.path.join(save_dir, filepath), monitor='val_loss', verbose=1,
                                     save_best_only=True)

        history = model.fit(self.train_X, self.train_y, batch_size=512, epochs=self.epochs_num, validation_split=0.2,
                            verbose=self.verbose_set, callbacks=[checkpoint])
        # y_pred = model.predict(self.train_X).squeeze(axis = 1)  # drop the extra dimension
        # y_true = self.train_y
        # # print(y_pred)
        # train_results = np.sign(y_pred * y_true)
        # cor = 0
        # for x in train_results:
        #     if x > 0:
        #         cor += 1
        # acc = cor * 1.0 / len(train_results)
        # print("The train acc is %f" % acc)
        y_pred = model.predict(self.test_X).squeeze(axis=1)  # drop the extra dimension
        y_true = self.test_y
        # print(y_pred)
        test_results = np.sign(y_pred * y_true)
        cor = 0
        for x in test_results:
            if x > 0:
                cor += 1
        acc = cor * 1.0 / len(test_results)
        print("The test acc is %f" % acc)
        # ------------------save_model-----------------#
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
            # save it once more
        model_path = os.path.join(save_dir, model_name)
        model.save(model_path)
        print('Saved trained model at %s ' % model_path)
        # --------------------plt loss-------------------#
        plt.figure(figsize=(8, 8), dpi=200)
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('model train vs validation loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'validation'], loc='upper right')
        plt.savefig(save_dir +'/' + 'loss.jpg')
        plt.show()
Example #28
    def recovery(H):
        with tf.compat.v1.variable_scope("recovery",
                                         reuse=tf.compat.v1.AUTO_REUSE):

            #i = Input(batch_shape=H)
            o = TCN(return_sequences=True)(H)
            o = Dense(9)(o)
            #m = Model(inputs=[i], outputs=[o])
            #m.summary()
        return o
Example #29
    def _TCN(self, inputs, n_outputs, training):
        '''Build the TCN model'''
        outputs = TCN(inputs,
                      n_outputs,
                      self.num_channels,
                      self.sequence_length,
                      self.kernel_size,
                      self.dropout,
                      is_training=training)
        return outputs
Example #30
def build_model():
    input = Input(batch_shape=(None, 100, 10))

    output = TCN(return_sequences=False,
                 dropout_rate=args.dropout_rate,
                 dilations=(1, 2, 4, 8, 16, 32,
                            64))(input)  # The TCN layers are here.
    output = Dense(1)(output)

    model = Model(inputs=[input], outputs=[output])
    return model
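args.dropout_rate refers to a module-level argparse namespace in the source; a hedged stand-in for experimentation, assuming the Input/TCN/Dense/Model imports used above are in scope:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dropout_rate', type=float, default=0.1)
args = parser.parse_args([])          # assumed defaults, for illustration only

model = build_model()
model.compile(optimizer='adam', loss='mse')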