Example #1
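All of these snippets are fragments: they assume Keras-2-era imports and a handful of globals defined elsewhere. A minimal sketch of that surrounding setup follows; the concrete values of n_timesteps, n_features, lr and mask_value are assumptions, not taken from the source.

# Presumed setup for the examples below (a sketch; exact module paths
# depend on the Keras version in use).
from keras.models import Sequential
from keras.layers import Dense, GRU, Lambda, Masking, TimeDistributed
from keras.optimizers import RMSprop, Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K

import wtte.wtte as wtte

# Later examples additionally assume:
# from keras.models import Model
# from keras.layers import Input, Concatenate, BatchNormalization
# from keras.optimizers import Nadam
# import numpy as np
# import tensorflow as tf

n_timesteps = 100  # sequence length (assumed)
n_features = 2     # inputs per timestep (assumed)
lr = 0.01          # learning rate (assumed)
mask_value = -99.0 # padding marker for Masking (assumed)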
def model_no_masking(discrete_time, init_alpha, max_beta):
    model = Sequential()
    # Two outputs per timestep: the raw (alpha, beta) pre-activations
    model.add(TimeDistributed(Dense(2), input_shape=(n_timesteps, n_features)))

    # output_lambda maps them to valid Weibull parameters
    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete').loss_function
    else:
        loss = wtte.loss(kind='continuous').loss_function

    model.compile(loss=loss, optimizer=RMSprop(lr=lr))

    return model
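A hypothetical call, just to show the intended argument shapes (x_train and y_train are assumptions; init_alpha would normally be estimated from the mean time-to-event, as Example #5 does):

# Hypothetical usage; init_alpha is usually derived from the training
# data rather than hand-picked.
model = model_no_masking(discrete_time=True, init_alpha=1.0, max_beta=100.0)
model.fit(x_train, y_train, epochs=60, batch_size=100, verbose=2)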
Example #2
def model_masking(discrete_time, init_alpha, max_beta):
    model = Sequential()

    # Mask padded timesteps so they are skipped downstream
    model.add(Masking(mask_value=mask_value, input_shape=(n_timesteps, n_features)))
    model.add(TimeDistributed(Dense(2)))
    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete', reduce_loss=False).loss_function
    else:
        loss = wtte.loss(kind='continuous', reduce_loss=False).loss_function

    # reduce_loss=False keeps per-timestep losses so temporal sample
    # weights can zero out the masked/padded steps
    model.compile(loss=loss, optimizer=RMSprop(lr=lr), sample_weight_mode='temporal')
    return model
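With reduce_loss=False and sample_weight_mode='temporal', fit expects a 2-D weight array of shape (n_samples, n_timesteps). A sketch where padded steps get zero weight; the mask derivation is an assumption about how x_train was padded:

import numpy as np

# One weight per (sample, timestep); zeros silence the padded steps.
sample_weights = (x_train[:, :, 0] != mask_value).astype('float32')

model = model_masking(discrete_time=True, init_alpha=1.0, max_beta=100.0)
model.fit(x_train, y_train, sample_weight=sample_weights,
          epochs=60, batch_size=100)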
Example #3
model = Sequential()

model.add(
    GRU(1,
        input_shape=xg_train.shape[1:],  # (n_timesteps, n_features)
        activation='tanh',
        return_sequences=True))

model.add(Dense(2))
model.add(
    Lambda(wtte.output_lambda,
           arguments={
               "init_alpha": init_alpha,
               "max_beta_value": 4.0
           }))
loss = wtte.loss(kind='discrete').loss_function

#model.load_weights('load_weight.hdf5')

model.compile(loss=loss, optimizer=Adam(lr=0.01))

# checkpoints

filepath = 'gen_cp/{epoch:02d}-{val_loss:.2f}.hdf5'
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             save_weights_only=False,
                             mode='auto',
                             period=1)
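The checkpoint only takes effect once it is passed to fit, and 'val_loss' only exists if validation data is supplied. A hypothetical training call; xg_train, yg_train and the split are assumptions:

# Hypothetical call wiring in the checkpoint defined above.
model.fit(xg_train, yg_train,
          epochs=100,
          batch_size=128,
          validation_split=0.2,  # provides the monitored 'val_loss'
          callbacks=[checkpoint])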
Example #4
## Base model
model = Sequential()
model.add(Masking(mask_value=mask_value, input_shape=(None, n_features)))
model.add(GRU(10, activation='tanh', return_sequences=True,
              recurrent_dropout=0.1, unroll=False))
model.add(BatchNormalization(axis=-1, momentum=0.9, epsilon=0.01))

model.add(TimeDistributed(Dense(10,activation='tanh')))

## Wtte-RNN part
model.add(TimeDistributed(Dense(2)))
model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                "max_beta_value": 2.0,
                                                "alpha_kernel_scalefactor": 0.5
                                                }))

loss = wtte.loss(kind='discrete', reduce_loss=False).loss_function
model.compile(loss=loss, optimizer=Adam(lr=0.01, clipvalue=0.5),
              sample_weight_mode='temporal')
model.summary()

K.set_value(model.optimizer.lr, 0.01)
model.fit(x_train, y_train,
          epochs=300,
          batch_size=300,
          verbose=1,
          validation_data=(x_valid, y_valid, sample_weights_valid),
          sample_weight=sample_weights_train,
          callbacks=[nanterminator, history, weightwatcher, reduce_lr])

## Training was force-stopped: loss went to NaN
"""
Train on 924 samples, validate on 105 samples
"""
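Since this run died with a NaN loss, the nanterminator callback referenced above is presumably Keras's built-in TerminateOnNaN. A sketch of how those two standard callbacks could be constructed (the ReduceLROnPlateau settings are assumptions; history and weightwatcher are left out as they appear to be custom):

from keras.callbacks import TerminateOnNaN, ReduceLROnPlateau

# Stop as soon as the loss turns NaN instead of burning further epochs.
nanterminator = TerminateOnNaN()
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                              patience=10, min_lr=1e-4)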
Example #5
    def build_model(self, X_train, y_train, size_dyn, size_sta):
        """Build time to event model using a GRU network.

        Args:
            X_train (list): pair [dynamic, static] of training inputs;
                the dynamic part has shape [n_examples, prefix, n_features].
            y_train (object): training set labels of shape
                [n_examples, 2] (time-to-event and censoring indicator).
            size_dyn (int): GRU units size.
            size_sta (int): static branch hidden layer size (optional).

        Returns:
            None. Initializes self.model in place.
        """
        logger.info('Initializing time to event model ...')
        # check if we have static features
        static_flag = False
        if X_train[1].shape[2] != 0:
            static_flag = True
            X_train_static = X_train[1]
            n_features_static = X_train_static.shape[-1]
            # Static model
            static_input = Input(shape=(n_features_static,),
                                 name='static_input')
            dense_static1 = Dense(size_sta,
                                  name='hidden_static1')(static_input)
            bs1 = BatchNormalization()(dense_static1)
            # dense_static2 = Dense(size_sta//2, name='hidden_static2')(bs1)
            # bs2 = BatchNormalization()(dense_static2)
            static_output = Dense(1,
                                  name='static_output',
                                  activation='sigmoid')(bs1)
        X_train = X_train[0]

        tte_mean_train = np.nanmean(y_train[:, 0].astype('float'))
        mean_u = np.nanmean(y_train[:, 1].astype('float'))
        init_alpha = -1.0 / np.log(1.0 - 1.0 / (tte_mean_train + 1.0))
        init_alpha = init_alpha / mean_u
        n_features = X_train.shape[-1]

        # Fixing seeds
        np.random.seed(sd)
        tf.random.set_seed(sd)

        # Main model
        main_input = Input(shape=(None, n_features), name='main_input')
        l1 = GRU(size_dyn,
                 activation='tanh',
                 recurrent_dropout=0.25,
                 return_sequences=True)(main_input)
        b1 = BatchNormalization()(l1)
        l2 = GRU(size_dyn // 2,
                 activation='tanh',
                 recurrent_dropout=0.25,
                 return_sequences=False)(b1)
        b2 = BatchNormalization()(l2)
        if static_flag:
            dynamic_output = Dense(2, name='Dense_main')(b2)
            merged = Concatenate()([dynamic_output, static_output])
            l4 = Dense(2, name='output')(merged)
        else:
            l4 = Dense(2, name='output')(b2)

        output = Lambda(wtte.output_lambda,
                        name="lambda_layer",
                        arguments={
                            "init_alpha": init_alpha,
                            "max_beta_value": 100,
                            "scalefactor": 0.5
                        })(l4)
        loss = wtte.loss(kind='continuous', reduce_loss=False).loss_function
        np.random.seed(sd)
        tf.random.set_seed(sd)
        if static_flag:
            self.model = Model(inputs=[main_input, static_input],
                               outputs=[output])
        else:
            self.model = Model(inputs=[main_input], outputs=[output])
        self.model.compile(loss=loss,
                           optimizer=Nadam(lr=0.001,
                                           beta_1=0.9,
                                           beta_2=0.999,
                                           epsilon=1e-08,
                                           schedule_decay=0.004,
                                           clipvalue=3))
        # self.model.compile(loss=loss, optimizer=RMSprop(
        #     lr=0.001, clipvalue=3))
        return
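The init_alpha recipe above is the usual wtte-rnn heuristic: pick the scale at which a geometric baseline matches the mean observed time-to-event, then divide by the fraction of uncensored examples so that censoring does not bias the starting point downward. A standalone version of the same arithmetic (the helper name and signature are mine, not from the source):

import numpy as np

def initial_alpha(tte, uncensored):
    """Heuristic initial Weibull scale.

    tte:        time-to-event per example
    uncensored: 1.0 where the event was observed, 0.0 where censored
    """
    tte_mean = np.nanmean(tte.astype('float'))
    frac_uncensored = np.nanmean(uncensored.astype('float'))
    # Scale of a geometric-like baseline matching the mean TTE
    alpha = -1.0 / np.log(1.0 - 1.0 / (tte_mean + 1.0))
    # Inflate by the censoring rate, as the model above does
    return alpha / frac_uncensored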