Example #1
File: models.py  Project: LIANGMA314/ATDL
from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers

# `mc` is the project's config module, providing _OUT_DIM and _L2_REGULARIZE_RATE.
def latent(data_shape):
    model = Sequential()
    model.add(Dense(mc._OUT_DIM, activation='relu', input_shape=(data_shape,),
                    kernel_regularizer=regularizers.l2(mc._L2_REGULARIZE_RATE)))
    # input_shape is only needed on the first layer; Keras infers it afterwards.
    model.add(Dense(mc._OUT_DIM, activation='relu',
                    kernel_regularizer=regularizers.l2(mc._L2_REGULARIZE_RATE)))
    return model
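A minimal usage sketch for this encoder; the mc stub below holds hypothetical values, not the project's actual configuration:

import numpy as np

class mc:  # hypothetical stand-in for the project's config module
    _OUT_DIM = 64
    _L2_REGULARIZE_RATE = 1e-4

model = latent(data_shape=128)                  # flat input vectors of length 128
model.compile(optimizer='adam', loss='mse')
print(model.predict(np.zeros((1, 128))).shape)  # -> (1, 64)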
Example #2
def build_policy_head(self, input_layer):
    # 1x1 convolution down to two feature planes, then batch norm and ReLU.
    policy_head = layers.Conv2D(2, (1, 1),
                                data_format=self.data_format,
                                kernel_regularizer=regularizers.l2(
                                    self.reg_alpha))(input_layer)
    policy_head = layers.BatchNormalization(
        axis=self.batch_norm_axis)(policy_head)
    policy_head = layers.Activation("relu")(policy_head)
    policy_head = layers.Flatten()(policy_head)
    # One logit per board point, plus one extra action (e.g. pass).
    policy_head = layers.Dense(BOARD_SIZE * BOARD_SIZE + 1,
                               kernel_initializer='random_uniform',
                               bias_initializer='ones',
                               kernel_regularizer=regularizers.l2(
                                   self.reg_alpha))(policy_head)
    policy_head = layers.Activation("softmax",
                                    name="output_policy")(policy_head)
    return policy_head
Example #3
def build_convolutional_block(self, input_layer):
    # Entry block: conv -> batch norm -> ReLU.
    conv_layer = layers.Conv2D(CONV_FILTERS,
                               CONV_KERNEL,
                               data_format=self.data_format,
                               padding='same',
                               kernel_regularizer=regularizers.l2(
                                   self.reg_alpha))(input_layer)
    conv_layer = layers.BatchNormalization(
        axis=self.batch_norm_axis)(conv_layer)
    conv_layer = layers.Activation("relu")(conv_layer)
    return conv_layer
Example #4
def build_residual_block(self, input_layer):
    # Two conv + batch-norm stages followed by an identity skip connection.
    reslayer = layers.Conv2D(CONV_FILTERS,
                             CONV_KERNEL,
                             data_format=self.data_format,
                             padding='same',
                             kernel_regularizer=regularizers.l2(
                                 self.reg_alpha))(input_layer)
    reslayer = layers.BatchNormalization(
        axis=self.batch_norm_axis)(reslayer)
    reslayer = layers.Activation("relu")(reslayer)
    reslayer = layers.Conv2D(CONV_FILTERS,
                             CONV_KERNEL,
                             data_format=self.data_format,
                             padding='same',
                             kernel_regularizer=regularizers.l2(
                                 self.reg_alpha))(reslayer)
    reslayer = layers.BatchNormalization(
        axis=self.batch_norm_axis)(reslayer)
    # Shapes must match for the element-wise addition of the shortcut.
    assert reslayer.shape.as_list() == input_layer.shape.as_list()
    reslayer = layers.Add()([reslayer, input_layer])
    reslayer = layers.Activation("relu")(reslayer)
    return reslayer
Example #5
def build_value_head(self, input_layer):
    # 1x1 convolution down to a single feature plane.
    value_head = layers.Conv2D(1, (1, 1),
                               data_format=self.data_format,
                               kernel_regularizer=regularizers.l2(
                                   self.reg_alpha))(input_layer)
    value_head = layers.BatchNormalization(
        axis=self.batch_norm_axis)(value_head)
    value_head = layers.Activation("relu")(value_head)
    value_head = layers.Flatten()(value_head)
    value_head = layers.Dense(DENSE_SIZE,
                              activation="relu",
                              kernel_initializer='random_uniform',
                              bias_initializer='ones',
                              kernel_regularizer=regularizers.l2(
                                  self.reg_alpha))(value_head)
    # tanh squashes the scalar output into [-1, 1].
    value_head = layers.Dense(1,
                              activation="tanh",
                              name="output_value",
                              kernel_initializer='random_uniform',
                              bias_initializer='ones',
                              kernel_regularizer=regularizers.l2(
                                  self.reg_alpha))(value_head)
    return value_head
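Examples #2 through #5 are the standard building blocks of an AlphaZero-style two-headed network. A minimal sketch of how they might be wired together; the class name, constants, input planes, and block count below are assumptions for illustration, with the four methods above pasted into the class:

from tensorflow.keras import layers, regularizers, Model

# Hypothetical stand-ins for the project's module-level constants.
BOARD_SIZE = 9
CONV_FILTERS = 64
CONV_KERNEL = (3, 3)
DENSE_SIZE = 64

class DualHeadNet:
    # Paste build_convolutional_block, build_residual_block,
    # build_policy_head, and build_value_head from examples #2-#5 here.

    def __init__(self, reg_alpha=1e-4, n_res_blocks=4, input_planes=3):
        self.reg_alpha = reg_alpha
        self.data_format = 'channels_last'
        self.batch_norm_axis = -1          # channels axis for channels_last
        board = layers.Input(shape=(BOARD_SIZE, BOARD_SIZE, input_planes))
        trunk = self.build_convolutional_block(board)
        for _ in range(n_res_blocks):      # shared residual tower
            trunk = self.build_residual_block(trunk)
        self.model = Model(inputs=board,
                           outputs=[self.build_policy_head(trunk),
                                    self.build_value_head(trunk)])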
Example #6
def __init__(self, learning_rate, layers, functions, optimizer_name,
             beta=0.0, dropout=0.0):
    # `layers` lists the width of every layer, e.g. [784, 128, 10];
    # `functions` gives one activation per layer after the input.
    self.n_input = layers[0]
    self.n_hidden = layers[1:-1]
    self.n_output = layers[-1]

    self.model = Sequential()

    if len(self.n_hidden) == 0:
        # single layer
        self.model.add(Dense(self.n_output, activation=functions[0],
                             kernel_regularizer=regularizers.l2(beta),
                             input_shape=(self.n_input,)))

    elif len(self.n_hidden) == 1:
        # hidden layer
        self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                             kernel_regularizer=regularizers.l2(beta),
                             input_shape=(self.n_input,)))
        # Keras Dropout(rate) drops this fraction of units, so the original
        # default of 1.0 would have dropped everything during training.
        self.model.add(Dropout(dropout))
        # output layer
        self.model.add(Dense(self.n_output, activation=functions[1],
                             kernel_regularizer=regularizers.l2(beta)))

    else:
        # the first hidden layer
        self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                             kernel_regularizer=regularizers.l2(beta),
                             input_shape=(self.n_input,)))
        self.model.add(Dropout(dropout))
        # the second hidden layer
        self.model.add(Dense(self.n_hidden[1], activation=functions[1],
                             kernel_regularizer=regularizers.l2(beta)))
        self.model.add(Dropout(dropout))
        # the output layer
        self.model.add(Dense(self.n_output, activation=functions[2],
                             kernel_regularizer=regularizers.l2(beta)))

    self.model.summary()

    if optimizer_name == 'Adam':
        optimizer = Adam(learning_rate)
    else:
        # The original left `optimizer` undefined here, which raised a
        # NameError at compile time for any other optimizer name.
        raise ValueError("unsupported optimizer: %s" % optimizer_name)

    # Alternative regression setup:
    # self.model.compile(loss='mean_squared_error',
    #                    optimizer=optimizer,
    #                    metrics=['accuracy'])

    self.model.compile(loss='categorical_crossentropy',
                       optimizer=optimizer,
                       metrics=['accuracy'])
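A minimal instantiation sketch for this constructor; the wrapper class name MLP and all concrete values are hypothetical, and the imports are the plain Keras ones the snippet relies on:

from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import regularizers
from keras.optimizers import Adam

# MLP is a hypothetical name for the class whose __init__ is shown above.
net = MLP(learning_rate=1e-3,
          layers=[784, 128, 64, 10],              # input, two hidden, output
          functions=['relu', 'relu', 'softmax'],  # one activation per added layer
          optimizer_name='Adam',
          beta=1e-4,                              # L2 penalty on the kernels
          dropout=0.2)                            # fraction of units to drop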
Example #7
        def cross_val_model(params):
            keras_model = None
            # Destroys the current TF graph and creates a new one.
            # Useful to avoid clutter from old models / layers.
            K.clear_session()
            self._set_xpu_config(num_cpus, num_gpus)

            self.span = W = params.span
            (X_miss, X_train), (X_miss_val, X_test) = self.train_test_split(
                dataset,
                train_size=train_size,
                abnormal=abnormal,
            )
            if len(X_train) == 0:
                raise errors.NoData("insufficient training data")
            if len(X_test) == 0:
                raise errors.NoData("insufficient validation data")

            # expected input data shape: (batch_size, timesteps,)
            # network parameters
            input_shape = (W, )
            intermediate_dim = params.intermediate_dim
            latent_dim = params.latent_dim

            # VAE model = encoder + decoder
            # build encoder model
            main_input = Input(shape=input_shape)
            # bool vector to flag missing data points
            aux_input = Input(shape=input_shape)
            aux_output = Lambda(lambda x: x)(aux_input)
            x = Dense(intermediate_dim,
                      kernel_regularizer=regularizers.l2(0.01),
                      activation='relu')(main_input)
            z_mean = Dense(latent_dim, name='z_mean')(x)
            z_log_var = Dense(latent_dim, name='z_log_var')(x)

            # use reparameterization trick to push the sampling out as input
            # note that "output_shape" isn't necessary with the TensorFlow backend
            z = Lambda(sampling, output_shape=(latent_dim, ),
                       name='z')([z_mean, z_log_var])

            # build decoder model
            x = Dense(intermediate_dim,
                      kernel_regularizer=regularizers.l2(0.01),
                      activation='relu',
                      name='dense_1')(z)
            main_output = Dense(W, activation='linear', name='dense_2')(x)

            # instantiate Donut model
            keras_model = _Model([main_input, aux_input],
                                 [main_output, aux_output],
                                 name='donut')
            add_loss(keras_model, W)
            if params.optimizer == 'adam':
                optimizer = tf.keras.optimizers.Adam()
            else:
                raise ValueError("unsupported optimizer: %s" % params.optimizer)

            keras_model.compile(optimizer=optimizer)

            _stop = EarlyStopping(
                monitor='val_loss',
                patience=5,
                verbose=_verbose,
                mode='auto',
            )
            keras_model.fit_generator(
                generator(X_train, X_miss, batch_size, keras_model),
                epochs=num_epochs,
                steps_per_epoch=len(X_train) // batch_size,  # must be an integer
                verbose=_verbose,
                validation_data=([X_test, X_miss_val], None),
                callbacks=[_stop],
                workers=0,  # https://github.com/keras-team/keras/issues/5511
            )

            # How well did it do?
            score = keras_model.evaluate(
                [X_test, X_miss_val],
                batch_size=batch_size,
                verbose=_verbose,
            )

            self.current_eval += 1
            if progress_cb is not None:
                progress_cb(self.current_eval, max_evals)

            return score, keras_model
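The sampling function passed to the Lambda layer is not part of this excerpt. In Keras VAE code it is conventionally the reparameterization trick; a sketch under that assumption:

from tensorflow.keras import backend as K

def sampling(args):
    # Reparameterization trick: draw z = mean + sigma * epsilon, keeping the
    # stochastic node differentiable with respect to z_mean and z_log_var.
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon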
Example #8
File: dmnn.py  Project: tobytoy/MotionGAN
from __future__ import absolute_import, division, print_function
import numpy as np
# tensorflow.contrib.keras is the TF1-era API path; under TF2 these
# modules live in tensorflow.keras instead.
import tensorflow.contrib.keras.api.keras.backend as K
from tensorflow.contrib.keras.api.keras.models import Model
from tensorflow.contrib.keras.api.keras.layers import Input
from tensorflow.contrib.keras.api.keras.layers import Conv2D, \
    Dense, Activation, Lambda, Reshape, Permute, Add, Concatenate, \
    BatchNormalization, Dropout
from tensorflow.contrib.keras.api.keras.optimizers import Adam
from tensorflow.contrib.keras.api.keras.regularizers import l2
from layers.edm import EDM
from layers.comb_matrix import CombMatrix
from utils.scoping import Scoping

CONV2D_ARGS = {'padding': 'same', 'data_format': 'channels_last', 'kernel_regularizer': l2(5e-4)}


class _DMNN(object):
    def __init__(self, config):
        self.name = config.model_type + '_' + config.model_version
        self.data_set = config.data_set
        self.batch_size = config.batch_size
        self.num_actions = config.num_actions
        self.seq_len = config.pick_num if config.pick_num > 0 else (
                       config.crop_len if config.crop_len > 0 else None)
        self.njoints = config.njoints
        self.body_members = config.body_members
        self.dropout = config.dropout

        real_seq = Input(
            batch_shape=(self.batch_size, self.njoints, self.seq_len, 3),