def __init__(self,
                 prefix=None,
                 optimizer=None,
                 layers=None,
                 loss=None,
                 metrics=None,
                 epochs=10,
                 batch_size=32,
                 callbacks=None,
                 input_shape=(40, 400, 1)
                 ):

        # Default parameter settings -------------------------------
        if optimizer is None:
            optimizer = {"SGD": {'lr': 0.01,
                                 'decay': 1e-6,
                                 'momentum': 0.9,
                                 'nesterov': True}}

        if loss is None:
            loss = "mean_squared_error"

        if metrics is None:
            metrics = ['acc', spIndex]

        get_custom_objects().update({"spIndex": spIndex})

        # Place input shape on first layer parameter list
        # layers[0][1]['input_shape'] = input_shape

        # Setting parameters --------------------------------------
        self.__dict__ = OrderedDict()
        self.__dict__['input_shape'] = input_shape
        self.__dict__['prefix'] = prefix

        self.__dict__['optimizer'] = Optimizer(optimizer)

        self.__dict__['layers'] = Layers()

        if layers is not None:
            for layer in layers:
                # name = layer['type']
                # args = layer
                # del args['type']
                self.layers.add(layer)

        self.__dict__['callbacks'] = Callbacks()
        if callbacks is not None:
            for args in callbacks:
                self.callbacks.add(args)

        self.__dict__['loss'] = loss[0] if isinstance(loss, list) else loss
        self.__dict__['metrics'] = metrics
        self.__dict__['epochs'] = epochs
        self.__dict__['batch_size'] = batch_size
Example #2
    def load(self, fullpath=None):
        """ Load model """
        fullpath = fullpath if fullpath else self.filename
        logger.debug("Loading model: '%s'", fullpath)
        try:
            network = load_model(fullpath, custom_objects=get_custom_objects())
        except ValueError as err:
            if str(err).lower().startswith("cannot create group in read only mode"):
                self.convert_legacy_weights()
                return True
            logger.warning("Failed loading existing training data. Generating new models")
            logger.debug("Exception: %s", str(err))
            return False
        except OSError as err:
            logger.warning("Failed loading existing training data. Generating new models")
            logger.debug("Exception: %s", str(err))
            return False
        self.config = network.get_config()
        self.network = network  # Update network with saved model
        self.network.name = self.type
        return True
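
# A minimal round-trip sketch of the pattern above (hypothetical custom
# metric `my_metric`; old-style Keras imports assumed): objects registered
# via get_custom_objects() are resolved again when the model is reloaded.
import keras.backend as K
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.utils.generic_utils import get_custom_objects

def my_metric(y_true, y_pred):  # hypothetical custom metric
    return K.mean(K.abs(y_true - y_pred))

get_custom_objects().update({'my_metric': my_metric})

model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse', metrics=[my_metric])
model.save('tmp_model.h5')
restored = load_model('tmp_model.h5', custom_objects=get_custom_objects())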
Example #3
    # Arguments
        x: Tensor or variable.
        kernel: kernel tensor.
        strides: strides tuple.
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
            Whether to use Theano or TensorFlow data format
            for inputs/kernels/outputs.
        dilation_rate: tuple of 2 integers.

    # Returns
        A tensor, result of 2D convolution.

    # Raises
        ValueError: if `data_format` is neither `channels_last` nor `channels_first`.
    """
    # Transform the filters
    transformed_filter = transform_filter_2d_nhwc(w=kernel,
                                                  flat_indices=gconv_indices,
                                                  shape_info=gconv_shape_info)
    return K.conv2d(x=x,
                    kernel=transformed_filter,
                    strides=strides,
                    padding=padding,
                    data_format=data_format,
                    dilation_rate=dilation_rate)


get_custom_objects().update({'GConv2D': GConv2D})
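
# Usage sketch: because GConv2D is registered at import time above, a model
# saved with GConv2D layers can be reloaded without passing an explicit
# custom_objects dict ('gconv_model.h5' is a hypothetical file name).
from keras.models import load_model
model = load_model('gconv_model.h5')  # GConv2D is resolved via the registry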
Example #4
def run():
    args = initialize_parameters()

    get_custom_objects()['PermanentDropout'] = PermanentDropout
    model = keras.models.load_model(args.model_file, compile=False)
    model.load_weights(args.weights_file)
    # model.summary()

    df_expr, df_desc = prepare_data(sample_set=args.sample_set,
                                    drug_set=args.drug_set,
                                    use_landmark_genes=args.use_landmark_genes)

    print('total available samples: ', df_expr[['Sample']].shape[0])
    print('total available drugs: ', df_desc[['Drug']].shape[0])

    if args.ns > 0 and args.si > 0:
        df_sample_ids = df_expr[['Sample']].iloc[args.si:args.si + args.ns]
    elif args.si > 0:
        df_sample_ids = df_expr[['Sample']].iloc[args.si:]
    elif args.ns > 0:
        df_sample_ids = df_expr[['Sample']].head(args.ns)
    else:
        df_sample_ids = df_expr[['Sample']].copy()
    if args.nd > 0:
        df_drug_ids = df_desc[['Drug']].head(args.nd)
    else:
        df_drug_ids = df_desc[['Drug']].copy()

    df_sum = cross_join3(df_sample_ids,
                         df_drug_ids,
                         df_drug_ids,
                         suffixes=('1', '2'))

    n_samples = df_sample_ids.shape[0]
    n_drugs = df_drug_ids.shape[0]
    n_rows = n_samples * n_drugs * n_drugs

    print(
        'Predicting drug response for {} combinations: {} samples x {} drugs x {} drugs'
        .format(n_rows, n_samples, n_drugs, n_drugs))
    n = args.n_pred
    df_sum['N'] = n
    df_seq = pd.DataFrame({'Seq': range(1, n + 1)})
    df_all = cross_join(df_sum, df_seq)

    total = df_sum.shape[0]
    for i in tqdm(range(0, total, args.step)):
        j = min(i + args.step, total)

        x_all_list = []
        df_x_all = pd.merge(df_all[['Sample']].iloc[i:j],
                            df_expr,
                            on='Sample',
                            how='left')
        x_all_list.append(df_x_all.drop(['Sample'], axis=1).values)

        drugs = ['Drug1', 'Drug2']
        for drug in drugs:
            df_x_all = pd.merge(df_all[[drug]].iloc[i:j],
                                df_desc,
                                left_on=drug,
                                right_on='Drug',
                                how='left')
            x_all_list.append(df_x_all.drop([drug, 'Drug'], axis=1).values)

        preds = []
        for k in range(n):
            y_pred = model.predict(x_all_list,
                                   batch_size=args.batch_size,
                                   verbose=0).flatten()
            preds.append(y_pred)
            df_all.loc[i * n + k:(j - 1) * n + k:n, 'PredGrowth'] = y_pred
            df_all.loc[i * n + k:(j - 1) * n + k:n, 'Seq'] = k + 1

        if n > 0:
            df_sum.loc[i:j - 1, 'PredGrowthMean'] = np.mean(preds, axis=0)
            df_sum.loc[i:j - 1, 'PredGrowthStd'] = np.std(preds, axis=0)
            df_sum.loc[i:j - 1, 'PredGrowthMin'] = np.min(preds, axis=0)
            df_sum.loc[i:j - 1, 'PredGrowthMax'] = np.max(preds, axis=0)

    # df = df_all.copy()
    # df['PredCustomComboScore'] = df.apply(lambda x: custom_combo_score(x['PredGrowth'],
    #                                                                    lookup(df, x['Sample'], x['Drug1'], value='PredGrowth'),
    #                                                                    lookup(df, x['Sample'], x['Drug2'], value='PredGrowth')), axis=1)

    csv_all = 'comb_pred_{}_{}.all.tsv'.format(args.sample_set, args.drug_set)
    df_all.to_csv(csv_all, index=False, sep='\t', float_format='%.4f')

    if n > 0:
        csv = 'comb_pred_{}_{}.tsv'.format(args.sample_set, args.drug_set)
        df_sum.to_csv(csv, index=False, sep='\t', float_format='%.4f')
Example #5
        config = {
            'alpha_initializer':
            initializers.serialize(self.alpha_initializer),
            'alpha_regularizer':
            regularizers.serialize(self.alpha_regularizer),
            'alpha_constraint': constraints.serialize(self.alpha_constraint),
            'beta_initializer': initializers.serialize(self.beta_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'shared_axes': self.shared_axes
        }
        base_config = super(PELU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'PELU': PELU})


class SReLU(Layer):
    """S-shaped Rectified Linear Unit.

    It follows:
    `f(x) = t^r + a^r(x - t^r) for x >= t^r`,
    `f(x) = x for t^r > x > t^l`,
    `f(x) = t^l + a^l(x - t^l) for x <= t^l`.

    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
Example #6
        # we won't even calculate the output of those steps, saving
        # some real computational time.
        if self.zeros_like_input is None:
            self.zeros_like_input = K.zeros_like(inputs,
                                                 name='zeros_like_input')
        # just because K.any(step_is_active) doesn't work in PlaidML
        any_step_is_active = K.greater(K.sum(K.cast(step_is_active, 'int32')),
                                       0)
        step_weighted_output = K.switch(
            any_step_is_active,
            K.expand_dims(halting_prob, -1) * inputs, self.zeros_like_input)
        if self.weighted_output is None:
            self.weighted_output = step_weighted_output
        else:
            self.weighted_output += step_weighted_output
        return [inputs, self.weighted_output]

    def compute_output_shape(self, input_shape):
        return [input_shape, input_shape]

    def finalize(self):
        self.add_loss(self.ponder_cost)


get_custom_objects().update({
    'LayerNormalization': LayerNormalization,
    'TransformerTransition': TransformerTransition,
    'TransformerACT': TransformerACT,
    'gelu': gelu,
})
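
# The 'gelu' entry above is registered by name only; a common tanh
# approximation of the Gaussian Error Linear Unit (assumed here, since this
# snippet does not show the definition) is:
import math
from keras import backend as K

def gelu(x):
    return 0.5 * x * (1.0 + K.tanh(
        math.sqrt(2.0 / math.pi) * (x + 0.044715 * K.pow(x, 3))))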
Example #7
from callbacks import EpochLog
from my_json_encoder import MyJsonEncoder

parser = argparse.ArgumentParser()
parser.add_argument('--bn', action='store_true')
parser.add_argument('--gap', action='store_true')
parser.add_argument('--full', action='store_true')
parser.add_argument('--da', action='store_true')
args = parser.parse_args()
bn_enabled = args.bn
gap_enabled = args.gap
full_enabled = args.full
da_enabled = args.da

# Register custom activation function
get_custom_objects().update({'mish': functions.mish})
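
# functions.mish itself is not shown here; Mish is commonly defined as
# x * tanh(softplus(x)), so the registered callable is likely equivalent to
# this sketch:
from keras import backend as K

def mish(x):
    return x * K.tanh(K.softplus(x))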

# Declare constants
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_FILE_NAME = os.path.splitext(os.path.basename(__file__))[0]
if bn_enabled:
    BASE_FILE_NAME += '-BN'
if gap_enabled:
    BASE_FILE_NAME += '-GAP'
if full_enabled:
    BASE_FILE_NAME += '-full'
if da_enabled:
    BASE_FILE_NAME += '-DA'
LOG_FILE_PATH = f'{CUR_DIR}/epoch_logs/{BASE_FILE_NAME}_log.txt'
# MODEL_FILE_PATH = f'{CUR_DIR}/saved_models/{BASE_FILE_NAME}_model.h5'
MODEL_WEIGHTS_PATH = f'{CUR_DIR}/saved_models/{BASE_FILE_NAME}_weights.h5'
Example #8
from keras.layers import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.engine.topology import get_source_inputs
from keras.utils import layer_utils, get_custom_objects
from keras.utils.data_utils import get_file

WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'


def swish(x):
    return K.sigmoid(x) * x


get_custom_objects().update({'swish': swish})


def conv2d_bn(x,
              filters,
              num_row,
              num_col,
              padding='same',
              strides=(1, 1),
              name=None):
    """Utility function to apply conv + BN.

    # Arguments
        x: input tensor.
        filters: filters in `Conv2D`.
        num_row: height of the convolution kernel.
Example #9
                'The layer can be called only with one tensor as an argument')
        _, seq_len, d_model = K.int_shape(inputs)
        # The first thing we need to do is to perform affine transformations
        # of the inputs to get the Queries, the Keys and the Values.
        qkv = K.dot(K.reshape(inputs, [-1, d_model]), self.qkv_weights)
        # splitting the keys, the values and the queries before further
        # processing
        pre_q, pre_k, pre_v = [
            K.reshape(
                # K.slice(qkv, (0, i * d_model), (-1, d_model)),
                qkv[:, i * d_model:(i + 1) * d_model],
                (-1, seq_len, self.num_heads, d_model // self.num_heads))
            for i in range(3)
        ]
        attention_out = self.attention(pre_q,
                                       pre_v,
                                       pre_k,
                                       seq_len,
                                       d_model,
                                       training=kwargs.get('training'))
        return attention_out

    def compute_output_shape(self, input_shape):
        return input_shape


get_custom_objects().update({
    'MultiHeadSelfAttention': MultiHeadSelfAttention,
    'MultiHeadAttention': MultiHeadAttention,
})
Example #10
from keras.layers import GlobalMaxPooling2D
from keras.layers import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.engine.topology import get_source_inputs
from keras.utils import layer_utils, get_custom_objects
from keras.utils.data_utils import get_file


WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'

def swish(x):
    return K.sigmoid(x) * x

get_custom_objects().update({'swish': swish})

def conv2d_bn(x,
              filters,
              num_row,
              num_col,
              padding='same',
              strides=(1, 1),
              name=None):
    """Utility function to apply conv + BN.

    # Arguments
        x: input tensor.
        filters: filters in `Conv2D`.
        num_row: height of the convolution kernel.
        num_col: width of the convolution kernel.
Example #11
                self.updates.append(K.update(vhat, vhat_t))
            else:
                denom = (K.sqrt(v_t) + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            # Partial momentum adaption.
            new_p = p - (lr_t * (m_t / (denom ** (self.partial * 2))))

            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon,
                  'amsgrad': self.amsgrad,
                  'partial': self.partial}
        base_config = super(Padam, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'Padam': Padam})
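
# Usage sketch (assuming the full Padam class above is in scope): `partial`
# interpolates between SGD with momentum (partial -> 0) and Adam
# (partial = 0.5), because the denominator is raised to the power 2 * partial.
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer=Padam(lr=0.1, partial=0.125), loss='mse')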
Example #12
def register_permanent_dropout():
    get_custom_objects()['PermanentDropout'] = PermanentDropout
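
# PermanentDropout is imported elsewhere; a typical implementation (a sketch,
# not necessarily this project's exact code) keeps dropout active at inference
# time, so repeated model.predict() calls sample different dropout masks:
from keras import backend as K
from keras.layers import Dropout

class PermanentDropout(Dropout):
    def __init__(self, rate, **kwargs):
        super(PermanentDropout, self).__init__(rate, **kwargs)
        self.uses_learning_phase = False

    def call(self, x, mask=None):
        if 0. < self.rate < 1.:
            noise_shape = self._get_noise_shape(x)
            x = K.dropout(x, self.rate, noise_shape)
        return x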
Example #13
import numpy as np

import keras
from keras.utils import get_custom_objects
from keras import backend as K


class TauDecay(keras.callbacks.Callback):
    def __init__(self, tau, **kwargs):
        super(TauDecay, self).__init__()
        self.tau = tau
        self.anneal_rate = 0.00003
        self.min_temperature = 0.1

    def on_train_end(self, logs=None):
        tau_value = K.get_value(self.tau)
        print('Tau Value: %s' % tau_value)

    def on_epoch_end(self, epoch, logs=None):
        tau_value = K.get_value(self.tau)
        print('Epoch %s, Current Tau Value: %s' % (epoch + 1, tau_value))

    def on_batch_end(self, batch, logs=None):
        if batch % 100 == 0:
            decay_rate = np.exp(-self.anneal_rate * batch)
            tau_value = K.get_value(self.tau) * decay_rate
            tau_value = np.max([tau_value, self.min_temperature])
            K.set_value(self.tau, tau_value)


get_custom_objects().update({
    'TauDecay': TauDecay,
})
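
# Toy usage sketch: `tau` would normally drive something like a Gumbel-softmax
# temperature inside the model; here it is just a backend variable that the
# callback anneals in place during training.
from keras.models import Sequential
from keras.layers import Dense

tau = K.variable(5.0, name='tau')
model = Sequential([Dense(1, input_shape=(3,))])
model.compile(optimizer='sgd', loss='mse')
model.fit(np.random.rand(64, 3), np.random.rand(64, 1),
          epochs=2, callbacks=[TauDecay(tau)])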
Example #14
File: Model.py  Project: Ecgbert/Face_Net
    def build(self):
        get_custom_objects().update({"swish": Swish(swish)})
        # The backend is TensorFlow, so the channel axis is 3
        channel_axis = 3
        # Define the model input
        inputs = Input(shape=(224, 224, 1), name='data')

        # Stem layer
        net = self.conv2d_bn(inputs,
                             32,
                             3,
                             3,
                             strides=(2, 2),
                             padding='valid',
                             activate=self.activate)
        net = self.conv2d_bn(net,
                             32,
                             3,
                             3,
                             strides=(1, 1),
                             padding='valid',
                             activate=self.activate)
        net = self.conv2d_bn(net,
                             64,
                             3,
                             3,
                             strides=(1, 1),
                             activate=self.activate)
        branch_0 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(net)
        net = self.conv2d_bn(branch_0,
                             80,
                             3,
                             3,
                             strides=(2, 2),
                             padding='valid',
                             activate=self.activate)
        net = self.conv2d_bn(net,
                             192,
                             3,
                             3,
                             strides=(1, 1),
                             padding='valid',
                             activate=self.activate)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding="valid")(net)

        # inception1
        branch_0 = self.conv2d_bn(x,
                                  96,
                                  1,
                                  1,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_1 = self.conv2d_bn(x,
                                  64,
                                  1,
                                  1,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_1 = self.conv2d_bn(branch_1,
                                  96,
                                  3,
                                  3,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_2 = self.conv2d_bn(x,
                                  64,
                                  1,
                                  1,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_2 = self.conv2d_bn(branch_2,
                                  96,
                                  3,
                                  3,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_2 = self.conv2d_bn(branch_2,
                                  96,
                                  3,
                                  3,
                                  strides=(1, 1),
                                  activate=self.activate)
        x = [branch_0, branch_1, branch_2]
        mix1 = concatenate(x, axis=channel_axis)
        x = self.conv2d_bn(mix1,
                           96,
                           1,
                           1,
                           strides=(1, 1),
                           padding='valid',
                           activate=self.activate)

        # inception2
        branch_0 = self.conv2d_bn(x,
                                  64,
                                  3,
                                  3,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_1 = self.conv2d_bn(x,
                                  96,
                                  1,
                                  1,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_1 = self.conv2d_bn(branch_1,
                                  128,
                                  3,
                                  3,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_1 = self.conv2d_bn(branch_1,
                                  160,
                                  3,
                                  3,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_3 = AveragePooling2D((3, 3),
                                    strides=(1, 1),
                                    padding='same',
                                    name="avg_pool_1")(x)
        if self.use_dense_block:
            x1 = [x, branch_0, branch_1, branch_3]
        else:
            x1 = [branch_0, branch_1, branch_3]
        mix2 = concatenate(x1, axis=channel_axis)

        # inception3
        branch_0 = self.conv2d_bn(mix2,
                                  192,
                                  1,
                                  1,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_1 = self.conv2d_bn(mix2,
                                  128,
                                  1,
                                  1,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_1 = self.conv2d_bn(branch_1,
                                  160,
                                  1,
                                  7,
                                  strides=(1, 1),
                                  activate=self.activate)
        branch_1 = self.conv2d_bn(branch_1,
                                  160,
                                  7,
                                  1,
                                  strides=(1, 1),
                                  activate=self.activate)
        if self.use_dense_block:
            x = [x, mix2, branch_0, branch_1]
        else:
            x = [mix2, branch_0, branch_1]
        mix3 = concatenate(x, axis=channel_axis, name='mixed3')

        # translate layer
        if self.use_dense_block:
            x = self.conv2d_bn(mix3,
                               192,
                               1,
                               1,
                               strides=(1, 1),
                               padding='valid',
                               activate=self.activate)
            x = AveragePooling2D((2, 2), strides=(2, 2))(x)
            x1 = BatchNormalization(scale=True, axis=channel_axis)(x)
        else:
            x1 = self.conv2d_bn(mix3,
                                192,
                                1,
                                1,
                                strides=(1, 1),
                                padding='valid',
                                activate=self.activate)

        # inception4
        netb00 = self.conv2d_bn(x1,
                                192,
                                1,
                                1,
                                strides=(1, 1),
                                padding='same',
                                activate=self.activate)
        netb10 = self.conv2d_bn(x1,
                                192,
                                1,
                                1,
                                strides=(1, 1),
                                padding='same',
                                activate=self.activate)
        netb11 = self.conv2d_bn(netb10,
                                256,
                                3,
                                3,
                                strides=(1, 1),
                                padding='same',
                                activate=self.activate)
        netb20 = self.conv2d_bn(x1,
                                160,
                                1,
                                1,
                                strides=(1, 1),
                                padding='same',
                                activate=self.activate)
        netb21 = self.conv2d_bn(netb20,
                                192,
                                3,
                                3,
                                strides=(1, 1),
                                padding='same',
                                activate=self.activate)
        netb22 = self.conv2d_bn(netb21,
                                256,
                                3,
                                3,
                                strides=(1, 1),
                                padding='same',
                                activate=self.activate)
        netb30 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x1)
        netb31 = self.conv2d_bn(netb30,
                                160,
                                1,
                                1,
                                strides=(1, 1),
                                padding='same',
                                activate=self.activate)
        if self.use_dense_block:
            x = concatenate([x, netb00, netb11, netb22, netb31],
                            axis=channel_axis,
                            name='mixed4')
        else:
            x = concatenate([netb00, netb11, netb22, netb31],
                            axis=channel_axis,
                            name='mixed4')

        # inception5 * 2
        feature_list = [x]
        for _ in range(2):
            branch_0 = self.conv2d_bn(x,
                                      256,
                                      1,
                                      1,
                                      strides=(1, 1),
                                      activate=self.activate)
            branch_1 = self.conv2d_bn(x,
                                      128,
                                      1,
                                      3,
                                      strides=(1, 1),
                                      activate=self.activate)
            branch_1 = self.conv2d_bn(branch_1,
                                      192,
                                      3,
                                      1,
                                      strides=(1, 1),
                                      activate=self.activate)
            branch_1 = self.conv2d_bn(branch_1,
                                      256,
                                      1,
                                      3,
                                      strides=(1, 1),
                                      activate=self.activate)
            a = [branch_0, branch_1]
            mix5 = concatenate(a, axis=channel_axis)
            x1 = self.conv2d_bn(mix5,
                                256,
                                1,
                                1,
                                strides=(1, 1),
                                padding='valid',
                                activate=self.activate)
            x = concatenate([x, x1], axis=channel_axis)
            feature_list.append(x)
        if self.use_global_average_pool:
            x = concatenate(feature_list, axis=channel_axis)

        if self.use_global_average_pool:
            # GlobalAveragePooling Layer
            x = GlobalAveragePooling2D(name='global_avg_pool')(x)
        else:
            # Fully Connection Layer
            x = Dense(2000)(x)
            x = Dense(1000)(x)
        x = Dense(7, name='Logits')(x)
        x = Activation('softmax', name='prob')(x)
        model = Model(inputs=inputs, outputs=x, name='FNet')
        return model
Example #15
File: Model.py  Project: Ecgbert/Face_Net
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler


def swish(x):
    return K.sigmoid(x) * x


class Swish(Activation):
    def __init__(self, activation, **kwargs):
        super(Swish, self).__init__(activation, **kwargs)
        self.__name__ = 'swish'


get_custom_objects().update({"swish": Swish(swish)})


# Model class
class CustomModel(object):
    # Convolution layer + batch normalization + activation function
    def conv2d_bn(self,
                  x,
                  filters,
                  num_row,
                  num_col,
                  padding='same',
                  strides=(1, 1),
                  name=None,
                  activate='relu'):
        if name is not None:
Example #16
import numpy as np
from tensorflow.python.keras.models import load_model
from tensorflow.keras.utils import get_custom_objects
from scipy import stats
import random as rnd
from matplotlib import gridspec


def pure_linear(x):
    return x


def f(x):
    return int(x)


f2 = np.vectorize(f)

get_custom_objects().update({'pure_linear': pure_linear})



data_Train_Input = np.loadtxt('/home/andrea/JET/84786_84798_asimV.txt')    # inputs of the training set
data_Train_Input = data_Train_Input.reshape((-1, 1))
data_Train_Target = np.loadtxt('/home/andrea/JET/84786_84798_efitV.txt')   # targets of the training set
data_Train_Target = data_Train_Target.reshape((-1, 1))


#% ------------------------------------------------------------------
#% Neural Network Layout
#% ------------------------------------------------------------------

data_Train_Input_norm = stats.zscore(data_Train_Input)
data_Train_Input_mean = np.mean(data_Train_Input)
Example #17
    def __init__(self,
                 prefix="convnet",
                 optimizer=None,
                 layers=None,
                 loss=None,
                 metrics=None,
                 epochs=10,
                 batch_size=32,
                 callbacks=None,
                 scale=False,
                 input_shape=(28, 28, 1)
                 ):

        # Default parameter settings -------------------------------
        if optimizer is None:
            optimizer = ["SGD", {'lr': 0.01,
                                 'decay': 1e-6,
                                 'momentum': 0.9,
                                 'nesterov': True}]
        if layers is None:
            layers = [["Conv2D", {"filters": 6,
                                  "kernel_size": (4, 4),
                                  "strides": 1,
                                  "data_format": "channels_last",
                                  "padding": "same"
                                  }
                       ],
                      ["Activation", {"activation": "relu"}],
                      ["MaxPooling2D", {"pool_size": (2, 2),
                                        "padding": "valid",
                                        "strides": None}
                       ],
                      ["Flatten", {}],
                      ["Dense", {"units": 50}],
                      ["Activation", {"activation": "tanh"}],
                      ["Dense", {"units": 4}],
                      ["Activation", {"activation": "softmax"}]]
        if loss is None:
            loss = "mean_squared_error"

        if metrics is None:
            metrics = ['acc', spIndex]

        get_custom_objects().update({"spIndex": spIndex})

        # Place input shape on first layer parameter list
        layers[0][1]['input_shape'] = input_shape

        # Setting parameters --------------------------------------
        self.__dict__ = OrderedDict()
        self.__dict__['input_shape'] = input_shape
        self.__dict__['prefix'] = prefix
        self.list2Optimizer(optimizer)
        self.list2layers(layers)
        self.__dict__['loss'] = loss[0] if isinstance(loss, list) else loss
        self.__dict__['metrics'] = metrics
        self.__dict__['epochs'] = epochs
        self.__dict__['batch_size'] = batch_size
        self.__dict__['scale'] = scale

        if callbacks is None:
            self.__dict__['callbacks'] = Callbacks()
        elif isinstance(callbacks, Callbacks):
            self.__dict__['callbacks'] = callbacks
        elif isinstance(callbacks, list):
            self.__dict__['callbacks'] = Callbacks(callbacks)
        else:
            raise ValueError('callbacks must be an instance of Callbacks or list; '
                             '%s of type %s was passed' % (callbacks, type(callbacks)))
Example #18
            regularizers.serialize(self.bias_regularizer),
            'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
            'kernel_constraint':
            constraints.serialize(self.kernel_constraint),
            'bias_constraint':
            constraints.serialize(self.bias_constraint),
            'use_bias':
            self.use_bias
        }
        base_config = super(CosineConvolution2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


CosineConv2D = CosineConvolution2D
get_custom_objects().update({'CosineConvolution2D': CosineConvolution2D})
get_custom_objects().update({'CosineConv2D': CosineConv2D})


class SubPixelUpscaling(Layer):
    """ Sub-pixel convolutional upscaling layer.

    This layer requires a Convolution2D prior to it,
    having output filters computed according to
    the formula :

        filters = k * (scale_factor * scale_factor)
        where k = a user defined number of filters (generally larger than 32)
              scale_factor = the upscaling factor (generally 2)

    This layer performs the depth to space operation on
Example #19
        base_lr = self._optimizer.lr
        for param, multiplier in mul_lr_params.items():
            self._optimizer.lr = base_lr * multiplier
            updates.extend(self._optimizer.get_updates(loss, [param]))

        self._optimizer.lr = base_lr
        updates.extend(self._optimizer.get_updates(loss, base_lr_params))

        return updates

    def get_config(self):
        config = {
            'optimizer': self._class,
            'lr_multipliers': self._lr_multipliers
        }
        base_config = self._optimizer.get_config()
        # noinspection PyTypeChecker
        return dict(list(base_config.items()) + list(config.items()))

    def __getattr__(self, name):
        return getattr(self._optimizer, name)

    def __setattr__(self, name, value):
        if name.startswith('_'):
            super(LearningRateMultiplier, self).__setattr__(name, value)
        else:
            self._optimizer.__setattr__(name, value)


get_custom_objects().update({'LearningRateMultiplier': LearningRateMultiplier})
Example #20
                                       shape=(d_model, ),
                                       initializer=self.kernel_initializer,
                                       regularizer=self.kernel_regularizer,
                                       constraint=self.kernel_constraint,
                                       trainable=True)
        return super().build(input_shape)

    def call(self, inputs, mask=None, training=None):
        input_shape = K.shape(inputs)
        d_model = input_shape[-1]
        step1 = self.activation(
            K.bias_add(K.dot(K.reshape(inputs, (-1, d_model)), self.weights1),
                       self.biases1,
                       data_format='channels_last'))
        if 0.0 < self.dropout_rate < 1.0:

            def dropped_inputs():
                return K.dropout(step1, self.dropout_rate, K.shape(step1))

            step1 = K.in_train_phase(dropped_inputs, step1, training=training)
        step2 = K.bias_add(K.dot(step1, self.weights2),
                           self.biases2,
                           data_format='channels_last')
        result = K.reshape(step2, (-1, input_shape[-2], input_shape[-1]))
        return result


get_custom_objects().update({
    'TransformerTransition': TransformerTransition,
})
Example #21
            model.add(SineReLU())
            model.add(Dropout(0.3))

            model.add(Dense(1024))
            model.add(SineReLU())
            model.add(Dropout(0.5))

            model.add(Dense(10, activation = 'softmax'))
        ```
    """

    def __init__(self, epsilon=0.0025, **kwargs):
        super(SineReLU, self).__init__(**kwargs)
        self.supports_masking = True
        self.epsilon = K.cast_to_floatx(epsilon)

    def call(self, Z):
        m = self.epsilon * (K.sin(Z) - K.cos(Z))
        A = K.maximum(m, Z)
        return A

    def get_config(self):
        config = {'epsilon': float(self.epsilon)}
        base_config = super(SineReLU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape

get_custom_objects().update({'SineReLU': SineReLU})
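
# Plain NumPy sketch of the same activation, handy for eyeballing its shape:
# below the identity line it follows epsilon * (sin(z) - cos(z)) rather than
# a hard zero, which keeps a small gradient for negative inputs.
import numpy as np
z = np.linspace(-5.0, 5.0, 11)
a = np.maximum(0.0025 * (np.sin(z) - np.cos(z)), z)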
Example #22
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

    def call(self, inputs):
        # l2-normalize the embeddings (per sample) and the class weights
        # (per column) so the dot product below is a cosine similarity
        norm_x = K.l2_normalize(inputs, axis=-1)
        norm_w = K.l2_normalize(self.kernel, axis=0)

        # compute arc distance
        output = K.dot(norm_x, norm_w)
        return output

    def compute_output_shape(self, input_shape):
        return (None, self.units)

    def get_config(self):
        config = {
            'units': self.units,
            'kernel_initializer':
            initializers.serialize(self.kernel_initializer),
            'kernel_regularizer':
            regularizers.serialize(self.kernel_regularizer),
            'kernel_constraint': constraints.serialize(self.kernel_constraint),
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'ArcDense': ArcDense})
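
# The layer outputs cos(theta) between each (normalized) embedding and each
# class-weight column; an ArcFace-style loss then adds an angular margin.
# Equivalent NumPy check (a sketch):
import numpy as np
x = np.random.rand(2, 8)                  # batch of embeddings
w = np.random.rand(8, 10)                 # kernel: (input_dim, units)
xn = x / np.linalg.norm(x, axis=1, keepdims=True)
wn = w / np.linalg.norm(w, axis=0, keepdims=True)
cos = xn @ wn                             # shape (2, 10), cosine similarities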
Example #23
    def call(self, inputs, **kwargs):
        block_depth = kwargs.get('step')
        sf_depth = kwargs.get('src_fact_step')
        if block_depth is None:
            raise ValueError("Please provide the current Transformer's step "
                             "using the 'step' keyword argument.")
        if sf_depth is None:
            raise ValueError("Please provide the src and facts' step "
                             "using the 'src_fact_step' keyword argument.")

        result = inputs + self.word_position_embeddings
        if block_depth is not None:
            result = result + self.depth_embeddings[block_depth]
        if sf_depth is not None:
            if sf_depth == 0:
                result = result + self.sf_depth_embeddings[sf_depth]
            elif sf_depth == 1:
                result = K.permute_dimensions(result, [0, 2, 1, 3])
                result = result + self.sf_depth_embeddings[1:, :]
                result = K.permute_dimensions(result, [0, 2, 1, 3])
        return result


get_custom_objects().update({
    'TransformerCoordinateEmbedding': TransformerCoordinateEmbedding,
    'SrcFactTransformerCoordinateEmbedding':
    SrcFactTransformerCoordinateEmbedding,
    'AddCoordinateEncoding': AddCoordinateEncoding,
    'AddPositionalEncoding': AddPositionalEncoding,
})
Example #24
        self.model.save(file)

    def load(self, file='D:/TensorFlow/stock_data/lstm_28.h5'):
        self.model = load_model(
            file, custom_objects={'risk_estimation': risk_estimation})

    def predict(self, test):
        predict = []
        for sample_index in range(test.shape[0]):
            test_data = test[sample_index].reshape(1, time_step, input_size)
            prev = self.model.predict(test_data)
            predict.append(prev)
        return np.array(predict)


get_custom_objects().update({'ReLU': ReLU})
model = Model(input_shape=(time_step, input_size), loss=risk_estimation)
net = model.lstmModel()

model.load()
# Train the model
model.train()
# Save the model
model.save()
# Reload the model
model.load()
# Predict
predict = model.predict(test_x)
predict = predict.reshape(-1, output_size)
print(predict)
Example #25
    batch_token_ids = []
    batch_segment_ids = []
    for i in range(len(X)):
        token_ids = X[i].tolist()
        segment_ids = [0 for token in X[i]]
        batch_token_ids.append(token_ids)
        batch_segment_ids.append(segment_ids)
    return [np.array(batch_token_ids), np.array(batch_segment_ids)]


model_path = "model_75_epochs_no_earlystopping.hd5"
print("Trained model path : %s" % model_path)
test_filename = "data/trial_data.txt"
print("Test dataset path : %s" % test_filename)
results_path = "data/res/submission.txt"
print("Results path : %s" % results_path)

# ALBERT predictions

print("\n === ALBERT predictions ===\n")

X_test, _, _ = create_dataset(test_filename)

model = load_model(model_path, custom_objects=get_custom_objects())
model.summary()
predictions = model.predict(data_creator(X_test))
word_id_lsts, post_lsts, _, _, _, _ = read_data(test_filename)
predictions_unpadded = unpad(np.array(predictions), word_id_lsts)

write_results(word_id_lsts, post_lsts, predictions_unpadded, results_path)
print("Results written")
Example #26
    def __init__(self, save_all_models=False):
        Callback.__init__(self)
        self.save_all_models = save_all_models
        get_custom_objects()['PermanentDropout'] = PermanentDropout
Example #27
    Feature extraction layer, generally used for extracting the CLS vector.

    """

    def __init__(self, index=0, **kwargs):
        super(Extract, self).__init__(**kwargs)
        self.index = index
        self.supports_masking = True

    def get_config(self):
        config = {
            'index': self.index,
        }
        base_config = super(Extract, self).get_config()
        base_config.update(config)
        return base_config

    def compute_output_shape(self, input_shape):
        # [N, emb_dim]
        return input_shape[:1] + input_shape[2:]

    def compute_mask(self, inputs, mask=None):
        return None

    def call(self, x, mask=None):
        # [N, emd_dim]
        return x[:, self.index]


get_custom_objects().update(custom_config())
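
# Usage sketch: pull the first (CLS) position out of a [batch, seq, dim]
# tensor; the Input layer below is just a stand-in for an encoder output.
from keras.layers import Input
seq_output = Input(shape=(128, 768))
cls_vector = Extract(index=0)(seq_output)  # -> (batch, 768)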
Example #28
                          K.transpose(embedding_matrix))
        if self.add_biases:
            projected = K.bias_add(projected,
                                   self.biases,
                                   data_format='channels_last')
        if 0 < self.projection_dropout < 1:
            projected = K.in_train_phase(
                lambda: K.dropout(projected, self.projection_dropout),
                projected,
                training=kwargs.get('training'))
        attention = projected  #K.dot(projected, K.transpose(embedding_matrix))
        if self.scaled_attention:
            # scaled dot-product attention, described in
            # "Attention is all you need" (https://arxiv.org/abs/1706.03762)
            sqrt_d = K.constant(math.sqrt(emb_output_dim), dtype=K.floatx())
            attention = attention / sqrt_d
        result = K.reshape(
            self.activation(attention),
            (input_shape_tensor[0], input_shape_tensor[1], emb_input_dim))
        return result

    def compute_output_shape(self, input_shape):
        main_input_shape, embedding_matrix_shape = input_shape
        emb_input_dim, emb_output_dim = embedding_matrix_shape
        return main_input_shape[0], main_input_shape[1], emb_input_dim


get_custom_objects().update({
    'ReusableEmbedding': ReusableEmbedding,
    'TiedOutputEmbedding': TiedOutputEmbedding,
})
Example #29
    def get_model(self, pad_id):
        self.pad_id = pad_id
        inp_src = Input(name='src_input', shape=(None, ), dtype='int32')
        inp_answer = Input(
            name='answer_input',
            shape=(None, ),
            dtype='int32',
        )

        encoder_output = self.__get_encoder(inp_src)

        mutual_attn_mask = PaddingMaskLayer(name='decoder_mutual_padding_mask',
                                            src_len=self.args.tar_seq_length,
                                            pad_id=self.pad_id)(inp_src)

        decoder_output = self.__get_decoder(inp_answer, encoder_output,
                                            mutual_attn_mask)

        # build model part
        word_predictions = self.output_softmax_layer(
            self.output_layer([decoder_output, self.decoder_embedding_matrix]))
        model = Model(inputs=[inp_src, inp_answer], outputs=[word_predictions])
        return model


get_custom_objects().update({
    'PaddingMaskLayer': PaddingMaskLayer,
    'SequenceMaskLayer': SequenceMaskLayer,
})
Example #30
        # some real computational time.
        if self.zeros_like_input is None:
            self.zeros_like_input = K.zeros_like(
                inputs, name='zeros_like_input')
        # just because K.any(step_is_active) doesn't work in PlaidML
        any_step_is_active = K.greater(
            K.sum(K.cast(step_is_active, 'int32')), 0)
        step_weighted_output = K.switch(
            any_step_is_active,
            K.expand_dims(halting_prob, -1) * inputs,
            self.zeros_like_input)
        if self.weighted_output is None:
            self.weighted_output = step_weighted_output
        else:
            self.weighted_output += step_weighted_output
        return [inputs, self.weighted_output]

    def compute_output_shape(self, input_shape):
        return [input_shape, input_shape]

    def finalize(self):
        self.add_loss(self.ponder_cost)


get_custom_objects().update({
    'TransformerEncoderBlock': TransformerEncoderBlock,
    'TransformerDecoderBlock': TransformerDecoderBlock,
    'TransformerACT': TransformerACT,
    'gelu': gelu,
})
Example #31
        Parameters
        ----------
        inputs: tensor
            Input tensor, or list/tuple of input tensors
        """
        if get_backend() == "amd":
            return inputs * K.sigmoid(inputs * self.beta)
        # Native TF Implementation has more memory-efficient gradients
        return tf.nn.swish(inputs * self.beta)

    def get_config(self):
        """Returns the config of the layer.

        Adds the :attr:`beta` to config.

        Returns
        --------
        dict
            A python dictionary containing the layer configuration
        """
        config = super().get_config()
        config["beta"] = self.beta
        return config


# Update layers into Keras custom objects
for name, obj in inspect.getmembers(sys.modules[__name__]):
    if inspect.isclass(obj) and obj.__module__ == __name__:
        get_custom_objects().update({name: obj})
Example #32
        enc_state3 = self.memnn_encoder2([inp_q, inp_fact])

        emb_ans = self.decoder_embedding(inp_tar)
        emb_fact_ans = self.decoder_embedding(inp_fact_tar)

        # task 1: seq2seq, input: question; output: answer
        output1, state1 = self.decoder(emb_ans, initial_state=enc_state1)
        # task 2: memnn, input: question and facts; output: fact
        output2, state2 = self.decoder(emb_fact_ans, initial_state=enc_state2)
        # task 3: memnn, input: question and facts; output: answer
        output3, state3 = self.decoder(emb_ans, initial_state=enc_state3)

        # final output
        final_output1 = self.decoder_dense1(output1)
        final_output2 = self.decoder_dense2(output2)
        final_output3 = self.decoder_dense3(output3)

        # define model
        model = Model(inputs=[inp_q, inp_tar, inp_fact_tar, inp_fact],
                      outputs=[final_output1, final_output2, final_output3])

        return model


get_custom_objects().update({
    'PosEncodeEmbedding': PosEncodeEmbedding,
    'MultiTaskModel': MultiTaskModel,
    'S2SEncoder': S2SEncoder,
    'MemNNEncoder': MemNNEncoder,
})
Example #33
def custom_function_keras():
    get_custom_objects().update({'swish': Activation(swish, name='swish')})
Example #34
    def __init__(self,
                 latent_dim,
                 output_dim,
                 batch_size=1000,
                 noise_dim=50,
                 n_epochs=1000,
                 hidden_layer_depth=3,
                 hidden_size=200,
                 activation='swish',
                 verbose=True,
                 n_discriminator=5):
        self.latent_dim = latent_dim
        self.output_dim = output_dim
        self.hidden_layer_depth = hidden_layer_depth
        self.hidden_size = hidden_size
        self.activation = activation
        self.batch_size = batch_size
        self.n_epochs = n_epochs
        self.noise_dim = noise_dim
        self.verbose = verbose

        self.n_discriminator = n_discriminator
        optimizer = RMSprop(lr=0.00005)

        # Register swish
        get_custom_objects().update({'swish': swish})

        # Build generator and discriminator
        self.generator = self._build_generator()
        self.discriminator = self._build_discriminator()

        #
        # Construct computational graph for discriminator
        #
        self.generator.trainable = False

        # Real molecule
        real_mol = Input(shape=(self.output_dim, ))

        # Discriminator input
        z_disc = Input(shape=(self.latent_dim + self.noise_dim, ))
        fake_mol = self.generator(z_disc)

        # conditioning input
        z_cond = Input(shape=(self.latent_dim, ))
        # Condition both fake and valid.
        fake_mol_cond = Concatenate(axis=1)([fake_mol, z_cond])
        real_mol_cond = Concatenate(axis=1)([real_mol, z_cond])

        # Discriminator does its job
        fake = self.discriminator(fake_mol_cond)
        valid = self.discriminator(real_mol_cond)

        # Interpolated between real and fake molecule.
        interp_mol = RandomWeightedAverage(batch_size=self.batch_size)(
            [real_mol_cond, fake_mol_cond])
        validity_interp = self.discriminator(interp_mol)

        # Use Python partial to provide loss function with additional
        # 'averaged_samples' argument
        partial_gp_loss = partial(self._gradient_penalty_loss,
                                  averaged_samples=interp_mol)
        partial_gp_loss.__name__ = 'gradient_penalty'  # Keras requires function names

        self.discriminator_model = Model(
            inputs=[real_mol, z_disc, z_cond],
            outputs=[valid, fake, validity_interp])
        self.discriminator_model.compile(loss=[
            self._wasserstein_loss, self._wasserstein_loss, partial_gp_loss
        ],
                                         optimizer=optimizer,
                                         loss_weights=[1, 1, 10])

        #
        # Construct computational graph for generator
        #
        self.discriminator.trainable = False
        self.generator.trainable = True

        z_gen = Input(shape=(self.latent_dim + self.noise_dim, ))
        mol = self.generator(z_gen)
        z_cond = Input(shape=(self.latent_dim, ))
        mol_cond = Concatenate(axis=1)([mol, z_cond])
        valid = self.discriminator(mol_cond)
        self.generator_model = Model([z_gen, z_cond], valid)
        self.generator_model.compile(loss=self._wasserstein_loss,
                                     optimizer=optimizer)

        self.is_fitted = False
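
# Neither loss referenced above is shown in this window; standard WGAN-GP
# definitions (a sketch of what the methods are assumed to look like):
import numpy as np
from keras import backend as K

def _wasserstein_loss(self, y_true, y_pred):
    return K.mean(y_true * y_pred)

def _gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
    # Penalize deviations of the critic's gradient norm from 1 at points
    # interpolated between real and fake samples.
    gradients = K.gradients(y_pred, averaged_samples)[0]
    gradients_sqr = K.square(gradients)
    gradients_sqr_sum = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    return K.mean(K.square(1 - gradient_l2_norm))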