Example 1
def test_get_from_module_uses_custom_object():
    get_custom_objects().clear()
    assert (get_from_module("CustomObject", globals(), "test_generic_utils") == CustomObject)
    with pytest.raises(ValueError):
        get_from_module("TestObject", globals(), "test_generic_utils")
    with custom_object_scope({"TestObject": CustomObject}):
        assert (get_from_module("TestObject", globals(), "test_generic_utils") == CustomObject)
Example 2
    '''
    def __init__(self, activation, **kwargs):
        super(Mish, self).__init__(activation, **kwargs)
        self.__name__ = 'Mish'


def mish(x, fast=False):
    if fast:  # faster but requires extra storage
        y = K.exp(-x)
        z = 1 + 2 * y
        return x * z / (z + 2 * y * y)
    #return x * tf.math.tanh(tf.math.softplus(x))
    return x * K.tanh(K.softplus(x))


get_custom_objects().update({'Mish': Mish(mish)})
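
# Illustrative usage sketch (not from the original snippet; the layer sizes
# are arbitrary): once registered, 'Mish' can be referenced by its string name.
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(64, activation='Mish', input_shape=(16,)))
model.add(Dense(1))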


def space_to_depth_x2(x):
    return tf.space_to_depth(x, block_size=2)


def FullYOLO(
    X, Y
):  # after https://github.com/experiencor/basic-yolo-keras/blob/master/backend.py
    # see Table 6 in YOLO9000 paper, https://arxiv.org/pdf/1612.08242.pdf
    input_image = Input(shape=X[0].shape)

    # the function to implement the organization layer (thanks to github.com/allanzelener/YAD2K)
    def space_to_depth_x2(x):
        return tf.space_to_depth(x, block_size=2)
    def compute_output_shape(self, input_shape):
        factor = self.upsampling_factor * self.upsampling_factor
        input_shape_1 = None
        if input_shape[1] is not None:
            input_shape_1 = input_shape[1] * self.upsampling_factor
        input_shape_2 = None
        if input_shape[2] is not None:
            input_shape_2 = input_shape[2] * self.upsampling_factor
        dims = [
            input_shape[0], input_shape_1, input_shape_2,
            int(input_shape[3] / factor)
        ]
        return tuple(dims)


get_custom_objects().update({'SubpixelConv2D': SubpixelConv2D})
'''
Usage:
    x = SubpixelConv2D(upsampling_factor=4)(ip)
'''


#%%
class ICNR:
    """ICNR initializer for checkerboard artifact free sub pixel convolution
    Ref:
     [1] Andrew Aitken et al. Checkerboard artifact free sub-pixel convolution
     https://arxiv.org/pdf/1707.02937.pdf
    Copy:
        https://github.com/kostyaev/ICNR
    Args:
# In[362]:

# custom activation from Bagnall 2015
#  does not appear to perform as well as a ReLU
import tensorflow as tf
from keras.utils.generic_utils import get_custom_objects


def ReSQRT(x):
    cond = tf.less_equal(x, 0.0)
    result = tf.where(cond, x * 0.0, tf.sqrt(x + 1) - 1)
    return result


get_custom_objects().update({'ReSQRT': ReSQRT})
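
# Quick numeric check of the definition (illustrative, not from the original
# source): negative inputs map to 0 and positive x to sqrt(x + 1) - 1.
import numpy as np
xs = [-2.0, 0.0, 3.0]
print([0.0 if v <= 0 else float(np.sqrt(v + 1) - 1) for v in xs])  # [0.0, 0.0, 1.0]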

# Bagnall proposes that the following possible values contribute to the success of the model:
#
# | meta-parameter                  | typical value                      |
# |---------------------------------|------------------------------------|
# | initial adagrad learning scale  | 0.1, 0.14, 0.2, 0.3                |
# | initial leakage between classes | 1/4N to 5/N                        |
# | leakage decay (per sub-epoch)   | 0.67 to 0.9                        |
# | hidden neurons                  | 79, 99, 119, 139                   |
# | presynaptic noise σ             | 0, 0.1, 0.2, 0.3, 0.5              |
# | sub-epochs                      | 6 to 36                            |
# | text direction                  | forward or backward                |
# | text handling                   | sequential, concatenated, balanced |
# | initialisation                  | gaussian, zero                     |
def set_swish():
    get_custom_objects().update({"swish": layers.Activation(swish)})
Example 6
        #     broadcast_beta = K.reshape(self.beta, (1, G, C//G, 1, 1))
        #     outputs = outputs + broadcast_beta
        # outputs = K.reshape(outputs, (-1, C, H, W))
        # outputs = K.permute_dimensions(outputs, (0, 2, 3, 1))
        # return outputs

    def get_config(self):
        config = {
            'groups': self.groups,
            'axis': self.axis,
            'epsilon': self.epsilon,
            'center': self.center,
            'scale': self.scale,
            'beta_initializer': initializers.serialize(self.beta_initializer),
            'gamma_initializer':
            initializers.serialize(self.gamma_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_regularizer':
            regularizers.serialize(self.gamma_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_constraint': constraints.serialize(self.gamma_constraint)
        }
        base_config = super(GroupNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        return input_shape


get_custom_objects().update({'GroupNormalization': GroupNormalization})
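
# Hedged sketch of what the registration enables (the file name below is
# hypothetical, not from the original): a saved model that contains
# GroupNormalization layers can now be reloaded without passing
# custom_objects explicitly.
from keras.models import load_model

model = load_model('model_with_groupnorm.h5')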
    steps_per_epoch = int(num_frame / batch_size)

    continue_training = False
    saved_model = 'saved_model/fv_model_for_car_June_28_99.h5'

    if not continue_training:
        print('Initiate training')
        model = fcn_model(summary=False)
        opt = Adam(lr=1e-4)
        #keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(optimizer=opt, loss=my_loss)

    else:
        print('Continue training')
        from keras.utils.generic_utils import get_custom_objects
        get_custom_objects().update({"my_loss": my_loss})

        model = load_model(saved_model)
        opt = Adam(lr=1e-5)
        # #keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(optimizer=opt, loss=my_loss)

    checkpointer = ModelCheckpoint(
        'saved_model/fv_model_for_car_June_28_{epoch:02d}.h5')
    #logger = CSVLogger(filename='saved_model/model_May_29_450.csv')

    print(
        'Start training - batch_size : {0} - num_frame : {1} - steps_per_epoch : {2}'
        .format(batch_size, num_frame, steps_per_epoch))
    start = time.time()
Example 8
        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]

        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            normed = normed + broadcast_beta
        return normed

    def get_config(self):
        config = {
            'axis': self.axis,
            'epsilon': self.epsilon,
            'center': self.center,
            'scale': self.scale,
            'beta_initializer': initializers.serialize(self.beta_initializer),
            'gamma_initializer': initializers.serialize(self.gamma_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_constraint': constraints.serialize(self.gamma_constraint)
        }
        base_config = super(InstanceNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

get_custom_objects().update({'InstanceNormalization': InstanceNormalization})
import os
import keras.backend as K
from keras.utils.generic_utils import get_custom_objects


def Swish(x):
    return (K.sigmoid(x) * x)


class Swish_Class(Activation):
    def __init__(self, activation, **kwargs):
        super(Swish_Class, self).__init__(activation, **kwargs)
        self.__name__ = 'SWISH'


get_custom_objects().update({'Swish': Swish_Class(Swish)})
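
# As an aside (a sketch, not from the original source), the same mapping can
# be supplied temporarily with custom_object_scope instead of mutating the
# global registry; the file name here is hypothetical.
from keras.utils.generic_utils import custom_object_scope
from keras.models import load_model

with custom_object_scope({'Swish': Swish_Class(Swish)}):
    model = load_model('model_trained_with_swish.h5')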

# Training parameters
batch_size = 128  # orig paper trained all networks with batch_size=128
epochs = 200
data_augmentation = True
num_classes = 10

# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True

# Model parameter
# ----------------------------------------------------------------------------
#           |      | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch
# Model     |  n   | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti
#           |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)
Example 10
            padding_height = max(kernel_height - self.stride, 0)
        else:
            padding_height = max(kernel_height - (in_height % self.stride), 0)
        if (in_width % self.stride == 0):
            padding_width = max(kernel_width - self.stride, 0)
        else:
            padding_width = max(kernel_width - (in_width % self.stride), 0)

        padding_top = padding_height // 2
        padding_bot = padding_height - padding_top
        padding_left = padding_width // 2
        padding_right = padding_width - padding_left

        return pad(x, [[0,0],
                          [padding_top, padding_bot],
                          [padding_left, padding_right],
                          [0,0] ],
                          'REFLECT')

    def get_config(self):
        config = {'stride': self.stride,
                  'kernel_size': self.kernel_size}
        base_config = super(ReflectionPadding2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 


# Update layers into Keras custom objects
for name, obj in inspect.getmembers(sys.modules[__name__]):
    if inspect.isclass(obj) and obj.__module__ == __name__:
        get_custom_objects().update({name: obj})
Example 11
if __name__ != "__main__":
    from ..util import isstring, istuple, islist
else:
    from nifty.util import isstring, istuple, islist


#####################################################################################################################################################
#####
#####  LAYERS & ACTIVATIONS
#####

def Relu(x): return Activation('relu')(x)
def LeakyReLU(x): return keras.layers.LeakyReLU()(x)
def Softmax(x): return Activation('softmax')(x)

get_custom_objects().update({'Relu': Relu})
# get_custom_objects().update({'Leaky_relu': LeakyReLU})


def relu_BN(y):
    "Relu activation preceeded by BatchNormalization."
    y = BatchNormalization()(y)
    y = Relu(y)
    return y

def leaky_BN(y):
    "LeakyReLU activation preceeded by BatchNormalization."
    y = BatchNormalization()(y)
    y = keras.layers.LeakyReLU()(y)
    return y
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline

from keras.layers import Activation
from keras.utils.generic_utils import get_custom_objects

def mod(x):
    return x % 2

get_custom_objects().update({'mod': Activation(mod)})


# fix random seed for reproducibility
seed = 7
np.random.seed(seed)

# Dataset preparation
X = np.arange(1, 100)
# X = np.arange(1, 10)
np.random.shuffle(X)

# We now have a shuffled array of elements n, 1 <= n < 100.
# Our output variable will be the string ODD or EVEN.
# We will use the numpy vectorize function to generate the
# output dataset, as sketched below.
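
# A sketch of that vectorize step (the helper and the label strings are
# assumptions, not shown in this excerpt):
def odd_even(n):
    return 'ODD' if n % 2 else 'EVEN'

Y = np.vectorize(odd_even)(X)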
from keras.layers import Dropout, Activation, Input
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import GlobalAveragePooling2D
from keras.regularizers import l2
from keras.models import Model
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects


def relu_neg(x):
    return K.relu(x + 1) - 1


get_custom_objects().update({'relu_neg': Activation(relu_neg)})
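
# For intuition (a worked check, not from the original): relu_neg is simply
# max(x, -1), i.e. a ReLU whose floor is shifted down to -1.
import numpy as np
print(np.maximum(np.array([-3.0, -0.5, 2.0]) + 1, 0) - 1)  # [-1.  -0.5  2. ]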


def create_model(nb_classes, input_shape, config=None):
    """Create a VGG-16 like model."""
    if len(input_shape) != 3:
        raise Exception("Input shape should be a tuple (nb_channels, nb_rows, "
                        "nb_cols) or (nb_rows, nb_cols, nb_channels), "
                        "depending on your backend.")
    if config is None:
        config = {'model': {}}

    min_feature_map_dimension = min(input_shape[:2])
    if min_feature_map_dimension < 32:
        print("ERROR: Please upsample the feature maps to have at least "
              "a size of 32 x 32. Currently, it has {}".format(input_shape))
Example 14
def test_custom_object_scope_adds_objects():
    get_custom_objects().clear()
    assert (len(get_custom_objects()) == 0)
    with custom_object_scope({"Test1": object, "Test2": object}, {"Test3": object}):
        assert (len(get_custom_objects()) == 3)
    assert (len(get_custom_objects()) == 0)
Example 15
#####


def Relu(x):
    return Activation('relu')(x)


def LeakyReLU(x):
    return keras.layers.LeakyReLU()(x)


def Softmax(x):
    return Activation('softmax')(x)


get_custom_objects().update({'Relu': Relu})
# get_custom_objects().update({'Leaky_relu': LeakyReLU})


def relu_BN(y):
    "Relu activation preceeded by BatchNormalization."
    y = BatchNormalization()(y)
    y = Relu(y)
    return y


def leaky_BN(y):
    "LeakyReLU activation preceeded by BatchNormalization."
    y = BatchNormalization()(y)
    y = keras.layers.LeakyReLU()(y)
    return y
Example 16
            #h._uses_learning_phase = True
            c._uses_learning_phase = True
        return c, [c, c, t]
        #return h, [h, c, t]

    def get_config(self):
        config = {'units': self.units,
                  'retention_ratio': self._retention_ratio,
                  'learn_retention_ratio': self._learn_retention_ratio,
                  'activation': activations.serialize(self.activation),
                  'recurrent_activation': activations.serialize(self.recurrent_activation),
                  'use_bias': self.use_bias,
                  'kernel_initializer': initializers.serialize(self.kernel_initializer),
                  'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
                  'bias_initializer': initializers.serialize(self.bias_initializer),
                  'unit_forget_bias': self.unit_forget_bias,
                  'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
                  'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
                  'bias_regularizer': regularizers.serialize(self.bias_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'kernel_constraint': constraints.serialize(self.kernel_constraint),
                  'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
                  'bias_constraint': constraints.serialize(self.bias_constraint),
                  'dropout': self.dropout,
                  'recurrent_dropout': self.recurrent_dropout}
        base_config = super(PlpRNN, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'PlpRNN': PlpRNN})
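
# A one-line check (illustrative, not from the original) that the custom RNN
# class is now resolvable by name during model deserialization:
assert get_custom_objects()['PlpRNN'] is PlpRNN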
Example 17
    def __init__(self):
        def plotloss():
            plt.figure()
            ax = plt.gca()
            y1 = R_variable['loss_test']
            y2 = R_variable['loss_train']
            plt.plot(y1, 'ro', label='Test')
            plt.plot(y2, 'g*', label='Train')
            # ax.set_xscale('log')
            ax.set_yscale('log')
            plt.legend(fontsize=18)
            plt.xlabel('Epoch', fontsize=15)
            plt.title('loss', fontsize=15)
            fntmp = '%sloss' % (self.FolderName)
            mySaveFig(plt, fntmp, ax=ax, isax=1, iseps=0)

        def plotacc():
            plt.figure()
            ax = plt.gca()
            y1 = R_variable['acc_test']
            y2 = R_variable['acc_train']
            plt.plot(y1, 'ro', label='Test')
            plt.plot(y2, 'g*', label='Train')
            # ax.set_xscale('log')
            # ax.set_yscale('log')
            plt.legend(fontsize=18)
            plt.xlabel('Epoch', fontsize=15)
            plt.title('accuracy', fontsize=15)
            fntmp = '%saccuracy' % (self.FolderName)
            mySaveFig(plt, fntmp, ax=ax, isax=1, iseps=0)

        # Save files
        def savefile():
            # Serialize the variable R; it can be loaded back later if needed
            with open('%s/objs.pkl' % (self.FolderName),
                      'wb') as f:  # Python 3: open(..., 'wb')
                pickle.dump(R_variable, f, protocol=4)

            # Save the entries of R whose size is at most 20
            text_file = open("%s/Output.txt" % (self.FolderName), "w")
            for para in R_variable:
                if np.size(R_variable[para]) > 20:
                    continue
                text_file.write('%s: %s\n' % (para, R_variable[para]))
            text_file.close()

            # Save the losses to CSV files
            da = pd.DataFrame(R_variable['loss_train'])
            da.to_csv(self.FolderName + "loss_train" + ".csv",
                      header=False,
                      columns=None)
            db = pd.DataFrame(R_variable['loss_test'])
            db.to_csv(self.FolderName + "loss_test" + ".csv",
                      header=False,
                      columns=None)
            dc = pd.DataFrame(R_variable['acc_train'])
            dc.to_csv(self.FolderName + "acc_train" + ".csv",
                      header=False,
                      columns=None)
            dd = pd.DataFrame(R_variable['acc_test'])
            dd.to_csv(self.FolderName + "acc_test" + ".csv",
                      header=False,
                      columns=None)

        # Record the error values (L2); updated before each loss plot in case the program is stopped midway
        def gapReocord():
            R_variable['final_train_loss'] = R_variable['loss_train'][-1]
            R_variable['final_test_loss'] = R_variable['loss_test'][-1]
            R_variable['final_train_acc'] = R_variable['acc_train'][-1]
            R_variable['final_test_acc'] = R_variable['acc_test'][-1]

        # Store the errors
        R_variable['loss_test'] = []
        R_variable['loss_train'] = []
        R_variable['acc_test'] = []
        R_variable['acc_train'] = []

        # s-relu
        def custom_activation(x):
            return K.relu(-(x - 1)) * K.relu(x)

        get_custom_objects().update({'srelu': Activation(custom_activation)})

        # Start timing and create a new output directory
        self.t0 = time.time()
        self.FolderName = mk_newfolder()

        def Resnet18_block(X, filters, s, stage, block):
            """
            Implementation of the identity block as defined in Figure 4
            Arguments:
            X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
            filters -- python list of integers, defining the number of filters in the CONV layers of the main path
            s -- integer, stride of the first CONV in the main path (and of the shortcut CONV when s != 1)
            stage -- integer, used to name the layers, depending on their position in the network
            block -- string/character, used to name the layers, depending on their position in the network
            Returns:
            X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
            """
            # defining name basis
            conv_name_base = 'res' + str(stage) + block + '_branch'
            bn_name_base = 'bn' + str(stage) + block + '_branch'
            # Retrieve Filters
            F1, F2 = filters
            # Save the input value. You'll need this later to add back to the main path.
            X_shortcut = X
            if s != 1:
                X_shortcut = Conv2D(filters=F1,
                                    kernel_size=(1, 1),
                                    strides=(s, s),
                                    padding='same',
                                    name=conv_name_base + '2c')(X_shortcut)
                X_shortcut = BatchNormalization(axis=3,
                                                name=bn_name_base +
                                                '2c')(X_shortcut)
                X_shortcut = Activation(R_variable['actfun'])(X_shortcut)
            # First component of main path
            X = Conv2D(filters=F1,
                       kernel_size=(3, 3),
                       strides=(s, s),
                       padding='same',
                       name=conv_name_base + '2a')(X)
            X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
            X = Activation(R_variable['actfun'])(X)
            ### START CODE HERE ###
            # Second component of main path (≈3 lines)
            X = Conv2D(filters=F2,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       name=conv_name_base + '2b')(X)
            X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
            X = Activation(R_variable['actfun'])(X)
            # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)
            X = layers.add([X, X_shortcut])
            X = Activation(R_variable['actfun'])(X)
            ### END CODE HERE ###
            return X

        def ResNet18(input_shape=(28, 28, 1), classes=10):
            X_input = Input(input_shape)
            #X1 = ZeroPadding2D((3,3))(X_input)
            #stage 1
            X1 = Conv2D(64, (3, 3),
                        strides=(1, 1),
                        name='conv1',
                        padding='same',
                        kernel_initializer=glorot_uniform(seed=0))(X_input)
            X3 = BatchNormalization(axis=3, name='bn_conv1')(X1)
            X2 = Activation(R_variable['actfun'])(X3)
            X32 = Dropout(0.1)(X2)
            #stage2
            X3 = Resnet18_block(X32, filters=[64, 64], s=1, stage=2, block='a')
            X4 = Resnet18_block(X3, filters=[64, 64], s=1, stage=2, block='b')
            X33 = Dropout(0.1)(X4)
            #Stage3
            X5 = Resnet18_block(X33,
                                filters=[128, 128],
                                s=2,
                                stage=3,
                                block='a')
            X6 = Resnet18_block(X5,
                                filters=[128, 128],
                                s=1,
                                stage=3,
                                block='b')
            X34 = Dropout(0.1)(X6)
            #Stage4
            X7 = Resnet18_block(X34,
                                filters=[256, 256],
                                s=2,
                                stage=4,
                                block='a')
            X8 = Resnet18_block(X7,
                                filters=[256, 256],
                                s=1,
                                stage=4,
                                block='b')
            X35 = Dropout(0.1)(X8)
            #Stage5
            X9 = Resnet18_block(X35,
                                filters=[1024, 1024],
                                s=2,
                                stage=5,
                                block='a')
            X10 = Resnet18_block(X9,
                                 filters=[1024, 1024],
                                 s=1,
                                 stage=5,
                                 block='b')
            X19 = Resnet18_block(X10,
                                 filters=[1024, 1024],
                                 s=1,
                                 stage=5,
                                 block='c')
            X11 = AveragePooling2D(pool_size=(4, 4))(X19)
            X12 = Dropout(0.1)(X11)

            X23 = Flatten()(X12)
            X24 = Dense(1024,
                        activation=R_variable['actfun'],
                        kernel_initializer=glorot_uniform(seed=1),
                        name='dense1')(X23)
            X25 = Dense(1024,
                        activation=R_variable['actfun'],
                        kernel_initializer=glorot_uniform(seed=2),
                        name='dense2')(X24)
            X26 = Dense(classes,
                        activation='softmax',
                        name='fc' + str(classes),
                        kernel_initializer=glorot_uniform(seed=3))(X25)

            model = Model(inputs=X_input, outputs=X26, name='ResNet18')

            return model

        self.model = ResNet18(input_shape=(28, 28, 1), classes=10)
        self.sgd = tf.keras.optimizers.SGD(learning_rate=0.01,
                                           momentum=0.9,
                                           decay=1e-6,
                                           nesterov=True,
                                           name='SGD')
        self.adam1 = tf.keras.optimizers.Adam(learning_rate=0.01,
                                              beta_1=0.85,
                                              beta_2=0.999,
                                              epsilon=1e-07,
                                              amsgrad=False,
                                              name='Adam1')
        self.adam2 = tf.keras.optimizers.Adam(learning_rate=0.001,
                                              beta_1=0.9,
                                              beta_2=0.999,
                                              epsilon=1e-07,
                                              amsgrad=False,
                                              name='Adam2')
        self.adam3 = tf.keras.optimizers.Adam(learning_rate=0.0005,
                                              beta_1=0.9,
                                              beta_2=0.999,
                                              epsilon=1e-07,
                                              amsgrad=False,
                                              name='Adam3')

        self.adam4 = tf.keras.optimizers.Adam(learning_rate=0.0001,
                                              beta_1=0.9,
                                              beta_2=0.999,
                                              epsilon=1e-07,
                                              amsgrad=False,
                                              name='Adam4')

        self.adam5 = tf.keras.optimizers.Adam(learning_rate=0.00001,
                                              beta_1=0.9,
                                              beta_2=0.999,
                                              epsilon=1e-07,
                                              amsgrad=False,
                                              name='Adam5')

        def fn(correct, predicted):
            return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                           logits=predicted)

        self.model.compile(optimizer=self.adam2,
                           loss=tf.keras.losses.CategoricalCrossentropy(),
                           metrics=['accuracy'])

        early_stopping = EarlyStopping(monitor='val_loss',
                                       patience=30,
                                       verbose=2)

        self.history1 = self.model.fit(
            R_variable['train_inputs'],
            R_variable['y_true_train'],
            epochs=50,
            batch_size=256,
            validation_data=(R_variable['test_inputs'],
                             R_variable['y_true_test']),
            shuffle=True)

        R_variable['loss_train'] = self.history1.history['loss']
        R_variable['loss_test'] = self.history1.history['val_loss']
        R_variable['acc_train'] = self.history1.history['accuracy']
        R_variable['acc_test'] = self.history1.history['val_accuracy']
        round_time = time.time()
        R_variable['use_time'] = round_time - self.t0
        savefile()
        gapReocord()
        plotloss()
        plotacc()

        self.model.compile(optimizer=self.adam4,
                           loss=tf.keras.losses.CategoricalCrossentropy(),
                           metrics=['accuracy'])

        self.history2 = self.model.fit(
            R_variable['train_inputs'],
            R_variable['y_true_train'],
            epochs=25,
            batch_size=256,
            validation_data=(R_variable['test_inputs'],
                             R_variable['y_true_test']),
            shuffle=True,
            callbacks=[early_stopping])

        R_variable['loss_train'] += self.history2.history['loss']
        R_variable['loss_test'] += self.history2.history['val_loss']
        R_variable['acc_train'] += self.history2.history['accuracy']
        R_variable['acc_test'] += self.history2.history['val_accuracy']
        round_time = time.time()
        R_variable['use_time'] = round_time - self.t0
        savefile()
        gapReocord()
        plotloss()
        plotacc()

        self.model.compile(optimizer=self.adam5,
                           loss=tf.keras.losses.CategoricalCrossentropy(),
                           metrics=['accuracy'])

        self.history3 = self.model.fit(
            R_variable['train_inputs'],
            R_variable['y_true_train'],
            epochs=25,
            batch_size=256,
            validation_data=(R_variable['test_inputs'],
                             R_variable['y_true_test']),
            shuffle=True,
            callbacks=[early_stopping])

        R_variable['loss_train'] += self.history3.history['loss']
        R_variable['loss_test'] += self.history3.history['val_loss']
        R_variable['acc_train'] += self.history3.history['accuracy']
        R_variable['acc_test'] += self.history3.history['val_accuracy']
        round_time = time.time()
        R_variable['use_time'] = round_time - self.t0
        savefile()
        gapReocord()
        plotloss()
        plotacc()

        score1 = self.model.evaluate(R_variable['train_inputs'],
                                     R_variable['y_true_train'],
                                     verbose=0)
        score2 = self.model.evaluate(R_variable['test_inputs'],
                                     R_variable['y_true_test'],
                                     verbose=0)

        print("Program ends. ")
        print("Train accuracy is: %3.3f, Train loss is: %3.3f" %
              (score1[1], score1[0]))
        print("Test accuracy is: %3.3f, Test loss is: %3.3f" %
              (score2[1], score2[0]))
        print("The program have been running for %ds." %
              (time.time() - self.t0))
Example 18
        dim = K.int_shape(mean)[1]

        latent_loss = -0.5 * (1 + log_var - K.square(mean) - K.exp(log_var))
        latent_loss = K.sum(latent_loss, axis=1, keepdims=True)
        latent_loss = K.mean(latent_loss)
        latent_loss = self.gamma * K.abs(latent_loss - self.max_capacity)

        latent_loss = K.reshape(latent_loss, [1, 1])

        epsilon = K.random_normal(shape=(batch, dim), mean=0., stddev=1.)
        layer_output = mean + K.exp(0.5 * log_var) * epsilon

        self.add_loss(losses=[latent_loss], inputs=[layer_inputs])

        return layer_output

    def compute_output_shape(self, input_shape):
        return input_shape[0]

    def get_config(self):
        config = {
            'gamma': self.gamma,
            'capacity': self.max_capacity,
            'name': self.name
        }
        base_config = super(SampleLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'SampleLayer': SampleLayer})
Example 19
    def build(self, input_shape):
        pass

    def call(self, x, mask=None):
        y = tf.compat.v1.depth_to_space(input=x,
                                        block_size=self.scale_factor,
                                        name=self.data_format)
        return y

    def compute_output_shape(self, input_shape):
        if self.data_format == 'channels_first':
            b, k, r, c = input_shape
            return (b, k // (self.scale_factor**2), r * self.scale_factor,
                    c * self.scale_factor)
        else:
            b, r, c, k = input_shape
            return (b, r * self.scale_factor, c * self.scale_factor,
                    k // (self.scale_factor**2))

    def get_config(self):
        config = {
            'scale_factor': self.scale_factor,
            'data_format': self.data_format
        }
        base_config = super(SubPixelUpscaling, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'SubPixelUpscaling': SubPixelUpscaling})
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]

#custom activation
from keras.layers import Activation
from keras.utils.generic_utils import get_custom_objects


def srelu(x):
    if np.random.rand(1) > 0.5:
        return K.relu(x)
    return K.relu(x) - x


get_custom_objects().update({'srelu': Activation(srelu)})


def baseline_model(af):
    # create model
    model = Sequential()
    model.add(Conv2D(32, (5, 5), input_shape=(1, 28, 28), activation=af))
    model.add(Flatten())
    model.add(Dense(128, activation=af))
    model.add(Dense(num_classes, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
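
# Hypothetical call (the actual invocation is not shown in this excerpt);
# 'srelu' is resolved through the registry populated above.
model = baseline_model('srelu')
model.summary()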
            steps_per_epoch=nb_train_samples // batch_size,
            epochs=epochs,
            #validation_data=validation_generator.get_next_batch(),
            #validation_steps=nb_validation_samples // batch_size,
            # max_queue_size=5
            #callbacks=[history, tb, earlystop],
            class_weight=class_weights)

    model.save('my_model_inception_2_full.h5')
    # save history
    try:
        with open('history_dict_earlystop_full.pkl', 'wb') as f:
            pickle.dump(history.history, f)
    except:
        print("history object does not exist.")
        print("Maybe you did a full training without validation.")

    # load model (need to add the "custom" metric function again)
    top20_acc = functools.partial(top_k_categorical_accuracy, k=20)
    top20_acc.__name__ = 'top20_acc'
    keras.losses.custom_loss = top20_acc
    get_custom_objects().update({"top20_acc": top20_acc})

    print("check of loading model works...")
    model = load_model('my_model.h5')
    print(model.summary())

    # preds = model.evaluate_generator(validation_generator.get_next_batch(),
    #                                  steps=nb_validation_samples\
    #    // batch_size)
    def call(self, inputs, training=None):
        def drop_connect():
            keep_prob = 1.0 - self.drop_connect_rate

            # Compute drop_connect tensor
            batch_size = tf.shape(inputs)[0]
            random_tensor = keep_prob
            random_tensor += K.random_uniform([batch_size, 1, 1, 1],
                                              dtype=inputs.dtype)
            binary_tensor = tf.floor(random_tensor)
            output = (inputs / keep_prob) * binary_tensor
            return output

        return K.in_train_phase(drop_connect, inputs, training=training)

    def get_config(self):
        config = {
            'drop_connect_rate': self.drop_connect_rate,
        }
        base_config = super(DropConnect, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({
    'EfficientNetConvInitializer': EfficientNetConvInitializer,
    'EfficientNetDenseInitializer': EfficientNetDenseInitializer,
    'DropConnect': DropConnect,
    'Swish': Swish,
})
Example 23
import numpy as np
np.random.seed(1337)  # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Flatten, LocallyConnected2D, ZeroPadding2D
from keras.optimizers import Adam
from keras.initializers import random_uniform
from six.moves import cPickle as pickle
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects


def scaled_hyperbolic_tangent(x):
    return K.tanh((2 / 3) * x) * 1.7159


get_custom_objects().update(
    {'scaled_hyperbolic_tangent': Activation(scaled_hyperbolic_tangent)})
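
# For reference (a quick check, not from the original): this is LeCun's
# recommended scaled tanh, 1.7159 * tanh(2x/3), scaled so that f(1) is ~1.
import numpy as np
print(1.7159 * np.tanh(2.0 / 3.0))  # ~1.0000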

with open('./train_data/data.pickle', 'rb') as f:
    tr_dat = pickle.load(f)
with open('./train_data/label.pickle', 'rb') as f:
    tr_lab = pickle.load(f)
with open('./test_data/data.pickle', 'rb') as f:
    te_dat = pickle.load(f)
with open('./test_data/label.pickle', 'rb') as f:
    te_lab = pickle.load(f)

# zero_padding (now the data shape is [None, 16, 16])
# after zero_padding -> [None, 18, 18]
tr_dat = np.pad(tr_dat, ((0, 0), (1, 1), (1, 1)), 'constant')
te_dat = np.pad(te_dat, ((0, 0), (1, 1), (1, 1)), 'constant')
Example 24
from pandas import read_csv
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from keras.utils.np_utils import to_categorical
from keras.utils.generic_utils import get_custom_objects
from keras.layers import Activation
from keras import backend as K
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import os


def normalized_linear(x):
    return x / K.sum(x, axis=-1, keepdims=True)


get_custom_objects().update(
    {"normalized_linear": Activation(normalized_linear)})


class SGAN:
    def __init__(self):
        self.num_classes = 4
        self.features = 12
        self.timesteps = 40
        self.timesteps_in_future = 20
        self.nodes_per_layer = 32
        self.filter_length = 3
        self.generator_input_length = 100
        self.dropout = 0.2
        self.encoder = LabelEncoder()
        self.results_folder_name = ""
        self.results_folder_name_gs = ""
Example 25
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import MaxPooling2D
from keras import optimizers

from sim_layer import Norm_Conv2d as Conv2D

# create custom activation function for CosSim layer


def pow_activation(x, p_val=3):
    #return 2 * K.pow(K.tanh(x) , p_val)
    return K.pow(x, p_val)


get_custom_objects().update({'pow_activation': Activation(pow_activation)})


def lr_schedule(epoch):
    lrate = 0.001
    if epoch > 25:
        lrate = 0.0005
    if epoch > 50:
        lrate = 0.0002
    return lrate


batch_size = 32
epochs = 75

# The data, split between train and test sets:
Example 26
        return tf.nn.swish(inputs)


class DropConnect(KL.Layer):
    def __init__(self, drop_connect_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.drop_connect_rate = drop_connect_rate

    def call(self, inputs, training=None):
        def drop_connect():
            keep_prob = 1.0 - self.drop_connect_rate

            # Compute drop_connect tensor
            batch_size = tf.shape(inputs)[0]
            random_tensor = keep_prob
            random_tensor += tf.random_uniform([batch_size, 1, 1, 1],
                                               dtype=inputs.dtype)
            binary_tensor = tf.floor(random_tensor)
            output = tf.div(inputs, keep_prob) * binary_tensor
            return output

        return K.in_train_phase(drop_connect, inputs, training=training)

    def get_config(self):
        config = super().get_config()
        config["drop_connect_rate"] = self.drop_connect_rate
        return config


get_custom_objects().update({"DropConnect": DropConnect, "Swish": Swish})
Example 27
        if `data_format` is `"channels_last"`.
    # Output shape
        5D tensor with shape:
        `(samples, channels + 2, conv_dim1, conv_dim2, conv_dim3)`
        if `data_format` is `"channels_first"`
        or 5D tensor with shape:
        `(samples, conv_dim1, conv_dim2, conv_dim3, channels + 2)`
        if `data_format` is `"channels_last"`.
    # References:
        - [An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](https://arxiv.org/abs/1807.03247)
    """

    def __init__(self, data_format=None,
                 **kwargs):
        super(CoordinateChannel3D, self).__init__(
            rank=3,
            use_radius=False,
            data_format=data_format,
            **kwargs
        )

    def get_config(self):
        config = super(CoordinateChannel3D, self).get_config()
        config.pop('rank')
        config.pop('use_radius')
        return config


get_custom_objects().update({'CoordinateChannel1D': CoordinateChannel1D,
                             'CoordinateChannel2D': CoordinateChannel2D,
                             'CoordinateChannel3D': CoordinateChannel3D})
Example 28
from keras import layers, models, optimizers, regularizers
from keras import backend as K
from keras.layers import Flatten, Concatenate, LeakyReLU
from keras.utils.generic_utils import get_custom_objects
from keras.initializers import RandomUniform, Zeros
from keras_radam import RAdam


def mish(x):
    return x * K.tanh(K.softplus(x))


get_custom_objects().update({'Mish': layers.Activation(mish)})


class Actor:
    """Actor (Policy) Model."""
    def __init__(self, state_size, action_size, action_low, action_high, lr,
                 network):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            action_low (array): Min value of each action dimension
            action_high (array): Max value of each action dimension
        """
        self.state_size = state_size
        self.action_size = action_size
        self.action_low = action_low
Example 29
import numpy as np
import keras2onnx
import onnxruntime

import tensorflow as tf
from keras import backend as K

from tensorflow.keras.models import load_model
import numpy as np
import sys

from constraints import ZeroSomeWeights
from keras.utils.generic_utils import get_custom_objects

get_custom_objects().update({"ZeroSomeWeights": ZeroSomeWeights})

model = load_model(sys.argv[1])
X = np.array(np.random.rand(10, 21), dtype=np.float32)
print(model.predict(X))
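
# Equivalent alternative (a sketch, not from the original script): pass the
# constraint per call instead of registering it globally.
model_alt = load_model(sys.argv[1],
                       custom_objects={'ZeroSomeWeights': ZeroSomeWeights})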

# convert to onnx model
onnx_model = keras2onnx.convert_keras(model, model.name)

temp_model_file = 'NN_model.onnx'
keras2onnx.save_model(onnx_model, temp_model_file)
sess = onnxruntime.InferenceSession(temp_model_file)

input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
Example 30
np.random.seed(5566)
from keras import backend as K
from keras.layers import Activation, Input, ZeroPadding2D
from keras.layers.core import Dense, Lambda, Reshape
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import AveragePooling2D, GlobalMaxPooling2D
from keras.layers.merge import concatenate, dot
from keras.models import Model
from keras.utils.generic_utils import get_custom_objects


def log_activation(x):
    return K.log(x)


get_custom_objects().update({'log_activation': Activation(log_activation)})


def create_model(MAX_QRY_LENGTH=50,
                 MAX_DOC_LENGTH=2900,
                 NUM_OF_FEATS=10,
                 PSGS_SIZE=[(50, 1)],
                 NUM_OF_FILTERS=5,
                 tau=1):
    alpha_size = len(PSGS_SIZE)
    psgMat = Input(shape=(
        MAX_QRY_LENGTH,
        MAX_DOC_LENGTH,
        1,
    ), name="passage")
    homoMat = Input(shape=(NUM_OF_FEATS, ), name="h_feats")
Example 31
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)


#Define activation functions
def tf_sqnlsig(x):
    u = tf.clip_by_value(x, -2, 2)
    a = u
    b = tf.negative(tf.abs(u))
    wsq = (tf.multiply(a, b)) / 4.0
    y = tf.add(tf.multiply(tf.add(u, wsq), 0.5), 0.5)
    return y


get_custom_objects().update({'custom_activation': Activation(tf_sqnlsig)})


def tf_sqnl(x):
    u = tf.clip_by_value(x, -2, 2)
    a = u
    b = tf.negative(tf.abs(u))
    wsq = (tf.multiply(a, b)) / 4.0
    y = tf.add(u, wsq)
    return y


get_custom_objects().update({'custom_activation': Activation(tf_sqnl)})
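# Note: this reuses the key 'custom_activation' and overwrites the tf_sqnlsig
# entry registered above; distinct keys would keep both activations available.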


def tf_tansig(x):
Example 32
    def __init__(self,
                 input_dim,
                 n_hidden_units,
                 n_hidden_layers,
                 rho=0.6,
                 nonlinearity='tanh',
                 init='gau',
                 bias_sigma=0.0,
                 weight_sigma=1.25,
                 input_layer=None,
                 flip=False,
                 output_dim=None):
        #if input_layer is not None:
        #    assert input_layer.output_shape[1] == input_dim
        self.input_dim = input_dim
        self.n_hidden_units = n_hidden_units
        self.n_hidden_layers = n_hidden_layers
        self.nonlinearity = nonlinearity
        self.bias_sigma = bias_sigma
        self.weight_sigma = weight_sigma
        self.input_layer = input_layer

        if output_dim is None:
            output_dim = n_hidden_units
        self.output_dim = output_dim

        model = Sequential()
        get_custom_objects().update({'hard_tanh': Activation(hard_tanh)})
        get_custom_objects().update({'erf2': Activation(erf2)})
        if input_layer is not None:
            model.add(input_layer)
        # model.add(Dropout(0.1, input_layer))

        #model.add(Activation('tanh'))
        for i in range(n_hidden_layers):
            nunits = n_hidden_units if i < n_hidden_layers - 1 else output_dim
            if flip:
                if nonlinearity == 'prelu':
                    model.add(
                        LeakyReLU(alpha=0.5,
                                  input_shape=(input_dim, ),
                                  name='a%d' % i))
                else:
                    model.add(
                        Activation(nonlinearity,
                                   input_shape=(input_dim, ),
                                   name='a%d' % i))

            # model.add(Activation(nonlinearity, input_dim=1000, name='a%d'%i))
                model.add(Dropout(1 - rho))  # dropout = 1 - rho
                if init == 'gau':
                    model.add(
                        Dense(nunits,
                              name='d%d' % i,
                              kernel_initializer=normal(
                                  mean=0.0,
                                  stddev=weight_sigma * 1.0 /
                                  np.sqrt(n_hidden_units)),
                              bias_initializer=normal(mean=0.0,
                                                      stddev=bias_sigma)))
                if init == 'orth':
                    model.add(
                        Dense(nunits,
                              name='d%d' % i,
                              kernel_initializer=orthogonal(gain=weight_sigma),
                              bias_initializer=normal(mean=0.0,
                                                      stddev=bias_sigma)))
            # model.add(Dense(nunits, name='d%d'%i))
            else:
                model.add(
                    Dense(nunits, input_shape=(input_dim, ), name='%d' % i))
                if i < n_hidden_layers - 1 or self.output_dim == self.n_hidden_units:
                    model.add(Activation(nonlinearity, name='a%d' % i))
                else:
                    # Theano is optimizing out the nonlinearity if it can which is breaking shit
                    # Give it something that it won't optimize out.
                    model.add(
                        Activation(lambda x: T.minimum(x, 999999.999),
                                   name='a%d' % i))

        model.build()
        self.model = model
        # print(self.hs)

        self.weights = model.get_weights()
        self.dense_layers = filter(lambda x: x.name.startswith('d'),
                                   model.layers)
        self.activ_layers = filter(lambda x: x.name.startswith('a'),
                                   model.layers)

        self.hs = [h.output for h in self.dense_layers]
        self.ac = [b.output for b in self.activ_layers]

        self.f_acts = self.f_jac = self.f_jac_hess = self.f_act = None
        vec = K.ones_like(self.model.input)
Example 33
embedding_layer = Embedding(len(word_index),
                            output_dim=emb_size,
                            weights=[embedding_matrix],
                            input_length=max_length,
                            trainable=False)
print("embedd matrix shape: ", embedding_matrix.shape)

batch_size = 64


def swish(x):
    return (K.sigmoid(x) * x)


get_custom_objects().update({'swish': Activation(swish)})
for i in range(20):
    if i == 0:
        numb_train = 180000
        train_X, valid_X = train_X_num[:numb_train], train_X_num[numb_train:]
        train_y, valid_y = train_y_cat[:numb_train], train_y_cat[numb_train:]
    elif i == 1:
        numb_train = -180000
        train_X, valid_X = train_X_num[numb_train:], train_X_num[:numb_train]
        train_y, valid_y = train_y_cat[numb_train:], train_y_cat[:numb_train]
    else:
        train_X, valid_X, train_y, valid_y = train_test_split(train_X_num,
                                                              train_y_cat,
                                                              test_size=0.07)

    sequence_input = Input(shape=(max_length, ), dtype='int32')
D = np.empty((4, 0))
for i in range(max_iter):
    D = np.concatenate([D, generate_timed_trajectory(28)], axis=1)

# format data for training
input_data = D[:3].T
target_data = D[3].T


# ## Set up Neural Network
# set up keras
def rad_bas(x):
    return K.exp(-x**2)


get_custom_objects().update({'rad_bas': Activation(rad_bas)})


def tan_sig(x):
    return 2 / (1 + K.exp(-2 * x)) - 1


get_custom_objects().update({'tan_sig': Activation(tan_sig)})

# define neural net
model = Sequential()
model.add(Dense(10, activation='tan_sig', use_bias=True, input_shape=(3, )))
model.add(Dense(10, activation='sigmoid', use_bias=True))
model.add(Dense(10, activation='linear', use_bias=True))
model.add(Dense(1))
Example 35
    # Name of the training session to which all objects will be saved
    NAME_OF_THE_TRAIN_SESSION = config["NAME_OF_THE_TRAIN_SESSION"]
    PATH_TO_THE_LEARNING_SESSION = "./learning_sessions/" + NAME_OF_THE_TRAIN_SESSION + "/"

    # Name of the pretrained model, if continue_training=1
    pretrained_filepath = PATH_TO_THE_LEARNING_SESSION + config["MODEL_NAME"]

    print("DEBUG = {} \nconfig = {}\nmodel_config = {}".format(
        DEBUG, config, model_config))

    # Update custom object with my own loss functions
    custom_objects = {
        'macro_averaged_recall_tf_onehot': macro_averaged_recall_tf_onehot,
        'macro_averaged_recall_tf_soft': macro_averaged_recall_tf_soft
    }
    get_custom_objects().update(custom_objects)

    # Configuring gpu for the training
    gpu_configuration_initialization()
    use_generator = 0

    # Build directories
    path_builder(PATH_TO_THE_LEARNING_SESSION)
    # Create logger to files (copying std out stream to a file)
    sys.stdout = Logger(PATH_TO_THE_LEARNING_SESSION + "log_training")

    # First, build index mapping words in the embeddings set to their embedding vector
    print("Indexing word vectors.")

    # Using twitter specific dataset. Pretrained by glove.
    filename_to_read = config["TWITTER_GLOVE"]
Example 36
    def call(self, inputs):
        return resize_images(inputs, self.factor[0], self.factor[1],
                             self.data_format, self.interpolation)

    def get_config(self):
        config = {
            'factor': self.factor,
            'interpolation': self.interpolation,
            'data_format': self.data_format
        }
        base_config = super(ResizeImage, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'ResizeImage': ResizeImage})


def variable(value, dtype=_FLOATX, name=None):
    v = tf.Variable(np.asarray(value, dtype=dtype), name=name)
    _get_session().run(v.initializer)
    return v


def shape(x):
    return x.get_shape()


def square(x):
    return tf.square(x)
Example 37
from keras.models import Sequential
from keras.layers import Activation, Dense, Maximum
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
import keras
import numpy as np

get_custom_objects().update(
    {'sine': Activation(lambda x: K.sigmoid(10 * K.sin(x)))})

n_rows = 3
limit = 5

model = Sequential()
model.add(
    Dense(units=(limit + 1) * n_rows, activation='sine', input_dim=n_rows))
model.add(Dense(units=limit, activation='sine'))
model.add(Dense(units=1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
npoints_x = 1000000

x_data_r = np.random.randint(0, 2**limit + 1, (npoints_x // 2, n_rows))
y_data_r = np.where(np.bitwise_xor.reduce(x_data_r, axis=1) == 0, 0,
                    1).reshape((-1, 1))

x_data_0 = np.random.randint(0, 2**limit + 1, (npoints_x // 2, n_rows - 1))
x_data_0 = np.hstack(
    (x_data_0, np.bitwise_xor.reduce(x_data_0, axis=1).reshape((-1, 1))))
y_data_0 = np.zeros((npoints_x // 2, 1))
        cnn = self.ResidualBlock(64, cnn)
        cnn = self.ResidualBlock(64, cnn)

        cnn = AveragePooling2D((2, 2))(cnn)
        cnn = LeakyReLU(alpha=0.2)(cnn)

        cnn = Conv2D(128, (5, 5), padding='same')(cnn)
        cnn = LeakyReLU(alpha=0.2)(cnn)
        cnn = self.ResidualBlock(128, cnn)
        cnn = self.ResidualBlock(128, cnn)

        cnn = AveragePooling2D((2, 2))(cnn)
        cnn = LeakyReLU(alpha=0.2)(cnn)

        cnn = Conv2D(256, (5, 5), padding='same')(cnn)
        cnn = LeakyReLU(alpha=0.2)(cnn)
        cnn = self.ResidualBlock(256, cnn)
        cnn = self.ResidualBlock(256, cnn)

        cnn = Reshape((-1, 256))(cnn)
        capsule = Capsule(self.class_num, 16, 3, True)(cnn)
        output = Lambda(lambda x: K.sqrt(K.sum(K.square(x), 2)),
                        output_shape=(self.class_num, ))(capsule)

        model = Model(inputs=input_image, outputs=output)

        return model


get_custom_objects().update({'Capsule': Capsule})
"""Create a sequential model."""

from keras.layers import Dropout, Activation, Input
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import GlobalAveragePooling2D
from keras.regularizers import l2
from keras.models import Model
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects


def sigmoid_neg(x):
    return K.sigmoid(x) - 0.5

get_custom_objects().update({'sigmoid_neg': Activation(sigmoid_neg)})


def create_model(nb_classes, input_shape, config=None):
    """Create a VGG-16 like model."""
    if len(input_shape) != 3:
        raise Exception("Input shape should be a tuple (nb_channels, nb_rows, "
                        "nb_cols) or (nb_rows, nb_cols, nb_channels), "
                        "depending on your backend.")
    if config is None:
        config = {'model': {}}

    min_feature_map_dimension = min(input_shape[:2])
    if min_feature_map_dimension < 32:
        print("ERROR: Please upsample the feature maps to have at least "
              "a size of 32 x 32. Currently, it has {}".format(input_shape))
Example 40
    def __init__(self, scale_factor=2, data_format=None, **kwargs):
        super(SubPixelUpscaling, self).__init__(**kwargs)

        self.scale_factor = scale_factor
        self.data_format = normalize_data_format(data_format)

    def build(self, input_shape):
        pass

    def call(self, x, mask=None):
        y = tf.depth_to_space(x, self.scale_factor, self.data_format)
        return y

    def compute_output_shape(self, input_shape):
        if self.data_format == 'channels_first':
            b, k, r, c = input_shape
            return (b, k // (self.scale_factor ** 2), r * self.scale_factor, c * self.scale_factor)
        else:
            b, r, c, k = input_shape
            return (b, r * self.scale_factor, c * self.scale_factor, k // (self.scale_factor ** 2))

    def get_config(self):
        config = {'scale_factor': self.scale_factor,
                  'data_format': self.data_format}
        base_config = super(SubPixelUpscaling, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


get_custom_objects().update({'SubPixelUpscaling': SubPixelUpscaling})
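
# Illustrative shape check (not from the original source): with the default
# channels_last format and scale_factor=2, the spatial dims double and the
# channel count shrinks by a factor of 4.
layer = SubPixelUpscaling(scale_factor=2)
print(layer.compute_output_shape((None, 32, 32, 64)))  # (None, 64, 64, 16)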