def advanced_autoencoder(x_in, x, epochs, batch_size, activations, depth, neurons):
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)
    num_stock = len(x_in.columns)

    # activation function factory: each call returns a fresh layer instance,
    # so the same layer object is not re-added to the model repeatedly
    if activations == 'elu':
        make_activation = lambda: ELU(alpha=1.0)
    elif activations == 'lrelu':
        make_activation = lambda: LeakyReLU(alpha=0.1)
    else:
        make_activation = lambda: ReLU(max_value=None, negative_slope=0.0, threshold=0.0)

    autoencoder = Sequential()
    # encoding layers of desired depth
    for n in range(1, depth + 1):
        if n == 1:
            # input layer
            # autoencoder.add(GaussianNoise(stddev=0.01, input_shape=(num_stock,)))
            autoencoder.add(Dense(int(neurons / n), input_shape=(num_stock,)))
        else:
            autoencoder.add(Dense(int(neurons / n)))
        autoencoder.add(make_activation())
    # decoding layers of desired depth
    for n in range(depth, 1, -1):
        autoencoder.add(Dense(int(neurons / (n - 1))))
        autoencoder.add(make_activation())
    # output layer
    autoencoder.add(Dense(num_stock, activation='linear'))

    # autoencoder.compile(optimizer='sgd', loss='mean_absolute_error', metrics=['accuracy'])

    autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])

    # checkpointer = ModelCheckpoint(filepath='weights.{epoch:02d}-{val_loss:.2f}.txt', verbose=0, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='auto', baseline=None,
                                 restore_best_weights=True)
    history = autoencoder.fit(x_in, x_in, epochs=epochs, batch_size=batch_size,
                              shuffle=False, validation_split=0.15, verbose=0, callbacks=[earlystopper])
    # errors = np.add(autoencoder.predict(x_in),-x_in)
    y = autoencoder.predict(x)
    # saving results of error distribution tests
    # A=np.zeros((5))
    # A[0]=chi2test(errors)
    # A[1]=pesarantest(errors)
    # A[2]=portmanteau(errors,1)
    # A[3]=portmanteau(errors,3)
    # A[4]=portmanteau(errors,5)

    # autoencoder.summary()

    # plot accuracy and loss of autoencoder
    # plot_accuracy(history)
    # plot_loss(history)

    # plot original, encoded and decoded data for some stock
    # plot_two_series(x_in, 'Original data', auto_data, 'Reconstructed data')

    # the histogram of the data
    # make_histogram(x_in, 'Original data', auto_data, 'Reconstructed data')

    # CLOSE TF SESSION
    K.clear_session()
    return y
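A minimal invocation sketch (not from the source): it assumes x_in and x are pandas DataFrames of per-stock series, and that session_conf is already defined as the function body expects.

import numpy as np
import pandas as pd

# illustrative data: 500 observations of 20 stocks, plus a held-out window
x_in = pd.DataFrame(np.random.randn(500, 20))
x = pd.DataFrame(np.random.randn(100, 20))

# depth=3, neurons=120 gives encoder widths 120 -> 60 -> 40
reconstruction = advanced_autoencoder(x_in, x, epochs=50, batch_size=32,
                                      activations='elu', depth=3, neurons=120)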
Example 2
def _cnn(imgs_dim, compile_=True):
    model = Sequential()

    model.add(_convolutional_layer(nb_filter=16, input_shape=imgs_dim))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
    model.add(_convolutional_layer(nb_filter=16))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(_convolutional_layer(nb_filter=16))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    """
    model.add(_convolutional_layer(nb_filter=32))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
     

    model.add(_convolutional_layer(nb_filter=32))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
    """

    model.add(_convolutional_layer(nb_filter=32))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    """
    model.add(_convolutional_layer(nb_filter=64))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
     
    model.add(_convolutional_layer(nb_filter=64))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())


    model.add(_convolutional_layer(nb_filter=64))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(_convolutional_layer(nb_filter=128))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
        
    model.add(_convolutional_layer(nb_filter=128))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
    """

    model.add(_convolutional_layer(nb_filter=128))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    """
    model.add(_convolutional_layer(nb_filter=256))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
        
    model.add(_convolutional_layer(nb_filter=256))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
    """

    model.add(_convolutional_layer(nb_filter=256))
    model.add(BatchNormalization(axis=-1))
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(rate=0.5))

    model.add(Flatten())
    model.add(_dense_layer(output_dim=PENULTIMATE_SIZE))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(_dense_layer(output_dim=PENULTIMATE_SIZE))
    model.add(BatchNormalization())
    model.add(ReLU())

    if compile_:
        model.add(Dropout(rate=0.5))
        model.add(_dense_layer(output_dim=SOFTMAX_SIZE))
        model.add(BatchNormalization())
        model.add(Activation(activation='softmax'))
        return compile_model(model)

    return model
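This snippet relies on helpers that are not shown (_convolutional_layer, _dense_layer, compile_model) and two constants (PENULTIMATE_SIZE, SOFTMAX_SIZE). Hypothetical stand-ins, assuming 3x3 same-padded kernels and a plain Adam compile:

from keras.layers import Conv2D, Dense
from keras.optimizers import Adam

PENULTIMATE_SIZE = 512  # assumed width of the two dense layers
SOFTMAX_SIZE = 10       # assumed number of classes

def _convolutional_layer(nb_filter, input_shape=None):
    # same-padded 3x3 convolution; the kernel size is an assumption
    if input_shape is not None:
        return Conv2D(nb_filter, (3, 3), padding='same', input_shape=input_shape)
    return Conv2D(nb_filter, (3, 3), padding='same')

def _dense_layer(output_dim):
    return Dense(output_dim)

def compile_model(model):
    model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return model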
Example 3
    def train(self, tr_x, tr_y, va_x=None, va_y=None):

        # set up and scale the data
        self.scaler = StandardScaler().fit(tr_x)
        tr_x = self.scaler.transform(tr_x)
        # tr_y = np_utils.to_categorical(tr_y)

        validation = va_x is not None
        if validation:
            va_x = self.scaler.transform(va_x)
            # va_y = np_utils.to_categorical(va_y)

        # parameters
        num_classes = self.params["num_classes"]
        input_dropout = self.params['input_dropout']
        hidden_layers = int(self.params['hidden_layers'])
        hidden_units = int(self.params['hidden_units'])
        hidden_activation = self.params['hidden_activation']
        hidden_dropout = self.params['hidden_dropout']
        batch_norm = self.params['batch_norm']
        output_activation = self.params["output_activation"]
        optimizer_type = self.params['optimizer']['type']
        optimizer_lr = self.params['optimizer']['lr']
        loss = self.params['loss']
        metrics = self.params['metrics']
        batch_size = int(self.params['batch_size'])

        # build the model
        self.model = Sequential()

        # input layer
        self.model.add(Dropout(input_dropout, input_shape=(tr_x.shape[1], )))

        # hidden layers
        for i in range(hidden_layers):
            self.model.add(Dense(hidden_units))
            if batch_norm == 'before_act':
                self.model.add(BatchNormalization())
            if hidden_activation == 'prelu':
                self.model.add(PReLU())
            elif hidden_activation == 'relu':
                self.model.add(ReLU())
            else:
                raise NotImplementedError
            self.model.add(Dropout(hidden_dropout))

        # output layer
        self.model.add(Dense(num_classes, activation=output_activation))

        # optimizer
        if optimizer_type == 'sgd':
            optimizer = SGD(lr=optimizer_lr,
                            decay=1e-6,
                            momentum=0.9,
                            nesterov=True)
        elif optimizer_type == 'adam':
            optimizer = Adam(lr=optimizer_lr,
                             beta_1=0.9,
                             beta_2=0.999,
                             decay=0.)
        else:
            raise NotImplementedError

        # loss function, metrics, and other compile settings
        self.model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

        # epochs, early stopping, and training
        # note: with too many epochs, training may never finish when the learning rate is small
        nb_epoch = 1000
        patience = 100
        if validation:
            early_stopping = EarlyStopping(monitor='val_loss',
                                           patience=patience,
                                           restore_best_weights=True)
            history = self.model.fit(tr_x,
                                     tr_y,
                                     epochs=nb_epoch,
                                     batch_size=batch_size,
                                     verbose=0,
                                     validation_data=(va_x, va_y),
                                     callbacks=[early_stopping])
        else:
            history = self.model.fit(tr_x,
                                     tr_y,
                                     epochs=nb_epoch,
                                     batch_size=batch_size,
                                     verbose=0)

        # hists is assumed to be a module-level list collecting training histories
        hists.append(pd.DataFrame(history.history))
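For reference, a params dict that satisfies every lookup in the method above; the keys come from the code, the values are illustrative:

params = {
    'num_classes': 10,
    'input_dropout': 0.1,
    'hidden_layers': 3,
    'hidden_units': 96,
    'hidden_activation': 'relu',   # or 'prelu'
    'hidden_dropout': 0.3,
    'batch_norm': 'before_act',
    'output_activation': 'softmax',
    'optimizer': {'type': 'adam', 'lr': 1e-3},
    'loss': 'categorical_crossentropy',
    'metrics': ['accuracy'],
    'batch_size': 64,
}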
Example 4
from keras.layers import Input, Dense, Dropout, Bidirectional, LSTM
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU, ReLU
from keras.optimizers import RMSprop, Adam
from keras.models import Model, Sequential

"""Use a neural network."""

sentence_input = Input(shape=(768,1), name='sentence_input')

droprate = 0.3
dimension = 128

x = Dense(dimension)(sentence_input)
x = Dropout(droprate)(x)
# x = LeakyReLU(alpha=0.01)(x)
x = ReLU()(x)

x = Bidirectional(LSTM(dimension, return_sequences=True, activation='tanh'))(x) 
x = Bidirectional(LSTM(dimension, return_sequences=False, activation='tanh'))(x)

x = Dropout(droprate)(x)
x = Dense(dimension)(x)
x = ReLU()(x)

output = Dense(1, activation='sigmoid')(x)  # sigmoid for binary output; softmax over a single unit is constant
discriminator = Model(sentence_input, output)
discriminator.summary()

optimizer = Adam(lr=0.001)
discriminator.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
Example 5
def main():
    import pynvml
    pynvml.nvmlInit()
    # the index passed to nvmlDeviceGetHandleByIndex is the GPU id
    handle2 = pynvml.nvmlDeviceGetHandleByIndex(2)
    handle3 = pynvml.nvmlDeviceGetHandleByIndex(3)
    # meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)

    # print(meminfo.used)

    parser = argparse.ArgumentParser(
        description='simple 3D convolution for action recognition')
    parser.add_argument('--batch', type=int, default=128)
    parser.add_argument('--epoch', type=int, default=100)
    parser.add_argument('--videos',
                        type=str,
                        default='UCF101',
                        help='directory where videos are stored')
    parser.add_argument('--nclass', type=int, default=101)
    parser.add_argument('--output', type=str, required=True)
    # note: argparse's type=bool treats any non-empty string as True,
    # so e.g. --color False still yields True
    parser.add_argument('--color', type=bool, default=False)
    parser.add_argument('--skip', type=bool, default=True)
    parser.add_argument('--depth', type=int, default=10)
    parser.add_argument('--dataset', type=str, default='ucf101')
    args = parser.parse_args()

    img_rows, img_cols, frames = 64, 64, args.depth
    channel = 3 if args.color else 1
    fname_npz = 'dataset_{}_{}_{}_{}.npz'.format(args.dataset, args.nclass,
                                                 args.depth, args.skip)

    vid3d = videoto3d.Videoto3D(img_rows, img_cols, frames, args.dataset)
    nb_classes = args.nclass

    if os.path.exists(fname_npz):
        loadeddata = np.load(fname_npz)
        X, Y = loadeddata["X"], loadeddata["Y"]
    else:
        x, y = loaddata(args.videos, vid3d, args.nclass, args.output,
                        args.dataset, frames, args.color, args.skip)
        X = x.reshape((x.shape[0], img_rows, img_cols, frames, channel))
        Y = np_utils.to_categorical(y, nb_classes)

        X = X.astype('float32')
        np.savez(fname_npz, X=X, Y=Y)
        print('Saved dataset to {}.'.format(fname_npz))
    print('X_shape:{}\nY_shape:{}'.format(X.shape, Y.shape))

    # Define model

    # conv3D + Relu + Conv3D + Softmax + Pooling3D + DropOut
    input_x = Input(shape=(img_rows, img_cols, frames, channel))

    #
    # # C3D-conv1
    # convLayer = Conv3D(32, kernel_size= (3, 3, 3),padding='same')(input_x)
    # convLayer = ReLU()(convLayer)
    #
    # convLayer = Conv3D(32, kernel_size= (3, 3, 3), padding='same')(convLayer)
    # convLayer = Softmax()(convLayer)
    # convLayer = MaxPooling3D(pool_size=(3,3,3), padding='same')(convLayer)
    # convLayer = Dropout(0.25)(convLayer)
    #
    # # C3D-conv2
    # convLayer = Conv3D(64, kernel_size= (3, 3, 3),padding='same')(convLayer)
    # convLayer = ReLU()(convLayer)
    #
    # convLayer = Conv3D(64, kernel_size= (3, 3, 3), padding='same')(convLayer)
    # convLayer = Softmax()(convLayer)
    # convLayer = MaxPooling3D(pool_size=(3,3,3), padding='same')(convLayer)
    # convLayer = Dropout(0.25)(convLayer)
    #
    #
    # maskLayer = Conv3D(64*frames, kernel_size=(3,3,2), padding='same')(convLayer)
    # maskLayer = Lambda(mean_filter)(maskLayer)      # [None,1, 64], each point represent a mask of input region of 8x8 points
    # # maskLayer = BatchNormalization()(maskLayer)
    # maskLayer = Lambda(K.sigmoid)(maskLayer)
    # # maskLayer = ReLU()(maskLayer)
    # # maskLayer = Lambda(bi_trans, arguments={'th':0.5})(maskLayer)
    # maskLayer = Reshape(( 8, 8, frames, 1))(maskLayer)  #reshape_filter(maskLayer, shape=[None,8,8,1,1])
    # # maskLayer = Lambda(normalize)(maskLayer)
    # maskLayerForLoss = maskLayer
    # maskLayer = Lambda(repeat_filter,arguments={'rep':8, 'axis':1})(maskLayer)
    # maskLayer = Lambda(repeat_filter,arguments={'rep':8, 'axis':2})(maskLayer)
    # # maskLayer = Lambda(repeat_filter,arguments={'rep':frames, 'axis':3})(maskLayer)
    # maskLayer = Lambda(repeat_filter,arguments={'rep':channel, 'axis':4})(maskLayer)
    #
    # # maskLayer = Lambda(repeat_filter,arguments={'rep':2, 'axis':3})(maskLayer)
    # # maskLayer = Lambda(repeat_filter,arguments={'rep':64, 'axis':4})(maskLayer)
    #
    #
    # convLayer = Multiply()([maskLayer,input_x])
    #
    #

    # C3D-conv1
    convLayer = Conv3D(32, kernel_size=(3, 3, 3), padding='same')(input_x)
    convLayer = ReLU()(convLayer)

    convLayer = Conv3D(32, kernel_size=(3, 3, 3), padding='same')(convLayer)
    convLayer = Softmax()(convLayer)
    convLayer = MaxPooling3D(pool_size=(3, 3, 3), padding='same')(convLayer)
    convLayer = Dropout(0.25)(convLayer)

    # C3D-conv2
    convLayer = Conv3D(64, kernel_size=(3, 3, 3), padding='same')(convLayer)
    convLayer = ReLU()(convLayer)

    convLayer = Conv3D(64, kernel_size=(3, 3, 3), padding='same')(convLayer)
    convLayer = Softmax()(convLayer)
    convLayer = MaxPooling3D(pool_size=(3, 3, 3), padding='same')(convLayer)
    convLayer = Dropout(0.25)(convLayer)

    fc1 = Flatten()(convLayer)

    fc = Dense(512, activation='sigmoid')(fc1)
    fc = Dropout(0.5)(fc)
    dense_out = Dense(nb_classes, activation='softmax')(fc)
    dense_out_converse = Dense(nb_classes)(fc)

    model = Model(input_x, [dense_out, dense_out_converse])

    # loss of 2 parts
    losses = {'dense_2': K.categorical_crossentropy, 'dense_3': unlikely_loss}
    lossWeights = {'dense_2': 1, 'dense_3': 1}
    model.compile(loss=losses,
                  loss_weights=lossWeights,
                  optimizer=Adam(lr=0.001),
                  metrics=['accuracy'])
    # model.compile(loss=categorical_crossentropy, optimizer=Adam(lr=0.001),metrics=['accuracy'])
    model.summary()
    plot_model(model,
               show_shapes=True,
               to_file=os.path.join(args.output, 'model.png'))

    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.1,
                                                        random_state=43)
    X_train, X_val, Y_train, Y_val = train_test_split(X_train,
                                                      Y_train,
                                                      test_size=0.1,
                                                      random_state=43)

    # history = model.fit_generator(myGenerator(X_train, X_test, Y_train, Y_test, nb_classes, args.batch),
    #                               samples_per_epoch=X_train.shape[0], epochs=args.epoch, verbose=1,
    #                               callbacks=callbacks_list,
    # shuffle=True)
    # check GPU status; once a GPU is available, set the environment variable and break.
    # if none of the GPUs is ready, sleep for 2 secs and retry.
    cnt = 0
    while True:
        cnt += 1
        processinfo = pynvml.nvmlDeviceGetComputeRunningProcesses(handle2)
        if len(processinfo) == 0:
            os.environ['CUDA_VISIBLE_DEVICES'] = '2'
            print('GPU 2 is available, use GPU 2\n')
            break
        processinfo = pynvml.nvmlDeviceGetComputeRunningProcesses(handle3)
        if len(processinfo) == 0:
            os.environ['CUDA_VISIBLE_DEVICES'] = '3'
            print('GPU 3 is available, use GPU 3\n')
            break
        sleep(2)
        print('\rretry time: {}'.format(cnt), end='')

    history = model.fit(X_train, [Y_train, Y_train],
                        validation_data=(X_val, [Y_val, Y_val]),
                        batch_size=args.batch,
                        epochs=args.epoch,
                        verbose=1,
                        shuffle=True)
    # history = model.fit(X_train, Y_train,
    #                     validation_data=(X_val, Y_val),
    #                     batch_size=args.batch,
    #                     epochs=args.epoch, verbose=1, shuffle=True)
    # loss, acc = model.evaluate(X_test, Y_test, verbose=0)
    model_json = model.to_json()
    if not os.path.isdir(args.output):
        os.makedirs(args.output)
    with open(
            os.path.join(
                args.output, '{}_{}_{}_ucf101_3dcnnmodel.json'.format(
                    current_time, nb_classes, args.depth)), 'w') as json_file:
        json_file.write(model_json)
    model.save_weights(
        os.path.join(
            args.output,
            '{}_{}_{}_ucf101_3dcnnmodel.hd5'.format(current_time, nb_classes,
                                                    args.depth)))
    loss = model.evaluate(X_test, [Y_test, Y_test], verbose=0)
    # loss, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test loss:', loss)
    plot_history(history, args.output)
    save_history(history, args.output)

Example 6
 def BN_CONV(self, x, FNum, FSize, strides=(1, 1)):
     # the activation defaults to ReLU (other activations may be added later)
     x = Conv2D(FNum, FSize, padding='same', strides=strides)(x)
     x = BatchNormalization()(x)
     x = ReLU()(x)
     return x
Example 7
 def uk(layer_input, filters, f_size=3, i_norm=True):
     d = Deconv2D(filters, kernel_size=f_size, padding="valid")(layer_input)
     if i_norm:
         d = InstanceNormalization()(d)
     d = ReLU()(d)
     return d
Example 8
def relu6(x):
    # return K.relu(x, max_value=6)
    return ReLU(max_value=6)(x)
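relu6 just wraps the ReLU layer with max_value=6 (the capped ReLU used in MobileNet). A minimal usage sketch, assuming the functional API:

from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(16,))
h = Dense(32)(inp)
h = relu6(h)  # activations are clipped to [0, 6]
model = Model(inp, h)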
Example 9
import glob
import os
import shutil

import matplotlib.pyplot as plt
from keras.layers import Dense, Reshape, Conv2DTranspose
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import ReLU
from keras.models import Sequential
from keras.preprocessing.image import load_img, array_to_img, img_to_array, ImageDataGenerator
from keras.utils import multi_gpu_model

generator_ = Sequential()
generator_.add(Dense(256 * 7 * 7, input_dim=100))
generator_.add(Reshape((7, 7, 256)))

generator_.add(Conv2DTranspose(1024, 4, strides=1, padding='same'))
generator_.add(BatchNormalization(momentum=0.1, epsilon=1e-05))
generator_.add(ReLU())

generator_.add(Conv2DTranspose(512, 4, strides=2, padding='same'))
generator_.add(BatchNormalization(momentum=0.1, epsilon=1e-05))
generator_.add(ReLU())

generator_.add(Conv2DTranspose(256, 4, strides=2, padding='same'))
generator_.add(BatchNormalization(momentum=0.1, epsilon=1e-05))
generator_.add(ReLU())

generator_.add(Conv2DTranspose(128, 4, strides=2, padding='same'))
generator_.add(BatchNormalization(momentum=0.1, epsilon=1e-05))
generator_.add(ReLU())

generator_.add(Conv2DTranspose(64, 4, strides=2, padding='same'))
generator_.add(BatchNormalization(momentum=0.1, epsilon=1e-05))
Example 10
 def dk(layer_input, filters, f_size=3, i_norm=True):
     d = Conv2D(filters, kernel_size=f_size, strides=2, padding="same")(layer_input)
     if i_norm:
         d = InstanceNormalization()(d)
     d = ReLU()(d)
     return d
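A minimal sketch (not from the source) chaining the dk and uk blocks from Examples 7 and 10 into a small encoder-decoder; the filter counts and input shape are illustrative:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(64, 64, 3))
x = dk(inp, 32)   # strided conv halves the spatial size
x = dk(x, 64)
x = uk(x, 32)     # transposed conv with valid padding grows it back slightly
x = uk(x, 16)
model = Model(inp, x)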
Example 11
    def fit(self, tr_x, tr_y, va_x, va_y):

        # parameters
        input_dropout = self.params['input_dropout']
        hidden_layers = int(self.params['hidden_layers'])
        hidden_units = int(self.params['hidden_units'])
        hidden_activation = self.params['hidden_activation']
        hidden_dropout = self.params['hidden_dropout']
        batch_norm = self.params['batch_norm']
        optimizer_type = self.params['optimizer']['type']
        optimizer_lr = self.params['optimizer']['lr']
        batch_size = int(self.params['batch_size'])

        # standardization
        self.scaler = StandardScaler()
        tr_x = self.scaler.fit_transform(tr_x)
        va_x = self.scaler.transform(va_x)

        self.model = Sequential()

        # input layer
        self.model.add(Dropout(input_dropout, input_shape=(tr_x.shape[1], )))

        # hidden layers
        for i in range(hidden_layers):
            self.model.add(Dense(hidden_units))
            if batch_norm == 'before_act':
                self.model.add(BatchNormalization())
            if hidden_activation == 'prelu':
                self.model.add(PReLU())
            elif hidden_activation == 'relu':
                self.model.add(ReLU())
            else:
                raise NotImplementedError
            self.model.add(Dropout(hidden_dropout))

        # output layer
        self.model.add(Dense(1, activation='sigmoid'))

        # optimizer
        if optimizer_type == 'sgd':
            optimizer = SGD(lr=optimizer_lr,
                            decay=1e-6,
                            momentum=0.9,
                            nesterov=True)
        elif optimizer_type == 'adam':
            optimizer = Adam(lr=optimizer_lr,
                             beta_1=0.9,
                             beta_2=0.999,
                             decay=0.)
        else:
            raise NotImplementedError

        # loss function, metrics, and other compile settings
        self.model.compile(loss='binary_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])

        # epochs and early stopping
        # note: with too many epochs, training may never finish when the learning rate is small
        nb_epoch = 200
        patience = 20
        early_stopping = EarlyStopping(patience=patience,
                                       restore_best_weights=True)

        # run training
        history = self.model.fit(tr_x,
                                 tr_y,
                                 epochs=nb_epoch,
                                 batch_size=batch_size,
                                 verbose=1,
                                 validation_data=(va_x, va_y),
                                 callbacks=[early_stopping])
Example 12
def conv_layer(feature_batch, feature_map, kernel_size=(3, 3), strides=(1, 1), padding='same'):
    conv = Conv2D(filters=feature_map, kernel_size=kernel_size, strides=strides,
                  padding=padding)(feature_batch)
    act = ReLU()(conv)
    bn = BatchNormalization(axis=3)(act)
    return bn
Example 13
def bottleneck_encoder(tensor, nfilters, downsampling=False, dilated=False, asymmetric=False, normal=False, drate=0.1, name=''):
    y = tensor
    skip = tensor
    stride = 1
    ksize = 1
    if downsampling:
        stride = 2
        ksize = 2
        skip = MaxPooling2D(pool_size=(2, 2), name=f'max_pool_{name}')(skip)
        skip = Permute((1, 3, 2), name=f'permute_1_{name}')(skip)  # (B, H, W, C) -> (B, H, C, W)
        ch_pad = nfilters - K.int_shape(tensor)[-1]
        skip = ZeroPadding2D(padding=((0, 0), (0, ch_pad)), name=f'zeropadding_{name}')(skip)
        skip = Permute((1, 3, 2), name=f'permute_2_{name}')(skip)  # (B, H, C, W) -> (B, H, W, C)

    # note: several layer names below say "prelu", but the activation actually used is ReLU
    y = Conv2D(filters=nfilters // 4, kernel_size=(ksize, ksize), kernel_initializer='he_normal',
               strides=(stride, stride), padding='same', use_bias=False, name=f'1x1_conv_{name}')(y)
    y = BatchNormalization(momentum=0.1, name=f'bn_1x1_{name}')(y)
    y = ReLU(name=f'prelu_1x1_{name}')(y)

    if normal:
        y = Conv2D(filters=nfilters // 4, kernel_size=(3, 3), kernel_initializer='he_normal',
                   padding='same', name=f'3x3_conv_{name}')(y)
    elif asymmetric:
        y = Conv2D(filters=nfilters // 4, kernel_size=(5, 1), kernel_initializer='he_normal',
                   padding='same', use_bias=False, name=f'5x1_conv_{name}')(y)
        y = Conv2D(filters=nfilters // 4, kernel_size=(1, 5), kernel_initializer='he_normal',
                   padding='same', name=f'1x5_conv_{name}')(y)
    elif dilated:
        y = Conv2D(filters=nfilters // 4, kernel_size=(3, 3), kernel_initializer='he_normal',
                   dilation_rate=(dilated, dilated), padding='same', name=f'dilated_conv_{name}')(y)
    y = BatchNormalization(momentum=0.1, name=f'bn_main_{name}')(y)
    y = ReLU(name=f'prelu_{name}')(y)

    y = Conv2D(filters=nfilters, kernel_size=(1, 1), kernel_initializer='he_normal',
               use_bias=False, name=f'final_1x1_{name}')(y)
    y = BatchNormalization(momentum=0.1, name=f'bn_final_{name}')(y)
    y = SpatialDropout2D(rate=drate, name=f'spatial_dropout_final_{name}')(y)

    y = Add(name=f'add_{name}')([y, skip])
    y = ReLU(name=f'prelu_out_{name}')(y)

    return y
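A usage sketch (assumed, not from the source) stacking these bottlenecks ENet-style; the flags select the 3x3, asymmetric, or dilated variant of the middle convolution:

from keras.layers import Input

inputs = Input(shape=(256, 256, 3))
x = bottleneck_encoder(inputs, 64, downsampling=True, normal=True, name='1.0')
x = bottleneck_encoder(x, 64, normal=True, name='1.1')
x = bottleneck_encoder(x, 64, dilated=2, name='1.2')      # dilated doubles as the dilation rate
x = bottleneck_encoder(x, 64, asymmetric=True, name='1.3')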
Example 14
    def train(self, tr_x, tr_y, va_x=None, va_y=None):
        """
            tr_x : List[str] (e.g. [ "I am happy", "hello" ])
            tr_y : List[label]
            embedding_model : gensim.models.KeyedVectors object
        """
        # scaling
        validation = va_x is not None

        # parameters
        nb_classes = 5
        embedding_dropout = self.params['embedding_dropout']
        lstm_dropout = self.params['lstm_dropout']
        lstm_recurrent_dropout = self.params['recurrent_dropout']
        hidden_layers = int(self.params['hidden_layers'])
        hidden_units = int(self.params['hidden_units'])
        hidden_activation = self.params['hidden_activation']
        hidden_dropout = self.params['hidden_dropout']
        batch_norm = self.params['batch_norm']
        optimizer_type = self.params['optimizer']['type']
        optimizer_lr = self.params['optimizer']['lr']
        batch_size = int(self.params['batch_size'])
        nb_epoch = int(self.params['nb_epoch'])
        embedding_model = self.params['embedding_model']
        bidirectional = self.params['Bidirectional']
        use_pre_embedding = embedding_model is not None

        # using keras tokenizer here
        token = Tokenizer(num_words=None)
        max_len = 70
        if validation:
            token.fit_on_texts(list(tr_x) + list(va_x))
        else:
            token.fit_on_texts(list(tr_x))

        xtrain_seq = token.texts_to_sequences(tr_x)
        tr_x = pad_sequences(xtrain_seq, maxlen=max_len)
        tr_y = np_utils.to_categorical(tr_y, num_classes=5)

        if validation:
            xvalid_seq = token.texts_to_sequences(va_x)
            va_x = pad_sequences(xvalid_seq, maxlen=max_len)
            va_y = np_utils.to_categorical(va_y, num_classes=5)

        word_index = token.word_index

        if use_pre_embedding:
            # create an embedding matrix
            vector_dim = embedding_model.vector_size
            embedding_matrix = np.zeros((len(word_index) + 1, vector_dim))
            for word, i in tqdm(word_index.items()):
                # guard against words missing from the embedding vocabulary
                embedding_vector = embedding_model.wv[word] if word in embedding_model.wv else None
                if embedding_vector is not None:
                    embedding_matrix[i] = embedding_vector

        self.model = Sequential()
        # input layer
        if use_pre_embedding:
            self.model.add(Embedding(
                    input_dim=len(word_index) + 1, 
                    output_dim=vector_dim,
                    input_length=max_len,
                    weights=[embedding_matrix],
                    trainable=False))
        else:
            self.model.add(Embedding(input_dim=len(word_index) + 1, 
                    output_dim=300,
                    input_length=max_len))

        self.model.add(SpatialDropout1D(embedding_dropout))
        if bidirectional:
            self.model.add(Bidirectional(LSTM(300, dropout=lstm_dropout, recurrent_dropout=lstm_recurrent_dropout)))
        else:
            self.model.add(LSTM(100, dropout=lstm_dropout, recurrent_dropout=lstm_recurrent_dropout))
        # hidden layers
        for i in range(hidden_layers):
            self.model.add(Dense(hidden_units))
            if batch_norm == 'before_act':
                self.model.add(BatchNormalization())
            if hidden_activation == 'prelu':
                self.model.add(PReLU())
            elif hidden_activation == 'relu':
                self.model.add(ReLU())
            else:
                raise NotImplementedError
            self.model.add(Dropout(hidden_dropout))

        # output layer: softmax to match the categorical_crossentropy loss below
        self.model.add(Dense(nb_classes, activation='softmax'))

        # optimizer
        if optimizer_type == 'sgd':
            optimizer = SGD(lr=optimizer_lr, decay=1e-6, momentum=0.9, nesterov=True)
        elif optimizer_type == 'adam':
            optimizer = Adam(lr=optimizer_lr, beta_1=0.9, beta_2=0.999, decay=0.)
        else:
            raise NotImplementedError

        # loss function, metrics, and other compile settings
        self.model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

        # epochs and early stopping
        # note: with too many epochs, training may never finish when the learning rate is small
        patience = 20
        # run training
        if validation:
            early_stopping = EarlyStopping(monitor='val_loss', patience=patience,
                                            verbose=1, restore_best_weights=True)
            history = self.model.fit(tr_x, tr_y, epochs=nb_epoch, batch_size=batch_size, verbose=2,
                                validation_data=(va_x, va_y), callbacks=[early_stopping])
        else:
            history = self.model.fit(tr_x, tr_y, epochs=nb_epoch, batch_size=batch_size, verbose=2)
Example 15
def get_net(input_shape=(Height, Weight, Channel),
            load_weight_path=None) -> Model:  # expected return type is Model

    inputs = Input(shape=input_shape, name="input")
    x = inputs

    ##################################################################################################################
    x_ident_1 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    # 1st layer group
    x = Conv2D(16, (3, 3), activation=None, padding='same', strides=(1, 1), name='conv1a')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2D(16, (3, 3), activation=None, padding='same', strides=(1, 1), name='conv1b')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool1')(x)
    x = Concatenate(axis=3)([x, x_ident_1])

    ##################################################################################################################
    x_ident_1 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x_ident_1)
    x_ident_2 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    # 2nd layer group
    x = Conv2D(32, (3, 3), activation=None, padding='same', strides=(1, 1), name='conv2a')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2D(32, (3, 3), activation=None, padding='same', strides=(1, 1), name='conv2b')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool2')(x)
    x = Concatenate(axis=3)([x, x_ident_1, x_ident_2])

    ##################################################################################################################
    x_ident_1 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x_ident_1)
    x_ident_2 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x_ident_2)
    x_ident_3 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    # 3rd layer group
    x = Conv2D(64, (3, 3), activation=None, padding='same', strides=(1, 1), name='conv3a')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2D(64, (3, 3), activation=None, padding='same', strides=(1, 1), name='conv3b')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool3')(x)
    x = Concatenate(axis=3)([x, x_ident_1, x_ident_2, x_ident_3])

    ##################################################################################################################
    x_ident_1 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x_ident_1)
    x_ident_2 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x_ident_2)
    x_ident_3 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x_ident_3)
    x_ident_4 = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    # 4th layer group
    x = Conv2D(128, (3, 3), activation=None, padding='same', strides=(1, 1), name='conv4a')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2D(128, (3, 3), activation=None, padding='same', strides=(1, 1), name='conv4b')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool4')(x)
    x = Concatenate(axis=3)([x, x_ident_1, x_ident_2, x_ident_3, x_ident_4])

    x = GlobalMaxPooling2D()(x)
    x = BatchNormalization(name="final_features_344")(x)

    ##################################################################################################################
    if USE_DROPOUT:
        x = Dropout(0.3)(x)

    x = Dense(64, activation='relu', name="final_features_64")(x)
    out_class = Dense(5, activation='softmax', name='out_class')(x)

    model = Model(inputs=inputs, outputs=out_class)

    if load_weight_path is not None:
        model.load_weights(load_weight_path, by_name=False)

    # compile the model
    model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True),
                  loss={"out_class": "categorical_crossentropy"},
                  metrics={
                      "out_class":
                      [categorical_accuracy, categorical_crossentropy]
                  })
    model.summary(line_length=120)

    return model
Example 16
Y_train = np.asarray(Y_train)
X_val = np.asarray(X_val)
Y_val = np.asarray(Y_val)

X_train = X_train.reshape(lt, 2304)
X_val = X_val.reshape(lv, 2304)

inputs = Input(shape=(2304, ))
x = inputs

i = 0

while i < 3:

    x = Dense(254)(x)
    x = ReLU()(x)  # non-linearity
    x = Dropout(0.5)(x)
    i = i + 1

predictions = Dense(nb_classes, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# model.summary()

# the snippet is truncated here in the source; a minimal completion using the arrays above:
history = model.fit(X_train, Y_train, validation_data=(X_val, Y_val))
Example 17
 def conv_block(x, nb_filter, filter_size, atrous_rate=(1, 1)):
     x = Conv2D(nb_filter, filter_size, dilation_rate=atrous_rate, kernel_initializer='he_normal', padding='same')(x)
     x = bn_block(x)
     x = ReLU()(x)
     return x
Example 18
def DarknetConv2D_BN_RELU(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(DarknetConv2D(*args, **no_bias_kwargs),
                   BatchNormalization(), ReLU())
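A usage sketch (assumed): compose and DarknetConv2D come from the surrounding YAD2K-style utilities and are not defined in this snippet:

from keras.layers import Input

inputs = Input(shape=(416, 416, 3))
x = DarknetConv2D_BN_RELU(32, (3, 3))(inputs)  # conv -> batch norm -> ReLU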
Example 19
def BN_Relu(out):
    batch_conv = BatchNormalization(axis=3)(out)
    relu_batch_norm = ReLU()(batch_conv)
    return relu_batch_norm
Example 20
    def train(self, tr_x, tr_y, va_x=None, va_y=None):
        # scaling
        validation = va_x is not None

        # parameters
        nb_classes = self.params['nb_class']
        input_dropout = self.params['input_dropout']
        hidden_layers = int(self.params['hidden_layers'])
        hidden_units = int(self.params['hidden_units'])
        hidden_activation = self.params['hidden_activation']
        hidden_dropout = self.params['hidden_dropout']
        batch_norm = self.params['batch_norm']
        optimizer_type = self.params['optimizer']['type']
        optimizer_lr = self.params['optimizer']['lr']
        batch_size = int(self.params['batch_size'])
        nb_epoch = int(self.params['nb_epoch'])

        if issparse(tr_x):
            scaler = StandardScaler(with_mean=False)
        else:
            scaler = StandardScaler()
        scaler.fit(tr_x)
        tr_x = scaler.transform(tr_x)
        tr_y = np_utils.to_categorical(tr_y, num_classes=nb_classes)
        if validation:
            va_x = scaler.transform(va_x)
            va_y = np_utils.to_categorical(va_y, num_classes=nb_classes)

        self.model = Sequential()
        # input layer
        self.model.add(Dropout(input_dropout, input_shape=(tr_x.shape[1], )))
        # hidden layers
        for i in range(hidden_layers):
            self.model.add(Dense(hidden_units))
            if batch_norm == 'before_act':
                self.model.add(BatchNormalization())
            if hidden_activation == 'prelu':
                self.model.add(PReLU())
            elif hidden_activation == 'relu':
                self.model.add(ReLU())
            else:
                raise NotImplementedError
            self.model.add(Dropout(hidden_dropout))

        # output layer
        self.model.add(Dense(nb_classes, activation='softmax'))

        # optimizer
        if optimizer_type == 'sgd':
            optimizer = SGD(lr=optimizer_lr,
                            decay=1e-6,
                            momentum=0.9,
                            nesterov=True)
        elif optimizer_type == 'adam':
            optimizer = Adam(lr=optimizer_lr,
                             beta_1=0.9,
                             beta_2=0.999,
                             decay=0.)
        else:
            raise NotImplementedError

        # loss function, metrics, and other compile settings
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])

        # epochs and early stopping
        # note: with too many epochs, training may never finish when the learning rate is small
        patience = 12
        # run training
        if validation:
            early_stopping = EarlyStopping(monitor='val_loss',
                                           patience=patience,
                                           verbose=2,
                                           restore_best_weights=True)
            history = self.model.fit(tr_x,
                                     tr_y,
                                     epochs=nb_epoch,
                                     batch_size=batch_size,
                                     verbose=2,
                                     validation_data=(va_x, va_y),
                                     callbacks=[early_stopping])
        else:
            history = self.model.fit(tr_x,
                                     tr_y,
                                     epochs=nb_epoch,
                                     batch_size=batch_size,
                                     verbose=2)

        self.scaler = scaler
Example 21
def _main(args):
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
        config_path)
    assert weights_path.endswith(
        '.weights'), '{} is not a .weights file'.format(weights_path)

    output_path = os.path.expanduser(args.output_path)
    assert output_path.endswith(
        '.h5'), 'output path {} is not a .h5 file'.format(output_path)
    output_root = os.path.splitext(output_path)[0]

    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    weights_header = np.int64(np.ndarray(
        shape=(4, ), dtype='int32', buffer=weights_file.read(16)))
    major = weights_header[0]
    minor = weights_header[1]
    if (major*10 + minor) >= 2 and major < 1000 and minor < 1000:
        final = np.int64(np.ndarray(shape=(1, ), dtype='int32', buffer=weights_file.read(4)))
        weights_header[3] = np.bitwise_or(np.left_shift(final, 4), weights_header[3])
    print('Weights Header: ', weights_header)
    # TODO: Check transpose flag when implementing fully connected layers.
    # transpose = (weights_header[0] > 1000) or (weights_header[1] > 1000)

    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    print('Creating Keras model.')
    if args.fully_convolutional:
        image_height, image_width = None, None
    else:
        image_height = int(cfg_parser['net_0']['height'])
        image_width = int(cfg_parser['net_0']['width'])
    prev_layer = Input(shape=(image_height, image_width, 3))
    all_layers = [prev_layer]

    weight_decay = (float(cfg_parser['net_0']['decay'])
                    if 'net_0' in cfg_parser.sections() else 5e-4)
    count = 0
    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]
            if 'groups' in cfg_parser[section]:
                groups_count = int(cfg_parser[section]['groups'])
            else:
                groups_count = 1
            # padding='same' is equivalent to Darknet pad=1
            padding = 'same' if pad == 1 else 'valid'

            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)

            # TODO: This assumes channel last dim_ordering.
            weights_shape = (size, size, int(prev_layer_shape[-1]/groups_count), filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.product(weights_shape)

            print('conv2d', 'bn'
                  if batch_normalize else '  ', activation, weights_shape)
            print('darknet_w_shape:', darknet_w_shape)
            conv_bias = np.ndarray(
                shape=(filters, ),
                dtype='float32',
                buffer=weights_file.read(filters * 4))
            count += filters

            if batch_normalize:
                bn_weights = np.ndarray(
                    shape=(3, filters),
                    dtype='float32',
                    buffer=weights_file.read(filters * 12))
                count += 3 * filters

                # TODO: Keras BatchNormalization mistakenly refers to var
                # as std.
                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,  # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]  # running var
                ]

            conv_weights = np.ndarray(
                shape=darknet_w_shape,
                dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            count += weights_size

            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            # TODO: Add check for Theano dim ordering.
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]

            # Handle activation.
            act_fn = None
            if activation == 'leaky':
                pass  # Add advanced activation later.
            elif activation =='relu':
                pass
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))
            # Create Conv2D layer
            conv_layer = (Conv2D(
                filters, (size, size),
                strides=(stride, stride),
                groups=groups_count,
                kernel_regularizer=l2(weight_decay),
                use_bias=not batch_normalize,
                weights=conv_weights,
                activation=act_fn,
                padding=padding))(prev_layer)

            if batch_normalize:
                conv_layer = (BatchNormalization(
                    weights=bn_weight_list))(conv_layer)
            prev_layer = conv_layer

            if activation == 'linear':
                all_layers.append(prev_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)
            elif activation == 'relu':
                act_layer = ReLU()(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)

        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                MaxPooling2D(
                    padding='same',
                    pool_size=(size, size),
                    strides=(stride, stride))(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('avgpool'):
            if cfg_parser.items(section) != []:
                raise ValueError('{} with params unsupported.'.format(section))
            all_layers.append(GlobalAveragePooling2D()(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]
            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = concatenate(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer

        elif section.startswith('reorg'):
            block_size = int(cfg_parser[section]['stride'])
            assert block_size == 2, 'Only reorg with stride 2 supported.'
            all_layers.append(
                Lambda(
                    space_to_depth_x2,
                    output_shape=space_to_depth_x2_output_shape,
                    name='space_to_depth_x2')(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('region'):
            with open('{}_anchors.txt'.format(output_root), 'w') as f:
                print(cfg_parser[section]['anchors'], file=f)

        elif (section.startswith('net') or section.startswith('cost') or
              section.startswith('softmax')):
            pass  # Configs not currently handled during model definition.

        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))

    # Create and save model.
    model = Model(inputs=all_layers[0], outputs=all_layers[-1])
    print(model.summary())
    model.save('{}'.format(output_path))
    print('Saved Keras model to {}'.format(output_path))
    image = cv2.imread('YAD2K/images/1.jpg')

    input_image = cv2.resize(image, (320, 224))
    input_image = np.expand_dims(input_image, 0)
    # the converted model has a single image input
    netout = model.predict(input_image)

    print(netout)
    #image = draw_boxes(image, boxes, labels=['person'])
    cv2.imwrite('output.jpg', image)
    # Check to see if all weights have been read.
    remaining_weights = len(weights_file.read()) / 4
    weights_file.close()
    print('Read {} of {} from Darknet weights.'.format(count, count +
                                                       remaining_weights))
    if remaining_weights > 0:
        print('Warning: {} unused weights'.format(remaining_weights))

    if args.plot_model:
        plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
        print('Saved model plot to {}.png'.format(output_root))
Example 22
 def BN_FC(self, x, FNum):
     x = Dense(FNum)(x)
     x = BatchNormalization()(x)
     x = ReLU()(x)
     return x
Example 23
    def fit(self, tr_x, tr_y, va_x=None, va_y=None):

        # parameters
        input_dropout = self.params['input_dropout']
        hidden_layers = int(self.params['hidden_layers'])
        hidden_units = int(self.params['hidden_units'])
        hidden_activation = self.params['hidden_activation']
        hidden_dropout = self.params['hidden_dropout']
        batch_norm = self.params['batch_norm']
        optimizer_type = self.params['optimizer']['type']
        optimizer_lr = self.params['optimizer']['lr']
        batch_size = int(self.params['batch_size'])

        # standardization
        y_scaler = StandardScaler()
        tr_y_std = y_scaler.fit_transform(np.log1p(tr_y.values.reshape(-1, 1)))

        # set up and scale the data
        validation = va_x is not None
        if validation:
            va_y_std = y_scaler.transform(np.log1p(va_y.values.reshape(-1, 1)))

        # build the model
        # input layer
        self.model = Sequential()
        self.model.add(Dense(512, input_shape=(tr_x.shape[1], )))
        self.model.add(PReLU())
        self.model.add(Dropout(input_dropout))

        # hidden layers
        for i in range(hidden_layers):
            self.model.add(Dense(hidden_units))
            if batch_norm == 'before_act':
                self.model.add(BatchNormalization())
            if hidden_activation == 'prelu':
                self.model.add(PReLU())
            elif hidden_activation == 'relu':
                self.model.add(ReLU())
            else:
                raise NotImplementedError
            self.model.add(Dropout(hidden_dropout))

        self.model.add(Dense(1, activation="linear"))

        # optimizer
        if optimizer_type == 'sgd':
            optimizer = SGD(lr=optimizer_lr,
                            decay=1e-6,
                            momentum=0.9,
                            nesterov=True)
        elif optimizer_type == 'adam':
            optimizer = Adam(lr=optimizer_lr,
                             beta_1=0.9,
                             beta_2=0.999,
                             decay=0.)
        else:
            raise NotImplementedError

        # loss function and other compile settings
        self.model.compile(loss=root_mean_squared_error, optimizer=optimizer)

        if validation:
            early_stopping = EarlyStopping(monitor='val_loss',
                                           patience=5,
                                           verbose=1,
                                           restore_best_weights=True)
            self.model.fit(tr_x,
                           tr_y_std,
                           epochs=200,
                           batch_size=64,
                           verbose=1,
                           validation_data=(va_x, va_y_std),
                           callbacks=[early_stopping])
        else:
            self.model.fit(tr_x,
                           tr_y_std,
                           epochs=200,
                           batch_size=64,
                           verbose=1)

        # keep the model and scaler
        self.scaler = y_scaler
Example 24
def build_model(WINDOW_SIZE):
    abits = 16
    wbits = 16
    kernel_lr_multiplier = 10

    def quantized_relu(x):
        return quantize_op(x,nb=abits)

    def binary_tanh(x):
        return binary_tanh_op(x)
    # network_type = 'float'
    # network_type = 'qnn'
    # network_type = 'full-qnn'
    network_type = 'bnn'
    # network_type = 'full-bnn'
    H = 1.
    if network_type == 'float':
        Conv_ = lambda f, s, c, n: Conv1D(kernel_size=s, filters=f, padding='same', activation='linear',
                                          input_shape=(c, 1), name=n)
        Conv = lambda f, s, n: Conv1D(kernel_size=s, filters=f, padding='same', activation='linear', name=n)
        Dense_ = lambda f, n: Dense(units=f, kernel_initializer='normal', activation='relu', name=n)
        Act = lambda: ReLU()
    elif network_type == 'qnn':
        Conv_ = lambda f, s, c, n: QuantizedConv1D(kernel_size=s, H=1, nb=wbits, filters=f, strides=1,
                                                   padding='same', activation='linear',
                                                   input_shape=(c, 1), name=n)
        Conv = lambda f, s, n: QuantizedConv1D(kernel_size=s, H=1, nb=wbits, filters=f, strides=1,
                                               padding='same', activation='linear', name=n)
        Act = lambda: ReLU()
        Dense_ = lambda f, n: QuantizedDense(units=f, nb=wbits, name=n)
    elif network_type == 'full-qnn':
        Conv_ = lambda f, s, c, n: QuantizedConv1D(kernel_size=s, H=1, nb=wbits, filters=f, strides=1,
                                                   padding='same', activation='linear',
                                                   input_shape=(c, 1), name=n)
        Conv = lambda f, s, n: QuantizedConv1D(kernel_size=s, H=1, nb=wbits, filters=f, strides=1,
                                               padding='same', activation='linear', name=n)
        Act = lambda: Activation(quantized_relu)
        Dense_ = lambda f, n: QuantizedDense(units=f, nb=wbits, name=n)
    elif network_type == 'bnn':
        Conv_ = lambda f, s, c, n: BinaryConv1D(kernel_size=s, H=1, filters=f, strides=1, padding='same',
                                                activation='linear', input_shape=(c, 1), name=n)
        Conv = lambda f, s, n: BinaryConv1D(kernel_size=s, H=1, filters=f, strides=1, padding='same',
                                            activation='linear', name=n)
        Dense_ = lambda f, n: BinaryDense(units=f, name=n)
        Act = lambda: ReLU()
    elif network_type == 'full-bnn':
        Conv_ = lambda f, s, c, n: BinaryConv1D(kernel_size=s, H=1, filters=f, strides=1, padding='same',
                                                activation='linear', input_shape=(c, 1), name=n)
        Conv = lambda f, s, n: BinaryConv1D(kernel_size=s, H=1, filters=f, strides=1, padding='same',
                                            activation='linear', name=n)
        Act = lambda: Activation(binary_tanh)
    else:
        # fail fast: otherwise Conv_/Conv/Dense_/Act are undefined below
        raise ValueError('wrong network type; the supported types are float, qnn, full-qnn, bnn and full-bnn')


    model = Sequential()
    OUTPUT_CLASS = 4    # output classes
    model.add(Conv_(64, 55, WINDOW_SIZE,  'conv1')  )  
    model.add(Act())
    model.add(MaxPooling1D(10))
    model.add(Dropout(0.5))
    model.add(Conv(64, 25,  'conv2' ))
    model.add(Act())
    model.add(MaxPooling1D(5))
    model.add(Dropout(0.5))
    model.add(Conv(64, 10,  'conv3'))
    model.add(Act())
    model.add(GlobalAveragePooling1D())
    model.add(Dense_(256,  'den6'))
    model.add(Dropout(0.5))
    model.add(Dense_(128,  'den7'))
    model.add(Dropout(0.5))		
    model.add(Dense_(64, 'den8'))
    model.add(Dropout(0.5))	
    model.add(Dense(OUTPUT_CLASS, kernel_initializer='normal', activation='softmax', name = 'den9'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    print(model.summary())
    plot_model(model, to_file='my_model.png')
    return model
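A usage sketch (assumed): WINDOW_SIZE is the length of the 1-D input window fed to conv1, and the compiled model expects 4-class one-hot targets:

model = build_model(WINDOW_SIZE=3600)  # 3600-sample windows; the value is illustrative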
Example 25
from PIL import Image

INPUT_SCALE = 2
img_shape = (32, 32, 1)

img = image.load_img('./data/Set1/comic.bmp', color_mode = "grayscale")
# img = image.img_to_array(img)
# img = img[0:8,0:8,0]
#input_img = Input(shape=(IMG_SIZE[0]/INPUT_SCALE, IMG_SIZE[1]/INPUT_SCALE, 1))
input_img = Input(batch_shape=(1,img.size[1],img.size[0],1))

# feature extraction stage
model = Conv2D(64,(3,3),padding='same',kernel_initializer='he_normal')(input_img)
model = ReLU()(model)
model = Conv2D(64,(3,3),padding='same',kernel_initializer='he_normal')(model)
model = ReLU()(model)
model = Conv2D(64,(3,3),padding='same',kernel_initializer='he_normal')(model)

# non-linear mapping stage
model = Conv2D(16,(1,1),padding='same',kernel_initializer='he_normal')(model)
model = ReLU()(model)

model = Conv2D(16,(3,3),padding='same',kernel_initializer='he_normal')(model)
model = ReLU()(model)
model = Conv2D(16,(3,3),padding='same',kernel_initializer='he_normal')(model)
model = ReLU()(model)
model = Conv2D(16,(3,3),padding='same',kernel_initializer='he_normal')(model)
model = ReLU()(model)