Code Example #1
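The snippets below are shown without their import block. A minimal sketch of what they appear to assume follows; the module that provides toolkit and the gen_*_train_valid_data helpers is not shown in the examples, and depending on the Keras version the layer imports may need to come from tensorflow.keras instead of keras.

# Sketch of the imports the examples appear to rely on (assumption, not shown in the source)
from keras.models import Model
from keras.layers import (Input, Dense, Activation, Dropout, Flatten, Reshape,
                          Conv2D, Conv2DTranspose, MaxPooling2D, UpSampling2D,
                          BatchNormalization, LeakyReLU)
import sklearn.preprocessing
from sklearn.preprocessing import StandardScaler
# toolkit and the gen_*_train_valid_data helpers come from the anomaly-detection
# package these tests exercise; its import path is not shown here.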
def test_arrhythmia_in_DeepSVDD():

    input_shape = (274, )
    inputs = Input(shape=input_shape, name='encoder_input')
    x = Dense(125, activation='tanh')(inputs)
    out = Dense(60, activation='tanh')(x)
    Encoder = Model(inputs, out)

    i = Input(shape=(60, ))
    x = Dense(125, activation='tanh')(i)
    y = Dense(274, name='decoder_out', activation=None)(x)
    Decoder = Model(i, y)

    tool = toolkit()
    #set options
    options = tool.setAlgorthem("deepsvdd")
    options.datasetname = "arrhythmia"
    options.class_num = 0
    options.pre_train_epochs = 10
    options.train_epochs = 50
    options.encoder_model = Encoder
    options.decoder_model = Decoder
    options.inputs = inputs
    options.x_train, options.train_label, options.x_test, options.test_label = gen_arrhythmia_train_valid_data(
    )
    scaler = StandardScaler()
    options.x_train = scaler.fit_transform(options.x_train)
    options.x_test = scaler.transform(options.x_test)
    tool.train(options)
    tool.test()
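Every example on this page follows the same driver pattern: instantiate toolkit(), call setAlgorthem(...) to obtain an algorithm-specific options object, fill in the dataset, models, and hyperparameters on that object, then run tool.train(options) followed by tool.test().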
Code Example #2
def test_SVDD():
    i = Input(shape=(28, 28, 1))
    x = Conv2D(16, (3, 3), activation='relu', padding='same')(i)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    Encoder = Model(i, x)

    i = Input(shape=(4, 4, 8)) # 8 conv2d features
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(i)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(16, (3, 3), activation='relu')(x)  # valid padding shrinks 16x16 to 14x14 so the final upsampling restores 28x28
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
    Decoder = Model(i, x)

    tool = toolkit()
    #set options
    options = tool.setAlgorthem("deepsvdd")
    options.datasetname = "mnist"
    options.class_num = 0
    options.pre_train_epochs = 10
    options.train_epochs = 20
    options.encoder_model = Encoder
    options.decoder_model = Decoder
    
    tool.train(options)
    tool.test()

    return
Code Example #3
def test_KDD_on_Dagmm():
    tool = toolkit()
    #set options
    options = tool.setAlgorthem("DAGMM")
    options.datasetname = "kdd"
    X_train, X_test, y_train, y_test = gen_KDD_train_valid_data()
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    minmaxscaler = sklearn.preprocessing.MinMaxScaler().fit(X_train)
    X_train = minmaxscaler.transform(X_train)

    # scale the test split with the scaler fitted on the training data
    X_test = minmaxscaler.transform(X_test)

    options.X_test = X_test
    options.X_train = X_train
    options.y_train = y_train
    options.y_test = y_test
    options.comp_hidden = [60, 30, 10, 1, 10, 30, 60]
    options.mid_layer = 3
    options.epoch = 80
    options.batch_size = 4096
    tool.train(options)
    tool.test()
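Judging from this example and Code Example #7 below, comp_hidden appears to list the layer widths of DAGMM's compression autoencoder (here a 60-30-10 encoder, a 1-dimensional bottleneck, and a 10-30-60 decoder), and mid_layer the index of that bottleneck within the list (3 here, 1 for the [10, 2, 10] network in Code Example #7); this is an inference from the snippets rather than documented behaviour.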
Code Example #4
def test_REPEN():
  
    tool = toolkit()
    #set options
    options = tool.setAlgorthem("repen")
    options.datasetname = "kdd"
    options.num_runs = 3

    
    tool.train(options)
    tool.test()

    return
Code Example #5
def test_GAN():

    tool = toolkit()
    #set options
    options = tool.setAlgorthem("GAN")
    options.datasetname = "kdd"
    options.class_num = 0
    options.epochs = 1
    options.generator_model = None
    options.discriminator_model = None
    
    tool.train(options)
    tool.test()

    return
Code Example #6
def test_Thyroid_on_Dagmm_with_customed_net_structure():
    tool = toolkit()
    #set options
    options = tool.setAlgorthem("DAGMM")
    options.datasetname = "Thyroid"
    X_train, X_test, y_train, y_test = gen_Thyroid_train_valid_data()
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    options.X_test = X_test
    options.X_train = X_train
    options.y_train = y_train
    options.y_test = y_test

    # set compression model
    input_shape = (6,)
    inputs = Input(shape=input_shape, name='encoder_input')
    x = Dense(14, activation=None)(inputs)
    x = Activation('tanh')(x)
    mid_presentation = Dense(10, activation=None)(x)
    x = Dense(14, activation=None)(mid_presentation)
    x = Activation('tanh')(x)
    y = Dense(6, name='compression_out_layer', activation=None)(x)

    options.compress_net_inputs = inputs
    options.compress_net_mid_presentation = mid_presentation
    options.compress_net_outputs = y
    
    # set estimation net
    z = options.create_estimate_net_z_layer(inputs, mid_presentation, y)
    layer = Dense(10, activation=None)(z)
    layer = Activation('tanh')(layer)
    layer = Dropout(0.5, noise_shape=None, seed=None)(layer)
    gamma = Dense(2, activation='softmax', name='gamma')(layer)
    
    options.z_layer = z
    options.gamma_layer = gamma

    options.epoch = 100
    options.normal_portion = 97.5
    options.batch_size = 1024
    tool.train(options)
    tool.test()
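This example bypasses comp_hidden/mid_layer and wires the networks by hand: the compression autoencoder is exposed through compress_net_inputs, compress_net_mid_presentation and compress_net_outputs, while create_estimate_net_z_layer builds the z vector that the estimation network maps to the softmax membership output gamma (the usual DAGMM mixture-membership head). How these fields are consumed internally is not shown in the snippet.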
Code Example #7
def test_arrhythmia_on_Dagmm():
    tool = toolkit()
    #set options
    options = tool.setAlgorthem("DAGMM")
    options.datasetname = "arrhythmia"
    X_train, X_test, y_train, y_test = gen_arrhythmia_train_valid_data()
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    options.X_test = X_test
    options.X_train = X_train
    options.y_train = y_train
    options.y_test = y_test
    options.comp_hidden = [10,2,10]
    options.mid_layer = 1
    options.mix_components = 2
    options.epoch = 15000
    options.batch_size = 500
    options.normal_portion = 85
    tool.train(options)
    tool.test()
Code Example #8
def test_B(n_class, epcho):
    # change kernel size to 3x3
    kernel_size = 3
    latent_dim = 128  # number of latent features
    inputs = Input(shape=(32, 32, 3), name='encoder_input')

    x = inputs
    x = Conv2D(filters=32,
               kernel_size=kernel_size,
               use_bias=False,
               padding="same",
               kernel_initializer="glorot_normal")(x)
    x = BatchNormalization(epsilon=1e-04, scale=False)(x)
    x = LeakyReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(filters=64,
               kernel_size=kernel_size,
               use_bias=False,
               padding="same",
               kernel_initializer="glorot_normal")(x)
    x = BatchNormalization(epsilon=1e-04, scale=False)(x)
    x = LeakyReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(filters=128,
               kernel_size=kernel_size,
               use_bias=False,
               padding="same",
               kernel_initializer="glorot_normal")(x)
    x = BatchNormalization(epsilon=1e-04, scale=False)(x)
    x = LeakyReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Generate the latent vector
    x = Flatten()(x)
    latent = Dense(latent_dim, name='latent_vector', use_bias=False)(x)

    #Instantiate Encoder Model
    encoder = Model(inputs, latent, name='encoder')

    latent_inputs = Input(shape=(128, ), name='decoder_input')
    x = BatchNormalization(epsilon=1e-04, scale=False)(latent_inputs)
    x = Reshape((4, 4, 8))(x)
    x = LeakyReLU()(x)

    x = Conv2DTranspose(filters=128,
                        kernel_size=kernel_size,
                        padding='same',
                        use_bias=False,
                        kernel_initializer="glorot_normal")(x)
    x = BatchNormalization(epsilon=1e-04, scale=False)(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2), interpolation='nearest')(x)

    x = Conv2DTranspose(filters=64,
                        kernel_size=kernel_size,
                        padding='same',
                        use_bias=False,
                        kernel_initializer="glorot_normal")(x)
    x = BatchNormalization(epsilon=1e-04, scale=False)(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2), interpolation='nearest')(x)

    x = Conv2DTranspose(filters=32,
                        kernel_size=kernel_size,
                        padding='same',
                        use_bias=False,
                        kernel_initializer="glorot_normal")(x)
    x = BatchNormalization(epsilon=1e-04, scale=False)(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2), interpolation='nearest')(x)

    x = Conv2DTranspose(filters=3,
                        kernel_size=kernel_size,
                        padding='same',
                        use_bias=False,
                        kernel_initializer="glorot_normal")(x)

    outputs = Activation('sigmoid', name='decoder_output')(x)
    # Instantiate Decoder Model
    decoder = Model(latent_inputs, outputs, name='decoder')

    tool = toolkit()
    #set options
    options = tool.setAlgorthem("deepsvdd")
    options.datasetname = "cifar10"
    options.class_num = n_class
    options.pre_train_epochs = 10
    options.train_epochs = epcho
    options.encoder_model = encoder
    options.decoder_model = decoder

    tool.train(options)
    tool.test()
    print('change kernel size to 3x3')
    return
Code Example #9
def test_B_C_D_E(n_class, epcho):
    # remove one Conv layer + remove one Dense layer + set filters = 32
    # set kernel size to 3x3
    kernel_size = 3
    inputs = Input(shape=(32, 32, 3), name='encoder_input')

    x = inputs
    x = Conv2D(filters=32,
               kernel_size=kernel_size,
               use_bias=False,
               padding="same",
               kernel_initializer="glorot_normal")(x)
    x = BatchNormalization(epsilon=1e-04, scale=False)(x)
    x = LeakyReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(filters=32,
               kernel_size=kernel_size,
               use_bias=False,
               padding="same",
               kernel_initializer="glorot_normal")(x)
    x = BatchNormalization(epsilon=1e-04, scale=False)(x)
    x = LeakyReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    '''
    x = Conv2D(filters=128,kernel_size=kernel_size,use_bias= False
                    ,padding="same",kernel_initializer="glorot_normal")(x)
    x = BatchNormalization( epsilon=1e-04, scale=False)(x)
    x = LeakyReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    '''

    # Generate the latent vector
    latent = Flatten()(x)
    #latent = Dense(latent_dim, name='latent_vector', use_bias=False)(x)

    #Instantiate Encoder Model
    encoder = Model(inputs, latent, name='encoder')
    encoder.summary()

    latent_inputs = Input(shape=(8 * 8 * 32, ), name='decoder_input')
    x = BatchNormalization(epsilon=1e-04, scale=False)(latent_inputs)
    x = Reshape((8, 8, 32))(x)
    x = LeakyReLU()(x)

    x = Conv2DTranspose(filters=32,
                        kernel_size=kernel_size,
                        padding='same',
                        use_bias=False,
                        kernel_initializer="glorot_normal")(x)
    x = BatchNormalization(epsilon=1e-04, scale=False)(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2), interpolation='nearest')(x)

    x = Conv2DTranspose(filters=32,
                        kernel_size=kernel_size,
                        padding='same',
                        use_bias=False,
                        kernel_initializer="glorot_normal")(x)
    x = BatchNormalization(epsilon=1e-04, scale=False)(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2), interpolation='nearest')(x)

    x = Conv2DTranspose(filters=3,
                        kernel_size=kernel_size,
                        padding='same',
                        use_bias=False,
                        kernel_initializer="glorot_normal")(x)

    outputs = Activation('sigmoid', name='decoder_output')(x)
    # Instantiate Decoder Model
    decoder = Model(latent_inputs, outputs, name='decoder')
    decoder.summary()

    tool = toolkit()
    #set options
    options = tool.setAlgorthem("deepsvdd")
    options.datasetname = "cifar10"
    options.class_num = n_class
    options.pre_train_epochs = 10
    options.train_epochs = epcho
    options.encoder_model = encoder
    options.decoder_model = decoder

    tool.train(options)
    tool.test()
    print('remove one Conv layer + remove one Dense layer + set filters = 32 + set kernel size 3x3')
    return