Example #1
# Imports assumed by this snippet (the original file header, including the
# WEIGHTS_PATH/WEIGHTS_PATH_TOP constants, is not shown):
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D, Flatten, Dense,
                                     GlobalAveragePooling2D, GlobalMaxPooling2D)
from tensorflow.keras.models import Model
from tensorflow.keras.utils import get_source_inputs


def VGGish(load_weights=True, weights='audioset',
           input_tensor=None, input_shape=None,
           out_dim=None, include_top=True, pooling='avg'):

    if weights not in {'audioset', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `audioset` '
                         '(pre-training on audioset).')

    if out_dim is None:
        out_dim = 128

    # input shape
    if input_shape is None:
        input_shape = (496, 64, 1)

    if input_tensor is None:
        aud_input = Input(shape=input_shape, name='input_1')
    else:
        if not K.is_keras_tensor(input_tensor):
            aud_input = Input(tensor=input_tensor, shape=input_shape, name='input_1')
        else:
            aud_input = input_tensor



    # Block 1
    x = Conv2D(64, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv1')(aud_input)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool1')(x)

    # Block 2
    x = Conv2D(128, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool2')(x)

    # Block 3
    x = Conv2D(256, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv3/conv3_1')(x)
    x = Conv2D(256, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv3/conv3_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool3')(x)

    # Block 4
    x = Conv2D(512, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv4/conv4_1')(x)
    x = Conv2D(512, (3, 3), strides=(1, 1), activation='relu', padding='same', name='conv4/conv4_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='pool4')(x)



    if include_top:
        # FC block
        x = Flatten(name='flatten_')(x)
        x = Dense(4096, activation='relu', name='vggish_fc1/fc1_1')(x)
        x = Dense(4096, activation='relu', name='vggish_fc1/fc1_2')(x)
        x = Dense(out_dim, activation='relu', name='vggish_fc2')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)


    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = aud_input
    # Create model.
    model = Model(inputs, x, name='VGGish')


    # load weights
    if load_weights:
        if weights == 'audioset':
            if include_top:
                model.load_weights(WEIGHTS_PATH_TOP)
            else:
                print("POGGERS")
                model.load_weights(WEIGHTS_PATH)
        else:
            print("failed to load weights")

    return model
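A quick smoke test of the builder above (a sketch: it assumes the imports added at the top and skips weight loading, so the undefined WEIGHTS_PATH constants are never touched):

import numpy as np

# Random-init embedding extractor: no top, global average pooling over conv4.
model = VGGish(load_weights=False, weights=None, include_top=False, pooling='avg')
emb = model.predict(np.zeros((1, 496, 64, 1), dtype=np.float32))
print(emb.shape)  # (1, 512)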
Example #2
    imgs += Path(args.dir).files('*.jpeg')

elif args.img[0] is not None:
    print("Loading images from path(s) : ", args.img)
    imgs = args.img

else:
    raise RuntimeError('Either the -dir or the -img argument must be passed')

with tf.device('/CPU:0'):
    base_model = MobileNet((None, None, 3), alpha=1, include_top=False, pooling='avg', weights=None)
    x = Dropout(0.75)(base_model.output)
    x = Dense(10, activation='softmax')(x)

    model = Model(base_model.input, x)
    model.load_weights('weights/mobilenet_weights.h5')

    score_list = []
    NIMA_scores = []

    for img_path in tqdm(imgs):
        img = load_img(img_path, target_size=target_size)
        x = img_to_array(img)
        x = np.expand_dims(x, axis=0)

        x = preprocess_input(x)

        scores = model.predict(x, batch_size=1, verbose=0)[0]

        mean = mean_score(scores)
        std = std_score(scores)
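The snippet ends before mean_score and std_score are shown; in the NIMA reference code they are small helpers over the 10-bin rating distribution, roughly like this sketch:

import numpy as np

def mean_score(scores):
    # scores: softmax output over the ratings 1..10
    si = np.arange(1, 11)
    return np.sum(scores * si)

def std_score(scores):
    si = np.arange(1, 11)
    mean = mean_score(scores)
    return np.sqrt(np.sum(((si - mean) ** 2) * scores))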
Example #3
                     save_best_only=True,
                     verbose=1)

model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),
              metrics=['acc'])
learning_history = model.fit(train_generator,
                             epochs=300,
                             steps_per_epoch=len(x_train) // 32,
                             validation_data=valid_generator,
                             callbacks=[early_stopping, lr, mc])

model.save('../data/lotte/h5/b4_weight_model.h5')

# predict
model.load_weights('../data/lotte/mc/lotte_b4.h5')
result = model.predict(x_pred, verbose=True)

# tta_steps = 30
# # TTA: show the model several augmented versions of each image, average the
# # predictions over the passes, and use that mean as the final value
# predictions = []

# for i in tqdm(range(tta_steps)):
#    # reset the generator
#     test_generator.reset()
#     preds = model.predict_generator(generator = test_generator, verbose = 1)
#     predictions.append(preds)

# final prediction via averaging
# pred = np.mean(predictions, axis=0)
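For reference, a runnable version of the commented-out TTA idea (a sketch assuming a Keras model and a non-shuffling, augmenting test_generator are in scope; model.predict accepts generators in TF 2.x):

import numpy as np
from tqdm import tqdm

tta_steps = 30
predictions = []
for _ in tqdm(range(tta_steps)):
    test_generator.reset()                      # restart from the first batch
    preds = model.predict(test_generator, verbose=1)
    predictions.append(preds)                   # one full augmented pass per step

pred = np.mean(predictions, axis=0)             # average the passes for the final prediction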
Example #4
class AEOshea1hot(object):
  def __init__(self, in_dim=None, latent_dim=None, h_dim=None, train_snr_dB=10.0):
    self.in_dim = in_dim
    self.latent_dim = latent_dim
    self.train_snr_dB = train_snr_dB
    self.h_dim = h_dim
    self.train_noisepow = 10.0**(-self.train_snr_dB/10.0)
    
    if self.in_dim and self.latent_dim:
      self.make_model()
    
  def make_model(self):
    # Input layer
    self.inputs = Input(shape=(self.in_dim,), name="enc_in")
    
    # Hidden Layers
    x = self.inputs
    if self.h_dim is not None:
      for (i,d) in enumerate(self.h_dim):
        x = Dense( d, activation='relu', name="enc_l{}".format(i))(x)
    # Latent mean (no variance head; the channel adds fixed-power noise)
    x = Dense(self.latent_dim)(x)
    self.z_mean = BatchNormalization(center=False, scale=False)(x)
    # Channel
    self.z = Lambda(self.channel, output_shape=(self.latent_dim,), name="z")(self.z_mean)
   
    
    # Encoder model
    self.encoder = Model(self.inputs, [self.z_mean,self.z], name="encoder")
    
    # Decoder
    self.latent_inputs = Input(shape=(self.latent_dim,), name="z_sample")
    
    # Hidden layers
    x = self.latent_inputs
    if self.h_dim is not None:
      for (i,d) in enumerate(self.h_dim[::-1]):
        x = Dense( d, activation='relu', name="dec_l{}".format(i))(x)
    self.dec_outputs = Dense( self.in_dim, activation='softmax', name="decoder_out")(x)
    
    # Decoder model
    self.decoder = Model(self.latent_inputs, self.dec_outputs, name="decoder")
    
    # VAE
    self.outputs = self.decoder(self.encoder(self.inputs)[1])
    self.model = Model(self.inputs, self.outputs, name="VAE")
    
    # Losses (reduced to a scalar so add_loss receives a well-defined quantity)
    self.recon_loss = K.mean(categorical_crossentropy(self.inputs, self.outputs))
    
    self.model.add_loss(self.recon_loss)
    
    self.model.compile(optimizer='adam')
#     self.model.compile( optimizer=tf.train.AdamOptimizer(learning_rate=0.01))
    
    
  def channel( self, zMean ):
    batch = K.shape( zMean )[0]
    epsilon = K.random_normal( shape = (batch,self.latent_dim) )
    # Because BatchNormalization produces z vector with signal power 'latentDim',
    # we should not scale the noise power here.
    return zMean + np.sqrt(self.train_noisepow)*epsilon
  
  def fit(self, x_train, epochs=10, batch_size=128, validation_data=None, 
          verbose=0, callbacks=None):
    train_log = self.model.fit(x_train, epochs=epochs, batch_size=batch_size, 
                               validation_data=validation_data, verbose=verbose,
                               callbacks=callbacks)
    return train_log.history
  
  def encode( self, data ):
    return self.encoder.predict(data)
  
  def decode( self, data ):
    return self.decoder.predict(data)
  
  def analysis( self ):
    xTest = np.eye(self.in_dim)
    enc_mu, enc_z = self.encode(xTest)
    dec_mu = self.decode(enc_mu)
    dec_z = self.decode(enc_z)

    chDim = self.latent_dim//2
    f = plt.figure(figsize=(5*chDim,9))
    for i in range(chDim):
      ax1 = plt.subplot(2,chDim,i+1)
      ax1.scatter(enc_mu[:,i],enc_mu[:,i+chDim],c=np.arange(self.in_dim))
      for j in range(self.in_dim):
        ax1.annotate( j, (enc_mu[j,i],enc_mu[j,i+chDim]) )
      ax1.set_title( "TX Symbols n = {}".format(i+1) )

      
  def save_model(self, fileprefix):
    with open(fileprefix+".dil","wb") as obj:
      dill.dump( {'in_dim': self.in_dim,
                  'latent_dim': self.latent_dim,
                  'train_noisepow': self.train_noisepow,
                  'train_snr_dB': self.train_snr_dB,
                  'h_dim': self.h_dim}, obj )
    self.model.save_weights(fileprefix+".h5")
  
  def load_model(self, fileprefix):
    with open(fileprefix+".dil","rb") as obj:
      config = dill.load(obj)
      self.in_dim = config['in_dim']
      self.latent_dim = config['latent_dim']
      self.train_snr_dB = config['train_snr_dB']
      self.train_noisepow = config['train_noisepow']
      self.h_dim = config['h_dim']
    self.make_model()
    self.model.load_weights(fileprefix+".h5")
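A usage sketch for the class above (illustrative values; assumes numpy, matplotlib and the Keras imports from the surrounding module):

# 8-symbol one-hot alphabet, 2-D latent channel, trained at 10 dB SNR.
ae = AEOshea1hot(in_dim=8, latent_dim=2, h_dim=[16], train_snr_dB=10.0)
x_train = np.eye(8)[np.random.randint(0, 8, 10000)]   # random one-hot symbols
history = ae.fit(x_train, epochs=20, batch_size=256, verbose=1)
ae.analysis()                                          # constellation scatter plot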
Example #5
x = Conv2D(512, (3, 3), activation='relu', name='conv5_2', padding='same')(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_3', padding='same')(x)
x = pooling_func(x)

model = Model(ip, x)

if model == "vgg19":
    weights = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
                       TF_19_WEIGHTS_PATH_NO_TOP,
                       cache_subdir='models')
else:
    weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                       TF_WEIGHTS_PATH_NO_TOP,
                       cache_subdir='models')

model.load_weights(weights)

if K.backend() == 'tensorflow' and K.image_data_format() == "channels_first":
    convert_all_kernels_in_model(model)

print('Model loaded.')
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])


def gram_matrix(input_tensor):
    assert K.ndim(input_tensor) == 3

    features = K.batch_flatten(K.permute_dimensions(input_tensor, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram
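For context, gram_matrix is normally consumed by a style-loss term like this sketch (names and image dimensions are illustrative, not from the original file):

def style_loss(style_features, combination_features, img_nrows=400, img_ncols=400):
    # Mean squared difference between Gram matrices, scaled as in Gatys et al.
    S = gram_matrix(style_features)
    C = gram_matrix(combination_features)
    channels = 3
    size = img_nrows * img_ncols
    return K.sum(K.square(S - C)) / (4.0 * (channels ** 2) * (size ** 2))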
Example #6
gan = Model(inputs=[random_vec_input, labels_input], outputs=decision)
gan.compile(loss="binary_crossentropy", optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4))

if verbose:
    print("Model is built.")
    gan.summary()

# Training
# -------------------------

if verbose:
    print("Training GAN model...")

if path_weights is not None and os.path.exists(path_weights):
    gan.load_weights(path_weights)

# Adversarial ground truths
real = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))

iterations = len(x_train) // batch_size

for e in range(epochs):
    if verbose:
        print("Epoch {}/{} :".format(e, epochs), end="")

    for i in range(iterations):

        # Training discriminator
        # -------------------------
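        # The snippet is truncated here; a typical conditional-GAN discriminator
        # step in this pattern looks roughly like the sketch below (generator,
        # discriminator, y_train and latent_dim are assumed from the omitted setup).
        idx = np.random.randint(0, len(x_train), batch_size)
        real_imgs, labels = x_train[idx], y_train[idx]
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        fake_imgs = generator.predict([noise, labels])
        d_loss_real = discriminator.train_on_batch([real_imgs, labels], real)
        d_loss_fake = discriminator.train_on_batch([fake_imgs, labels], fake)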
Example #7
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath="/content/drive/My Drive/weights{epoch:02d}.h5",
    save_weights_only=True,
    save_freq="epoch")
N = X_train.shape[0]
X_L, Y_L = X_train[:N // 2], Y_train[:N // 2]
U_hat = X_train[N // 2:]
history = model.fit(generate_data(X_L, Y_L, U_hat, X_val, Y_val, batch_size),
                    epochs=30,
                    steps_per_epoch=X_L.shape[0] // batch_size,
                    callbacks=[model_checkpoint_callback])

data = np.load('/content/drive/My Drive/Dataset/splitted_mfccs_order0_new.npz')
X_test = data['X_test']
Y_test = data['Y_test']
model_U.load_weights("/content/drive/My Drive/weights11.h5")
model_U.evaluate(X_test, Y_test)

inp = Input(shape=(129, 20))
x = LSTM(128, return_sequences=True)(inp)
x = LSTM(32)(x)
y = Dense(10,
          activation='softmax',
          kernel_regularizer=tf.keras.regularizers.l2(1e-4))(x)
model_base = Model(inputs=inp, outputs=y)
model_base.summary()
model_base.compile(loss=tf.keras.losses.categorical_crossentropy,
                   optimizer=tf.keras.optimizers.Adam(),
                   metrics=['accuracy'])

model_base.fit(X_train, Y_train, epochs=5, validation_data=(X_val, Y_val))
Example #8
              metrics=['accuracy'])

with tf.Session() as session:
    K.set_session(session)
    session.run(tf.global_variables_initializer())
    session.run(tf.tables_initializer())
    history = model.fit(x_train, y_train, epochs=10, batch_size=32)
    model.save_weights('./elmo-model-9.h5')

model.summary()

with tf.Session() as session:
    K.set_session(session)
    session.run(tf.global_variables_initializer())
    session.run(tf.tables_initializer())
    model.load_weights('./elmo-model-12.h5')
    predicts = model.predict(x_test, batch_size=10)

y_test = decode(le, y_test)
y_preds = decode(le, predicts)
#for item in y_test:
#       print(item.strip())
#print(y_test)
print('-----')
for item in y_preds:
    print(item.strip())

from sklearn import metrics

print(metrics.confusion_matrix(y_test, y_preds))
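decode is not shown above; with a scikit-learn LabelEncoder and one-hot (or probability) rows it is usually the inverse transform of the argmax, roughly:

import numpy as np

def decode(le, rows):
    # rows: one-hot targets or predicted probabilities, shape (n_samples, n_classes)
    return le.inverse_transform(np.argmax(rows, axis=1))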
Example #9
classifier = nn.classifier(shared_layers,
                           roi_input,
                           C.num_rois,
                           nb_classes=len(classes_count),
                           trainable=True)

model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)

# this is a model that holds both the RPN and the classifier, used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)

try:
    print('loading weights from {}'.format(C.base_net_weights))
    model_rpn.load_weights(C.base_net_weights, by_name=True)
    model_classifier.load_weights(C.base_net_weights, by_name=True)
except Exception:
    print(
        'Could not load pretrained model weights. Weights can be found in the keras application folder '
        'https://github.com/fchollet/keras/tree/master/keras/applications')

optimizer = Adam(lr=1e-5)
optimizer_classifier = Adam(lr=1e-5)
model_rpn.compile(
    optimizer=optimizer,
    loss=[losses.rpn_loss_cls(num_anchors),
          losses.rpn_loss_regr(num_anchors)])
model_classifier.compile(
    optimizer=optimizer_classifier,
    loss=[
Example #10
class HLRGDVGG(object):
    def __init__(self, input_dimension, output_dimension, vggModelLocation, number_of_classes=2, optimizer=None,
                 loss_threshold=defaultLossThreshold, patience=defaultPatience, dropout_rate=defaultDropoutRate,
                 max_relu_bound=None, adv_penalty=0.01, unprotected=False, verbose=False):

        self.buildModel(input_dimension=input_dimension, output_dimension=output_dimension , vggModelLocation=vggModelLocation, optimizer=optimizer,
                        number_of_classes=number_of_classes, loss_threshold=loss_threshold, patience=patience, dropout_rate=dropout_rate,
                        max_relu_bound=max_relu_bound, adv_penalty=adv_penalty, unprotected=unprotected, verbose=verbose)


    def buildModel(self, input_dimension, output_dimension, vggModelLocation, number_of_classes=2, optimizer=None,
                 loss_threshold=defaultLossThreshold, patience=defaultPatience, dropout_rate=defaultDropoutRate,
                 max_relu_bound=None, adv_penalty=0.01, unprotected=False, verbose=False):
        #instantiate the vgg instance
        self.ourVGG = HLVGG(input_dimension=input_dimension, output_dimension=output_dimension, dual_outputs=True, number_of_classes=number_of_classes,
                            loss_threshold=loss_threshold, patience=patience, dropout_rate=0, freezeWeights=True, optimizer=optimizer,
                            max_relu_bound=max_relu_bound, adv_penalty=adv_penalty, unprotected=True, verbose=verbose)

        #set the vgg weights
        self.ourVGG.model.load_weights(vggModelLocation)

        #set protected flag
        self.unprotected = unprotected

        #instantiate our CAE with residual connections
        self.input_dimension, self.output_dimension = input_dimension, np.copy(output_dimension)

        self.loss_threshold, self.number_of_classes = np.copy(loss_threshold), np.copy(number_of_classes)
        self.dropoutRate, self.max_relu_bound = dropout_rate, np.copy(max_relu_bound)
        self.patience = np.copy(patience)
        # self.learning_rate, self.learning_rate_drop = np.copy(learning_rate), np.copy(learning_rate_drop)
        self.image_size = 32
        self.num_channels = 3
        self.num_labels = number_of_classes
        self.penaltyCoeff = adv_penalty
        
        if (verbose):
            print("input dimension: %s"%str(self.input_dimension))
    
        # define input layer
        self.inputLayer = layers.Input(shape=self.input_dimension)
        self.advInputLayer = layers.Input(shape=self.input_dimension)
        previousLayer = self.advInputLayer

        #following the architecture shown in figure 3 of the hlrgd paper, [ https://openaccess.thecvf.com/content_cvpr_2018/papers/Liao_Defense_Against_Adversarial_CVPR_2018_paper.pdf ]
        #
        self.hiddenEncoderLayers = dict()
        #first encoder block
        previousLayer = layers.Conv2D(numberOfFilters[0], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2D(numberOfFilters[0], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        if (self.dropoutRate > 0):
            previousLayer = Dropout(self.dropoutRate)(previousLayer)
        self.hiddenEncoderLayers[0] = previousLayer

        #second encoder block
        previousLayer = layers.Conv2D(numberOfFilters[1], kernel_size=(3,3), strides=(2, 2), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2D(numberOfFilters[1], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2D(numberOfFilters[1], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        if (self.dropoutRate > 0):
            previousLayer = Dropout(self.dropoutRate)(previousLayer)
        self.hiddenEncoderLayers[1] = previousLayer

        #third encoder block
        previousLayer = layers.Conv2D(numberOfFilters[2], kernel_size=(3,3), strides=(2, 2), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2D(numberOfFilters[2], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2D(numberOfFilters[2], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        if (self.dropoutRate > 0):
            previousLayer = Dropout(self.dropoutRate)(previousLayer)
        self.hiddenEncoderLayers[2] = previousLayer

        #fourth encoder block
        previousLayer = layers.Conv2D(numberOfFilters[3], kernel_size=(3,3), strides=(2, 2), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2D(numberOfFilters[3], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2D(numberOfFilters[3], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        if (self.dropoutRate > 0):
            previousLayer = Dropout(self.dropoutRate)(previousLayer)
        self.hiddenEncoderLayers[3] = previousLayer

        #fifth encoder block
        previousLayer = layers.Conv2D(numberOfFilters[4], kernel_size=(3,3), strides=(2, 2), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2D(numberOfFilters[4], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2D(numberOfFilters[4], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        if (self.dropoutRate > 0):
            previousLayer = Dropout(self.dropoutRate)(previousLayer)
        self.hiddenEncoderLayers[4] = previousLayer

        #first decoding block
        #fuse inputs (i.e. concatenate the residual connection with an upscaled version of previousLayer)
        previousLayer = layers.UpSampling2D((2, 2), interpolation='nearest')(previousLayer)
        previousLayer = layers.concatenate([previousLayer, self.hiddenEncoderLayers[3]])
        #perform convolutions
        previousLayer = layers.Conv2DTranspose(numberOfFilters[5], kernel_size=(3,3), strides=(2, 2), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2DTranspose(numberOfFilters[5], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2DTranspose(numberOfFilters[5], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        if (self.dropoutRate > 0):
            previousLayer = Dropout(self.dropoutRate)(previousLayer)

        #second decoding block
        #fuse inputs (i.e. concatenate the residual connection with an upscaled version of previousLayer)
        # previousLayer = layers.UpSampling2D((2, 2), interpolation='nearest')(previousLayer)
        previousLayer = layers.concatenate([previousLayer, self.hiddenEncoderLayers[2]])
        #perform convolutions
        previousLayer = layers.Conv2DTranspose(numberOfFilters[6], kernel_size=(3,3), strides=(2, 2), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2DTranspose(numberOfFilters[6], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2DTranspose(numberOfFilters[6], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        if (self.dropoutRate > 0):
            previousLayer = Dropout(self.dropoutRate)(previousLayer)

        #third decoding block
        #fuse inputs (i.e. concatenate the residual connection with an upscaled version of previousLayer)
        # previousLayer = layers.UpSampling2D((2, 2), interpolation='nearest')(previousLayer)
        previousLayer = layers.concatenate([previousLayer, self.hiddenEncoderLayers[1]])
        #perform convolutions
        previousLayer = layers.Conv2DTranspose(numberOfFilters[7], kernel_size=(3,3), strides=(2, 2), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2DTranspose(numberOfFilters[7], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2DTranspose(numberOfFilters[7], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        if (self.dropoutRate > 0):
            previousLayer = Dropout(self.dropoutRate)(previousLayer)

        #fourth decoding block
        #fuse inputs (i.e. concatenate the residual connection with an upscaled version of previousLayer)
        # previousLayer = layers.UpSampling2D((2, 2), interpolation='nearest')(previousLayer)
        previousLayer = layers.concatenate([previousLayer, self.hiddenEncoderLayers[0]])
        #perform convolutions
        previousLayer = layers.Conv2DTranspose(numberOfFilters[8], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        previousLayer = layers.Conv2DTranspose(numberOfFilters[8], kernel_size=(3,3), strides=(1, 1), padding='same', activation='relu')(previousLayer)
        previousLayer = layers.BatchNormalization()(previousLayer)
        if (self.dropoutRate > 0):
            previousLayer = Dropout(self.dropoutRate)(previousLayer)

        #1x1 convolutional layer
        previousLayer = layers.Conv2DTranspose(1, kernel_size=(1,1), strides=(1, 1), padding='same', activation='relu')(previousLayer)

        #compute the output
        #\hat{x} = x^{*} - d\hat{x}
        duOutput = self.inputLayer - previousLayer

        #compute vgg penultimate layer post-activation outputs when the vgg subnet is stimulated with duOutput and the benign sample
        # inputList = [self.inputLayer, self.inputLayer]
        # duList = [duOutput, duOutput]
        vggOutputBenign, vggPenultimate = self.ourVGG.model([self.inputLayer, self.inputLayer])
        vggOutputDU, vggPenultimateDU = self.ourVGG.model([duOutput, duOutput])


        #assign output layer as the evaluation of vgg that depends on the (potentially) adv. input
        self.outputLayer = vggOutputDU

        #formulate our loss function
        #define our custom loss function
        #this is L1 norm of the difference between the vggOutput
        def customLossWrapper(vggPenultimateDU, vggPenultimate, penaltyCoeff = self.penaltyCoeff):
            def customLoss(y_true, y_pred):
                return K.sum(K.abs(vggPenultimateDU - vggPenultimate))
            return customLoss
        # optimization details
        if optimizer is None:
            self.sgd = tf.keras.optimizers.Nadam()
            self.reduceLR = None
        elif optimizer == 'SGD':
            def lr_scheduler(epoch):
                return 0.1 * (0.5 ** (epoch // 20))

            self.reduceLR = keras.callbacks.LearningRateScheduler(lr_scheduler)
            self.sgd = tf.keras.optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)

        #set up data augmentation
        self.generator = ImageDataGenerator(featurewise_center=False,  # set input mean to 0 over the dataset
                                            samplewise_center=False,  # set each sample mean to 0
                                            featurewise_std_normalization=False,  # divide inputs by std of the dataset
                                            samplewise_std_normalization=False,  # divide each input by its std
                                            zca_whitening=False,  # apply ZCA whitening
                                            rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
                                            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
                                            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
                                            horizontal_flip=True,  # randomly flip images
                                            vertical_flip=False)

        # convert self.hiddenAdvLayers to a list for the model compilation, ascending order of keys is order of layers
        # outputsList is a list of outputs of the model constructed so that the first entry is the true output (ie prediction) layer
        # and each subsequent (i, i+1)th entries are the pair of hiddenAdvLayer, hiddenBenignLayer activations
        # this is going to be useful for calculating the MAE between benignly and adversarially induced hidden states
        # outputsList = [self.outputLayer]
        # for curHiddenLayer in range(len(self.ourVGG.hiddenAdvModelOutputs)):
        #     outputsList.append(self.ourVGG.hiddenAdvModelOutputs[curHiddenLayer])
        #     outputsList.append(self.ourVGG.hiddenModelOutputs[curHiddenLayer])

        # instantiate and compile the model
        self.customLossWrapper = customLossWrapper
        self.model = Model(inputs=[self.inputLayer, self.advInputLayer], outputs=[self.outputLayer], name='hlrgd_vgg16')
        self.model.compile(loss=customLossWrapper(vggPenultimateDU, vggPenultimate, self.penaltyCoeff), metrics=['acc'], optimizer=self.sgd)

        # double check weight trainability bug
        allVars = self.model.variables
        trainableVars = self.model.trainable_variables
        allVarNames = [self.model.variables[i].name for i in range(len(self.model.variables))]
        trainableVarNames = [self.model.trainable_variables[i].name for i in range(len(self.model.trainable_variables))]
        nonTrainableVars = np.setdiff1d(allVarNames, trainableVarNames)

        if (verbose):
            if (len(nonTrainableVars) > 0):
                print('the following variables are set to non-trainable; ensure that this is correct before publishing!!!!')
            print(nonTrainableVars)
            self.model.summary()

        #set data statistics to default values
        self.mean = 0
        self.stddev = 1


    # handle data augmentation with multiple inputs (example found on
    # https://stackoverflow.com/questions/49404993/keras-how-to-use-fit-generator-with-multiple-inputs),
    # so thanks to Ioannis and Julian
    def multiInputDataGenerator(self, X1, X2, Y, batch_size):
        genX1 = self.generator.flow(X1, Y, batch_size=batch_size)
        genX2 = self.generator.flow(X2, Y, batch_size=batch_size)

        while True:
            X1g = next(genX1)
            X2g = next(genX2)
            yield [X1g[0], X2g[0]], X1g[1]

    #this method trains the HLRGD
    #inputData is a list of training matrices: element 0 is adversarial (i.e., to be denoised), element 1 is the benign input
    def train(self, inputTrainingData, trainingTargets, inputValidationData, validationTargets, training_epochs=1, normed=False, monitor='val_loss',
              patience=defaultPatience, model_path=None, keras_batch_size=None, validation_split=0.1, adversarialOrder=0,
              dataAugmentation=False):
        
        #if a path isn't provided by caller, just use the current time for restoring best weights from fit
        if (model_path is None):
            model_path = os.path.join('/tmp/models/', 'hlrgd_vgg16_'+str(int(round(time.time()*1000))))
         
        #if the data are not normalized, normalize them
        trainingData, validationData = [[],[]], [[],[]]
        if (not normed):
            #don't store stats from the adversarially attacked data
            if (adversarialOrder == 0):
                trainingData[0] = self.normalize(inputTrainingData[0], storeStats=True)
                trainingData[1] = self.normalize(inputTrainingData[1], storeStats=False)
            else:
                trainingData[1] = self.normalize(inputTrainingData[1], storeStats=True)
                trainingData[0] = self.normalize(inputTrainingData[0], storeStats=False)
            #also don't store stats from validation data
            validationData[0] = self.normalize(inputValidationData[0], storeStats=False)
            validationData[1] = self.normalize(inputValidationData[1], storeStats=False)
        else:
            trainingData[0] = inputTrainingData[0]
            trainingData[1] = inputTrainingData[1]
            validationData[0] = inputValidationData[0]
            validationData[1] = inputValidationData[1]

        #collect our callbacks
        earlyStopper = EarlyStopping(monitor=monitor, mode='min', patience=patience,
                                     verbose=1, min_delta=defaultLossThreshold)
        checkpoint = ModelCheckpoint(model_path, verbose=1, monitor=monitor, save_weights_only=True,
                                     save_best_only=True, mode='auto')
        # history = self.model.fit(trainingData, trainingTargets, epochs=training_epochs, batch_size=keras_batch_size,
        #                          validation_split=validation_split, callbacks=[earlyStopper, self.reduce_lr])
        #handle data augmentation
        if (not dataAugmentation):
            # set up data augmentation
            self.generator = ImageDataGenerator(featurewise_center=False,  # set input mean to 0 over the dataset
                                                samplewise_center=False,  # set each sample mean to 0
                                                featurewise_std_normalization=False,
                                                # divide inputs by std of the dataset
                                                samplewise_std_normalization=False,  # divide each input by its std
                                                zca_whitening=False,  # apply ZCA whitening
                                                # randomly shift images vertically (fraction of total height)
                                                horizontal_flip=False,  # randomly flip images
                                                vertical_flip=False)
            self.generator.fit(trainingData[0])
            history = self.model.fit(self.multiInputDataGenerator(trainingData[0], trainingData[1], trainingTargets, keras_batch_size),
                                     steps_per_epoch=trainingData[0].shape[0] // keras_batch_size,
                                     epochs=training_epochs, validation_data=(validationData, validationTargets),
                                     callbacks=[earlyStopper, checkpoint], verbose=1) #self.reduce_lr
        else:
            # set up data augmentation
            self.generator = ImageDataGenerator(featurewise_center=False,  # set input mean to 0 over the dataset
                                                samplewise_center=False,  # set each sample mean to 0
                                                featurewise_std_normalization=False,
                                                # divide inputs by std of the dataset
                                                samplewise_std_normalization=False,  # divide each input by its std
                                                zca_whitening=False,  # apply ZCA whitening
                                                rotation_range=15,
                                                # randomly rotate images in the range (degrees, 0 to 180)
                                                width_shift_range=0.1,
                                                # randomly shift images horizontally (fraction of total width)
                                                height_shift_range=0.1,
                                                # randomly shift images vertically (fraction of total height)
                                                horizontal_flip=False,  # randomly flip images
                                                vertical_flip=False)
            self.generator.fit(trainingData[0])
            history = self.model.fit(self.multiInputDataGenerator(trainingData[0], trainingData[1], trainingTargets, keras_batch_size),
                                     steps_per_epoch=trainingData[0].shape[0] // keras_batch_size,
                                     epochs=training_epochs, validation_data=(validationData, validationTargets),
                                     callbacks=[earlyStopper, checkpoint], verbose=1) #self.reduce_lr

        self.model.load_weights(model_path)
        loss, acc = history.history['loss'], history.history['val_acc']
        return loss, acc, model_path

    def evaluate(self, inputData, targets, batchSize=None):
        evalData = [self.normalize(inputData[0], storeStats=False), self.normalize(inputData[1], storeStats=False)]
        fullEval = self.model.evaluate(evalData, targets, batch_size=batchSize)
        return fullEval

    #method to read model from the disk
    def readModelFromDisk(self, pathToFile, vggModelLocation):
        #rebuild the model
        self.buildModel(self.input_dimension, self.output_dimension, vggModelLocation, self.number_of_classes,
                        loss_threshold=self.loss_threshold, patience=self.patience, dropout_rate=self.dropoutRate,
                        max_relu_bound=self.max_relu_bound, adv_penalty=self.penaltyCoeff, unprotected=self.unprotected,
                        verbose=False)
        # set the vgg weights
        self.model.load_weights(pathToFile)
        # #read in the picklebox
        pickleBox = pickle.load(open(pathToFile+'_pickle', 'rb'))
        # # self.bottleneckLayer = pickleBox['bottleneckLayer']
        # # self.hiddenEncodingLayer = pickleBox['hiddenEncodingLayer']
        # # self.inputLayer = pickleBox['inputLayer']
        self.mean, self.stddev = pickleBox['scaleMean'], pickleBox['scaleSTD']

        #first read in the inner model
        # self.ourVGG.readModelFromDisk('inner_'+pathToFile)

        # self.chosenActivation = pickleBox['chosenActivation']

        # formulate our loss function
        # define our custom loss function
        # this is L1 norm of the difference between the vggOutput
        # if (not self.unprotected):
        #     def customLossWrapper(benProjs, advProjs, penaltyCoeff = self.penaltyCoeff):
        #         def customLoss(y_true, y_pred):
        #             return K.categorical_crossentropy(y_true, y_pred) + penaltyCoeff*K.sum(K.abs(benProjs - advProjs))/(0.00000001+tf.cast(tf.shape(benProjs)[0], tf.float32))
        #         return customLoss
        # else:#if we are using an unprotected model, don't force the  machine to calculate this too
        #     def customLossWrapper(benProjs, advProjs, penaltyCoeff = self.penaltyCoeff):
        #         def customLoss(y_true, y_pred):
        #             return K.categorical_crossentropy(y_true, y_pred)
        #         return customLoss
        # self.customLossWrapper = customLossWrapper(self.benProjs, self.advProjs)
        # #load the model
        # self.model = load_model(pathToFile, custom_objects={'customLossWrapper': self.customLossWrapper})


    # this routine is used to collect statistics on training data, as well as to preprocess the training data by normalizing
    # i.e. centering and dividing by standard deviation
    def normalize(self, inputData, storeStats=False):
        if (storeStats):
            self.mean = np.mean(inputData)
            self.stddev = np.std(inputData)
        outputData = (inputData - self.mean) / (self.stddev + 0.0000001)
        return outputData

    # routine to get a pointer to the optimizer of this model
    def getOptimizer(self):
        if (self.ourVGG is not None):
            return self.sgd, self.ourVGG.getOptimizer()
        else:
            return self.sgd
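An illustrative training call for the class above (a sketch only: the module-level globals HLVGG, numberOfFilters, defaultPatience, defaultLossThreshold and defaultDropoutRate come from the omitted header, and the weight path and data arrays are placeholders):

hlrgd = HLRGDVGG(input_dimension=(32, 32, 3), output_dimension=10,
                 vggModelLocation='vgg16_hl.h5', number_of_classes=10)
loss, acc, best_path = hlrgd.train([advTrainX, benignTrainX], trainY,
                                   [advValX, benignValX], valY,
                                   training_epochs=10, keras_batch_size=128)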
Example #11
class AGZeroModel:
    def __init__(self,
                 N,
                 batch_size=32,
                 archive_fit_samples=64,
                 use_tpu=None,
                 log_path='logs/tensorboard'):
        self.N = N
        self.batch_size = batch_size

        self.model = None
        self.archive_fit_samples = archive_fit_samples
        self.position_archive = []

        self.tpu_grpc_url = use_tpu
        tpu_name_environ_key = 'TPU_NAME'

        # Check has server got TPU
        if use_tpu is not False and tpu_name_environ_key in os.environ:
            tpu_name = os.environ[tpu_name_environ_key].strip()
            if tpu_name != "":
                self.is_tpu = True
                self.tpu_grpc_url = TPUClusterResolver(
                    tpu=[os.environ[tpu_name_environ_key]]).get_master()
        # TODO write an if condition to validate and resolve the TPU url provided

        self.__loss_functions = [
            'categorical_crossentropy', 'binary_crossentropy'
        ]

        self.model_name = time.strftime('GM{0}-%y%m%dT%H%M%S').format('%02d' %
                                                                      N)
        # print(self.model_name)

        log_path = os.path.join(log_path, self.model_name)
        if not os.path.exists(log_path):
            os.makedirs(log_path)
        self.callback = TensorBoard(log_path)

    def create(self):
        bn_axis = 3

        N = self.N
        position = Input((N, N, 6))
        resnet = ResNet(n_stages=N)
        resnet.create(N, N, 6)
        x = resnet.model(position)

        dist = Conv2D(2, (1, 1))(x)
        dist = BatchNormalization(axis=bn_axis)(dist)
        dist = Activation('relu')(dist)
        dist = Flatten()(dist)
        dist = Dense(N * N + 1, activation='softmax',
                     name='distribution')(dist)

        res = Conv2D(1, (1, 1))(x)
        res = BatchNormalization(axis=bn_axis)(res)
        res = Activation('relu')(res)
        res = Flatten()(res)
        res = Dense(256, activation='relu')(res)
        res = Dense(1, activation='sigmoid', name='result')(res)

        self.model = Model(position, [dist, res])
        self.model.compile(Adam(lr=2e-2), self.__loss_functions)

        self.callback.set_model(self.model)

        # check if TPU available
        if self.tpu_grpc_url is not None:
            self.model = tf.contrib.tpu.keras_to_tpu_model(
                self.model,
                strategy=tf.contrib.tpu.TPUDistributionStrategy(
                    tf.contrib.cluster_resolver.TPUClusterResolver(
                        self.tpu_grpc_url)))

        self.model.summary()

    def fit_game(self, X_positions, result):
        X_posres = []

        for pos, dist in X_positions:
            X_posres.append((pos, dist, result))
            result = -result

        if len(self.position_archive) >= self.archive_fit_samples:
            archive_samples = random.sample(self.position_archive,
                                            self.archive_fit_samples)
        else:
            # initial case
            archive_samples = self.position_archive

        self.position_archive.extend(X_posres)

        # I'm going to some lengths to avoid the potentially overloaded + operator
        X_fit_samples = list(itertools.chain(X_posres, archive_samples))

        self.__fit_model(X_fit_samples, self.batch_size)

    def retrain_position_archive(self, batch_size=None):
        self.__fit_model(self.position_archive,
                         batch_size if batch_size else self.batch_size * 8)

    def reduce_position_archive(self, ratio=0.5):
        try:
            self.position_archive = random.sample(
                self.position_archive, int(len(self.position_archive) * ratio))
        except ValueError:
            # requested sample larger than the archive; keep the archive as-is
            pass

    def __fit_model(self, X_fit_samples, batch_size):
        batch_no = 1
        X, y_dist, y_res = [], [], []

        X_shuffled = random.sample(X_fit_samples, len(X_fit_samples))
        X_shuffled = create_batches(X_shuffled, batch_size)

        for batch in X_shuffled:
            for pos, dist, res in batch:
                X.append(pos)
                y_dist.append(dist)
                y_res.append(float(res) / 2 + 0.5)

            logs = self.model.train_on_batch(
                np.array(X),
                [np.array(y_dist), np.array(y_res)])

            self.write_log(self.__loss_functions, logs, batch_no)

            batch_no += 1
            X, y_dist, y_res = [], [], []

    def write_log(self, names, logs, batch_no):
        for name, value in zip(names, logs):
            summary = tf.Summary()

            summary_value = summary.value.add()
            summary_value.simple_value = value
            summary_value.tag = name

            self.callback.writer.add_summary(summary, batch_no)
            self.callback.writer.flush()

    def predict(self, X_positions):
        dist, res = self.model.predict(X_positions)
        res = np.array([r[0] * 2 - 1 for r in res])
        return [dist, res]

    def save(self, snapshot_id, save_archive=False):
        self.model.save_weights('%s.weights.h5' % (snapshot_id, ))
        if save_archive:
            joblib.dump(self.position_archive,
                        '%s.archive.joblib' % (snapshot_id, ),
                        compress=5)

    def load(self, snapshot_id):
        self.model.load_weights('%s.weights.h5' % (snapshot_id, ))

        pos_fname = '%s.archive.joblib' % (snapshot_id, )
        try:
            self.position_archive = joblib.load(pos_fname)
        except Exception:
            print('Warning: Could not load position archive %s' %
                  (pos_fname, ))

    def unload_pos_archive(self):
        self.position_archive = []

    def load_pos_archive(self, archive_file):
        try:
            print('Attempting to load position archive %s' % (archive_file, ))
            self.position_archive = joblib.load(archive_file)
            print('Successfully loaded position archive %s' % (archive_file, ))
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            print('Warning: Could not load position archive %s' %
                  (archive_file, ))
            return False

    def load_averaged(self, weights, log=None):
        new_weights = []
        loaded_weights = []

        for weight in weights:
            self.model.load_weights(weight)
            loaded_weights.append(self.model.get_weights())
            print("Read weight: {0}".format(weight))
            if log is not None:
                log("Read weight: {0}".format(weight), self.model_name)

        if len(loaded_weights) > 0:
            for weights_list_tuple in zip(*loaded_weights):
                new_weights.append([
                    np.array(weights_).mean(axis=0)
                    for weights_ in zip(*weights_list_tuple)
                ])

            self.model.set_weights(new_weights)
        else:
            print(
                "No weights to load. Initializing the model with random weights!"
            )
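A usage sketch (ResNet and create_batches come from the surrounding project; the snapshot names are illustrative):

net = AGZeroModel(N=19, batch_size=32)
net.create()
net.save('gm-snapshot-001')                          # writes gm-snapshot-001.weights.h5
net.load_averaged(['a.weights.h5', 'b.weights.h5'])  # average several snapshots into one model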
Example #12
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=1)

    # Third callback; save the best model-weights (best validation_accuracy)
    weights_dir = "temp/"
    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=weights_dir,
        save_weights_only=True,
        monitor='val_accuracy',
        mode='max',
        save_best_only=True)

    model.fit(features_train,
              ltr,
              epochs=50,
              batch_size=128,
              validation_data=(features_test, ltt),
              callbacks=[
                  tensorboard_callback, model_checkpoint_callback, cm_callback
              ])

    # Load the temporarily saved best model weights and save the entire model with these weights
    model.load_weights(weights_dir)

    model_dir = 'models/' + MODELNAME
    model.save(model_dir)

    print("Model trained and saved!")

    cc = cc + 1
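Since the final save above writes the whole model (architecture plus the restored best weights), it can later be reloaded without rebuilding anything, e.g.:

restored = tf.keras.models.load_model('models/' + MODELNAME)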
Example #13
def Xception(include_top=True, weights='hasc', input_shape=None, pooling=None, classes=6, classifier_activation='softmax'):
    if input_shape is None:
        input_shape = (256*3, 1)

    if weights in ['hasc', 'HASC'] and include_top and classes != 6:
        raise ValueError('If using `weights` as `"hasc"` with `include_top`'
                         ' as true, `classes` should be 6')

    inputs = layers.Input(shape=input_shape)

    x = layers.Conv1D(32, 3, strides=2, use_bias=False, name='block1_conv1')(inputs)
    x = layers.BatchNormalization(name='block1_conv1_bn')(x)
    x = layers.Activation('relu', name='block1_conv1_act')(x)
    x = layers.Conv1D(64, 3, use_bias=False, name='block1_conv2')(x)
    x = layers.BatchNormalization(name='block1_conv2_bn')(x)
    x = layers.Activation('relu', name='block1_conv2_act')(x)

    residual = layers.Conv1D(
        128, 1, strides=2, padding='same', use_bias=False
    )(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.SeparableConv1D(128, 3, padding='same', use_bias=False, name='block2_sepconv1')(x)
    x = layers.BatchNormalization(name='block2_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block2_sepconv2_act')(x)
    x = layers.SeparableConv1D(128, 3, padding='same', use_bias=False, name='block2_sepconv2')(x)
    x = layers.BatchNormalization(name='block2_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same', name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv1D(
        256, 1, strides=2, padding='same', use_bias=False
    )(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.Activation('relu', name='block3_sepconv1_act')(x)
    x = layers.SeparableConv1D(256, 3, padding='same', use_bias=False, name='block3_sepconv1')(x)
    x = layers.BatchNormalization(name='block3_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block3_sepconv2_act')(x)
    x = layers.SeparableConv1D(256, 3, padding='same', use_bias=False, name='block3_sepconv2')(x)
    x = layers.BatchNormalization(name='block3_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same', name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv1D(728, 1, strides=2, padding='same', use_bias=False)(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.Activation('relu', name='block4_sepconv1_act')(x)
    x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name='block4_sepconv1')(x)
    x = layers.BatchNormalization(name='block4_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block4_sepconv2_act')(x)
    x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name='block4_sepconv2')(x)
    x = layers.BatchNormalization(name='block4_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same', name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = layers.Activation('relu', name=prefix + "_sepconv1_act")(x)
        x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name=prefix + "_sepconv1")(x)
        x = layers.BatchNormalization(name=prefix + "_sepconv1_bn")(x)
        x = layers.Activation('relu', name=prefix + "_sepconv2_act")(x)
        x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name=prefix + "_sepconv2")(x)
        x = layers.BatchNormalization(name=prefix + "_sepconv2_bn")(x)
        x = layers.Activation('relu', name=prefix + "_sepconv3_act")(x)
        x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name=prefix + "_sepconv3")(x)

        x = layers.add([x, residual])

    residual = layers.Conv1D(1024, 1, strides=2, padding='same', use_bias=False)(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.Activation('relu', name='block13_sepconv1_act')(x)
    x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name='block13_sepconv1')(x)
    x = layers.BatchNormalization(name='block13_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block13_sepconv2_act')(x)
    x = layers.SeparableConv1D(1024, 3, padding='same', use_bias=False, name='block13_sepconv2')(x)
    x = layers.BatchNormalization(name='block13_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same')(x)
    x = layers.add([x, residual])

    x = layers.SeparableConv1D(1536, 3, padding='same', use_bias=False, name='block14_sepconv1')(x)
    x = layers.BatchNormalization(name='block14_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv1_act')(x)

    x = layers.SeparableConv1D(2048, 3, padding='same', use_bias=False, name='block14_sepconv2')(x)
    x = layers.BatchNormalization(name='block14_sepconv2_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv2_act')(x)

    x = layers.GlobalAveragePooling1D(name='avg_pool')(x)
    y = layers.Dense(classes, activation=classifier_activation,
                     name='predictions')(x)

    model = Model(inputs, y)

    if weights is not None:
        if weights in ['hasc', "HASC"]:
            weights = 'weights/xception/xception_hasc_weights_{}_{}.hdf5'.format(int(input_shape[0]),
                                                                                 int(input_shape[1]))

        # initialize from the 'hasc' pre-trained weights or a user-supplied weights file
        if os.path.exists(weights):
            print("Load weights from {}".format(weights))
            model.load_weights(weights)
        else:
            print("Not exist weights: {}".format(weights))

    # when the top is not included
    if not include_top:
        if pooling is None:
            # remove the top (classification head)
            model = Model(inputs=model.input, outputs=model.layers[-3].output)
        elif pooling == 'avg':
            y = layers.GlobalAveragePooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        elif pooling == 'max':
            y = layers.GlobalMaxPooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        else:
            print("Not exist pooling option: {}".format(pooling))
            model = Model(inputs=model.input, outputs=model.layers[-3].output)

    return model
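A usage sketch for the builder above (random initialization, so no weights file is needed):

# Default input_shape is (256*3, 1); 'avg' pooling yields a 2048-dim embedding.
feat = Xception(include_top=False, weights=None, pooling='avg')
print(feat.output_shape)  # (None, 2048)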
Example #14
class SRGAN():
    def __init__(self, quantize_flag):
        self.quantize_flag = quantize_flag
        # Input shape
        self.channels = 3
        self.lr_height = 56  # Low resolution height
        self.lr_width = 56  # Low resolution width
        self.lr_shape = (self.lr_height, self.lr_width, self.channels)
        self.hr_height = self.lr_height * 4  # High resolution height
        self.hr_width = self.lr_width * 4  # High resolution width
        self.hr_shape = (self.hr_height, self.hr_width, self.channels)

        # Number of residual blocks in the generator
        self.n_residual_blocks = 16

        optimizer = Adam(0.000002, 0.5)

        # We use a pre-trained VGG19 model to extract image features from the real
        # and the generated high resolution images and minimize the mse between them
        self.vgg = self.build_vgg()
        self.vgg.trainable = False
        self.vgg.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])

        # Configure data loader
        self.dataset_name = 'img_align_celeba'
        self.test_dataset_name = 'test_images'
        self.data_loader = DataLoader(dataset_name=self.dataset_name,
                                      img_res=(self.hr_height, self.hr_width))
        self.test_data_loader = DataLoader(dataset_name=self.test_dataset_name,
                                           img_res=(self.hr_height,
                                                    self.hr_width))

        # Calculate output shape of D (PatchGAN)
        patch = int(self.hr_height / 2**4)
        self.disc_patch = (patch, patch, 1)

        # Number of filters in the first layer of G and D
        self.gf = 64
        self.df = 64

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='mse',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # High res. and low res. images
        img_hr = Input(shape=self.hr_shape)
        img_lr = Input(shape=self.lr_shape)

        # Generate high res. version from low res.
        fake_hr = self.generator(img_lr)

        # Extract image features of the generated img
        fake_features = self.vgg(fake_hr)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # Discriminator determines validity of generated high res. images
        validity = self.discriminator(fake_hr)

        self.combined = Model([img_lr, img_hr], [validity, fake_features])
        self.combined.compile(loss=['binary_crossentropy', 'mse'],
                              loss_weights=[1e-3, 1],
                              optimizer=optimizer)

    def build_vgg(self):
        """
        Builds a pre-trained VGG19 model that outputs image features extracted at the
        third block of the model
        """
        vgg = VGG19(weights="imagenet")
        # Use the output of the last conv. layer in block 3 (layers[9] == 'block3_conv3')
        # See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py
        # Assigning to `vgg.outputs` has no effect when the model is called,
        # so build an explicit sub-model instead.
        vgg = Model(vgg.input, vgg.layers[9].output)

        img = Input(shape=self.hr_shape)

        # Extract image features
        img_features = vgg(img)

        return Model(img, img_features)

    def build_generator(self):
        def residual_block(layer_input, filters):
            """Residual block described in paper"""
            d = Conv2D(filters,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation='relu')(layer_input)
            d = BatchNormalization(momentum=0.8)(d)
            d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
            d = BatchNormalization(momentum=0.8)(d)
            d = Add()([d, layer_input])
            return d

        def deconv2d(layer_input):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(256,
                       kernel_size=3,
                       strides=1,
                       padding='same',
                       activation="relu")(u)
            return u

        # Low resolution image input
        img_lr = Input(shape=self.lr_shape)

        # Pre-residual block
        c1 = Conv2D(
            64,
            kernel_size=9,
            strides=1,
            padding='same',
            activation='relu',
        )(img_lr)

        # Propagate through residual blocks
        r = residual_block(c1, self.gf)
        for _ in range(self.n_residual_blocks - 1):
            r = residual_block(r, self.gf)

        # Post-residual block
        c2 = Conv2D(64, kernel_size=3, strides=1, padding='same')(r)
        c2 = BatchNormalization(momentum=0.8)(c2)
        c2 = Add()([c2, c1])

        # Upsampling
        u1 = deconv2d(c2)
        u2 = deconv2d(u1)

        # Generate high resolution output
        gen_hr = Conv2D(self.channels,
                        kernel_size=9,
                        strides=1,
                        padding='same',
                        activation='tanh')(u2)

        if self.quantize_flag == "quant":
            return Quantizer.apply_quantization(Model(img_lr, gen_hr),
                                                weight_precision=16,
                                                activation_precision=16)
        elif self.quantize_flag == "sparsity":
            return Sparsity.measure_sparsity(Model(img_lr, gen_hr))
        else:
            return Model(img_lr, gen_hr)

    def build_discriminator(self):
        def d_block(layer_input, filters, strides=1, bn=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=3, strides=strides,
                       padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d

        # Input img
        d0 = Input(shape=self.hr_shape)

        d1 = d_block(d0, self.df, bn=False)
        d2 = d_block(d1, self.df, strides=2)
        d3 = d_block(d2, self.df * 2)
        d4 = d_block(d3, self.df * 2, strides=2)
        d5 = d_block(d4, self.df * 4)
        d6 = d_block(d5, self.df * 4, strides=2)
        d7 = d_block(d6, self.df * 8)
        d8 = d_block(d7, self.df * 8, strides=2)

        d9 = Dense(self.df * 16)(d8)
        d10 = LeakyReLU(alpha=0.2)(d9)
        validity = Dense(1, activation='sigmoid')(d10)

        return Model(d0, validity)

    def train(self,
              epochs,
              batch_size=1,
              sample_interval=50,
              start_from_ckp_number=None):

        start_time = datetime.datetime.now()

        # Set up TensorBoard
        logdir = "logs/scalars/" + datetime.datetime.now().strftime(
            "%Y%m%d-%H%M%S")
        file_writer = tf.summary.create_file_writer(logdir + "/metrics")
        file_writer.set_as_default()

        # Restore checkpoint if present
        start_epoch = 0
        if start_from_ckp_number is not None:
            start_epoch = start_from_ckp_number
            self.generator.load_weights(
                './saved_models/generator_ckp_epoch_{}'.format(
                    start_from_ckp_number))
            self.discriminator.load_weights(
                './saved_models/discriminator_ckp_epoch_{}'.format(
                    start_from_ckp_number))
            self.combined.load_weights(
                './saved_models/combined_ckp_epoch_{}'.format(
                    start_from_ckp_number))

        callbacks = [
            TensorBoard(log_dir=r"./logs/tensorboard",
                        histogram_freq=1,
                        write_graph=True,
                        write_images=False,
                        update_freq='epoch',
                        profile_batch=0,
                        embeddings_freq=0,
                        embeddings_metadata=None)
        ]

        for epoch in range(start_epoch, epochs):

            # ----------------------
            #  Train Discriminator
            # ----------------------

            # Sample images and their conditioning counterparts
            imgs_hr, imgs_lr = self.data_loader.load_data(batch_size)

            # From low res. image generate high res. version
            fake_hr = self.generator.predict(imgs_lr)

            valid = np.ones((batch_size, ) + self.disc_patch)
            fake = np.zeros((batch_size, ) + self.disc_patch)

            # Train the discriminators (original images = real / generated = Fake)
            d_loss_real = self.discriminator.train_on_batch(imgs_hr, valid)
            d_loss_fake = self.discriminator.train_on_batch(fake_hr, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ------------------
            #  Train Generator
            # ------------------

            # Sample images and their conditioning counterparts
            imgs_hr, imgs_lr = self.data_loader.load_data(batch_size)

            # The generators want the discriminators to label the generated images as real
            valid = np.ones((batch_size, ) + self.disc_patch)

            # Extract ground truth image features using pre-trained VGG19 model
            image_features = self.vgg.predict(imgs_hr)

            # Train the generators
            try:
                self.combined.fit([imgs_lr, imgs_hr], [valid, image_features],
                                  epochs=epoch + 1,
                                  batch_size=len(imgs_lr),
                                  initial_epoch=epoch,
                                  callbacks=callbacks)

                #g_loss = self.combined.train_on_batch([imgs_lr, imgs_hr], [valid, image_features])
            except Exception as e:
                print("Something went wrong with this batch: {}".format(e))

            elapsed_time = datetime.datetime.now() - start_time
            # Plot the progress
            if epoch % 10 == 0:
                print("%d time: %s" % (epoch, elapsed_time))

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_images(epoch, batch_size)

    def sample_images(self, epoch, batch_size):
        os.makedirs('images/%s' % self.test_dataset_name, exist_ok=True)

        # Get test data
        # imgs_hr, imgs_lr = self.data_loader.load_data(batch_size=2, is_testing=True)
        imgs_hr_a, imgs_lr_a = self.test_data_loader.load_data(
            batch_size=batch_size, is_testing=True)

        # Make prediction
        fake_hr_a = self.generator.predict(imgs_lr_a)
        image_idx = 0

        for imgs_lr, fake_hr, imgs_hr in zip(imgs_lr_a, fake_hr_a, imgs_hr_a):
            # # Rescale images 0 - 1
            imgs_lr = 0.5 * imgs_lr + 0.5
            fake_hr = 0.5 * fake_hr + 0.5
            imgs_hr = 0.5 * imgs_hr + 0.5

            imgs_hr = imgs_hr.reshape(self.hr_shape)
            scipy.misc.toimage(imgs_hr).save(
                'images/{}/batch_{}_idx_{}_original_highres.png'.format(
                    self.test_dataset_name,
                    epoch,
                    image_idx,
                ))

            # Save lr version (if not present yet)
            imgs_lr = imgs_lr.reshape(self.lr_shape)
            scipy.misc.toimage(imgs_lr).save(
                'images/{}/batch_{}_idx_{}_original_lowres.png'.format(
                    self.test_dataset_name,
                    epoch,
                    image_idx,
                ))

            # Save generated image
            fake_hr = fake_hr.reshape(self.hr_shape)
            scipy.misc.toimage(fake_hr).save(
                'images/{}/batch_{}_idx_{}_generated_highres.png'.format(
                    self.test_dataset_name,
                    epoch,
                    image_idx,
                ))

            image_idx = image_idx + 1
        # Compute mean squared error
        mse = np.mean((np.array(imgs_hr_a, dtype=np.float32) -
                       np.array(fake_hr_a, dtype=np.float32))**2)

        # Compute psnr
        if mse == 0:
            # avoid (improbable) division by 0
            psnr = 1000000
        else:
            max_pixel = 1.0
            psnr = 20 * np.log10(max_pixel / np.sqrt(mse))

        # save MSE and PSNR to tensorboard
        tf.summary.scalar('MSE', data=mse, step=epoch)
        tf.summary.scalar('PSNR', data=psnr, step=epoch)

        for index, layer in enumerate(self.generator.layers):
            try:
                sparsity = layer.get_weights()[4]
                tf.summary.scalar('Sparsity_{}'.format(index),
                                  data=sparsity,
                                  step=epoch)
            except IndexError:
                pass

        # save the generator, discriminator, and combined model weights
        self.generator.save_weights(
            './saved_models/generator_ckp_epoch_{}'.format(epoch))
        self.discriminator.save_weights(
            './saved_models/discriminator_ckp_epoch_{}'.format(epoch))
        self.combined.save_weights(
            './saved_models/combined_ckp_epoch_{}'.format(epoch))
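The PSNR computed in sample_images above is the standard formula for images rescaled to [0, 1]: PSNR = 20 * log10(max_pixel / sqrt(MSE)). A standalone sketch with illustrative values (the helper name psnr is ours, not from the snippet):

import numpy as np

def psnr(reference, estimate, max_pixel=1.0):
    mse = np.mean((np.asarray(reference, dtype=np.float32) -
                   np.asarray(estimate, dtype=np.float32)) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 20 * np.log10(max_pixel / np.sqrt(mse))

a = np.random.rand(8, 8, 3).astype(np.float32)
print(psnr(a, np.clip(a + 0.01, 0.0, 1.0)))  # roughly 40 dB for ~0.01 error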
Example #15

model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy')

checkpoint_path = "model-1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3, restore_best_weights=True)
cp_callback = ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=1)
initial_epoch = 0

if not os.path.exists(checkpoint_path + ".index"):
    print("No model saved.")
else:
    latest = tf.train.latest_checkpoint(checkpoint_dir)
    model.load_weights(latest)
    print("Model loaded.")


history=model.fit([x_tr,y_tr[:,:-1]], 
                y_tr.reshape(y_tr.shape[0],y_tr.shape[1], 1)[:,1:],
                epochs=25,
                initial_epoch=initial_epoch,
                callbacks=[es, cp_callback],
                batch_size=128,
                validation_data=([x_val,y_val[:,:-1]],
                y_val.reshape(y_val.shape[0],y_val.shape[1], 1)[:,1:]))


reverse_target_word_index=y_tokenizer.index_word 
reverse_source_word_index=x_tokenizer.index_word 
target_word_index=y_tokenizer.word_index
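A self-contained sketch of the resume pattern above (toy model; the path is illustrative): ModelCheckpoint(save_weights_only=True) writes TF-format checkpoints, and tf.train.latest_checkpoint returns the newest one in the directory, or None when nothing has been saved yet.

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
checkpoint_dir = "model-1"  # illustrative
latest = tf.train.latest_checkpoint(checkpoint_dir)  # None if no checkpoint exists
if latest is None:
    print("No model saved.")
else:
    model.load_weights(latest)
    print("Model loaded from {}".format(latest))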
Example #16
                           input_tensor=img_input)
elif C.network == 'resnet152':
    from src.architectures import resnet152 as nn
    base_layers = ResNet152(weights=None,
                            include_top=False,
                            input_tensor=img_input)

with tf.device(device):

    print('Loading weights from {}'.format(options.weights))
    classifier = nn.classifier(base_layers.output, trainable=False)

    optimizer = Adam(learning_rate=0.0001)

    model = Model(inputs=base_layers.input, outputs=classifier)
    model.load_weights(options.weights)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])


def load_binarizer(path):
    global binarizer
    with open(path, 'rb') as f:
        binarizer = pickle.load(f)


def prepare_images(images, target):
    """
    Args:
        images: list containing image paths
Example #17
def Deeplabv3pXception(input_shape=(512, 512, 3),
                       weights='pascal_voc',
                       input_tensor=None,
                       classes=21,
                       OS=16,
                       **kwargs):
    """ Instantiates the Deeplabv3+ architecture
    Optionally loads weights pre-trained
    on PASCAL VOC. This model is available for TensorFlow only,
    and can only be used with inputs following the TensorFlow
    data format `(width, height, channels)`.
    # Arguments
        input_shape: shape of input image. format HxWxC
            PASCAL VOC model was trained on (512,512,3) images
        weights: one of 'pascal_voc' (pre-trained on pascal voc)
            or None (random initialization)
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        classes: number of desired classes. If classes != 21,
            last layer is initialized randomly
        OS: determines input_shape/feature_extractor_output ratio. One of {8,16}.
            Used only for xception backbone.
    # Returns
        A Keras model instance.
    # Raises
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
        ValueError: in case of invalid argument for `weights` or `backbone`
    """

    if weights not in {'pascal_voc', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `pascal_voc` '
                         '(pre-trained on PASCAL VOC)')

    if input_tensor is None:
        img_input = Input(shape=input_shape, name='image_input')
    else:
        img_input = input_tensor

    # normalize input image
    img_norm = Lambda(normalize, name='input_normalize')(img_input)

    # backbone body for feature extract
    x, skip_feature, backbone_len = Xception_body(img_norm, OS)

    # ASPP block
    x = ASPP_block(x, OS)

    # Deeplabv3+ decoder for feature projection
    x = Decoder_block(x, skip_feature)

    # Final prediction conv block
    x = DeeplabConv2D(classes, (1, 1), padding='same',
                      name='logits_semantic')(x)
    x = Lambda(img_resize,
               arguments={
                   'size': (input_shape[0], input_shape[1]),
                   'mode': 'bilinear'
               },
               name='pred_resize')(x)
    x = Reshape((input_shape[0] * input_shape[1], classes))(x)
    x = Softmax(name='Predictions/Softmax')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    #if input_tensor is not None:
    #inputs = get_source_inputs(input_tensor)
    #else:
    #inputs = img_input

    model = Model(img_input, x, name='deeplabv3p_xception')

    # load weights
    if weights == 'pascal_voc':
        weights_path = get_file(
            'deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',
            WEIGHTS_PATH_X,
            cache_subdir='models')
        model.load_weights(weights_path, by_name=True)
    return model, backbone_len
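A hedged usage sketch, assuming the definitions above are in scope; weights=None skips the pretrained download, and note that the function returns a (model, backbone_len) tuple rather than a bare model:

model, backbone_len = Deeplabv3pXception(input_shape=(512, 512, 3),
                                         weights=None,
                                         classes=21,
                                         OS=16)
model.summary()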
Example #18
class dense2DkernelCNN(denseCNN):
    def __init__(self,name='',weights_f=''):
        self.name=name
        self.pams ={
            'CNN_layer_nodes'  : [8],  #n_filters
            'CNN_kernel_size'  : [3],
            'CNN_padding'      : ['same'],  
            'CNN_pool'         : [False],
            'share_filters'    : True,
            'Dense_layer_nodes': [], #does not include encoded layer
            'encoded_dim'      : 12,
            'shape'            : (4,4,3),
            'channels_first'   : False,
            'arrange'          : [],
            'arrMask'          : [],
            'calQMask'         : [],
            'n_copy'           : 0,      # no. of copy for hi occ datasets
            'loss'             : '',
            'optimizer'       : 'adam',
        }

        self.weights_f =weights_f
        

    def init(self,printSummary=True):
        encoded_dim = self.pams['encoded_dim']

        CNN_layer_nodes   = self.pams['CNN_layer_nodes']
        CNN_kernel_size   = self.pams['CNN_kernel_size']
        CNN_padding       = self.pams['CNN_padding']
        CNN_pool          = self.pams['CNN_pool']
        Dense_layer_nodes = self.pams['Dense_layer_nodes'] #does not include encoded layer
        channels_first    = self.pams['channels_first']
        share_filters      = self.pams['share_filters']

        # fix to one cnn layer for now
        nnodes     =CNN_layer_nodes[0] #8
        CNN_kernel =CNN_kernel_size[0] #3
        CNN_padding=CNN_padding[0]

        inputs = Input(shape=self.pams['shape'], name='input_1')
        x = inputs
        
        x1 = Lambda(lambda x: x[:,:,:,0:1], name='lambda_1')(x)
        x2 = Lambda(lambda x: x[:,:,:,1:2], name='lambda_2')(x)
        x3 = Lambda(lambda x: x[:,:,:,2:3], name='lambda_3')(x)

        if share_filters:
            conv = Conv2D(nnodes, CNN_kernel, activation='relu',padding=CNN_padding, name='conv2d_1')
            x1 = conv(x1)
            x2 = conv(x2)
            x3 = conv(x3)
        else:
            x1 = Conv2D(nnodes, CNN_kernel, activation='relu',padding=CNN_padding)(x1)
            x2 = Conv2D(nnodes, CNN_kernel, activation='relu',padding=CNN_padding)(x2)
            x3 = Conv2D(nnodes, CNN_kernel, activation='relu',padding=CNN_padding)(x3)

        if CNN_pool[0]:
            x1 = MaxPooling2D((2, 2), padding='same')(x1)
            x2 = MaxPooling2D((2, 2), padding='same')(x2)
            x3 = MaxPooling2D((2, 2), padding='same')(x3)

        conv_vol_slice = K.int_shape(x1)
        x1 = Flatten(name='flatten_1')(x1)
        x2 = Flatten(name='flatten_2')(x2)
        x3 = Flatten(name='flatten_3')(x3)

        x = [x1,x2,x3]
        x = Concatenate(axis=-1,name='concat_1')(x)

        conv_vol = K.int_shape(x)

        encodedLayer = Dense(encoded_dim, activation='relu',name='encoded_vector')(x)

        # Instantiate Encoder Model
        self.encoder = Model(inputs, encodedLayer, name='encoder')
        if printSummary:
            self.encoder.summary()

        encoded_inputs = Input(shape=(encoded_dim,), name='decoder_input')
        x = encoded_inputs

        x = Dense(conv_vol[1], activation='relu',name='dense_2')(x)

        x = Reshape((conv_vol_slice[1],conv_vol_slice[2],nnodes,3,),name='reshape_1')(x)
        
        x1 = Lambda(lambda x: x[:,:,:,:,0],  name='lambda_4')(x)
        x2 = Lambda(lambda x: x[:,:,:,:,1],  name='lambda_5')(x)
        x3 = Lambda(lambda x: x[:,:,:,:,2],  name='lambda_6')(x)

        if CNN_pool[0]:
            x1 = UpSampling2D((2, 2))(x1)
            x2 = UpSampling2D((2, 2))(x2)
            x3 = UpSampling2D((2, 2))(x3)

        ## Use n filters here
        conv_t = Conv2DTranspose(nnodes, CNN_kernel, activation='relu', padding=CNN_padding, name='conv2d_transpose_1')
        x1 = conv_t(x1)
        x2 = conv_t(x2)
        x3 = conv_t(x3)
        ## Always use 1 filter
        conv_t2 = Conv2DTranspose(1, CNN_kernel, activation=None, padding='same', name='conv2d_transpose_2')
        x1 = conv_t2(x1)
        x2 = conv_t2(x2)
        x3 = conv_t2(x3)

        x = [x1,x2,x3]
        x = Concatenate(axis=-1,name='concat_2')(x)

        outputs = Activation('sigmoid', name='decoder_output')(x)

        self.decoder = Model(encoded_inputs, outputs, name='decoder')
        if printSummary:
            self.decoder.summary()

        self.autoencoder = Model(inputs, self.decoder(self.encoder(inputs)), name='autoencoder')
        if printSummary:
            self.autoencoder.summary()

        self.compileModels()

        CNN_layers=''
        if len(CNN_layer_nodes)>0:
            CNN_layers += '_Conv'
            for i,n in enumerate(CNN_layer_nodes):
                CNN_layers += f'_{n}x{CNN_kernel_size[i]}'
                if CNN_pool[i]:
                    CNN_layers += 'pooled'
        Dense_layers = ''
        if len(Dense_layer_nodes)>0:
            Dense_layers += '_Dense'
            for n in Dense_layer_nodes:
                Dense_layers += f'_{n}'

        self.name = f'Autoencoded{CNN_layers}{Dense_layers}_Encoded_{encoded_dim}'
        
        if self.weights_f != '':
            self.autoencoder.load_weights(self.weights_f)
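When share_filters is set, the encoder above applies a single Conv2D object to all three channel slices, so the slices share one kernel and one bias. A minimal standalone check of that weight-sharing behavior:

import tensorflow as tf
from tensorflow.keras import layers

conv = layers.Conv2D(8, 3, padding='same')
a = tf.zeros((1, 4, 4, 1))
b = tf.zeros((1, 4, 4, 1))
ya, yb = conv(a), conv(b)           # same layer object, same weights for both calls
print(len(conv.trainable_weights))  # 2: one shared kernel plus one shared bias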
Example #19
# Decoder
# -------
# The decoder takes one word at a time as input.
decoderX = Input(batch_shape=(None, 1))
decEMB = wordEmbedding(decoderX)
decLSTM1 = LSTM(LSTM_HIDDEN, return_sequences=True, return_state=True)
decLSTM2 = LSTM(LSTM_HIDDEN, return_sequences=True, return_state=True)
dy1, _, _ = decLSTM1(decEMB, initial_state=[eh1, ec1])
dy2, _, _ = decLSTM2(dy1, initial_state=[eh2, ec2])
decOutput = TimeDistributed(Dense(VOCAB_SIZE, activation='softmax'))
outputY = decOutput(dy2)

# Model
# -----
model = Model([encoderX, decoderX], outputY)
model.load_weights(MODEL_PATH)

# Model for chat-time inference
model_enc = Model(encoderX, [eh1, ec1, eh2, ec2])

ih1 = Input(batch_shape=(None, LSTM_HIDDEN))
ic1 = Input(batch_shape=(None, LSTM_HIDDEN))
ih2 = Input(batch_shape=(None, LSTM_HIDDEN))
ic2 = Input(batch_shape=(None, LSTM_HIDDEN))

dec_output1, dh1, dc1 = decLSTM1(decEMB, initial_state=[ih1, ic1])
dec_output2, dh2, dc2 = decLSTM2(dec_output1, initial_state=[ih2, ic2])

dec_output = decOutput(dec_output2)
model_dec = Model([decoderX, ih1, ic1, ih2, ic2],
                  [dec_output, dh1, dc1, dh2, dc2])
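A hedged sketch of how these two inference models are typically driven at chat time (greedy decoding; greedy_decode, question_ids, max_len, START_TOKEN, and END_TOKEN are illustrative assumptions, not names from the snippet):

import numpy as np

def greedy_decode(question_ids, max_len=20, START_TOKEN=1, END_TOKEN=2):
    # Encode the question once, then feed the decoder one token at a
    # time, threading the LSTM states through each step.
    h1, c1, h2, c2 = model_enc.predict(question_ids)
    token = np.array([[START_TOKEN]])
    answer = []
    for _ in range(max_len):
        probs, h1, c1, h2, c2 = model_dec.predict([token, h1, c1, h2, c2])
        next_id = int(np.argmax(probs[0, -1]))
        if next_id == END_TOKEN:
            break
        answer.append(next_id)
        token = np.array([[next_id]])
    return answer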
Example #20
    def build_darknet19(input_image=None, weights=None, top=True):
        # the function to implement the organization layer (thanks to github.com/allanzelener/YAD2K)

        if input_image is None:
            input_image = Input(shape=(None, None, 3))

        def space_to_depth_x2(x):
            return tf.nn.space_to_depth(x, block_size=2)

        # Layer 1
        x = Conv2D(32, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_1',
                   use_bias=False)(input_image)
        x = BatchNormalization(name='norm_1')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

        # Layer 2
        x = Conv2D(64, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_2',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_2')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

        # Layer 3
        x = Conv2D(128, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_3',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_3')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 4
        x = Conv2D(64, (1, 1),
                   strides=(1, 1),
                   padding='same',
                   name='conv_4',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_4')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 5
        x = Conv2D(128, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_5',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_5')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

        # Layer 6
        x = Conv2D(256, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_6',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_6')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 7
        x = Conv2D(128, (1, 1),
                   strides=(1, 1),
                   padding='same',
                   name='conv_7',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_7')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 8
        x = Conv2D(256, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_8',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_8')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

        # Layer 9
        x = Conv2D(512, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_9',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_9')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 10
        x = Conv2D(256, (1, 1),
                   strides=(1, 1),
                   padding='same',
                   name='conv_10',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_10')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 11
        x = Conv2D(512, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_11',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_11')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 12
        x = Conv2D(256, (1, 1),
                   strides=(1, 1),
                   padding='same',
                   name='conv_12',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_12')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 13
        x = Conv2D(512, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_13',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_13')(x)
        x = LeakyReLU(alpha=0.1)(x)

        skip_connection = x

        x = MaxPooling2D(pool_size=(2, 2))(x)

        # Layer 14
        x = Conv2D(1024, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_14',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_14')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 15
        x = Conv2D(512, (1, 1),
                   strides=(1, 1),
                   padding='same',
                   name='conv_15',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_15')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 16
        x = Conv2D(1024, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_16',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_16')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 17
        x = Conv2D(512, (1, 1),
                   strides=(1, 1),
                   padding='same',
                   name='conv_17',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_17')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 18
        x = Conv2D(1024, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_18',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_18')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 19
        x = Conv2D(1024, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_19',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_19')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 20
        x = Conv2D(1024, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_20',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_20')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 21
        skip_connection = Conv2D(64, (1, 1),
                                 strides=(1, 1),
                                 padding='same',
                                 name='conv_21',
                                 use_bias=False)(skip_connection)
        skip_connection = BatchNormalization(name='norm_21')(skip_connection)
        skip_connection = LeakyReLU(alpha=0.1)(skip_connection)
        skip_connection = Lambda(space_to_depth_x2)(skip_connection)

        x = concatenate([skip_connection, x])

        # Layer 22
        x = Conv2D(1024, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   name='conv_22',
                   use_bias=False)(x)
        x = BatchNormalization(name='norm_22')(x)
        x = LeakyReLU(alpha=0.1)(x)

        feature_extractor = Model(input_image, x, name='Full_YOLO_backend')

        if weights == 'imagenet':
            try:
                feature_extractor.load_weights(
                    'models/weights/full_yolo_backend.h5')
            except Exception:
                print(
                    "weights not found or directory does not exist, not loading imagenet weights..."
                )

        if not top:
            return feature_extractor
        else:

            # =================
            # classification Framework
            # =================

            output = Conv2D(kwargs.get('num_classes'),
                            (1, 1))(feature_extractor.output)
            output = GlobalAveragePooling2D()(output)
            output = Softmax()(output)
            model = Model(inputs=feature_extractor.input, outputs=output)

            return model
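The space_to_depth_x2 lambda in the skip connection (Layer 21) rearranges each 2x2 spatial block into channels so the skip path matches the spatial size of the pooled main path. A quick standalone check of the shape arithmetic:

import tensorflow as tf

x = tf.zeros((1, 26, 26, 64))
y = tf.nn.space_to_depth(x, block_size=2)
print(y.shape)  # (1, 13, 13, 256): H and W halve, channels grow 4x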
Example #21
from PIL import Image
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (LSTM, Embedding, TimeDistributed, Dense,
                                     RepeatVector, Activation, Flatten,
                                     Reshape, concatenate, Dropout,
                                     BatchNormalization)
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras import Input, layers
from tensorflow.keras.models import Model
from tensorflow.keras.layers import add
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from utils import *

inputs1 = Input(shape=(OUTPUT_DIM, ))
fe1 = Dropout(0.5)(inputs1)
fe2 = Dense(256, activation='relu')(fe1)
inputs2 = Input(shape=(max_length, ))
se1 = Embedding(vocab_size, embedding_dim, mask_zero=True)(inputs2)
se2 = Dropout(0.5)(se1)
se3 = LSTM(256)(se2)
decoder1 = add([fe2, se3])
decoder2 = Dense(256, activation='relu')(decoder1)
outputs = Dense(vocab_size, activation='softmax')(decoder2)
caption_model = Model(inputs=[inputs1, inputs2], outputs=outputs)
caption_model.load_weights(model_path)
Example #22
def unet():
    inputs = Input((image_row, image_col, image_depth))
    conv11 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conc11 = concatenate([inputs, conv11], axis=3)
    conv12 = Conv2D(32, (3, 3), activation='relu', padding='same')(conc11)
    conc12 = concatenate([inputs, conv12], axis=3)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conc12)

    conv21 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conc21 = concatenate([pool1, conv21], axis=3)
    conv22 = Conv2D(64, (3, 3), activation='relu', padding='same')(conc21)
    conc22 = concatenate([pool1, conv22], axis=3)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conc22)

    conv31 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conc31 = concatenate([pool2, conv31], axis=3)
    conv32 = Conv2D(128, (3, 3), activation='relu', padding='same')(conc31)
    conc32 = concatenate([pool2, conv32], axis=3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conc32)

    conv41 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conc41 = concatenate([pool3, conv41], axis=3)
    conv42 = Conv2D(256, (3, 3), activation='relu', padding='same')(conc41)
    conc42 = concatenate([pool3, conv42], axis=3)
    drop4 = Dropout(0.5)(conv42)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv51 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    conc51 = concatenate([pool4, conv51], axis=3)
    conv52 = Conv2D(512, (3, 3), activation='relu', padding='same')(conc51)
    conc52 = concatenate([pool4, conv52], axis=3)
    drop5 = Dropout(0.5)(conc52)

    up6 = concatenate([
        Conv2DTranspose(256,
                        (2, 2), strides=(2, 2), padding='same')(drop5), conc42
    ],
                      axis=3)
    conv61 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conc61 = concatenate([up6, conv61], axis=3)
    conv62 = Conv2D(256, (3, 3), activation='relu', padding='same')(conc61)
    conc62 = concatenate([up6, conv62], axis=3)

    up7 = concatenate([
        Conv2DTranspose(128,
                        (2, 2), strides=(2, 2), padding='same')(conc62), conv32
    ],
                      axis=3)
    conv71 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conc71 = concatenate([up7, conv71], axis=3)
    conv72 = Conv2D(128, (3, 3), activation='relu', padding='same')(conc71)
    conc72 = concatenate([up7, conv72], axis=3)

    up8 = concatenate([
        Conv2DTranspose(64,
                        (2, 2), strides=(2, 2), padding='same')(conc72), conv22
    ],
                      axis=3)
    conv81 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conc81 = concatenate([up8, conv81], axis=3)
    conv82 = Conv2D(64, (3, 3), activation='relu', padding='same')(conc81)
    conc82 = concatenate([up8, conv82], axis=3)

    up9 = concatenate([
        Conv2DTranspose(32,
                        (2, 2), strides=(2, 2), padding='same')(conc82), conv12
    ],
                      axis=3)
    conv91 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conc91 = concatenate([up9, conv91], axis=3)
    conv92 = Conv2D(32, (3, 3), activation='relu', padding='same')(conc91)
    conc92 = concatenate([up9, conv92], axis=3)

    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conc92)

    model = Model(inputs=[inputs], outputs=[conv10])

    model.summary()

    model.compile(optimizer=Adam(lr=1e-5),
                  loss='binary_crossentropy',
                  metrics=[dice_coef, 'accuracy'])

    pretrained_weights = None

    if (pretrained_weights):
        model.load_weights(pretrained_weights)

    return model
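The compile step above references dice_coef without defining it in this snippet; a common Keras-backend implementation (an assumption, not necessarily this author's exact version) is:

from tensorflow.keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # 2 * |A ∩ B| / (|A| + |B|), smoothed to avoid division by zero
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)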
Example #23
class NAPr:
    def __init__(self,
                 tss_train_file: str = None,
                 tss_test_file: str = None,
                 options: dict = None):
        """ Options """
        self.opts = {"seed": 1,
                     "n_epochs": 100,
                     "n_batch_size": 64,
                     "dropout_rate": 0.2,
                     "eval_size": 0.1,
                     "activation_function": "relu"}
        self.__setSeed()

        if options is not None:
            for key in options.keys():
                self.opts[key] = options[key]

        """ Load data and setup """
        if tss_test_file is not None:
            self.X_train, self.R_train, self.Y_train = self.__loadData(tss_train_file)
            self.X_test, self.R_test, self.Y_test = self.__loadData(tss_test_file)

            self.__oneHotEncoderSetup()
            self.Y_train = np.asarray(
                self.onehot_encoder.transform(self.label_encoder.transform(self.Y_train).reshape(-1, 1)))
            self.Y_test = np.asarray(
                self.onehot_encoder.transform(self.label_encoder.transform(self.Y_test).reshape(-1, 1)))

            self.stdScaler = MinMaxScaler()
            self.stdScaler.fit(self.X_train)
            self.X_train = self.stdScaler.transform(self.X_train)
            self.X_test = self.stdScaler.transform(self.X_test)

            self.stdScaler_res = MinMaxScaler()
            self.stdScaler_res.fit(self.R_train)
            self.R_train = self.stdScaler_res.transform(self.R_train)
            self.R_test = self.stdScaler_res.transform(self.R_test)

            self.X_train = np.concatenate([self.X_train, self.R_train], axis=1)
            self.X_test = np.concatenate([self.X_test, self.R_test], axis=1)

            self.X_train, self.X_val, self.Y_train, self.Y_val = train_test_split(self.X_train,
                                                                                  self.Y_train,
                                                                                  test_size=self.opts["eval_size"],
                                                                                  random_state=self.opts["seed"],
                                                                                  shuffle=True)

            insize = self.X_train.shape[1]
            outsize = len(self.Y_train[0])

            class_inputs = Input(shape=(insize,))
            l0 = Dropout(self.opts["dropout_rate"])(class_inputs)
            bn0 = BatchNormalization()(l0)
            ae_x = Dense(300, activation=self.opts["activation_function"])(bn0)
            bn1 = BatchNormalization()(ae_x)
            l1 = Dropout(self.opts["dropout_rate"])(bn1)
            l1 = Dense(200, activation=self.opts["activation_function"], name="classifier_l1")(l1)
            bn2 = BatchNormalization()(l1)
            l2 = Dropout(self.opts["dropout_rate"])(bn2)
            l2 = Dense(100, activation=self.opts["activation_function"], name="classifier_l2")(l2)
            bn3 = BatchNormalization()(l2)
            l2 = Dropout(self.opts["dropout_rate"])(bn3)
            l3 = Dense(50, activation=self.opts["activation_function"], name="classifier_l3")(l2)
            bn4 = BatchNormalization()(l3)
            l3 = Dropout(self.opts["dropout_rate"])(bn4)
            out = Dense(outsize, activation='softmax', name="classifier")(l3)

            self.model = Model([class_inputs], [out])
            losses = {
                "classifier": "categorical_crossentropy",
            }
            self.model.compile(optimizer='adam', loss=losses, metrics=['accuracy'])

    def train(self, checkpoint_path: str,
              name: str,
              save_results: bool = False):

        event_dict_file = str(checkpoint_path) + "/" + str(name) + "_napr_onehotdict.json"
        with open(str(event_dict_file), 'w') as outfile:
            json.dump(self.one_hot_dict, outfile)

        with open(checkpoint_path + "/" + name + "_napr_model.json", 'w') as f:
            f.write(self.model.to_json())

        ckpt_file = str(checkpoint_path) + "/" + str(name) + "_napr_weights.hdf5"
        checkpoint = ModelCheckpoint(ckpt_file, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
        hist = self.model.fit(self.X_train,
                              self.Y_train,
                              batch_size=self.opts["n_batch_size"],
                              epochs=self.opts["n_epochs"], shuffle=True,
                              validation_data=([self.X_val], [self.Y_val]),
                              callbacks=[self.EvaluationCallback(self.X_test, self.Y_test),
                                         checkpoint])
        joblib.dump(self.stdScaler, str(checkpoint_path) + "/" + str(name) + "_napr_stdScaler.pkl")
        joblib.dump(self.stdScaler_res, str(checkpoint_path) + "/" + str(name) + "_napr_stdScaler_res.pkl")
        if save_results:
            results_file = str(checkpoint_path) + "/" + str(name) + "_napr_results.json"
            with open(str(results_file), 'w') as outfile:
                json.dump(str(hist.history), outfile)

    def __oneHotEncoderSetup(self):
        """ Events to One Hot"""
        events = np.unique(self.Y_train)

        self.label_encoder = LabelEncoder()
        integer_encoded = self.label_encoder.fit_transform(events)
        integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)

        self.onehot_encoder = OneHotEncoder(sparse=False)
        self.onehot_encoder.fit(integer_encoded)

        self.one_hot_dict = {}
        for event in events:
            self.one_hot_dict[event] = list(self.onehot_encoder.transform([self.label_encoder.transform([event])])[0])

    @staticmethod
    def __loadData(file: str):
        x, r, y = [], [], []
        with open(file) as json_file:
            tss = json.load(json_file)
            for sample in tss:
                if sample["label"] is not None:
                    x.append(list(itertools.chain(sample["tss"][0], sample["tss"][1],
                                                  sample["tss"][2])))
                    r.append(list(sample["tss"][3]))
                    y.append(sample["label"])
        return np.array(x), np.array(r), np.array(y)

    def __setSeed(self):
        seed(self.opts["seed"])
        tf.random.set_seed(self.opts["seed"])

    def loadModel(self,
                  path: str,
                  name: str):

        with open(path + "/" + name + "_napr_model.json", 'r') as f:
            self.model = model_from_json(f.read())
        self.model.load_weights(path + "/" + name + "_napr_weights.hdf5")
        with open(path + "/" + name + "_napr_onehotdict.json", 'r') as f:
            self.one_hot_dict = json.load(f)
        self.stdScaler = joblib.load(path + "/" + name + "_napr_stdScaler.pkl")
        self.stdScaler_res = joblib.load(path + "/" + name + "_napr_stdScaler_res.pkl")

    def __intToEvent(self, value: int):
        one_hot = list(np.eye(len(self.one_hot_dict.keys()))[value])
        for k, v in self.one_hot_dict.items():
            if str(v) == str(one_hot):
                return k

    def predict(self, tss: list):
        """
        Predict from a list of TimedStateSamples

        :param tss: list<TimedStateSamples>
        :return: tuple (DREAM-NAP output, translated next event)
        """
        if not isinstance(tss, list) or not isinstance(tss[0], TimedStateSample):
            raise ValueError("Input is not a list with TimedStateSample")

        preds = []
        next_events = []
        for sample in tss:
            features = [list(
                itertools.chain(sample.export()["tss"][0], sample.export()["tss"][1],
                                sample.export()["tss"][2]))]
            features = self.stdScaler.transform(features)
            r = [list(sample.export()["tss"][3])]
            r = self.stdScaler_res.transform(r)
            features = np.concatenate([features, r], axis=1)

            pred = np.argmax(self.model.predict(features), axis=1)
            preds.append(pred[0])
            for p in pred:
                next_events.append(self.__intToEvent(p))
        return preds, next_events

    """ Callback """
    class EvaluationCallback(Callback):

        def __init__(self,
                     X_test: np.ndarray,
                     Y_test: np.ndarray):

            super().__init__()
            self.X_test = X_test
            self.Y_test = Y_test
            self.Y_test_int = np.argmax(self.Y_test, axis=1)

            self.test_accs = []
            self.losses = []

        def on_train_begin(self,
                           logs: dict = {}):
            self.test_accs = []
            self.losses = []

        def on_epoch_end(self,
                         epoch: int,
                         logs: dict = {}):

            y_pred = self.model.predict(self.X_test)
            y_pred = y_pred.argmax(axis=1)

            test_acc = accuracy_score(self.Y_test_int, y_pred, normalize=True)
            test_loss, _ = self.model.evaluate(self.X_test, self.Y_test)

            precision, recall, fscore, _ = precision_recall_fscore_support(self.Y_test_int, y_pred, average='weighted',
                                                                           pos_label=None)
            auc = multiclass_roc_auc_score(self.Y_test_int, y_pred, average="weighted")

            logs['test_acc'] = test_acc
            logs['test_prec_weighted'] = precision
            logs['test_rec_weighted'] = recall
            logs['test_loss'] = test_loss
            logs['test_fscore_weighted'] = fscore
            logs['test_auc_weighted'] = auc

            precision, recall, fscore, support = precision_recall_fscore_support(self.Y_test_int, y_pred,
                                                                                 average='macro', pos_label=None)
            auc = multiclass_roc_auc_score(self.Y_test_int, y_pred, average="macro")
            logs['test_prec_mean'] = precision
            logs['test_rec_mean'] = recall
            logs['test_fscore_mean'] = fscore
            logs['test_auc_mean'] = auc
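A standalone sketch of the label-to-one-hot pipeline used in __oneHotEncoderSetup (the event names are illustrative):

import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder

events = np.array(["order", "pay", "ship"])
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(events).reshape(-1, 1)
onehot_encoder = OneHotEncoder(sparse=False)  # renamed to sparse_output in sklearn >= 1.2
onehot = onehot_encoder.fit_transform(integer_encoded)
print(dict(zip(events, map(list, onehot))))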
Example #24
input_path_4 = "/home/mukesh/Documents/IISc/DATA/4_1"
# import random
# random.shuffle(input_path_0)
test_files_0 = glob.glob(os.path.join(input_path_0, "*.jpeg"))
test_files_1 = glob.glob(os.path.join(input_path_1, "*.jpeg"))
test_files_2 = glob.glob(os.path.join(input_path_2, "*.jpeg"))
test_files_3 = glob.glob(os.path.join(input_path_3, "*.jpeg"))
test_files_4 = glob.glob(os.path.join(input_path_4, "*.jpeg"))

print(len(test_files_0))
print(len(test_files_1))
print(len(test_files_2))
print(len(test_files_3))
print(len(test_files_4))

model2.load_weights('Weights_multi.034-0.312.hdf5')

test_files = []
test_files += test_files_1
test_files += test_files_2
test_files += test_files_3
test_files += test_files_4
print(len(test_files))

import random
random.shuffle(test_files_0)
random.shuffle(test_files)

PT = []
GT = []
predict = []
Example #25
class CommVAE1hot(object):
    """
    in_dim : block length
    latent_dim : encoding dimension (half of number of channel uses)
    h_dim : number of hidden layers
    obj_fn : objective function to optimize over
    n0 : noise power (over all components)
    sigma2 : prior variance (per component)
  """
    def __init__(self,
                 in_dim=None,
                 latent_dim=None,
                 h_dim=None,
                 obj_fn='RBF',
                 n0=1.0,
                 sigma2=1.0):
        self.in_dim = in_dim
        self.latent_dim = latent_dim
        self.n0 = n0
        self.sigma2 = sigma2
        self.h_dim = h_dim
        self.obj_fn = obj_fn

        if self.in_dim and self.latent_dim:
            self.make_model()

    def make_model(self):
        # Input layer
        self.inputs = Input(shape=(self.in_dim, ), name="enc_in")

        # Hidden Layers
        x = self.inputs
        if self.h_dim is not None:
            for (i, d) in enumerate(self.h_dim):
                x = Dense(d, activation='relu', name="enc_l{}".format(i))(x)
        # Mean and Variance
        self.z_mean = Dense(self.latent_dim, name="z_mean")(x)

        # Channel
        self.z = Lambda(self.channel,
                        output_shape=(self.latent_dim, ),
                        name="z")(self.z_mean)

        # Encoder model
        self.encoder = Model(self.inputs, [self.z_mean, self.z],
                             name="encoder")

        # Decoder
        self.latent_inputs = Input(shape=(self.latent_dim, ), name="z_sample")

        # Hidden layers
        x = self.latent_inputs
        if self.h_dim is not None:
            for (i, d) in enumerate(self.h_dim[::-1]):
                x = Dense(d, activation='relu', name="dec_l{}".format(i))(x)
        self.dec_outputs = Dense(self.in_dim,
                                 activation='softmax',
                                 name="decoder_out")(x)

        # Decoder model
        self.decoder = Model(self.latent_inputs,
                             self.dec_outputs,
                             name="decoder")

        # VAE
        self.outputs = self.decoder(self.encoder(self.inputs)[1])
        self.model = Model(self.inputs, self.outputs, name="VAE")

        # Losses
        self.recon_loss = categorical_crossentropy(self.inputs, self.outputs)

        if self.obj_fn == 'AWGN':
            # print( "Model with AWGN ")
            sig_pow = 0.5 * 1.0 / self.sigma2 * K.sum(K.square(self.z_mean),
                                                      axis=-1)
            noise_term = 0.5 * self.latent_dim * (
                (self.n0 / self.latent_dim) / self.sigma2 - 1.0 - K.log(
                    (self.n0 / self.latent_dim) / self.sigma2))
            self.kl_loss = sig_pow + noise_term
        elif self.obj_fn == 'RBF':
            # print( "Model with RBF")
            sig_pow = 0.5 * 1.0 / self.sigma2 * K.sum(K.square(self.z_mean),
                                                      axis=-1)
            noise_term = 0.5 * self.latent_dim * (
                (self.n0 / self.latent_dim) / self.sigma2 - 1.0 - K.log(
                    (self.n0 / self.latent_dim) / self.sigma2))
            rbf_term = K.log(1.0 + 0.5 * self.latent_dim / self.n0 * sig_pow)
            self.kl_loss = sig_pow + noise_term - rbf_term
        else:
            raise NotImplementedError("Unknown obj_fn: {}".format(self.obj_fn))

        self.vae_loss = K.mean(self.recon_loss + self.kl_loss)

        self.model.add_loss(self.vae_loss)

        self.model.compile(optimizer='adam')



    def channel(self, zMean):
        batch = K.shape(zMean)[0]
        #     dims = K.shape( zMean )[1]
        epsilon = K.random_normal(shape=(batch, self.latent_dim))
        return zMean + np.sqrt(self.n0 / self.latent_dim) * epsilon

    def fit(self,
            x_train,
            epochs=10,
            batch_size=128,
            validation_data=None,
            verbose=0,
            callbacks=None):
        train_log = self.model.fit(x_train,
                                   epochs=epochs,
                                   batch_size=batch_size,
                                   validation_data=validation_data,
                                   verbose=verbose,
                                   callbacks=callbacks)
        return train_log.history

    def encode(self, data):
        return self.encoder.predict(data)

    def decode(self, data):
        return self.decoder.predict(data)

    def analysis(self):
        xTest = np.eye(self.in_dim)
        enc_mu, enc_z = self.encode(xTest)
        dec_mu = self.decode(enc_mu)
        dec_z = self.decode(enc_z)

        chDim = self.latent_dim // 2
        f = plt.figure(figsize=(5 * chDim, 9))
        for i in range(chDim):
            ax1 = plt.subplot(2, chDim, i + 1)
            ax1.scatter(enc_mu[:, i],
                        enc_mu[:, i + chDim],
                        c=np.arange(self.in_dim))
            for j in range(self.in_dim):
                ax1.annotate(j, (enc_mu[j, i], enc_mu[j, i + chDim]))
            ax1.set_title("TX Symbols n = {}".format(i + 1))

    def save_model(self, fileprefix):
        with open(fileprefix + ".dil", "wb") as obj:
            dill.dump(
                {
                    'in_dim': self.in_dim,
                    'latent_dim': self.latent_dim,
                    'n0': self.n0,
                    'sigma2': self.sigma2,
                    'h_dim': self.h_dim,
                    'obj_fn': self.obj_fn
                }, obj)
        self.model.save_weights(fileprefix + ".h5")

    def load_model(self, fileprefix):
        with open(fileprefix + ".dil", "rb") as obj:
            config = dill.load(obj)
            self.in_dim = config['in_dim']
            self.latent_dim = config['latent_dim']
            self.n0 = config['n0']
            self.h_dim = config['h_dim']
            self.obj_fn = config['obj_fn']
            if 'sigma2' in config:  # For backward compatibility
                self.sigma2 = config['sigma2']
            else:
                self.sigma2 = 1.0
        self.make_model()
        self.model.load_weights(fileprefix + ".h5")
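The channel method above adds AWGN with total noise power n0 spread evenly over the latent components: z = z_mean + sqrt(n0 / latent_dim) * eps, with eps ~ N(0, I). A NumPy-only sketch of the same operation (the helper name is ours):

import numpy as np

def awgn_channel(z_mean, n0, rng=np.random.default_rng(0)):
    latent_dim = z_mean.shape[-1]
    eps = rng.standard_normal(z_mean.shape)
    return z_mean + np.sqrt(n0 / latent_dim) * eps

z = np.zeros((1000, 2))
print(awgn_channel(z, n0=1.0).var())  # per-component variance ≈ n0 / latent_dim = 0.5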
Example #26
def NASNet(input_shape=None,
           penultimate_filters=4032,
           nb_blocks=6,
           stem_filters=96,
           initial_reduction=True,
           skip_reduction_layer_input=True,
           use_auxiliary_branch=False,
           filters_multiplier=2,
           dropout=0.5,
           weight_decay=5e-5,
           include_top=True,
           weights=None,
           input_tensor=None,
           pooling=None,
           classes=1000,
           default_size=None,
           activation='softmax'):
    """Instantiates a NASNet architecture.
    Note that only TensorFlow is supported for now,
    therefore it only works with the data format
    `image_data_format='channels_last'` in your Keras config
    at `~/.keras/keras.json`.

    # Arguments
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(331, 331, 3)` for NASNetLarge or
            `(224, 224, 3)` for NASNetMobile).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        penultimate_filters: number of filters in the penultimate layer.
            NASNet models use the notation `NASNet (N @ P)`, where:
                -   N is the number of blocks
                -   P is the number of penultimate filters
        nb_blocks: number of repeated blocks of the NASNet model.
            NASNet models use the notation `NASNet (N @ P)`, where:
                -   N is the number of blocks
                -   P is the number of penultimate filters
        stem_filters: number of filters in the initial stem block
        initial_reduction: Whether to perform the reduction step at the
            beginning of the network. Set to `False` for CIFAR models.
        skip_reduction_layer_input: Determines whether to skip the reduction layers
            when calculating the previous layer to connect to.
        use_auxiliary_branch: Whether to use the auxiliary branch during
            training or evaluation.
        filters_multiplier: controls the width of the network.
            - If `filters_multiplier` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `filters_multiplier` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `filters_multiplier` = 1, default number of filters from the paper
                 are used at each layer.
        dropout: dropout rate
        weight_decay: l2 regularization weight
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: `None` (random initialization) or
            `imagenet` (ImageNet weights)
        input_tensor: optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
        default_size: specifies the default image size of the model
        activation: Type of activation at the top layer.
            Can be one of 'softmax' or 'sigmoid'.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """
    if K.backend() != 'tensorflow':
        raise RuntimeError('Only the TensorFlow backend is currently supported, '
                           'as other backends do not support '
                           'separable convolutions.')

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top` '
                         'as true, `classes` should be 1000')

    if default_size is None:
        default_size = 331

    # Determine proper input shape and default size.
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=default_size,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top or weights)

    if K.image_data_format() != 'channels_last':
        warnings.warn('The NASNet family of models is only available '
                      'for the input data format "channels_last" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'data format "channels_first" (channels, width, height).'
                      ' You should set `image_data_format="channels_last"` '
                      'in your Keras config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    assert penultimate_filters % 24 == 0, "`penultimate_filters` needs to be " \
                                          "divisible by 24."

    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    filters = penultimate_filters // 24
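    # With the default `filters_multiplier=2`, the final group of normal cells
    # runs at `filters * 4` channels, and in this implementation a normal cell
    # concatenates six branches, so the penultimate layer carries
    # 6 * 4 * filters = 24 * filters channels; hence the divisibility check.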

    if initial_reduction:
        x = Conv2D(stem_filters, (3, 3), strides=(2, 2), padding='valid',
                   use_bias=False, name='stem_conv1', kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(img_input)
    else:
        x = Conv2D(stem_filters, (3, 3), strides=(1, 1), padding='same', use_bias=False,
                   name='stem_conv1', kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(img_input)

    x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                           name='stem_bn1')(x)

    p = None
    if initial_reduction:  # imagenet / mobile mode
        x, p = _reduction_A(x, p, filters // (filters_multiplier ** 2), weight_decay,
                            id='stem_1')
        x, p = _reduction_A(x, p, filters // filters_multiplier, weight_decay,
                            id='stem_2')

    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters, weight_decay, id='%d' % i)

    x, p0 = _reduction_A(x, p, filters * filters_multiplier, weight_decay,
                         id='reduce_%d' % nb_blocks)

    p = p0 if not skip_reduction_layer_input else p

    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters * filters_multiplier, weight_decay,
                         id='%d' % (nb_blocks + i + 1))

    auxiliary_x = None
    if not initial_reduction:  # CIFAR mode
        if use_auxiliary_branch:
            auxiliary_x = _add_auxiliary_head(x, classes, weight_decay, pooling,
                                              include_top, activation)

    x, p0 = _reduction_A(x, p, filters * filters_multiplier ** 2, weight_decay,
                         id='reduce_%d' % (2 * nb_blocks))

    if initial_reduction:  # imagenet / mobile mode
        if use_auxiliary_branch:
            auxiliary_x = _add_auxiliary_head(x, classes, weight_decay, pooling,
                                              include_top, activation)

    p = p0 if not skip_reduction_layer_input else p

    for i in range(nb_blocks):
        x, p = _normal_A(x, p, filters * filters_multiplier ** 2, weight_decay,
                         id='%d' % (2 * nb_blocks + i + 1))

    x = Activation('relu')(x)

    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dropout(dropout)(x)
        x = Dense(classes, activation=activation,
                  kernel_regularizer=l2(weight_decay), name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    if use_auxiliary_branch:
        model = Model(inputs, [x, auxiliary_x], name='NASNet_with_auxiliary')
    else:
        model = Model(inputs, x, name='NASNet')

    # load weights
    if weights == 'imagenet':
        if default_size == 224:  # mobile version
            if include_top:
                if use_auxiliary_branch:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_WITH_AUXULARY
                    model_name = 'nasnet_mobile_with_aux.h5'
                else:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH
                    model_name = 'nasnet_mobile.h5'
            else:
                if use_auxiliary_branch:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_WITH_AUXULARY_NO_TOP
                    model_name = 'nasnet_mobile_with_aux_no_top.h5'
                else:
                    weight_path = NASNET_MOBILE_WEIGHT_PATH_NO_TOP
                    model_name = 'nasnet_mobile_no_top.h5'

            weights_file = get_file(model_name, weight_path, cache_subdir='models')
            model.load_weights(weights_file, by_name=True)

        elif default_size == 331:  # large version
            if include_top:
                if use_auxiliary_branch:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_WITH_auxiliary
                    model_name = 'nasnet_large_with_aux.h5'
                else:
                    weight_path = NASNET_LARGE_WEIGHT_PATH
                    model_name = 'nasnet_large.h5'
            else:
                if use_auxiliary_branch:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_WITH_auxiliary_NO_TOP
                    model_name = 'nasnet_large_with_aux_no_top.h5'
                else:
                    weight_path = NASNET_LARGE_WEIGHT_PATH_NO_TOP
                    model_name = 'nasnet_large_no_top.h5'

            weights_file = get_file(model_name, weight_path, cache_subdir='models')
            model.load_weights(weights_file, by_name=True)

        else:
            raise ValueError('ImageNet weights can only be loaded on NASNetLarge '
                             'or NASNetMobile')

    if old_data_format:
        K.set_image_data_format(old_data_format)

    return model
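
# A minimal usage sketch (an added illustration, not part of the original
# example): keyword names follow the docstring above, values mirror the
# paper's NASNetMobile configuration `NASNet (4 @ 1056)`, and the remaining
# parameters are assumed to have defaults (e.g. `use_auxiliary_branch=False`).
import numpy as np

nasnet_mobile = NASNet(input_shape=(224, 224, 3),
                       penultimate_filters=1056,
                       nb_blocks=4,
                       stem_filters=32,
                       initial_reduction=True,
                       include_top=False,
                       weights=None,        # random init; skips the download
                       pooling='avg',
                       default_size=224)
feats = nasnet_mobile.predict(np.zeros((1, 224, 224, 3), dtype='float32'))
print(feats.shape)  # expected: (1, 1056)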
Example #27
0
def ShuffleNet(include_top=True,
               input_tensor=None,
               scale_factor=1.0,
               pooling=None,
               input_shape=None,
               groups=1,
               weights='imagenet',
               num_shuffle_units=[3, 7, 3],
               bottleneck_ratio=0.25,
               classes=1000,
               **kwargs):
    """
    ShuffleNet implementation for Keras 2
    ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices
    Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun
    https://arxiv.org/pdf/1707.01083.pdf
    Note that only TensorFlow is supported for now, therefore it only works
    with the data format `image_data_format='channels_last'` in your Keras
    config at `~/.keras/keras.json`.
    Parameters
    ----------
    include_top: bool(True)
         whether to include the fully-connected layer at the top of the network.
    input_tensor:
        optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model.
    scale_factor:
        scales the number of output channels
    input_shape:
        optional shape tuple, e.g. `(224, 224, 3)`; defaults to the
        standard 224x224 ImageNet size.
    pooling:
        Optional pooling mode for feature extraction
        when `include_top` is `False`.
        - `None` means that the output of the model
            will be the 4D tensor output of the
            last convolutional layer.
        - `avg` means that global average pooling
            will be applied to the output of the
            last convolutional layer, and thus
            the output of the model will be a
            2D tensor.
        - `max` means that global max pooling will
            be applied.
    groups: int
        number of groups per channel
    num_shuffle_units: list([3,7,3])
        number of stages (list length) and the number of ShuffleNet units per
        stage, starting at stage 2 (stage 1 is fixed).
        E.g. with [3, 7, 3]: index 0 gives 3 + 1 ShuffleNet units for stage 2
        (the first unit in each stage differs), index 1 gives 7 + 1 units for
        stage 3, and index 2 gives 3 + 1 units for stage 4.
    bottleneck_ratio:
        the ratio of bottleneck channels to output channels; e.g. a bottleneck
        ratio of 1:4 means the output feature map has 4 times as many channels
        as the bottleneck feature map.
    classes: int(1000)
        number of classes to predict
    Returns
    -------
        A Keras model instance
    References
    ----------
    - [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices]
      (http://www.arxiv.org/pdf/1707.01083.pdf)
    """

    if K.backend() != 'tensorflow':
        raise RuntimeError('Only the TensorFlow backend is currently supported, '
                           'as other backends do not support the required '
                           'operations.')

    name = "ShuffleNet_%.2gX_g%d_br_%.2g_%s" % (
        scale_factor, groups, bottleneck_ratio, "".join(
            [str(x) for x in num_shuffle_units]))

    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=28,
                                      require_flatten=include_top,
                                      data_format=K.image_data_format())

    out_dim_stage_two = {1: 144, 2: 200, 3: 240, 4: 272, 8: 384}
    if groups not in out_dim_stage_two:
        raise ValueError("Invalid number of groups.")

    if pooling not in ['max', 'avg', None]:
        raise ValueError("Invalid value for pooling.")

    if not (float(scale_factor) * 4).is_integer():
        raise ValueError("Invalid value for scale_factor: must be a "
                         "multiple of 0.25 (e.g. 0.5, 0.75, 1.0).")

    # Output channels double at each stage after stage 2.
    exp = np.insert(np.arange(0, len(num_shuffle_units), dtype=np.float32), 0, 0)
    out_channels_in_stage = 2 ** exp
    out_channels_in_stage *= out_dim_stage_two[groups]  # output channels per stage
    out_channels_in_stage[0] = 24  # the first stage always has 24 output channels
    out_channels_in_stage *= scale_factor
    out_channels_in_stage = out_channels_in_stage.astype(int)
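    # Worked example: groups=3, scale_factor=1.0, num_shuffle_units=[3, 7, 3]
    #   exp = [0, 0, 1, 2] -> 2**exp = [1, 1, 2, 4] -> * 240 = [240, 240, 480, 960]
    #   -> first entry fixed to 24 -> [24, 240, 480, 960]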

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        # Note: unlike the builders above, the tensor is used directly here,
        # without the usual `K.is_keras_tensor` check.
        img_input = input_tensor

    # create shufflenet architecture
    x = YoloConv2D(filters=out_channels_in_stage[0],
                   kernel_size=(3, 3),
                   padding='same',
                   use_bias=False,
                   strides=(2, 2),
                   activation="relu",
                   name="conv1")(img_input)
    x = MaxPooling2D(pool_size=(3, 3),
                     strides=(2, 2),
                     padding='same',
                     name="maxpool1")(x)

    # create stages containing shufflenet units beginning at stage 2
    for stage in range(0, len(num_shuffle_units)):
        repeat = num_shuffle_units[stage]
        x = _block(x,
                   out_channels_in_stage,
                   repeat=repeat,
                   bottleneck_ratio=bottleneck_ratio,
                   groups=groups,
                   stage=stage + 2)

    if include_top:
        #x = Dense(units=classes, name="fc")(x)
        #x = Activation('softmax', name='softmax')(x)
        x = GlobalAveragePooling2D(name='global_avg_pool')(x)
        x = Dense(units=classes,
                  activation='softmax',
                  use_bias=True,
                  name='Logits')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D(name='global_avg_pool')(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D(name='global_max_pool')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Model(inputs=inputs, outputs=x, name=name)

    # Load weights.
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            raise ValueError('Weights for "channels_first" format '
                             'are not available.')

        # NOTE: the original block referenced `alpha` and `rows`, MobileNet
        # parameters that are never defined in this function, so it raised a
        # NameError. The (hypothetical) file name below is rebuilt from the
        # ShuffleNet hyperparameters instead.
        model_name = ('shufflenet_weights_tf_dim_ordering_tf_kernels_' +
                      '%.2g_g%d' % (scale_factor, groups) +
                      ('' if include_top else '_no_top') + '.h5')
        weights_path = get_file(model_name,
                                BASE_WEIGHT_PATH + model_name,
                                cache_subdir='models')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
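
# A minimal usage sketch (an added illustration, not part of the original
# example; it assumes the module's helper layers such as `YoloConv2D` and
# `_block` are importable). `weights=None` sidesteps the weight loader.
shufflenet = ShuffleNet(include_top=False,
                        weights=None,
                        scale_factor=1.0,
                        groups=3,
                        pooling='avg',
                        input_shape=(224, 224, 3))
print(shufflenet.name)  # ShuffleNet_1X_g3_br_0.25_373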
Example #28
0
                           C.num_rois,
                           nb_classes=len(classes_count),
                           trainable=True)
## build one model per network: Model(inputs, outputs)
model_rpn = Model(img_input, rpn[:2])  ## rpn[2] is base_layers (the shared feature map)
model_classifier = Model([img_input, roi_input], classifier)
model_all = Model([img_input, roi_input], rpn[:2] + classifier)

try:
    ## base_net_weight is not in config.py but is defined above
    ## alternatively, resume from a checkpoint trained earlier, e.g.:
    ##   model_rpn.load_weights('finalmodel_frcnn_cat.h5', by_name=True)
    ##   model_classifier.load_weights('finalmodel_frcnn_cat.h5', by_name=True)
    print('loading weights from {}'.format(
        'vgg16_weights_tf_dim_ordering_tf_kernels.h5'))
    model_rpn.load_weights('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                           by_name=True)
    model_classifier.load_weights(
        'vgg16_weights_tf_dim_ordering_tf_kernels.h5', by_name=True)
except Exception:
    print('Could not load pretrained model weights. Weights can be found in '
          'the Keras applications folder: '
          'https://github.com/fchollet/keras/tree/master/keras/applications')

## optimizer for each network
optimizer = Adam(lr=1e-6)
optimizer_classifier = Adam(lr=1e-6)
model_rpn.compile(
    optimizer=optimizer,
    loss=[losses.rpn_loss_cls(num_anchors),
          losses.rpn_loss_reg(num_anchors)])
model_classifier.compile(
Example #29
0
class VGGish(KerasModelContainer):
    """ KerasModelContainer for VGGish model

    Jort F. Gemmeke et al.
    Audio Set: An ontology and human-labeled dataset for audio events
    International Conference on Acoustics, Speech, and Signal Processing.
    New Orleans, LA, 2017.

    Notes
    -----
    https://research.google.com/audioset/
    Based on vggish-keras https://pypi.org/project/vggish-keras/

    Parameters
    ----------
    n_frames_cnn : int or None, default=96
        Length of the input (number of frames of each sequence).

    n_freq_cnn : int, default=64
        Number of frequency bins. The model's input has shape
        (n_frames, n_freqs).

    n_classes : int, default=10
        Number of classes (output dimension).

    n_channels : int, default=0
        Number of input channels.

        0 : mono signals.
            Input shape = (n_frames_cnn, n_freq_cnn)
        1 : mono signals.
            Input shape = (n_frames_cnn, n_freq_cnn, 1)
        2 : stereo signals.
            Input shape = (n_frames_cnn, n_freq_cnn, 2)
        n > 2 : multi-representations.
            Input shape = (n_frames_cnn, n_freq_cnn, n_channels)

    embedding_size : int, default=128
        Number of units in the embeddings layer.

    pooling : {'avg', 'max'}, default='avg'
        Whether to use global average or global max pooling.

    include_top : bool, default=False
        Include fully-connected layers.

    compress : bool, default=False
        Apply PCA.


    Attributes
    ----------
    model : keras.models.Model
        Keras model.

    Examples
    --------
    >>> from dcase_models.model.models import VGGish
    >>> model_container = VGGish()
    >>> model_container.model.summary()
    _________________________________________________________________
    Layer (type)                 Output Shape              Param #
    =================================================================
    input (InputLayer)           (None, 96, 64)            0
    _________________________________________________________________
    lambda (Lambda)              (None, 96, 64, 1)         0
    _________________________________________________________________
    conv1 (Conv2D)               (None, 96, 64, 64)        640
    _________________________________________________________________
    pool1 (MaxPooling2D)         (None, 48, 32, 64)        0
    _________________________________________________________________
    conv2 (Conv2D)               (None, 48, 32, 128)       73856
    _________________________________________________________________
    pool2 (MaxPooling2D)         (None, 24, 16, 128)       0
    _________________________________________________________________
    conv3/conv3_1 (Conv2D)       (None, 24, 16, 256)       295168
    _________________________________________________________________
    conv3/conv3_2 (Conv2D)       (None, 24, 16, 256)       590080
    _________________________________________________________________
    pool3 (MaxPooling2D)         (None, 12, 8, 256)        0
    _________________________________________________________________
    conv4/conv4_1 (Conv2D)       (None, 12, 8, 512)        1180160
    _________________________________________________________________
    conv4/conv4_2 (Conv2D)       (None, 12, 8, 512)        2359808
    _________________________________________________________________
    pool4 (MaxPooling2D)         (None, 6, 4, 512)         0
    _________________________________________________________________
    global_average_pooling2d_1 ( (None, 512)               0
    =================================================================
    Total params: 4,499,712
    Trainable params: 4,499,712
    Non-trainable params: 0
    _________________________________________________________________
    """

    def __init__(self, model=None, model_path=None, metrics=['classification'],
                 n_frames_cnn=96, n_freq_cnn=64, n_classes=10,
                 n_channels=0, embedding_size=128, pooling='avg',
                 include_top=False, compress=False):

        self.n_frames_cnn = n_frames_cnn
        self.n_freq_cnn = n_freq_cnn
        self.n_classes = n_classes
        self.n_channels = n_channels
        self.embedding_size = embedding_size
        self.pooling = pooling
        self.include_top = include_top
        self.compress = compress

        super().__init__(
            model=model, model_path=model_path,
            model_name='VGGish', metrics=metrics
        )

    class Postprocess(Layer):
        """ Keras layer that applies PCA and quantizes the ouput.

        Based on vggish-keras https://pypi.org/project/vggish-keras/
        """
        def __init__(self, output_shape=None, **kw):
            self.emb_shape = output_shape
            super().__init__(**kw)

        def build(self, input_shape):
            input_shape = tuple(int(x) for x in tuple(input_shape)[1:])
            emb_shape = (self.emb_shape,) if self.emb_shape else input_shape

            self.pca_matrix = self.add_weight(
                name='pca_matrix', shape=emb_shape + input_shape,
                initializer='uniform')
            self.pca_means = self.add_weight(
                name='pca_means', shape=input_shape + (1,),
                initializer='uniform')

        def call(self, x):
            # Apply PCA.
            # - Embeddings come in as [batch_size, embedding_size].
            # - Transpose to [embedding_size, batch_size].
            # - Subtract pca_means column vector from each column.
            # - Premultiply by the PCA matrix of shape [output_dims, input_dims],
            #   where both are equal to embedding_size in our case.
            # - Transpose result back to [batch_size, embedding_size].
            x = K.dot(self.pca_matrix, (K.transpose(x) - self.pca_means))
            x = K.transpose(x)

            # Quantize by:
            # - clipping to [min, max] range
            # - convert to 8-bit in range [0.0, 255.0]
            # - cast 8-bit float to uint8
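            #   e.g. -2.0 -> 0, 0.0 -> 127 (127.5 truncated by the cast), +2.0 -> 255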
            QUANTIZE_MIN_VAL = -2.0
            QUANTIZE_MAX_VAL = +2.0
            x = clip_by_value(x, QUANTIZE_MIN_VAL, QUANTIZE_MAX_VAL)
            x = ((x - QUANTIZE_MIN_VAL) *
                 (255.0 / (QUANTIZE_MAX_VAL - QUANTIZE_MIN_VAL)))
            return K.cast(x, 'uint8')

    def build(self):
        """ Builds the VGGish Keras model.
        """
        if self.n_channels == 0:
            inputs = Input(shape=(self.n_frames_cnn, self.n_freq_cnn),
                           dtype='float32', name='input')
            x = Lambda(
                lambda x: K.expand_dims(x, -1), name='lambda'
            )(inputs)
        else:
            inputs = Input(
                shape=(self.n_frames_cnn, self.n_freq_cnn, self.n_channels),
                dtype='float32', name='input'
            )
            x = Lambda(lambda x: x, name='lambda')(inputs)

        # setup layer params
        conv = partial(Conv2D, kernel_size=(3, 3), strides=(
            1, 1), activation='relu', padding='same')
        maxpool = partial(MaxPooling2D, pool_size=(2, 2),
                          strides=(2, 2), padding='same')

        # Block 1
        x = conv(64, name='conv1')(x)
        x = maxpool(name='pool1')(x)

        # Block 2
        x = conv(128, name='conv2')(x)
        x = maxpool(name='pool2')(x)

        # Block 3
        x = conv(256, name='conv3/conv3_1')(x)
        x = conv(256, name='conv3/conv3_2')(x)
        x = maxpool(name='pool3')(x)

        # Block 4
        x = conv(512, name='conv4/conv4_1')(x)
        x = conv(512, name='conv4/conv4_2')(x)
        x = maxpool(name='pool4')(x)

        if self.include_top:
            dense = partial(Dense, activation='relu')

            # FC block
            x = Flatten(name='flatten_')(x)
            x = dense(4096, name='fc1/fc1_1')(x)
            x = dense(4096, name='fc1/fc1_2')(x)
            x = dense(self.embedding_size, name='fc2')(x)

            if self.compress:
                x = self.Postprocess()(x)
        else:
            globalpool = (
                GlobalAveragePooling2D() if self.pooling == 'avg' else
                GlobalMaxPooling2D() if self.pooling == 'max' else None)

            if globalpool:
                x = globalpool(x)

        # Create model
        self.model = Model(inputs, x, name='vggish_model')

        super().build()

    def load_pretrained_model_weights(self,
                                      weights_folder='./pretrained_weights'):
        """
        Loads pretrained weights to self.model weights.

        Parameters
        ----------
        weights_folder : str
            Path to load the weights file

        """
        basepath = os.path.dirname(__file__)
        weights_file = self.model_name + '.hdf5'
        weights_path = os.path.join(basepath, weights_folder, weights_file)
        if not os.path.isfile(weights_path):
            self.download_pretrained_weights()
        self.model.load_weights(weights_path, by_name=True)

    def download_pretrained_weights(self,
                                    weights_folder='./pretrained_weights'):
        """
        Download pretrained weights from:
        https://github.com/DTaoo/VGGish
        https://drive.google.com/file/d/1mhqXZ8CANgHyepum7N4yrjiyIg6qaMe6/view

        Code based on:
        https://github.com/beasteers/VGGish/blob/master/vggish_keras/download_helpers/download_weights.py

        Parameters
        ----------
        weights_folder : str
            Path to save the weights file

        """
        import requests
        import tqdm

        DRIVE_URL = 'https://drive.google.com/uc?id={id}&export=download'
        DRIVE_CONFIRM_URL = ('https://drive.google.com/uc?id={id}&export'
                             '=download&confirm={confirm}')

        basepath = os.path.dirname(__file__)
        weights_file = self.model_name + '.hdf5'
        weights_path = os.path.join(basepath, weights_folder, weights_file)
        # gdrive_id = '1mhqXZ8CANgHyepum7N4yrjiyIg6qaMe6'
        # This file includes PCA weights
        gdrive_id = '1QbMNrhu4RBUO6hIcpLqgeuVye51XyMKM'

        if not os.path.isfile(weights_path):
            print('Downloading weights...')

            sess = requests.Session()
            r = sess.get(DRIVE_URL.format(id=gdrive_id), stream=True)

            # check for google virus message
            confirm = next(
                (v for k, v in r.cookies.get_dict().items()
                 if 'download_warning_' in k), None)

            if confirm:
                # print('Using confirmation code {}...'.format(confirm))
                r = sess.get(
                    DRIVE_CONFIRM_URL.format(id=gdrive_id, confirm=confirm),
                    stream=True)

            # download w/ progress bar

            chunk_size = 1024
            unit = 1024 ** 2
            with open(weights_path, 'wb') as f:
                pbar = tqdm.tqdm(
                    unit='mb', leave=False,
                    total=int(
                        r.headers.get('Content-Length', 0)) / unit or None)

                for chunk in r.iter_content(chunk_size=chunk_size):
                    if chunk:  # filter out keep-alive new chunks
                        pbar.update(len(chunk) / unit)
                        f.write(chunk)

            print('Done!')
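
# A minimal embedding-extraction sketch (an added illustration, not part of the
# original example; it assumes `dcase_models` is installed and that the
# container builds its model on construction, as the Examples section above
# suggests).
import numpy as np

container = VGGish(n_frames_cnn=96, n_freq_cnn=64, include_top=False)
container.load_pretrained_model_weights()   # downloads the weights if missing
mel_batch = np.random.rand(1, 96, 64).astype('float32')  # (batch, frames, mel bins)
embeddings = container.model.predict(mel_batch)           # (1, 512) after avg pooling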
def InceptionV3(include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000):
    """Instantiates the Inception v3 architecture.

    Optionally loads weights pre-trained
    on ImageNet. Note that when using TensorFlow,
    for best performance you should set
    `image_data_format="channels_last"` in your Keras config
    at ~/.keras/keras.json.
    The model and the weights are compatible with both
    TensorFlow and Theano. The data format
    convention used by the model is the one
    specified in your Keras config file.
    Note that the default input image size for this model is 299x299.

    Arguments:
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)` (with `channels_last` data format)
            or `(3, 299, 299)` (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 139.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    Returns:
        A Keras model instance.

    Raises:
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=299,
                                      min_size=139,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        img_input = Input(tensor=input_tensor, shape=input_shape)

    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
    x = conv2d_bn(x, 32, 3, 3, padding='valid')
    x = conv2d_bn(x, 64, 3, 3)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv2d_bn(x, 80, 1, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, 3, padding='valid')
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # mixed 0, 1, 2: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed0')

    # mixed 1: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed1')

    # mixed 2: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed2')

    # mixed 3: 17 x 17 x 768
    branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl,
                             96,
                             3,
                             3,
                             strides=(2, 2),
                             padding='valid')

    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed3')

    # mixed 4: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed4')

    # mixed 5, 6: 17 x 17 x 768
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)

        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

        branch_pool = AveragePooling2D((3, 3), strides=(1, 1),
                                       padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(5 + i))

    # mixed 7: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 192, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed7')

    # mixed 8: 8 x 8 x 1280
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3,
                          320,
                          3,
                          3,
                          strides=(2, 2),
                          padding='valid')

    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3,
                            192,
                            3,
                            3,
                            strides=(2, 2),
                            padding='valid')

    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch7x7x3, branch_pool],
                           axis=channel_axis,
                           name='mixed8')

    # mixed 9: 8 x 8 x 2048
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)

        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2],
                                       axis=channel_axis,
                                       name='mixed9_' + str(i))

        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2],
                                          axis=channel_axis)

        branch_pool = AveragePooling2D((3, 3), strides=(1, 1),
                                       padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch3x3, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(9 + i))
    if include_top:
        # Classification block
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='inception_v3')

    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
        if include_top:
            weights_path = get_file(
                'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='9a0d58056eeedaa3f26cb7ebd46da564')
        else:
            weights_path = get_file(
                'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='bcbd6486424b2319ff4ef7d526e38f63')
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            convert_all_kernels_in_model(model)
    return model
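
# A minimal usage sketch (an added illustration, not part of the original
# function; `weights=None` keeps it self-contained by skipping the download).
import numpy as np

inception = InceptionV3(include_top=False, weights=None, pooling='avg')
images = np.random.rand(2, 299, 299, 3).astype('float32')  # 299x299 is the default size
features = inception.predict(images)
print(features.shape)  # expected: (2, 2048)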