Code example #1
File: miregularizer2.py Project: artemyk/mireg
def on_train_begin(self, logs=None):
    # Map a raw input batch to the activations feeding the KDE layer.
    self.nlayerinput = lambda x: K.function([self.model.layers[0].input],
                                            [self.kdelayer.input])([x])[0]
    N, dims = self.entropy_train_data.shape
    Kdists = K.placeholder(ndim=2)
    Klogvar = K.placeholder(ndim=0)

    # Compiled loss and gradient of the leave-one-out KDE entropy w.r.t. logvar.
    lossfunc = K.function([Kdists, Klogvar],
                          [kde_entropy_from_dists_loo(Kdists, N, dims, K.exp(Klogvar))])
    jacfunc = K.function([Kdists, Klogvar],
                         K.gradients(kde_entropy_from_dists_loo(Kdists, N, dims, K.exp(Klogvar)),
                                     Klogvar))

    def obj(logvar, dists):
        # Scalar objective; logvar arrives as a one-element array from the optimizer.
        return lossfunc([dists, logvar.flat[0]])[0]

    def jac(logvar, dists):
        # Gradient of the objective w.r.t. logvar, as a 1-D array.
        return np.atleast_2d(np.array(jacfunc([dists, logvar.flat[0]])))[0]

    self.obj = obj
    self.jac = jac
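The obj/jac pair above is shaped for a SciPy-style optimizer: logvar arrives as a one-element array (hence .flat[0]) and the distance matrix is passed through args. A minimal sketch of how they might be driven, assuming scipy.optimize.minimize, a callback instance cb, and a precomputed pairwise-distance matrix dists (all three names are illustrative, not shown in the source):

import numpy as np
from scipy.optimize import minimize

# Hypothetical usage: optimize the KDE log-variance for fixed distances.
res = minimize(cb.obj, x0=np.array([0.0]), jac=cb.jac, args=(dists,),
               method='L-BFGS-B')
best_logvar = res.x.flat[0]

Code example #2 below follows the same contract, minus the args tuple, since its obj and jac close over the training data directly.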
Code example #2
File: miregularizer2.py Project: artemyk/mireg
def on_train_begin(self, logs=None):
    modelobj = self.model.model
    inputs = modelobj.inputs + modelobj.targets + modelobj.sample_weights + [K.learning_phase()]
    lossfunc = K.function(inputs, [modelobj.total_loss])
    jacfunc = K.function(inputs, K.gradients(modelobj.total_loss, self.noiselayer.logvar))
    sampleweights = np.ones(len(self.traindata.X))

    def obj(logvar):
        # Evaluate the total training loss at the candidate logvar,
        # restoring the layer's original value afterwards.
        v = K.get_value(self.noiselayer.logvar)
        K.set_value(self.noiselayer.logvar, logvar.flat[0])
        r = lossfunc([self.traindata.X, self.traindata.Y, sampleweights, 1])[0]
        K.set_value(self.noiselayer.logvar, v)
        return r

    def jac(logvar):
        # Same pattern for the gradient of the loss w.r.t. logvar.
        v = K.get_value(self.noiselayer.logvar)
        K.set_value(self.noiselayer.logvar, logvar.flat[0])
        r = np.atleast_2d(np.array(jacfunc([self.traindata.X, self.traindata.Y, sampleweights, 1])))[0]
        K.set_value(self.noiselayer.logvar, v)
        return r

    self.obj = obj
    self.jac = jac
Code example #3
File: miregularizer2.py Project: artemyk/mireg
def get_logs(model, data, kdelayer, noiselayer, max_entropy_calc_N=None):
    logs = {}

    modelobj = model.model
    inputs = modelobj.inputs + modelobj.targets + modelobj.sample_weights + [K.learning_phase()]
    lossfunc = K.function(inputs, [modelobj.total_loss])
    sampleweightstrn = np.ones(len(data.train.X))
    sampleweightstst = np.ones(len(data.test.X))
    noreglosstrn = lambda: lossfunc([data.train.X, data.train.Y, sampleweightstrn, 0])[0]
    noreglosstst = lambda: lossfunc([data.test.X, data.test.Y, sampleweightstst, 0])[0]

    if kdelayer is not None:
        lv1 = K.get_value(kdelayer.logvar)
        logs['kdeLV'] = lv1
        print('kdeLV=%.5f,' % lv1, end=' ')

    if noiselayer is not None:
        lv2 = K.get_value(noiselayer.logvar)
        logs['noiseLV'] = lv2
        print('noiseLV=%.5f' % lv2)

    if kdelayer is not None and noiselayer is not None:
        # Optionally subsample the data to bound the cost of the entropy estimate.
        if max_entropy_calc_N is None:
            mitrn = data.train.X
            mitst = data.test.X
        else:
            mitrn = randsample(data.train.X, max_entropy_calc_N)
            mitst = randsample(data.test.X, max_entropy_calc_N)

        mi_obj_trn = MIComputer(noiselayer.get_noise_input_func(mitrn), kdelayer=kdelayer, noiselayer=noiselayer)
        mi_obj_tst = MIComputer(noiselayer.get_noise_input_func(mitst), kdelayer=kdelayer, noiselayer=noiselayer)

        mivals_trn = [float(K.eval(v)) for v in (mi_obj_trn.get_mi(), mi_obj_trn.get_h(), mi_obj_trn.get_hcond())]
        logs['mi_trn'] = mivals_trn[0]
        mivals_tst = [float(K.eval(v)) for v in (mi_obj_tst.get_mi(), mi_obj_tst.get_h(), mi_obj_tst.get_hcond())]
        logs['mi_tst'] = mivals_tst[0]
        logs['kl_trn'] = noreglosstrn()
        logs['kl_tst'] = noreglosstst()
        print(', mitrn=%s, mitst=%s, kltrn=%.3f, kltst=%.3f' % (mivals_trn, mivals_tst, logs['kl_trn'], logs['kl_tst']))

    return logs
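get_logs calls a randsample helper that is not shown in this listing. A plausible implementation, assuming it draws rows uniformly at random without replacement (an assumption, not confirmed by the source):

import numpy as np

def randsample(X, n):
    # Hypothetical helper: draw up to n rows of X uniformly at random,
    # without replacement, matching how randsample is used in get_logs.
    idx = np.random.choice(len(X), size=min(n, len(X)), replace=False)
    return X[idx]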
Code example #4
def extractFeatures(X, model):
    """
    Extract the features of X using an intermediate activation layer of the model.

    Inputs:
    - X: data sample to extract features for
    - model: model to use to get the features

    Returns: the np array of features (output of the layer four from the end)
    """
    # https://keras.io/getting-started/faq/#how-can-i-visualize-the-output-of-an-intermediate-layer
    # https://github.com/fchollet/keras/issues/1641
    # Build a backend function from the model input (plus the learning-phase
    # flag) to the target layer's output; the 0 at call time selects test mode.
    get_last_layer_output = K.function(
        [model.layers[0].input, K.learning_phase()], [model.layers[-4].output])
    layer_output = get_last_layer_output([X, 0])[0]

    return layer_output
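A sketch of downstream use, assuming the extracted features feed a simple classifier (scikit-learn and all variable names here are illustrative, not from the source):

from sklearn.linear_model import LogisticRegression

# Hypothetical usage: train a lightweight classifier on the features.
feats_train = extractFeatures(X_train, trained_model)
feats_test = extractFeatures(X_test, trained_model)
clf = LogisticRegression(max_iter=1000).fit(feats_train, y_train)
print(clf.score(feats_test, y_test))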
Code example #5
def plotCropImg(img, simg):
    # Standard backend import; the original pulled K from keras.layers.core,
    # which also worked in older Keras versions.
    from keras import backend as K

    model = Sequential()
    model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(160, 320, 3)))
    model.add(Cropping2D(cropping=((50, 25), (0, 0))))  # drop 50 top / 25 bottom rows

    # Backend function mapping the raw image to the normalized, cropped output.
    output = K.function([model.layers[0].input], [model.layers[1].output])
    crop_img = output([img[None, ...]])[0]

    plt.figure()
    plt.imshow(img, cmap='gray')
    plt.savefig(simg + "_org.png")

    # Undo the [-1, 1] normalization before casting to uint8 for display;
    # casting the normalized values directly would render a near-black image.
    plt.imshow(np.uint8((crop_img[0, ...] + 1.) * 127.5), cmap='gray')
    plt.savefig(simg + "_crop.png")
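With cropping=((50, 25), (0, 0)) on a 160x320x3 input, crop_img comes back with shape (1, 85, 320, 3): 160 - 50 - 25 = 85 rows survive, and the batch dimension added by img[None, ...] is preserved.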
Code example #6
def extractLearnedFeatures(model, newData):
    """
    Using the previously trained model, extract a learned set of features for
    the new data from an intermediate layer (fourth from the end) of the model.

    Inputs:
    - model: the trained model
    - newData: the new data to extract features from

    Returns:
    - learnedFeats: features extracted from the model
    """
    # https://keras.io/getting-started/faq/#how-can-i-visualize-the-output-of-an-intermediate-layer
    # https://github.com/fchollet/keras/issues/1641
    # The 0 at call time selects the test-time learning phase (dropout and
    # batch norm in inference mode).
    get_last_layer_output = K.function(
        [model.layers[0].input, K.learning_phase()], [model.layers[-4].output])
    learnedFeats = get_last_layer_output([newData, 0])[0]

    return learnedFeats
Code example #7
def train(epoch_num=None, name=MODEL_NAME):

    input_tensor = Input(name='the_input',
                         shape=(width, height, 3),
                         dtype='float32')
    x = input_tensor
    for i in range(2):
        # Two 3x3 conv blocks (16, then 32 filters), each followed by 2x2 pooling.
        x = Conv2D(filters=16 * (i + 1),
                   kernel_size=(3, 3),
                   padding="same",
                   activation='relu',
                   kernel_initializer='he_normal')(x)
        x = Conv2D(filters=16 * (i + 1),
                   kernel_size=(3, 3),
                   padding="same",
                   activation='relu',
                   kernel_initializer='he_normal')(x)
        x = MaxPool2D(pool_size=(2, 2))(x)
    conv_shape = x.get_shape()

    # Collapse the feature maps into a (time steps, features) sequence for the GRUs.
    x = Reshape(target_shape=(int(conv_shape[1]),
                              int(conv_shape[2] * conv_shape[3])))(x)
    x = Dense(dense_size, activation='relu')(x)

    gru_1 = GRU(rnn_size,
                return_sequences=True,
                kernel_initializer='he_normal',
                name='gru1')(x)
    gru_1b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 kernel_initializer='he_normal',
                 name='gru1_b')(x)
    gru1_merged = Add()([gru_1, gru_1b])  # sum forward and backward GRU outputs

    gru_2 = GRU(rnn_size,
                return_sequences=True,
                kernel_initializer='he_normal',
                name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size,
                 return_sequences=True,
                 go_backwards=True,
                 kernel_initializer='he_normal',
                 name='gru2_b')(gru1_merged)
    gru_2 = TimeDistributed(BatchNormalization())(gru_2)
    gru_2b = TimeDistributed(BatchNormalization())(gru_2b)
    x = Concatenate()([gru_2, gru_2b])  # concatenate forward and backward passes

    """
    The final output is [batch_size, max time steps, n_classes + 1 blank + 1 CTC
    check symbol]. Softmax turns each frame's scores into a (0, 1) probability
    distribution; taking the most probable class at each time step yields that
    frame's prediction. The Dense layer therefore outputs n_class units with a
    softmax multi-class activation.
    """
    x = Dense(n_class, kernel_initializer='he_normal', activation='softmax')(x)

    # Evaluation callback: a backend function from the input (plus the learning
    # phase flag) to the softmax output, used to decode predictions during training.
    evaluator_func = K.function([input_tensor, K.learning_phase()], [x])
    evaluator = Evaluate(validation_func=evaluator_func,
                         val_seq=val_obj,
                         name="keras_cnn_gru_add_batch")

    labels = Input(name='the_labels', shape=[n_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    loss_out = Lambda(ctc_lambda_func, output_shape=(1, ),
                      name='ctc')([x, labels, input_length, label_length])

    model = Model(inputs=[input_tensor, labels, input_length, label_length],
                  outputs=[loss_out])
    model.summary()
    # The CTC loss is computed inside the Lambda layer above, so the compiled
    # loss simply passes y_pred through.
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred},
                  optimizer='adadelta')
    if epoch_num is not None:
        weight_file = os.path.join(
            OUTPUT_DIR, os.path.join(name, 'epoch_%02d.h5' % (epoch_num)))
        model.load_weights(weight_file)

    model.fit_generator(image_gen,
                        steps_per_epoch=200,
                        epochs=100,
                        callbacks=[evaluator],
                        use_multiprocessing=True,
                        workers=2)  # multi-process data loading
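train wires ctc_lambda_func into the Lambda layer above but never defines it in this listing. The standard Keras CTC pattern, which this code presumably follows (an assumption), is:

def ctc_lambda_func(args):
    # Assumed implementation: unpack the Lambda inputs in the order they were
    # passed ([x, labels, input_length, label_length]) and compute the
    # per-sample CTC loss with the Keras backend helper.
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)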
Code example #8
File: utils.py Project: artemyk/mireg
def get_activations(model, layer, X_batch):
    # Backend function from the model input (plus the learning-phase flag) to
    # the requested layer's output; renamed locally so it does not shadow this
    # function's own name. The 0 at call time selects test mode.
    get_output = K.function([model.layers[0].input, K.learning_phase()],
                            [model.layers[layer].output])
    activations = get_output([X_batch, 0])
    return activations
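Note that get_activations returns the one-element list produced by K.function, so the array itself is element [0]. A usage sketch (the layer index and variable names are illustrative):

acts = get_activations(model, 2, X_batch)[0]
print(acts.shape)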
Code example #9
model.add(Dense(512, input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

history = model.fit(X_train,
                    Y_train,
                    batch_size=batch_size,
                    epochs=nb_epoch,
                    verbose=1,
                    validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])

# TESTING
# Backend function to the second-to-last layer (the pre-softmax features);
# the 0 passed at call time selects the test-time learning phase.
get_last_layer_output = K.function(
    [model.layers[0].input, K.learning_phase()], [model.layers[-2].output])
layer_output = get_last_layer_output([X_test, 0])[0]

# Without the [0] index, you get the raw one-element list K.function returns.
output2 = get_last_layer_output([X_test, 0])
Code example #10
                  output=[loss_out])

    # load weights into new model
    print("Loading weights...")
    model.load_weights(os.path.join(file_path_weigths, "weights275.h5"))  #TODO
    print("Loaded weights to model")

    # The loss calculation occurs elsewhere (inside the CTC Lambda layer),
    # so use a dummy lambda for the compiled loss.
    model.compile(optimizer=optimizer,
                  loss={'ctc': lambda y_true, y_pred: y_pred})

    # Reporter captures output of softmax so we can decode the output during visualization
    print("Init Reporter")
    test_func = K.function([input_data], [y_pred])

    plot(model, to_file=os.path.join(file_path_model, 'model_eval.png'))

    input_tuple = [[
        '../media/nas/01_Datasets/IAM/words/a01/a01-011/a01-011-03-08.png'
    ]]
    X = preprocessor.prep_run(input_tuple)

    if K.image_dim_ordering() == 'th':
        in1 = np.ones([1, 1, img_h, img_w])
    else:
Code example #11
def evaluate_activations(model, X, layer):
    from keras import backend as K  # standard backend import
    get_layer_output = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[layer].output])
    return get_layer_output([X, 0])[0]  # 0 = test-time learning phase
Code example #12
# show example data
ex_image = images[random.randint(0, len(images) - 1), :, :, :]  # randint is inclusive
plt.figure()
plt.imshow(ex_image, cmap='gray')
plt.show()

from keras.models import Sequential
from keras.layers import Cropping2D
from keras import backend as K

### crop test
model_crop = Sequential()
model_crop.add(
    Cropping2D(cropping=((40, 25), (0, 0)), input_shape=(160, 320, 3)))

cropping_output = K.function([model_crop.layers[0].input],
                             [model_crop.layers[0].output])
cropped_image = cropping_output([ex_image[None, ...]])[0]

plt.imshow(cropped_image[0, ...] / 255, cmap='gray')
plt.show()
print(cropped_image.shape)
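With cropping=((40, 25), (0, 0)) on the 160x320x3 input, the printed shape is (1, 95, 320, 3): 160 - 40 - 25 = 95 rows remain after removing 40 from the top and 25 from the bottom.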

# plot data
plt.figure(figsize=(5, 2))
plt.plot(measurements)
plt.xlabel('frame')
plt.ylabel('steering angle (deg)')
plt.xlim([0, len(measurements)])

# plot histogram
plt.figure(figsize=(5, 3))