Example #1
def create_uncertainty_model(learning_rate=1e-3, num_hidden_units=20, type='mobilenet_v2'):
    mu_input = Input(shape=(num_classes,))
    if type == 'mobilenet_v2':
        base_model = mobilenet_v2.MobileNetV2(include_top=False, weights='imagenet', input_tensor=None,
                                          input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif type == 'vgg16':
        base_model = vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None,
                                 input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif type == 'resnet50':
        base_model = resnet50.ResNet50(include_top=False, weights='imagenet', input_tensor=None,
                                 input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif type == 'vgg19':
        base_model = vgg19.VGG19(include_top=False, weights='imagenet', input_tensor=None,
                                 input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    elif type == 'inception_v3':
        base_model = inception_v3.InceptionV3(include_top=False, weights='imagenet', input_tensor=None,
                                 input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    else:
        base_model = mobilenet_v2.MobileNetV2(include_top=False, weights='imagenet', input_tensor=None,
                                              input_shape=(224, 224, 3), pooling='avg', classes=num_classes)
    base_model.trainable = False
    # small MLP head on the frozen backbone that predicts a single
    # uncertainty scalar (beta) in (0, 1)
    beta = base_model.output
    beta = Dense(num_hidden_units, activation='relu')(beta)
    beta = Dense(num_hidden_units, activation='relu')(beta)
    beta = Dense(num_hidden_units, activation='relu')(beta)
    # beta = Dense(num_hidden_units,activation='relu')(beta)
    beta = Dense(1, activation='sigmoid')(beta)
    # the model output is the supplied class probabilities (mu) with beta appended
    output = concatenate([mu_input, beta])

    model = Model(inputs=[mu_input, base_model.input], outputs=output)
    model.compile(loss=dirichlet_aleatoric_cross_entropy,
                  optimizer=Adam(lr=learning_rate),
                  metrics=[max_beta, min_beta]
                  )
    return model
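
A usage note for the example above: it relies on num_classes, dirichlet_aleatoric_cross_entropy, max_beta and min_beta being defined elsewhere in the same source file. A minimal usage sketch under that assumption; mu_batch, image_batch and y_batch are illustrative names, not part of the original code:

# hypothetical usage sketch for create_uncertainty_model
model = create_uncertainty_model(learning_rate=1e-3, num_hidden_units=20, type='resnet50')

# mu_batch:    precomputed class probabilities, shape (batch, num_classes)
# image_batch: images preprocessed for the chosen backbone, shape (batch, 224, 224, 3)
# y_batch:     targets in whatever format dirichlet_aleatoric_cross_entropy expects
model.fit([mu_batch, image_batch], y_batch, batch_size=32, epochs=5)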
Example #2
def main(mode='train', gpu='0'):
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    data = data_utils.ISIC2018_data(4)
    num_classes = 7

    # extract feature
    base_model = resnet50.ResNet50(weights='imagenet', include_top=False, pooling='avg', input_shape=(224,224,3))

    images_feature = []
    y_list = []
    with tf.Session() as sess:
        try:
            if mode == 'train_evaluation':
                x, y = data.read_record('train_evaluation')
                outputname = 'images_feature_with_labels_from_vgg16_train'
            elif mode == 'evaluate':
                x, y = data.read_record('evaluate')
                outputname = 'images_feature_with_labels_from_vgg16_evaluate'
            else:
                raise ValueError('unsupported mode: %s' % mode)

            sess.run(tf.global_variables_initializer())
            cnt = 0
            while True:
                x_, y_ = sess.run([x, y])
                features = base_model.predict(x_)
                y_list.append((np.argmax(y_, axis=1)))
                #print(features.shape)
                images_feature.append(features)
                cnt += 1
                print('batch %d' % cnt)
        except tf.errors.OutOfRangeError:
            print('finished loading labels')

    x_y = [images_feature, y_list]
    np.save(outputname, x_y)
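
The saved .npy file holds the per-batch feature matrices and per-batch label vectors as a pickled object array. A minimal sketch of loading it back and stacking the batches, assuming the file was written by the function above:

import numpy as np

# load the pickled [images_feature, y_list] object array saved above
images_feature, y_list = np.load('images_feature_with_labels_from_vgg16_train.npy',
                                 allow_pickle=True)
X = np.concatenate(list(images_feature), axis=0)  # pooled ResNet50 features, (num_images, 2048)
y = np.concatenate(list(y_list), axis=0)          # integer class labels, (num_images,)
print(X.shape, y.shape)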
Example #3
def model(nb_classes=10,
          logits=False,
          input_ph=None,
          nb_rows=28,
          nb_cols=28,
          nb_channels=1):

    # load the resnet50 base; global average pooling gives a fixed-size feature vector
    base_model = resnet50.ResNet50(include_top=False, weights='imagenet', pooling='avg')
    print('---------> Shape : ', base_model.output_shape[1:])

    # define the top-model (classification head)
    top_model = Sequential()

    layers = [
        Flatten(input_shape=base_model.output_shape[1:]),
        Dense(120, activation='relu'),
        Dense(84, activation='relu'),
        Dense(nb_classes)
    ]

    # add layers to the top-model
    for layer in layers:
        top_model.add(layer)

    # ResNet50 returns a functional Model, which has no .add(); stack the base
    # and the head in a new Sequential container instead
    model = Sequential([base_model, top_model])

    # grab the pre-softmax logits for the caller's placeholder, if requested
    if logits and input_ph is not None:
        logits_tensor = model(input_ph)

    # add softmax
    model.add(Activation('softmax'))

    if logits and input_ph is not None:
        return model, logits_tensor
    else:
        return model
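
The logits/input_ph pattern above resembles the model-builder interface used in TF1-era adversarial-robustness tutorials: the caller can obtain both the softmax model and a pre-softmax logits tensor wired to its own placeholder. A minimal usage sketch; the placeholder shape is an assumption:

import tensorflow as tf

# hypothetical usage sketch for the model() builder above (TF1 graph mode)
x_ph = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
keras_model, logits_tensor = model(nb_classes=10, logits=True, input_ph=x_ph)
probs_tensor = keras_model(x_ph)  # softmax probabilities for the same placeholder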
Example #4
def main(mode='train', gpu='0'):
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    data = data_utils.ISIC2018_data(4)
    num_classes = 7

    # using resnet50
    base_model = resnet50.ResNet50(weights='imagenet',
                                   include_top=False,
                                   pooling='avg',
                                   input_shape=(224, 224, 3))

    # using resnet152
    #base_model = resnet152.ResNet152(weights='imagenet', include_top=False, pooling='avg', input_shape=(224,224,3))

    # using resnet152
    #base_model = resnet.ResNet152(weights='imagenet', include_top=False, pooling='avg', input_shape=(224,224,3))

    # using densenet
    #base_model = densenet.DenseNet201(weights='imagenet', include_top=False, pooling='avg', input_shape=(224,224,3))

    # inception v3
    #base_model = inception_v3.InceptionV3(weights='imagenet', include_top=False, pooling='avg', input_shape=(224,224,3))

    model = models.Sequential()

    model.add(base_model)
    # remove fully connected according to paper
    model.add(layers.Dense(1024, activation='relu'))
    model.add(layers.Dense(7, activation='relu'))
    #model.add(layers.Dense(1000, activation='relu'))
    model.add(layers.Dense(num_classes, activation='softmax', name='fc7'))
    base_model.trainable = False
    #model.compile(loss=median_weight_class_loss,
    #model.compile(loss='categorical_crossentropy',
    model.compile(loss=focal_loss,
                  optimizer=SGD(lr=0.001, momentum=0.9, decay=0.0),
                  metrics=[metrics.categorical_accuracy])

    #model_dir = '/home/jiaxin/myGithub/Reverse_CISI_Classification/src/densenet201_keras_pre/model_fc'
    #model_dir = '/home/jiaxin/myGithub/Reverse_CISI_Classification/src/resnet152_keras_pre/model_fc'
    #model_dir = '/home/jiaxin/myGithub/Reverse_CISI_Classification/src/resnet152_keras_pre/model_fc'
    #model_dir = '/home/jiaxin/myGithub/Reverse_CISI_Classification/src/resnet50_keras_pre/model_cw'
    model_dir = '/home/jiaxin/myGithub/Reverse_CISI_Classification/src/resnet50_keras_pre/model_fc_repeat'
    #model_dir = '/home/jiaxin/myGithub/Reverse_CISI_Classification/src/resnet50_keras_pre/model_fc_3_m'
    #model_dir = '/home/jiaxin/myGithub/Reverse_CISI_Classification/src/resnet50_keras_pre/model_fc_cw_nor'
    os.makedirs(model_dir, exist_ok=True)
    print('model_dir', model_dir)
    est = tf.keras.estimator.model_to_estimator(keras_model=model,
                                                model_dir=model_dir)

    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: data.read_record('train'))
    eval_spec = tf.estimator.EvalSpec(
        input_fn=lambda: data.read_record('train_evaluation'))

    if mode == 'train':
        tf.estimator.train_and_evaluate(est, train_spec, eval_spec)
    elif mode == 'evaluate':
        with tf.Session() as sess:
            try:
                x, y = data.read_record('evaluate')
                sess.run(tf.global_variables_initializer())
                y_list = []
                while True:
                    _, y_ = sess.run([x, y])
                    y_list.extend((np.argmax(y_, axis=1)))
            except tf.errors.OutOfRangeError:
                print('finished loading labels')

            # test
            #cnt = 0
            #while True:
            #    try:
            #        x, y = data.read_record('evaluate')
            #        sess.run(tf.global_variables_initializer())
            #        y_list_ = []
            #        while True:
            #            _, y_ = sess.run([x, y])
            #            y_list_.extend((np.argmax(y_, axis=1)))
            #    except tf.errors.OutOfRangeError:
            #        cnt += 1
            #        print(cnt)
            #        assert all([a==b for a, b in zip(y_list, y_list_)])

            pp = []
            while True:
                predictions = est.predict(
                    input_fn=lambda: data.read_record('evaluate'))
                predictions_list = []
                for pre in predictions:
                    p = np.argmax(pre['fc7'])
                    predictions_list.append(p)

                statistics_ = statistics.statistics(hps, mode='evaluate')
                statistics_.add_labels_predictions(predictions_list, y_list)
                statistics_.get_acc_normal()
                result = statistics_.get_acc_imbalanced()
                np.save('predictions_label_fc_repeat',
                        [predictions_list, y_list])
                #np.save('predictions_label_fc_3_m', [predictions_list, y_list])
                #np.save('predictions_label_fc_without_fulcon', [predictions_list, y_list])
                pp.append(result)

                print('---')
                np.save('result_fc_repeat', pp)
                #np.save('result_fc_3_m', pp)
                #np.save('result_fc_without_fulcon', pp)
                time.sleep(120)
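
focal_loss (like the commented-out median_weight_class_loss) is a custom loss defined elsewhere in the repository and is not shown here. As a stand-in, a minimal sketch of a standard multi-class focal loss that fits the compile call above; the gamma value is an assumption:

from tensorflow.python.keras import backend as K

def focal_loss(y_true, y_pred, gamma=2.0):
    # clip predictions to avoid log(0)
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
    # down-weight well-classified examples: -(1 - p)^gamma * y_true * log(p)
    cross_entropy = -y_true * K.log(y_pred)
    weight = K.pow(1.0 - y_pred, gamma)
    return K.sum(weight * cross_entropy, axis=-1)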
Example #5
def get_resnet50_model(weights='imagenet', input_tensor=None):
    return resnet50.ResNet50(weights=weights, include_top=False, input_tensor=input_tensor)
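
A minimal sketch of using the returned headless network as a feature extractor; image loading and preprocessing follow Example #6, and the file name is only illustrative:

import numpy as np
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.applications import resnet50

extractor = get_resnet50_model()
img = image.img_to_array(image.load_img('dog.png', target_size=(224, 224)))
batch = resnet50.preprocess_input(np.expand_dims(img, axis=0))
features = extractor.predict(batch)
print(features.shape)  # (1, 7, 7, 2048) for a 224x224 input with no pooling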
Example #6
#-*-encoding:utf-8-*-
import numpy as np
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.applications import resnet50
import warnings

warnings.filterwarnings("ignore")

img = image.load_img('dog.png')
# print(image.img_to_array(img).shape)
model = resnet50.ResNet50(weights='imagenet')

img = image.load_img('dog.png', target_size=(224, 224))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
print(img.shape)

img = resnet50.preprocess_input(img)

# run prediction with the pretrained model
pred = model.predict(img)
n = 10
top_n = resnet50.decode_predictions(pred, top=n)
for i in top_n[0]:
    print(i)