def predict(gen, weights_path, return_label=True):
    model = ResNet50(input_shape=cfg.input_shape,
                     classes=28, weights=weights_path)
    pred_Y = []
    test_Y = []

    for batch_id in range(len(gen)):
        if return_label:
            batch_x, batch_y = gen[batch_id]
        else:
            batch_x = gen[batch_id]

        batch_pred = model.predict(batch_x, batch_size=len(batch_x))
        batch_pred = np.split(batch_pred, cfg.batch_size, axis=0)

        for i, pred in enumerate(batch_pred):
            # (1, n_classes)
            pred = np.mean(pred, axis=0, keepdims=True)
            pred_Y.append(pred)
            if return_label:
                score = np.mean(np.round(pred) == batch_y[i, :])
                print('predicting batch ', batch_id + 1, ', total',
                      len(gen), '---- accuracy score: ', score)
            else:
                print('predicting batch ', batch_id + 1, ', total', len(gen))

        if return_label:
            test_Y.append(batch_y)

    pred_Y = np.concatenate(pred_Y, axis=0)  # (batch, n_classes)
    if return_label:
        test_Y = np.concatenate(test_Y, axis=0)
        return pred_Y, test_Y
    else:
        return pred_Y
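A minimal usage sketch for predict; BaseGenerator, cfg, val_df, and the weights path are assumptions borrowed from the other examples in this listing, not guaranteed by this snippet:

val_gen = BaseGenerator(val_df, cfg.train_dir,
                        batch_size=cfg.batch_size,
                        target_shape=tuple(cfg.input_shape[:2]),
                        preprocessing_function=preprocess_input)
pred_Y, test_Y = predict(val_gen, 'logs/base_resnet/weights.h5')
print('overall accuracy:', np.mean(np.round(pred_Y) == test_Y))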
def predict_on_generator(gen, weights_path, return_label=True):
    model = ResNet50(input_shape=cfg.input_shape,
                     classes=28, weights=weights_path)
    pred_Y = []
    batch_pred = model.predict_generator(gen, steps=len(gen),
                                         use_multiprocessing=True,
                                         verbose=1,
                                         workers=8,
                                         max_queue_size=200)
    batch_pred = np.split(batch_pred, gen.test_df.shape[0], axis=0)

    if return_label:
        test_Y = gen.get_all_labels()

    for batch_id, pred in enumerate(batch_pred):
        pred = np.mean(pred, axis=0, keepdims=True)         # (1, n_classes)
        if return_label:
            acc = np.mean(test_Y[batch_id, :] == np.round(pred[0, :]))
            print('predicting batch ', batch_id + 1, ', total', gen.test_df.shape[0], ' acc: ', acc)
        else:
            print('predicting batch ', batch_id + 1,
                  ', total', gen.test_df.shape[0])
        pred_Y.append(pred)
    pred_Y = np.concatenate(pred_Y, axis=0)  # (batch, n_classes)
    if return_label:
        return pred_Y, test_Y
    else:
        return pred_Y
def predict(model_folder, image_folder, classes_dict, debug=False):
    weights = os.path.join(model_folder, 'model.ckpt')
    n_classes = len(classes_dict)
    model = ResNet50(JSON_CONFIG, n_classes)
    filenames = model.load_pred(image_folder)
    predictions = model.predict(weights, debug=debug)
    interpret(filenames, predictions, classes_dict)
def qmodel(name, device):
    if name == "resnet18":
        return ResNet18().to(device)
    elif name == "resnet34":
        return ResNet34().to(device)
    elif name == "resnet50":
        return ResNet50().to(device)
    elif name == "resnet101":
        return ResNet101().to(device)
    elif name == "resnet152":
        return ResNet152().to(device)
    elif name == "vgg11":
        return VGG("VGG11").to(device)
    elif name == "vgg13":
        return VGG("VGG13").to(device)
    elif name == "vgg16":
        return VGG("VGG16").to(device)
    elif name == "vgg19":
        return VGG("VGG19").to(device)
    elif name == "densenet121":
        return DenseNet121().to(device)
    elif name == "densenet169":
        return DenseNet169().to(device)
    elif name == "densenet201":
        return DenseNet201().to(device)
    elif name == "resnext":
        return ResNeXt29_8x64d().to(device)
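A quick instantiation sketch; it assumes these model classes are the CIFAR-style implementations that accept 32x32 inputs:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = qmodel('resnet50', device)
logits = net(torch.randn(1, 3, 32, 32, device=device))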
Example #5
class SCRN(nn.Module):
    def __init__(self, channel=32):
        super(SCRN, self).__init__()
        self.resnet = ResNet50()
        self.reduce_s1 = Reduction(256, channel)
        self.reduce_s2 = Reduction(512, channel)
        self.reduce_s3 = Reduction(1024, channel)
        self.reduce_s4 = Reduction(2048, channel)

        self.reduce_e1 = Reduction(256, channel)
        self.reduce_e2 = Reduction(512, channel)
        self.reduce_e3 = Reduction(1024, channel)
        self.reduce_e4 = Reduction(2048, channel)

        self.df1 = DenseFusion(channel)
        self.df2 = DenseFusion(channel)
        self.df3 = DenseFusion(channel)
        self.df4 = DenseFusion(channel)

        self.output_s = ConcatOutput(channel)
        self.output_e = ConcatOutput(channel)

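        # Initialize weights: Conv2d filters drawn from N(0, 0.01);
        # BatchNorm scale (weight) set to 1 and bias to 0.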
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(std=0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        self.initialize_weights()
Example #6
def main(config_dict):

    print("--- Loading dataset [{}] ---".format(config_dict["Dataset"]["name"])) # --------
    if config_dict["Dataset"]["name"] == "SVHN":
        svhn = SVHNData("Dataset/SVHN/train_32x32.mat", "Dataset/SVHN/test_32x32.mat")
        train_images, train_labels = svhn.get_train_data()
        test_images, test_labels = svhn.get_test_data()
    elif config_dict["Dataset"]["name"] == "CIFAR10":
        cifar10 = CiferData("Dataset/CIFAR10/")
        train_images, train_labels, validation_images, validation_labels, test_images, test_labels = cifar10.training_data(7000)
    else:
        print("Not dataset. please check the toml file")
        exit()
    # -------------------------------------------------------------------------------------

    train_data = train_images
    train_label = train_labels
    test_data = test_images
    test_label = test_labels

    print("--- Creating model [{}] ---".format(config_dict["Network"]["name"])) # ---------
    if config_dict["Network"]["name"] == "ResNet50":
        network = ResNet50(config_dict["Network"]["fig_size"], config_dict["Network"]["class"])
    elif config_dict["Network"]["name"] == "ResNet18":
        network = ResNet18(config_dict["Network"]["fig_size"], config_dict["Network"]["class"])
    else:
        network = ConvolutionalNeuralNetwork(config_dict["Network"]["fig_size"], config_dict["Network"]["class"])
    network.set_model(config_dict["Network"]["lr"])
    # -------------------------------------------------------------------------------------

    #saver = tf.compat.v1.train.Saver
    sess = tf.compat.v1.Session()
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)

    epoch = config_dict["episode"]
    batch_size = config_dict["batch_size"]

    # train
    loss_list = []
    with tqdm(range(epoch)) as pbar:
        for _ in pbar:
            choice_id = np.random.choice(train_data.shape[0], batch_size, replace=False)
            batch_data = train_data[choice_id]
            batch_label = train_label[choice_id]

            _, loss = network.train(sess, batch_data, batch_label)
            loss_list.append(loss)
            pbar.set_postfix(OrderedDict(loss=loss))

    # test
    accuracy = 0
    for j in range(0, test_data.shape[0], 100):
        data = test_data[j:j+100]
        label = test_label[j:j+100]
        accuracy += int(network.test(sess, data, label)[0]*data.shape[0])

    print("test accuracy {}".format(accuracy/test_data.shape[0]*100.0))
def train(dir, n_epochs, debug=False):
    # Load Dataset
    classes_dict, filenames, labels = load_set(dir)
    n_classes = len(classes_dict)

    # Build model and load data into it
    model = ResNet50(JSON_CONFIG, n_classes)
    model.load_data(filenames, labels)
    model.build()
    model.train(n_epochs, debug=debug)
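A hypothetical call; the directory layout expected by load_set (e.g. one sub-folder per class) is an assumption:

train('data/train', n_epochs=30)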
class FaceNetwork(nn.Module):
    def __init__(self, device, backbone, head, class_num, embedding_size):
        super(FaceNetwork, self).__init__()
        
        self.device = device
        self.class_num = class_num

        # select backbone network 
        if backbone == 'vgg':
            self.backbone = vgg19().to(self.device)
        elif backbone == 'resnet':
            self.backbone = ResNet50().to(self.device)

        self.flatter = Flatter(embedding_size=embedding_size).to(self.device)

        # select head network
        if head == 'arcface':
            self.head = Arcface(num_classes = self.class_num, embedding_size=embedding_size).to(self.device)
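An instantiation sketch; the argument values below are assumptions:

net = FaceNetwork(device='cuda', backbone='resnet', head='arcface',
                  class_num=1000, embedding_size=512)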
Example #9
def select_hard_samples(test_gen, df, pretrain_weights):
    model = ResNet50(input_shape=cfg.input_shape,
                     classes=len(cfg.label_names),
                     trainable=True,
                     weights=pretrain_weights)
    select_idx = []
    for batch_id in range(len(test_gen)):
        print('processing ', batch_id, ' th batch, total ', len(test_gen))
        batch_x, batch_y = test_gen[batch_id]
        batch_df_idx = test_gen.batch_indexes
        batch_pred = model.predict(batch_x, batch_size=len(batch_x))
        batch_pred = np.split(batch_pred, int(len(batch_x) / 8), axis=0)
        for i, pred in enumerate(batch_pred):
            pred = np.mean(pred, axis=0, keepdims=True)
            if not np.all(np.round(pred) == batch_y[i, :]):
                select_idx.append(batch_df_idx[i])
                print('select hard sample', df.iloc[batch_df_idx[i], 0])

    return df.iloc[select_idx]
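A hard-example-mining sketch; test_gen, df, and the weights path are assumptions consistent with the surrounding examples:

hard_df = select_hard_samples(test_gen, df, 'logs/base_resnet/weights.h5')
print('selected', len(hard_df), 'hard samples for fine-tuning')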
Example #10
def generate_model_base(preset, width, height, channel, class_num, weights_init):
    '''
    Build the base model.

    # Arguments
        preset: name of the preset model
        width: input image width
        height: input image height
        channel: number of input image channels
        class_num: number of classes to predict
        weights_init: initial weights (None or 'imagenet')

    # Returns
        a keras.models.Model object
    '''
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    from keras.layers import Dense, BatchNormalization, Dropout, Input, Conv2D
    # from keras.layers import GlobalAveragePooling2D
    from keras.models import Model

    input_tensor = Input(shape=(width, height, channel))
    conv_base = None
    # output_layer = None
    prediction_layer = None

    if preset.upper() == "bench".upper():
        conv_base = create_bench_model(input_tensor)
        prediction_layer = conv_base
    elif preset.upper() == "VGG16".upper():
        from keras.applications import VGG16
        conv_base = None
        if channel == 3:
            conv_base = VGG16(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, channel)
                              )
        else:
            conv_base = VGG16(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, 3)
                              )

            conv_base.layers.pop(0)
            conv_base.layers.pop(0)
            input_layer = Input(shape=(width, height, channel), name='multi_input')
            block1_conv1_new = Conv2D(64, (3, 3),
                                      activation='relu',
                                      padding='same',
                                      kernel_initializer='glorot_uniform',
                                      name='block1_conv1_new')

            conv_base = insert_intermediate_layer_in_keras(conv_base, [0, 0], [input_layer, block1_conv1_new])

        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)  # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "VGG19".upper():
        from keras.applications import VGG19
        conv_base = None
        if channel == 3:
            conv_base = VGG19(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, channel)
                              )
        else:
            conv_base = VGG19(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, 3)
                              )

            conv_base.layers.pop(0)
            conv_base.layers.pop(0)
            input_layer = Input(shape=(width, height, channel), name='multi_input')
            block1_conv1_new = Conv2D(64, (3, 3),
                                      activation='relu',
                                      padding='same',
                                      kernel_initializer='glorot_uniform',
                                      name='block1_conv1_new')

            conv_base = insert_intermediate_layer_in_keras(conv_base, [0, 0], [input_layer, block1_conv1_new])

        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)  # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "VGG16BN".upper():
        from model import VGG16BN
        conv_base = None
        if channel == 3:
            conv_base = VGG16BN(weights=weights_init,
                                include_top=False,
                                pooling='avg',
                                kernel_initializer='glorot_uniform',
                                input_shape=(width, height, channel)
                                )
        else:
            conv_base = VGG16BN(weights=weights_init,
                                include_top=False,
                                pooling='avg',
                                kernel_initializer='glorot_uniform',
                                input_shape=(width, height, 3)
                                )

            conv_base.layers.pop(0)
            conv_base.layers.pop(0)
            input_layer = Input(shape=(width, height, channel), name='multi_input')
            block1_conv1_new = Conv2D(64, (3, 3),
                                      activation='relu',
                                      padding='same',
                                      kernel_initializer='glorot_uniform',
                                      name='block1_conv1_new')

            conv_base = insert_intermediate_layer_in_keras(conv_base, [0, 0], [input_layer, block1_conv1_new])

        # conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)  # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "VGG19BN".upper():
        from model import VGG19BN
        conv_base = None
        if channel == 3:
            conv_base = VGG19BN(weights=weights_init,
                                include_top=False,
                                pooling='avg',
                                kernel_initializer='glorot_uniform',
                                input_shape=(width, height, channel)
                                )
        else:
            conv_base = VGG19BN(weights=weights_init,
                                include_top=False,
                                pooling='avg',
                                kernel_initializer='glorot_uniform',
                                input_shape=(width, height, 3)
                                )

            conv_base.layers.pop(0)
            conv_base.layers.pop(0)
            input_layer = Input(shape=(width, height, channel), name='multi_input')
            block1_conv1_new = Conv2D(64, (3, 3),
                                      activation='relu',
                                      padding='same',
                                      kernel_initializer='glorot_uniform',
                                      name='block1_conv1_new')

            conv_base = insert_intermediate_layer_in_keras(conv_base, [0, 0], [input_layer, block1_conv1_new])

        # conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.get_output_at(-1)  # x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.2, name='fc_dropout')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet20".upper():
        # from keras.applications import ResNet50
        from model.resnet import ResNet20
        conv_base = ResNet20(weights=weights_init,
                             include_top=True,
                             input_shape=(width, height, channel),
                             input_tensor=input_tensor
                             )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet50".upper():
        # from keras.applications import ResNet50
        from model.resnet import ResNet50
        conv_base = ResNet50(weights=weights_init,
                             include_top=True,
                             input_shape=(width, height, channel),
                             input_tensor=input_tensor
                             )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet101".upper():
        from model.resnet import ResNet101
        conv_base = ResNet101(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, channel),
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet152".upper():
        from model.resnet import ResNet152
        conv_base = ResNet152(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, channel),
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet50V2".upper():
        from model.resnet_v2 import ResNet50V2
        conv_base = ResNet50V2(weights=weights_init,
                               include_top=True,
                               input_shape=(width, height, channel),
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet101V2".upper():
        from model.resnet_v2 import ResNet101V2
        conv_base = ResNet101V2(weights=weights_init,
                                include_top=True,
                                input_shape=(width, height, channel),
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNet152V2".upper():
        from model.resnet_v2 import ResNet152V2
        conv_base = ResNet152V2(weights=weights_init,
                                include_top=True,
                                input_shape=(width, height, channel),
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNeXt50".upper():
        from model.resnext import ResNeXt50
        conv_base = ResNeXt50(weights=weights_init,
                              include_top=True,
                              input_shape=(width, height, channel),
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "ResNeXt101".upper():
        from model.resnext import ResNeXt101
        conv_base = ResNeXt101(weights=weights_init,
                               include_top=True,
                               input_shape=(width, height, channel),
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        x = Dropout(0.5, name='fc_dropout')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "InceptionV3".upper():
        from keras.applications import InceptionV3
        conv_base = InceptionV3(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "InceptionResNetV2".upper():
        from keras.applications import InceptionResNetV2
        conv_base = InceptionResNetV2(weights=weights_init,
                                      include_top=True,
                                      input_tensor=input_tensor
                                      )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "DenseNet121".upper():
        from keras.applications import DenseNet121
        conv_base = DenseNet121(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "DenseNet169".upper():
        from keras.applications import DenseNet169
        conv_base = DenseNet169(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "DenseNet201".upper():
        from keras.applications import DenseNet201
        conv_base = DenseNet201(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "Xception".upper():
        from keras.applications import Xception
        conv_base = Xception(weights=weights_init,
                             include_top=True,
                             input_tensor=input_tensor
                             )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEDenseNetImageNet121".upper():
        from model import SEDenseNetImageNet121
        conv_base = SEDenseNetImageNet121(weights=weights_init,
                                          include_top=True,
                                          input_tensor=input_tensor
                                          )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEDenseNetImageNet169".upper():
        from model import SEDenseNetImageNet169
        conv_base = SEDenseNetImageNet169(weights=weights_init,
                                          include_top=True,
                                          input_tensor=input_tensor
                                          )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEDenseNetImageNet201".upper():
        from model import SEDenseNetImageNet201
        conv_base = SEDenseNetImageNet201(weights=weights_init,
                                          include_top=True,
                                          input_tensor=input_tensor
                                          )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEDenseNetImageNet264".upper():
        from model import SEDenseNetImageNet264
        conv_base = SEDenseNetImageNet264(weights=weights_init,
                                          include_top=True,
                                          input_tensor=input_tensor
                                          )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEDenseNetImageNet161".upper():
        from model import SEDenseNetImageNet161
        conv_base = SEDenseNetImageNet161(weights=weights_init,
                                          include_top=True,
                                          input_tensor=input_tensor
                                          )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEInceptionResNetV2".upper():
        from model import SEInceptionResNetV2
        conv_base = SEInceptionResNetV2(weights=weights_init,
                                        include_top=True,
                                        input_tensor=input_tensor
                                        )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEInceptionV3".upper():
        from model import SEInceptionV3
        conv_base = SEInceptionV3(weights=weights_init,
                                  include_top=True,
                                  input_tensor=input_tensor
                                  )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEMobileNet".upper():
        from model import SEMobileNet
        conv_base = SEMobileNet(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet6".upper():
        from model import SEResNet6
        conv_base = SEResNet6(weights=weights_init,
                              include_top=True,
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet8".upper():
        from model import SEResNet8
        conv_base = SEResNet8(weights=weights_init,
                              include_top=True,
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet10".upper():
        from model import SEResNet10
        conv_base = SEResNet10(weights=weights_init,
                               include_top=True,
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet18".upper():
        from model import SEResNet18
        conv_base = SEResNet18(weights=weights_init,
                               include_top=True,
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet34".upper():
        from model import SEResNet34
        conv_base = SEResNet34(weights=weights_init,
                               include_top=True,
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet50".upper():
        from model import SEResNet50
        conv_base = SEResNet50(weights=weights_init,
                               include_top=True,
                               input_tensor=input_tensor
                               )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet101".upper():
        from model import SEResNet101
        conv_base = SEResNet101(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNet154".upper():
        from model import SEResNet154
        conv_base = SEResNet154(weights=weights_init,
                                include_top=True,
                                input_tensor=input_tensor
                                )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    elif preset.upper() == "SEResNext".upper():
        from model import SEResNext
        conv_base = SEResNext(weights=weights_init,
                              include_top=True,
                              input_tensor=input_tensor
                              )
        conv_base.layers.pop()
        output_layer = conv_base.layers[-1]
        x = output_layer.output
        x = BatchNormalization(name='fc_bachnorm')(x)
        # x = GlobalAveragePooling2D(name='avg_pool')(x)
        prediction_layer = Dense(class_num, activation='softmax',
                                 kernel_initializer='glorot_uniform', name='prediction')(x)

    else:
        raise ValueError('unknown model name : {}'.format(preset))

    # x = output_layer.output
    # # x = Flatten()(x)
    # # x = Dense(512, activation='relu', kernel_initializer='glorot_uniform')(x)
    # # # x = Dropout(0.7)(x)
    # # x = BatchNormalization(name='fc_bachnorm')(x)
    # prediction_layer = Dense(class_num, activation='softmax', kernel_initializer='glorot_uniform', name='prediction')(x)

    model = Model(inputs=conv_base.input,
                  outputs=prediction_layer, name='classification_model')
    # #weights_filepath = 'work/test/vgg19_weights_tf_dim_ordering_tf_kernels.h5'
    # weights_filepath = 'work/test/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
    # model.load_weights(weights_filepath, by_name=True, skip_mismatch=True)
    return model
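A build sketch using the corrected signature; the optimizer and loss below are illustrative assumptions:

model = generate_model_base('ResNet50', width=224, height=224, channel=3,
                            class_num=10, weights_init='imagenet')
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])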
Example #11
def pretrain(task_name='base_resnet',
             epochs=10,
             lr=1e-1,
             target_shape=(512, 512),
             trainable=True,
             pretrain_weights='imagenet',
             init_epoch=0):
    cfg.input_shape = list(target_shape) + [3]
    train_val_df = load_train_csv(cfg)
    train_df, val_df = split_train_val(train_val_df, 0.25, seed=42)
    train_gen = BaseGenerator(train_df,
                              cfg.train_dir,
                              batch_size=cfg.batch_size,
                              aug_args=cfg.aug_args.copy(),
                              target_shape=target_shape,
                              use_yellow=False,
                              preprocessing_function=preprocess_input)

    val_gen = BaseGenerator(val_df,
                            cfg.train_dir,
                            batch_size=cfg.batch_size,
                            aug_args=cfg.aug_args.copy(),
                            target_shape=target_shape,
                            use_yellow=False,
                            preprocessing_function=preprocess_input)
    if n_gpus > 1:
        print('use multi gpu')
        with tf.device('/cpu:0'):
            cpu_model = ResNet50(input_shape=cfg.input_shape,
                                 classes=len(cfg.label_names),
                                 trainable=trainable,
                                 weights=pretrain_weights)
        model = multi_gpu_model(cpu_model, gpus=n_gpus)
    else:
        print('use single gpu')
        model = ResNet50(input_shape=cfg.input_shape,
                         classes=len(cfg.label_names),
                         trainable=trainable,
                         weights=pretrain_weights)
    model.compile(optimizer=KO.Adam(lr=lr, amsgrad=True),
                  loss='binary_crossentropy',
                  metrics=[f1_score, 'mae'])
    log_dir = os.path.join(cfg.log_dir, task_name)
    makedir(log_dir)
    weights_path = os.path.join(log_dir, cfg.weights_file)
    checkpoint = ModelCheckpoint(weights_path,
                                 monitor='f1_score',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max',
                                 save_weights_only=True)
    if n_gpus > 1:
        del checkpoint
        checkpoint = MultiGPUCheckpoint(weights_path,
                                        cpu_model,
                                        verbose=1,
                                        monitor='f1_score',
                                        mode='max',
                                        save_weights_only=True,
                                        save_best_only=True)
    callbacks = [checkpoint]
    callbacks += [
        ReduceLROnPlateau(monitor='f1_score',
                          factor=0.5,
                          patience=3,
                          verbose=1,
                          mode='max')
    ]
    # callbacks += [LearningRateScheduler(lr_schedule)]
    train_steps = get_number_of_steps(len(train_df), cfg.batch_size)
    val_steps = get_number_of_steps(len(val_df), cfg.batch_size)
    model.fit_generator(train_gen,
                        epochs=epochs,
                        steps_per_epoch=train_steps,
                        callbacks=callbacks,
                        validation_data=val_gen,
                        workers=cfg.n_works,
                        max_queue_size=cfg.n_queue,
                        use_multiprocessing=True,
                        validation_steps=val_steps,
                        initial_epoch=init_epoch)
    K.clear_session()
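A hypothetical invocation; the hyper-parameter values are assumptions:

pretrain(task_name='base_resnet', epochs=20, lr=1e-3,
         target_shape=(512, 512), trainable=True,
         pretrain_weights='imagenet')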
Example #12
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           pin_memory=True,
                                           num_workers=2)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'resnet50':
    cnn = ResNet50(num_classes=num_classes)
elif args.model == 'resnet101':
    cnn = ResNet101(num_classes=num_classes)
elif args.model == 'wideresnet':
    if args.dataset == 'svhn':
        cnn = WideResNet(depth=16,
                         num_classes=num_classes,
                         widen_factor=8,
                         dropRate=0.4)
    else:
        cnn = WideResNet(depth=28,
                         num_classes=num_classes,
                         widen_factor=10,
                         dropRate=0.3)

# cnn = cnn.cuda()
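A minimal training-loop sketch for the model selected above; the optimizer settings, device placement, and loss are assumptions, not part of the original snippet:

import torch.nn.functional as F

cnn = cnn.cuda()  # assumes a CUDA device is available
optimizer = torch.optim.SGD(cnn.parameters(), lr=0.1,
                            momentum=0.9, weight_decay=5e-4)
cnn.train()
for images, labels in train_loader:
    images, labels = images.cuda(), labels.cuda()
    optimizer.zero_grad()
    loss = F.cross_entropy(cnn(images), labels)
    loss.backward()
    optimizer.step()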
Example #13
                                           shuffle=True,
                                           pin_memory=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True)

# teacher model
if 'x4' in args.teacher:
    teacher = build_resnetx4_backbone(depth=int(args.teacher[6:-2]),
                                      num_classes=num_classes)
elif 'resnet' in args.teacher:
    teacher = build_resnet_backbone(depth=int(args.teacher[6:]),
                                    num_classes=num_classes)
elif 'ResNet50' in args.teacher:
    teacher = ResNet50(num_classes=num_classes)
elif 'vgg' in args.teacher:
    teacher = vgg_dict[args.teacher](num_classes=num_classes)
elif 'mobile' in args.teacher:
    teacher = mobile_half(num_classes=num_classes)
elif 'wrn' in args.teacher:
    teacher = wrn(depth=int(args.teacher[4:6]),
                  widen_factor=int(args.teacher[-1:]),
                  num_classes=num_classes)
elif args.teacher == '':
    teacher = None
else:
    raise ValueError('unknown teacher model: {}'.format(args.teacher))
if teacher is not None:
    load_teacher_weight(teacher, args.teacher_weight, args.teacher)
    teacher = teacher.cuda()
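A distillation-loss sketch for the teacher loaded above; the temperature, the student model, and the batch variables are assumptions (standard Hinton-style knowledge distillation, not necessarily this project's exact method):

import torch.nn.functional as F

T = 4.0                                  # assumed distillation temperature
with torch.no_grad():
    t_logits = teacher(images)           # `images`: a batch from train_loader
s_logits = student(images)               # `student` is a hypothetical model
kd_loss = F.kl_div(F.log_softmax(s_logits / T, dim=1),
                   F.softmax(t_logits / T, dim=1),
                   reduction='batchmean') * (T * T)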
Example #14
        D2 = nn.DataParallel(D2)
        D2.eval()

        num_classes = 1000
        img_size = 224
        img_ch = 3
        clipmax = [2.249, 2.429, 2.640]
        clipmin = [-2.118, -2.036, -1.804]
    elif args.dataset == 'cifar10':
        D1_path = os.path.join(load_dir, 'D1_network.pth')
        D1 = VGG('VGG16').cuda()
        D1.load_state_dict(torch.load(D1_path))
        D1.eval()

        D2_path = os.path.join(load_dir, 'D2_network.pth')
        D2 = ResNet50().cuda()
        D2.load_state_dict(torch.load(D2_path))
        D2.eval()

        num_classes = 10
        img_size = 32
        img_ch = 3
        clipmax = 1.
        clipmin = 0.
    else:
        D2_path = os.path.join(load_dir, 'Netll_mnist.pth')
        D2 = Net_ll().cuda()
        D2.load_state_dict(torch.load(D2_path))
        D2.eval()

        D1_path = os.path.join(load_dir, 'sklearn_mnist_model.pkl')
Example #15
def u_net_res_model(n_classes=1, input_height=576, input_width=576):

    # Get resnet
    if config.segmodel == "RS50":
        img_input, levels = ResNet50(input_shape=(input_height, input_width,
                                                  1),
                                     classes=2)
    elif config.segmodel == "RS50V":
        img_input, levels = ResNet50V2(input_shape=(input_height, input_width,
                                                    1),
                                       classes=2)
    elif config.segmodel == "RS101":
        img_input, levels = ResNet101(input_shape=(input_height, input_width,
                                                   1),
                                      classes=2)
    elif config.segmodel == "RS101V":
        img_input, levels = ResNet101V2(input_shape=(input_height, input_width,
                                                     1),
                                        classes=2)
    elif config.segmodel == "RS152":
        img_input, levels = ResNet152(input_shape=(input_height, input_width,
                                                   1),
                                      classes=2)
    else:
        img_input, levels = ResNet152V2(input_shape=(input_height, input_width,
                                                     1),
                                        classes=2)

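    # NOTE: "RS50Ext" is assumed to produce six feature levels (f0..f5);
    # the backbone chain above does not build it explicitly.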
    if config.segmodel == "RS50Ext":
        [f0, f1, f2, f3, f4, f5] = levels
    else:
        [f1, f2, f3, f4, f5] = levels

    o = f4

    o = (ZeroPadding2D((1, 1)))(o)
    if config.reg == "N":
        o = (Conv2D(512, (3, 3), padding='valid', activation='relu'))(o)
    else:
        o = (Conv2D(512, (3, 3),
                    padding='valid',
                    activation='relu',
                    kernel_regularizer=regularizers.l2(0.001)))(o)
    o = (BatchNormalization())(o)
    if config.dropout == "Y":
        o = Dropout(0.25)(o)

    o = (UpSampling2D((2, 2)))(o)
    o = (concatenate([o, f3], axis=-1))
    o = (ZeroPadding2D((1, 1)))(o)
    if config.reg == "N":
        o = (Conv2D(256, (3, 3), padding='valid', activation='relu'))(o)
    else:
        o = (Conv2D(256, (3, 3),
                    padding='valid',
                    activation='relu',
                    kernel_regularizer=regularizers.l2(0.001)))(o)
    o = (BatchNormalization())(o)
    if config.dropout == "Y":
        o = Dropout(0.25)(o)

    o = (UpSampling2D((2, 2)))(o)
    o = (concatenate([o, f2], axis=-1))
    o = (ZeroPadding2D((1, 1)))(o)
    if config.reg == "N":
        o = (Conv2D(128, (3, 3), padding='valid', activation='relu'))(o)
    else:
        o = (Conv2D(128, (3, 3),
                    padding='valid',
                    activation='relu',
                    kernel_regularizer=regularizers.l2(0.001)))(o)
    o = (BatchNormalization())(o)
    if config.dropout == "Y":
        o = Dropout(0.35)(o)

    o = (UpSampling2D((2, 2)))(o)
    o = (concatenate([o, f1], axis=-1))
    o = (ZeroPadding2D((1, 1)))(o)
    if config.reg == "N":
        o = (Conv2D(64, (3, 3), padding='valid', activation='relu'))(o)
    else:
        o = (Conv2D(64, (3, 3),
                    padding='valid',
                    activation='relu',
                    kernel_regularizer=regularizers.l2(0.001)))(o)
    o = (BatchNormalization())(o)
    if config.dropout == "Y":
        o = Dropout(0.35)(o)

    if config.segmodel == "RS50Ext":
        o = (UpSampling2D((2, 2)))(o)
        o = (concatenate([o, f0], axis=-1))
        o = (ZeroPadding2D((1, 1)))(o)
        o = (Conv2D(32, (3, 3), padding='valid', activation='relu'))(o)
        o = (BatchNormalization())(o)

    o = (UpSampling2D((2, 2)))(o)
    o = (ZeroPadding2D((1, 1)))(o)
    if config.reg == "N":
        o = (Conv2D(32, (3, 3), padding='valid', activation='relu'))(o)
    else:
        o = (Conv2D(32, (3, 3),
                    padding='valid',
                    activation='relu',
                    kernel_regularizer=regularizers.l2(0.001)))(o)
    o = (BatchNormalization())(o)

    o = Conv2D(n_classes, (3, 3), padding='same')(o)

    tmp_model = Model(img_input, o)
    o_shape = tmp_model.output_shape
    i_shape = tmp_model.input_shape

    output_height = o_shape[1]
    output_width = o_shape[2]
    input_height = i_shape[1]
    input_width = i_shape[2]
    n_classes = o_shape[3]
    o = (Reshape((output_height * output_width, -1)))(o)

    o = (Activation('sigmoid'))(o)
    model = Model(img_input, o)
    model.output_width = output_width
    model.output_height = output_height
    model.n_classes = n_classes
    model.input_height = input_height
    model.input_width = input_width

    return model
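A build sketch; config.segmodel, config.reg, and config.dropout must be set beforehand (an assumption about this project's config object):

model = u_net_res_model(n_classes=1, input_height=576, input_width=576)
print(model.input_height, model.input_width, model.n_classes)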