Esempio n. 1
0
 def get_full_model(self, model_path):
     """Build the backbone named in *model_path* and load its trained weights.

     Args:
         model_path: Path to a saved weights file; the backbone type is
             inferred from substrings of the lower-cased path.

     Returns:
         A Keras model with the custom head attached and weights loaded.

     Raises:
         ValueError: if no supported backbone name appears in ``model_path``.
     """
     key = model_path.lower()
     if 'resnet50' in key:
         backbone = ResNet50(input_shape=self.img_size,
                             weights=None,
                             include_top=False)
     else:
         # The remaining backbones all come from the Classifiers registry.
         if 'resnet101' in key:
             registry_name = 'resnet101'
         elif 'seresnext50' in key:
             registry_name = 'seresnext50'
         elif 'efficientnetb5' in key:
             registry_name = 'efficientnetB5'
         else:
             raise ValueError('Model {:s} is not supported!'.format(model_path))
         builder, _ = Classifiers.get(registry_name)
         backbone = builder(input_shape=self.img_size,
                            weights=None,
                            include_top=False)
     model = add_new_last_layer(backbone, self.class_num)
     model.load_weights(model_path)
     print(model.summary())
     return model
Esempio n. 2
0
def nasnet_model_fn(FLAGS,
                    objective,
                    optimizer,
                    metrics,
                    dropout=0.1,
                    weight_decay=0.1):
    """Build a NASNet-Large classifier, restore fine-tuned weights, compile it.

    Args:
        FLAGS: Config object providing ``input_size`` and ``num_classes``.
        objective: Loss passed to ``model.compile``.
        optimizer: Optimizer passed to ``model.compile``.
        metrics: Metrics list passed to ``model.compile``.
        dropout: Dropout rate applied after the hidden dense layer.
        weight_decay: L2 coefficient for the dense-layer regularizers.

    Returns:
        The compiled Keras model.
    """
    builder, _ = Classifiers.get('nasnetlarge')
    side = FLAGS.input_size
    backbone = builder(input_shape=(side, side, 3),
                       weights='imagenet',
                       include_top=False)

    # Classification head: GAP -> regularized FC -> dropout -> softmax.
    pooled = GlobalAveragePooling2D()(backbone.output)
    hidden = Dense(1024,
                   activation='relu',
                   kernel_regularizer=regularizers.l2(weight_decay))(pooled)
    hidden = Dropout(dropout)(hidden)
    probs = Dense(FLAGS.num_classes,
                  activation='softmax',
                  activity_regularizer=regularizers.l2(weight_decay))(hidden)

    model = Model(inputs=[backbone.input], outputs=[probs])
    # Restore previously fine-tuned weights by layer name.
    model.load_weights(
        filepath=
        '/home/nowburn/disk/data/Garbage_Classify/models/nas-label_smoothing-tta-11/weights_009_0.9004.h5',
        by_name=True)
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
Esempio n. 3
0
def nasnetlarge(include_top=False, weights='imagenet', input_shape=None):
    """Return a NASNet-Large backbone from the Classifiers registry.

    Args:
        include_top: Whether to keep the ImageNet classification head.
        weights: Weight set to load ('imagenet' or None).
        input_shape: Optional (H, W, C) input-shape tuple.

    Returns:
        The instantiated Keras model.
    """
    # Bind the registry class under a distinct name: the original code rebound
    # the function's own name locally, which shadows it and invites confusion.
    model_cls, _ = Classifiers.get('nasnetlarge')
    return model_cls(input_shape=input_shape,
                     weights=weights,
                     include_top=include_top)
Esempio n. 4
0
def baseNet(img_input, base='ResNet34'):
    """Apply an ImageNet-pretrained backbone to *img_input* and return features.

    Args:
        img_input: 4-D image tensor (batch, rows, cols, channels).
        base: One of 'ResNet34', 'ResNet50' or 'VGG16'.

    Returns:
        The backbone's final convolutional feature tensor.

    Raises:
        ValueError: if *base* names an unsupported backbone.
    """
    _, rows, cols, _ = img_input.shape
    if base == 'ResNet34':
        ResNet34, _ = Classifiers.get('resnet34')
        features = ResNet34(input_shape=(rows, cols, 3),
                            weights='imagenet',
                            include_top=False)(img_input)
    elif base == 'ResNet50':
        ResNet50, _ = Classifiers.get('resnet50')
        features = ResNet50(input_shape=(rows, cols, 3),
                            weights='imagenet',
                            include_top=False)(img_input)
    elif base == 'VGG16':
        # BUG FIX: the previous code called the model on img_input first and
        # then tried .get_layer() on the resulting *tensor*, which fails.
        # With include_top=False, VGG16's output already is 'block5_pool'.
        vgg = VGG16(input_shape=(rows, cols, 3),
                    weights='imagenet',
                    include_top=False)
        features = vgg(img_input)
    else:
        # Previously an unknown base fell through and raised NameError below.
        raise ValueError('Unsupported base network: {}'.format(base))
    return features
Esempio n. 5
0
def load_feature_extractor():
    """Load the pickled ResNet18 feature-extractor and its preprocess function.

    Returns:
        [model, preprocess_input_function]: the unpickled Keras model and the
        image-preprocessing callable matching 'resnet18'.
    """
    # Only the preprocess function is needed from the registry; the model
    # itself comes from the pickle below.
    _, preprocess_input_function = Classifiers.get('resnet18')
    model_path = os.path.join(os.getcwd(), 'classifier', 'resnet18_model.pickle')
    # SECURITY NOTE: pickle.load executes arbitrary code from the file;
    # only load model pickles from trusted locations.
    with open(model_path, 'rb') as handle:
        model = pickle.load(handle)
    return [model, preprocess_input_function]
Esempio n. 6
0
def resnet18_2d_qubvel():
    """Instantiate an ImageNet-pretrained 2-D ResNet18, summarize and plot it."""
    builder, _ = Classifiers.get('resnet18')
    net = builder((256, 256, 3), weights='imagenet')

    net.summary()
    # Write an architecture diagram next to the working directory.
    plot_model(net, to_file='model.png')

    return net
Esempio n. 7
0
    def create_resnet34_model(self):
        """Build a pretrained ResNet34 backbone with a 6-way sigmoid head."""
        builder, _ = Classifiers.get('resnet34')
        side = self.input_dims[0]
        backbone = builder((side, side, 3),
                           weights='imagenet',
                           include_top=False)
        pooled = keras.layers.GlobalAveragePooling2D()(backbone.output)
        out = keras.layers.Dense(6, activation='sigmoid',
                                 name='dense_output')(pooled)
        return keras.models.Model(inputs=backbone.input, outputs=out)
Esempio n. 8
0
def seresnet_retinanet(num_classes,
                       backbone='seresnet50',
                       inputs=None,
                       modifier=None,
                       **kwargs):
    """ Constructs a retinanet model using a resnet backbone.
    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use (one of ('seresnet18', 'seresnet50', 'seresnet101', 'seresnet152')).
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example).
    Returns
        RetinaNet model with a ResNet backbone.
    """
    # choose default input
    if inputs is None:
        if keras.backend.image_data_format() == 'channels_first':
            inputs = keras.layers.Input(shape=(3, None, None))
        else:
            inputs = keras.layers.Input(shape=(None, None, 3))

    # Untrained backbone from the registry; weights come from elsewhere.
    classifier, _ = Classifiers.get(backbone)
    model = classifier(input_tensor=inputs, include_top=False, weights=None)

    # get last conv layer from the end of each block [28x28, 14x14, 7x7]
    # NOTE(review): the 'activation_NN' names below rely on Keras's
    # auto-generated layer numbering, which is fragile across library versions
    # and across multiple model builds in one session — confirm they resolve.
    if backbone == 'seresnet18' or backbone == 'seresnet34':
        layer_outputs = ['stage3_unit1_relu1', 'stage4_unit1_relu1', 'relu1']
    elif backbone == 'seresnet50':
        layer_outputs = ['activation_36', 'activation_66', 'activation_81']
    elif backbone == 'seresnet101':
        layer_outputs = ['activation_36', 'activation_151', 'activation_166']
    elif backbone == 'seresnet152':
        layer_outputs = ['activation_56', 'activation_236', 'activation_251']
    else:
        raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))

    # Resolve the layer names to their output tensors (FPN pyramid inputs).
    layer_outputs = [
        model.get_layer(name=layer_outputs[0]).output,  # 28x28
        model.get_layer(name=layer_outputs[1]).output,  # 14x14
        model.get_layer(name=layer_outputs[2]).output,  # 7x7
    ]
    # create the densenet backbone
    model = keras.models.Model(inputs=inputs,
                               outputs=layer_outputs,
                               name=model.name)

    # invoke modifier if given
    if modifier:
        model = modifier(model)

    # create the full model
    return retinanet.retinanet(inputs=inputs,
                               num_classes=num_classes,
                               backbone_layers=model.outputs,
                               **kwargs)
Esempio n. 9
0
def get_model(TRAIN_NUM, TRAIN_SIZE):
    """SE-ResNet50 backbone with a TRAIN_NUM-way softmax classification head."""
    builder, _ = Classifiers.get('seresnet50')
    backbone = builder((TRAIN_SIZE, TRAIN_SIZE, 3),
                       weights='imagenet',
                       include_top=False)
    pooled = GlobalAveragePooling2D()(backbone.output)
    prediction = Dense(TRAIN_NUM, activation='softmax', name='predict')(pooled)
    return Model(inputs=backbone.input, outputs=prediction)
def resnet18_save(x,
                  save_path='/Users/manu/git/neurips2020-procgen-starter-kit/models/resnet18.h5'):
    """Preprocess *x*, run it through a frozen pretrained ResNet18, save the net.

    Args:
        x: Input image batch tensor.
        save_path: Where to write the serialized backbone. Defaults to the
            previously hard-coded location for backward compatibility.

    Returns:
        The backbone's output features for *x*.
    """
    from classification_models.keras import Classifiers
    ResNet18, preprocess_input = Classifiers.get('resnet18')
    x = preprocess_input(x)
    resnet18 = ResNet18((224, 224, 3), weights='imagenet', include_top=False)
    # Freeze every layer so the saved model is a fixed feature extractor.
    for layer in resnet18.layers:
        print("Layer '%s' " % layer.name)
        layer.trainable = False
    resnet18.save(save_path)
    return resnet18(x)
Esempio n. 11
0
 def nasnetlarge_process(self, path):
     """Load the image at *path*, apply NASNet-Large preprocessing, display it."""
     _, preprocess_input = Classifiers.get('nasnetlarge')
     arr = np.array(Image.open(path))
     # Add a leading batch dimension before preprocessing.
     batch = np.expand_dims(arr, axis=0)
     print(type(batch))
     print(batch.shape)
     processed = preprocess_input(batch)
     print(type(processed))
     print(processed.shape)
     plt.imshow(processed[0])
     plt.show()
Esempio n. 12
0
 def resnet18_v2(self):
     """ResNet18 on single-channel 224x224 input with a GAP regression head."""
     from classification_models.keras import Classifiers
     builder, _ = Classifiers.get('resnet18')
     backbone = builder(input_shape=(224, 224, 1), include_top=False)
     head = GlobalAveragePooling2D(name='GlobalAveragePooling')(backbone.output)
     head = Dropout(0.5, name='drop1')(head)
     head = Dense(1024, activation='relu', name='dense1')(head)
     head = Dropout(0.5, name='drop2')(head)
     head = Dense(512, activation='relu', name='dense2')(head)
     head = Dense(self.nb_of_points, activation='sigmoid', name='dense3')(head)
     return Model(backbone.input, head)
Esempio n. 13
0
    def __init__(self, im_shape, num_anchor, base='ResNet34'):
        """Build the anchor-regression network on a pretrained backbone.

        Args:
            im_shape: (rows, cols[, channels]) of the input images.
            num_anchor: Number of anchors the head predicts for.
            base: Backbone name: 'ResNet34', 'ResNet50' or 'VGG16'.

        Raises:
            ValueError: if *base* is not a supported backbone.
        """
        self.num_anchor = num_anchor

        if base == 'ResNet34':
            ResNet34, _ = Classifiers.get('resnet34')
            resnet = ResNet34(input_shape=(im_shape[0], im_shape[1], 3),
                              weights='imagenet',
                              include_top=False)
            net_input, features = resnet.input, resnet.output
        elif base == 'ResNet50':
            ResNet50, _ = Classifiers.get('resnet50')
            resnet = ResNet50(input_shape=(im_shape[0], im_shape[1], 3),
                              weights='imagenet',
                              include_top=False)
            net_input, features = resnet.input, resnet.output
        elif base == 'VGG16':
            vgg = VGG16(input_shape=(im_shape[0], im_shape[1], 3),
                        include_top=False,
                        weights='imagenet')
            net_input, features = vgg.input, vgg.get_layer('block5_pool').output
        else:
            # Previously an unknown base fell through silently and crashed
            # later with an opaque NameError; fail fast instead.
            raise ValueError('Unsupported base network: {}'.format(base))

        vel = self.AnchorNet(features)
        self.model = Model(inputs=net_input, outputs=vel)
Esempio n. 14
0
 def resnet18_112_112(self):
     """ResNet18 on single-channel input with a flatten + dense regression head."""
     from classification_models.keras import Classifiers
     builder, _ = Classifiers.get('resnet18')
     # NOTE(review): the method name says 112x112 but the input is built at
     # 128x128 — confirm which size is intended.
     backbone = builder(input_shape=(128, 128, 1), include_top=False)
     head = Flatten(name='flat')(backbone.output)
     head = Dropout(0.5, name='drop1')(head)
     head = Dense(1024, activation='relu', name='dense1')(head)
     head = Dropout(0.5, name='drop2')(head)
     head = Dense(512, activation='relu', name='dense2')(head)
     head = Dense(self.nb_of_points, activation='sigmoid', name='dense3')(head)
     return Model(backbone.input, head)
Esempio n. 15
0
def get_model(backbone_name="xception"):
    """Build a 6-class dense-prediction model on the requested backbone.

    Args:
        backbone_name: 'xception', 'resnet18', 'resnet50' or 'efficientnetb3'.

    Returns:
        A Keras Model mapping the backbone input to a per-location softmax
        over 6 classes.

    Raises:
        ValueError: if *backbone_name* is not supported.
    """
    if backbone_name == "xception":
        backbone = Xception(weights='imagenet',
                            input_shape=(384, 384, 3),
                            include_top=False,
                            classes=6)
        tail_prev = backbone.get_layer('block13_pool').output
        inputs = backbone.input
        x = Lambda(lambda image: preprocess_input(image))(inputs)
        tail = backbone.output
        output = FinalModel(x, tail_prev, tail, backbone_name)
    elif backbone_name == "resnet18" or backbone_name == "resnet50":
        if backbone_name == "resnet18":
            ResNet18, _ = Classifiers.get('resnet18')
            backbone = ResNet18(weights='imagenet',
                                input_tensor=Input((384, 384, 3)),
                                include_top=False,
                                classes=6)
            tail_prev = backbone.get_layer('stage4_unit2_relu2').output
        elif backbone_name == "resnet50":
            backbone = ResNet50(weights='imagenet',
                                input_shape=(384, 384, 3),
                                include_top=False,
                                classes=6)
            tail_prev = backbone.get_layer('conv5_block3_2_relu').output
        inputs = backbone.input
        x = Lambda(lambda image: preprocess_input(image))(inputs)
        tail = backbone.output
        output = FinalModel(x, tail_prev, tail, backbone_name)
    elif backbone_name == "efficientnetb3":
        # BUG FIX: this branch was a separate `if`, so an unrecognized name
        # fell through to the head below with `inputs`/`output` undefined.
        backbone = EfficientNetB3(weights='imagenet',
                                  input_shape=(384, 384, 3),
                                  include_top=False,
                                  classes=6)
        tail_prev = backbone.get_layer('block7b_project_conv').output
        inputs = backbone.input
        x = Lambda(lambda image: preprocess_input(image))(inputs)
        tail = backbone.output
        output = FinalModel(x, tail_prev, tail, backbone_name)
    else:
        raise ValueError('Unsupported backbone: {}'.format(backbone_name))

    # Shared prediction head: 3x3 conv to 6 channels followed by softmax.
    x = Conv2D(
        filters=6,
        kernel_size=(3, 3),
        padding='same',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        name='final_conv',
    )(output)
    x = Activation("softmax", name="softmax")(x)
    return Model(inputs, x)
Esempio n. 16
0
def senet_model_fn(include_top=False, weights='imagenet', input_shape=None):
    """Return an SE-ResNeXt101 backbone from the Classifiers registry.

    Args:
        include_top: Whether to keep the ImageNet classification head.
        weights: Weight set to load ('imagenet' or None).
        input_shape: Optional (H, W, C) input-shape tuple.

    Returns:
        The instantiated Keras model.
    """
    model_cls, _ = Classifiers.get('seresnext101')
    return model_cls(input_shape=input_shape,
                     weights=weights,
                     include_top=include_top)
Esempio n. 17
0
def make_encoder(input, name='resnet50', pretrained=True):
	"""Instantiate a CNN encoder on *input*, optionally with ImageNet weights.

	Supported names: resnet18/50/101/152, vgg16, vgg19. Imports stay local
	so only the selected framework module is loaded.
	"""
	weights = 'imagenet' if pretrained else None
	if name == 'resnet18':
		from classification_models.keras import Classifiers
		encoder_cls, _ = Classifiers.get('resnet18')
	elif name == 'resnet50':
		from keras.applications.resnet import ResNet50 as encoder_cls
	elif name == 'resnet101':
		from keras.applications.resnet import ResNet101 as encoder_cls
	elif name == 'resnet152':
		from keras.applications.resnet import ResNet152 as encoder_cls
	elif name == 'vgg16':
		from keras.applications.vgg16 import VGG16 as encoder_cls
	elif name == 'vgg19':
		from keras.applications.vgg19 import VGG19 as encoder_cls
	else:
		raise Exception(f'unknown encoder {name}')

	# All encoders share the same constructor contract, so the call is hoisted.
	return encoder_cls(
		weights=weights,
		input_tensor=input,
		include_top=False
	)
Esempio n. 18
0
def train_model(args, train_gen, val_gen):
    """Train (or fine-tune) the selected model and return the best checkpoint.

    Args:
        args: Dict of run configuration (model, weights, loss, devices, ...).
            Mutated in place: 'log_dir', 'num_channels' and possibly 'alpha'
            are (re)assigned.
        train_gen: Training data generator.
        val_gen: Validation data generator (consumed by the Metrics callback).

    Returns:
        The best model reloaded from 'best_model.pt', recompiled.
    """
    os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(d) for d in args['devices'])

    args['log_dir'] = os.getcwd() + '/info/logs/malnet_tiny={}/group={}/color={}/pretrain={}/model={}_loss={}_alpha={}_reweight={}_beta={}/epochs={}/'.format(
        args['malnet_tiny'], args['group'], args['color_mode'], args['weights'], args['model'], args['loss'], args['alpha'], args['reweight'], args['reweight_beta'],  args['epochs'])
    os.makedirs(args['log_dir'], exist_ok=True)

    if args['color_mode'] == 'grayscale':
        args['num_channels'] = 1
    else:
        args['num_channels'] = 3

    input_shape = (args['im_size'], args['im_size'], args['num_channels'])

    # BUG FIX: was `args['weights'] is 'imagenet'` — identity comparison with
    # a string literal is interning-dependent (and a SyntaxWarning on modern
    # CPython); use value equality.
    if args['weights'] == 'imagenet':
        model = models[args['model']](include_top=False, weights=args['weights'], input_shape=input_shape, classes=args['num_classes'])
        model = build_transfer_model(args, model)
    else:
        if 'mobile' in args['model']:
            model = models[args['model']](weights=args['weights'], input_shape=input_shape, classes=args['num_classes'], alpha=args['alpha'])
        elif 'resnet18' in args['model']:
            args['alpha'] = 0
            model, _ = Classifiers.get(args['model'])
            model = model(weights=args['weights'], input_shape=input_shape, classes=args['num_classes'])
        else:
            args['alpha'] = 0
            model = models[args['model']](weights=args['weights'], input_shape=input_shape, classes=args['num_classes'])

    loss, class_weights = get_loss(args)
    model.compile(loss=loss, optimizer='adam', metrics=[])

    model.fit(
        train_gen,
        batch_size=args['batch_size'],
        steps_per_epoch=int(train_gen.samples / args['batch_size']),
        epochs=args['epochs'],
        class_weight=class_weights,
        callbacks=[Metrics(args, val_gen)],
        workers=multiprocessing.cpu_count(),
    )

    # Reload the best checkpoint written by the Metrics callback during fit.
    model = tf.keras.models.load_model(args['log_dir'] + 'best_model.pt', compile=False)
    model.compile(loss=loss, optimizer='adam', metrics=[])

    return model
Esempio n. 19
0
def load_model():
    """Populate the module-level MODEL/PREPROCESS_INPUT/session globals."""
    global MODEL, PREPROCESS_INPUT, GRAPH, SESS, INIT, MODEL_TO_FETCH
    t0 = time.time()
    logger.info(f'Loading model {MODEL_TO_FETCH}')
    SESS = tf.Session()
    GRAPH = tf.get_default_graph()
    set_session(SESS)
    init_model, PREPROCESS_INPUT = Classifiers.get(MODEL_TO_FETCH)
    # 331x331 input — presumably sized for the fetched model family; confirm
    # against MODEL_TO_FETCH before changing.
    MODEL = init_model(input_shape=(331, 331, 3),
                       weights='imagenet',
                       classes=1000)
    logger.info(f'Loaded model {MODEL_TO_FETCH}')
    logger.info(
        f'Time taken to load {MODEL_TO_FETCH} model is {time.time() - t0} Secs'
    )
Esempio n. 20
0
def convert_models():
    """Convert pretrained 2-D ImageNet weights to 3-D variants per backbone."""
    include_top = True
    target_channel = 0
    shape_size_3D = (64, 64, 64, 3)
    shape_size_2D = (224, 224, 3)
    backbones = [
        'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
        'seresnet18', 'seresnet34', 'seresnet50', 'seresnet101', 'seresnet152',
        'seresnext50', 'seresnext101', 'senet154', 'resnext50', 'resnext101',
        'vgg16', 'vgg19', 'densenet121', 'densenet169', 'densenet201',
        'mobilenet', 'mobilenetv2'
    ]
    for name in backbones:
        out_path = MODELS_PATH + 'converter/{}_inp_channel_{}_tch_{}_top_{}.h5'.format(
            name, shape_size_3D[-1], target_channel, include_top)
        # Skip backbones that were already converted on a previous run.
        if os.path.isfile(out_path):
            print('Already exists: {}!'.format(out_path))
            continue

        # Fresh, untrained 3-D model that will receive the converted weights.
        builder_3d, _ = Classifiers_3D.get(name)
        model3D = builder_3d(
            include_top=include_top,
            weights=None,
            input_shape=shape_size_3D,
            pooling='avg',
        )
        mem3d = get_model_memory_usage(1, model3D)
        print('Model 3D: {} Mem single: {:.2f}'.format(name, mem3d))

        # Pretrained 2-D source model.
        builder_2d, _ = Classifiers_2D.get(name)
        model2D = builder_2d(
            include_top=include_top,
            weights='imagenet',
            input_shape=shape_size_2D,
            pooling='avg',
        )
        mem2d = get_model_memory_usage(1, model2D)
        print('Model 2D: {} Mem single: {:.2f}'.format(name, mem2d))
        convert_weights(model2D,
                        model3D,
                        out_path,
                        target_channel=target_channel)
        # Free graph memory before the next backbone.
        K.clear_session()
Esempio n. 21
0
def KModel_resnet18_1x1conv(INPUT_SHAPE=(224, 224, 3),
                            OUTPUT_SHAPE=(10),
                            pretrained='imagenet',
                            top_layers=True):
    """ResNet18 variant whose conv kernels are shrunk to 1x1 with 'same' padding.

    NOTE(review): the *pretrained* argument is currently unused — the model is
    always built with weights=None; confirm whether it should be wired in.
    """
    builder, _ = Classifiers.get('resnet18')
    net = builder(input_shape=INPUT_SHAPE,
                  classes=OUTPUT_SHAPE,
                  weights=None,
                  include_top=top_layers)

    if top_layers is False:
        # Attach a minimal GAP + softmax head when the stock top is omitted.
        pooled = GlobalAveragePooling2D()(net.output)
        probs = Dense(OUTPUT_SHAPE, activation='softmax')(pooled)
        net = Model(inputs=[net.input], outputs=[probs])
    net = change_kernelsizes(net, target_kernel_size=(1, 1))
    net = change_padding(net, padding='same')
    return net
Esempio n. 22
0
    def build(height, width, depth, classes):
        """Assemble a pretrained ResNet18 classifier for the given geometry."""
        # Respect the backend's channel-ordering convention.
        if K.image_data_format() == "channels_last":
            input_shape = (height, width, depth)
        else:
            input_shape = (depth, height, width)

        # Pretrained backbone without the ImageNet head.
        resnet18, _ = Classifiers.get("resnet18")
        backbone = resnet18(input_shape=input_shape,
                            weights="imagenet", include_top=False)

        # GAP followed by a softmax classifier.
        pooled = GlobalAveragePooling2D()(backbone.output)
        logits = Dense(classes, kernel_initializer="he_normal")(pooled)
        probs = Activation("softmax")(logits)

        return Model(inputs=backbone.input, outputs=probs)
Esempio n. 23
0
    def __init__(self):
        """Build a model mapping raw JPEG byte strings to ResNet18 pool features."""
        from classification_models.keras import Classifiers
        from keras.models import Model
        from keras.layers import Input, Lambda
        import tensorflow as tf

        def decode_and_resize(raw_img):
            # Decode JPEG bytes, resize bilinearly, then centre-crop to 70%.
            img = tf.image.decode_jpeg(raw_img, channels=3)
            img = tf.image.resize_images(img, [224, 224],
                                         method=tf.image.ResizeMethod.BILINEAR,
                                         align_corners=False)
            return tf.image.central_crop(img, central_fraction=0.7)

        ResNet18, _ = Classifiers.get('resnet18')
        input_layer = Input(shape=(1,), dtype="string")
        decoded = Lambda(
            lambda batch: tf.map_fn(lambda item: decode_and_resize(item[0]),
                                    batch, dtype="float32"))(input_layer)
        raw_model = Model(input_layer, decoded)
        resnet18_model = ResNet18(input_tensor=raw_model.output, weights='imagenet')
        self.model = Model(resnet18_model.input,
                           resnet18_model.get_layer('pool1').output)
Esempio n. 24
0
def class_model(preprocess_type, input_size, pretrained_weights):
    """Binary classifier on a registry backbone, optionally restoring weights.

    Args:
        preprocess_type: Backbone name passed to ``Classifiers.get``.
        input_size: Input-shape tuple for the backbone.
        pretrained_weights: Optional weights path; loaded when truthy.

    Returns:
        The compiled Keras model.
    """
    builder, _ = Classifiers.get(preprocess_type)

    backbone = builder(input_shape=input_size,
                       weights='imagenet',
                       include_top=False)
    pooled = keras.layers.GlobalAveragePooling2D()(backbone.output)
    prob = keras.layers.Dense(1, activation='sigmoid')(pooled)
    model = keras.models.Model(inputs=[backbone.input], outputs=[prob])

    model.compile(optimizer=keras.optimizers.Adam(lr=1e-3),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    model.summary()

    if (pretrained_weights):
        model.load_weights(pretrained_weights)

    return model
Esempio n. 25
0
def base_model():
    """ResNet18 backbone with a small dense stack ending in a 2-D linear output."""
    Resnet18, _ = Classifiers.get('resnet18')
    image_in = keras.layers.Input(shape=(None, None, 3))

    backbone = Resnet18(input_tensor=image_in,
                        weights='imagenet',
                        include_top=False)

    feats = keras.layers.GlobalAveragePooling2D()(backbone.output)
    feats = keras.layers.Dense(64, activation='relu', name='Middle_Dense_1')(feats)
    feats = keras.layers.Dense(32, activation='relu', name='Middle_Dense_2')(feats)
    feats = keras.layers.Dense(16, activation='relu', name='Middle_Dense_3')(feats)
    regression = keras.layers.Dense(2,
                                    activation='linear',
                                    name='regression_layer')(feats)

    model = keras.models.Model(inputs=[backbone.input], outputs=[regression])

    # Explicitly mark the whole network (backbone included) as trainable.
    for layer in model.layers:
        layer.trainable = True
    model.summary()
    return model
Esempio n. 26
0
 def __init__(self, backbone):
     """Initialize the parent backbone wrapper and cache its preprocess fn.

     Args:
         backbone: Backbone identifier forwarded to the parent class; also
             used (via ``self.backbone``) to look up the preprocess function.
     """
     super(SeBackbone, self).__init__(backbone)
     # Discard the model builder; only the preprocessing function is kept.
     _, self.preprocess_image_func = Classifiers.get(self.backbone)
Esempio n. 27
0
        mask_bg = np.zeros(shape=(math.ceil((w - h) / 2), w), dtype=np.uint8)
        # image = cv2.vconcat([image_bg, image, image_bg])
        mask = cv2.vconcat([mask_bg, mask, mask_bg])

    image = cv2.resize(image, (320, 320))
    mask = cv2.resize(mask, (320, 320))

    return image, mask


if MODEL_TYPE == 'classification':
    from classification_models.keras import Classifiers
    from keras.layers import GlobalAveragePooling2D, Dense
    from keras.models import Model

    base, preprocess_input = Classifiers.get(BACKBONE)

    n_classes = 4

    # build model
    base_model = base(input_shape=(320, 320, 3), weights='imagenet', include_top=False)

    for layer in base_model.layers:
        layer.trainable = True

    global_avrg_pool = GlobalAveragePooling2D()(base_model.output)
    fc_1 = Dense(1024, activation='relu', name='fc_1')(global_avrg_pool)
    predictions = Dense(n_classes, activation='softmax', name='predictions')(fc_1)
    model = Model(inputs=base_model.input, outputs=predictions)

    if BACKBONE == 'densenet121':
Esempio n. 28
0

def ResNet():
    """Project the global base_model's features down to a 512-d z-vector."""
    feats = keras.layers.GlobalAveragePooling2D()(base_model.output)
    feats = keras.layers.Dense(2048)(feats)
    feats = keras.layers.Dense(1024)(feats)
    z_vector = keras.layers.Dense(512)(feats)
    return keras.models.Model(inputs=[base_model.input], outputs=[z_vector])


# Cache TF-Hub downloads in the working directory.
os.environ['TFHUB_CACHE_DIR'] = './'
# Pretrained ProGAN (128px) generator signature from TF-Hub.
generator = hub.load("http://tfhub.dev/google/progan-128/1").signatures['default']

"""Building the model"""
ResNet18, preprocess_input = Classifiers.get('resnet18')
base_model = ResNet18(input_shape=(224, 224, 3), weights=None, include_top=False)
resnet_model = ResNet()


optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

""" Uncomment below if you want to test your checkpoints"""
# checkpoint_dir = './checkpoints/training_checkpoints4/'
# checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
# checkpoint = tf.train.Checkpoint(optimizer=optimizer,
#                                  Resnet=resnet_model)
# # restoring the latest checkpoint in checkpoint_dir
# checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))

# NOTE(review): this overwrites the freshly built resnet_model above with a
# saved checkpoint — confirm the rebuild above is intentional.
resnet_model = tf.keras.models.load_model('saved_model/resnet_model_chkp25')
Esempio n. 29
0
from classification_models.keras import Classifiers
from keras.preprocessing import image
from keras.applications.imagenet_utils import decode_predictions
import numpy as np
from keras.models import Model
from keras.layers import Input, Lambda
import tensorflow as tf
from keras import backend as K

# Fetch the ResNet18 model builder; the preprocess function is unused here.
ResNet18, _ = Classifiers.get('resnet18')


def preprocess_and_decode(raw_img):
    """Decode a JPEG byte string and bilinearly resize it to 224x224."""
    decoded = tf.image.decode_jpeg(raw_img, channels=3)
    return tf.image.resize_images(decoded, [224, 224],
                                  method=tf.image.ResizeMethod.BILINEAR,
                                  align_corners=False)


# Front-end model: input is a batch of raw JPEG byte strings; the Lambda
# decodes and resizes each element into a 224x224 float image.
input_layer = Input(shape=(1, ), dtype="string")
output_layer = Lambda(lambda img: tf.map_fn(
    lambda im: preprocess_and_decode(im[0]), img, dtype="float32"))(
        input_layer)
raw_model = Model(input_layer, output_layer)

raw_model.summary()

# Chain the decoded images into an ImageNet-pretrained ResNet18.
resnet18_model = ResNet18(input_tensor=raw_model.output, weights='imagenet')
# 'pool1' — presumably the global pooling layer before the classifier head;
# confirm against the resnet18 layer listing.
avgpooling_layer = resnet18_model.get_layer('pool1').output
def perform_lesion_experiment_imagenet(
        network,
        num_clusters=10,
        num_shuffles=10,
        with_random=True,
        downsampled=False,
        eigen_solver='arpack',
        batch_size=32,
        data_dir='/project/clusterability_in_neural_networks/datasets/imagenet2012',
        val_tar='ILSVRC2012_img_val.tar',
        downsampled_n_samples=10000):
    """Lesion each cluster of *network* and measure the ImageNet accuracy impact.

    Clusters the network's units, lesions each cluster in turn, and compares
    the resulting per-class accuracies with the intact model. When
    *with_random* is True, the lesioning is repeated with shuffled cluster
    assignments *num_shuffles* times to derive percentile/p-value statistics.

    Args:
        network: Classifiers-registry network name (must not be 'inceptionv3').
        num_clusters: Number of clusters for the clustering step.
        num_shuffles: Number of random shufflings for the null distribution.
        with_random: Whether to also run shuffled-control lesion tests.
        downsampled: If True, evaluate on a small ndarray subset rather than
            the full 50k-image validation set.
        eigen_solver: Eigen-solver forwarded to run_clustering_imagenet.
        batch_size: Batch size used in the downsampled path (set to None for
            the generator path).
        data_dir: Directory holding the ImageNet validation tfrecords.
        val_tar: Validation tarball name, used if tfrecords must be prepared.
        downsampled_n_samples: Sample count for the downsampled path.

    Returns:
        (evaluation, results): ndarray of per-class accuracies for the intact
        model, and a dict keyed by layer then cluster with lesion statistics.
    """

    assert network != 'inceptionv3', 'This function does not yet support inceptionv3'

    net, preprocess = Classifiers.get(
        network)  # get network object and preprocess fn
    model = net((224, 224, 3),
                weights='imagenet')  # get network tf.keras.model

    # Prepare the validation tfrecords on first use.
    data_path = Path(data_dir)
    tfrecords = list(data_path.glob('*validation.tfrecord*'))
    if not tfrecords:
        prep_imagenet_validation_data(data_dir, val_tar)  # this'll take a sec
    imagenet = tfds.image.Imagenet2012()  # dataset builder object
    imagenet._data_dir = data_dir
    val_dataset_object = imagenet.as_dataset(
        split='validation')  # datast object
    # assert isinstance(val_dataset_object, tf.data.Dataset)

    if downsampled:
        # get the ssmall dataset as an np.ndarray
        dataset, y = imagenet_downsampled_dataset(
            val_dataset_object, preprocess, n_images=downsampled_n_samples)
        steps = None
        val_set_size = downsampled_n_samples

    else:
        dataset = imagenet_generator(val_dataset_object, preprocess)
        val_set_size = 50000
        steps = val_set_size // 250  # use batch_size of 250
        y = []  # to become an ndarray of true labels
        # Drain the generator once to collect the ground-truth labels.
        for _ in range(steps):
            _, logits = next(dataset)
            y.append(np.argmax(logits, axis=-1))
        y = np.concatenate(y)
        batch_size = None

    # get info from clustering
    clustering_results = run_clustering_imagenet(network,
                                                 num_clusters=num_clusters,
                                                 with_shuffle=False,
                                                 eigen_solver=eigen_solver)
    labels = clustering_results['labels']
    connections = clustering_results[
        'conv_connections']  # just connections for conv layers
    layer_widths = [cc[0]['weights'].shape[0]
                    for cc in connections[1:]]  # skip first conv layer
    dense_sizes = get_dense_sizes(connections)
    layer_widths.extend(list(dense_sizes.values()))
    labels_in_layers = list(splitter(labels, layer_widths))

    # Baseline predictions of the intact model.
    y_pred = np.argmax(model.predict(dataset,
                                     steps=steps,
                                     batch_size=batch_size),
                       axis=-1)
    # The generator was consumed by predict; rebuild it for the lesion tests.
    if not isinstance(dataset, np.ndarray):
        dataset = imagenet_generator(val_dataset_object, preprocess)
    evaluation = _get_classification_accs_imagenet(
        y, y_pred)  # an ndarray of all 1000 class accs

    # next get true accs and label bincounts for the 1000 classes
    accs_true, class_props_true, cluster_sizes = lesion_test_imagenet(
        model,
        dataset,
        y,
        labels_in_layers,
        num_clusters,
        steps,
        batch_size,
        val_dataset_object,
        preprocess,
        num_samples=1)
    accs_true = accs_true[0]  # it's a 1 element list, so just take the first
    class_props_true = class_props_true[0]  # same as line above

    if not with_random:

        # make and return a dict with a keys giving sub modules and values giving
        # num shuffles, overall acc, and class accs

        results = {}
        for layer_key in accs_true.keys():
            results[layer_key] = {}
            for cluster_key in accs_true[layer_key].keys():
                sm_results = {}
                true_accs = accs_true[layer_key][cluster_key]
                sm_results['num_shuffles'] = num_shuffles
                sm_results['overall_acc'] = np.mean(true_accs)
                sm_results['class_accs'] = true_accs
                results[layer_key][cluster_key] = sm_results

        return evaluation, results

    else:

        # perform random lesion tests num_shuffles times

        # get random results
        all_acc_random, all_class_props, _ = lesion_test_imagenet(
            model,
            dataset,
            y,
            labels_in_layers,
            num_clusters,
            steps,
            batch_size,
            val_dataset_object,
            preprocess,
            num_shuffles,
            shuffle=True)

        # make and return a dict with a keys giving sub modules and values giving
        # stats about true labels, shufflings, and p values for hypothesis tests

        results = {}
        for layer_key in accs_true.keys():
            results[layer_key] = {}
            for cluster_key in accs_true[layer_key].keys():

                sm_results = {}

                true_accs = accs_true[layer_key][cluster_key]
                # Stack the shuffled runs into (num_shuffles, n_classes).
                random_accs = np.vstack([
                    all_acc_random[i][layer_key][cluster_key]
                    for i in range(num_shuffles)
                ])
                overall_acc = np.mean(true_accs)
                overall_random_accs = np.mean(random_accs, axis=1)
                overall_acc_percentile = compute_pvalue(
                    overall_acc, overall_random_accs)
                overall_acc_effect_factor = np.mean(
                    overall_random_accs) / overall_acc

                # Spread (peak-to-peak) of mean-normalized accuracy changes,
                # compared between true and shuffled lesions.
                random_changes = random_accs - evaluation
                normalized_random_changes = (
                    random_changes.T / np.mean(random_changes, axis=-1)).T
                random_range_normalized_changes = np.ptp(
                    normalized_random_changes, axis=-1)
                true_changes = true_accs - evaluation
                normalized_true_changes = true_changes / np.mean(true_changes)
                true_range_normalized_changes = np.ptp(normalized_true_changes)
                range_percentile = compute_pvalue(
                    true_range_normalized_changes,
                    random_range_normalized_changes,
                    side='right')
                range_effect_factor = np.mean(random_range_normalized_changes
                                              ) / true_range_normalized_changes

                sm_results['cluster_size'] = cluster_sizes[layer_key][
                    cluster_key]
                sm_results['acc'] = overall_acc
                sm_results['acc_percentile'] = overall_acc_percentile
                sm_results[
                    'overall_acc_effect_factor'] = overall_acc_effect_factor
                sm_results['range'] = true_range_normalized_changes
                sm_results['range_percentile'] = range_percentile
                sm_results['range_effect_factor'] = range_effect_factor

                results[layer_key][cluster_key] = sm_results

        return evaluation, results