Example #1
def get_efficientnet_model(model_version):
    if model_version == "B0": return efn.EfficientNetB0(weights='imagenet')
    elif model_version == "B1": return efn.EfficientNetB1(weights='imagenet')
    elif model_version == "B2": return efn.EfficientNetB2(weights='imagenet')
    elif model_version == "B3": return efn.EfficientNetB3(weights='imagenet')
    elif model_version == "B4": return efn.EfficientNetB4(weights='imagenet')
    elif model_version == "B5": return efn.EfficientNetB5(weights='imagenet')
    elif model_version == "B6": return efn.EfficientNetB6(weights='imagenet')
    elif model_version == "B7": return efn.EfficientNetB7(weights='imagenet')
    else: return efn.EfficientNetB0(weights='imagenet')
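A minimal usage sketch for the helper above, assuming the efficientnet package (pip install efficientnet) is imported as efn, as in the later examples; the chosen version string is illustrative:

# Sketch only: requires network access to download the ImageNet weights.
import efficientnet.tfkeras as efn

model = get_efficientnet_model("B3")  # any unknown version falls back to B0
print(model.name, model.count_params())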
Example #2
    def __create_model(self):
        inputs = tf.keras.layers.Input(shape=(self.im_height, self.im_width, 3))

        if self.backbond == "SHUFFLE_NET_V2":
            feature = ShuffleNetv2(self.class_num)(inputs)
            feature = tf.keras.layers.Flatten()(feature)
        elif self.backbond == "EFFICIENT_NET_B0":
            efn_backbond = efn.EfficientNetB0(weights='imagenet', include_top=False, input_shape=(self.im_height, self.im_width, 3))
            efn_backbond.trainable = False
            feature = efn_backbond(inputs)
            feature = tf.keras.layers.Flatten()(feature)
            feature = tf.keras.layers.Dense(1024, activation='relu')(feature)
        else:
            raise ValueError('Unknown backbone! Please check the backbone setting in the config file.')

        fc_yaw = tf.keras.layers.Dense(name='yaw', units=self.class_num)(feature)
        fc_pitch = tf.keras.layers.Dense(name='pitch', units=self.class_num)(feature)
        fc_roll = tf.keras.layers.Dense(name='roll', units=self.class_num)(feature)

        fc_1_landmarks = tf.keras.layers.Dense(512, activation='relu', name='fc_landmarks')(feature)
        fc_2_landmarks = tf.keras.layers.Dense(10, name='landmarks')(fc_1_landmarks)
    
        model = tf.keras.Model(inputs=inputs, outputs=[fc_yaw, fc_pitch, fc_roll, fc_2_landmarks])
        
        losses = {
            'yaw':self.__loss_angle,
            'pitch':self.__loss_angle,
            'roll':self.__loss_angle,
            'landmarks':'mean_squared_error'
        }

        model.compile(optimizer=tf.optimizers.Adam(self.learning_rate),
                        loss=losses, loss_weights=self.loss_weights)
       
        return model
Example #3
def build_backbone_net_graph(input_tensor, architecture, weights=None):
    """
    Build basic feature extraction networks.
    :param input_tensor: Input of the basic networks, should be a tensor or tf.keras.layers.Input
    :param architecture: The architecture name of the basic network.
    :param weights: Which pre-trained weights to load. Can be 'imagenet' (pre-training on ImageNet),
                    'noisy-student', None (random initialization),
                    or the path to a weights file to be loaded.
    :return: The EfficientNet backbone model.
    """
    assert architecture in ['efficientnet-b0', 'efficientnet-b1',
                            'efficientnet-b2', 'efficientnet-b3',
                            'efficientnet-b4', 'efficientnet-b5',
                            'efficientnet-b6', 'efficientnet-b7',
                            'efficientnet-l2']

    if architecture == 'efficientnet-b0':
        return efn.EfficientNetB0(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b1':
        return efn.EfficientNetB1(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b2':
        return efn.EfficientNetB2(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b3':
        return efn.EfficientNetB3(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b4':
        return efn.EfficientNetB4(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b5':
        return efn.EfficientNetB5(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b6':
        return efn.EfficientNetB6(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-b7':
        return efn.EfficientNetB7(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    elif architecture == 'efficientnet-l2':
        return efn.EfficientNetL2(include_top=False, weights=weights,
                                  input_tensor=input_tensor,
                                  input_shape=[None, None, 3])
    else:
        raise ValueError("Argument architecture should in "
                         "[efficientnet-b0, efficientnet-b1, "
                         "efficientnet-b2, efficientnet-b3, efficientnet-b4, efficientnet-b5, "
                         "efficientnet-b7, efficientnet-b7, efficientnet-l2] "
                         "but get %s" % architecture)
Example #4
    def create_efficientNet_b0(self, inp_shape, input_tensor, output_len):
        initializer = tf.keras.initializers.he_uniform()

        eff_net = efn.EfficientNetB0(include_top=True,
                                     weights=None,
                                     input_tensor=input_tensor,
                                     input_shape=inp_shape,
                                     pooling=None)  #,
        # classes=output_len)  # or weights='noisy-student'
        eff_net.layers.pop()  # note: popping .layers does not alter the graph; the head is rebuilt below from 'top_activation'
        # eff_net.summary()

        inp = eff_net.input

        x = eff_net.get_layer('top_activation').output
        x = GlobalAveragePooling2D()(x)
        x = keras.layers.Dropout(rate=0.5)(x)
        # x = keras.layers.Dense(2 * output_len, activation='relu', use_bias=True, kernel_initializer=initializer)(x)

        output = Dense(output_len,
                       activation='linear',
                       use_bias=True,
                       name='out',
                       kernel_initializer=initializer)(x)

        eff_net = Model(inp, output)
        eff_net.summary()

        model_json = eff_net.to_json()
        with open("eff_net_b0.json", "w") as json_file:
            json_file.write(model_json)

        return eff_net
Example #5
def get_efn0(name='efn0'):
    efn0 = efn.EfficientNetB0(input_shape=(32, 32, 3),
                              include_top=False,
                              weights='noisy-student',
                              pooling='max')
    for layer in efn0.layers:
        layer._name = f'{layer._name}_{name}'
    return efn0
Example #6
 def test_custom(self):
     from efficientnet import tfkeras as efn
     keras.backend.set_learning_phase(0)
     base_model = efn.EfficientNetB0(input_shape=(600, 600, 3), weights=None)
     backbone = keras.Model(base_model.input, base_model.get_layer("top_activation").output)
     res = run_image(backbone, self.model_files, img_path, target_size=(600, 600),
                     rtol=1e-2, atol=1e-1, tf_v2=True)
     self.assertTrue(*res)
Example #7
def build_model(dim=224):
    inp = tf.keras.layers.Input(shape=(dim, dim, 3))
    base = efn.EfficientNetB0(input_shape=(dim, dim, 3),
                              weights='imagenet',
                              include_top=False)
    x = base(inp)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    model = tf.keras.Model(inputs=inp, outputs=x)
    return model
Example #8
def effnet_retinanet(num_classes, backbone='EfficientNetB0', inputs=None, modifier=None, **kwargs):
    """ Constructs a retinanet model using a resnet backbone.

    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use (one of ('resnet50', 'resnet101', 'resnet152')).
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example).

    Returns
        RetinaNet model with a ResNet backbone.
    """
    # choose default input
    if inputs is None:
        if keras.backend.image_data_format() == 'channels_first':
            inputs = keras.layers.Input(shape=(3, None, None))
        else:
            # inputs = keras.layers.Input(shape=(224, 224, 3))
            inputs = keras.layers.Input(shape=(None, None, 3))

    # get last conv layer from the end of each block [28x28, 14x14, 7x7]
    if backbone == 'EfficientNetB0':
        model = efn.EfficientNetB0(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB1':
        model = efn.EfficientNetB1(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB2':
        model = efn.EfficientNetB2(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB3':
        model = efn.EfficientNetB3(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB4':
        model = efn.EfficientNetB4(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB5':
        model = efn.EfficientNetB5(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB6':
        model = efn.EfficientNetB6(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB7':
        model = efn.EfficientNetB7(input_tensor=inputs, include_top=False, weights=None)
    else:
        raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))

    layer_outputs = ['block4a_expand_activation', 'block6a_expand_activation', 'top_activation']

    layer_outputs = [
        model.get_layer(name=layer_outputs[0]).output,  # 28x28
        model.get_layer(name=layer_outputs[1]).output,  # 14x14
        model.get_layer(name=layer_outputs[2]).output,  # 7x7
    ]
    # create the efficientnet backbone
    model = keras.Model(inputs=inputs, outputs=layer_outputs, name=model.name)

    # invoke modifier if given
    if modifier:
        model = modifier(model)

    # create the full model
    return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=model.outputs, **kwargs)
Example #9
def efficientnet_b0():
    inp = tf.keras.layers.Input(shape=(224, 224, 3))
    base = efn.EfficientNetB0(input_shape=(224, 224, 3),
                              weights='imagenet',
                              include_top=False)
    x = base(inp)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)
    return model
Example #10
    def efficientnetb0(self, input_shape, **kwargs):
        # x_input = tf.keras.Input(shape=input_shape, name="triplet")

        m = efn.EfficientNetB0(input_shape=(256, 256, 3), weights=None)
        # remove the output layer, leave the feature extraction part
        m_fe = tf.keras.Model(inputs=m.inputs, outputs=m.layers[-2].output)
        output = tf.keras.layers.Dense(1, activation="sigmoid")(m_fe.output)
        m = tf.keras.Model(inputs=m_fe.inputs, outputs=output)

        return m
Example #11
File: models.py  Project: makamoa/alfred
 def effnet(self):
     conv_base = efn.EfficientNetB0(weights=None, input_shape=self.input_shape, include_top=False,
                                    pooling='avg')  # or weights='noisy-student'
     x = Flatten(name='flat')(conv_base.output)
     x = Dropout(0.5, name='drop1')(x)
     x = Dense(1024, activation='relu', name='dense1')(x)
     x = Dropout(0.5, name='drop2')(x)
     x = Dense(512, activation='relu', name='dense2')(x)
     x = Dense(self.nb_of_points, activation='sigmoid',name='dense3')(x)
     model = Model(conv_base.input, x)
     return model
Example #12
    def __init__(self, hparams):
        super(InputEmbedding, self).__init__()
        self.hparams = hparams
        if hparams.base_model_name == 'InceptionV3':
            base_model = tf.keras.applications.InceptionV3(include_top=False,
                                                           weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'InceptionResNetV2':
            base_model = tf.keras.applications.InceptionResNetV2(
                include_top=False, weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB0':
            base_model = efn.EfficientNetB0(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB1':
            base_model = efn.EfficientNetB1(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB2':
            base_model = efn.EfficientNetB2(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB3':
            base_model = efn.EfficientNetB3(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB4':
            base_model = efn.EfficientNetB4(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB5':
            base_model = efn.EfficientNetB5(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB6':
            base_model = efn.EfficientNetB6(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]
        elif hparams.base_model_name == 'EfficientNetB7':
            base_model = efn.EfficientNetB7(include_top=False,
                                            weights='imagenet')
            base_model_layers = [layer.name for layer in base_model.layers]

        assert hparams.end_point in base_model_layers, "no {} layer in {}".format(
            hparams.end_point, hparams.base_model_name)
        conv_tower_output = base_model.get_layer(hparams.end_point).output
        self.conv_model = tf.keras.models.Model(inputs=base_model.input,
                                                outputs=conv_tower_output)
        self.conv_out_shape = self.conv_model.predict(
            np.array([np.zeros(hparams.image_shape)])).shape
        self.encode_cordinate = EncodeCordinate(
            input_shape=self.conv_out_shape)
Example #13
 def get_efficientnet(self):
     models_dict ={
         'b0': efn.EfficientNetB0(input_shape=self.shape,weights=None,include_top=False),
         'b1': efn.EfficientNetB1(input_shape=self.shape,weights=None,include_top=False),
         'b2': efn.EfficientNetB2(input_shape=self.shape,weights=None,include_top=False),
         'b3': efn.EfficientNetB3(input_shape=self.shape,weights=None,include_top=False),
         'b4': efn.EfficientNetB4(input_shape=self.shape,weights=None,include_top=False),
         'b5': efn.EfficientNetB5(input_shape=self.shape,weights=None,include_top=False),
         'b6': efn.EfficientNetB6(input_shape=self.shape,weights=None,include_top=False),
         'b7': efn.EfficientNetB7(input_shape=self.shape,weights=None,include_top=False)
     }
     return models_dict[self.model_class]
Example #14
    def __init__(self):
        # Create the base model from the pre-trained EfficientNet
        model_full = efn.EfficientNetB0(input_shape=(self.IMG_SIZE,
                                                     self.IMG_SIZE, 3),
                                        include_top=False,
                                        weights="imagenet")
        model_full.trainable = False

        self.model = tf.keras.Model(
            model_full.inputs,
            model_full.get_layer(self.LAYER_NAME).output)
        self.model.trainable = False
Example #15
    def create_model_effnetb0(self, input_shape=None):
        # {{{
        if input_shape is None:
            input_shape = (512, 512, 3)

        model = tf.keras.Sequential([
            efn.EfficientNetB0(input_shape=input_shape,
                               weights='imagenet',
                               include_top=False),
            L.GlobalAveragePooling2D(),
            L.Dense(2, activation='softmax')
        ])
        return model
Example #16
def create_b0(include_top=False,
              input_shape=None,
              input_tensor=None,
              weights="noisy-student"):
    """ネットワークの作成。"""
    import efficientnet.tfkeras as efn

    return efn.EfficientNetB0(
        include_top=include_top,
        input_shape=input_shape,
        input_tensor=input_tensor,
        weights=weights,
    )
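A usage sketch for create_b0; the input size and random initialization are illustrative:

# Sketch only: a headless B0 feature extractor with randomly initialized weights.
backbone = create_b0(include_top=False, input_shape=(224, 224, 3), weights=None)
print(backbone.output_shape)  # (None, 7, 7, 1280) for a 224x224 input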
Example #17
    def create_efficientNet(self,
                            inp_shape,
                            input_tensor,
                            output_len,
                            is_teacher=True):
        if is_teacher:  # for teacher we use a heavier network
            eff_net = efn.EfficientNetB3(include_top=True,
                                         weights=None,
                                         input_tensor=None,
                                         input_shape=[
                                             InputDataSize.image_input_size,
                                             InputDataSize.image_input_size, 3
                                         ],
                                         pooling=None,
                                         classes=output_len)
            # return self._create_efficientNet_3deconv(inp_shape, input_tensor, output_len)
        else:  # for student we use the small network
            eff_net = efn.EfficientNetB0(
                include_top=True,
                weights=None,
                input_tensor=None,
                input_shape=inp_shape,
                pooling=None,
                classes=output_len)  # or weights='noisy-student'

        eff_net.layers.pop()
        inp = eff_net.input

        x = eff_net.get_layer('top_activation').output
        x = GlobalAveragePooling2D()(x)
        x = keras.layers.Dropout(rate=0.5)(x)
        output = Dense(output_len, activation='linear', name='out')(x)

        eff_net = Model(inp, output)

        eff_net.summary()

        # plot_model(eff_net, to_file='eff_net.png', show_shapes=True, show_layer_names=True)

        # tf.keras.utils.plot_model(
        #     eff_net,
        #     to_file="eff_net.png",
        #     show_shapes=False,
        #     show_layer_names=True,
        #     rankdir="TB"
        # )

        # model_json = eff_net.to_json()
        # with open("eff_net.json", "w") as json_file:
        #     json_file.write(model_json)
        return eff_net
Example #18
def build_model(img_height, img_width, n):
    inp = Input(shape=(img_height,img_width,n))
    efnet = efn.EfficientNetB0(
        input_shape=(img_height,img_width,n), 
        weights='imagenet', 
        include_top=False
    )
    x = efnet(inp)
    x = GlobalAveragePooling2D()(x)
    x = Dense(2, activation='softmax')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)
    opt = tf.keras.optimizers.Adam(learning_rate=0.000003)
    loss = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.01)
    model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
    return model
Example #19
    def getEffModel1(self, i):
        modelInput = tf.keras.Input(
            batch_input_shape=(None, 5, self.config['net_size'],
                               self.config['net_size'], 3),
            name=f"imgInput{i}")
        modelInput0, modelInput1, modelInput2, modelInput3, modelInput4 = tf.split(
            modelInput, [1, 1, 1, 1, 1], 1)
        x0 = tf.squeeze(tf.keras.layers.Lambda(lambda x0: x0)(modelInput0))
        x1 = tf.squeeze(tf.keras.layers.Lambda(lambda x1: x1)(modelInput1))
        x2 = tf.squeeze(tf.keras.layers.Lambda(lambda x2: x2)(modelInput2))
        x3 = tf.squeeze(tf.keras.layers.Lambda(lambda x3: x3)(modelInput3))
        x4 = tf.squeeze(tf.keras.layers.Lambda(lambda x4: x4)(modelInput4))

        net = efn.EfficientNetB0(include_top=False,
                                 weights='imagenet',
                                 input_shape=(self.config['net_size'],
                                              self.config['net_size'], 3),
                                 pooling='avg')
        net._name = f"efficientnet-b0-{i}"

        activation = "relu"
        activation = tf.keras.layers.LeakyReLU()

        ret0 = net(x0)
        self.config['rnn_size'] = 256
        ret0 = Dense(self.config['rnn_size'], activation=activation)(ret0)
        ret0 = Dropout(self.config['dropout'])(ret0)
        ret1 = net(x1)
        ret1 = Dense(self.config['rnn_size'], activation=activation)(ret1)
        ret1 = Dropout(self.config['dropout'])(ret1)
        ret2 = net(x2)
        ret2 = Dense(self.config['rnn_size'], activation=activation)(ret2)
        ret2 = Dropout(self.config['dropout'])(ret2)
        ret3 = net(x3)
        ret3 = Dense(self.config['rnn_size'], activation=activation)(ret3)
        ret3 = Dropout(self.config['dropout'])(ret3)
        ret4 = net(x4)
        ret4 = Dense(self.config['rnn_size'], activation=activation)(ret4)
        ret4 = Dropout(self.config['dropout'])(ret4)
        print(ret4)
        ret = tf.concat([ret0, ret1, ret2, ret3, ret4], axis=1)
        x = tf.keras.layers.Dense(self.config['rnn_size'] * 5,
                                  activation=activation)(ret)
        # x = tf.keras.layers.Dense(512, activation=activation)(x)
        # outputs = Dense(self.config['num_class'], activation="softmax")(x)

        model = tf.keras.Model(modelInput, x, name=f'effNetwork{i}')
        return model
Example #20
    def getEffTFModel(self, n=0):
        modelInput = tf.keras.Input(batch_input_shape=(None, 5, self.config['net_size'], self.config['net_size'], 3))
        modelInput0, modelInput1, modelInput2, modelInput3, modelInput4 = tf.split(modelInput, [1, 1, 1, 1, 1], 1)
        x0 = tf.squeeze(tf.keras.layers.Lambda(lambda x0: x0)(modelInput0))
        x1 = tf.squeeze(tf.keras.layers.Lambda(lambda x1: x1)(modelInput1))
        x2 = tf.squeeze(tf.keras.layers.Lambda(lambda x2: x2)(modelInput2))
        x3 = tf.squeeze(tf.keras.layers.Lambda(lambda x3: x3)(modelInput3))
        x4 = tf.squeeze(tf.keras.layers.Lambda(lambda x4: x4)(modelInput4))
        net = ''
        if n % 10 == 0:
            net = efn.EfficientNetB0(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 1:
            net = efn.EfficientNetB1(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 2:
            net = efn.EfficientNetB2(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 3:
            net = efn.EfficientNetB3(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        elif n % 10 == 4:
            net = efn.EfficientNetB4(include_top=False, weights='imagenet', input_shape=(self.config['net_size'], self.config['net_size'], 3), pooling='avg')
        else:
            raise ValueError('Unsupported backbone index: %d' % (n % 10))

        activation = tf.keras.layers.LeakyReLU()

        self.config['rnn_size'] = 256
        ret0 = net(x0)
        ret0 = Dense(self.config['rnn_size'], activation=activation)(ret0)
        ret0 = tf.expand_dims(ret0, axis=1)
        ret0 = self.transformer(ret0)
        ret1 = net(x1)
        ret1 = Dense(self.config['rnn_size'], activation=activation)(ret1)
        ret1 = tf.expand_dims(ret1, axis=1)
        ret1 = self.transformer(ret1)
        ret2 = net(x2)
        ret2 = Dense(self.config['rnn_size'], activation=activation)(ret2)
        ret2 = tf.expand_dims(ret2, axis=1)
        ret2 = self.transformer(ret2)
        ret3 = net(x3)
        ret3 = Dense(self.config['rnn_size'], activation=activation)(ret3)
        ret3 = tf.expand_dims(ret3, axis=1)
        ret3 = self.transformer(ret3)
        ret4 = net(x4)
        ret4 = Dense(self.config['rnn_size'], activation=activation)(ret4)
        ret4 = tf.expand_dims(ret4, axis=1)
        ret4 = self.transformer(ret4)
        ret = tf.concat([ret0, ret1, ret2, ret3, ret4], axis=1)
        print(ret)
        x = tf.keras.layers.Dense(self.config['rnn_size'], activation=activation)(ret)
        model = tf.keras.Model(modelInput, x)
        return model
Example #21
def EffnetB0(input_shape=None):

    # pip install efficientnet
    import efficientnet.tfkeras as efn

    if input_shape is None:
        input_shape = (512, 512, 3)

    model = tf.keras.Sequential([
        efn.EfficientNetB0(input_shape=input_shape,
                           weights='imagenet',
                           include_top=False),
        L.GlobalAveragePooling2D(),
        L.Dense(2, activation='softmax')
    ])
    return model
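The Sequential wrapper above is returned uncompiled; a hedged sketch of compiling it for two-class training (the optimizer and loss below are illustrative, not from the source):

# Sketch only: downloads the ImageNet weights when the model is built.
model = EffnetB0(input_shape=(512, 512, 3))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()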
Example #22
 def __init__(self, snapshot=None):
     base_model = efn.EfficientNetB0(include_top=False,
                                     input_shape=(224, 224, 3))
     out = base_model.output
     out = keras.layers.GlobalAveragePooling2D()(out)
     fc_yaw = keras.layers.Dense(name='yaw_new', units=120)(
         out)  # 3 * 120 = 360 degrees in yaw
     fc_pitch = keras.layers.Dense(name='pitch_new', units=66)(out)
     fc_roll = keras.layers.Dense(name='roll_new', units=66)(out)
     self.model = keras.models.Model(inputs=base_model.input,
                                     outputs=[fc_yaw, fc_pitch, fc_roll])
     if snapshot is not None:
         self.model.load_weights(snapshot)
     self.idx_tensor = [idx for idx in range(66)]
     self.idx_tensor = np.array(self.idx_tensor, dtype=np.float32)
     self.idx_tensor_yaw = [idx for idx in range(120)]
     self.idx_tensor_yaw = np.array(self.idx_tensor_yaw, dtype=np.float32)
Example #23
def build_model_efficientnet():
    pretrained_model = efn.EfficientNetB0(weights='imagenet',
                                          include_top=False)
    pretrained_model.trainable = False
    x = pretrained_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(2, activation='softmax')(x)
    model = Model(inputs=pretrained_model.input, outputs=predictions)

    # lr=1e-4
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model
Example #24
def get_model():
    with strategy.scope():
        base_model = efn.EfficientNetB0(
            input_shape=(IMG_SIZE, IMG_SIZE, 3),
            weights="imagenet",  # noisy-student
            include_top=False)
        for layer in base_model.layers:
            layer.trainable = True

        avg = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)
        output = tf.keras.layers.Dense(len(train_classes),
                                       activation="softmax")(avg)
        model = tf.keras.Model(inputs=base_model.input, outputs=output)

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])
    model.summary()
    return model
Example #25
 def model_chooser(self, classes=2, weights=None):
     print("Model selection started.")
     name = self.model_name
     if(name=='C0'):
         self.model = efn.EfficientNetB0(include_top=True, weights=weights, classes=classes)
     elif(name=='C1'):
         self.model = efn.EfficientNetB1(include_top=True, weights=weights, classes=classes)
     elif(name=='C2'):
         self.model = efn.EfficientNetB2(include_top=True, weights=weights, classes=classes)
     elif(name=='C3'):
         self.model = efn.EfficientNetB3(include_top=True, weights=weights, classes=classes)
     elif(name=='C4'):
         self.model = efn.EfficientNetB4(include_top=True, weights=weights, classes=classes)
     elif(name=='C5'):
         self.model = efn.EfficientNetB5(include_top=True, weights=weights, classes=classes)
     
     if(classes==2):
         self.model.compile(optimizer="adam", loss="binary_crossentropy", metrics = ['acc'])
     elif(classes>2):
         self.model.compile(optimizer="adam", loss="categorical_crossentropy", metrics = ['acc'])      
Example #26
def create_base_model(base_model_name, pretrained=True, IMAGE_SIZE=[300, 300]):
    if pretrained is False:
        weights = None
    else:
        weights = "imagenet"
    if base_model_name == 'B0':
        base = efn.EfficientNetB0(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B1':
        base = efn.EfficientNetB1(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B2':
        base = efn.EfficientNetB2(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B3':
        base = efn.EfficientNetB3(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B4':
        base = efn.EfficientNetB4(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B5':
        base = efn.EfficientNetB5(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B6':
        base = efn.EfficientNetB6(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B7':
        base = efn.EfficientNetB7(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    base = remove_dropout(base)
    base.trainable = True
    return base
Example #27
def build_wide_and_deep(
        img_input_shape: Tuple[int, int, int] = (224, 224, 3),
        weights='noisy-student',
        tab_input_shape: Tuple[int, ] = (8, ),
):
    '''
    Builds a wide-and-deep model by concatenating a base EfficientNet image embedding with a vector of tabular data.
    INPUTS
        img_input_shape (Tuple[int, int, int]): input image size; may have only 1 channel.
            Models were trained with the sizes in the chart above. Default matches B0.
        weights (str): may be one of None, 'imagenet', 'noisy-student', or a weights file location
        tab_input_shape (Tuple[int]): shape of the tabular feature vector
    RETURN
        model
    '''

    # Create Deep leg
    inp_img = Input(shape=img_input_shape)
    base = efn.EfficientNetB0(input_shape=img_input_shape,
                              weights=weights,
                              include_top=False)
    for layer in base.layers:
        layer.trainable = False
    x = base(inp_img)
    x = GlobalAveragePooling2D()(x)

    # Create Wide leg
    inp_tab = Input(shape=tab_input_shape)
    x = Concatenate()([x, inp_tab])

    x = Dense(1)(x)  # activation defaults to linear

    model = Model([inp_tab, inp_img], x)

    # compile model
    opt = Adam(learning_rate=1e-5)
    loss = 'MSE'
    model.compile(optimizer=opt, loss=loss)

    return model
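A usage sketch for build_wide_and_deep; note the model expects its inputs in the order [tabular, image]. The random data below is illustrative, only to exercise that input ordering:

# Sketch only: weights=None avoids downloading the noisy-student checkpoint.
import numpy as np

wd_model = build_wide_and_deep(weights=None)
tab = np.random.rand(4, 8).astype('float32')            # matches tab_input_shape=(8,)
img = np.random.rand(4, 224, 224, 3).astype('float32')  # matches img_input_shape
y = np.random.rand(4, 1).astype('float32')
wd_model.fit([tab, img], y, epochs=1, batch_size=2)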
Example #28
    def create_efficientNet(self,
                            inp_shape,
                            input_tensor,
                            output_len,
                            is_teacher=True):
        if is_teacher:  # for teacher we use a heavier network
            eff_net = efn.EfficientNetB3(include_top=True,
                                         weights=None,
                                         input_tensor=None,
                                         input_shape=[
                                             InputDataSize.image_input_size,
                                             InputDataSize.image_input_size, 3
                                         ],
                                         pooling=None,
                                         classes=output_len)
        else:  # for student we use the small network
            eff_net = efn.EfficientNetB0(
                include_top=True,
                weights=None,
                input_tensor=None,
                input_shape=inp_shape,
                pooling=None,
                classes=output_len)  # or weights='noisy-student'

        eff_net.layers.pop()
        inp = eff_net.input

        x = eff_net.get_layer('top_activation').output
        x = GlobalAveragePooling2D()(x)
        x = keras.layers.Dropout(rate=0.5)(x)
        output = Dense(output_len, activation='linear', name='out')(x)

        eff_net = Model(inp, output)

        eff_net.summary()

        return eff_net
Example #29
args = parser.parse_args()

train_data = ImageDataSequence([os.path.join(args.dataset, f) for f in (["train", "val"] if args.test else ["train"])], batch_size=int(args.batch_size), target_size=(224, 224, 3), frac=float(args.train_frac))
val_data = ImageDataSequence(os.path.join(args.dataset, "test" if args.test else "val"), batch_size=int(args.batch_size), target_size=(224, 224, 3))

img_size = int(args.image_size)
input_tensor = Input(shape=(img_size, img_size, 3))

model_kwargs = {"backend": tensorflow.keras.backend, "layers": tensorflow.keras.layers, "models": tensorflow.keras.models, "utils": tensorflow.keras.utils}

if args.network == "resnet101":
    zero = ResNet101(include_top=False, weights=None if args.random_init else "imagenet", input_tensor=input_tensor, **model_kwargs)
    model = ResNet101(include_top=False, weights=None if args.random_init else "imagenet", input_tensor=input_tensor, **model_kwargs)
elif args.network == "enb0":
    zero = en.EfficientNetB0(include_top=False, weights=None if args.random_init else "imagenet", input_tensor=input_tensor)
    model = en.EfficientNetB0(include_top=False, weights=None if args.random_init else "imagenet", input_tensor=input_tensor)
elif args.network.startswith("file:"):
    zero = load_model(args.network[5:])
    model = load_model(args.network[5:])
else:
    raise Exception("Unknown network: " + args.network)

num_frozen = int(float(args.freeze_frac) * len(model.layers))

for l in model.layers[:num_frozen]:
    if type(l) != keras.layers.normalization.BatchNormalization:
        l.trainable = False

zero_features = zero.outputs[0]
model_features = model.outputs[0]
Example #30
            'beta_1': self._serialize_hyperparameter('beta_1'),
            'beta_2': self._serialize_hyperparameter('beta_2'),
            'decay': self._serialize_hyperparameter('decay'),
            'weight_decay': self._serialize_hyperparameter('weight_decay'),
            'epsilon': self.epsilon,
            'amsgrad': self.amsgrad,
            'total_steps': self._serialize_hyperparameter('total_steps'),
            'warmup_proportion': self._serialize_hyperparameter('warmup_proportion'),
            'min_lr': self._serialize_hyperparameter('min_lr'),
        })
        return config


efficientnetb0 = efn.EfficientNetB0(
        weights='imagenet',
        input_shape=(224, 224, 3),
        include_top=False
)

model = build_model(efficientnetb0)
model.summary()

model.compile(
        loss='categorical_crossentropy',
        optimizer = RAdam(learning_rate=1e-3,
                          min_lr=1e-7,
                          warmup_proportion=0.15),
        metrics=['accuracy']
    )

# Learning Rate Reducer