Example #1
def model_2_lane_pixel_classification(train_db, params):
    """
	Represents the bottom of the image as 224 possible classes for the image.  Having a high value
	in label 0 represents a high confidence there is a lane intersecting the bottom of the image at
	pixel 0
	"""
    optimizer = params['optimizer']
    loss = params['loss']
    metrics = params['metrics']

    input_tensor = Input(shape=(224, 224, 3))
    base_model = ResNet50(weights='imagenet',
                          include_top=False,
                          input_tensor=input_tensor)
    fc_init = params['fc_init']
    x = base_model.output
    x = Flatten()(x)
    x = Dense(224, kernel_initializer=fc_init, name='fc5')(x)
    x = Activation('sigmoid')(x)

    model = Model(input_tensor, x)
    for layer in model.layers[:-1]:
        layer.trainable = False

    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    return model
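
A minimal usage sketch for the factory above, assuming the usual Keras imports are already in scope; train_db is not read by the function, so a placeholder is passed:

params = {
    'optimizer': 'adam',
    'loss': 'binary_crossentropy',
    'metrics': ['accuracy'],
    'fc_init': 'glorot_uniform',
}
lane_model = model_2_lane_pixel_classification(train_db=None, params=params)
lane_model.summary()  # ResNet50 backbone frozen, only the 224-unit fc5 head is trainable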
Example #2
def select_architecture(architecture):
    if architecture == 'vgg16':
        return VGG16(weights='imagenet', include_top=False)
    elif architecture == 'vgg19':
        return VGG19(weights='imagenet', include_top=False)
    elif architecture == 'resnet50':
        return ResNet50(weights='imagenet', include_top=False)
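
A hedged sketch of how such a backbone selector is typically used, with a small classification head stacked on top; GlobalAveragePooling2D, Dense and Model are assumed to be imported from keras, and the 10-class head is a placeholder:

base = select_architecture('resnet50')                 # ImageNet-pretrained, headless backbone
x = GlobalAveragePooling2D()(base.output)
outputs = Dense(10, activation='softmax')(x)           # placeholder class count
classifier = Model(inputs=base.input, outputs=outputs)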
Example #3
def load_model(args):

    if args.model == 'inception':
        model = InceptionV3(include_top=True, weights='imagenet')
        preprocess_mode = 'tf'
    elif args.model == 'xception':
        model = Xception(include_top=True, weights='imagenet')
        preprocess_mode = 'tf'
    elif args.model == 'inceptionresnet':
        model = InceptionResNetV2(include_top=True, weights='imagenet')
        preprocess_mode = 'tf'
    elif args.model == 'mobilenet':
        model = MobileNet(include_top=True, weights='imagenet')
        preprocess_mode = 'tf'
    elif args.model == 'mobilenet2':
        model = MobileNetV2(include_top=True, weights='imagenet')
        preprocess_mode = 'tf'
    elif args.model == 'nasnet':
        model = NASNetLarge(include_top=True, weights='imagenet')
        preprocess_mode = 'tf'
    elif args.model == 'resnet':
        model = ResNet50(include_top=True, weights='imagenet')
        preprocess_mode = 'caffe'
    elif args.model == 'vgg16':
        model = VGG16(include_top=True, weights='imagenet')
        preprocess_mode = 'caffe'
    elif args.model == 'vgg19':
        model = VGG19(include_top=True, weights='imagenet')
        preprocess_mode = 'caffe'
    else:
        raise ValueError("Model not found: " + args.model)

    return model, preprocess_mode
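
A minimal sketch pairing the selected model with the matching preprocessing; it assumes the returned mode string is meant for keras.applications.imagenet_utils.preprocess_input, and img_batch is a placeholder float array of shape (N, 224, 224, 3):

from types import SimpleNamespace
from keras.applications.imagenet_utils import preprocess_input

args = SimpleNamespace(model='resnet')
model, preprocess_mode = load_model(args)
x = preprocess_input(img_batch, mode=preprocess_mode)  # 'caffe' for ResNet/VGG, 'tf' for the others
preds = model.predict(x)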
Example #4
def getModel():
    input_2 = Input(shape=[1], name="angle")
    angle_layer = Dense(1, )(input_2)
    base_model = ResNet50(weights='imagenet',
                          include_top=False,
                          input_shape=X_train.shape[1:],
                          classes=1)
    x = base_model.get_layer('avg_pool').output

    x = GlobalAveragePooling2D()(x)
    merge_one = concatenate([x, angle_layer])
    merge_one = Dense(1024, activation='relu', name='fc2')(merge_one)
    merge_one = Dropout(0.3)(merge_one)
    #merge_one = Dense(1024, activation='relu', name='fc3')(merge_one)
    #merge_one = Dropout(0.3)(merge_one)

    predictions = Dense(1, activation='sigmoid')(merge_one)

    model = Model(inputs=[base_model.input, input_2], outputs=predictions)

    sgd = optimizers.SGD(lr=1e-3, decay=1e-4, momentum=0.9, nesterov=True)
    #sgd = optimizers.Adam(lr=0.0001)
    #sgd = optimizers.Adadelta()
    model.compile(loss='binary_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    return model
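
Since the model above has two inputs (the image tensor and the incidence angle), training feeds them as a list; X_train, X_angle_train and the validation arrays are hypothetical:

model = getModel()
model.fit([X_train, X_angle_train], y_train,
          batch_size=32, epochs=10,
          validation_data=([X_valid, X_angle_valid], y_valid))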
Example #5
def finetuned_resnet(include_top, weights_dir):
    '''

    :param include_top: True for training, False for generating intermediate results for
                        LSTM cell
    :param weights_dir: path to load finetune_resnet.h5
    :return:
    '''
    base_model = ResNet50(include_top=False,
                          weights='imagenet',
                          input_shape=IMSIZE)
    for layer in base_model.layers:
        layer.trainable = False

    x = base_model.output
    x = Flatten()(x)
    x = Dense(2048, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)

    if include_top:
        x = Dense(N_CLASSES, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=x)
    if os.path.exists(weights_dir):
        model.load_weights(weights_dir, by_name=True)

    return model
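
A hedged usage sketch; IMSIZE and N_CLASSES are module-level constants in the original code, so placeholder values are defined here just to make the calls self-contained:

IMSIZE = (224, 224, 3)   # placeholder
N_CLASSES = 10           # placeholder
classifier = finetuned_resnet(include_top=True, weights_dir='finetune_resnet.h5')
feature_extractor = finetuned_resnet(include_top=False, weights_dir='finetune_resnet.h5')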
Example #6
def build_models(seq_len=12, num_classes=4, load_weights=False):
    # DST-Net: ResNet50
    resnet = ResNet50(weights='imagenet', include_top=False)
    for layer in resnet.layers:
        layer.trainable = False
    resnet.load_weights('model/resnet.h5')
    # DST-Net: Conv3D + Bi-LSTM
    inputs = Input(shape=(seq_len, 7, 7, 2048))
    # conv1_1, conv3D and flatten
    conv1_1 = TimeDistributed(Conv2D(128, 1, 1, activation='relu'))(inputs)
    conv3d = Conv3D(64, 3, 1, 'SAME', activation='relu')(conv1_1)
    flatten = Reshape(target_shape=(seq_len, 7 * 7 * 64))(conv3d)
    # 2 Layers Bi-LSTM
    bilstm_1 = Bidirectional(LSTM(128, dropout=0.5,
                                  return_sequences=True))(flatten)
    bilstm_2 = Bidirectional(LSTM(128, dropout=0.5,
                                  return_sequences=False))(bilstm_1)
    outputs = Dense(num_classes, activation='softmax')(bilstm_2)
    dstnet = Model(inputs=inputs, outputs=outputs)
    dstnet.compile(loss='categorical_crossentropy',
                   optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True))
    # load models
    if load_weights:
        dstnet.load_weights('model/dstnet.h5')
    return resnet, dstnet
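
The two returned models form a two-stage pipeline: the frozen ResNet turns each frame into a 7x7x2048 feature map, and DST-Net classifies the stacked sequence. A hedged sketch, where frames is a hypothetical preprocessed array of shape (12, 224, 224, 3):

import numpy as np

resnet, dstnet = build_models(seq_len=12, num_classes=4)
frame_features = resnet.predict(frames)              # -> (12, 7, 7, 2048)
sequence = np.expand_dims(frame_features, axis=0)    # -> (1, 12, 7, 7, 2048)
probs = dstnet.predict(sequence)                     # -> (1, 4) class probabilities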
Example #7
def fcn_resnet(input_shape, nb_labels):
    nb_rows, nb_cols, _ = input_shape
    input_tensor = Input(shape=input_shape)
    model = ResNet50(include_top=False,
                     weights='imagenet',
                     input_tensor=input_tensor)
    # for layer in model.layers:
    #     layer.trainable = False
    x32 = model.get_layer('act3d').output
    x16 = model.get_layer('act4f').output
    x8 = model.get_layer('act5c').output

    c32 = Conv2D(nb_labels, (1, 1), name='conv_labels_32')(x32)
    c16 = Conv2D(nb_labels, (1, 1), name='conv_labels_16')(x16)
    c8 = Conv2D(nb_labels, (1, 1), name='conv_labels_8')(x8)

    def resize_bilinear(images):
        return tf.image.resize_bilinear(images, [nb_rows, nb_cols])

    r32 = Lambda(resize_bilinear, name='resize_labels_32')(c32)
    r16 = Lambda(resize_bilinear, name='resize_labels_16')(c16)
    r8 = Lambda(resize_bilinear, name='resize_labels_8')(c8)

    m = Add(name='merge_labels')([r32, r16, r8])

    x = Reshape((nb_rows * nb_cols, nb_labels))(m)
    # x = Activation('softmax')(x)
    x = Activation('sigmoid')(x)
    x = Reshape((nb_rows, nb_cols, nb_labels))(x)

    model = Model(inputs=input_tensor, outputs=x)

    return model
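
Because the network ends in a per-pixel sigmoid, it is naturally trained with a binary cross-entropy loss; a hedged compile/fit sketch, assuming the custom ResNet build whose layers are actually named act3d/act4f/act5c, with images and masks as hypothetical arrays:

model = fcn_resnet(input_shape=(256, 256, 3), nb_labels=5)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(images, masks, batch_size=4, epochs=10)    # masks: (N, 256, 256, 5) with 0/1 per pixel and label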
Example #8
def main():
    ''' LEGACY PREPROCESSING
    training_img_list, training_pose = process_dataset(training_file)
    training_pose = training_pose.astype('float32')
    testing_img_list, testing_pose = process_dataset(testing_file)
    testing_pose = testing_pose.astype('float32')
    train_imgs = read_images(training_img_list)
    test_imgs = read_images(testing_img_list)
    train_imgs = train_imgs.astype('float32')
    test_imgs = test_imgs.astype('float32')
    train_imgs /= 255
    test_imgs /= 255

    tr_tx =training_pose[:,:3]
    tr_rt =training_pose[:,3:]
    ts_tx =testing_pose[:,:3]
    ts_rt =testing_pose[:,3:]'''

    #train_imgs, train_pose_tx, train_pose_rt, test_imgs, test_pose_tx, test_pose_rt = load_train_test_splits(base_dir )

    # import the resnet model
    base_model = ResNet50(weights='imagenet')
    x = base_model.output
    x = Dropout(0.7)(x)
    x = Dense(1024, activation='tanh')(x)
    position = Dense(3, activation='tanh', name='translation')(x)
    rotation = Dense(4, activation='tanh', name='rotation')(x)

    model = Model(inputs=base_model.input, outputs=[position, rotation])
    print(model.summary())
    # sgd = optimizers.SGD(lr=0.00001, decay=1e-6, momentum=0.9, nesterov=True)
Example #9
def extract_resnet():
    model = ResNet50(weights='imagenet', include_top=False)
    print(model.summary())

    X_dirname = '../../411a3/train'
    Y_filename = '../../411a3/train.csv'
    X_filelist = image.list_pictures(X_dirname)
    Y_list = np.loadtxt(Y_filename, dtype='str', delimiter=',')[1:]

    X_resnet = np.zeros((train_size, 2048, 1, 1))
    y_resnet = Y_list[:, 1].astype('int64').reshape(-1, 1) - 1

    for i in range(train_size):
        img = image.load_img(X_filelist[i], target_size=target_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        resnet = model.predict(x)
        X_resnet[i, :, :, :] = resnet
        print('Read image: ' + X_filelist[i])

    # shuffle inputs and targets
    rnd_idx = np.arange(X_resnet.shape[0])
    np.random.shuffle(rnd_idx)
    X_train = X_resnet[rnd_idx]
    y_train = y_resnet[rnd_idx]

    return X_train, y_train
Example #10
def getImageCnnModel():
    model = ResNet50(include_top=True, weights='imagenet')
    cnn_output_len = 2048
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    model.output_layers = [model.layers[-1]]
    model.layers[-1].outbound_nodes = []
    return [model, cnn_output_len]
Example #11
    def __init__(self, trainable, pool):

        # if pool is not None:
        #     changePoolBool=pool
        self.networkMod = ResNet50(trainable=trainable, changePool=pool)
        # else:
        #     self.networkMod = ResNet50(trainable=trainable)
        self.network = Sequential()
        self.network.add(self.networkMod)
Example #12
    def run(self):
        global label
        #self.ard = serial.Serial(self.get_serial_port(), baudrate=9600, timeout=3)
        # Load the VGG16 network
        print("[INFO] loading network...")
        self.model = ResNet50(weights="imagenet")

        while frame is not None:
            (inID, label) = self.predict(frame)
Example #13
def Run(self, img_path, model_name):

    # config variables
    weights = 'imagenet'
    include_top = 0
    train_path = 'jpg'
    classifier_file = 'output/flowers_17/' + model_name + '/classifier.cpickle'

    # create the pretrained models
    # check for pretrained weight usage or not
    # check for top layers to be included or not
    if model_name == "vgg16":
        from vgg16 import VGG16, preprocess_input
        base_model = VGG16(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('fc1').output)
        image_size = (224, 224)
    elif model_name == "vgg19":
        from vgg19 import VGG19, preprocess_input
        base_model = VGG19(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('fc1').output)
        image_size = (224, 224)
    elif model_name == "resnet50":
        from resnet50 import ResNet50, preprocess_input
        base_model = ResNet50(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('avg_pool').output)
        image_size = (224, 224)
    elif model_name == "inceptionv3":
        from inception_v3 import InceptionV3, preprocess_input
        base_model = InceptionV3(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('mixed9').output)
        image_size = (299, 299)
    elif model_name == "xception":
        from xception import Xception, preprocess_input
        base_model = Xception(weights=weights)
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('avg_pool').output)
        image_size = (299, 299)
    else:
        raise ValueError("unknown model name: " + model_name)

    img = image.load_img(img_path, target_size=image_size)
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    img_array = preprocess_input(img_array)
    feature = model.predict(img_array)
    feature = feature.flatten()
    with open(classifier_file, 'rb') as f:
        model2 = pickle.load(f)

    pred = model2.predict(feature)
    prob = model2.predict_proba(np.atleast_2d(feature))[0]

    return pred, prob[0]
Example #14
def RetModel():
    return [
        VGG16(input_shape=(256, 256, 7), classes=2),
        VGG19(input_shape=(256, 256, 7), classes=2),
        ResNet50(input_shape=(256, 256, 7), classes=2),
        InceptionResNetV2(input_shape=(256, 256, 7), classes=2),
        DenseNet121(input_shape=(256, 256, 7), classes=2),
        DenseNet169(input_shape=(256, 256, 7), classes=2),
        DenseNet201(input_shape=(256, 256, 7), classes=2)
    ]
Example #15
    def __init__(self, in_channels, nums, classes, fpn_strides, encoder_decoder_levels=3, layers=[3, 4, 6, 3], overshold=0.5, topk=100, train=True, min_size=32):
        super(Detect, self).__init__()
        self.backbone = ResNet50(layers)  # ResNet50 backbone network
        self.fpn = FPNs()  # build the FPNs on top of the backbone
        self.head = CenterModule(in_channels, nums, classes)  # FCOS detection head
        self.fpn_strides = fpn_strides  # strides used to build the center priors
        self.train = train
        self.loss = FCOSLoss(0.45)  # training-time loss
        self.encoder_decoder = EncoderDecoder(in_channels, out_channels, encoder_decoder_levels)
        self.select = TestSelect(overshold, topk, min_size, classes)
Example #16
def RetModel() -> Tuple[Model, Model, Model, Model, Model, Model, Model]:
    '''
    Build all of the training models.
    Returns:
        the VGG, ResNet, Inception-ResNet and DenseNet models.
    '''
    return [VGG16(input_shape=(256,256,7),classes=2),VGG19(input_shape=(256,256,7),classes=2),
            ResNet50(input_shape=(256, 256, 7), classes=2),InceptionResNetV2(input_shape=(256, 256, 7), classes=2),
            DenseNet121(input_shape=(256, 256, 7),classes=2),DenseNet169(input_shape=(256, 256, 7), classes=2),
            DenseNet201(input_shape=(256, 256, 7),classes=2)]
Example #17
def DeepLabV3Plus(img_height, img_width, nclasses=66):
    print('*** Building DeepLabv3Plus Network ***')

    base_model = ResNet50(input_shape=(img_height, img_width, 3),
                          weights='imagenet',
                          include_top=False)

    image_features = base_model.get_layer('activation_39').output
    x_a = ASPP(image_features)
    x_a = Upsample(tensor=x_a, size=[img_height // 4, img_width // 4])

    x_b = base_model.get_layer('activation_9').output
    x_b = Conv2D(filters=48,
                 kernel_size=1,
                 padding='same',
                 kernel_initializer='he_normal',
                 name='low_level_projection',
                 use_bias=False)(x_b)
    x_b = BatchNormalization(name=f'bn_low_level_projection')(x_b)
    x_b = Activation('relu', name='low_level_activation')(x_b)

    x = concatenate([x_a, x_b], name='decoder_concat')

    x = Conv2D(filters=256,
               kernel_size=3,
               padding='same',
               activation='relu',
               kernel_initializer='he_normal',
               name='decoder_conv2d_1',
               use_bias=False)(x)
    x = BatchNormalization(name=f'bn_decoder_1')(x)
    x = Activation('relu', name='activation_decoder_1')(x)

    x = Conv2D(filters=256,
               kernel_size=3,
               padding='same',
               activation='relu',
               kernel_initializer='he_normal',
               name='decoder_conv2d_2',
               use_bias=False)(x)
    x = BatchNormalization(name=f'bn_decoder_2')(x)
    x = Activation('relu', name='activation_decoder_2')(x)
    x = Upsample(x, [img_height, img_width])

    x = Conv2D(nclasses, (1, 1), name='output_layer')(x)
    '''
    x = Activation('softmax')(x) 
    tf.losses.SparseCategoricalCrossentropy(from_logits=True)
    Args:
        from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
        we assume that `y_pred` encodes a probability distribution.
    '''
    model = Model(inputs=base_model.input, outputs=x, name='DeepLabV3_Plus')
    print(f'*** Output_Shape => {model.output_shape} ***')
    return model
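
Following the commented note above, the raw 1x1 convolution output is treated as logits and paired with a from_logits loss at compile time; a minimal sketch assuming tf.keras and a placeholder 512x512 input size:

import tensorflow as tf

model = DeepLabV3Plus(img_height=512, img_width=512, nclasses=66)
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])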
Example #18
    def __init__(self, model_path, class_num, insize, mean, gpu=0):

        # Load model
        self.__model = ResNet50(class_num, insize)
        serializers.load_npz(model_path, self.__model)

        chainer.cuda.get_device(gpu).use()
        self.__model.to_gpu(gpu)

        # Add height and width dimensions to mean
        self.__mean = mean[np.newaxis, np.newaxis, :]
Example #19
def model():
    model = ResNet50(weights='imagenet')
    model.layers.pop()
    for layer in model.layers:
        layer.trainable = False
    new_layer1 = Dropout(0.4)(model.layers[-1].output)
    new_layer2 = Dense(3, activation="softmax")(new_layer1)
    model_1 = Model(inputs=model.input, outputs=[new_layer2])
    model_1.compile(optimizer="adam", loss='categorical_crossentropy', metrics=['accuracy'])
    model_1.summary()
    return model_1
Example #20
def train_model():
    model = ResNet50(weights='imagenet', include_top=False)
    last_layer = model.output
    x = GlobalAveragePooling2D()(last_layer)
    x = Dense(512, activation='relu', name='fc-1')(x)
    x = Dropout(0.5)(x)
    x = Dense(256, activation='relu', name='fc-2')(x)
    x = Dropout(0.5)(x)
    out = Dense(nb_classes, activation='softmax', name='output_layer')(x)
    custom_resnet_model = Model(inputs=model.input, outputs=out)

    custom_resnet_model.summary()

    # uncomment this if there's a bug
    # custom_resnet_model.layers[-1].trainable

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=40,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True,
                                       fill_mode='nearest')

    train_generator = train_datagen.flow_from_directory(
        config.TRAIN_DIR,
        target_size=(224, 224),
        batch_size=config.BATCH_SIZE,
        class_mode='sparse')

    opt = tfa.optimizers.SGDW(learning_rate=0.01,
                              weight_decay=0.0001,
                              momentum=0.9)

    # opt = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True, name='SGD')

    log_dir = config.LOGS_DIR + '/' + datetime.datetime.now().strftime(
        "%Y-%m-%d_%H:%M:%S")
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=1)

    custom_resnet_model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=opt,
        metrics=['accuracy'],
    )

    custom_resnet_model.fit(train_generator,
                            steps_per_epoch=config.STEPS_PER_EPOCH,
                            epochs=config.EPOCHS,
                            callbacks=[tensorboard_callback])

    custom_resnet_model.save(config.TRAINED_MODEL_DIR_RESNET)
Example #21
def build_model(input_shape, dropout, fc_layers, num_classes):
    inputs = Input(shape=input_shape, name='input_1')
    x = ResNet50(input_tensor=inputs).output
    x = Flatten()(x)
    for fc in fc_layers:
        x = Dense(fc, activation='relu')(x)
        #x = Dropout(dropout)(x)

    predictions = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=predictions)
    model.summary()
    return model
Example #22
def init():
    global model
    if model is None:
        start_time = time.time()
        from resnet50 import ResNet50
        model = ResNet50(weights='imagenet', include_top=False)
        print("Initializing ResNet, please wait...")
        pixels = np.zeros((480, 640, 3))
        x = pixels_to_input(pixels)
        preds = model.predict(x)
        print("Resnet initialized in {:.2f} sec".format(time.time() -
                                                        start_time))
Example #23
    def build_model(self):
        from keras.layers import Dense
        from keras.layers import Activation
        from keras.models import Model
        from resnet50 import ResNet50
        resnet50_model = ResNet50(weights='imagenet')

        fc1000 = resnet50_model.get_layer('fc1000').output
        final_softmax = Dense(2, activation='softmax')(fc1000)
        resnet50_ftr_extrctr = Model(inputs=resnet50_model.input, outputs=final_softmax)

        self.model = resnet50_ftr_extrctr
Example #24
def hack_resnet(num_classes):
    model = ResNet50(include_top=True, weights='imagenet')

    # Get input
    new_input = model.input
    # Find the layer to connect
    hidden_layer = model.layers[-2].output
    # Connect a new layer on it
    new_output = Dense(num_classes)(hidden_layer)
    # Build a new model
    newmodel = Model(new_input, new_output)

    return newmodel
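
The replacement Dense head above has no activation, so its outputs are logits; a hedged compile sketch under that assumption, using tf.keras losses:

import tensorflow as tf

model = hack_resnet(num_classes=10)    # placeholder class count
model.compile(optimizer='adam',
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])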
Example #25
    def predict(filename):
        assert os.path.isfile(filename), 'cannot find file'
        model = ResNet50(weights='imagenet')

        img = image.load_img(filename, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        preds = decode_predictions(model.predict(x))
        if len(preds) == 0:
            return None
        return preds[0][0][1]
Example #26
def train_resnet(img_type):
    #data
    train_imgs, train_labels = read_image(path, img_type)

    Y_hat, model_params = ResNet50(input_shape=[256, 256, 1], classes=2)
    #Y_hat = tf.sigmoid(Z)

    X = model_params['input']
    Y_true = tf.placeholder(dtype=tf.int32, shape=[None, 1])

    Z = model_params['out']['Z']
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=Z, labels=Y_true))
    train_step = tf.train.AdamOptimizer(1e-3).minimize(loss)
    correct_prediction = tf.equal(tf.cast(tf.argmax(Y_hat, 1), tf.int32),
                                  Y_true)
    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        for epoch in range(1, max_epochs + 1):
            start_time = time.time()
            for batch_images, batch_labels in minibatches(train_imgs,
                                                          train_labels,
                                                          batch_size,
                                                          shuffle=True):

                if epoch % 10 == 0:
                    l, ac, _ = sess.run([loss, acc, train_step],
                                        feed_dict={
                                            X: batch_images,
                                            Y_true: batch_labels
                                        })
                    print('epoch: ' + str(epoch) + ' loss: ' + str(l) +
                          ' accu: ' + str(ac))
                else:
                    sess.run([train_step],
                             feed_dict={
                                 X: batch_images,
                                 Y_true: batch_labels
                             })

                if epoch % 500 == 0:
                    saver.save(sess, path + './model' + str(epoch) + '.ckpt')
            end_time = time.time()
            print(end_time - start_time)
Example #27
def resnet50_transfer_len():
    base_model = ResNet50(weights='imagenet')
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('avg_pool').output)
    img_path = '1.jpg'
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    transferva = model.predict(x)
    nsamples, nx, ny, npoints = transferva.shape
    d2_train_dataset = transferva.reshape((npoints * nsamples * nx * ny))
    K.clear_session()
    return d2_train_dataset.shape[0]
Example #28
def DeepLabV3Plus(img_height, img_width):
    print('*** Building DeepLabv3Plus Network ***')

    base_model = ResNet50(input_shape=(img_height, img_width, 3),
                          weights='imagenet',
                          include_top=False)

    image_features = base_model.get_layer('activation_39').output
    x_a = ASPP(image_features)
    x_a = Upsample(tensor=x_a, size=[img_height // 4, img_width // 4])

    x_b = base_model.get_layer('activation_9').output
    x_b = Conv2D(filters=48,
                 kernel_size=1,
                 padding='same',
                 kernel_initializer='he_normal',
                 name='low_level_projection',
                 use_bias=False)(x_b)
    x_b = BatchNormalization(name=f'bn_low_level_projection')(x_b)
    x_b = Activation('relu', name='low_level_activation')(x_b)

    x = concatenate([x_a, x_b], name='decoder_concat')

    x = Conv2D(filters=256,
               kernel_size=3,
               padding='same',
               activation='relu',
               kernel_initializer='he_normal',
               name='decoder_conv2d_1',
               use_bias=False)(x)
    x = BatchNormalization(name=f'bn_decoder_1')(x)
    x = Activation('relu', name='activation_decoder_1')(x)

    x = Conv2D(filters=256,
               kernel_size=3,
               padding='same',
               activation='relu',
               kernel_initializer='he_normal',
               name='decoder_conv2d_2',
               use_bias=False)(x)
    x = BatchNormalization(name=f'bn_decoder_2')(x)
    x = Activation('relu', name='activation_decoder_2')(x)
    x = Upsample(x, [img_height, img_width])

    x = Conv2D(1, (1, 1), name='output_layer')(x)
    x = Activation('sigmoid')(x)
    model = Model(inputs=base_model.input, outputs=x, name='DeepLabV3_Plus')
    print(f'*** Output_Shape => {model.output_shape} ***')
    return model
Example #29
def predict_one(src_img_dir, pic_name):
    pre_handle_picture(src_img_dir, pic_name)
    x_test = []
    x_test.append(covert_img_toarray(src_img_dir, pic_name))
    x_test = np.array(x_test)

    x_test = x_test.astype('float32')
    x_test /= 255

    model = ResNet50(weights='cat_kind')
    y_pred1 = model.predict(x_test)
    classes = np.argmax(y_pred1, axis=1)[0]
    for key in cat_dict.keys():
        if classes == key:
            print("class: {0},  name: {1}".format(classes, cat_dict.get(key)))
Example #30
def get_ResNet50(input_shape, trainable=False, pop=True, **kwargs):

    #importing convolutional layers of ResNet50 from keras
    model = ResNet50(include_top=False, weights='imagenet',input_shape=input_shape)
    if pop == True:
        model.layers.pop() # pop pooling layer
        model.layers.pop() # pop last activation layer

    #setting the convolutional layers to non-trainable 
    for layer in model.layers:
        layer.trainable = trainable
    
    print('Resnet50 for Perception loss:')
    model.summary()
    return model
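
A hedged sketch of how such a truncated ResNet is typically used as a perceptual (feature-space) loss, comparing two image batches; real_images and fake_images are hypothetical (N, 224, 224, 3) arrays, and preprocess_input comes from keras.applications.resnet50:

import numpy as np
from keras.applications.resnet50 import preprocess_input

feat_net = get_ResNet50(input_shape=(224, 224, 3), trainable=False, pop=False)  # pop=False keeps the standard headless output
f_real = feat_net.predict(preprocess_input(real_images.astype('float32')))
f_fake = feat_net.predict(preprocess_input(fake_images.astype('float32')))
perception_loss = np.mean(np.square(f_real - f_fake))   # MSE in ResNet feature space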