def trainable_model(non_trainable_model):
    '''
    Parameters
    ----------
    non_trainable_model: The non-trainable base network whose output features are used to train this network.

    Returns
    -------
    trainable_model: The fine-tuning model object that we will train.
    '''
    input_tensor = Input(shape=non_trainable_model.output_shape[1:])
    # input_tensor = Input(shape = non_trainable_model.get_layer(BOTTLENECK_TENSOR_NAME).output_shape[1:])
    # weights = non_trainable_model.get_weights()
    trainable_model = transfer_model(input_tensor, LABEL_LENGTH)
    # trainable_model.summary()
    return trainable_model


def predict_complete_output(non_trainable_model, trainable_model, img,
                            label_map, want_probability=False):  # default inferred from the call site below
    '''
    Parameters
    ----------
    non_trainable_model: The frozen base network used to extract bottleneck features.
    trainable_model: The fine-tuned top model that classifies those features.
    img: A batched image array of shape (1, height, width, channels).
    label_map: Mapping from class index to class name.
    want_probability: If True, return the full probability vector instead of only the top-class confidence.

    Returns
    -------
    (class_name, predictions): The predicted class name and the probability outputs of the network for the given image.
    '''
    feature_extracted = non_trainable_model.predict(img)  # bottleneck features from the frozen base
    print(feature_extracted.shape)
    predictions = trainable_model.predict(feature_extracted)  # class probabilities from the top model
    class_name = label_map[np.argmax(predictions[0])]
    if want_probability:
        return (class_name, predictions[0])
    else:
        return (class_name, predictions[0][np.argmax(predictions[0])])


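# Inference path: rebuild the frozen feature extractor and the fine-tuned top
# model, then classify every image found in args.img_dir.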
# base_model = applications.ResNet50(weights = 'imagenet', include_top = False, input_shape = (256,256,3))
input_tensor = Input(
    shape=base_model.get_layer(BOTTLENECK_TENSOR_NAME).output_shape[1:])
trainable_model = transfer_model(input_tensor, LABEL_LENGTH)
trainable_model = load_model(args.weight_file)  # the freshly built model is replaced by the saved, fine-tuned one
non_trainable_model = create_non_trainable_model(base_model,
                                                 BOTTLENECK_TENSOR_NAME)
print(trainable_model.input_shape)
for img_addr in natsorted(glob(args.img_dir + "/*")):
    img = cv2.imread(img_addr)  # OpenCV loads images in BGR order
    img = cv2.resize(img, (256, 256))
    img = np.expand_dims(img, axis=0)  # add a batch dimension
    (class_name, confidence) = predict_complete_output(non_trainable_model,
                                                       trainable_model, img,
                                                       inv_label_map)
    print((class_name, confidence))


# for i in (base_model.layers):
# print (i.name)
# exit(0)
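# Training path: wrap the frozen base network and a new classification head
# into one model and train it on images streamed from args.train.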
non_trainable_model = create_non_trainable_model(base_model,
                                                 BOTTLENECK_TENSOR_NAME)
trainable_model = trainable_model(non_trainable_model)  # NOTE: rebinds the name, shadowing the trainable_model() helper above
# Combine the frozen feature extractor and a fresh classification head into a
# single end-to-end network that can be trained on raw images.
non_bottleneck_net = Model(inputs=non_trainable_model.input,
                           outputs=transfer_model(non_trainable_model.output,
                                                  LABEL_LENGTH,
                                                  bottleneck_used=False))
non_bottleneck_net.compile(loss='categorical_crossentropy',
                           optimizer=optimizers.SGD(lr=1e-2, momentum=0.9),
                           metrics=['accuracy'])
# Train data generator
train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)  # apply the base network's preprocessing to every batch

# Callback list
checkpoint = ModelCheckpoint(args.weight_file)  # saves the model after every epoch
tb_callback = keras.callbacks.TensorBoard(log_dir=args.logs, write_graph=True)
# early_stopping = EarlyStopping(monitor = 'val_loss')
callback_list = [checkpoint, tb_callback]  #, early_stopping]

h1 = train_datagen.flow_from_directory(args.train, batch_size=BATCH_SIZE)  # training generator; default target_size is (256, 256)
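
# A minimal sketch of how the generator and callbacks above are typically wired
# into training (assumption -- the actual fit call, epoch count, and validation
# setup are not shown in this section):
# non_bottleneck_net.fit_generator(h1,
#                                  steps_per_epoch=h1.samples // BATCH_SIZE,
#                                  epochs=10,
#                                  callbacks=callback_list)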