Example #1
def train(model_dir, cnn_model, saved_model=None,
          learning_rate=1e-5, decay=1e-6,
          train_size=0.8, seq_length=16,
          hidden_units=256, dense_units=256, reg=1e-1, dropout_rate=1e-1,
          num_classes=6, batch_size=16, nb_epoch=100,
          image_shape=None):


	# ---- CALL BACK FUNCTIONS FOR FIT_GENERATOR() ---- #
	checkpoints_dir = os.path.join(model_dir, 'checkpoints')
	if not os.path.exists(checkpoints_dir):
		os.makedirs(checkpoints_dir)

	checkpointer = ModelCheckpoint(
            filepath=os.path.join(checkpoints_dir, 'lstm_weights.{epoch:04d}-{val_loss:.3f}.hdf5'),
			verbose=1, save_best_only=False, period=50)
	
	# tensorboard info
	tb = TensorBoard(log_dir=model_dir)

	# ------------------------------------------------- # 


	# PREPARE DATASET
	dataset = DataSet(cnn_model, seq_length)
	
	# steps_per_epoch = number of batches in one epoch
	steps_per_epoch = (len(dataset.data) * train_size) // batch_size

	# create train and validation generators
	generator = dataset.frame_generator(batch_size, 'train')
	# val_generator = dataset.frame_generator(batch_size, 'validation') # use all validation data each time?
	(X_val, y_val) = dataset.generate_data('validation')

	# load or create model
	if saved_model:
		rnn_model = load_model(saved_model)
	else:
		rnn_model = lstm(hidden_units=hidden_units, dense_units=dense_units, 
						reg=reg, dropout_rate=dropout_rate,
						seq_length=seq_length, num_classes=num_classes)
	
	# setup optimizer: ADAM algorithm
	optimizer = Adam(lr=learning_rate, decay=decay)
	
	# metrics for judging performance of model
	metrics = ['categorical_accuracy'] # ['accuracy']  # if using 'top_k_categorical_accuracy', must specify k
	
	rnn_model.compile(loss='categorical_crossentropy', optimizer=optimizer,
			metrics=metrics)

	print(rnn_model.summary())

	# use fit generator to generate data on the fly
	history = rnn_model.fit_generator(generator=generator,
									steps_per_epoch=steps_per_epoch,
									epochs=nb_epoch,
									verbose=1,
									callbacks=[tb, checkpointer],
									validation_data=(X_val, y_val),
									validation_steps=1)  # using all validation data for better metrics

	return history
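
A minimal usage sketch (not part of the original snippet): the model directory is a hypothetical name, and load_cnn_model() refers to the CNN feature extractor shown in Examples #4 and #6.

if __name__ == "__main__":
    cnn_model = load_cnn_model()                          # pretrained CNN used as a frame-feature extractor
    history = train(model_dir='experiments/base_model',   # hypothetical output directory
                    cnn_model=cnn_model,
                    seq_length=16,
                    batch_size=16,
                    nb_epoch=100)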
Example #2
File: train.py  Project: adnappp/deeplabv3
    pretraining = True


# print the parsed hyperparameters
for key in args.__dict__:
    if '__' not in key:
        offset = 20 - len(key)
        print(key + ' ' * offset, args.__dict__[key])

# select which GPU to use
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

data_path_df = pd.read_csv('dataset/path_list.csv')
data_path_df = data_path_df.sample(frac=1)  # first shuffle

dataset = DataSet(image_path=data_path_df['image'].values,
                  label_path=data_path_df['label'].values)

model = Deeplab_v3(batch_norm_decay=args.batch_norm_decay)

image = tf.placeholder(tf.float32, [None, 1024, 1024, 3], name='input_x')
label = tf.placeholder(tf.int32, [None, 1024, 1024])
lr = tf.placeholder(tf.float32)

logits = model.forward_pass(image)
logits_prob = tf.nn.softmax(logits=logits, name='logits_prob')
predicts = tf.argmax(logits, axis=-1, name='predicts')

variables_to_restore = tf.trainable_variables(scope='resnet_v2_50')

# finetune the resnet_v2_50 parameters (block1 through block4)
restorer = tf.train.Saver(variables_to_restore)
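
A hedged sketch of how the restorer above is typically used before training starts; the checkpoint path is an assumption, not taken from the original project.

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    if pretraining:
        # load ImageNet-pretrained ResNet weights into block1..block4
        restorer.restore(sess, 'pretrained/resnet_v2_50.ckpt')  # hypothetical checkpoint path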
Example #3
    gpu_num = 0
    pretraining = False
    ckpt_step = 2000

for key in args.__dict__:
    if '__' not in key:
        offset = 20 - len(key)
        print(key + ' ' * offset, args.__dict__[key])

os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % args.gpu_num

data_path_df = pd.read_csv(args.dataset+'/path_list.csv')
data_path_df = data_path_df.sample(frac=1)

dataset = DataSet(rgb_path=data_path_df['image'].values, 
                  thermal_path=None, 
                  label_path=data_path_df['label'].values,
                  data_type='rgb')

model = Deeplab_v3(batch_norm_decay=args.batch_norm_decay)

image = tf.placeholder(tf.float32, [None, 512, 512, 3], name='input_x')
label = tf.placeholder(tf.int32, [None, 512, 512])
lr = tf.placeholder(tf.float32)

logits = model.forward_pass(image)
logits_prob = tf.nn.softmax(logits=logits, name='logits_prob')
predicts = tf.argmax(logits, axis=-1, name='predicts')

variables_to_restore = tf.trainable_variables(scope='resnet_v2_50')

restorer = tf.train.Saver(variables_to_restore)
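
The snippet ends with the Saver; a hedged sketch of the graph pieces that typically follow (the loss and optimizer choices here are assumptions, not taken from the original project):

# per-pixel cross-entropy over the [None, 512, 512, num_classes] logits
cross_entropy = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logits))
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(cross_entropy)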
Example #4

    base_model = Xception(weights='imagenet', include_top=True)

    # get the feature outputs of second-to-last layer (final FC layer)
    outputs = base_model.get_layer('avg_pool').output

    cnn_model = Model(inputs=base_model.input, outputs=outputs)

    return cnn_model


if __name__ == "__main__":

    cnn_model = load_cnn_model()

    seq_length = 16  # sequence length of frames to downsample each video to
    dataset = DataSet(cnn_model)

    # generate Xception features and time it
    currtime = time.time()

    for ind, sample in enumerate(dataset.data):
        # save the sequences of frame features to npy files for eventual model training
        path = os.path.join(
            'data', 'sequences', sample[1],
            sample[2] + '-' + str(seq_length) + '-Xception_features.npy')

        if os.path.isfile(path):
            print(sample)
            print("Sequence: {} already exists".format(ind))
        else:
            print(sample)
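
The else-branch is cut off here; Example #6 below shows how it continues. A hedged completion (np.save as the storage call is an assumption):

            print("Generating and saving sequence: {}".format(ind))
            sequence = dataset.extract_seq_features(sample)
            np.save(path, sequence)  # assumes numpy is imported above the truncated top of the snippet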
Example #5
        'categorical_accuracy'
    ]  # ['accuracy']  # if using 'top_k_categorical_accuracy', must specify k

    rnn_model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=metrics)

    # load saved weights
    # folder_path = 'experiments/base_model/base_model_dropout_rate_0.00e+00/checkpoints/'
    folder_path = 'experiments/base_model/base_model_dropout_rate_3.00e-01/checkpoints/'

    saved_weights = os.path.join(folder_path, 'lstm_weights.0300-0.619.hdf5')
    rnn_model.load_weights(saved_weights)

    # load and prepare test set
    dataset = DataSet(cnn_model)

    X_train, Y_train = dataset.generate_data('train')
    X_val, Y_val = dataset.generate_data('validation')
    X_test, Y_test = dataset.generate_data('test')

    score = rnn_model.evaluate(x=X_train, y=Y_train, verbose=1)
    print("Train Loss: %2.3f" % score[0])
    print("Train Accuracy: %1.3f\n" % score[1])

    score = rnn_model.evaluate(x=X_val, y=Y_val, verbose=1)
    print("Val Loss: %2.3f" % score[0])
    print("Val Accuracy: %1.3f\n" % score[1])

    score = rnn_model.evaluate(x=X_test, y=Y_test, verbose=1)
    print("Test Loss: %2.3f" % score[0])
Example #6
    """
    base_model = InceptionV3(weights='imagenet', include_top=True)

    # get the feature outputs of second-to-last layer (final FC layer)
    outputs = base_model.get_layer('avg_pool').output

    cnn_model = Model(inputs=base_model.input, outputs=outputs)

    return cnn_model

if __name__ == "__main__":

    cnn_model = load_cnn_model()

    seq_length = 16 # sequence length of frames to downsample each video to
    dataset = DataSet(cnn_model)

    # generate InceptionV3 features and time it
    currtime = time.time()

    for ind, sample in enumerate(dataset.data):
        # save the sequences of frame features to npy files for eventual model training
        path = os.path.join('data', 'sequences', sample[1], sample[2] + '-' + str(seq_length) + '-features.npy')

        if os.path.isfile(path):
            print(sample)
            print("Sequence: {} already exists".format(ind))
        else:
            print(sample)
            print("Generating and saving sequence: {}".format(ind))
            sequence = dataset.extract_seq_features(sample)
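
The snippet cuts off after extracting the sequence; a hedged sketch of the usual continuation (np.save and the timing printout are assumptions):

            np.save(path, sequence)  # persist the InceptionV3 frame-feature sequence

    print("Feature extraction took %.1f s" % (time.time() - currtime))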
Example #7
import numpy as np
from data_utils import DataSet
import random
from up_ResNet import ResNet50_1
from ResNet50 import ResNet50
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD, Adam
from keras.models import load_model
from VGG16 import VGG_16
from keras.callbacks import TensorBoard
import pickle
from keras import regularizers

seed = 7
np.random.seed(seed)
X, Y, X_test, Y_test = DataSet()
X_train, X_valid, Y_train, Y_valid = train_test_split(
    X, Y, test_size=0.1, random_state=random.randint(0, 100))
# _, test_X, _, test_Y = train_test_split(X, Y, test_size=0.5, random_state=random.randint(0, 100))

reg = regularizers.l2(1e-6)
model = ResNet50_1(input_shape=(128, 128, 3), classes=2, reg=reg)
adam = Adam(lr=1e-4, decay=1e-7)
model.compile(optimizer=adam,
              loss="categorical_crossentropy",
              metrics=["accuracy"])

# standard training
history = model.fit(X_train,
                    Y_train,
                    epochs=200,
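
The fit() call is truncated here; a hedged sketch of how it might be completed (the batch size, validation data, and TensorBoard callback are assumptions based on the imports above):

history = model.fit(X_train, Y_train,
                    epochs=200,
                    batch_size=32,                       # assumed batch size
                    validation_data=(X_valid, Y_valid),
                    callbacks=[TensorBoard(log_dir='./logs')])  # hypothetical log directory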