Example #1
0
# TF1-style setup: create an explicit session and register it with Keras so the
# Keras model and the raw TF training ops below share one graph/session.
sess = tf.Session()
K.set_session(sess)

# Build the network; the commented lines are alternative architectures
# (dropout / shorter-dense variants) that can be swapped in.
model = RCNet_THdim_model()
# model = RCNet_THdim_dropout_model()
# model = RCNet_shortdense_THdim_model()
model.summary()

from yolo.training_v1 import darkeras_loss, _TRAINER
from yolo.datacenter.data import shuffle, test_shuffle

inp_x = model.input      # model input tensor
net_out = model.output   # raw network output fed to the YOLO loss

# Build the loss (returns feed placeholders + the loss op), then a TF optimizer
# selected by name from cfg. Gradients are restricted to the model's trainable
# weights so frozen layers are left untouched.
say("Building {} loss function".format(cfg.model_name), verbalise=verbalise)
loss_ph, loss_op = darkeras_loss(net_out)
say("Building {} train optimizer".format(cfg.model_name), verbalise=verbalise)
optimizer = _TRAINER[cfg.trainer](cfg.lr)
gradients = optimizer.compute_gradients(loss_op, var_list=model.trainable_weights)
train_op = optimizer.apply_gradients(gradients)

# Initialize all TF variables first, then overwrite with pretrained weights.
# by_name=True restores only layers whose names match the checkpoint.
sess.run(tf.global_variables_initializer())
	
model.load_weights(pretrain_weight_path, by_name=True)

# End User Custom Training Function: gather the user-supplied training set
# before batching.
collect_enduser_trainset()

batches = shuffle()  # yields (x_batch, datum) training batches
Example #2
0
# Set to True to verify whether the conv-layer and dense-layer weights actually
# change (or stay frozen) from one training step to the next.
show_trainable_state = False
# Filename prefix under which trained weights are periodically saved.
trained_save_weights_prefix = 'models/train/{}-'.format(cfg.model_name)

# TF1-style setup: create an explicit session and register it with Keras so the
# Keras model and the raw TF training ops below share one graph/session.
sess = tf.Session()
K.set_session(sess)

model = yolo_vgg16_TFdim_model(is_top=True, is_new_training=True)
model.summary()

from yolo.training_v1 import darkeras_loss, _TRAINER
from yolo.datacenter.data import shuffle

inp_x = model.input      # model input tensor
net_out = model.output   # raw network output fed to the YOLO loss

# Build the loss (returns feed placeholders + the loss op), then a TF optimizer
# selected by name from cfg. Gradients are restricted to the model's trainable
# weights so frozen layers are left untouched.
say("Building {} loss function".format(cfg.model_name), verbalise=verbalise)
loss_ph, loss_op = darkeras_loss(net_out)
say("Building {} train optimizer".format(cfg.model_name), verbalise=verbalise)
optimizer = _TRAINER[cfg.trainer](cfg.lr)
gradients = optimizer.compute_gradients(loss_op, var_list=model.trainable_weights)
train_op = optimizer.apply_gradients(gradients)

# Initialize all TF variables first, then overwrite with pretrained weights.
# by_name=True restores only layers whose names match the checkpoint.
sess.run(tf.global_variables_initializer())

model.load_weights(pretrain_weight_path, by_name=True)

batches = shuffle()  # yields (x_batch, datum) training batches
for i, (x_batch, datum) in enumerate(batches):
	train_feed_dict = {
	   loss_ph[key]:datum[key] for key in loss_ph 
	}
    model.add(LeakyReLU(alpha=0.1))
    model.add(Dense(1470))
    return model


# Build the tiny-YOLO network (is_freeze controls which layers are trainable
# inside the builder) and reuse the session Keras already owns.
model = make_yolotiny_network(is_freeze)
model.summary()

from yolo.training_v1 import darkeras_loss, _TRAINER
from yolo.datacenter.data import shuffle

inp_x = model.input      # model input tensor
net_out = model.output   # raw network output fed to the YOLO loss
sess = K.get_session()

# Build the loss (returns feed placeholders + the loss op) and a TF optimizer
# selected by name from cfg. Note: no var_list here, so gradients are computed
# for every trainable variable in the graph.
say("Building {} loss function".format(cfg.model_name), verbalise=verbalise)
loss_ph, loss_op = darkeras_loss(net_out)
say("Building {} train optimizer".format(cfg.model_name), verbalise=verbalise)
optimizer = _TRAINER[cfg.trainer](cfg.lr)
gradients = optimizer.compute_gradients(loss_op)
train_op = optimizer.apply_gradients(gradients)

# Initialize all TF variables first, then overwrite with pretrained weights.
sess.run(tf.global_variables_initializer())
model.load_weights(weights_path)
# FIX: was say("Setting weigths : {}", format(weights_path), ...), which passed
# the unfilled template and builtin format() as two separate positional args
# instead of interpolating the path into the message (also misspelled "weights").
say("Setting weights : {}".format(weights_path), verbalise=verbalise)

model.summary()
print(model.output_shape)

batches = shuffle()  # yields (x_batch, datum) training batches
for i, (x_batch, datum) in enumerate(batches):


# In[6]:

from yolo.training_v1 import darkeras_loss, _TRAINER
from yolo.datacenter.data import shuffle


# In[7]:

inp_x = model.input      # model input tensor
net_out = model.output   # raw network output fed to the YOLO loss
sess = K.get_session()

# Build the loss (returns feed placeholders + the loss op) and a TF optimizer
# selected by name from cfg. Note: no var_list here, so gradients are computed
# for every trainable variable in the graph.
say("Building {} loss function".format(cfg.model_name), verbalise=verbalise)
loss_ph, loss_op = darkeras_loss(net_out)
say("Building {} train optimizer".format(cfg.model_name), verbalise=verbalise)
optimizer = _TRAINER[cfg.trainer](cfg.lr)
gradients = optimizer.compute_gradients(loss_op)
train_op = optimizer.apply_gradients(gradients)

# Initialize all TF variables first, then overwrite with pretrained weights.
sess.run(tf.global_variables_initializer())
model.load_weights(weights_path)
# FIX: was say("Setting weigths : {}",format(weights_path), ...), which passed
# the unfilled template and builtin format() as two separate positional args
# instead of interpolating the path into the message (also misspelled "weights").
say("Setting weights : {}".format(weights_path), verbalise=verbalise)

# Strip the classification head layer by layer (top-most first).
# NOTE(review): Keras Sequential.pop() historically returns None, so these
# messages may print "None layer poped" — confirm against the Keras version
# in use.
pop_layer = model.pop() # dense_25
say("{} layer poped".format(pop_layer), verbalise=verbalise)
pop_layer = model.pop() # leakyrelu_34
say("{} layer poped".format(pop_layer), verbalise=verbalise)
pop_layer = model.pop() # dense_24
Example #5
0
# TF1-style setup: create an explicit session and register it with Keras so the
# Keras model and the raw TF training ops below share one graph/session.
sess = tf.Session()
K.set_session(sess)

# Build the network; the commented lines are alternative architectures
# (dropout / shorter-dense variants) that can be swapped in.
model = RCNet_THdim_model()
# model = RCNet_THdim_dropout_model()
# model = RCNet_shortdense_THdim_model()
model.summary()

from yolo.training_v1 import darkeras_loss, _TRAINER
from yolo.datacenter.data import shuffle, test_shuffle

inp_x = model.input      # model input tensor
net_out = model.output   # raw network output fed to the YOLO loss

# Build the loss (returns feed placeholders + the loss op), then a TF optimizer
# selected by name from cfg. Gradients are restricted to the model's trainable
# weights so frozen layers are left untouched.
say("Building {} loss function".format(cfg.model_name), verbalise=verbalise)
loss_ph, loss_op = darkeras_loss(net_out)
say("Building {} train optimizer".format(cfg.model_name), verbalise=verbalise)
optimizer = _TRAINER[cfg.trainer](cfg.lr)
gradients = optimizer.compute_gradients(loss_op,
                                        var_list=model.trainable_weights)
train_op = optimizer.apply_gradients(gradients)

# Initialize all TF variables first, then overwrite with pretrained weights.
# by_name=True restores only layers whose names match the checkpoint.
sess.run(tf.global_variables_initializer())

model.load_weights(pretrain_weight_path, by_name=True)

batches = shuffle()  # yields (x_batch, datum) training batches

# Per-metric history accumulated over the training loop.
train_histories = {}
train_histories['train_loss'] = []