Example #1
from yolo_top import yolov3
import numpy as np
import tensorflow as tf
from data_pipeline import data_pipeline
import config as cfg
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"  # restrict training to GPU 3
file_path = 'E:/Python/tensorflow/YOLO/people count/yuncong_data/our/trainval_2014.tfrecord'
log_dir='E:/Python/tensorflow/YOLO/people count/yuncong_data/our/log/'
imgs, true_boxes = data_pipeline(file_path, cfg.batch_size)

istraining = tf.constant(True, tf.bool)
model = yolov3(imgs, true_boxes, istraining)

with tf.name_scope('loss'):
    loss, AVG_IOU, coordinates_loss_sum, objectness_loss, no_objects_loss_mean = model.compute_loss()
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('avg', AVG_IOU)
    tf.summary.scalar('coord', coordinates_loss_sum)
    tf.summary.scalar('obj', objectness_loss)
    tf.summary.scalar('no_obj', no_objects_loss_mean)
global_step = tf.Variable(0, trainable=False)
#lr = tf.train.exponential_decay(0.0001, global_step=global_step, decay_steps=2e4, decay_rate=0.1)
lr = tf.train.piecewise_constant(global_step, [30000, 45000], [1e-4, 5e-5, 1e-5])  # drop the learning rate at the given global steps
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.00001)
update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # batch-norm moving-average update ops
vars_det = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Head")
# for var in vars_det:
#     print(var)
with tf.control_dependencies(update_op):
    train_op = optimizer.minimize(loss, global_step=global_step, var_list=vars_det)
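Example #1 stops right after the training op is built. Below is a minimal sketch of the session loop such a TF1 script typically ends with, assuming data_pipeline is built on tf.data (so no queue runners are needed); the 50000-step budget, the 1000-step checkpoint interval, and the model.ckpt filename are illustrative assumptions, not part of the original:

merged = tf.summary.merge_all()  # picks up the five scalar summaries registered above
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(log_dir, sess.graph)
    for step in range(50000):  # assumed step budget
        _, summary, loss_val = sess.run([train_op, merged, loss])
        writer.add_summary(summary, global_step=sess.run(global_step))
        if step % 1000 == 0:  # assumed checkpoint interval
            print('step %d, loss %.4f' % (step, loss_val))
            saver.save(sess, log_dir + 'model.ckpt', global_step=global_step)
    writer.close()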
Example #2
from yolo_top import yolov3
import numpy as np
import tensorflow as tf
from data_pipeline import data_pipeline
from config import cfg

file_path = 'trainval0712.tfrecords'
imgs, true_boxes = data_pipeline(file_path, cfg.batch_size)

istraining = tf.constant(True, tf.bool)
model = yolov3(imgs, true_boxes, istraining)

loss = model.compute_loss()
global_step = tf.Variable(0, trainable=False)
# lr = tf.train.exponential_decay(0.0001, global_step=global_step, decay_steps=2e4, decay_rate=0.1)
lr = tf.train.piecewise_constant(global_step, [40000, 45000], [1e-3, 1e-4, 1e-5])
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
# optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
vars_det = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Head")
# for var in vars_det:
#     print(var)
with tf.control_dependencies(update_op):
    train_op = optimizer.minimize(loss, global_step=global_step, var_list=vars_det)
saver = tf.train.Saver()
ckpt_dir = './ckpt/'

gs = 0
batch_per_epoch = 2000
cfg.train.max_batches = int(batch_per_epoch * 10)
cfg.train.image_resized = 608
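Example #2 likewise ends before the session loop, but it has already set up saver, ckpt_dir, gs, and cfg.train.max_batches. A sketch of how those pieces would plausibly be used; resuming from the latest checkpoint, the 1000-step save interval, and the yolov3.ckpt filename are assumptions:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)  # resume where training left off
        gs = sess.run(global_step)
    while gs < cfg.train.max_batches:
        _, loss_val, gs = sess.run([train_op, loss, global_step])
        if gs % 1000 == 0:  # assumed save interval
            print('step %d, loss %.4f' % (gs, loss_val))
            saver.save(sess, ckpt_dir + 'yolov3.ckpt', global_step=gs)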
Example #3
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader

from data_pipeline import data_pipeline

#%%
# device GPU / CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print ('Available devices ', torch.cuda.device_count())
#print ('Current cuda device ', torch.cuda.current_device())
#print(torch.cuda.get_device_name(device))

# Data parameters 
Data_dir = '/home/sss-linux1/project/leejun/Thermo/Experiment/'

# NN training parameters
TENSORBOARD_STATE = True
num_epoch = 100
BATCH_SIZE = 32
val_ratio = 0.3
Learning_rate = 0.001
L2_decay = 1e-8
LRSTEP = 5
GAMMA = 0.1

#%% DataLoader
dataset = data_pipeline(Data_dir)
train_dataset, val_dataset = torch.utils.data.random_split(dataset, (320000, 12800))  # fixed sizes must sum to len(dataset); val_ratio above is unused here
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
val_loader = DataLoader(val_dataset, shuffle=False, num_workers=0)
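The hyperparameters above (Learning_rate, L2_decay, LRSTEP, GAMMA, TENSORBOARD_STATE) imply a standard PyTorch training loop. A minimal sketch under stated assumptions: Net is a hypothetical model class and MSELoss a guessed objective, since neither appears in the snippet:

from torch.utils.tensorboard import SummaryWriter

model = Net().to(device)        # Net is a placeholder for the actual network
criterion = torch.nn.MSELoss()  # assumed objective
optimizer = torch.optim.Adam(model.parameters(), lr=Learning_rate, weight_decay=L2_decay)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=LRSTEP, gamma=GAMMA)
writer = SummaryWriter() if TENSORBOARD_STATE else None

for epoch in range(num_epoch):
    model.train()
    for x, y in train_loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()
    scheduler.step()  # decay lr by GAMMA every LRSTEP epochs
    if writer is not None:
        writer.add_scalar('train/loss', loss.item(), epoch)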
Example #4
from yolo_top import yolov3
import numpy as np
import tensorflow as tf
from data_pipeline import data_pipeline
import config as cfg
import os
#os.environ["CUDA_VISIBLE_DEVICES"]="3"
file_path = 'E:/Python/tensorflow/count/trainval_2018.tfrecord'
log_dir = 'E:/Python/tensorflow/count/logs/'
img_w = 416
img_h = 416
istraining = tf.constant(True, tf.bool)
imgs, true_boxes = data_pipeline(file_path, cfg.batch_size, img_w, img_h, True)
x = tf.placeholder(tf.float32, shape=(None, 416, 416, 3))
y = tf.placeholder(tf.float32, shape=(None, 100, 5))
model = yolov3(imgs, true_boxes, istraining)

with tf.name_scope('loss'):
    (loss, AVG_IOU, coordinates_loss_sum, objectness_loss,
     no_objects_loss_mean, a, b, c, d, e, f) = model.compute_loss()
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('avg', AVG_IOU)
    tf.summary.scalar('coord', coordinates_loss_sum)
    tf.summary.scalar('obj', objectness_loss)
    tf.summary.scalar('no_obj', no_objects_loss_mean)
global_step = tf.Variable(0, trainable=False)
#lr = tf.train.exponential_decay(0.0001, global_step=global_step, decay_steps=2e4, decay_rate=0.1)
lr = tf.train.piecewise_constant(global_step, [30000, 45000],
                                 [1e-4, 5e-5, 1e-5])  # drop the learning rate at the given global steps
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr)
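Example #4 is cut off right after the optimizer. Given that its structure is identical to Examples #1 and #2, the next lines would presumably gate the training op on the batch-norm update ops; a sketch following that pattern:

update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
vars_det = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Head")
with tf.control_dependencies(update_op):
    train_op = optimizer.minimize(loss, global_step=global_step, var_list=vars_det)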