import dataset                      # assumed local module exposing dataset(directory, img_width) -> (X, y, tags)
from keras.utils import np_utils    # one-hot encoding helper (older Keras API)


def build_dataset(data_directory, img_width):
    X, y, tags = dataset.dataset(data_directory, int(img_width))
    nb_classes = len(tags)

    sample_count = len(y)
    train_size = sample_count
    print("train size : {}".format(train_size))
    feature = X
    label = np_utils.to_categorical(y, nb_classes)   # integer labels -> one-hot vectors
    return feature, label, nb_classes
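
# Minimal usage sketch. The directory path and image width are placeholder
# values; the shapes assume dataset() returns NumPy arrays of image features
# and integer labels.
X_train, Y_train, nb_classes = build_dataset("./data/train", 128)
print(X_train.shape)   # (n_samples, ...) image features
print(Y_train.shape)   # (n_samples, nb_classes) one-hot labels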
Example #2
# target[:, :, 4] = target[:, :, 4] / h
# print(target)

# target = torch.tensor(target, dtype=torch.float32).to(device)   #label: [class, centerx, centery, w, h]
# anchors = [[66, 84], [66, 84], [66, 84], [66, 84], [66, 84], [66, 84], [66, 84], [66, 84], [66, 84]]
#
# img = np.array(img, dtype=np.float32)   # an image converted with np.array always has shape [h, w, c]
# img = np.transpose(img, (2, 0, 1))
# img = img[np.newaxis, :] / 255.0
# img = np.concatenate((img, img), axis=0)
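
# Standalone sketch of the preprocessing described by the commented-out lines
# above: float32 HWC image -> CHW -> [0, 1] -> batch dimension. Loading with
# PIL and the fixed 160x160 size are assumptions for illustration only.
import numpy as np
from PIL import Image

def preprocess(image_path, size=(160, 160)):
    """Return a [1, 3, H, W] float32 array in [0, 1] for the given image file."""
    img = Image.open(image_path).convert("RGB").resize(size)
    img = np.array(img, dtype=np.float32)     # np.array gives shape [h, w, c]
    img = np.transpose(img, (2, 0, 1))        # [h, w, c] -> [c, h, w]
    return img[np.newaxis, :] / 255.0         # add batch dim and scale to [0, 1]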

import numpy as np
import torch

import dataset               # assumed local data module; dataset(label_dir, img_size) returns a sampler callable
from yolov3 import YOLOV3    # assumed module path for the local YOLOv3 network

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

PbatchSize = 1
anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119],
           [116, 90], [156, 198], [373, 326]]
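
# The nine pairs above are the standard COCO YOLOv3 anchor priors. Most YOLOv3
# implementations split them into three groups of three, one per detection
# scale; how this particular YOLOV3 class consumes the flat list is an assumption.
anchor_groups = [anchors[0:3],   # finest grid, small objects
                 anchors[3:6],   # middle grid, medium objects
                 anchors[6:9]]   # coarsest grid, large objects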

data = dataset.dataset("./data/label/", (160, 160))

model = YOLOV3(PclassNum=2)
# model = torch.load("./yolov3.pkl")
model = model.to(device)
# inputs = torch.from_numpy(img).to(device)
opt_SGD = torch.optim.Adam(model.parameters(), lr=0.0001)   # note: Adam, despite the opt_SGD name
for i in range(200005):
    target = []
    inputs = None
    for j in range(PbatchSize):
        inputs2, target2 = data()
        # print("target2:", target2.shape)
        inputs2 = inputs2[np.newaxis, :, :, :]
        if inputs is None:
            inputs = inputs2
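        # Hypothetical continuation of the truncated loop. The batch is stacked
        # the same way as the commented-out np.concatenate call above; the loss
        # interface and logging below are assumptions, not the original code.
        else:
            inputs = np.concatenate((inputs, inputs2), axis=0)
        target.append(target2)

    inputs = torch.from_numpy(inputs).to(device)
    loss = model(inputs, target)      # assumed: forward pass returns a scalar training loss
    opt_SGD.zero_grad()
    loss.backward()
    opt_SGD.step()

    if i % 100 == 0:
        print("step {}, loss {:.4f}".format(i, loss.item()))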
Example #3
import torch
from torch.utils.data import DataLoader

from face_modules.model import Backbone   # assumed module path for the ArcFace backbone
from utils.dataset import dataset

print("cuda available=", torch.cuda.is_available())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 8
lr_G = 4e-4
lr_D = 4e-4
max_epoch = 2000
show_step = 1
save_epoch = 1
model_save_path = './saved_models/'
optim_level = 'O1'
pretrained = True

# ----------------------load dataset------------------------------
train_data = dataset(['./aligned'])
train_dataloader = DataLoader(train_data,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=1,
                              drop_last=True)

# ----------------------define networks---------------------------
# Identity Encoder
arcface = Backbone(50, 0.6, 'ir_se').to(device)
arcface.eval()
arcface.load_state_dict(torch.load('./face_modules/model_ir_se50.pth',
                                   map_location=device),
                        strict=False)
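
# Hedged sketch: how a frozen ArcFace encoder like this one is typically used to
# extract identity embeddings during training. The 112x112 input size and the
# helper below are illustrative assumptions, not taken from the original script.
import torch.nn.functional as F

def identity_embedding(face_batch):
    """face_batch: [N, 3, H, W] faces already normalized for the encoder."""
    with torch.no_grad():                         # keep the identity encoder frozen
        resized = F.interpolate(face_batch, size=(112, 112),
                                mode='bilinear', align_corners=True)
        return arcface(resized)                   # [N, 512] identity vectors for ir_se50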

# AEI_net
Example #4
import tensorflow as tf
import time

from utils.dataset import dataset
from net.network import PoseNet
import net.config as cfg

slim = tf.contrib.slim


training_data = dataset(data_size=cfg.DATA_SIZE)
net = PoseNet()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(cfg.PATH + '/save', graph=sess.graph)

# Loading YOLO weights as pretrained parameters
if cfg.PRETRAIN_WEIGHTS is not None:
    include = []
    for num in [2, 4, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 28, 29, 30]:
        weights = 'yolo/conv_' + str(num) + '/weights'
        biases = 'yolo/conv_' + str(num) + '/biases'
        include.append(weights)
        include.append(biases)
    variables_to_restore = slim.get_variables_to_restore(include=include)
    restorer = tf.train.Saver(variables_to_restore, max_to_keep=None)
    print('Loading pretrained weights from ' + cfg.PRETRAIN_WEIGHTS)
    restorer.restore(sess, cfg.PRETRAIN_WEIGHTS)


print('Start Training.....')
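
# Hedged sketch of the training loop this script would typically continue with.
# The attribute names (net.images, net.labels, net.loss) and next_batch() are
# assumptions about the PoseNet / dataset interfaces, not taken from the code
# above; the learning rate, batch size and step count are placeholders.
optimizer = tf.train.AdamOptimizer(1e-4)
train_op = optimizer.minimize(net.loss)
sess.run(tf.variables_initializer(optimizer.variables()))  # init only the new Adam slots

for step in range(10000):
    images, labels = training_data.next_batch(8)
    _, loss_value = sess.run([train_op, net.loss],
                             feed_dict={net.images: images, net.labels: labels})
    # write a scalar loss summary alongside the graph written above
    summary = tf.Summary(value=[tf.Summary.Value(tag='train/loss',
                                                 simple_value=float(loss_value))])
    writer.add_summary(summary, step)
    if step % 100 == 0:
        print('step {}, loss {:.4f}'.format(step, loss_value))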