Example no. 1
0
# Preprocessing settings consumed by voc_utils.get_generator below.
image_preprocess_config = dict(
    data_format='channels_last',   # one of 'channels_last', 'channels_first'
    target_size=[320, 320],
    shorter_side=480,
    is_random_crop=False,
    random_horizontal_flip=0.5,    # probability, <= 1.0
    random_vertical_flip=0.,       # probability, < 1.0
    pad_truth_to=60,               # >= 2 and >= max number of bboxes per image + 1
)

# Paths to the two test-split TFRecord shards.
data = ['./test/test_%05d-of-00005.tfrecord' % shard for shard in range(2)]

# Build the input pipeline and describe the training set to the model.
train_gen = voc_utils.get_generator(
    data, batch_size, buffer_size, image_preprocess_config)
trainset_provider = dict(
    data_shape=[320, 320, 3],
    num_train=5000,
    num_val=0,                  # not used
    train_generator=train_gen,
    val_generator=None,         # not used
)
refinedet = net.RefineDet320(config, trainset_provider)
# refinedet.load_weight('./refinedet320/test-64954')
for epoch in range(epochs):
    print('-' * 25, 'epoch', epoch, '-' * 25)
    # Step the learning rate down by 10x at the configured epochs.
    if epoch in reduce_lr_epoch:
        lr /= 10.
        print('reduce lr, lr=', lr, 'now')
    mean_loss = refinedet.train_one_epoch(lr)
    # NOTE(review): the opening of this dict literal is missing from this
    # chunk — presumably `image_augmentor_config = {`, the name passed to
    # voc_utils.get_generator below. Confirm against the original example.
    'output_shape': [384, 384],
    'zoom_size': [400, 400],
    'crop_method': 'random',
    'flip_prob': [0., 0.5],  # presumably [vertical, horizontal] probabilities — verify
    'fill_mode': 'BILINEAR',
    'keep_aspect_ratios': False,
    'constant_values': 0.,
    'color_jitter_prob': 0.5,
    'rotate': [0.5, -10., 10.],  # presumably [prob, min_deg, max_deg] — verify
    'pad_truth_to': 60,  # pad ground-truth boxes to a fixed count
}

# Collect every file under ./voc2007/ as a training shard path.
voc_dir = './voc2007/'
data = [os.path.join(voc_dir, name) for name in os.listdir(voc_dir)]

# Build the input pipeline and describe the VOC2007 training set to the model.
train_gen = voc_utils.get_generator(
    data, batch_size, buffer_size, image_augmentor_config)
trainset_provider = dict(
    data_shape=[384, 384, 3],
    num_train=5011,
    num_val=0,                  # not used
    train_generator=train_gen,
    val_generator=None,         # not used
)
centernet = net.CenterNet(config, trainset_provider)
# centernet.load_weight('./centernet/test-8350')
# centernet.load_pretrained_weight('./centernet/test-8350')
# NOTE(review): this loop appears truncated in the source chunk — it adjusts
# the learning rate but the per-epoch training call is not visible here.
for epoch_idx in range(epochs):
    print('-' * 25, 'epoch', epoch_idx, '-' * 25)
    if epoch_idx in reduce_lr_epoch:
        lr /= 10.
        print('reduce lr, lr=', lr, 'now')
Example no. 3
0
def run(j, learn_rate, test_net, init_list, mean_loss):
    """Train one epoch, rebuilding the graph at a new input size every
    `resize_num` epochs (multi-scale training; paths suggest a YOLOv2 setup).

    Args:
        j: current epoch index.
        learn_rate: learning rate for this epoch; divided by 5 at epoch 500
            and by 2 at epoch 800.
        test_net: network object exposing train_one_epoch,
            save_section_weight, define_inputs, savers and a tf session.
        init_list: accumulated dataset initializers, re-run after a resize.
        mean_loss: loss carried over from the previous epoch (stored in the
            'latest' checkpoint when resizing).
    """
    global input_shape, backone, head

    if j % resize_num == 0 and j != 0:
        # Checkpoint and tear down the current session before rebuilding
        # the graph for a new input resolution.
        test_net.save_section_weight('latest', './yolo2/latest/latest',
                                     mean_loss, j)
        test_net.sess.close()
        init_list.append(test_net.train_initializer)

        # Pick a new square input size and rescale the anchor priors
        # relative to the 320x320 baseline.
        index = random.randint(0, 9)
        input_shape = [shape[index], shape[index], 3]
        print('Resize:  ' + str(input_shape))
        new_factor = float(input_shape[0] / 320)
        test_net.data_shape = input_shape
        base_priors = [[0.42, 0.72], [0.92, 1.63], [1.78, 3.2],
                       [3.68, 5.2], [7.825, 7.72]]
        priors = [[round(w * new_factor, 3), round(h * new_factor, 3)]
                  for w, h in base_priors]
        priors = tf.convert_to_tensor(priors, dtype=tf.float32)
        test_net.priors = tf.reshape(priors, [1, 1, test_net.num_priors, 2])

        # Rebuild the input pipeline at the new resolution.
        image_augmentor_config['output_shape'] = [
            input_shape[0], input_shape[1]
        ]
        train_gen1 = voc_utils.get_generator(data,
                                             batch_size * config['num_gpu'],
                                             buffer_size,
                                             image_augmentor_config)
        test_net.train_generator = train_gen1
        test_net.train_initializer, test_net.train_iterator = test_net.train_generator
        test_net.sess = tf.Session()
        test_net.sess.run(tf.global_variables_initializer())
        test_net.sess.run(init_list)
        test_net.define_inputs()

        # Locate the newest backbone/head checkpoint stems and restore them.
        for file in os.listdir('./yolo2/latest/'):
            stem = file.split('.')[0]
            if stem.startswith('latest-backone'):
                backone = stem
            elif stem.startswith('latest-head'):
                head = stem
        test_net.latest_weight_saver1.restore(test_net.sess,
                                              './yolo2/latest/' + str(backone))
        test_net.latest_weight_saver2.restore(test_net.sess,
                                              './yolo2/latest/' + str(head))

    print('-' * 25, 'epoch', j, '-' * 25)
    if j == 500:
        learn_rate = learn_rate / 5.
        print('reduce lr, lr=', learn_rate, 'now')
    if j == 800:
        learn_rate = learn_rate / 2.
        # BUG FIX: previously printed the unrelated global `lr` instead of
        # the just-reduced `learn_rate` (compare the epoch-500 branch).
        print('reduce lr, lr=', learn_rate, 'now')
    mean_loss = test_net.train_one_epoch(learn_rate, j)
    print('>> mean loss', mean_loss)
    test_net.save_section_weight('best', './yolo2/best/best', mean_loss,
                                 j)  # 'latest', 'best'