# ImageDB and train_pnet are project-level helpers from the surrounding MTCNN
# training code; their import lines are omitted in these snippets.
def train_net(args):

    # Load the training split; flip augmentation is left disabled here.
    imagedb    = ImageDB(args.anno_file)
    train_imdb = imagedb.load_imdb()
    # train_imdb = imagedb.append_flipped_images(train_imdb)

    # Load the evaluation split.
    imagedb    = ImageDB(args.eval_file)
    eval_imdb  = imagedb.load_imdb()

    print('train : %d\teval : %d' % (len(train_imdb), len(eval_imdb)))

    train_pnet(args, train_imdb, eval_imdb)
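The snippet above takes a parsed argument namespace; a minimal sketch of how such an args object could be built with argparse (the flag names and defaults are illustrative assumptions, not taken from the original repository):

import argparse

def parse_args():
    # Hypothetical CLI: train_net above reads args.anno_file and args.eval_file;
    # train_pnet may expect further fields that are not shown in the snippet.
    parser = argparse.ArgumentParser(description='Train PNet')
    parser.add_argument('--anno_file', type=str, required=True,
                        help='annotation file for the training split')
    parser.add_argument('--eval_file', type=str, required=True,
                        help='annotation file for the evaluation split')
    return parser.parse_args()

if __name__ == '__main__':
    train_net(parse_args())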
Example #2
def train_net(annotation_file,
              model_store_path,
              end_epoch=16,
              frequent=200,
              lr=0.01,
              batch_size=128,
              use_cuda=False):

    imagedb = ImageDB(annotation_file)
    gt_imdb = imagedb.load_imdb()
    # Augment the ground-truth image list with horizontally flipped copies.
    gt_imdb = imagedb.append_flipped_images(gt_imdb)
    train_pnet(model_store_path=model_store_path,
               end_epoch=end_epoch,
               imdb=gt_imdb,
               batch_size=batch_size,
               frequent=frequent,
               base_lr=lr,
               use_cuda=use_cuda)
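A hypothetical call to this variant (the annotation path and model directory are placeholder values, not paths from the source):

train_net(annotation_file='path/to/pnet_train_anno.txt',
          model_store_path='./model_store',
          end_epoch=16,
          frequent=200,
          lr=0.01,
          batch_size=128,
          use_cuda=False)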
Example #3
def train_net(annotation_file,
              model_store_path,
              end_epoch=16,
              frequent=200,
              lr=0.01,
              lr_epoch_decay=[9],
              batch_size=128,
              use_cuda=False,
              load=''):

    imagedb = ImageDB(annotation_file)
    gt_imdb = imagedb.load_imdb()
    print('DATASIZE', len(gt_imdb))
    gt_imdb = imagedb.append_flipped_images(gt_imdb)
    print('FLIP DATASIZE', len(gt_imdb))
    train_pnet(model_store_path=model_store_path,
               end_epoch=end_epoch,
               imdb=gt_imdb,
               batch_size=batch_size,
               frequent=frequent,
               base_lr=lr,
               lr_epoch_decay=lr_epoch_decay,
               use_cuda=use_cuda,
               load=load)
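This variant adds step-wise learning-rate decay (the rate is presumably reduced at the epochs listed in lr_epoch_decay) and a load argument, presumably a checkpoint path for resuming training. A hypothetical invocation with placeholder paths:

train_net(annotation_file='path/to/pnet_train_anno.txt',
          model_store_path='./model_store',
          end_epoch=16,
          lr=0.01,
          lr_epoch_decay=[9],
          batch_size=128,
          use_cuda=True,
          load='')  # set to a saved checkpoint path to resume training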