# Ejemplo n.º 1
# 0
# Launch SSD training with every CLI option forwarded to train_net.
# Positional arguments keep their original order; evaluation / monitoring
# options are collected into a dict and passed as keywords.
_eval_opts = dict(val_path=args.val_path,
                  num_example=args.num_example,
                  class_names=class_names,
                  label_pad_width=args.label_width,
                  freeze_layer_pattern=args.freeze_pattern,
                  iter_monitor=args.monitor,
                  monitor_pattern=args.monitor_pattern,
                  log_file=args.log_file,
                  nms_thresh=args.nms_thresh,
                  force_nms=args.force_nms,
                  ovp_thresh=args.overlap_thresh,
                  use_difficult=args.use_difficult,
                  voc07_metric=args.use_voc07_metric)
train_net(args.network, args.train_path, args.num_class, args.batch_size,
          args.data_shape, [args.mean_r, args.mean_g, args.mean_b],
          args.resume, args.finetune, args.pretrained, args.epoch,
          args.prefix, ctx, args.begin_epoch, args.end_epoch, args.frequent,
          args.learning_rate, args.momentum, args.weight_decay,
          args.lr_refactor_step, args.lr_refactor_ratio, **_eval_opts)
# Ejemplo n.º 2
# 0
                        # (tail of an add_argument call whose opening line is
                        # above this chunk — presumably the '--lr-ratio' option)
                        type=float,
                        default=0.9,
                        help='ratio to refactor learning rate')
    # Optional file the training log is written to (default "train.log").
    parser.add_argument('--log',
                        dest='log_file',
                        type=str,
                        default="train.log",
                        help='save training log to file')
    # N > 0 logs network parameters every N iterations; 0 disables monitoring.
    parser.add_argument(
        '--monitor',
        dest='monitor',
        type=int,
        default=0,
        help='log network parameters every N iters if larger than 0')
    # Parse sys.argv and hand the populated namespace back to the caller.
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    # Script entry point: parse CLI options, build the device context list,
    # then launch VOC-style SSD training.
    args = parse_args()
    # Skip empty tokens so "--gpus ''" or a trailing comma does not crash
    # int('') — matches the guarded variant used elsewhere in this project.
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
    # Fall back to CPU when no GPU ids were given; keep it a list so ctx has
    # a uniform type either way (train_net is assumed to accept a context
    # list, as the sibling script passes one — TODO confirm).
    ctx = [mx.cpu()] if not ctx else ctx
    train_net(args.network, args.dataset, args.image_set, args.year,
              args.devkit_path, args.batch_size, args.data_shape,
              [args.mean_r, args.mean_g, args.mean_b], args.resume,
              args.finetune, args.pretrained, args.epoch, args.prefix, ctx,
              args.begin_epoch, args.end_epoch, args.frequent,
              args.learning_rate, args.momentum, args.weight_decay,
              args.val_image_set, args.val_year, args.lr_refactor_epoch,
              args.lr_refactor_ratio, args.monitor, args.log_file)
# Ejemplo n.º 3
# 0
# Archivo: train.py — Proyecto: 4ker/mxnet
                        help='weight decay')
    # Per-channel mean values subtracted from input images
    # (defaults 123/117/104 — presumably the usual ImageNet channel means).
    parser.add_argument('--mean-r', dest='mean_r', type=float, default=123,
                        help='red mean value')
    parser.add_argument('--mean-g', dest='mean_g', type=float, default=117,
                        help='green mean value')
    parser.add_argument('--mean-b', dest='mean_b', type=float, default=104,
                        help='blue mean value')
    # Learning-rate schedule: multiply lr by the ratio every N epochs.
    parser.add_argument('--lr-epoch', dest='lr_refactor_epoch', type=int, default=50,
                        help='refactor learning rate every N epoch')
    parser.add_argument('--lr-ratio', dest='lr_refactor_ratio', type=float, default=0.9,
                        help='ratio to refactor learning rate')
    # Optional file the training log is written to (default "train.log").
    parser.add_argument('--log', dest='log_file', type=str, default="train.log",
                        help='save training log to file')
    # N > 0 logs network parameters every N iterations; 0 disables monitoring.
    parser.add_argument('--monitor', dest='monitor', type=int, default=0,
                        help='log network parameters every N iters if larger than 0')
    # Parse sys.argv and hand the populated namespace back to the caller.
    args = parser.parse_args()
    return args

if __name__ == '__main__':
    # Script entry point: parse CLI options, build the device context list,
    # then launch VOC-style SSD training.
    args = parse_args()
    # Skip empty tokens so "--gpus ''" or a trailing comma does not crash
    # int('') — matches the guarded variant used elsewhere in this project.
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
    # Fall back to CPU when no GPU ids were given; keep it a list so ctx has
    # a uniform type either way (train_net is assumed to accept a context
    # list, as the sibling script passes one — TODO confirm).
    ctx = [mx.cpu()] if not ctx else ctx
    train_net(args.network, args.dataset, args.image_set, args.year,
              args.devkit_path, args.batch_size,
              args.data_shape, [args.mean_r, args.mean_g, args.mean_b],
              args.resume, args.finetune, args.pretrained,
              args.epoch, args.prefix, ctx, args.begin_epoch, args.end_epoch,
              args.frequent, args.learning_rate, args.momentum, args.weight_decay,
              args.val_image_set, args.val_year, args.lr_refactor_epoch,
              args.lr_refactor_ratio, args.monitor, args.log_file)
# Ejemplo n.º 4
# 0
    return class_names

if __name__ == '__main__':
    # Script entry point: parse CLI options, choose devices, resolve class
    # names, then launch SSD training.
    args = parse_args()
    # Device contexts: one mx.gpu per comma-separated id, ignoring blanks
    # (so "--gpus ''" or a trailing comma is harmless).
    gpu_ids = [tok for tok in args.gpus.split(',') if tok.strip()]
    ctx = [mx.gpu(int(gid)) for gid in gpu_ids]
    if not ctx:
        # No GPU ids requested — train on the CPU instead.
        ctx = [mx.cpu()]
    # Class names, if the dataset provides them.
    class_names = parse_class_names(args)
    # Keyword-only options: validation, monitoring and evaluation settings.
    extra = dict(val_path=args.val_path,
                 num_example=args.num_example,
                 class_names=class_names,
                 label_pad_width=args.label_width,
                 freeze_layer_pattern=args.freeze_pattern,
                 iter_monitor=args.monitor,
                 monitor_pattern=args.monitor_pattern,
                 log_file=args.log_file,
                 nms_thresh=args.nms_thresh,
                 force_nms=args.force_nms,
                 ovp_thresh=args.overlap_thresh,
                 use_difficult=args.use_difficult,
                 voc07_metric=args.use_voc07_metric)
    # Launch training with positional arguments in their original order.
    train_net(args.network, args.train_path, args.num_class, args.batch_size,
              args.data_shape, [args.mean_r, args.mean_g, args.mean_b],
              args.resume, args.finetune, args.pretrained, args.epoch,
              args.prefix, ctx, args.begin_epoch, args.end_epoch,
              args.frequent, args.learning_rate, args.momentum,
              args.weight_decay, args.lr_refactor_step,
              args.lr_refactor_ratio, **extra)
# Ejemplo n.º 5
# 0
from train.train_net import train_net

#import tools.find_mxnet
import mxnet as mx
import os
import sys
import opt

if __name__ == '__main__':
    # Script entry point: train_net is called with no arguments here —
    # presumably it reads its own configuration (see train.train_net and the
    # imported `opt` module); verify against that module.
    train_net()