Code example #1
0
def run_default_validation_server(datadir='../dataset', prefix='val_'):
    """Run a validation server with mostly default values.

    Globs for HDF5 camera files named ``<prefix>*`` under
    ``<datadir>/camera``; when any are found, serves batches from them
    on port 5556 until interrupted with Ctrl-C.

    Args:
        datadir: root directory of the dataset; camera files are
            expected in its ``camera`` subdirectory.
        prefix: filename prefix used to select validation files.
    """
    try:
        datapath = glob.glob(os.path.join(datadir, 'camera', prefix) + "*")
        # Fixed: original used Python 2 `print` statements, which are a
        # SyntaxError under Python 3.
        if not datapath:
            print('no files found to validate with')
        else:
            print('validating with', len(datapath), 'files')
            gen = datagen(datapath,
                          time_len=1,
                          batch_size=256,
                          ignore_goods=False,
                          show_time=False)
            start_server(gen, port=5556, hwm=20)
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C shutdown of the serving loop.
        pass
        'D:/DR/Dataset/comma-dataset/comma_dataset/%s/2016-02-02--10-16-58.h5'
        % (str_data),
        'D:/DR/Dataset/comma-dataset/comma_dataset/%s/2016-02-08--14-56-28.h5'
        % (str_data),
        'D:/DR/Dataset/comma-dataset/comma_dataset/%s/2016-02-11--21-32-47.h5'
        % (str_data),
        'D:/DR/Dataset/comma-dataset/comma_dataset/%s/2016-03-29--10-50-20.h5'
        % (str_data),
        'D:/DR/Dataset/comma-dataset/comma_dataset/%s/2016-04-21--14-48-08.h5'
        % (str_data),
        'D:/DR/Dataset/comma-dataset/comma_dataset/%s/2016-05-12--22-20-00.h5'
        % (str_data),
    ]

    # Two recordings held out for validation.
    validation_path = [
        'D:/DR/Dataset/comma-dataset/comma_dataset/%s/2016-06-02--21-39-29.h5' % str_data,
        'D:/DR/Dataset/comma-dataset/comma_dataset/%s/2016-06-08--11-46-01.h5' % str_data,
    ]

    # Serve the held-out files when --validation was passed, otherwise
    # the training set.
    datapath = validation_path if args.validation else train_path

    gen = datagen(
        datapath,
        time_len=args.time,
        batch_size=args.batch,
        ignore_goods=args.nogood,
    )
    start_server(gen, port=args.port, hwm=args.buffer)
Code example #3
0
                        help='Serve validation dataset instead.')
    # Optional flag to serve only the reduced (~10%) split of the dataset.
    parser.add_argument('--small',
                        dest='small',
                        action='store_true',
                        default=False)
    args, more = parser.parse_known_args()

    # Select which subdirectory the .h5 files live in.
    # NOTE(review): presumably "feat" holds precomputed features and
    # "cam" raw camera frames — confirm against config.
    if config.UseFeat: str_data = "feat"
    else: str_data = "cam"

    # Pick the text file listing the .h5 basenames for the chosen split.
    if args.small:  # 10% of dataset
        if args.validation:
            filenames = os.path.join(config.h5path, 'val_small.txt')
        else:
            filenames = os.path.join(config.h5path, 'train_small.txt')
    else:
        if args.validation: filenames = os.path.join(config.h5path, 'val.txt')
        else: filenames = os.path.join(config.h5path, 'train.txt')

    # Each line of the list file is an .h5 basename; expand to full paths.
    with open(filenames, 'r') as f:
        file_paths = [
            '%s%s/%s.h5' % (config.h5path, str_data, x.strip())
            for x in f.readlines()
        ]

    gen = datagen(file_paths,
                  time_len=config.timelen,
                  batch_size=config.batch_size,
                  ignore_goods=args.nogood)
    start_server(gen, port=args.port, hwm=args.buffer)
Code example #4
0
File: server.py  Project: TulioLima1502/TG
    # Bag-file extracts used for training.
    train_path = [
        './extract-bagfiles/retas_1.h5',
        './extract-bagfiles/retas_2.h5',
        './extract-bagfiles/curvas_suaves_1.h5',
        './extract-bagfiles/curvas_suaves_2.h5',
        './extract-bagfiles/curvas_em_T_1.h5',
    ]

    # NOTE(review): the validation list is currently identical to the
    # training list — confirm this is intentional.
    validation_path = list(train_path)

    # Choose the split requested on the command line.
    datapath = validation_path if args.validation else train_path

    gen = datagen(
        datapath,
        time_len=args.time,
        batch_size=args.batch,
        ignore_goods=args.nogood,
        data_set=args.DataTrainAndTest,
    )
    start_server(gen, port=args.port, hwm=args.buffer)
Code example #5
0
File: trans_data.py  Project: Glooow1024/comma
# 2 for validation
validation_path = [
    './camera/2016-06-02--21-39-29.h5', './camera/2016-06-08--11-46-01.h5'
]

# 2 for test
test_path = [
    './camera/2016-01-30--13-46-00.h5',
    './camera/2016-05-12--22-20-00.h5',
]

# Convert the test split (not the validation split) to tfrecords.
datapath = test_path
time_length = 30  # presumably frames per sampled video sequence — confirm
pack_size = 256  # each tfrecords file holds 256 video sequences
gen = datagen(datapath,
              time_len=time_length,
              batch_size=pack_size,
              ignore_goods=False)
# Pull one batch up front; data[3] is treated as the dataset size below.
# NOTE(review): the exact meaning of data[3] depends on datagen — confirm.
data = next(gen)
dataset_num = data[3]

count = 0
while count * 30 < dataset_num / 25:  #dataset_num :
    try:
        data = next(gen)  # 取出256*30张图片
        starts = count
        ends = starts + pack_size - 1
        count = count + pack_size
        tfrecords_filename = '../video_prediction/data/comma/test/traj_%d_to_%d.tfrecords' % (
            starts, ends)
        writer = python_io.TFRecordWriter(
Code example #6
0
File: server.py  Project: 1165048017/research
  parser.add_argument('--validation',
                      dest='validation',
                      action='store_true',
                      default=False,
                      help='Serve validation dataset instead.')
  args, more = parser.parse_known_args()

  # Nine drives used for training.
  train_path = [
    './dataset/camera/2016-01-30--11-24-51.h5',
    './dataset/camera/2016-01-30--13-46-00.h5',
    './dataset/camera/2016-01-31--19-19-25.h5',
    './dataset/camera/2016-02-02--10-16-58.h5',
    './dataset/camera/2016-02-08--14-56-28.h5',
    './dataset/camera/2016-02-11--21-32-47.h5',
    './dataset/camera/2016-03-29--10-50-20.h5',
    './dataset/camera/2016-04-21--14-48-08.h5',
    './dataset/camera/2016-05-12--22-20-00.h5',
  ]

  # Two drives held out for validation.
  validation_path = [
    './dataset/camera/2016-06-02--21-39-29.h5',
    './dataset/camera/2016-06-08--11-46-01.h5',
  ]

  # Serve whichever split the command line asked for.
  datapath = validation_path if args.validation else train_path

  gen = datagen(datapath,
                time_len=args.time,
                batch_size=args.batch,
                ignore_goods=args.nogood)
  start_server(gen, port=args.port, hwm=args.buffer)