Example #1
0
def main(_):
    """Entry point: convert a Pascal VOC dataset into TFRecord files.

    Requires --dataset_dir; records named FLAGS.output_name are written
    into FLAGS.output_dir.
    """
    dataset_dir = FLAGS.dataset_dir
    if not dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')
    output_dir = FLAGS.output_dir
    print('Dataset directory:', dataset_dir)
    print('Output directory:', output_dir)
    pascalvoc_to_tfrecords.run(dataset_dir, output_dir, FLAGS.output_name)
def main(_):
    """Entry point: dispatch TFRecord conversion based on --dataset_name.

    Only 'pascalvoc' is supported; anything else raises ValueError.
    """
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    print('Dataset directory:', FLAGS.dataset_dir)
    print('Output directory:', FLAGS.output_dir)

    # Guard clause: reject unknown dataset names up front.
    if FLAGS.dataset_name != 'pascalvoc':
        raise ValueError('Dataset [%s] was not recognized.' % FLAGS.dataset_name)
    pascalvoc_to_tfrecords.run(FLAGS.dataset_dir, FLAGS.output_dir, FLAGS.output_name)
def main(_):
    """Entry point: convert the dataset selected by --dataset_name to TFRecords."""
    source_dir = FLAGS.dataset_dir
    if not source_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    print('Dataset directory:', source_dir)
    print('Output directory:', FLAGS.output_dir)

    selected = FLAGS.dataset_name
    if selected == 'pascalvoc':
        pascalvoc_to_tfrecords.run(source_dir, FLAGS.output_dir, FLAGS.output_name)
    else:
        raise ValueError('Dataset [%s] was not recognized.' % selected)
Example #4
0
def main(_):
    """Build the RDN model and run training, evaluation, or testing per FLAGS."""
    # Let TensorFlow grow GPU memory on demand instead of grabbing it all.
    gpu_options = tf.GPUOptions(allow_growth=True)
    session_config = tf.ConfigProto(log_device_placement=False,
                                    gpu_options=gpu_options)
    sess = tf.Session(config=session_config)

    rdn = RDN(sess,
              is_train=FLAGS.is_train,
              is_eval=FLAGS.is_eval,
              image_size=FLAGS.image_size,
              noise_level=FLAGS.noise_level,
              c_dim=FLAGS.c_dim,
              batch_size=FLAGS.batch_size,
              D=FLAGS.D,
              C=FLAGS.C,
              G=FLAGS.G,
              G0=FLAGS.G0,
              kernel_size=FLAGS.kernel_size)

    if rdn.is_train:
        rdn.train(FLAGS)
        return

    if rdn.is_eval:
        # Run RDN evaluation first, then feed its outputs through the SSD
        # evaluation pipeline (TFRecord conversion + ssd_eval).
        rdn.eval(FLAGS)
        cwd = os.getcwd()
        dataset_dir = os.path.join(cwd, FLAGS.result_dir,
                                   '%s_sigma/' % FLAGS.noise_level)
        output_eval_path = os.path.join(cwd, 'Data', FLAGS.eval_set)
        output_eval_path += '/sigma_%s' % FLAGS.noise_level
        if not os.path.exists(output_eval_path):
            os.makedirs(output_eval_path)
        pascalvoc_to_tfrecords.run(dataset_dir, output_eval_path,
                                   'voc_2007_test')

        # NOTE(review): the results dir uses '%s_sigma' while the log dir
        # uses '%d_sigma' — confirm whether this difference is intentional.
        eval_logdir = os.path.join(cwd, 'logs',
                                   '%d_sigma' % FLAGS.noise_level)
        output_eval_path += '/'
        ssd_network.ssd_eval('pascalvoc_2007',
                             dataset_dir=output_eval_path,
                             batch_size=FLAGS.batch_size,
                             eval_dir=eval_logdir)
        return

    # Neither training nor evaluating: run SSD object detection on the
    # previously produced test images.
    test_path = os.path.join(os.getcwd(), FLAGS.result_dir, 'test')
    ssd_network.ssd_test(test_path)
def main(_):
    """Convert a detection dataset (Pascal VOC or KITTI) to TFRecords.

    Reads the source tree from FLAGS.dataset_root and writes shuffled
    TFRecord files into FLAGS.output_dir.

    Raises:
        ValueError: if --dataset_root is empty or --dataset_name is not
            one of 'pascalvoc' / 'kitti'.
    """
    # Fail fast with a clear message instead of crashing later inside the
    # converter — consistent with the other conversion entry points.
    if not FLAGS.dataset_root:
        raise ValueError('You must supply the dataset root with --dataset_root')
    print('Dataset root dir:', FLAGS.dataset_root)
    print('Output directory:', FLAGS.output_dir)

    if FLAGS.dataset_name == 'pascalvoc':
        pascalvoc_to_tfrecords.run(FLAGS.dataset_root,
                                   FLAGS.year,
                                   FLAGS.split,
                                   FLAGS.output_dir,
                                   shuffling=True)
    elif FLAGS.dataset_name == 'kitti':
        kitti_object_to_tfrecords.run(FLAGS.dataset_root,
                                      FLAGS.split,
                                      FLAGS.output_dir,
                                      shuffling=True)
    else:
        raise ValueError('Dataset [%s] was not recognized.' % FLAGS.dataset_name)
def main(_):
    """Convert Pascal VOC or KITTI data under FLAGS.dataset_root to TFRecords.

    Output is shuffled and written to FLAGS.output_dir.

    Raises:
        ValueError: if --dataset_root is empty or --dataset_name is
            unrecognized.
    """
    # Validate the required flag up front (the sibling converters do the
    # same) rather than failing obscurely inside the run() call.
    if not FLAGS.dataset_root:
        raise ValueError('You must supply the dataset root with --dataset_root')
    print('Dataset root dir:', FLAGS.dataset_root)
    print('Output directory:', FLAGS.output_dir)

    if FLAGS.dataset_name == 'pascalvoc':
        pascalvoc_to_tfrecords.run(FLAGS.dataset_root,
                                   FLAGS.year,
                                   FLAGS.split,
                                   FLAGS.output_dir,
                                   shuffling=True)
    elif FLAGS.dataset_name == 'kitti':
        kitti_object_to_tfrecords.run(FLAGS.dataset_root,
                                      FLAGS.split,
                                      FLAGS.output_dir,
                                      shuffling=True)
    else:
        raise ValueError('Dataset [%s] was not recognized.' % FLAGS.dataset_name)
def main(_):
    """Convert Pascal VOC or KITTI data to TFRecords based on FLAGS.

    Raises:
        ValueError: if --dataset_dir is missing or --dataset_name is not
            one of 'pascalvoc' / 'kitti'.
    """
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')
    print('Dataset directory:', FLAGS.dataset_dir)
    print('Output directory:', FLAGS.output_dir)

    if FLAGS.dataset_name == 'pascalvoc':
        pascalvoc_to_tfrecords.run(FLAGS.dataset_dir, FLAGS.output_dir,
                                   FLAGS.output_name)
    elif FLAGS.dataset_name == 'kitti':
        # The flag is string-typed: only the literal 'True' enables the
        # validation split (e.g. 'true' does not — behavior preserved).
        # Folding the flag into one boolean removes the duplicated call.
        kitti_to_tfrecords.run(
            dataset_dir=FLAGS.dataset_dir,
            output_dir=FLAGS.output_dir,
            name=FLAGS.output_name,
            need_validation_split=(FLAGS.need_validation_split == 'True'))
    else:
        raise ValueError('Dataset [%s] was not recognized.' %
                         FLAGS.dataset_name)