Example #1
def build_coco_dsets(args):
    dset_kwargs = {
        'image_dir': args.coco_train_image_dir,
        'instances_json': args.coco_train_instances_json,
        'stuff_json': args.coco_train_stuff_json,
        'stuff_only': args.coco_stuff_only,
        'image_size': args.image_size,
        'mask_size': args.mask_size,
        'max_samples': args.num_train_samples,
        'min_object_size': args.min_object_size,
        'min_objects_per_image': args.min_objects_per_image,
        'instance_whitelist': args.instance_whitelist,
        'stuff_whitelist': args.stuff_whitelist,
        'include_other': args.coco_include_other,
        'include_relationships': args.include_relationships,
    }
    train_dset = CocoSceneGraphDataset(**dset_kwargs)
    num_objs = train_dset.total_objects()
    num_imgs = len(train_dset)
    print('Training dataset has %d images and %d objects' %
          (num_imgs, num_objs))
    print('(%.2f objects per image)' % (float(num_objs) / num_imgs))

    dset_kwargs['image_dir'] = args.coco_val_image_dir
    dset_kwargs['instances_json'] = args.coco_val_instances_json
    dset_kwargs['stuff_json'] = args.coco_val_stuff_json
    dset_kwargs['max_samples'] = args.num_val_samples
    val_dset = CocoSceneGraphDataset(**dset_kwargs)

    assert train_dset.vocab == val_dset.vocab
    vocab = json.loads(json.dumps(train_dset.vocab))

    return vocab, train_dset, val_dset
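A minimal usage sketch for Example #1 (all paths and option values below are placeholders, and the surrounding project is assumed to provide CocoSceneGraphDataset and json in the scope where build_coco_dsets is defined):

from argparse import Namespace
from torch.utils.data import DataLoader

# Hypothetical arguments; in the original scripts these come from argparse.
args = Namespace(
    coco_train_image_dir='coco/images/train2017',
    coco_train_instances_json='coco/annotations/instances_train2017.json',
    coco_train_stuff_json='coco/annotations/stuff_train2017.json',
    coco_val_image_dir='coco/images/val2017',
    coco_val_instances_json='coco/annotations/instances_val2017.json',
    coco_val_stuff_json='coco/annotations/stuff_val2017.json',
    coco_stuff_only=True, image_size=(64, 64), mask_size=16,
    num_train_samples=None, num_val_samples=1024,
    min_object_size=0.02, min_objects_per_image=3,
    instance_whitelist=None, stuff_whitelist=None,
    coco_include_other=False, include_relationships=True,
)

vocab, train_dset, val_dset = build_coco_dsets(args)

# Batching scene graphs normally needs a custom collate_fn (omitted here),
# so this DataLoader is only a sketch of how the datasets would be consumed.
train_loader = DataLoader(train_dset, batch_size=32, shuffle=True)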
Example #2
def build_coco_dsets(args):
    dset_kwargs = {
        'image_dir': args.coco_train_image_dir,
        'instances_json': args.coco_train_instances_json,
        'stuff_json': args.coco_train_stuff_json,
        'stuff_only': args.coco_stuff_only,
        'image_size': args.image_size,
        'mask_size': args.mask_size,
        'max_samples': args.num_train_samples,
        'min_object_size': args.min_object_size,
        'min_objects_per_image': args.min_objects_per_image,
        'instance_whitelist': args.instance_whitelist,
        'stuff_whitelist': args.stuff_whitelist,
        'include_other': args.coco_include_other,
        'include_relationships': args.include_relationships,
        'seed': 0,  # randomize for train
        #'heuristics_ordering' : args.heuristics_ordering
    }

    train_dset = None
    if not args.coco_view_validation_error:
        train_dset = CocoSceneGraphDataset(**dset_kwargs)
        num_objs = train_dset.total_objects()
        num_imgs = len(train_dset)
        print('Training dataset has %d images and %d objects' %
              (num_imgs, num_objs))
        print('(%.2f objects per image)' % (float(num_objs) / num_imgs))

    dset_kwargs['image_dir'] = args.coco_val_image_dir
    dset_kwargs['instances_json'] = args.coco_val_instances_json
    dset_kwargs['stuff_json'] = args.coco_val_stuff_json
    dset_kwargs['max_samples'] = args.num_val_samples
    #  *deactivate* randomization for val (for consistent results)
    dset_kwargs['seed'] = args.random_seed
    val_dset = CocoSceneGraphDataset(**dset_kwargs)

    if args.coco_view_validation_error:
        print('Using val dataset as train dataset to view validation error')
        train_dset = val_dset
        num_objs = train_dset.total_objects()
        num_imgs = len(train_dset)
        print('Validation dataset has %d images and %d objects' %
              (num_imgs, num_objs))
        print('(%.2f objects per image)' % (float(num_objs) / num_imgs))

    assert train_dset.vocab == val_dset.vocab
    vocab = json.loads(json.dumps(train_dset.vocab))

    return vocab, train_dset, val_dset
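Example #2 adds a coco_view_validation_error switch on top of Example #1; a short sketch of how the two modes might be driven (flag and attribute names are taken from the code above, everything else is illustrative):

# Normal training: build both splits; seed 0 randomizes the training set,
# while the validation set uses the fixed args.random_seed.
args.coco_view_validation_error = False
args.random_seed = 42
vocab, train_dset, val_dset = build_coco_dsets(args)

# Debug mode: skip building the training split and reuse the validation set
# as train_dset, so the training loop effectively reports validation error.
args.coco_view_validation_error = True
vocab, train_dset, val_dset = build_coco_dsets(args)
assert train_dset is val_dset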
Example #3
def build_coco_dset(args, checkpoint):

    checkpoint_args = checkpoint['args']
    dset_kwargs = {
        'image_dir': args.coco_train_image_dir,
        'instances_json': args.coco_train_instances_json,
        'stuff_json': args.coco_train_stuff_json,
        'stuff_only': checkpoint_args['coco_stuff_only'],
        'oldlist_txt': args.coco_train_oldlist_txt,
        'image_size': args.image_size,
        'mask_size': checkpoint_args['mask_size'],
        'max_samples': args.num_samples,
        'min_object_size': checkpoint_args['min_object_size'],
        'min_objects_per_image': checkpoint_args['min_objects_per_image'],
        'instance_whitelist': checkpoint_args['instance_whitelist'],
        'stuff_whitelist': checkpoint_args['stuff_whitelist'],
        'include_other': checkpoint_args.get('coco_include_other', True),
    }
    # training set
    if args.which_data == 'train':
        dset = CocoSceneGraphDataset(**dset_kwargs)
    else:  # validation set
        dset_kwargs['image_dir'] = args.coco_val_image_dir
        dset_kwargs['instances_json'] = args.coco_val_instances_json
        dset_kwargs['stuff_json'] = args.coco_val_stuff_json
        dset_kwargs['oldlist_txt'] = args.coco_val_oldlist_txt
        dset = CocoSceneGraphDataset(**dset_kwargs)

    num_objs = dset.total_objects()
    num_imgs = len(dset)
    print('Dataset has %d images and %d objects' %
          (num_imgs, num_objs))
    print('(%.2f objects per image)' % (float(num_objs) / num_imgs))

    return dset
Example #4
def build_coco_dset(args, checkpoint):
  checkpoint_args = checkpoint['args']
  print('include other: ', checkpoint_args.get('coco_include_other'))
  dset_kwargs = {
    'image_dir': args.coco_image_dir,
    'instances_json': args.instances_json,
    'stuff_json': args.stuff_json,
    'stuff_only': checkpoint_args['coco_stuff_only'],
    'image_size': args.image_size,
    'mask_size': checkpoint_args['mask_size'],
    'max_samples': args.num_samples,
    'min_object_size': checkpoint_args['min_object_size'],
    'min_objects_per_image': checkpoint_args['min_objects_per_image'],
    'instance_whitelist': checkpoint_args['instance_whitelist'],
    'stuff_whitelist': checkpoint_args['stuff_whitelist'],
    'include_other': checkpoint_args.get('coco_include_other', True),
  }
  dset = CocoSceneGraphDataset(**dset_kwargs)
  return dset
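A hedged sketch of how Example #4 might be called at evaluation time (the checkpoint path and all option values are placeholders; checkpoint['args'] is expected to hold the training-time options the function restores):

import torch
from argparse import Namespace

# Placeholder eval-time options; the remaining dataset settings come from
# the checkpoint saved at training time.
args = Namespace(
    coco_image_dir='coco/images/val2017',
    instances_json='coco/annotations/instances_val2017.json',
    stuff_json='coco/annotations/stuff_val2017.json',
    image_size=(64, 64),
    num_samples=1024,
)

checkpoint = torch.load('output/model.pt', map_location='cpu')
dset = build_coco_dset(args, checkpoint)
print('Loaded %d images (%d objects)' % (len(dset), dset.total_objects()))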
Example #5
def build_coco_dsets(args):
    dset_kwargs = {
        'image_dir': args.coco_train_image_dir,
        'instances_json': args.coco_train_instances_json,
        'stuff_json': args.coco_train_stuff_json,
        'stuff_only': args.coco_stuff_only,
        'image_size': args.image_size,
        'mask_size': args.mask_size,
        'max_samples': args.num_train_samples,
        'min_object_size': args.min_object_size,
        'min_objects_per_image': args.min_objects_per_image,
        'instance_whitelist': args.instance_whitelist,
        'stuff_whitelist': args.stuff_whitelist,
        'include_other': args.coco_include_other,
        'include_relationships': args.include_relationships,
        'seed': args.random_seed
    }

    train_dset = None
    #train_dset = CocoSceneGraphDataset(**dset_kwargs)
    #num_objs = train_dset.total_objects()
    #num_imgs = len(train_dset)
    #print('Training dataset has %d images and %d objects' % (num_imgs, num_objs))
    #print('(%.2f objects per image)' % (float(num_objs) / num_imgs))

    dset_kwargs['image_dir'] = args.coco_val_image_dir
    dset_kwargs['instances_json'] = args.coco_val_instances_json
    dset_kwargs['stuff_json'] = args.coco_val_stuff_json
    dset_kwargs['max_samples'] = args.num_val_samples
    #  *deactivate* randomization for val (for consistent results)
    dset_kwargs['seed'] = args.random_seed
    val_dset = CocoSceneGraphDataset(**dset_kwargs)

    #assert train_dset.vocab == val_dset.vocab
    #vocab = json.loads(json.dumps(train_dset.vocab))
    vocab = json.loads(json.dumps(val_dset.vocab))

    return vocab, train_dset, val_dset