def _check_if_continue(self, iter, max_iters, snapshot_add):
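    # Skip iterations already covered by a restored snapshot: before
    # IMG_START_IDX we just advance the counter; at IMG_START_IDX we rebase
    # iter to 0 and shift the schedule offsets (DET_START, STEPSIZE, POST_SS)
    # by the same amount.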
    img_start_idx = cfg.DRL_RPN_TRAIN.IMG_START_IDX
    if iter > img_start_idx:
        return iter, max_iters, snapshot_add, False
    if iter < img_start_idx:
        print("iter %d < img_start_idx %d -- continuing" %
              (iter, img_start_idx))
        iter += 1
        return iter, max_iters, snapshot_add, True
    if iter == img_start_idx:
        print("Adjusting stepsize, train-det-start etcetera")
        snapshot_add = img_start_idx
        max_iters -= img_start_idx
        iter = 0
        cfg_from_list(['DRL_RPN_TRAIN.IMG_START_IDX', -1])
        cfg_from_list([
            'DRL_RPN_TRAIN.DET_START',
            cfg.DRL_RPN_TRAIN.DET_START - img_start_idx
        ])
        cfg_from_list([
            'DRL_RPN_TRAIN.STEPSIZE',
            cfg.DRL_RPN_TRAIN.STEPSIZE - img_start_idx
        ])
        cfg_from_list(
            ['TRAIN.STEPSIZE', [cfg.TRAIN.STEPSIZE[0] - img_start_idx]])
        cfg_from_list([
            'DRL_RPN_TRAIN.POST_SS',
            [cfg.DRL_RPN_TRAIN.POST_SS[0] - img_start_idx]
        ])
        print("Done adjusting stepsize, train-det-start etcetera")
        return iter, max_iters, snapshot_add, False
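
Note: every snippet below overrides config entries through the same flattened
key/value convention. A minimal sketch, assuming the py-faster-rcnn-style
cfg_from_list (dotted keys address nested config entries; string values are
parsed back with ast.literal_eval):

# hypothetical overrides -- illustrates the [key, value, key, value, ...] shape
overrides = [
    'ANCHOR_SCALES', [8, 16, 32],   # plain Python values are used as-is
    'TRAIN.STEPSIZE', '[40000]',    # strings are literal-eval'd to the target type
]
cfg_from_list(overrides)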
Example #2
    def __init__(self, args):

        self.imdb_name = args.imdb_name
        self.net_name = args.net_name
        self.tag = args.tag
        self.iters = args.iters

        # Config
        cfg_file = osp.join(mrcn_dir,
                            'experiments/cfgs/%s.yml' % self.net_name)
        cfg_list = [
            'ANCHOR_SCALES', [4, 8, 16, 32], 'ANCHOR_RATIOS', [0.5, 1, 2]
        ]
        if cfg_file is not None: cfg_from_file(cfg_file)
        if cfg_list is not None: cfg_from_list(cfg_list)
        print('Using config:')
        pprint.pprint(cfg)

        # load imdb
        self.imdb = get_imdb(get_imdb_name(self.imdb_name)['TEST_IMDB'])

        # Load network
        self.net = self.load_net()

        self._scale = None
Example #3
def main(args):

    opt = vars(args)

    # initialize
    opt['dataset_splitBy'] = opt['dataset'] + '_' + opt['splitBy']
    checkpoint_dir = osp.join(opt['checkpoint_path'], opt['dataset_splitBy'])
    if not osp.isdir(checkpoint_dir): os.makedirs(checkpoint_dir)

    # set random seed
    torch.manual_seed(opt['seed'])
    random.seed(opt['seed'])

    # set up loader
    data_json = osp.join('cache/prepro', opt['dataset_splitBy'], 'data.json')
    data_h5 = osp.join('cache/prepro', opt['dataset_splitBy'], 'data.h5')

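    # GtMRCNLoader serves the preprocessed data.json/data.h5 pair built under cache/prepro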
    loader = GtMRCNLoader(data_json, data_h5)

    # set up model
    opt['vocab_size'] = loader.vocab_size
    opt['C4_feat_dim'] = 1024
    net = resnetv1(opt, batch_size=opt['batch_size'],
                   num_layers=101)  # determine batch size in opt.py

    # output directory where the models are saved
    output_dir = osp.join(opt['dataset_splitBy'],
                          'output_{}'.format(opt['output_postfix']))
    print('Output will be saved to `{:s}`'.format(output_dir))

    # tensorboard directory where the summaries are saved during training
    tb_dir = osp.join(opt['dataset_splitBy'],
                      'tb_{}'.format(opt['output_postfix']))
    print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

    # also add the validation set, but with no flipping images
    orgflip = cfg.TRAIN.USE_FLIPPED
    cfg.TRAIN.USE_FLIPPED = False
    cfg.TRAIN.USE_FLIPPED = orgflip
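    # NOTE: this save/restore is currently a no-op -- the validation-roidb
    # load that needed flipping disabled (see the later training example)
    # is not present here.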

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    #train_net(net, imdb, roidb, valroidb, output_dir, tb_dir,
    train_net(
        net,
        loader,
        output_dir,
        tb_dir,
        pretrained_model='pyutils/mask-faster-rcnn/output/res101/'
                         'coco_2014_train_minus_refer_valtest+coco_2014_valminusminival/'
                         'notime/res101_mask_rcnn_iter_1250000.pth',
        max_iters=args.max_iters)
Example #4
  def __init__(self, args):

    self.imdb_name = args.imdb_name
    self.net_name = args.net_name
    self.tag = args.tag
    self.iters = args.iters

    # Config
    cfg_file = osp.join(mrcn_dir, 'experiments/cfgs/%s.yml' % self.net_name)
    cfg_list = ['ANCHOR_SCALES', [4,8,16,32], 'ANCHOR_RATIOS', [0.5,1,2]]
    if cfg_file is not None: cfg_from_file(cfg_file)
    if cfg_list is not None: cfg_from_list(cfg_list)
    print('Using config:')
    pprint.pprint(cfg)

    # Load network
    self.num_classes = 81  # hard code this
    self.net = self.load_net()
Example #5
def my_main(imdb_name, network, cfg_file, set_cfgs, tag, max_iters, im_names,
            score_thresh, clip_bbox):

    # Clip bboxes after bbox reg to image boundary
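    # (str() is safe here: cfg_from_list literal-evals string values back to typed ones)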
    cfg_from_list(['TEST.BBOX_CLIP', str(clip_bbox)])

    # Already set everything here, so the path can be determined correctly
    if cfg_file:
        cfg_from_file(cfg_file)
    if set_cfgs:
        cfg_from_list(set_cfgs)

    model_dir = osp.abspath(
        osp.join(cfg.ROOT_DIR, 'output', 'frcnn', cfg.EXP_DIR, imdb_name, tag))
    model = osp.join(
        model_dir,
        cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}'.format(max_iters) + '.pth')
    output_dir = osp.join(model_dir, 'demo')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    args = {
        'imdb_name': imdb_name,
        'net': network,
        'cfg_file': None,
        'set_cfgs': None,
        'tag': tag,
        'output_dir': output_dir,
        'model': model,
        'im_names': im_names,
        'score_thresh': score_thresh
    }

    print('Called with args:')
    print(args)

    frcnn_demo(args)
Example #6
def evaluate(args):

    opt = vars(args)

    # make other options
    opt['dataset_splitBy'] = opt['dataset'] + '_' + opt['splitBy']

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    # set up loader
    data_json = osp.join('cache/prepro', opt['dataset_splitBy'], 'data.json')
    data_h5 = osp.join('cache/prepro', opt['dataset_splitBy'], 'data.h5')
    loader = GtMRCNLoader(data_json, data_h5)

    # set up model
    opt['vocab_size'] = loader.vocab_size
    opt['C4_feat_dim'] = 1024
    net = resnetv1(opt, batch_size=1, num_layers=101)

    net.create_architecture(81,
                            tag='default',
                            anchor_scales=cfg.ANCHOR_SCALES,
                            anchor_ratios=cfg.ANCHOR_RATIOS)

    sfile = osp.join(opt['dataset_splitBy'],
                     'output_{}'.format(opt['output_postfix']),
                     'res101_mask_rcnn_iter_{}.pth'.format(opt['model_iter']))
    print('Restoring model snapshots from {:s}'.format(sfile))
    saved_state_dict = torch.load(str(sfile))
    count_1 = 0
    new_params = net.state_dict().copy()
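    # copy parameters whose name and shape match the snapshot; count the rest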
    for name, param in new_params.items():
        #print(name, param.size(), saved_state_dict[name].size())
        if name in saved_state_dict and param.size() == saved_state_dict[name].size():
            new_params[name].copy_(saved_state_dict[name])
            #print('---- copy ----')
        else:
            print(name, '----')
            count_1 += 1
    print('parameters not copied (name or size mismatch):', count_1)
    net.load_state_dict(new_params)

    net.eval()
    net.cuda()

    split = opt['split']

    crit = None
    acc, eval_seg_iou_list, seg_correct, seg_total, cum_I, cum_U, num_sent = eval_split(
        loader, net, crit, split, opt)
    print('Comprehension on %s\'s %s (%s sents) is %.2f%%' % \
          (opt['dataset_splitBy'], split, num_sent, acc*100.))

    # write to results.txt
    with open('experiments/det_results.txt', 'a') as f:
        f.write('[%s][%s], id[%s]\'s acc is %.2f%%\n' % \
                (opt['dataset_splitBy'], opt['split'], opt['id'], acc*100.0))

    # print
    print('Segmentation results on [%s][%s]' % (opt['dataset_splitBy'], split))
    results_str = ''
    for n_eval_iou in range(len(eval_seg_iou_list)):
        results_str += '    precision@%s = %.2f\n' % \
          (str(eval_seg_iou_list[n_eval_iou]), seg_correct[n_eval_iou]*100./seg_total)
    results_str += '    overall IoU = %.2f\n' % (cum_I * 100. / cum_U)
    print(results_str)

    # save results
    #save_dir = osp.join('cache/results', opt['dataset_splitBy'], 'masks')
    #if not osp.isdir(save_dir):
    #  os.makedirs(save_dir)

    #results['iou'] = cum_I*1./cum_U
    #assert 'rle' in results['predictions'][0]
    #with open(osp.join(save_dir, args.id+'_'+split+'.json'), 'w') as f:
    #  json.dump(results, f)

    # write to results.txt
    with open('experiments/mask_results.txt', 'a') as f:
        f.write('[%s][%s]\'s iou is:\n%s' % \
                (opt['dataset_splitBy'], split, results_str))
Example #7
    imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
  else:
    imdb = get_imdb(imdb_names)
  return imdb, roidb


if __name__ == '__main__':
  args = parse_args()

  print('Called with args:')
  print(args)

  if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
  if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)

  print('Using config:')
  pprint.pprint(cfg)
  np.random.seed(cfg.RNG_SEED)

  # train set
  imdb, roidb = combined_roidb(args.imdb_name)
  print('{:d} roidb entries'.format(len(roidb)))

  # Set class names in config file based on IMDB
  class_names = imdb.classes
  cfg_from_list(['CLASS_NAMES', [class_names]])

  # Update config depending on if class-specific history used or not
  if not args.use_hist:
Example #8
def main(args):
  opt = vars(args)
  
  # initialize
  opt['dataset_splitBy'] = opt['dataset'] + '_' + opt['splitBy']
  checkpoint_dir = osp.join(opt['checkpoint_path'], opt['dataset_splitBy'])
  if not osp.isdir(checkpoint_dir): os.makedirs(checkpoint_dir)

  # set random seed
  torch.manual_seed(opt['seed'])
  random.seed(opt['seed'])
  
  # set up loader
  data_json = osp.join('cache/prepro', opt['dataset_splitBy'], 'data.json')
  data_h5 = osp.join('cache/prepro', opt['dataset_splitBy'], 'data.h5')
  
  loader = CycleLoader(data_json, data_h5) ####
  
  # set up model
  opt['vocab_size'] = loader.vocab_size
  #opt['fc7_dim']   = loader.fc7_dim
  #opt['pool5_dim'] = loader.pool5_dim
  #opt['num_atts']  = loader.num_atts
  #model = JointMatching(opt)
  opt['C4_feat_dim'] = 1024
  opt['use_att'] = utils.if_use_att(opt['caption_model'])
  opt['seq_length'] = loader.label_length
  
  
  #### can change to restore opt from info.pkl
  #infos = {}
  #histories = {}
  if opt['start_from'] is not None:
    # open old infos and check if models are compatible
    # pickle files should be opened in binary mode
    with open(os.path.join(opt['dataset_splitBy'], opt['start_from'], 'infos-best.pkl'), 'rb') as f:
      infos = cPickle.load(f)
      saved_model_opt = infos['opt']
      need_be_same = ['caption_model', 'rnn_type', 'rnn_size', 'num_layers']
      for checkme in need_be_same:
        assert vars(saved_model_opt)[checkme] == opt[checkme], "Command line argument and saved model disagree on '%s'" % checkme

    #if os.path.isfile(os.path.join(opt['dataset_splitBy'], opt['start_from'], 'histories.pkl')):
    #  with open(os.path.join(opt['dataset_splitBy'], opt['start_from'], 'histories.pkl')) as f:
    #    histories = cPickle.load(f)

  net = resnetv1(opt, batch_size=opt['batch_size'], num_layers=101) #### determine batch size in opt.py
  
  # output directory where the models are saved
  output_dir = osp.join(opt['dataset_splitBy'], 'output_{}'.format(opt['output_postfix']))
  print('Output will be saved to `{:s}`'.format(output_dir))

  # tensorboard directory where the summaries are saved during training
  tb_dir = osp.join(opt['dataset_splitBy'], 'tb_{}'.format(opt['output_postfix']))
  print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

  # also add the validation set, but with no flipping images
  orgflip = cfg.TRAIN.USE_FLIPPED
  cfg.TRAIN.USE_FLIPPED = False
  #_, valroidb = combined_roidb('coco_2014_minival')
  #_, valroidb = combined_roidb('refcoco_test')
  #print('{:d} validation roidb entries'.format(len(valroidb)))
  cfg.TRAIN.USE_FLIPPED = orgflip
  
  if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
  if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)
  
  
  #train_net(net, imdb, roidb, valroidb, output_dir, tb_dir,
  train_net(net, loader, output_dir, tb_dir,
            pretrained_model='pyutils/mask-faster-rcnn/output/res101/coco_2014_train_minus_refer_valtest+coco_2014_valminusminival/notime/res101_mask_rcnn_iter_1250000.pth',
            max_iters=args.max_iters)
Example #9
  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, np.array(im_scale_factors)


if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    # if a model is given, get the name from it;
    # otherwise just use the initialization weights
    if args.model:
        filename = os.path.splitext(os.path.basename(args.model))[0]
    else:
        filename = os.path.splitext(os.path.basename(args.weight))[0]

    tag = args.tag
    tag = tag if tag else 'default'
    filename = tag + '/' + filename
Example #10
        sys.exit(1)

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    # if a model is given, get the name from it;
    # otherwise just use the initialization weights
    if args.model:
        filename = os.path.splitext(os.path.basename(args.model))[0]
    else:
        filename = os.path.splitext(os.path.basename(args.weight))[0]

    tag = args.tag
    tag = tag if tag else 'default'
    filename = tag + '/' + filename
Example #11
    def launch_train(self, conf):
        '''Launch Faster R-CNN training with settings taken from conf.'''
        args = {}
        args['cfg_file'] = conf.frcnn_cfg
        args['weight'] = conf.starting_weights
        args['imdb_name'] = conf.train_set
        args['imdbval_name'] = conf.valid_set
        args['max_iters'] = conf.iters
        args['tag'] = conf.frcnn_tag
        args['net'] = conf.frcnn_net
        args['set_cfgs'] = None

        print('Called with args:')
        print(args)

        if args['cfg_file'] is not None:
            cfg_from_file(args['cfg_file'])
        if args['set_cfgs'] is not None:
            cfg_from_list(args['set_cfgs'])

        print('Using config:')
        pprint.pprint(cfg)

        np.random.seed(cfg.RNG_SEED)

        # train set
        imdb, roidb = combined_roidb(args['imdb_name'], conf)
        print('{:d} roidb entries'.format(len(roidb)))

        # output directory where the models are saved
        output_dir = conf.backup_folder  #get_output_dir(imdb, args.tag)
        print('Output will be saved to `{:s}`'.format(output_dir))

        # tensorboard directory where the summaries are saved during training
        tb_dir = conf.backup_folder  # get_output_tb_dir(imdb, args.tag)
        print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

        # also add the validation set, but with no flipping images
        orgflip = cfg.TRAIN.USE_FLIPPED
        cfg.TRAIN.USE_FLIPPED = False
        _, valroidb = combined_roidb(args['imdbval_name'], conf)
        print('{:d} validation roidb entries'.format(len(valroidb)))
        cfg.TRAIN.USE_FLIPPED = orgflip
        # load network
        if args['net'] == 'vgg16':
            net = vgg16(batch_size=cfg.TRAIN.IMS_PER_BATCH)
        elif args['net'] == 'res50':
            net = resnetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=50)
        elif args['net'] == 'res101':
            net = resnetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=101)
        elif args['net'] == 'res152':
            net = resnetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH, num_layers=152)
        elif args['net'] == 'mobile':
            net = mobilenetv1(batch_size=cfg.TRAIN.IMS_PER_BATCH)
        else:
            raise NotImplementedError

        train_net(net,
                  imdb,
                  roidb,
                  valroidb,
                  output_dir,
                  tb_dir,
                  pretrained_model=args['weight'],
                  max_iters=args['max_iters'])
Example #12
        for r in roidbs[1:]:
            roidb.extend(r)
        tmp = get_imdb(imdb_names.split('+')[1])
        imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
    else:
        imdb = get_imdb(imdb_names)
    return imdb, roidb


if __name__ == '__main__':
    args = parse_args()

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    np.random.seed(cfg.RNG_SEED)

    # train set
    imdb, roidb = combined_roidb(args.imdb_name)

    # Set class names in config file based on IMDB
    class_names = imdb.classes
    cfg_from_list(['CLASS_NAMES', [class_names]])

    if args.alpha:
        cfg_from_list(['LRP_HAI.ALPHA', True])

    # Update config to match start of training detector
    cfg_from_list(['LRP_HAI_TRAIN.DET_START', args.det_start])
Example #13
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# setup matplotlib to use without display
import matplotlib
matplotlib.use('Agg')

import os.path as osp
import sys


def add_path(path):
    if path not in sys.path:
        sys.path.insert(0, path)


this_dir = osp.dirname(__file__)

root_path = osp.join(this_dir, '..', '..')

# Add src to PYTHONPATH
src_path = osp.join(root_path, 'src')
add_path(src_path)

# Change paths in configs file
import frcnn
from model.config import cfg_from_list
data_path = osp.join(root_path, 'data')
cfg_from_list(['ROOT_DIR', root_path, 'DATA_DIR', data_path])
Example #14
    return args


if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    # if args.set_cfgs is not None:
    #     cfg_from_list(args.set_cfgs)
    # cfg_from_file("D:/hoseok/project/pytorch-faster-rcnn/experiments/cfgs/res101.yml")
    # cfg_from_file("D:/hoseok/project/pytorch-faster-rcnn/experiments/cfgs/res101_vg.yml")
    cfg_from_list(["ANCHOR_SCALES", "[8,16,32]", "ANCHOR_RATIOS", "[0.5,1,2]"])

    print('Using config:')
    pprint.pprint(cfg)

    # model = "D:/hoseok/project/pytorch-faster-rcnn/output/res101/visual_genome_train_diff/FRCNN/res101_faster_rcnn_iter_1200000.pth"
    # filename = os.path.splitext(os.path.basename(model))[0]
    # if a model is given, get the name from it;
    # otherwise just use the initialization weights
    if args.model:
        filename = os.path.splitext(os.path.basename(args.model))[0]
    else:
        filename = os.path.splitext(os.path.basename(args.weight))[0]

    # tag = args.tag
    # tag = tag if tag else 'default'
Example #15
    set_cfgs = [
        'ANCHOR_SCALES', '[8,16,32]', 'ANCHOR_RATIOS', '[0.5,1,2]',
        'TRAIN.STEPSIZE', '[40000]'
    ]
    imdb_name = 'voc_2007_trainval'
    imdbval_name = 'voc_2007_test'
    max_iters = 20000
    net = 'vgg16'
    tag = None
    max_per_image = 100
    weight = '/mnt/data//tf-faster-rcnn-master/data/imagenet_weights/vgg16.ckpt'

    if cfg_file is not None:
        cfg_from_file(cfg_file)
    if set_cfgs is not None:
        cfg_from_list(set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    np.random.seed(cfg.RNG_SEED)

    # train set
    imdb, roidb = combined_roidb(imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    # output directory where the models are saved
    output_dir = get_output_dir(imdb, tag)
    print('Output will be saved to `{:s}`'.format(output_dir))

    # tensorboard directory where the summaries are saved during training
Example #16
        = run_LRP_HAI(sess, net, blobs, timers, 'test', cfg.LRP_HAI_TEST.BETA,
                      im_idx, alpha=cfg.LRP_HAI.ALPHA)

    return scores, pred_bboxes, timers, _


if __name__ == '__main__':
    args = parse_args()

    # print('Called with args:')
    # print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    # print('Using config:')
    # pprint.pprint(cfg)

    # if a model is given, get the name from it;
    # otherwise just use the initialization weights
    if args.model:
        filename = os.path.splitext(os.path.basename(args.model))[0]
    else:
        filename = os.path.splitext(os.path.basename(args.weight))[0]

    tag = args.tag
    tag = tag if tag else 'default'
    filename = tag + '/' + filename
Example #17
from datasets.factory import get_imdb
from model.test import test_net
import os

# load model for detection
net = vgg16()
net.create_architecture("TEST", tag='default')
net.build_drl_rpn_network(False)

# load database of images
imdb_name = 'voc_2007_test'
imdb = get_imdb(imdb_name)

# Set class names in config file based on IMDB
class_names = imdb.classes
cfg_from_list(['CLASS_NAMES', [class_names]])

# Update config depending on if class-specific history used or not
cfg_from_list(['DRL_RPN.USE_POST',
               False])  # THROWS AN ERROR IF SET TO TRUE, WHY ?

# Specify whether to run drl-RPN in auto mode or for a fixed number of iterations
cfg_from_list(['DRL_RPN_TEST.NBR_FIX', 0])

# Set DIMS_TIME in the config
cfg_from_list(['DIMS_TIME', 4])

# test the network
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
Example #18
        sys.exit(1)

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    # if a model is given, get the name from it;
    # otherwise just use the initialization weights
    if args.model:
        filename = os.path.splitext(os.path.basename(args.model))[0]
    else:
        filename = os.path.splitext(os.path.basename(args.weight))[0]

    tag = args.tag
    tag = tag if tag else 'default'
    filename = tag + '/' + filename
Example #19
    def launch_test(self, conf, hash_model):
        '''Evaluate a trained model snapshot (hash_model) on the validation set.'''
        args = {}
        args['cfg_file'] = conf.frcnn_cfg
        args['weight'] = conf.starting_weights
        args['model'] = hash_model
        args['imdb_name'] = conf.valid_set
        args['comp_mode'] = False
        args['tag'] = conf.frcnn_tag
        args['net'] = conf.frcnn_net
        args['set_cfgs'] = None
        args['max_per_image'] = 5

        print('Called with args:')
        print(args)

        if args['cfg_file'] is not None:
            cfg_from_file(args['cfg_file'])
        if args['set_cfgs'] is not None:
            cfg_from_list(args['set_cfgs'])

        print('Using config:')
        pprint.pprint(cfg)

        # if a model is given, get the name from it;
        # otherwise just use the initialization weights
        if args['model']:
            filename = os.path.splitext(os.path.basename(args['model']))[0]
        else:
            filename = os.path.splitext(os.path.basename(args['weight']))[0]

        tag = args['tag']
        tag = tag if tag else 'default'
        filename = tag + '/' + filename

        # TODO This is really bad but it works, I'm sincerely sorry
        conf_copy = copy.deepcopy(conf)
        conf_copy.train_set = conf_copy.valid_set
        imdb = get_imdb(args['imdb_name'], conf_copy)
        print(args['imdb_name'])
        imdb.competition_mode(args['comp_mode'])

        tfconfig = tf.ConfigProto(allow_soft_placement=True)
        tfconfig.gpu_options.allow_growth = True

        # init session
        sess = tf.Session(config=tfconfig)
        # load network
        if args['net'] == 'vgg16':
            net = vgg16(batch_size=1)
        elif args['net'] == 'res50':
            net = resnetv1(batch_size=1, num_layers=50)
        elif args['net'] == 'res101':
            net = resnetv1(batch_size=1, num_layers=101)
        elif args['net'] == 'res152':
            net = resnetv1(batch_size=1, num_layers=152)
        elif args['net'] == 'mobile':
            net = mobilenetv1(batch_size=1)
        else:
            raise NotImplementedError

        # load model
        net.create_architecture(sess,
                                "TEST",
                                imdb.num_classes,
                                tag='default',
                                anchor_scales=cfg.ANCHOR_SCALES,
                                anchor_ratios=cfg.ANCHOR_RATIOS)

        if args['model']:
            print(
                ('Loading model check point from {:s}').format(args['model']))
            saver = tf.train.Saver()
            saver.restore(sess, args['model'])
            print('Loaded.')
        else:
            print(('Loading initial weights from {:s}').format(args['weight']))
            sess.run(tf.global_variables_initializer())
            print('Loaded.')

        test_net(sess,
                 net,
                 imdb,
                 filename,
                 max_per_image=args['max_per_image'])

        sess.close()