Ejemplo n.º 1
0
def main(args_list):
    """Train a detection network from a command-line argument list.

    Applies optional config-file / key=value overrides, fixes the RNG
    seeds unless --randomize is given, binds caffe to the configured
    GPU, builds the roidb and runs train_net.
    """
    args = parse_args(args_list)

    print('Called with args:')
    print(args)

    # Apply config overrides before anything else reads cfg.
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    cfg.GPU_ID = args.GPU_ID

    print('Using config:')
    pprint.pprint(cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)

    # print() calls (not Python-2 print statements) keep this function
    # valid under both Python 2 and 3; the original mixed both forms.
    print('Setting GPU device %d for training' % cfg.GPU_ID)
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)

    imdb, roidb = combined_roidb(args.imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))

    train_net(args.solver, roidb, output_dir,
              pretrained_model=args.pretrained_model,
              max_iters=args.max_iters)
Ejemplo n.º 2
0
def _init_caffe(cfg):
    """Seed numpy/caffe RNGs and put caffe in GPU mode on cfg.GPU_ID."""
    # One shared seed keeps both RNG streams reproducible.
    seed = cfg.RNG_SEED
    np.random.seed(seed)
    caffe.set_random_seed(seed)
    # Bind this process to the configured GPU.
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.

    Seeds numpy and caffe from cfg.RNG_SEED, then selects GPU mode on
    the device given by cfg.GPU_ID.
    """
    import caffe

    rng_seed, gpu_id = cfg.RNG_SEED, cfg.GPU_ID
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(rng_seed)
    caffe.set_random_seed(rng_seed)
    # set up caffe on the configured GPU
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
Ejemplo n.º 4
0
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.

    Fixes the numpy and caffe random seeds for reproducibility, then
    switches caffe to GPU mode on the configured device.
    """
    import caffe

    # Seed both RNGs with the same configured value.
    for seed_fn in (np.random.seed, caffe.set_random_seed):
        seed_fn(cfg.RNG_SEED)
    # GPU mode on the configured device.
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
Ejemplo n.º 5
0
    def run(self):
        """Worker-process main loop: initialize caffe, then serve
        feature/gradient requests from ``self.req_q`` forever, replying
        on ``self.resp_q`` via shared-memory arrays.

        NOTE(review): the loop never exits; the process is presumably
        terminated externally -- confirm against the spawning code.
        """
        # Restrict CUDA to the assigned device *before* importing caffe;
        # a negative device id means CPU mode.
        if self.device >= 0:
            os.environ['CUDA_VISIBLE_DEVICES'] = str(self.device)
        import caffe
        if self.device >= 0:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()

        # Fixed seeds for reproducibility.
        caffe.set_random_seed(0)
        np.random.seed(0)

        self.model = CaffeModel(*self.model_info)
        # Minimal placeholder image so the model has a valid input blob.
        self.model.img = np.zeros((3, 1, 1), dtype=np.float32)

        while True:
            req = self.req_q.get()
            # Requested layers are collected in reverse network order.
            layers = []

            if isinstance(req, FeatureMapRequest):
                for layer in reversed(self.model.layers()):
                    if layer in req.layers:
                        layers.append(layer)
                features = self.model.eval_features_tile(req.img.array, layers)
                req.img.unlink()
                # Copy results into shared memory for the parent process.
                features_shm = {
                    layer: SharedNDArray.copy(features[layer])
                    for layer in features
                }
                self.resp_q.put(FeatureMapResponse(req.resp, features_shm))

            if isinstance(req, SCGradRequest):
                for layer in reversed(self.model.layers()):
                    if layer in req.content_layers + req.style_layers + req.dd_layers:
                        layers.append(layer)
                # Roll the model image, evaluate the tile gradient, then
                # roll back so the model state is left unchanged.
                self.model.roll(req.roll, jitter_scale=1)
                loss, grad = self.model.eval_sc_grad_tile(
                    req.img.array, req.start, layers, req.content_layers,
                    req.style_layers, req.dd_layers, req.content_weight,
                    req.style_weight, req.dd_weight)
                req.img.unlink()
                self.model.roll(-req.roll, jitter_scale=1)
                self.resp_q.put(
                    SCGradResponse(req.resp, loss, SharedNDArray.copy(grad)))

            if isinstance(req, SetFeaturesAndGrams):
                # Install precomputed target features / Gram matrices.
                self.model.features = \
                    {layer: req.features[layer].array.copy() for layer in req.features}
                self.model.grams = \
                    {layer: req.grams[layer].array.copy() for layer in req.grams}
                self.resp_q.put(())
Ejemplo n.º 6
0
def roi_generate(queue=None,
                 imdb_name=None,
                 roi_conv_model_path=None,
                 cfg=None,
                 test_prototxt=None,
                 overwrite=None):
    """Use a trained RPN to generate proposals.

    Writes proposals for ``imdb_name`` under ``./output/<net_name>`` and
    reports that path to the parent through ``queue`` before starting.
    An existing output directory is only regenerated when ``overwrite``
    is true; otherwise the function returns immediately.
    """
    output_dir = './output/'
    net_name = os.path.splitext(os.path.basename(roi_conv_model_path))[0]
    output_path_name = os.path.join(output_dir, net_name)
    # Report the proposal path up-front so the caller can proceed even
    # when we bail out below because the proposals already exist.
    queue.put({'proposal_path': output_path_name})

    if not os.path.exists(output_path_name):
        os.makedirs(output_path_name)
    elif overwrite:
        shutil.rmtree(output_path_name)
        os.makedirs(output_path_name)
    else:
        return

    # print() calls (not Python-2 print statements) keep this module
    # valid under both Python 2 and 3.
    print('RPN model: {}'.format(roi_conv_model_path))
    print('Using config:')
    pprint.pprint(cfg)

    # fix the random seeds (numpy and caffe) for reproducibility
    import caffe
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)

    # NOTE: the matlab implementation computes proposals on flipped images, too.
    # We compute them on the image once and then flip the already computed
    # proposals. This might cause a minor loss in mAP (less proposal jittering).

    from datasets.factory import get_imdb
    imdb = get_imdb(imdb_name)
    print('Loaded dataset `{:s}` for bbox generation'.format(imdb.name))
    # Load RPN and configure output directory
    roi_net = caffe.Net(test_prototxt, roi_conv_model_path, caffe.TEST)
    roi_net.name = net_name
    print('Output will be saved to `{:s}`'.format(output_path_name))
    # .format avoids Python 2's "print a tuple" pitfall of print(a, b).
    print('roinet.name: {}'.format(roi_net.name))
    # Generate proposals on the imdb
    roi_proposals = imdb_proposals(roi_net, imdb, output_path_name)
Ejemplo n.º 7
0
def train(args):
    """Train lnet using data prepared by `prepare()`.

    Builds a solver prototxt from proto/l_solver.prototxt with sizes
    derived from the train/val HDF5 file lists, runs SGD, and copies
    the final snapshot to model/l.caffemodel.
    """
    def get_data_size(txt):
        # Sum the number of targets across every HDF5 file listed in txt.
        size = 0
        with open(txt, 'r') as fin:
            for line in fin.readlines():
                line = line.strip()
                data = h5py.File(line, 'r')
                size += data['target'].shape[0]
                data.close()
        return size

    # init caffe
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    if cfg.GPU_ID < 0:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(cfg.GPU_ID)
    # solver parameter setup
    batch_size = 128
    train_size = get_data_size('data/lnet_train.txt')
    val_size = get_data_size('data/lnet_val.txt')
    # Floor division: these feed integer solver fields (max_iter,
    # snapshot, test_interval) and '/' would yield floats on Python 3.
    iter_train = train_size // batch_size
    iter_test = val_size // batch_size
    max_iter = args.epoch * iter_train
    final_model = 'tmp/lnet_iter_%d.caffemodel' % max_iter
    solver_param = caffe_pb2.SolverParameter()
    with open('proto/l_solver.prototxt', 'r') as fin:
        text_format.Merge(fin.read(), solver_param)
    solver_param.max_iter = max_iter
    solver_param.snapshot = iter_train
    solver_param.test_interval = iter_train
    solver_param.test_iter[0] = iter_test
    solver_param.base_lr = args.lr
    solver_param.gamma = args.lrw
    solver_param.stepsize = args.lrp * iter_train
    tmp_solver_prototxt = 'tmp/l_solver.prototxt'
    with open(tmp_solver_prototxt, 'w') as fout:
        fout.write(text_format.MessageToString(solver_param))
    # solver setup
    solver = caffe.SGDSolver(tmp_solver_prototxt)
    # train
    solver.solve(args.snapshot)
    # Promote the final snapshot to the canonical model location.
    shutil.copyfile(final_model, 'model/l.caffemodel')
    def rpn_generate_signle_gpu(rank):
        # Generate RPN proposals on one GPU of a multi-GPU run.
        # NOTE(review): relies on enclosing-scope names (gpus, imdb,
        # rpn_test_prototxt, rpn_model_path, output_dir) -- confirm they
        # are defined in the surrounding scope.
        cfg.GPU_ID=gpus[rank]
        
        print('Using config:')
        pprint.pprint(cfg)

        import caffe
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)
        # set up caffe
        caffe.set_mode_gpu()
        caffe.set_device(cfg.GPU_ID)

        # Load RPN and configure output directory
        rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
        
        # Generate proposals on the imdb
        rpn_proposals = imdb_proposals(rpn_net, imdb, rank, len(gpus), output_dir)
Ejemplo n.º 9
0
    def init_detection_net(self, gpu_id=0, prototxt=None, caffemodel=None):
        """Initialize the detection (extraction) network.

        Falls back to the bundled ZF faster-rcnn prototxt/caffemodel
        when none are given and raises IOError if the caffemodel file
        is missing.  Stores the loaded net on ``self.net_d``.
        """
        cfg.TEST.HAS_RPN = True  # Use RPN for proposals
        if prototxt is None:
            prototxt = os.path.join(cfg.ROOT_DIR, 'models', NETS['zf'][0],
                            'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
        if caffemodel is None:
            caffemodel = os.path.join(cfg.ROOT_DIR, 'output/default/train',
                              NETS['zf'][1])

        if not os.path.isfile(caffemodel):
            raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))
                       
        # Seed caffe's RNG for reproducibility (numpy seeding left
        # disabled by the original author).
        #np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)
        caffe.set_mode_gpu()
        caffe.set_device(gpu_id)
        self.net_d = caffe.Net(prototxt, caffemodel, caffe.TEST)
Ejemplo n.º 10
0
def main(args):
    """Set up caffe, load ImageNet/ALOV training data and run the
    tracker training loop until kNumBatches batches are processed."""
    # Fix random seeds (numpy and caffe) for reproducibility
    logger.info('Initializing caffe..')
    np.random.seed(RANDOM_SEED)
    caffe.set_random_seed(RANDOM_SEED)

    if GPU_ONLY:
        caffe.set_mode_gpu()
        caffe.set_device(int(args['gpu_id']))
    else:
        caffe.set_mode_cpu()

    logger.info('Loading training data')
    # ImageNet detection images and their ground-truth annotations
    imagenet_loader = loader_imagenet(
        os.path.join(args['imagenet'], 'images'),
        os.path.join(args['imagenet'], 'gt'), logger)
    imagenet_images = imagenet_loader.loaderImageNetDet()

    # ALOV videos and their ground-truth annotations
    alov_loader = loader_alov(
        os.path.join(args['alov'], 'images'),
        os.path.join(args['alov'], 'gt'), logger)
    alov_loader.loaderAlov()
    alov_videos = alov_loader.get_videos()

    # Example generator + regressor network + the trainer tying them together
    example_gen = example_generator(
        float(args['lamda_shift']), float(args['lamda_scale']),
        float(args['min_scale']), float(args['max_scale']), logger)
    regressor = regressor_train(
        args['train_prototxt'], args['init_caffemodel'], int(args['gpu_id']),
        args['solver_prototxt'], logger)
    trainer = tracker_trainer(example_gen, regressor, logger)

    # Alternate image-based and video-based training until done
    while trainer.num_batches_ < kNumBatches:
        train_image(imagenet_loader, imagenet_images, trainer)
        train_video(alov_videos, trainer)
Ejemplo n.º 11
0
 def check_gradient_single(self,
                           layer,
                           bottom,
                           top,
                           check_bottom='all',
                           top_id=0,
                           top_data_id=0):
     """Check one layer's analytic gradient against finite differences.

     Runs Backward once to capture the analytic diffs, then perturbs
     each element of every blob under test by +/- stepsize_ and compares
     the centered finite difference against the stored analytic value.
     Elements whose magnitude falls inside the [kink_ - kink_range_,
     kink_ + kink_range_] band are skipped (non-differentiable region).

     NOTE(review): after the -2*step perturbation the element is never
     restored (+step is missing), so blob.data is left at original-step
     for subsequent elements -- confirm whether this is intended.
     """
     # Retrieve Blobs to check
     propagate_down = [False for i in xrange(len(bottom))]
     blobs_to_check = []
     for blob in layer.blobs:
         blobs_to_check += [blob]
     if check_bottom == 'all':
         check_bottom = range(len(bottom))
     assert len(check_bottom) <= len(bottom)
     for cb in check_bottom:
         blobs_to_check += [bottom[cb]]
         propagate_down[cb] = True
     # Compute the gradient analytically using Backward
     caffe.set_random_seed(self.seed_)
     layer.Reshape(bottom, top)
     layer.Forward(bottom, top)
     self.get_obj_and_gradient(layer, top, top_id, top_data_id)
     layer.Backward(top, propagate_down, bottom)
     # Store computed diff
     ana_grads = [b.diff.copy() for b in blobs_to_check]
     # Compute finite diff
     for bi, (ana_grad, blob) in enumerate(zip(ana_grads, blobs_to_check)):
         for fi in xrange(blob.count):
             step = self.stepsize_
             # L(fi <-- fi+step)
             blob.data.flat[fi] += step
             # Re-seed before every Forward so any stochastic layer
             # behaves identically across the +step/-step evaluations.
             caffe.set_random_seed(self.seed_)
             layer.Reshape(bottom, top)
             layer.Forward(bottom, top)
             ploss = self.get_obj_and_gradient(layer, top, top_id,
                                               top_data_id)
             # L(fi <-- fi-step)
             blob.data.flat[fi] -= 2 * step
             caffe.set_random_seed(self.seed_)
             layer.Reshape(bottom, top)
             layer.Forward(bottom, top)
             nloss = self.get_obj_and_gradient(layer, top, top_id,
                                               top_data_id)
             # Centered finite-difference approximation
             grad = (ploss - nloss) / (2. * step)
             agrad = ana_grad.flat[fi]
             feat = blob.data.flat[fi]
             if self.kink_ - self.kink_range_ > np.abs(feat) \
                     or np.abs(feat) > self.kink_ + self.kink_range_:
                 # Tolerance scales with the gradient magnitude (min 1.0)
                 scale = max(max(np.abs(agrad), np.abs(grad)), 1.0)
                 assert np.isclose(
                     agrad, grad, rtol=0, atol=self.threshold_ *
                     scale), ("(top_id, top_data_id, blob_id, feat_id)"
                              "=(%d, %d, %d, %d); feat=%g; "
                              "objective+ = %g; objective- = %g; "
                              "analitical_grad=%g; finite_grad=%g" %
                              (top_id, top_data_id, bi, fi, feat, ploss,
                               nloss, agrad, grad))
Ejemplo n.º 12
0
    def __init__(self,
                 weights_path,
                 image_net_proto,
                 lstm_net_proto,
                 vocab_path,
                 device_id=-1):
        """Load the image CNN and the LSTM captioning net plus the vocab.

        device_id >= 0 selects that GPU; a negative id runs on CPU.
        Raises Exception if the vocab size does not match the LSTM's
        'predict' blob.
        """
        if device_id >= 0:
            caffe.set_mode_gpu()
            caffe.set_device(device_id)
        else:
            caffe.set_mode_cpu()

        # Deterministic behavior for any stochastic layers.
        caffe.set_random_seed(0)

        # Setup image processing net.
        phase = caffe.TEST
        self.image_net = caffe.Net(image_net_proto, weights_path, phase)
        image_data_shape = self.image_net.blobs['data'].data.shape
        self.transformer = caffe.io.Transformer({'data': image_data_shape})
        # Per-channel mean subtraction (presumably BGR ImageNet means).
        channel_mean = np.zeros(image_data_shape[1:])
        channel_mean_values = [104, 117, 123]
        assert channel_mean.shape[0] == len(channel_mean_values)
        for channel_index, mean_val in enumerate(channel_mean_values):
            channel_mean[channel_index, ...] = mean_val
        self.transformer.set_mean('data', channel_mean)
        # Reorder channels and move channels first for the net input.
        self.transformer.set_channel_swap('data', (2, 1, 0))
        self.transformer.set_transpose('data', (2, 0, 1))
        # Setup sentence prediction net.
        self.lstm_net = caffe.Net(lstm_net_proto, weights_path, phase)
        # Index 0 is reserved for the end-of-sequence token.
        self.vocab = ['<EOS>']
        with open(vocab_path, 'r') as vocab_file:
            self.vocab += [word.strip() for word in vocab_file.readlines()]
        net_vocab_size = self.lstm_net.blobs['predict'].data.shape[2]
        if len(self.vocab) != net_vocab_size:
            raise Exception('Invalid vocab file: contains %d words; '
                            'net expects vocab with %d words' %
                            (len(self.vocab), net_vocab_size))
Ejemplo n.º 13
0
    def _run(self, cnndata, gpu=None):
        """Child-process worker: load the net, then serve tile-gradient
        requests from self.req_q forever, replying on self.resp_q."""
        # Pin CUDA to the assigned GPU before caffe is imported.
        if gpu is not None:
            os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
        import caffe
        caffe.set_random_seed(0)

        self.net = caffe.Net(str(cnndata.deploy), 1, weights=str(cnndata.model))
        self.data = dd._LayerIndexer(self.net, 'data')
        self.diff = dd._LayerIndexer(self.net, 'diff')

        if gpu is not None:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()

        # NOTE(review): loop never exits; process presumably killed by
        # the parent.
        while True:
            req = self.req_q.get()
            if req.cleanup:
                # Zero all gradient buffers before starting a fresh run.
                for blob in self.net.blobs:
                    self.diff[blob] = 0
            grad = self._grad_single_tile(req.data, req.layers)
            resp = TileResponse(req.resp, grad)
            self.resp_q.put(resp)
            self.req_q.task_done()
Ejemplo n.º 14
0
def main(args_list):
    """Train a detection network from a command-line argument list.

    Applies optional config overrides, fixes the RNG seeds unless
    --randomize, binds caffe to the configured GPU, then builds the
    roidb and runs train_net.
    """
    args = parse_args(args_list)

    print('Called with args:')
    print(args)

    # Apply config overrides before anything else reads cfg.
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    cfg.GPU_ID = args.GPU_ID

    print('Using config:')
    pprint.pprint(cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)

    # print() calls (not Python-2 print statements) keep this function
    # valid under both Python 2 and 3; the original mixed both forms.
    print('Setting GPU device %d for training' % cfg.GPU_ID)
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)

    imdb, roidb = combined_roidb(args.imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))

    train_net(args.solver,
              roidb,
              output_dir,
              pretrained_model=args.pretrained_model,
              max_iters=args.max_iters)
Ejemplo n.º 15
0
def solve(proto, roidb, pretrained_model, gpus, uid, rank, output_dir,
          max_iter):
    """Per-rank NCCL training worker.

    Binds this process to its GPU, builds the solver, and steps in
    snapshot-sized chunks until max_iter; only rank 0 writes snapshots.
    """
    # Bind caffe to this rank's GPU and register it with the NCCL group.
    caffe.set_random_seed(cfg.RNG_SEED)
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)
    cfg.GPU_ID = gpus[rank]

    wrapper = SolverWrapper(proto, roidb, output_dir, rank, pretrained_model)
    solver = wrapper.getSolver()
    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)

    # Advance in SNAPSHOT_ITERS-sized chunks, snapshotting on rank 0.
    done = 0
    while done < max_iter:
        solver.step(cfg.TRAIN.SNAPSHOT_ITERS)
        if rank == 0:
            wrapper.snapshot()
        done += cfg.TRAIN.SNAPSHOT_ITERS
Ejemplo n.º 16
0
    #renderContext = {}
    #jinjiaEnv = jinja2.Environment(loader = jinja2.FileSystemLoader('./')).get_template('template.html')

    # Command line: <saved_net> <predict_dataset> <out_tag> <gpu_id>
    savedNet = sys.argv[1]
    predictDataset = sys.argv[2]
    outtag = sys.argv[3]
    gpuid = int(sys.argv[4])

    resultfolder, modelfile = os.path.split(savedNet)
    
    # Test results go to <model folder>/test_<tag>/
    outputFolder = resultfolder + r'/test_{}'.format(outtag)
    if(os.path.exists(outputFolder) == False):
        os.makedirs(outputFolder)

    # Fixed seed + GPU selection for repeatable evaluation.
    caffe.set_random_seed(23333)
    caffe.set_mode_gpu()
    caffe.set_device(gpuid)
    
    # Log INFO to console and DEBUG to a file inside the output folder.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    fh = logging.FileHandler(outputFolder + '/test_log_text.log')
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)
    logger.addHandler(ch)
    logger.addHandler(fh)
Ejemplo n.º 17
0
def worker(rank, uid, gpus, solver_prototxt, roidb, pretrained_model, max_iter,
           output_dir):
    """
    Training worker
    :param rank: The process rank
    :param uid: The caffe NCCL uid
    :param solver_proto: Solver prototxt
    :param roidb: Training roidb
    :param pretrained_model: Pretrained model
    :param gpus: GPUs to be used for training
    :param max_iter: Maximum number of training iterations
    :param output_dir: Output directory used for saving models
    :return:
    """

    # Setup caffe
    cfg.RANK = rank
    cfg.GPU_ID = gpus[rank]  # Will be used in gpu_nms
    caffe.set_device(cfg.GPU_ID)
    # Offset the seed by rank so each worker draws different samples.
    caffe.set_random_seed(cfg.RNG_SEED + rank)
    caffe.set_mode_gpu()
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    # Setup Solver
    solverW = SolverWrapper(
        solver_prototxt=str(solver_prototxt),
        roidb=roidb,
        output_dir=str(output_dir),
        rank=rank,
        pretrained_model=str(pretrained_model))
    solver = solverW.get_solver()
    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)

    # Train the model for the specified number of iterations
    # NOTE(review): filter() is lazy on Python 3 but this result is
    # iterated several times below -- only correct on Python 2, where
    # filter() returns a list.
    target_layers = filter(lambda x: x.startswith('target_layer'),
                           solver.net.layer_dict.keys())

    if rank == 0:
        t = Timer()

    while solver.iter < max_iter:
        # Tell target layers the current iteration before each step.
        for n in target_layers:
            solver.net.layer_dict[n].set_iter(solver.iter)
        if rank == 0:
            t.tic()
        solver.step(1)
        if (solver.iter % cfg.TRAIN.SNAPSHOT == 0
                or solver.iter == max_iter) and rank == 0:
            # Snapshot only in the main process
            solverW.snapshot(solver.iter == max_iter)
        if rank == 0:
            t.toc()
            eta_in_s = int((max_iter - solver.iter) * t.average_time)
            # Best-effort metric reporting; Tensorboard failures only log.
            try:
                for loss_name, loss_val in solver.net.blobs.items():
                    if 'loss' not in loss_name:
                        continue
                    tb.sess.add_scalar_value(
                        loss_name, float(loss_val.data), step=solver.iter)
                for n in target_layers:
                    tb.sess.add_scalar_value(
                        n + '_accuracy',
                        float(solver.net.layer_dict[n].accuracy),
                        step=solver.iter)
                tb.sess.add_scalar_value(
                    "speed", 1. / t.average_time, step=solver.iter)
                tb.sess.add_scalar_value(
                    "ETA (min)", eta_in_s / 60., step=solver.iter)
            except:
                logger.warning('Failed to submit data to Tensorboard')
            # One-line progress: losses, per-layer accuracy, speed, ETA.
            sys.stdout.write('\r{}, Speed: {:5f} iter/sec, ETA: {:8s}'.format(
                ', '.join([
                    '{}: {:5f}'.format(i[0], i[1].data)
                    for i in solver.net.blobs.items() if 'loss' in i[0]
                ] + [
                    '{}: {:5f}'.format(
                        n +
                        '_accuracy', float(solver.net.layer_dict[n].accuracy))
                    for n in target_layers
                ]), 1. / t.average_time,
                str(datetime.timedelta(seconds=eta_in_s))))
            sys.stdout.flush()
Ejemplo n.º 18
0
#!/usr/bin/env python
# Smoke-test for the TensorStuff net: forward a seeded random input on
# CPU and assert the reshape/scale blobs are consistent.

import _init_paths
import caffe
import numpy as np
import os.path as osp
from pprint import pprint
caffe.set_mode_cpu()

# Fixed seeds so the forward pass is deterministic.
caffe.set_random_seed(13397)
np.random.seed(0)

net = caffe.Net('TensorStuff.prototxt', caffe.TRAIN)
print("blobs {}\nparams {}".format(net.blobs.keys(), net.params.keys()))

N = net.blobs['input_data'].num
assert net.blobs['gt_viewpoint'].data.shape == (N, 3)

# Random inputs; viewpoints drawn uniformly from [-pi, pi) per axis.
net.blobs['input_data'].data[...] = np.random.rand(
    *net.blobs['input_data'].data.shape)
net.blobs['gt_viewpoint'].data[...] = np.random.uniform(-np.pi, np.pi, (N, 3))
net.params['scale_fc_viewpoint_3x2'][0].data[...] = np.array([1., 2, 3])

out = net.forward()

# fc_viewpoint_6 reshaped to (4, 3, 2) must match the 3x2 layout blob.
assert np.allclose(net.blobs['fc_viewpoint_6'].data.reshape(4, 3, 2),
                   net.blobs['fc_viewpoint_3x2'].data)

# The scaled blob must equal the 3x2 blob times the per-row scales.
assert np.allclose(
    net.blobs['fc_viewpoint_3x2'].data * np.array([1., 2, 3]).reshape(1, 3, 1),
    net.blobs['fc_viewpoint_3x2_by_T'].data)
Ejemplo n.º 19
0
    # NOTE(review): tail of an argparse builder -- the enclosing def and
    # parser construction are above this excerpt.
    parser.add_argument('-i','--input',type=str,required=True,help="Input .types file to predict")
    parser.add_argument('-g','--gpu',type=int,help='Specify GPU to run on',default=-1)
    parser.add_argument('-o','--output',type=str,help='Output file name',default=None)
    parser.add_argument('-s','--seed',type=int,help='Random seed',default=None)
    parser.add_argument('-k','--keep',action='store_true',default=False,help="Don't delete prototxt files")
    parser.add_argument('--rotations',type=int,help='Number of rotations; rotatation must be enabled in test net!',default=1)
    parser.add_argument('--max_score',action='store_true',default=False,help="take max score per ligand as its score")
    parser.add_argument('--max_affinity',action='store_true',default=False,help="take max affinity per ligand as its score")
    parser.add_argument('--notcalc_predictions', type=str, default='',help='use file of predictions instead of calculating')
    return parser.parse_args(argv)


if __name__ == '__main__':
    args = parse_args()
    # Write predictions to the requested file, or stdout by default.
    if not args.output:
        out = sys.stdout
    else:
        out = open(args.output, 'w')
    # Identity test for None (PEP 8) instead of '!='.
    if args.seed is not None:
        caffe.set_random_seed(args.seed)
    if not args.notcalc_predictions:
        predictions = predict_lines(args)
    else:
        # Reuse a precomputed predictions file instead of running the net.
        with open(args.notcalc_predictions, 'r') as f:
            predictions = f.readlines()
        if args.max_score or args.max_affinity:
            predictions = maxLigandScore(predictions, args.max_affinity)

    out.writelines(predictions)

Ejemplo n.º 20
0
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    train_cfg.GPU_ID = args.gpu_id

    print('Using config:')
    pprint.pprint(train_cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(train_cfg.RNG_SEED)
        caffe.set_random_seed(train_cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)

    # print() calls (not Python-2 print statements) keep this code valid
    # under both Python 2 and 3.
    print('imdb name `{:s}`'.format(args.imdb_name))
    imdb, roidb = combined_roidb(args.imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))

    train_net(args.solver, roidb, output_dir,
              pretrained_model=args.pretrained_model,
              max_iters=args.max_iters)
Ejemplo n.º 21
0
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.image as mpimg
# import helper_functions
import timeit
import lmdb
from caffe.proto import caffe_pb2
import cv2
from google.protobuf import text_format
import operator
from PIL import Image

#Initialise Caffe using GPU
caffe.set_device(0)
caffe.set_mode_gpu()
# Fixed seeds so test runs are repeatable.
caffe.set_random_seed(0)
np.random.seed(0)
print('Initialized Caffe!')


# Hard-coded SVHN test model and dataset locations.
#network_path = '/home/d/Desktop/model_compare_caffe/svhn/simple/3fcc_sigmoid_model_svhn.prototxt';
network_path = '/home/d/Desktop/model_compare_caffe/svhn/simple/svhn_simple.prototxt';
weight_path  = '/home/d/Desktop/model_compare_caffe/svhn/simple/svhn_simple.caffemodel'
net          = caffe.Net(network_path, weight_path, caffe.TEST) # caffe.TEST for testing

# 32x32 test images, indices min_range..max_range.
width  = 32
height = 32
min_range = 0 
max_range = 26000
data = '/home/d/Desktop/model_compare_caffe/svhn/svhn_test_images/'
result = '/home/d/Desktop/model_compare_caffe/svhn/svhn_test_images.txt'
Ejemplo n.º 22
0
    print('Called with args:')
    print(args)

# ================ init
    # set conf
    if args.cfg_file is not None:
        rpn_config.cfg_from_file(args.cfg_file)
    
    print('RPN using config:')
    pprint.pprint(rpn_config.cfg)

    # set up caffe
    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(rpn_config.cfg.RNG_SEED)
        caffe.set_random_seed(rpn_config.cfg.RNG_SEED)
    
    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)

# ============== stage-pre
    print 'start stage-pre...'
    if not os.path.exists(args.test_def):
        raise IOError(('{:s} not found!').format(args.test_def))

    # calculate ouput size map and prepare anchors
    output_w, output_h = rpn_train.proposal_calc_output_size(args.imdb_name, 
                                                             args.test_def)
Ejemplo n.º 23
0
    # Mirror log output to console (ch) and file (fh).
    formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)
    logger.addHandler(ch)
    logger.addHandler(fh)

    logger.info('outfolder = {}'.format(outfolder))

    print(configFilePath)

    params = loadParams(configFilePath)

    # Seed python, numpy and caffe RNGs from the config for reproducibility.
    random.seed(params['randomSeed'])
    np.random.seed(params['randomSeed'])
    logger.info('Setting Seed...')
    caffe.set_random_seed(params['randomSeed'])

    caffe.set_mode_gpu()
    logger.info('Setting GPU...')
    caffe.set_device(gpuid)
    logger.info('Done.')

    logger.info('Loading network and solver settings...')

    BRDFNet = groove_network()

    
    # Build and save the net definition using config-driven sizes/flags.
    BRDFNet.createNet(params['batchSize'], 0, params['BN'], params['NormalizeInput'])

    BRDFNet.saveNet(outfolder)
Ejemplo n.º 24
0
import numpy as np
from pprint import pprint
from sys import exit
from PIL import Image
from os import path
import caffe, cv2
# Seed every RNG in play (caffe, numpy, python, OpenCV) with the same
# constant so detections are reproducible.
caffe.set_random_seed(666)
import numpy.random
numpy.random.seed(666)
import random
random.seed(666)
cv2.setRNGSeed(666)

import util


class RpnDetector:
    """Wrapper that loads an RPN caffe net described by a config file."""
    def __init__(self, configFile):
        # Paths in the config are relative to the config file's directory.
        root = path.dirname(configFile)

        self.config = util.readConfig(configFile)

        caffe.set_device(0)
        caffe.set_mode_gpu()

        # Deploy prototxt + first weight file listed in the config.
        self.net = caffe.Net(
            path.join(root, self.config["dnn_deploy_file"][0]),
            path.join(
                root,
                self.config["dnn_weight_files"][0],
            ), caffe.TEST)
        imdb = get_imdb(imdb_names)
    return imdb, roidb

if __name__ == '__main__':

    # Optional config-file / key=value overrides.
    if CFG_FILE is not None:
        cfg_from_file(CFG_FILE)
    if SET_CFGS is not None:
        cfg_from_list(SET_CFGS)

    print('Using config:')
    pprint.pprint(cfg)

    if not RANDOMIZE:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_cpu()

    imdb, roidb = combined_roidb(IMDB_NAME)
    # print() calls (not Python-2 print statements) keep the script valid
    # under both Python 2 and 3.
    print('{:d} roidb entries'.format(len(roidb)))

    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))

    # train_net(SOLVER, roidb, output_dir,
    #           pretrained_model=PRETRAINED_WEIGHTS,
    #           max_iters=MAX_ITERS)
Ejemplo n.º 26
0
def main(argv):
    """Train a GAN (generator + discriminator solvers) on gridded molecular
    data, once per cross-validation fold.

    Reads the data/generator/discriminator net prototxts and solver settings,
    applies command-line overrides, then trains each fold with
    train_GAN_model, snapshotting solver state if training fails so it can be
    resumed with --cont_iter.

    argv: list of command-line argument strings forwarded to parse_args.
    """
    args = parse_args(argv)

    if args.wandb:
        wandb.init(project='gentrain', config=args)
        if args.out_prefix == '':
            try:
                os.mkdir('wandb_output')
            except FileExistsError:
                pass
            args.out_prefix = 'wandb_output/' + wandb.run.id
            sys.stderr.write("Setting output prefix to %s\n" % args.out_prefix)

    # record the full argument set alongside the training output
    # (context manager ensures the file is closed even if a write fails)
    with open('%s.config' % args.out_prefix, 'wt') as config:
        config.write('\n'.join(map(lambda kv: '%s : %s' % kv, vars(args).items())))

    def read_net_param(model_file):
        # batch size is set through string replacement because it appears
        # as literal text in the prototxt rather than a settable field
        with open(model_file) as f:
            model_str = f.read().replace('BATCH_SIZE', str(args.batch_size))
        return NetParameter.from_prototxt_str(model_str)

    # read model param files and set general params
    data_param = read_net_param(args.data_model_file)
    gen_param = read_net_param(args.gen_model_file)
    disc_param = read_net_param(args.disc_model_file)

    # always compute backward passes so gradients are available
    gen_param.force_backward = True
    disc_param.force_backward = True

    if args.solver_file:
        solver_param = SolverParameter.from_prototxt(args.solver_file)
    else:
        solver_param = SolverParameter()
    solver_param.max_iter = args.max_iter
    # schedule solver-driven testing past max_iter, effectively disabling it
    solver_param.test_interval = args.max_iter + 1
    solver_param.random_seed = args.random_seed
    caffe.set_random_seed(args.random_seed)  # this should be redundant

    # check for cmdline overrides of individual solver fields
    if args.solver is not None:
        solver_param.type = args.solver
    if args.clip_gradients is not None:
        solver_param.clip_gradients = args.clip_gradients
    if args.momentum is not None:
        solver_param.momentum = args.momentum
    if args.momentum2 is not None:
        solver_param.momentum2 = args.momentum2
    if args.lr_policy is not None:
        solver_param.lr_policy = args.lr_policy
    if args.base_lr is not None:
        solver_param.base_lr = args.base_lr
    if args.weight_decay is not None:
        solver_param.weight_decay = args.weight_decay

    for fold, train_file, test_file in get_train_and_test_files(
            args.data_prefix, args.fold_nums):

        # create nets for producing train and test data
        print('Creating train data net')
        data_param.set_molgrid_data_source(train_file, args.data_root)
        train_data = Net.from_param(data_param, phase=caffe.TRAIN)

        print('Creating test data net')
        test_data = {}
        data_param.set_molgrid_data_source(train_file, args.data_root)
        test_data['train'] = Net.from_param(data_param, phase=caffe.TEST)
        if test_file != train_file:
            data_param.set_molgrid_data_source(test_file, args.data_root)
            test_data['test'] = Net.from_param(data_param, phase=caffe.TEST)

        # create solver for training generator net
        print('Creating generator solver')
        gen_prefix = '{}_{}_gen'.format(args.out_prefix, fold)
        gen = Solver.from_param(solver_param,
                                net_param=gen_param,
                                snapshot_prefix=gen_prefix)
        if args.gen_weights_file:
            gen.net.copy_from(args.gen_weights_file)
        if 'lig_gauss_conv' in gen.net.blobs:
            gen.net.copy_from('lig_gauss_conv.caffemodel')

        # create solver for training discriminator net
        print('Creating discriminator solver')
        disc_prefix = '{}_{}_disc'.format(args.out_prefix, fold)
        disc = Solver.from_param(solver_param,
                                 net_param=disc_param,
                                 snapshot_prefix=disc_prefix)
        if args.disc_weights_file:
            disc.net.copy_from(args.disc_weights_file)

        # continue previous training state, or start new training output file
        loss_file = '{}_{}.training_output'.format(args.out_prefix, fold)
        print('loss file', loss_file)
        if args.cont_iter:
            gen.restore('{}_iter_{}.solverstate'.format(
                gen_prefix, args.cont_iter))
            disc.restore('{}_iter_{}.solverstate'.format(
                disc_prefix, args.cont_iter))
            loss_df = pd.read_csv(loss_file,
                                  sep=' ',
                                  header=0,
                                  index_col=[0, 1])
            # drop any rows logged after the iteration we are resuming from
            loss_df = loss_df[:args.cont_iter + 1]
        else:
            columns = ['iteration', 'phase']
            loss_df = pd.DataFrame(columns=columns).set_index(columns)

        plot_file = '{}_{}.png'.format(args.out_prefix, fold)

        # begin training GAN
        try:
            train_GAN_model(train_data, test_data, gen, disc, loss_df,
                            loss_file, plot_file, args)
        except:
            # BUG FIX: a stray `raise` here previously made these snapshot
            # calls unreachable; save solver state so the run can be resumed
            # with --cont_iter, then propagate the original error.
            gen.snapshot()
            disc.snapshot()
            raise
Ejemplo n.º 27
0
    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))

    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])

    plt.imshow(data)


if __name__ == "__main__":
    # run the generation net once on GPU 1 with a fixed caffe seed
    caffe.set_device(1)
    caffe.set_mode_gpu()
    caffe.set_random_seed(1000)
    net_def = 'test_generate_cls_vector.prototxt'
    net = caffe.Net(net_def, caffe.TRAIN)

    net.forward()

    # print net.blobs['data'].data.shape
    # print net.blobs['bbox'].data.shape
    # print net.blobs['seg'].data.shape

    # preprocessing transformer matching the reference caffe image pipeline
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))  # move channel axis first (HxWxC -> CxHxW)
    transformer.set_mean('data', np.array([104, 117, 123]))  # mean pixel
    transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
    transformer.set_channel_swap('data', (2, 1, 0))  # the reference model has channels in BGR order instead of RGB
Ejemplo n.º 28
0
def test_groups():
    """Exercise the MolGridData layer's grouped-example handling.

    Checks output shapes, sequence-continuation indicators, and zero-padding
    of short groups, both for plain grouping (max_group_size) and for chunked
    grouping (max_group_chunk_size).
    """

    caffe.set_random_seed(800)
    caffe.set_mode_gpu()

    def runmodel(model):
        # Write the prototxt to a scratch file and build/run a net from it.
        # BUG FIX: the scratch file was leaked if caffe.Net or forward()
        # raised; a try/finally now guarantees cleanup, and the write uses
        # a context manager so the handle is always closed.
        with open('tmp.model', 'w') as m:
            m.write(model)
        try:
            net = caffe.Net('tmp.model', caffe.TRAIN)
            res = net.forward()
        finally:
            os.remove('tmp.model')
        return res, net

    # plain grouping: batch of 2, groups padded to max_group_size timesteps
    res, net = runmodel('''layer {
      name: "data"
      type: "MolGridData"
      top: "data"
      top: "label"
      top: "seqcont"
      molgrid_data_param {
        source: "typesfiles/grouped.types"
        batch_size: 2
        max_group_size: 4
        dimension: 23.5
        resolution: 0.5
        shuffle: false
        balanced: false
        root_folder: "typesfiles"
      }
    }''')
    labels = res['label']
    seqcont = res['seqcont']
    data = res['data']
    # first timestep of each group has seqcont 0, later timesteps 1
    assert labels.shape == (4, 2)
    assert seqcont.shape == (4, 2)
    assert list(seqcont[0]) == [0, 0]
    assert list(seqcont[1]) == [1, 1]
    # first batch of groups is shorter than 4, so the tail is zero-padded
    assert data[3][0].sum() == 0
    assert data[3][1].sum() == 0

    res = net.forward()
    labels = res['label']
    seqcont = res['seqcont']
    data = res['data']
    assert labels.shape == (4, 2)
    assert seqcont.shape == (4, 2)
    assert list(seqcont[0]) == [0, 0]
    assert list(seqcont[1]) == [1, 1]
    # second batch of groups fills all four timesteps
    assert data[3][0].sum() > 0
    assert data[3][1].sum() > 0

    # chunked grouping: groups delivered in chunks of max_group_chunk_size
    res, net = runmodel('''layer {
      name: "data"
      type: "MolGridData"
      top: "data"
      top: "label"
      top: "seqcont"
      molgrid_data_param {
        source: "typesfiles/grouped.types"
        batch_size: 2
        max_group_size: 4
        max_group_chunk_size: 2
        dimension: 23.5
        resolution: 0.5
        shuffle: false
        balanced: false
        root_folder: "typesfiles"
      }
    }''')
    labels = res['label']
    seqcont = res['seqcont']
    data = res['data']
    assert labels.shape == (2, 2)
    assert seqcont.shape == (2, 2)
    assert list(seqcont[0]) == [0, 0]
    assert list(seqcont[1]) == [1, 1]
    assert data[0][0].sum() > 0
    assert data[0][1].sum() > 0
    assert data[1][0].sum() > 0
    assert data[1][1].sum() > 0

    res = net.forward()
    labels = res['label']
    seqcont = res['seqcont']
    data = res['data']
    assert labels.shape == (2, 2)
    assert seqcont.shape == (2, 2)
    # chunk continues the previous group, so seqcont starts at 1
    assert list(seqcont[0]) == [1, 1]
    assert list(seqcont[1]) == [1, 1]
    assert data[0][0].sum() > 0
    assert data[0][1].sum() > 0
    # group ends inside this chunk, so the tail is zero-padded
    assert data[1][0].sum() == 0
    assert data[1][1].sum() == 0
  args = parse_args()
  print('called with args:')
  pprint.pprint(vars(args))

  sys.path.insert(0, args.caffe_python_path)
  import caffe
  import caffe_tools

  # get data
  print 'loading roi infos from:', args.roi_infos_file, '...'
  with open(args.roi_infos_file, 'rb') as f:
    roi_infos = cPickle.load(f)

  if not args.randomize:
    np.random.seed(3)
    caffe.set_random_seed(3)

  # set up caffe
  caffe.set_mode_gpu()
  if args.gpu_id is not None:
    caffe.set_device(args.gpu_id)
  net = caffe.Net(args.net_prototxt, args.pretrained_model, caffe.TEST)
  net.name = os.path.splitext(os.path.basename(args.pretrained_model))[0]

  args.blob_names = args.blob_names.split(',')

  args.outfiles = args.outfiles.split(',')
  fws = [open(outfile, 'w') for outfile in args.outfiles]

  print 'net:', args.net_prototxt
  print 'data:', args.roi_infos_file
Ejemplo n.º 30
0
    parser.add_argument('-i','--input',type=str,required=True,help="Input .types file to predict")
    parser.add_argument('-g','--gpu',type=int,help='Specify GPU to run on',default=-1)
    parser.add_argument('-o','--output',type=str,help='Output file name',default=None)
    parser.add_argument('-s','--seed',type=int,help='Random seed',default=None)
    parser.add_argument('-k','--keep',action='store_true',default=False,help="Don't delete prototxt files")
    parser.add_argument('--rotations',type=int,help='Number of rotations; rotatation must be enabled in test net!',default=1)
    parser.add_argument('--max_score',action='store_true',default=False,help="take max score per ligand as its score")
    parser.add_argument('--max_affinity',action='store_true',default=False,help="take max affinity per ligand as its score")
    parser.add_argument('--notcalc_predictions', type=str, default='',help='use file of predictions instead of calculating')
    return parser.parse_args(argv)


if __name__ == '__main__':
    args = parse_args()
    # write predictions to the named file, or stdout when none was given
    if not args.output:
        out = sys.stdout
    else:
        out = open(args.output, 'w')
    # PEP 8 fix: compare to None with `is not`, never `!=`
    if args.seed is not None:
        caffe.set_random_seed(args.seed)
    if not args.notcalc_predictions:
        predictions = predict_lines(args)
    else:
        # reuse a previously computed predictions file instead of rescoring
        with open(args.notcalc_predictions, 'r') as f:
            predictions = f.readlines()
        if args.max_score or args.max_affinity:
            predictions = maxLigandScore(predictions, args.max_affinity)

    out.writelines(predictions)
    # BUG FIX: the output file was never closed; flush and release it
    # (but never close sys.stdout)
    if out is not sys.stdout:
        out.close()

Ejemplo n.º 31
0
    print('[INFO] Called with args:')
    print(args)

    # setup & load configs
    _C = Config(config_pn="config/config.ini")
    cfg = _C.cfg

    cfg.MAIN_DEFAULT_GPU_ID = args['gpu_id']

    print('Using config:')
    pprint.pprint(cfg)

    if not args['rand']:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.MAIN_DEFAULT_RNG_SEED)
        caffe.set_random_seed(cfg.MAIN_DEFAULT_RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(args['gpu_id'])

    print('[INFO] loading dataset {} for training...'.format(args["dataset"]))
    dataset_DIR = osp.join(cfg.MAIN_DIR_ROOT, "data", args["dataset"])
    gt_set = args['gt_set']
    task = args['task']
    in_gt_dir = args['in_gt_dir']
    in_pr_dir = args['in_pr_dir']

    dataset = None
    ds_pascal = ["voc_2007", "bsd_voc2012"]
def train_action_net():
  """Train an action-recognition R-CNN (Python 2 script entry point).

  Parses command-line args, applies config-file overrides, fixes RNG seeds
  for reproducibility, configures caffe for GPU, loads the action dataset
  imdb/roidb, optionally waits for the pretrained model file to appear,
  then runs train_net.
  """
  args = parse_args()

  print('Called with args:')
  print(args)
  print
  print

  if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)

  print('Using config:')
  pprint.pprint(cfg)
  print
  print

  # Fix the random seeds (numpy and caffe) for reproducibility
  if not args.randomize:
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)

  # Set up caffe
  caffe.set_mode_gpu()
  if args.gpu_id is not None:
    caffe.set_device(args.gpu_id)

  # show which dataset is about to be loaded
  print "args.imdb_name:", args.imdb_name

  # get_imdb(args.imdb_name) == action_datasets.rcnn_action(datatype, split)
  #   where datatype is the name of action dataset, like:
  #     PascalVoc2012, Willowactions, Stanford, MPII
  #   where split indicates the train/val/trainval/test
  # instance of action_datasets.rcnn_action, **note that rcnn_action inherits imdb**
  # 
  # imdb_name: rcnn_<datatype>_<imageset>
  #   where datatype: PascalVoc2012, Willowactions, Stanford, MPII, or more...
  #   imageset: train, val, trainval, test
  imdb = get_imdb(args.imdb_name)

  # prepare the training roidb from the loaded imdb
  print 'Loaded dataset `{:s}` for training'.format(imdb.name)
  roidb = get_training_roidb(imdb)

  # resolve the directory where training output will be written
  sub_dir = "action_output"
  output_dir = get_output_dir(imdb=imdb, sub_dir=sub_dir, net=None)
  print 'Output will be saved to `{:s}`'.format(output_dir)

  exper_name = args.exper_name
  if exper_name:
    print "exper_name:", exper_name
  re_iter = args.re_iter

  # optionally block until the pretrained model file appears on disk
  sleep_time = 5
  while not os.path.exists(args.pretrained_model) and args.wait:
    print('Waiting for {} to exist...'.format(args.pretrained_model))
    time.sleep(sleep_time)
  print "Initialize from", args.pretrained_model
  # run the actual training loop
  train_net(args.solver, roidb, output_dir,
            pretrained_model=args.pretrained_model, \
            max_iters=args.max_iters, \
            exper_name=exper_name, \
            re_iter=re_iter)
Ejemplo n.º 33
0
    def_cfg('UDET')

    cfg_from_file(cfg)
    pprint.pprint(fconfig.cfg)

    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    # setup the dataset's path
    dataset = os.path.join('..', 'data', imdb_name)
    # load pixel mean
    pixel_means = None
    if os.path.exists(os.path.join(dataset, 'mean.npy')):
        pixel_means = np.load(os.path.join(dataset, 'mean.npy'))
        fconfig.cfg.PIXEL_MEANS = pixel_means
        print 'Loaded mean.npy: {}'.format(pixel_means)
    else:
        print 'Cannot find mean.npy and we will use default mean.'

    imdb = IMDB()
    imdb.get_roidb(load_data_with_boxes, dataset=dataset)
    roidb = get_training_roidb(imdb)

    np.random.seed(fconfig.cfg.RNG_SEED)
    caffe.set_random_seed(fconfig.cfg.RNG_SEED)

    train_net(solver,
              roidb,
              out,
              pretrained_model=pretrained_model,
              max_iters=max_iters)
Ejemplo n.º 34
0
    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    if not args.randomize:
        # fix the random seeds (numpy and caffe) for reproducibility
        np.random.seed(cfg.RNG_SEED)
        caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    if args.gpu_id is not None:
        caffe.set_device(args.gpu_id)

    imdb = get_imdb(args.imdb_name)
    print 'Loaded dataset `{:s}` for training'.format(imdb.name)
    roidb = get_training_roidb(imdb)

    output_dir = get_output_dir(imdb, None)
    print 'Output will be saved to `{:s}`'.format(output_dir)

 #   roidb_s,roidb_w, = weakly_supervised_roidb(roidb) 
Ejemplo n.º 35
0
def main():
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id

    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)

    # queue for communicated results between processes
    mp_queue = mp.Queue()

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 RPN, init from ImageNet model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    rpn_stage1_out = train_rpn(imdb_names=args.imdb_name,
                               init_model=args.pretrained_model,
                               solver=args.solver,
                               max_iters=args.max_iters,
                               cfg=cfg)
    #  mp_kwargs = dict(
    #          queue=mp_queue,
    #          imdb_names=args.imdb_name,
    #          init_model=args.pretrained_model,
    #          solver=args.solver,
    #          max_iters=args.max_iters,
    #          cfg=cfg)
    #  p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
    #  p.start()
    #  rpn_stage1_out = mp_queue.get()
    #  p.join()
    #  rpn_stage1_out = \
    #          {'model_path': '/home/leoyolo/research/py-faster-rcnn-another/output/rpn_small_obj/voc_2007_trainval/vgg_cnn_m_1024_rpn_small_obj_stage1_iter_80000.caffemodel'}

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 RPN, generate proposals for the test set'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    rpn_stage1_out['proposal_path'] = \
        rpn_generate(imdb_name=args.test_imdb_name,
                     rpn_model_path=str(rpn_stage1_out['model_path']),
                     cfg=cfg,
                     rpn_test_prototxt=args.rpn_test_prototxt
        )['proposal_path']

    #  mp_kwargs = dict(
    #          queue=mp_queue,
    #          imdb_name=args.test_imdb_name,
    #          rpn_model_path=str(rpn_stage1_out['model_path']),
    #          cfg=cfg,
    #          rpn_test_prototxt=args.rpn_test_prototxt)
    #  p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
    #  p.start()
    #  rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
    #  p.join()

    for area in ['all', 'small', 'medium', 'large']:

        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Stage 1 RPN, eval recall with area {}'.format(area)
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

        rpn_test(imdb_name=args.test_imdb_name,
                 rpn_proposal_path=rpn_stage1_out['proposal_path'],
                 area=area)