コード例 #1
0
    # NOTE(review): fragment of an active-learning (AL) training loop; the
    # enclosing function definition is outside this excerpt.
    train_iters = 0
    iters_sum = train_iters
    discard_num = 0
    # Checkpoints at 10%..100% of the dataset for the AL sample proportion.
    checkpoint = [int(x * imdb.num_images) for x in np.linspace(0.1, 1, 10)]

    # Get solver instance (Caffe-style solver wrapper).
    sw = SolverWrapper(args.solver,
                       train_roidb,
                       output_dir,
                       pretrained_model=pretrained_model_name)
    #sw.train_model(70000)

    while (True):
        # Detect remaining (unlabeled) samples: indices not yet set in tableA.
        remaining = list(set(range(imdb.num_images)) - set(tableA.nonzero()))
        # Load the latest trained model snapshot from the output directory.
        pretrained_model_name = choose_model(output_dir)
        modelpath = os.path.join(output_dir, pretrained_model_name)
        protopath = os.path.join('models/pascal_voc/ResNet-50/rfcn_end2end',
                                 'test_agnostic.prototxt')
        print 'choose latest model:{}'.format(modelpath)
        model = load_model(protopath, modelpath)
        # Run detection over all remaining samples in one pass.
        scoreMatrix, boxRecord, yVecs, eps = bulk_detect(
            model, remaining, imdb, mylambda)
        logging.debug('scoreMatrix:{}, boxRecord:{}, eps:{}, yVecs:{}'.format(
            scoreMatrix.shape, boxRecord.shape, eps, yVecs.shape))
        # Bookkeeping for this round's detection results.
        al_candidate = []  # sample indices (into imdb) chosen for AL
        ss_candidate = []  # sample indices chosen for self-supervision (SS)
        ss_fake_gt = []  # pseudo (fake) ground-truth labels for SS samples
コード例 #2
0
 # NOTE(review): fragment of an AL/SS training loop (PyTorch variant); the
 # enclosing function definition is outside this excerpt.
 # gamma and per-class lambda thresholds drive the AL/SS selection criteria.
 gamma = 0.15;  clslambda = np.array([-np.log(0.9)]*imdb.num_classes)
 # Training counters.
 loopcounter = 0; train_iters = 0;iters_sum = train_iters
 # Checkpoints controlling the active-learning (AL) proportion.
 al_proportion_checkpoint = [x*initialnum for x in np.linspace(0.3,2,10)]
 # Checkpoints controlling the self-supervised (SS) proportion w.r.t. AL.
 ss_proportion_checkpoint = [x*initialnum for x in np.linspace(0.2,2,10)]
 # Get solver object and run the initial 70k-iteration training pass.
 sw = SolverWrapper(net, imdb, train_roidb, valroidb, output_dir, tb_dir,
                    pretrained_model=pretrained_model_name)
 train_iters = 70000
 iters_sum = train_iters;
 sw.train_model(iters_sum)
 while(True):
     # Unlabeled samples: indices not yet marked in bitmapImdb.
     unlabeledidx = list(set(range(total_num))-set(bitmapImdb.nonzero()))
     # Indices already labeled.
     labeledsample = list(set(bitmapImdb.nonzero()))
     pretrained_model_name = choose_model(output_dir)
     # Load the latest trained model (PyTorch state dict from output_dir).
     saved_model = os.path.join(output_dir,pretrained_model_name)
     # 21 classes -- presumably PASCAL VOC (20 + background); TODO confirm.
     net.create_architecture(21,
                         tag='default', anchor_scales=[8, 16, 32])

     net.load_state_dict(torch.load(saved_model))
     print('load lastest model:{} sucessfully!'.format(pretrained_model_name))
     net.eval()
     net.cuda()
     print('Process detect the unlabeled images ...')
     # Detection results for the unlabeled samples with the latest model.
     scoreMatrix,boxRecord,yVecs, al_idx = detect_im(net,unlabeledidx,imdb,clslambda)
コード例 #3
0
ファイル: train_net.py プロジェクト: yanxp/detectron-ssm
def main():
    """Run active-learning / self-supervised (AL/SS) training with Detectron.

    Bootstraps training from an initial labeled subset (image basenames read
    from ``imgnames.pkl``), then alternates between training to the next
    checkpoint and selecting new samples: detection results split unlabeled
    images into AL candidates (sent for annotation) and SS candidates
    (pseudo-labeled), until ``cfg.SOLVER.MAX_ITER`` is exceeded.
    """
    # Initialize C2
    workspace.GlobalInit(
        ['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1'])
    # Set up logging and load config options
    logger = setup_logging(__name__)
    logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    if args.opts is not None:
        merge_cfg_from_list(args.opts)

    assert_and_infer_cfg()
    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))
    # Note that while we set the numpy random seed network training will not be
    # deterministic in general. There are sources of non-determinism that cannot
    # be removed with a reasonble execution-speed tradeoff (such as certain
    # non-deterministic cudnn functions).
    np.random.seed(cfg.RNG_SEED)

    # Basenames of the initially labeled images. The context manager closes
    # the handle even if pickle.load raises (the original leaked it then).
    with open('imgnames.pkl', 'rb') as fs:
        roidbnames = pickle.load(fs)

    logger.info('Loading dataset: {}'.format(cfg.TRAIN.DATASETS))

    dataset_names = cfg.TRAIN.DATASETS
    proposal_files = cfg.TRAIN.PROPOSAL_FILES

    roidb = get_training_roidb(dataset_names, proposal_files)

    logger.info('{:d} roidb entries'.format(len(roidb)))

    total_num = len(roidb)

    # Bitmap marking which roidb indices are currently in the training set.
    bitmapRoidb = BitMap(total_num)

    # Initial training subset: entries whose image basename appears in the
    # pickled name list.
    initialidx = []
    train_roidb = []
    for i, entry in enumerate(roidb):
        if entry['image'].split('/')[-1] in roidbnames:
            initialidx.append(i)
            train_roidb.append(entry)
    for i in initialidx:
        bitmapRoidb.set(i)

    logger.info('{:d} the number initial roidb entries'.format(
        len(train_roidb)))
    # Append flipped images for data augmentation.
    train_roidb = flipped_roidb_for_training(train_roidb)
    logger.info('{:d} roidb entries after appending flipped images'.format(
        len(train_roidb)))

    alamount = 0   # cumulative count of AL-selected samples
    ssamount = 0   # cumulative count of SS (pseudo-labeled) samples
    gamma = 0.95   # confidence threshold for detect_im; decays each round
    # Checkpoints controlling the AL proportion (caps AL at 40% of the data).
    al_proportion_checkpoint = [
        int(x * total_num * 0.4) for x in np.linspace(0.2, 1, 10)
    ]
    # Checkpoints controlling the SS proportion.
    ss_proportion_checkpoint = [
        int(x * total_num) for x in np.linspace(0.2, 2, 10)
    ]

    next_iters = 90000
    sum_iters = next_iters
    # Initial training run; returns checkpoints keyed by iteration number.
    checkpoints = detectron.utils.train.train_model(sum_iters, train_roidb,
                                                    cfg.TRAIN.WEIGHTS)
    while True:
        # Evaluate the latest checkpoint on the test dataset.
        test_model(checkpoints[(sum_iters - 1)], args.multi_gpu_testing,
                   args.opts)
        if sum_iters > cfg.SOLVER.MAX_ITER:
            break
        # Indices not yet selected for training.
        unlabeledidx = list(set(range(total_num)) - set(bitmapRoidb.nonzero()))
        # Indices already selected.
        labeledidx = list(set(bitmapRoidb.nonzero()))
        # Detect on the unlabeled samples with the latest checkpoint.
        BBoxes, YClass, Scores, al_candidate_idx, ALScore = detect_im(
            checkpoints[(sum_iters - 1)],
            roidb,
            gamma,
            idxs=unlabeledidx,
            gpu_id=0)

        # Order AL candidates by ascending AL score -- presumably most
        # informative first; TODO confirm ALScore ordering convention.
        al_avg_idx = np.argsort(np.array(ALScore))
        al_candidate_idx = [al_candidate_idx[i] for i in al_avg_idx]

        # Relax the confidence threshold each round, floored at 0.7.
        gamma = max(gamma - 0.05, 0.7)

        # SS candidates: unlabeled samples not picked for AL. A set makes the
        # membership test O(1) instead of O(len(al_candidate_idx)) per item.
        al_selected = set(al_candidate_idx)
        ss_candidate_idx = [
            i for i in unlabeledidx if i not in al_selected
        ]

        # Replace annotations of the SS candidates with pseudo ground truth.
        train_roidb = replace_roidb(roidb, BBoxes, YClass, ss_candidate_idx)

        # Cap this round's selections at the next proportion checkpoint and
        # rotate the consumed checkpoint out (the original bound the popped
        # value to an unused `tmp`).
        if alamount + len(al_candidate_idx) >= al_proportion_checkpoint[0]:
            al_candidate_idx = al_candidate_idx[:int(
                al_proportion_checkpoint[0] - alamount)]
            al_proportion_checkpoint.pop(0)
            al_proportion_checkpoint.append(al_proportion_checkpoint[-1])
        if ssamount + len(ss_candidate_idx) >= ss_proportion_checkpoint[0]:
            ss_candidate_idx = ss_candidate_idx[:int(
                ss_proportion_checkpoint[0] - ssamount)]
            ss_proportion_checkpoint.pop(0)
            ss_proportion_checkpoint.append(ss_proportion_checkpoint[-1])

        # Record cumulative AL and SS totals.
        alamount += len(al_candidate_idx)
        ssamount += len(ss_candidate_idx)

        # float() keeps the ratios meaningful under Python 2, where int/int
        # truncates to 0.
        logger.info('alfactor:{},ssfactor:{}'.format(
            float(alamount) / total_num, float(ssamount) / total_num))

        #       for idx in al_candidate_idx:
        #            bitmapRoidb.set(idx)
        next_train_idx = bitmapRoidb.nonzero()
        next_train_idx.extend(ss_candidate_idx)

        # Blur the SS images -- presumably as augmentation; TODO confirm.
        train_roidb = blur_image(train_roidb, ss_candidate_idx)
        # Build the next round's training roidb and add flipped copies.
        train_roidb = [train_roidb[i] for i in next_train_idx]
        train_roidb = flipped_roidb_for_training(train_roidb)
        # Continue training another 30k iterations from the last checkpoint.
        next_iters = 30000
        sum_iters += next_iters
        checkpoints = detectron.utils.train.train_model(
            sum_iters, train_roidb, checkpoints[(sum_iters - next_iters - 1)])
コード例 #4
0
ファイル: train_net.py プロジェクト: yinglang/SSM
    # NOTE(review): fragment of an AL/SS training loop; the enclosing
    # function definition is outside this excerpt.
    # Counters: loop index, per-round iterations, cumulative iterations.
    loopcounter = 0; train_iters = 0; iters_sum = train_iters
    # Checkpoints controlling the active-learning (AL) proportion.
    al_proportion_checkpoint = [int(x*initial_num) for x in np.linspace(0.3,2,10)]
    # Checkpoints controlling the self-supervised (SS) proportion w.r.t. AL.
    ss_proportion_checkpoint = [int(x*initial_num) for x in np.linspace(0.1,4,10)]
    
    
    # Get solver object and pretrain an initial model on VOC2007.
    sw = SolverWrapper(args.solver, train_roidb, output_dir,
                        pretrained_model=pretrained_model_name)
    sw.train_model(70000)

    while(True):
        # Unlabeled samples: indices not yet marked in bitmapImdb.
        unlabeledidx = list(set(range(imdb.num_images))-set(bitmapImdb.nonzero()))
        # Indices already labeled.
        labeledidx = list(set(bitmapImdb.nonzero()))
        # Load the latest trained model snapshot.
        trained_models = choose_model(output_dir)
        pretrained_model_name = trained_models[-1] 
        modelpath = os.path.join(output_dir, pretrained_model_name)
        protopath = os.path.join('models/pascal_voc/ResNet-101/rfcn_end2end',
                'test_agnostic.prototxt')
        print 'choose latest model:{}'.format(modelpath)
        model = load_model(protopath,modelpath)
        # Detection results of the unlabeled samples with the latest model.
        print('Process detect the unlabeled images...')
        scoreMatrix, boxRecord, yVecs, al_idx  = bulk_detect(model, unlabeledidx, imdb, clslambda)
     #   logging.debug('scoreMatrix:{}, boxRecord:{}, yVecs:{}'.format(scoreMatrix.shape,
     #       boxRecord.shape, yVecs.shape))
コード例 #5
0
    # NOTE(review): fragment of an AL/SS training loop (COCO variant); the
    # enclosing function definition is outside this excerpt.
    # Checkpoints controlling the self-supervised (SS) proportion with
    # respect to the active-learning (AL) proportion.
    ss_proportion_checkpoint = [
        int(x * sample_num) for x in np.linspace(0.1, 23, 12)
    ]

    # Solver wrapper seeded with the current pretrained model.
    sw = SolverWrapper(args.solver,
                       train_roidb,
                       output_dir,
                       pretrained_model=pretrained_model_name)
    # Pretraining of the initial model (on VOC2007) is disabled here.
    # sw.train_model(150000)

    while (True):
        # Unlabeled samples: indices not yet marked in bitmapImdb.
        unlabeledidx = list(
            set(range(imdb.num_images)) - set(bitmapImdb.nonzero()))
        # Indices already labeled.
        labeledidx = list(set(bitmapImdb.nonzero()))
        # Load the latest trained model snapshot from the output directory.
        trained_models = choose_model(output_dir)
        pretrained_model_name = trained_models[-1]
        modelpath = os.path.join(output_dir, pretrained_model_name)
        protopath = os.path.join('models/coco/ResNet-101/rfcn_end2end',
                                 'test_agnostic.prototxt')
        print 'choose latest model:{}'.format(modelpath)
        model = load_model(protopath, modelpath)

        # Bookkeeping for this round's detection results.
        al_candidate_idx = []  # record al samples index in imdb
        ss_candidate_idx = []  # record ss samples index in imdb
        ss_fake_gt = []  # pseudo (fake) ground-truth labels for SS samples
コード例 #6
0
    # NOTE(review): fragment of an AL training loop (CaffeNet variant); the
    # enclosing function definition is outside this excerpt.
    # Counters: loop index, per-round iterations, cumulative iterations.
    loopcounter = 0; train_iters = 0; iters_sum = train_iters
    discard_num = 0
    # AL proportion checkpoints are disabled in this variant.
    # checkpoint = [x*imdb.num_images for x in np.linspace(0.1,1,10)]
    # Get solver instance.
    sw = SolverWrapper(args.solver, train_roidb, output_dir,
                        pretrained_model=pretrained_model_name)
    while(True):
        # Fine-tune the weights on the chosen samples; cap at 12k iterations.
        train_iters = min(12000 ,len(train_roidb)*10-train_iters)
        iters_sum += train_iters
        sw.update_roidb(train_roidb)
        sw.train_model(train_iters)

        # Remaining (unlabeled) samples: indices not yet marked in tableA.
        remaining = list(set(range(imdb.num_images))-set(tableA.nonzero()))
        # Load the latest trained model snapshot.
        pretrained_model_name = choose_model(output_dir) 
        modelpath = os.path.join(output_dir, pretrained_model_name)
        protopath = os.path.join('models/CaffeNet','test.prototxt')
        print 'choose latest model:{}'.format(modelpath)

        model = load_model(protopath,modelpath)
        # Bulk detection over all remaining samples.
        scoreMatrix, boxRecord,yVecs, eps = bulk_detect(model, remaining, imdb, mylambda)
        logging.debug('scoreMatrix:{}, boxRecord:{}, eps:{}, yVecs:{}'.format(scoreMatrix.shape,
            boxRecord.shape, eps, yVecs.shape))
        # Bookkeeping for this round's detection results.
        al_candidate = [] # sample indices (into imdb) chosen for AL
        ss_candidate = []
        ss_fake_gt = [] # pseudo (fake) ground-truth labels for SS samples
        cls_loss_sum = np.zeros((imdb.num_classes,))