def main():
    args = parse_args()
    setup_logger(args.verbose)

    suites = []
    if args.all or 'RenderComponentThroughput' in args.suites:
        suites.append(RenderComponentThroughputSuite())
    if args.all or 'TTI' in args.suites:
        suites.append(TTISuite())
    if args.all or 'ApkSize' in args.suites:
        suites.append(ApkSize())

    #  abis = ('armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64')
    abis = ('arm64-v8a', )
    apk_abi = abis[0] if len(abis) == 1 else None

    jsc_dist_manager = JSDistManager('jsc_250230')
    jsc_dist_manager.prepare()

    v8_dist_manager = JSDistManager('v8_80_nointl')
    v8_dist_manager.prepare()

    hermes_dist_manager = JSDistManager('hermes_041')
    hermes_dist_manager.prepare()

    logger.info(h1('Config'))
    show_configs(abis, jsc_dist_manager, v8_dist_manager, hermes_dist_manager)

    jsc_apk_install_kwargs = {
        'app_id': 'jsc',
        'maven_repo_prop': 'MAVEN_REPO=' + jsc_dist_manager.prepare(),
        'abi': apk_abi,
        'verbose': args.verbose,
        'extra_gradle_props':
            ('INTL=true', ) if jsc_dist_manager.info.get('intl') else None,
    }

    v8_apk_install_kwargs = {
        'app_id': 'v8',
        'maven_repo_prop': 'MAVEN_REPO=' + v8_dist_manager.prepare(),
        'abi': apk_abi,
        'verbose': args.verbose,
    }

    hermes_apk_install_kwargs = {
        'app_id': 'hermes',
        'maven_repo_prop': 'MAVEN_REPO=' + hermes_dist_manager.prepare(),
        'abi': apk_abi,
        'verbose': args.verbose,
    }

    for suite in suites:
        suite.run(jsc_apk_install_kwargs, v8_apk_install_kwargs,
                  hermes_apk_install_kwargs)

    return 0
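All of the snippets on this page call a project-local setup_logger helper whose signature varies from repo to repo. For reference, a minimal sketch of the verbose-flag variant assumed by the example above (an assumption, not the benchmark's actual helper):

import logging

def setup_logger(verbose=False):
    # Sketch: switch the root log level based on a --verbose flag.
    logging.basicConfig(
        level=logging.DEBUG if verbose else logging.INFO,
        format='%(asctime)s %(levelname)s %(name)s: %(message)s')
    return logging.getLogger(__name__)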
Example 2
def main():
    args = parse_args()
    cfg = cfg_factory[args.model]

    if not osp.exists(cfg.respth): os.makedirs(cfg.respth)
    setup_logger('{}-eval'.format(cfg.model_type), cfg.respth)
    evaluate(cfg, args.weight_pth)
Example 3
def main():
    if not osp.exists(cfg.respth): os.makedirs(cfg.respth)
    setup_logger('{}-train'.format(cfg.model_type), cfg.respth)
    print("Args sent: ", args.saveCheckpointDir, args.loadCheckpointLocation,
          args.saveOnEveryEpoch, args.tensorBoardLogDir)

    setupTensorBoardWriter()
    train()
Example 4
def main():
    torch.cuda.set_device(args.local_rank)
    dist.init_process_group(backend='nccl',
                            init_method='tcp://127.0.0.1:{}'.format(args.port),
                            world_size=torch.cuda.device_count(),
                            rank=args.local_rank)
    if not osp.exists(cfg.respth): os.makedirs(cfg.respth)
    setup_logger('{}-train'.format(cfg.model_type), cfg.respth)
    train()
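The distributed examples on this page assume a parse_args() that exposes --local_rank and --port; torch.distributed.launch injects --local_rank into each worker process it spawns. A hedged sketch of such a parser (all flags beyond --local_rank are assumptions inferred from how the examples use args):

import argparse

def parse_args():
    # --local_rank is supplied by torch.distributed.launch; the
    # remaining flags are assumed from the call sites above and below.
    parser = argparse.ArgumentParser()
    parser.add_argument('--local_rank', type=int, default=-1)
    parser.add_argument('--port', type=int, default=44554)
    parser.add_argument('--model', type=str, default='bisenetv2')
    parser.add_argument('--weight-pth', dest='weight_pth', type=str, default=None)
    return parser.parse_args()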
def setup(config: Config, args):
    """
    Sets up a new config by creating the required directories and setting up logging.

    :param Config config: config to create directories for and to set config from based on the args
    :param ArgumentParser args: args to use to control config.
    """
    config.opts.new = args.new
    config.opts.activation = args.activation
    config.resource.create_directories()
    setup_logger(config.resource.main_log_path)
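A hypothetical call site for this helper; Config and parse_args() are assumed to come from the surrounding project:

config = Config()
args = parse_args()
setup(config, args)  # creates directories, then configures logging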
Example 6
def main():

    torch.cuda.set_device(args.local_rank)

    if not osp.exists(cfg.respth): os.makedirs(cfg.respth)

    cfg.respth = cfg.respth + '/' + cfg.model_type + '/' + \
        datetime.now().strftime("%d-%b-%Y-%H-%M-%S")
    os.makedirs(cfg.respth)

    setup_logger('{}-train'.format(cfg.model_type), cfg.respth)
    train()
Example 7
def main():
    args = parse_args()
    cfg = cfg_factory[args.model]
    if args.local_rank != -1:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend='nccl',
                                init_method='tcp://127.0.0.1:{}'.format(args.port),
                                world_size=torch.cuda.device_count(),
                                rank=args.local_rank)
    if not osp.exists(cfg.respth): os.makedirs(cfg.respth)
    setup_logger('{}-eval'.format(cfg.model_type), cfg.respth)
    evaluate(cfg, args.weight_pth)
Example 8
def main():
    np.set_printoptions(precision=8, floatmode='maxprec', suppress=True)
    args = parse_args()
    cfg = cfg_factory[args.model]
    if args.local_rank != -1:
        torch.cuda.set_device(args.local_rank)
        # dist.init_process_group(backend='nccl',
        # init_method='tcp://127.0.0.1:{}'.format(args.port),
        # world_size=torch.cuda.device_count(),
        # rank=args.local_rank
        # )
    if not os.path.exists(cfg.respth): os.makedirs(cfg.respth)
    setup_logger('{}-eval-{}'.format(cfg.model_type, cfg.log_level),
                 cfg.respth, cfg.log_level)
    evaluate(cfg, args.weight_path)
Example 9
def main():
    torch.cuda.set_device(args.local_rank)
    # dist.init_process_group(
    #     backend='nccl',
    #     init_method='tcp://127.0.0.1:{}'.format(args.port),
    #     world_size=torch.cuda.device_count(),
    #     rank=args.local_rank
    # )
    if not os.path.exists(cfg.respth): os.makedirs(cfg.respth)
    np.set_printoptions(precision=8, floatmode='maxprec', suppress=True)
    # setup_logger('{}-train-{}'.format(cfg.model_type,cfg.log_level), cfg.respth, cfg.log_level)
    loginfo = setup_logger('{}-train-{}'.format(cfg.model_type, cfg.log_level),
                           cfg.respth, cfg.log_level)
    train(loginfo)
Example 10
def run():
    """
    Query for completed subjects, calculate kmeans vertex centroids, fetch subject images, split
    columns by centroids, row segmentatino with Ocropy.
    """

    logger = setup_logger(settings.APP_NAME,
                          'log/kmeans_and_enqueue_completed_subjects.log',
                          logging.DEBUG)

    subject_set_csv = SubjectSetCSV()
    workflow_router = SubjectSetWorkflowRouter(subject_set_csv, settings,
                                               logger)
    pages_raw_subject_ids = subject_set_csv.raw_pages_subject_ids()
    logger.debug("Running Wires and Rails Workflow Processor")
    Panoptes.connect(username=settings.PANOPTES_USERNAME,
                     password=settings.PANOPTES_PASSWORD)

    retired_subject_ids = []

    vertices_and_target_subject_sets = []

    for _subject_set_id, metadata in settings.COLUMNS_WORKFLOW_METADATA.items():

        logger.debug("Loading vertices / subject retirement info for %(debug_name)s subject set "
                     "(subject set id: %(subject_set_id)d; workflow id: %(workflow_id)d; "
                     "task id: %(task_id)s)", metadata)

        classification_kwargs = {
            'scope': 'project',
            'project_id': settings.PROJECT_ID,
            'workflow_id': metadata['workflow_id']
        }
        logger.debug("Loading classifications by params %s",
                     str(classification_kwargs))
        classifications_records = [
            c for c in Classification.where(**classification_kwargs)
        ]

        classifications = VertexClassifications(classifications_records,
                                                pages_raw_subject_ids)

        # Aggregate vertex centroids
        centroids_by_subject = classifications.vertex_centroids(
            metadata['task_id'])
        for subject_id, centroids in centroids_by_subject.items():
            # Find target subject set ID, or log and skip the subject
            try:
                target_subject_set_id = workflow_router \
                    .target_subject_set_id(subject_id, classifications_records)
            except UnidentifiedRawSubjectSetException as ex:
                logger.error(ex.args[0])
                continue
            except SharedMajorityException as ex:
                # TODO need add'l monitoring for this, e.g. manual report exception
                logger.error(ex.args[0])
                continue
            vertices_and_target_subject_sets.append(
                [subject_id, centroids, target_subject_set_id])

        # Aggregate retired subjects
        workflow = Workflow.find(metadata['workflow_id'])
        retirement_count = workflow.retirement['options']['count']
        retired_subject_ids += classifications.retired_subject_ids(
            metadata['task_id'], retirement_count)

    logger.debug(
        'Retrieved the following subject centroids for image segmentation: %s',
        str(vertices_and_target_subject_sets))

    logger.debug('For the following retired subject IDs: %s',
                 str(retired_subject_ids))

    queue = Queue(connection=Redis(host=settings.REDIS_HOST))

    for subject_id, centroids, target_subject_set_id in vertices_and_target_subject_sets:
        if subject_id not in retired_subject_ids:
            continue
        subject = Subject.find(subject_id)
        if settings.METADATA_KEY_ALREADY_PROCESSED in subject.metadata and \
           subject.metadata[settings.METADATA_KEY_ALREADY_PROCESSED]:
            logger.debug('Skipping subject id %d; already processed.',
                         subject_id)
            continue
        logger.debug('Enqueuing subjects id: %d', subject_id)
        queue.enqueue(QueueOperations.queue_new_subject_creation,
                      subject_id,
                      centroids,
                      target_subject_set_id,
                      timeout=2 * 60 * 60)
        QueueOperations.flag_subject_as_queued(subject)
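The Wires and Rails examples call a three-argument setup_logger(name, logfile, level). A plausible sketch of that variant (an assumption; the project's real helper may differ):

import logging

def setup_logger(name, log_path, level=logging.INFO):
    # Named logger that writes to a file and mirrors output to the console.
    logger = logging.getLogger(name)
    logger.setLevel(level)
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    file_handler = logging.FileHandler(log_path)
    file_handler.setFormatter(formatter)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    return logger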
Example 11
def main():
    torch.cuda.set_device(args.local_rank)
    
    if not osp.exists(cfg.respth): os.makedirs(cfg.respth)
    setup_logger('{}-train'.format(cfg.model_type), cfg.respth)
    train()
Example 12
def setup(config: Config, args):
    config.resource.create_directories()
    setup_logger(config.resource.main_log_path)
    config.load_parameter()
Example 13
        help='path prefix for log files (when not provided we send the ' +
        'output to the console)',
        type=str)

    parser.add_argument('--log-colorized',
                        action='store_true',
                        help='use colorized logging')

    parser.add_argument('--debug',
                        action='store_true',
                        help='enable debug mode')

    args = parser.parse_args()

    # set-up the log handler with optional colors etc.
    setup_logger(args)

    # respond to --version argument
    if args.version:
        sys.exit('''
SiriDB HTTP Server {version}
Maintainer: {maintainer} <{email}>
Home-page: http://siridb.net
        '''.strip().format(version=__version__,
                           maintainer=__maintainer__,
                           email=__email__))

    config = configparser.RawConfigParser()

    with open(args.config, 'r', encoding='utf-8') as f:
        config.read_file(f)
Example 14
def main():
    if not osp.exists(cfg.respth): os.makedirs(cfg.respth)
    setup_logger('{}-train'.format('banet'), cfg.respth)

    best_prec1 = -1
    logger = logging.getLogger()

    ## model
    net, criteria = set_model()
    ## optimizer
    optim = set_optimizer(net)

    ## fp16
    if has_apex:
        opt_level = 'O1' if cfg.use_fp16 else 'O0'
        net, optim = amp.initialize(net, optim, opt_level=opt_level)
    ## lr scheduler
    lr_schdr = WarmupPolyLrScheduler(
        optim,
        power=0.9,
        max_iter=cfg.epoch * 371,
        warmup_iter=cfg.warmup_iters * 371,
        warmup_ratio=0.1,
        warmup='exp',
        last_epoch=-1,
    )

    for epoch in range(cfg.start_epoch, args.epoch_to_train):
        lr_schdr, time_meter, loss_meter = train(epoch, optim, net, criteria,
                                                 lr_schdr)
        if True:
            #if ((epoch+1)!=cfg.epoch):
            lr = lr_schdr.get_lr()
            print(lr)
            lr = sum(lr) / len(lr)
            loss_avg = print_log_msg(epoch, cfg.epoch, lr, time_meter,
                                     loss_meter)
            writer.add_scalar('loss', loss_avg, epoch + 1)

        if ((epoch + 1) == cfg.epoch) or ((epoch + 1) == args.epoch_to_train):
            #if ((epoch+1)%1==0) and ((epoch+1)>cfg.warmup_iters):
            torch.cuda.empty_cache()
            heads, mious, miou = eval_model(net,
                                            ims_per_gpu=2,
                                            im_root=cfg.im_root,
                                            im_anns=cfg.val_im_anns,
                                            it=epoch)
            filename = osp.join(cfg.respth, args.store_name)
            state = net.state_dict()
            save_checkpoint(state, False, filename=filename)
            #writer.add_scalar('mIOU',miou,epoch+1)
            # Record the epoch and current lr; mode 'w' truncates on open.
            with open('lr_record.txt', 'w') as m:
                print('lr to store', lr)
                m.write('{}   {}'.format(epoch + 1, lr))
            with open('best_miou.txt', 'r+') as f:
                # Parse the last stored best mIoU from the record file.
                tokens = f.read().replace('\n', ' ').split(' ')
                tokens = [t for t in tokens if t]
                best_miou = float(tokens[-1])
                is_best = miou > best_miou
                if is_best:
                    best_miou = miou
                    print('Is best? : ', is_best)
                    f.seek(0)
                    f.write(str(epoch + 1) + '   ')
                    f.write(str(best_miou))
                    f.truncate()
                    save_checkpoint(state, is_best, filename)
            print('Have Stored Checkpoint')
            #if((epoch+1)==cfg.epoch) or ((epoch+1)==args.epoch_to_train):
            state = net.state_dict()
            torch.cuda.empty_cache()
            #heads, mious = eval_model(net, 2, cfg.im_root, cfg.val_im_anns,it=epoch)
            logger.info(tabulate([
                mious,
            ], headers=heads, tablefmt='orgtbl'))
            save_checkpoint(state, False, filename)
            print('Have Saved Final Model')
            break
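The plain-text bookkeeping above (lr_record.txt, best_miou.txt) is easy to corrupt; a sketch of a JSON-based alternative, not part of the original repo:

import json
import os

def load_best_miou(path='best_miou.json'):
    # Return the stored best mIoU, or -1.0 when no record exists yet.
    if not os.path.exists(path):
        return -1.0
    with open(path) as f:
        return json.load(f)['best_miou']

def save_best_miou(epoch, miou, path='best_miou.json'):
    with open(path, 'w') as f:
        json.dump({'epoch': epoch, 'best_miou': miou}, f)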
Example 15
def main():
    if opt.dataset == 'linemod':
        opt.num_obj = 1
        opt.list_obj = [1, 2, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15]
        opt.occ_list_obj = [1, 5, 6, 8, 9, 10, 11, 12]
        opt.list_name = ['ape', 'benchvise', 'cam', 'can', 'cat', 'driller', 'duck', 'eggbox', 'glue', 'holepuncher', 'iron', 'lamp', 'phone']
        obj_name = opt.list_name[opt.list_obj.index(opt.obj_id)]
        opt.sym_list = [10, 11]
        opt.num_points = 500
        with open('{0}/models/models_info.yml'.format(opt.dataset_root), 'r') as meta_file:
            meta = yaml.safe_load(meta_file)
        diameter = meta[opt.obj_id]['diameter'] / 1000.0 * 0.1
        if opt.render:
            opt.repeat_num = 1
        elif opt.fuse:
            opt.repeat_num = 1
        else:
            opt.repeat_num = 5
        writer = SummaryWriter('experiments/runs/linemod/{}{}'.format(obj_name, opt.experiment_name))
        opt.outf = 'trained_models/linemod/{}{}'.format(obj_name, opt.experiment_name)
        opt.log_dir = 'experiments/logs/linemod/{}{}'.format(obj_name, opt.experiment_name)
        if not os.path.exists(opt.outf):
            os.mkdir(opt.outf)
        if not os.path.exists(opt.log_dir):
            os.mkdir(opt.log_dir)
    else:
        print('Unknown dataset')
        return

    estimator = PoseNet(num_points=opt.num_points, num_vote=9, num_obj=opt.num_obj)
    estimator.cuda()
    refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_obj)
    refiner.cuda()

    if opt.resume_posenet != '':
        estimator.load_state_dict(torch.load('{0}/{1}'.format(opt.outf, opt.resume_posenet)))
    if opt.resume_refinenet != '':
        refiner.load_state_dict(torch.load('{0}/{1}'.format(opt.outf, opt.resume_refinenet)))
        opt.refine_start = True
        opt.lr = opt.lr_refine
        opt.batch_size = int(opt.batch_size / opt.iteration)
        optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
    else:
        opt.refine_start = False
        optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)

    dataset = PoseDataset_linemod('train', opt.num_points, opt.dataset_root, opt.real, opt.render, opt.fuse, opt.obj_id)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers)
    test_dataset = PoseDataset_linemod('test', opt.num_points, opt.dataset_root, True, False, False, opt.obj_id)
    testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers)

    print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\nlength of the training set: {0}\nlength of the testing set: {1}\nnumber of sample points on mesh: {2}'.format(len(dataset), len(test_dataset), opt.num_points))
    if opt.obj_id in opt.occ_list_obj:
        occ_test_dataset = PoseDataset_occ('test', opt.num_points, opt.occ_dataset_root, opt.obj_id)
        occtestdataloader = torch.utils.data.DataLoader(occ_test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers)
        print('length of the occ testing set: {}'.format(len(occ_test_dataset)))

    criterion = Loss(opt.num_points, opt.sym_list)
    criterion_refine = Loss_refine(opt.num_points, opt.sym_list)
    best_test = np.inf

    if opt.start_epoch == 1:
        for log in os.listdir(opt.log_dir):
            os.remove(os.path.join(opt.log_dir, log))
    st_time = time.time()
    train_scalar = 0

    for epoch in range(opt.start_epoch, opt.nepoch):
        logger = setup_logger('epoch%d' % epoch, os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))
        logger.info('Train time {0}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + ', ' + 'Training started'))
        train_count = 0
        train_loss_avg = 0.0
        train_loss = 0.0
        train_dis_avg = 0.0
        train_dis = 0.0
        if opt.refine_start:
            estimator.eval()
            refiner.train()
        else:
            estimator.train()
        optimizer.zero_grad()
        for rep in range(opt.repeat_num):
            for i, data in enumerate(dataloader, 0):
                points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = data
                if len(points.size()) == 2:
                    print('pass')
                    continue
                points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = points.cuda(), choose.cuda(), img.cuda(), target.cuda(), model_points.cuda(), model_kp.cuda(), vertex_gt.cuda(), idx.cuda(), target_r.cuda(), target_t.cuda()
                vertex_pred, c_pred, emb = estimator(img, points, choose, idx)
                vertex_loss, pose_loss, dis, new_points, new_target = criterion(vertex_pred, vertex_gt, c_pred, points, target, model_points, model_kp, opt.obj_id, target_r, target_t)
                loss = 10 * vertex_loss + pose_loss
                if opt.refine_start:
                    for ite in range(0, opt.iteration):
                        pred_r, pred_t = refiner(new_points, emb, idx)
                        dis, new_points, new_target = criterion_refine(pred_r, pred_t, new_points, new_target, model_points, opt.obj_id)
                        dis.backward()
                else:
                    loss.backward()

                train_loss_avg += loss.item()
                train_loss += loss.item()
                train_dis_avg += dis.item()
                train_dis += dis.item()
                train_count += 1
                train_scalar += 1

                if train_count % opt.batch_size == 0:
                    logger.info('Train time {0} Epoch {1} Batch {2} Frame {3} Avg_loss:{4} Avg_diss:{5}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), epoch, int(train_count / opt.batch_size), train_count, train_loss_avg / opt.batch_size, train_dis_avg / opt.batch_size))
                    writer.add_scalar('linemod training loss', train_loss_avg / opt.batch_size, train_scalar)
                    writer.add_scalar('linemod training dis', train_dis_avg / opt.batch_size, train_scalar)
                    optimizer.step()
                    optimizer.zero_grad()
                    train_loss_avg = 0
                    train_dis_avg = 0

                if train_count != 0 and train_count % 1000 == 0:
                    if opt.refine_start:
                        torch.save(refiner.state_dict(), '{0}/pose_refine_model_current.pth'.format(opt.outf))
                    else:
                        torch.save(estimator.state_dict(), '{0}/pose_model_current.pth'.format(opt.outf))

        print('>>>>>>>>----------epoch {0} train finish---------<<<<<<<<'.format(epoch))
        train_loss = train_loss / train_count
        train_dis = train_dis / train_count
        logger.info('Train time {0} Epoch {1} TRAIN FINISH Avg loss: {2} Avg dis: {3}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), epoch, train_loss, train_dis))

        logger = setup_logger('epoch%d_test' % epoch, os.path.join(opt.log_dir, 'epoch_%d_test_log.txt' % epoch))
        logger.info('Test time {0}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + ', ' + 'Testing started'))
        test_loss = 0.0
        test_vertex_loss = 0.0
        test_pose_loss = 0.0
        test_dis = 0.0
        test_count = 0
        success_count = 0
        estimator.eval()
        refiner.eval()

        for j, data in enumerate(testdataloader, 0):
            points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = data
            if len(points.size()) == 2:
                logger.info('Test time {0} Lost detection!'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time))))
                continue
            points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = points.cuda(), choose.cuda(), img.cuda(), target.cuda(), model_points.cuda(), model_kp.cuda(), vertex_gt.cuda(), idx.cuda(), target_r.cuda(), target_t.cuda()
            vertex_pred, c_pred, emb = estimator(img, points, choose, idx)
            vertex_loss, pose_loss, dis, new_points, new_target = criterion(vertex_pred, vertex_gt, c_pred, points, target, model_points, model_kp, opt.obj_id, target_r, target_t)
            loss = 10 * vertex_loss + pose_loss
            if opt.refine_start:
                for ite in range(0, opt.iteration):
                    pred_r, pred_t = refiner(new_points, emb, idx)
                    dis, new_points, new_target = criterion_refine(pred_r, pred_t, new_points, new_target, model_points, opt.obj_id)

            test_loss += loss.item()
            test_vertex_loss += vertex_loss.item()
            test_pose_loss += pose_loss.item()
            test_dis += dis.item()
            logger.info('Test time {0} Test Frame No.{1} loss:{2} dis:{3}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), test_count, loss, dis))
            if dis.item() < diameter:
                success_count += 1
            test_count += 1

        test_loss = test_loss / test_count
        test_vertex_loss = test_vertex_loss / test_count
        test_pose_loss = test_pose_loss / test_count
        test_dis = test_dis / test_count
        success_rate = float(success_count) / test_count
        logger.info('Test time {0} Epoch {1} TEST FINISH Avg loss: {2} Avg dis: {3} Success rate: {4}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), epoch, test_loss, test_dis, success_rate))
        writer.add_scalar('linemod test loss', test_loss, epoch)
        writer.add_scalar('linemod test vertex loss', test_vertex_loss, epoch)
        writer.add_scalar('linemod test pose loss', test_pose_loss, epoch)
        writer.add_scalar('linemod test dis', test_dis, epoch)
        writer.add_scalar('linemod success rate', success_rate, epoch)
        writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
        if test_dis <= best_test:
            best_test = test_dis
        if opt.refine_start:
            torch.save(refiner.state_dict(), '{0}/pose_refine_model_{1}_{2}.pth'.format(opt.outf, epoch, test_dis))
        else:
            torch.save(estimator.state_dict(), '{0}/pose_model_{1}_{2}.pth'.format(opt.outf, epoch, test_dis))
        print(epoch, '>>>>>>>>----------MODEL SAVED---------<<<<<<<<')

        if opt.obj_id in opt.occ_list_obj:
            logger = setup_logger('epoch%d_occ_test' % epoch, os.path.join(opt.log_dir, 'epoch_%d_occ_test_log.txt' % epoch))
            logger.info('Occ test time {0}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + ', ' + 'Testing started'))
            occ_test_dis = 0.0
            occ_test_count = 0
            occ_success_count = 0
            estimator.eval()
            refiner.eval()

            for j, data in enumerate(occtestdataloader, 0):
                points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = data
                if len(points.size()) == 2:
                    logger.info('Occ test time {0} Lost detection!'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time))))
                    continue
                points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = points.cuda(), choose.cuda(), img.cuda(), target.cuda(), model_points.cuda(), model_kp.cuda(), vertex_gt.cuda(), idx.cuda(), target_r.cuda(), target_t.cuda()
                vertex_pred, c_pred, emb = estimator(img, points, choose, idx)
                vertex_loss, pose_loss, dis, new_points, new_target = criterion(vertex_pred, vertex_gt, c_pred, points, target, model_points, model_kp, opt.obj_id, target_r, target_t)
                if opt.refine_start:
                    for ite in range(0, opt.iteration):
                        pred_r, pred_t = refiner(new_points, emb, idx)
                        dis, new_points, new_target = criterion_refine(pred_r, pred_t, new_points, new_target, model_points, opt.obj_id)

                occ_test_dis += dis.item()
                logger.info('Occ test time {0} Test Frame No.{1} dis:{2}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), occ_test_count, dis))
                if dis.item() < diameter:
                    occ_success_count += 1
                occ_test_count += 1

            occ_test_dis = occ_test_dis / occ_test_count
            occ_success_rate = float(occ_success_count) / occ_test_count
            logger.info('Occ test time {0} Epoch {1} TEST FINISH Avg dis: {2} Success rate: {3}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), epoch, occ_test_dis, occ_success_rate))
            writer.add_scalar('occ test dis', occ_test_dis, epoch)
            writer.add_scalar('occ success rate', occ_success_rate, epoch)

        if best_test < opt.refine_margin and not opt.refine_start:
            opt.refine_start = True
            opt.lr = opt.lr_refine
            opt.batch_size = int(opt.batch_size / opt.iteration)
            optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
            print('>>>>>>>>----------Refine started---------<<<<<<<<')

    writer.close()
Example 16
import logging

from lib import settings
from lib.logger import setup_logger
from lib.queue_operations import QueueOperations
from lib.models.classifications import Classifications, SharedMajorityException
from lib.models.vertex_classifications import VertexClassifications
from lib.subject_set_csv import SubjectSetCSV
from lib.subject_set_workflow_router import SubjectSetWorkflowRouter, \
  UnidentifiedRawSubjectSetException

from panoptes_client import Classification, Panoptes, Subject, Workflow
from redis import Redis
from rq import Queue

logger = setup_logger(settings.APP_NAME,
                      'log/kmeans_and_enqueue_completed_subjects.log',
                      logging.DEBUG)

subject_set_csv = SubjectSetCSV()
workflow_router = SubjectSetWorkflowRouter(subject_set_csv, settings, logger)
pages_raw_subject_ids = subject_set_csv.raw_pages_subject_ids()
logger.debug("Running Wires and Rails Workflow Processor")
Panoptes.connect(username=settings.PANOPTES_USERNAME,
                 password=settings.PANOPTES_PASSWORD)

retired_subject_ids = []

vertices_and_target_subject_sets = []

for _subject_set_id, metadata in settings.COLUMNS_WORKFLOW_METADATA.items():
Example 17
def main():
    if not osp.exists(cfg.respth): os.makedirs(cfg.respth)
    setup_logger('{}-train'.format(cfg.model_type), cfg.respth)
    print(args.saveCheckpointDir, args.loadCheckpointLocation, args.saveOnEveryIt)
    train()
Example 18
def main():
    args = parse_args()
    if not osp.exists(cfg.respth): os.makedirs(cfg.respth)
    setup_logger('{}-eval'.format('BANet'), cfg.respth)
    evaluate(cfg, args.weight_pth)
def run_subject_push_test():
    """Run subject push test"""
    logger = setup_logger('TestLogger', 'log/test_queue_operations.log')
    row_paths_by_column = {
        0: [
            '/tmp/5823821_0/010001.bin.png',
            '/tmp/5823821_0/01000b.bin.png',
            '/tmp/5823821_0/010015.bin.png',
            '/tmp/5823821_0/01001f.bin.png',
            '/tmp/5823821_0/010029.bin.png',
            '/tmp/5823821_0/010033.bin.png',
            '/tmp/5823821_0/01003d.bin.png',
            '/tmp/5823821_0/010047.bin.png',
            '/tmp/5823821_0/010051.bin.png',
            '/tmp/5823821_0/01005b.bin.png',
            '/tmp/5823821_0/010065.bin.png',
            '/tmp/5823821_0/010002.bin.png',
            '/tmp/5823821_0/01000c.bin.png',
            '/tmp/5823821_0/010016.bin.png',
            '/tmp/5823821_0/010020.bin.png',
            '/tmp/5823821_0/01002a.bin.png',
            '/tmp/5823821_0/010034.bin.png',
            '/tmp/5823821_0/01003e.bin.png',
            '/tmp/5823821_0/010048.bin.png',
            '/tmp/5823821_0/010052.bin.png',
            '/tmp/5823821_0/01005c.bin.png',
            '/tmp/5823821_0/010066.bin.png',
            '/tmp/5823821_0/010003.bin.png',
            '/tmp/5823821_0/01000d.bin.png',
            '/tmp/5823821_0/010017.bin.png',
            '/tmp/5823821_0/010021.bin.png',
            '/tmp/5823821_0/01002b.bin.png',
            '/tmp/5823821_0/010035.bin.png',
            '/tmp/5823821_0/01003f.bin.png',
            '/tmp/5823821_0/010049.bin.png',
            '/tmp/5823821_0/010053.bin.png',
            '/tmp/5823821_0/01005d.bin.png',
            '/tmp/5823821_0/010067.bin.png',
            '/tmp/5823821_0/010004.bin.png',
            '/tmp/5823821_0/01000e.bin.png',
            '/tmp/5823821_0/010018.bin.png',
            '/tmp/5823821_0/010022.bin.png',
            '/tmp/5823821_0/01002c.bin.png',
            '/tmp/5823821_0/010036.bin.png',
            '/tmp/5823821_0/010040.bin.png',
            '/tmp/5823821_0/01004a.bin.png',
            '/tmp/5823821_0/010054.bin.png',
            '/tmp/5823821_0/01005e.bin.png',
            '/tmp/5823821_0/010068.bin.png',
            '/tmp/5823821_0/010005.bin.png',
            '/tmp/5823821_0/01000f.bin.png',
            '/tmp/5823821_0/010019.bin.png',
            '/tmp/5823821_0/010023.bin.png',
            '/tmp/5823821_0/01002d.bin.png',
            '/tmp/5823821_0/010037.bin.png',
            '/tmp/5823821_0/010041.bin.png',
            '/tmp/5823821_0/01004b.bin.png',
            '/tmp/5823821_0/010055.bin.png',
            '/tmp/5823821_0/01005f.bin.png',
            '/tmp/5823821_0/010069.bin.png',
            '/tmp/5823821_0/010006.bin.png',
            '/tmp/5823821_0/010010.bin.png',
            '/tmp/5823821_0/01001a.bin.png',
            '/tmp/5823821_0/010024.bin.png',
            '/tmp/5823821_0/01002e.bin.png',
            '/tmp/5823821_0/010038.bin.png',
            '/tmp/5823821_0/010042.bin.png',
            '/tmp/5823821_0/01004c.bin.png',
            '/tmp/5823821_0/010056.bin.png',
            '/tmp/5823821_0/010060.bin.png',
            '/tmp/5823821_0/010007.bin.png',
            '/tmp/5823821_0/010011.bin.png',
            '/tmp/5823821_0/01001b.bin.png',
            '/tmp/5823821_0/010025.bin.png',
            '/tmp/5823821_0/01002f.bin.png',
            '/tmp/5823821_0/010039.bin.png',
            '/tmp/5823821_0/010043.bin.png',
            '/tmp/5823821_0/01004d.bin.png',
            '/tmp/5823821_0/010057.bin.png',
            '/tmp/5823821_0/010061.bin.png',
            '/tmp/5823821_0/010008.bin.png',
            '/tmp/5823821_0/010012.bin.png',
            '/tmp/5823821_0/01001c.bin.png',
            '/tmp/5823821_0/010026.bin.png',
            '/tmp/5823821_0/010030.bin.png',
            '/tmp/5823821_0/01003a.bin.png',
            '/tmp/5823821_0/010044.bin.png',
            '/tmp/5823821_0/01004e.bin.png',
            '/tmp/5823821_0/010058.bin.png',
            '/tmp/5823821_0/010062.bin.png',
            '/tmp/5823821_0/010009.bin.png',
            '/tmp/5823821_0/010013.bin.png',
            '/tmp/5823821_0/01001d.bin.png',
            '/tmp/5823821_0/010027.bin.png',
            '/tmp/5823821_0/010031.bin.png',
            '/tmp/5823821_0/01003b.bin.png',
            '/tmp/5823821_0/010045.bin.png',
            '/tmp/5823821_0/01004f.bin.png',
            '/tmp/5823821_0/010059.bin.png',
            '/tmp/5823821_0/010063.bin.png',
            '/tmp/5823821_0/01000a.bin.png',
            '/tmp/5823821_0/010014.bin.png',
            '/tmp/5823821_0/01001e.bin.png',
            '/tmp/5823821_0/010028.bin.png',
            '/tmp/5823821_0/010032.bin.png',
            '/tmp/5823821_0/01003c.bin.png',
            '/tmp/5823821_0/010046.bin.png',
            '/tmp/5823821_0/010050.bin.png',
            '/tmp/5823821_0/01005a.bin.png',
            '/tmp/5823821_0/010064.bin.png'
        ],
        1: [
            '/tmp/5823821_1/010001.bin.png',
            '/tmp/5823821_1/01000b.bin.png',
            '/tmp/5823821_1/010015.bin.png',
            '/tmp/5823821_1/01001f.bin.png',
            '/tmp/5823821_1/010029.bin.png',
            '/tmp/5823821_1/010033.bin.png',
            '/tmp/5823821_1/01003d.bin.png',
            '/tmp/5823821_1/010047.bin.png',
            '/tmp/5823821_1/010051.bin.png',
            '/tmp/5823821_1/01005b.bin.png',
            '/tmp/5823821_1/010065.bin.png',
            '/tmp/5823821_1/010002.bin.png',
            '/tmp/5823821_1/01000c.bin.png',
            '/tmp/5823821_1/010016.bin.png',
            '/tmp/5823821_1/010020.bin.png',
            '/tmp/5823821_1/01002a.bin.png',
            '/tmp/5823821_1/010034.bin.png',
            '/tmp/5823821_1/01003e.bin.png',
            '/tmp/5823821_1/010048.bin.png',
            '/tmp/5823821_1/010052.bin.png',
            '/tmp/5823821_1/01005c.bin.png',
            '/tmp/5823821_1/010066.bin.png',
            '/tmp/5823821_1/010003.bin.png',
            '/tmp/5823821_1/01000d.bin.png',
            '/tmp/5823821_1/010017.bin.png',
            '/tmp/5823821_1/010021.bin.png',
            '/tmp/5823821_1/01002b.bin.png',
            '/tmp/5823821_1/010035.bin.png',
            '/tmp/5823821_1/01003f.bin.png',
            '/tmp/5823821_1/010049.bin.png',
            '/tmp/5823821_1/010053.bin.png',
            '/tmp/5823821_1/01005d.bin.png',
            '/tmp/5823821_1/010067.bin.png',
            '/tmp/5823821_1/010004.bin.png',
            '/tmp/5823821_1/01000e.bin.png',
            '/tmp/5823821_1/010018.bin.png',
            '/tmp/5823821_1/010022.bin.png',
            '/tmp/5823821_1/01002c.bin.png',
            '/tmp/5823821_1/010036.bin.png',
            '/tmp/5823821_1/010040.bin.png',
            '/tmp/5823821_1/01004a.bin.png',
            '/tmp/5823821_1/010054.bin.png',
            '/tmp/5823821_1/01005e.bin.png',
            '/tmp/5823821_1/010068.bin.png',
            '/tmp/5823821_1/010005.bin.png',
            '/tmp/5823821_1/01000f.bin.png',
            '/tmp/5823821_1/010019.bin.png',
            '/tmp/5823821_1/010023.bin.png',
            '/tmp/5823821_1/01002d.bin.png',
            '/tmp/5823821_1/010037.bin.png',
            '/tmp/5823821_1/010041.bin.png',
            '/tmp/5823821_1/01004b.bin.png',
            '/tmp/5823821_1/010055.bin.png',
            '/tmp/5823821_1/01005f.bin.png',
            '/tmp/5823821_1/010069.bin.png',
            '/tmp/5823821_1/010006.bin.png',
            '/tmp/5823821_1/010010.bin.png',
            '/tmp/5823821_1/01001a.bin.png',
            '/tmp/5823821_1/010024.bin.png',
            '/tmp/5823821_1/01002e.bin.png',
            '/tmp/5823821_1/010038.bin.png',
            '/tmp/5823821_1/010042.bin.png',
            '/tmp/5823821_1/01004c.bin.png',
            '/tmp/5823821_1/010056.bin.png',
            '/tmp/5823821_1/010060.bin.png',
            '/tmp/5823821_1/010007.bin.png',
            '/tmp/5823821_1/010011.bin.png',
            '/tmp/5823821_1/01001b.bin.png',
            '/tmp/5823821_1/010025.bin.png',
            '/tmp/5823821_1/01002f.bin.png',
            '/tmp/5823821_1/010039.bin.png',
            '/tmp/5823821_1/010043.bin.png',
            '/tmp/5823821_1/01004d.bin.png',
            '/tmp/5823821_1/010057.bin.png',
            '/tmp/5823821_1/010061.bin.png',
            '/tmp/5823821_1/010008.bin.png',
            '/tmp/5823821_1/010012.bin.png',
            '/tmp/5823821_1/01001c.bin.png',
            '/tmp/5823821_1/010026.bin.png',
            '/tmp/5823821_1/010030.bin.png',
            '/tmp/5823821_1/01003a.bin.png',
            '/tmp/5823821_1/010044.bin.png',
            '/tmp/5823821_1/01004e.bin.png',
            '/tmp/5823821_1/010058.bin.png',
            '/tmp/5823821_1/010062.bin.png',
            '/tmp/5823821_1/010009.bin.png',
            '/tmp/5823821_1/010013.bin.png',
            '/tmp/5823821_1/01001d.bin.png',
            '/tmp/5823821_1/010027.bin.png',
            '/tmp/5823821_1/010031.bin.png',
            '/tmp/5823821_1/01003b.bin.png',
            '/tmp/5823821_1/010045.bin.png',
            '/tmp/5823821_1/01004f.bin.png',
            '/tmp/5823821_1/010059.bin.png',
            '/tmp/5823821_1/010063.bin.png',
            '/tmp/5823821_1/01000a.bin.png',
            '/tmp/5823821_1/010014.bin.png',
            '/tmp/5823821_1/01001e.bin.png',
            '/tmp/5823821_1/010028.bin.png',
            '/tmp/5823821_1/010032.bin.png',
            '/tmp/5823821_1/01003c.bin.png',
            '/tmp/5823821_1/010046.bin.png',
            '/tmp/5823821_1/010050.bin.png',
            '/tmp/5823821_1/01005a.bin.png',
            '/tmp/5823821_1/010064.bin.png'
        ]
    }
    subject_id = 5823821
    queue_ops = QueueOperations(logger)
    queue_ops.push_new_row_subjects(subject_id, row_paths_by_column)
Example 20
        removals_by_target_set = defaultdict(list)

        for subject_id, target_subject_set_id in segmented_rows_and_their_target_sets.items():
            # target_subject_set = self._get_subject_set(target_subject_set_id)
            self._logger.debug('Saving segmented row %d to set: %s',
                               subject_id, target_subject_set_id)
            subject = Subject.find(subject_id)
            additions_by_target_set[target_subject_set_id].append(subject)

            for curr_subject_set in subject.links.subject_sets:
                removals_by_target_set[curr_subject_set.id].append(subject_id)

        # Add new subjects to the appropriate target sets
        for target_subject_set_id, new_subjects in additions_by_target_set.items():
            target_subject_set = self._get_subject_set(target_subject_set_id)
            target_subject_set.add(new_subjects)


if __name__ == '__main__':
    dry_run = True
    logger = setup_logger(settings.APP_NAME, 'log/row_migrations.log',
                          logging.DEBUG)
    subject_set_csv = SubjectSetCSV()
    workflow_router = SubjectSetWorkflowRouter(subject_set_csv, settings,
                                               logger)
    pages_raw_subject_ids = subject_set_csv.raw_pages_subject_ids()
    MigrateRowsToAppropriateSubjectSet(workflow_router, subject_set_csv,
                                       logger, dry_run).run()
Example 21
def setup(config: Config, args):
    config.opts.new = args.new
    if args.total_step is not None:
        config.trainer.start_total_steps = args.total_step
    config.resource.create_directories()
    setup_logger(config.resource.main_log_path)
Example 22
def main():
    if opt.dataset == 'ycb':
        opt.num_obj = 21
        opt.sym_list = [12, 15, 18, 19, 20]
        opt.num_points = 1000
        writer = SummaryWriter('experiments/runs/ycb/{0}'.format(opt.experiment_name))
        opt.outf = 'trained_models/ycb/{0}'.format(opt.experiment_name)
        opt.log_dir = 'experiments/logs/ycb/{0}'.format(opt.experiment_name)
        opt.repeat_num = 1
        if not os.path.exists(opt.outf):
            os.mkdir(opt.outf)
        if not os.path.exists(opt.log_dir):
            os.mkdir(opt.log_dir)
    else:
        print('Unknown dataset')
        return

    estimator = PoseNet(num_points=opt.num_points, num_vote=9, num_obj=opt.num_obj)
    estimator.cuda()
    refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_obj)
    refiner.cuda()

    if opt.resume_posenet != '':
        estimator.load_state_dict(torch.load('{0}/{1}'.format(opt.outf, opt.resume_posenet)))
    if opt.resume_refinenet != '':
        refiner.load_state_dict(torch.load('{0}/{1}'.format(opt.outf, opt.resume_refinenet)))
        opt.refine_start = True
        opt.lr = opt.lr_refine
        opt.batch_size = int(opt.batch_size / opt.iteration)
        optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
    else:
        opt.refine_start = False
        optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)

    dataset = PoseDataset_ycb('train', opt.num_points, True, opt.dataset_root)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers)
    test_dataset = PoseDataset_ycb('test', opt.num_points, False, opt.dataset_root)
    testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers)

    print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\nlength of the training set: {0}\nlength of the testing set: {1}\nnumber of sample points on mesh: {2}'.format(len(dataset), len(test_dataset), opt.num_points))

    criterion = Loss(opt.num_points, opt.sym_list)
    criterion_refine = Loss_refine(opt.num_points, opt.sym_list)
    best_test = np.inf

    if opt.start_epoch == 1:
        for log in os.listdir(opt.log_dir):
            os.remove(os.path.join(opt.log_dir, log))
    st_time = time.time()
    train_scalar = 0

    for epoch in range(opt.start_epoch, opt.nepoch):
        logger = setup_logger('epoch%d' % epoch, os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))
        logger.info('Train time {0}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + ', ' + 'Training started'))
        train_count = 0
        train_loss_avg = 0.0
        train_loss = 0.0
        train_dis_avg = 0.0
        train_dis = 0.0
        if opt.refine_start:
            estimator.eval()
            refiner.train()
        else:
            estimator.train()
        optimizer.zero_grad()
        for rep in range(opt.repeat_num):
            for i, data in enumerate(dataloader, 0):
                points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = data
                points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = points.cuda(), choose.cuda(), img.cuda(), target.cuda(), model_points.cuda(), model_kp.cuda(), vertex_gt.cuda(), idx.cuda(), target_r.cuda(), target_t.cuda()
                vertex_pred, c_pred, emb = estimator(img, points, choose, idx)
                vertex_loss, pose_loss, dis, new_points, new_target = criterion(vertex_pred, vertex_gt, c_pred, points, target, model_points, model_kp, idx, target_r, target_t)
                loss = 10 * vertex_loss + pose_loss
                if opt.refine_start:
                    for ite in range(0, opt.iteration):
                        pred_r, pred_t = refiner(new_points, emb, idx)
                        dis, new_points, new_target = criterion_refine(pred_r, pred_t, new_points, new_target, model_points, idx)
                        dis.backward()
                else:
                    loss.backward()
                train_loss_avg += loss.item()
                train_loss += loss.item()
                train_dis_avg += dis.item()
                train_dis += dis.item()
                train_count += 1
                train_scalar += 1

                if train_count % opt.batch_size == 0:
                    logger.info('Train time {0} Epoch {1} Batch {2} Frame {3} Avg_loss:{4} Avg_diss:{5}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), epoch, int(train_count / opt.batch_size), train_count, train_loss_avg / opt.batch_size, train_dis_avg / opt.batch_size))
                    writer.add_scalar('ycb training loss', train_loss_avg / opt.batch_size, train_scalar)
                    writer.add_scalar('ycb training dis', train_dis_avg / opt.batch_size, train_scalar)
                    optimizer.step()
                    optimizer.zero_grad()
                    train_loss_avg = 0
                    train_dis_avg = 0

                if train_count != 0 and train_count % 1000 == 0:
                    if opt.refine_start:
                        torch.save(refiner.state_dict(), '{0}/pose_refine_model_current.pth'.format(opt.outf))
                    else:
                        torch.save(estimator.state_dict(), '{0}/pose_model_current.pth'.format(opt.outf))

        print('>>>>>>>>----------epoch {0} train finish---------<<<<<<<<'.format(epoch))
        train_loss = train_loss / train_count
        train_dis = train_dis / train_count
        logger.info('Train time {0} Epoch {1} TRAIN FINISH Avg loss: {2} Avg dis: {3}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), epoch, train_loss, train_dis))

        logger = setup_logger('epoch%d_test' % epoch, os.path.join(opt.log_dir, 'epoch_%d_test_log.txt' % epoch))
        logger.info('Test time {0}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) + ', ' + 'Testing started'))
        test_loss = 0.0
        test_vertex_loss = 0.0
        test_pose_loss = 0.0
        test_dis = 0.0
        test_count = 0
        success_count = 0
        estimator.eval()
        refiner.eval()
        for j, data in enumerate(testdataloader, 0):
            points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = data
            points, choose, img, target, model_points, model_kp, vertex_gt, idx, target_r, target_t = points.cuda(), choose.cuda(), img.cuda(), target.cuda(), model_points.cuda(), model_kp.cuda(), vertex_gt.cuda(), idx.cuda(), target_r.cuda(), target_t.cuda()
            vertex_pred, c_pred, emb = estimator(img, points, choose, idx)
            vertex_loss, pose_loss, dis, new_points, new_target = criterion(vertex_pred, vertex_gt, c_pred, points, target, model_points, model_kp, idx, target_r, target_t)
            loss = 10 * vertex_loss + pose_loss
            if opt.refine_start:
                for ite in range(0, opt.iteration):
                    pred_r, pred_t = refiner(new_points, emb, idx)
                    dis, new_points, new_target = criterion_refine(pred_r, pred_t, new_points, new_target, model_points, idx)
            test_loss += loss.item()
            test_vertex_loss += vertex_loss.item()
            test_pose_loss += pose_loss.item()
            test_dis += dis.item()
            logger.info('Test time {0} Test Frame No.{1} loss:{2} dis:{3}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), test_count, loss, dis))
            test_count += 1
            if dis.item() < 0.02:
                success_count += 1

        test_loss = test_loss / test_count
        test_vertex_loss = test_vertex_loss / test_count
        test_pose_loss = test_pose_loss / test_count
        test_dis = test_dis / test_count
        logger.info('Test time {0} Epoch {1} TEST FINISH Avg loss: {2} Avg dis: {3}'.format(time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)), epoch, test_loss, test_dis))
        logger.info('Success rate: {}'.format(float(success_count) / test_count))
        writer.add_scalar('ycb test loss', test_loss, epoch)
        writer.add_scalar('ycb test vertex loss', test_vertex_loss, epoch)
        writer.add_scalar('ycb test pose loss', test_pose_loss, epoch)
        writer.add_scalar('ycb test dis', test_dis, epoch)
        writer.add_scalar('ycb success rate', float(success_count) / test_count, epoch)
        writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
        if test_dis <= best_test:
            best_test = test_dis
        if opt.refine_start:
            torch.save(refiner.state_dict(), '{0}/pose_refine_model_{1}_{2}.pth'.format(opt.outf, epoch, test_dis))
        else:
            torch.save(estimator.state_dict(), '{0}/pose_model_{1}_{2}.pth'.format(opt.outf, epoch, test_dis))
        print(epoch, '>>>>>>>>----------MODEL SAVED---------<<<<<<<<')

        if best_test < opt.refine_margin and not opt.refine_start:
            opt.refine_start = True
            opt.lr = opt.lr_refine
            opt.batch_size = int(opt.batch_size / opt.iteration)
            optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
            print('>>>>>>>>----------Refine started---------<<<<<<<<')

    writer.close()