def save(self, dataset_dir="dataset", fname="0"):
    fname = str(fname)
    if self.get("inst") is not None:
        inst_dir = pathjoin(dataset_dir, "instance_map")
        os.makedirs(inst_dir, exist_ok=True)
        inst_path = pathjoin(inst_dir, fname + ".png")
        cv2.imwrite(inst_path, self["inst"].clip(0).astype(np.uint16))
    if self.get("depth") is not None:
        depth_dir = pathjoin(dataset_dir, "depth")
        os.makedirs(depth_dir, exist_ok=True)
        depth_path = pathjoin(depth_dir, fname)
        savenp(depth_path, self["depth"].astype(np.float16))
    if (
        self.get("image") is not None
        and self.get("inst") is not None
        and self.get("depth") is not None
    ):
        vis_dir = pathjoin(dataset_dir, "vis")
        os.makedirs(vis_dir, exist_ok=True)
        vis_path = pathjoin(vis_dir, fname + ".jpg")
        cv2.imwrite(vis_path, self.vis()[..., ::-1])
    if self.get("ycb_6d_pose") is not None:
        pose_dir = pathjoin(dataset_dir, "ycb_6d_pose")
        os.makedirs(pose_dir, exist_ok=True)
        pose_path = pathjoin(pose_dir, fname + ".mat")
        scipy.io.savemat(pose_path, self["ycb_6d_pose"])
    # save the image last, to guard against unstable compute environments
    if self.get("image") is not None:
        image_dir = pathjoin(dataset_dir, "image")
        os.makedirs(image_dir, exist_ok=True)
        image_path = pathjoin(image_dir, fname + ".jpg")
        cv2.imwrite(image_path, self["image"][..., ::-1])
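
# Usage sketch (not part of the original snippet): assuming this `save` is a
# method of the dict-like ImageWithAnnotation container returned by
# render_data() in Example #2, one rendered frame could be dumped as:
#
#     result = render_data()
#     result.save(dataset_dir="dataset", fname=0)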
Example #2
def render_data(render_image=True, render_annotation=True):
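    # Render the current Blender scene twice: once with the normal engine
    # settings for the RGB image, and once with instance-id materials so the
    # per-pixel annotation can be parsed from an OpenEXR output; both renders
    # write to a temporary path under tempfile.gettempdir().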
    path = pathjoin(tempfile.gettempdir(), "render_" + str(time.time()))
    render_result = {}
    if render_image:
        png_path = path + ".png"
        with set_image_render(), withattr(render, "filepath", png_path):
            print("Render image using:", render.engine)
            bpy.ops.render.render(write_still=True)
        render_result["image"] = imread(png_path)
        os.remove(png_path)

    if render_annotation:
        exr_path = path + ".exr"
        with set_inst_material(), set_annotation_render(), withattr(
            render, "filepath", exr_path
        ):
            print("Render annotation using:", render.engine)
            bpy.ops.render.render(write_still=True)
        render_result["exr"] = parser_exr(exr_path)
        os.remove(exr_path)
    result = ImageWithAnnotation(**render_result)
    if "render_6dof_pose" and "Camera" in bpy.data.objects:
        objs = [obj for obj in bpy.data.objects if "inst_id" in obj]
        ycb_meta = get_6dof_pose(bpy.data.objects["Camera"], objs, inst=result["inst"])
        result["ycb_meta"] = ycb_meta
    return result
Example #3
    def testDump(self):
        import yaml

        cfg = self.cfg.clone()
        yamlp = pathjoin(tmpboxx(), "test.yaml")
        cfg.dump(yamlp)
        with open(yamlp) as f:
            cfg_dict = yaml.safe_load(f)
        cfgd = CfgNode(cfg_dict)
        self.assertTrue(str(cfg.dump()) == str(cfgd.dump()))
    def __init__(self, opt):
        UnalignedDataset.__init__(self, opt)

        self.npy = npy = np.load(pathjoin(opt.dataroot, 'fgs.npy'))
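        # fgs.npy holds the stacked foreground sprites; judging by the
        # commented line below, the last channel appears to be an alpha mask.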
        #        npy[...,:3][npy[..., -1]<128] = 128

        transform = T.Compose([T.Resize((opt.crop_size, opt.crop_size))] +
                              self.transform_A.transforms[-2:])

        pils = map2(Image.fromarray, npy)
        self.fgs = map2(transform, pils)

        self.fg_size = len(self.fgs)
Example #5
def get_texture_paths(texture_dir):
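    # All basenames in a texture folder share a common prefix and a common
    # suffix; the two loops find the first column from the left (i) and from
    # the right (_i) where the names differ.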
    bnames = listdir(texture_dir)
    for i, cs in enumerate(zip(*bnames)):
        if len(set(cs)) != 1:
            break
    for _i, cs in enumerate(zip(*[b[::-1] for b in bnames])):
        if len(set(cs)) != 1:
            break
    texture_paths = {
        alias_texture_name_to_name(bname[i:-_i]): pathjoin(texture_dir, bname)
        for bname in bnames
    }
    texture_paths["name"] = bnames[0][:i] + "." + bnames[0][-_i:]
    return texture_paths
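
# Usage sketch (not part of the original snippet; the directory is a placeholder):
#
#     texture_paths = get_texture_paths("/path/to/some_texture_dir")
#     # maps each aliased texture-map name to its file path, plus a "name" entry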
def evaluateByJsp(resJsp,
                  gtJsp,
                  log=True,
                  method=None,
                  levels=['averaged', 'easy', 'medium', 'hard']):
    if method is None:
        method = basename(dirname(dirname(dirname(resJsp))))


#        method=basename(dirname(resJsp))
    resTable = defaultdict(lambda: {})

    resJs = loadjson(resJsp)
    gtJs = loadjson(gtJsp)

    if 'averaged' in levels:
        level = 'averaged'
        row = evaluateByJson(
            resJs,
            gtJs,
        )

        row['method'] = method
        row['level'] = level
        resTable[level] = dict(row)
        tree - row  # boxx: print the structure of `row`
    for level in filter(lambda x: x in levels, ['easy', 'medium', 'hard']):
        coco = loadjson(gtJsp)
        coco['images'] = [d for d in coco['images'] if d['level'] == level]
        imgIds = [d['id'] for d in coco['images']]
        coco['annotations'] = [
            bb for bb in coco['annotations'] if bb['image_id'] in imgIds
        ]
        resJs = loadjson(resJsp)
        row = evaluateByJson(
            resJs,
            coco,
        )

        row['method'] = method
        row['level'] = level
        resTable[level] = dict(row)
        tree - row

    resdir = dirname(resJsp)
    savejson(resTable, pathjoin(resdir, 'resTable.json'))
    return resTable
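
# Usage sketch (not part of the original snippet; both paths are placeholders):
#
#     resTable = evaluateByJsp("output/some_method/inference/coco_format_val/bbox.json",
#                              "annotations/instances_val.json")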
Example #7
    def __init__(self, src='zh', dest='en', api='baidu', cache_path=None):
        self.src = src
        self.dest = dest
        self.df = {}

        self.api = apiDicts[api](src=src, dest=dest)

        if cache_path is None:
            makedirs('translate.cache')
            cache_path = pathjoin('translate.cache', f'{src}_to_{dest}.csv')

        self.cache_path = cache_path
        if cache_path:
            if not isfile(cache_path):
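                # first run: create an empty cache CSV indexed by the source text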
                df = pd.DataFrame(columns=['src', 'dest']).set_index('src')
                df.to_csv(cache_path)

            self.lock = threading.Lock()
            self.df = df = pd.read_csv(cache_path).set_index('src')
Example #8
        def download(name):
            try:
                if self.debug:
                    print(name)
                prefix = f"{name}_{resolution}"
                paths = boxx.glob(os.path.join(hdri_dir, prefix + "*"))
                if len(paths):
                    return paths[0]
                url = f"https://hdrihaven.com/hdri/?h={name}"
                html = BeautifulSoup(
                    rq.get(url, timeout=5).text,
                    features="html.parser",
                )
                href = [
                    a["href"] for a in html.find_all("a")
                    if f"_{resolution}." in a.get("href", "")
                ][0]
                cats = [
                    a.text for a in html.find(
                        text="Categories:").parent.parent.find_all("a")
                ]
                tags = [
                    a.text for a in html.find(
                        text="Tags:").parent.parent.find_all("a")
                ]
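                # cache file name encodes the metadata:
                # <name>_<resolution>.<categories joined by '='>.<tags joined by '='>.<extension>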
                name = f"{prefix}.{'='.join(cats)}.{'='.join(tags)}.{href[-3:]}"

                path = pathjoin(hdri_dir, name)
                r = rq.get(href, timeout=5)
                assert r.status_code == 200
                os.makedirs(hdri_dir, exist_ok=True)
                with open(path, "wb") as f:
                    f.write(r.content)
                return path
            except Exception as e:
                if self.debug:
                    boxx.pred - name
                    boxx.g()
                raise e
Example #9
"""
Created on Mon Apr  1 22:18:48 2019

Synthesize a glasses dataset
"""
from boxx import *
import cv2
from affineFit import affine_fit
from boxx import np, pathjoin, pd, randchoice, imread, os, randfloat, makedirs
import bpp

cf.cloud = sysi.user == 'yanglei'
cf.debug = not cf.cloud

root = os.path.expanduser('~/dataset/celeba')

partition = pd.read_csv(pathjoin(root, 'list_eval_partition.csv'))
trainKeys = partition[partition.partition == 0].image_id

imgdir = pathjoin(root, 'img_align_celeba')

attr = 'Eyeglasses'
npyp = pathjoin(pathjoin(root, "%s_cycle_dataset" % attr.lower()), 'fgs.npy')

datasetdir = pathjoin(root, "%s_stgan_dataset" % attr.lower())

trainADir = pathjoin(datasetdir, 'trainA')
makedirs(trainADir)
trainBDir = pathjoin(datasetdir, 'trainB')
makedirs(trainBDir)

glasss = np.load(npyp)

diffs = ['easy', 'medium', 'hard']

#gtJsp = cocoCheckBboxAnnJsp

junkDir = '/home/dl/junk'
if winYl:
    junkDir = 'c:/D/junk'

resJsps = sorted(
    sum(
        map(
            lambda key: glob(
                pathjoin(junkDir,
                         f'output/{key}/inference/coco_format_val/bbox.json')),
            globKeys), []))
if __name__ == "__main__":

    from printAnd2latex import exportResultMd

    for resJsp in resJsps:
        resTable = evaluateByJsp(resJsp, gtJsp)

        resOld = {k: {'mix': v} for k, v in resTable.items()}
        exportResultMd(resOld)

#import matplotlib.pyplot as plt
#for s in scoress.T:
#    plt.plot(xs, s)
#    plt.show()
#    loga(ll - imgdf[imgdf.wh.apply(x_[0]) < 1750].wh)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: DIYer22@github
@mail: [email protected]
Created on Wed Apr  3 16:44:01 2019
"""
from boxx import *
from boxx import makedirs, np, npa, imread, pathjoin, glob, os, resize, p, uint8

ignoreWarning()

celebA_path = os.path.expanduser('~/dataset/celeba')
dataset = pathjoin(celebA_path, 'eyeglasses_stgan_dataset')

st_gan_dataset = pathjoin(dataset, 'tf_st_gan_dataset')

psa = sorted(glob(pathjoin(dataset, 'trainA/*')))[0::2]
psb = sorted(glob(pathjoin(dataset, 'trainB/*')))[1::2]

lena = len(psa)
lenb = len(psb)

attribute = np.zeros((lena + lenb, 40), bool)
attribute[-lenb:, 15] = True  # 0-based index 15 is Eyeglasses in CelebA's 40 attributes

img = imread(psa[0])
shape = img.shape

makedirs(p / st_gan_dataset)
imgns = psa + psb
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 11:53:41 2018

@author: yl
"""
#from evaluateByBbox import *
#from evaluateByBbox import resJsps
#from config import skuDf
from boxx import *
from boxx import loadjson, pd, Counter, p
from boxx import reduce, add, ll, tree, x_, os, pathjoin

skuDf = pd.read_csv(
    os.path.abspath(pathjoin(__file__, '../../sku_info_generated.csv')))

#resJsp = resJsps[0]
#resJsp = r'c:/D/junk\output\valAsTrain_raw\bbox_coco_2017_val_results.json'
#resJsp = r'c:/D/junk\output\testAsTrain_raw\bbox_coco_2017_val_results.json'
#resJsp = r'c:/D/junk\output\mix_11_oldgan\bbox_coco_2017_val_results.json'
resJsp = "/home/dl/junk/output/mix_11/inference/coco_format_val/bbox.json"
resJsp = "/home/dl/junk/output/testAsTrain/inference/coco_format_val/bbox.json"
print(resJsp)
thre = .784321  # mix 11
thre = 0.84  # testAsTrain
diff = 'easy'
#diff = 'medium'
#diff = 'hard'
diff = 'all'
import os
import argparse

from boxx import pathjoin
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir

#def main():
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default=os.path.abspath(
            pathjoin(
                __file__,
                "../../configs/on_sb2.yaml",
            )),
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--data_root",
        default="",
        metavar="FILE",
        help="path to coco format",
        type=str,
    )
    parser.add_argument(
        "--task",
        default="rpc",
        metavar="NAME",

    def exist(self, dirr, index):
        return os.path.isfile(pathjoin(dirr, "image", f"{index}.jpg"))

            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=output_folder,
        )
        synchronize()


#def main():
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Training")
    parser.add_argument(
        "--config-file",
        default=os.path.abspath(
            pathjoin(
                __file__,
                "../../configs/on_sb2.yaml",
            )),
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--data_root",
        default="",
        metavar="FILE",
        help="path to coco format",
        type=str,
    )
    parser.add_argument(
        "--task",
        default="rpc",
Example #16
        '舒亮皓齿白80g', '云南白药牙膏45g', '舒克宝贝儿童牙刷', '清风原木纯品金装100x3', '洁柔face150x3',
        '斑布100x3', '维达婴儿150x3', '相印小黄人150x3', '清风原木纯品黑耀150x3', '洁云绒触感130x3',
        '舒洁萌印花120x2', '相印红悦130x3', '得宝苹果木味90x4', '清风新韧纯品130x3', '金鱼竹浆绿135x3',
        '清风原木纯品150x2', '洁柔face130x3', '维达立体美110x3', '洁柔CS单包*', '相印小黄人单包*',
        '清风原色单包*', '相印茶语单包*', '清风质感纯品单包*', '米奇1928笔记本', '广博固体胶15g', '票据文件袋',
        '晨光蜗牛改正带', '鸿泰液体胶50g', '马培德自粘性标签', '东亚记号笔'
    ]))

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Webcam Demo")
    parser.add_argument(
        "--config-file",
        default=os.path.abspath(
            pathjoin(
                __file__,
                "../../configs/101_fpn_coco_format_1x.yaml",
            )),
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.78,
        help="Minimum score for the prediction to be shown",
    )
    parser.add_argument(
        "--min-image-size",
        type=int,
        #        default=224,
        default=800,
def main():
    args = parse_args()

    cf.args = args

    reset_config(config, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'train')

    tb_log_dir = pathjoin(
        dirname(tb_log_dir), 'w%s,m%s,rs%s,t%s_' %
        (args.pointMaxW, args.probMargin, ''.join(map(str, args.rs)), args.t) +
        basename(tb_log_dir))
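    # the log directory name above encodes pointMaxW, probMargin, cyc_rs and
    # the temperature args.t, so different runs are easy to tell apart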

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    model = eval('models.' + config.MODEL.NAME + '.get_pose_net')(
        config, is_train=True)

    # copy model file
    this_dir = os.path.dirname(__file__)
    shutil.copy2(
        os.path.join(this_dir, '../lib/models', config.MODEL.NAME + '.py'),
        final_output_dir)

    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }

    dump_input = torch.rand(
        (config.TRAIN.BATCH_SIZE, 3, config.MODEL.IMAGE_SIZE[1],
         config.MODEL.IMAGE_SIZE[0]))
    #writer_dict['writer'].add_graph(model, (dump_input, ), verbose=False)

    gpus = [int(i) for i in config.GPUS.split(',')]
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    # define loss function (criterion) and optimizer
    #    criterion = JointsMSELoss(
    #        use_target_weight=config.LOSS.USE_TARGET_WEIGHT
    #    ).cuda()

    if config.TRAIN.CRITERION == 'msssm_mean':
        criterion = MultiScaleSpatialSoftmax(log_freq=60 * 10,
                                             cyc_rs=args.rs,
                                             poolings=['avg', 'max'][:],
                                             pointMaxW=args.pointMaxW,
                                             probMargin=args.probMargin,
                                             temper=args.t)
        # p[1, 4, 10]* m[0, .5, .8]
#        criterion = MultiScaleSpatialSoftMax( poolings=['avg', 'max'], pointMaxW=1)
#        criterion = MultiScaleSpatialSoftMax(cyc_rs=[8, 4, 2, ], pointMaxW=1)
    elif config.TRAIN.CRITERION == 'ssm_mean':
        criterion = SpatialSoftmax()


#    criterion = torch.nn.DataParallel(criterion, device_ids=gpus).cuda()

#    cf.debugPoinMax = 30
    cf.debugPoinMax = False

    if cf.debugPoinMax:
        criterion = MultiScaleSpatialSoftmax(
            log_freq=30,
            cyc_rs=[],
            poolings=['avg', 'max'][:],
            pointMaxW=args.pointMaxW,
        )

    optimizer = get_optimizer(config, model)

    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR)

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = eval('dataset.' + config.DATASET.DATASET)(
        config, config.DATASET.ROOT, config.DATASET.TRAIN_SET, True,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    valid_dataset = eval('dataset.' + config.DATASET.DATASET)(
        config, config.DATASET.ROOT, config.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE * len(gpus),  # config batch size is per GPU
        shuffle=config.TRAIN.SHUFFLE,
        num_workers=config.WORKERS,
        pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True)

    best_perf = 0.0
    best_model = False
    for epoch in range(config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH):
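        # stepping the scheduler at the top of the epoch follows the pre-1.1
        # PyTorch convention; PyTorch >= 1.1 expects step() after the
        # optimizer updates for that epoch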
        lr_scheduler.step()

        # train for one epoch
        train(config, train_loader, model, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)

        # evaluate on validation set
        perf_indicator = validate(config, valid_loader, valid_dataset, model,
                                  criterion, final_output_dir, tb_log_dir,
                                  writer_dict)

        if perf_indicator > best_perf:
            best_perf = perf_indicator
            best_model = True
        else:
            best_model = False

        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': get_model_name(config),
                'state_dict': model.state_dict(),
                'perf': perf_indicator,
                'optimizer': optimizer.state_dict(),
            }, best_model, final_output_dir)

    final_model_state_file = os.path.join(final_output_dir,
                                          'final_state.pth.tar')
    logger.info(
        'saving final model state to {}'.format(final_model_state_file))
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
    print(args)