Example #1
 def __init__(self, dir=None):
     if dir is None:
         dir = get_dataset_path('ilsvrc_metadata')
     self.dir = os.path.expanduser(dir)
     mkdir_p(self.dir)
     f = os.path.join(self.dir, 'synsets.txt')
     if not os.path.isfile(f):
         self._download_caffe_meta()
     self.caffepb = None
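For context: every example on this page uses `mkdir_p` from `tensorpack.utils.fs`, which behaves like the shell's `mkdir -p` (create the directory and any missing parents; do nothing if it already exists). A minimal standard-library sketch of an equivalent helper, in case tensorpack is not installed -- this mirrors the call sites below but is not tensorpack's own implementation:

import os

def mkdir_p(dirname):
    """Create `dirname` and any missing parents; no-op if it already exists."""
    if dirname:
        os.makedirs(dirname, exist_ok=True)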
Example #2
def do_visualize(model, model_path, nr_visualize=100, output_dir='output'):
    """
    Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
    """
    df = get_train_dataflow()
    df.reset_state()

    pred = OfflinePredictor(
        PredictConfig(model=model,
                      session_init=SmartInit(model_path),
                      input_names=['image', 'gt_boxes', 'gt_labels'],
                      output_names=[
                          'generate_{}_proposals/boxes'.format(
                              'fpn' if cfg.MODE_FPN else 'rpn'),
                          'generate_{}_proposals/scores'.format(
                              'fpn' if cfg.MODE_FPN else 'rpn'),
                          'fastrcnn_all_scores',
                          'output/boxes',
                          'output/scores',
                          'output/labels',
                      ]))

    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df), nr_visualize):
            img, gt_boxes, gt_labels = (dp['image'], dp['gt_boxes'],
                                        dp['gt_labels'])

            rpn_boxes, rpn_scores, all_scores, \
                final_boxes, final_scores, final_labels = pred(
                    img, gt_boxes, gt_labels)

            # draw groundtruth boxes
            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            # draw best proposals for each groundtruth, to show recall
            proposal_viz, good_proposals_ind = draw_proposal_recall(
                img, rpn_boxes, rpn_scores, gt_boxes)
            # draw the scores for the above proposals
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind],
                                         all_scores[good_proposals_ind])

            results = [
                DetectionResult(*args)
                for args in zip(final_boxes, final_scores, final_labels,
                                [None] * len(final_labels))
            ]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches(
                [gt_viz, proposal_viz, score_viz, final_viz], 2, 2)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
Example #3
 def __init__(self, dir=None):
     if dir is None:
         # mkdir_p(None) below would fail anyway, so fail early with a clear message
         raise ValueError("please provide the dataset path")
     self.dir = dir
     mkdir_p(self.dir)
     f = os.path.join(self.dir, 'image_tr.txt')
     if not os.path.isfile(f):
         print("cannot find image_tr.txt")
     self.caffepb = None
Example #4
def proceed_validation(args, is_save=True, is_densecrf=False):
    import cv2
    #name = "ningbo_val"
    name = "val"
    ds = dataset.PSSD(args.base_dir, args.meta_dir, name)
    ds = BatchData(ds, 1)

    pred_config = PredictConfig(model=Model(),
                                session_init=get_model_loader(args.load),
                                input_names=['image'],
                                output_names=['prob'])
    predictor = OfflinePredictor(pred_config)
    from tensorpack.utils.fs import mkdir_p
    result_dir = "result/pssd_apr26"
    #result_dir = "ningbo_validation"
    mkdir_p(result_dir)
    i = 1
    stat = MIoUStatistics(CLASS_NUM)
    logger.info("start validation....")
    for image, label in tqdm(ds.get_data()):
        label = np.squeeze(label)
        image = np.squeeze(image)

        def mypredictor(input_img):
            #input image: 1*H*W*3
            #output : H*W*C
            output = predictor(input_img)
            return output[0][0]

        prediction = predict_scaler(image,
                                    mypredictor,
                                    scales=[0.5, 0.75, 1, 1.25, 1.5],
                                    classes=CLASS_NUM,
                                    tile_size=CROP_SIZE,
                                    is_densecrf=is_densecrf)
        prediction = np.argmax(prediction, axis=2)
        stat.feed(prediction, label)

        if is_save:
            cv2.imwrite(
                os.path.join(result_dir, "{}.png".format(i)),
                np.concatenate((image, visualize_label(label),
                                visualize_label(prediction)),
                               axis=1))
            #imwrite_grid(image,label,prediction, border=512, prefix_dir=result_dir, imageId = i)
        i += 1

    logger.info("mIoU: {}".format(stat.mIoU))
    logger.info("mean_accuracy: {}".format(stat.mean_accuracy))
    logger.info("accuracy: {}".format(stat.accuracy))
Example #5
def generate_trimap_pascal(radius=1):
    #main_img_dir = "/data_a/dataset/cityscapes"
    #meta_txt = "cityscapes"

    main_img_dir = "/data_a/dataset/pascalvoc2012/VOC2012trainval/VOCdevkit/VOC2012"
    meta_txt = "pascalvoc12"

    from tensorpack.utils.fs import mkdir_p
    trimap_dir = os.path.join(main_img_dir, "trimap_gt{}".format(radius))
    mkdir_p(trimap_dir)
    print(trimap_dir)
    f = open(os.path.join(meta_txt, "train.txt"))
    result_f = open(
        os.path.join(meta_txt, "train_trimap{}.txt".format(radius)), "w")
    lines = f.readlines()
    from tqdm import tqdm
    for l in tqdm(lines):
        l = l.strip("\n")
        img_dir, label_dir = l.split(" ")
        img = cv2.imread(os.path.join(main_img_dir, img_dir))
        label = cv2.imread(os.path.join(main_img_dir, label_dir), 0)
        new_label = label.copy()
        basename = os.path.basename(label_dir)
        #edge = cv2.Canny(label, 100, 200).astype("float32")
        #xs,ys = np.where(edge==255)
        w, h = label.shape  # note: w is the number of rows, h the number of columns
        for x in range(w):
            for y in range(h):
                if is_edge(x, y, label):
                    # clamp the lower bounds so negative indices do not wrap around
                    new_label[max(0, x - radius):x + radius,
                              max(0, y - radius):y + radius] = 255

        trimap_name = os.path.join(trimap_dir, basename)

        cv2.imshow("im", img / 255.0)
        cv2.imshow("raw-originlabel", label)
        cv2.imshow("color-originlabel", visualize_label(label))
        cv2.imshow("raw-newlabel", new_label)
        cv2.imshow("color-newlabel", visualize_label(new_label))
        cv2.waitKey(0)

        #cv2.imwrite(trimap_name, new_label)
        result_f.write("{} {}\n".format(img_dir, trimap_name))
    f.close()
    result_f.close()
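The nested per-pixel loop above is quadratic Python and slow on full-size images. Assuming `is_edge` marks pixels on label boundaries, the same trimap can be computed with OpenCV morphology; a hedged sketch under that assumption, not the original code:

import cv2
import numpy as np

def make_trimap(label, radius=1):
    """Mark a band of `radius` pixels around label boundaries with 255."""
    kernel = np.ones((3, 3), np.uint8)
    # the morphological gradient is nonzero exactly on label transitions
    edge = cv2.morphologyEx(label, cv2.MORPH_GRADIENT, kernel)
    # widen the boundary to the requested band radius
    band = cv2.dilate((edge > 0).astype(np.uint8),
                      np.ones((2 * radius + 1, 2 * radius + 1), np.uint8))
    trimap = label.copy()
    trimap[band > 0] = 255
    return trimap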
Example #6
def proceed_test(args, is_densecrf=False):
    import cv2
    ds = dataset.Aerial(args.base_dir, args.meta_dir, "test")
    imglist = ds.imglist
    ds = BatchData(ds, 1)

    pred_config = PredictConfig(model=Model(),
                                session_init=get_model_loader(args.load),
                                input_names=['image'],
                                output_names=['prob'])
    predictor = OfflinePredictor(pred_config)

    from tensorpack.utils.fs import mkdir_p
    result_dir = "test-{}".format(os.path.basename(__file__).rstrip(".py"))
    import shutil
    shutil.rmtree(result_dir, ignore_errors=True)
    mkdir_p(result_dir)
    mkdir_p(os.path.join(result_dir, "compressed"))

    import subprocess

    logger.info("start validation....")
    _itr = ds.get_data()
    for i in tqdm(range(len(imglist))):
        image = next(_itr)
        name = os.path.splitext(os.path.basename(imglist[i]))[0]  # drop ".tif" safely
        image = np.squeeze(image)
        prediction = predict_scaler(image,
                                    predictor,
                                    scales=[0.9, 1, 1.1],
                                    classes=CLASS_NUM,
                                    tile_size=CROP_SIZE,
                                    is_densecrf=is_densecrf)
        prediction = np.argmax(prediction, axis=2)
        prediction = prediction * 255  # to 0-255
        file_path = os.path.join(result_dir, "{}.tif".format(name))
        compressed_file_path = os.path.join(result_dir, "compressed",
                                            "{}.tif".format(name))
        cv2.imwrite(file_path, prediction)
        command = "gdal_translate --config GDAL_PAM_ENABLED NO -co COMPRESS=CCITTFAX4 -co NBITS=1 " + file_path + " " + compressed_file_path
        print command
        subprocess.call(command, shell=True)
Example #7
def view_data(base_dir, meta_dir, batch_size):
    ds = RepeatedData(get_data('train', base_dir, meta_dir, batch_size), -1)
    ds.reset_state()
    from tensorpack.utils.fs import mkdir_p
    result_dir = "result/view"
    #result_dir = "ningbo_validation"
    mkdir_p(result_dir)
    i = 0
    for ims, labels in ds.get_data():
        for im, label in zip(ims, labels):
            #aa = visualize_label(label)
            #pass
            #cv2.imshow("im", im / 255.0)
            #cv2.imshow("raw-label", label)
            #cv2.imshow("color-label", visualize_label(label))
            cv2.imwrite(os.path.join(result_dir, "{}.png".format(i)),
                        np.concatenate((im, visualize_label(label)), axis=1))
            #cv2.waitKey(0)
            i += 1
            print(i)
Example #8
def proceed_test_dir(args):
    import cv2
    ll = os.listdir(args.test_dir)

    pred_config = PredictConfig(model=Model(),
                                session_init=get_model_loader(args.load),
                                input_names=['image'],
                                output_names=['prob'])
    predictor = OfflinePredictor(pred_config)

    from tensorpack.utils.fs import mkdir_p
    result_dir = "test-from-dir"
    visual_dir = os.path.join(result_dir, "visualization")
    final_dir = os.path.join(result_dir, "final")
    import shutil
    shutil.rmtree(result_dir, ignore_errors=True)
    mkdir_p(result_dir)
    mkdir_p(visual_dir)
    mkdir_p(final_dir)

    logger.info("start validation....")

    def mypredictor(input_img):
        # input image: 1*H*W*3
        # output : H*W*C
        output = predictor(input_img[np.newaxis, :, :, :])
        return output[0][0]

    for i in tqdm(range(len(ll))):
        filename = ll[i]
        image = cv2.imread(os.path.join(args.test_dir, filename))
        prediction = predict_scaler(image,
                                    mypredictor,
                                    scales=[0.5, 0.75, 1, 1.25, 1.5],
                                    classes=CLASS_NUM,
                                    tile_size=CROP_SIZE,
                                    is_densecrf=False)
        prediction = np.argmax(prediction, axis=2)
        cv2.imwrite(os.path.join(final_dir, "{}".format(filename)), prediction)
        cv2.imwrite(
            os.path.join(visual_dir, "{}".format(filename)),
            np.concatenate((image, visualize_label(prediction)), axis=1))
Example #9
parser.add_argument('-s', '--scale',
                    help='scale the image data (maybe by 255)', default=1, type=int)
parser.add_argument('--index',
                    help='index of the image component in datapoint',
                    default=0, type=int)
parser.add_argument('-n', '--number', help='number of images to dump',
                    default=10, type=int)
args = parser.parse_args()
logger.auto_set_dir(action='d')

get_config_func = imp.load_source('config_script', args.config).get_config
config = get_config_func()
config.dataset.reset_state()

if args.output:
    mkdir_p(args.output)
    cnt = 0
    index = args.index
    for dp in config.dataset.get_data():
        imgbatch = dp[index]
        if cnt >= args.number:
            break
        for bi, img in enumerate(imgbatch):
            cnt += 1
            fname = os.path.join(args.output, '{:03d}-{}.png'.format(cnt, bi))
            cv2.imwrite(fname, img * args.scale)

NR_DP_TEST = args.number
logger.info("Testing dataflow speed:")
ds = RepeatedData(config.dataset, -1)
with tqdm.tqdm(total=NR_DP_TEST, leave=True, unit='data points') as pbar:
    # assumed completion -- the snippet is truncated here; iterate once through
    # the repeated dataflow just to measure its speed
    for idx, dp in enumerate(ds.get_data()):
        del dp
        pbar.update()
        if idx == NR_DP_TEST - 1:
            break
Example #10
def cam(model, option, gradcam=False, flag=None):
    model_file = option.load
    data_dir = option.data
    if option.imagenet:
        valnum = 50000
    elif option.cub:
        valnum = 5794
    else:
        raise ValueError("expected either option.imagenet or option.cub to be set")

    ds = get_data('val', option)

    pred_config = PredictConfig(
        model=model,
        session_init=get_model_loader(model_file),
        input_names=['input', 'label','bbox'],
        output_names=['wrong-top1', 'top5', 'actmap', 'grad'],
        return_input=True
    )

    if option.imagenet:
        meta = Imagenet.ImagenetMeta(dir=option.data). \
                get_synset_words_1000(option.dataname)
        meta_labels = Imagenet.ImagenetMeta(dir=option.data). \
                get_synset_1000(option.dataname)
    elif option.cub:
        meta = CUB200.CUB200Meta(dir=option.data). \
                get_synset_words_1000(option.dataname)
        meta_labels = CUB200.CUB200Meta(dir=option.data). \
                get_synset_1000(option.dataname)
    pred = SimpleDatasetPredictor(pred_config, ds)

    cnt = 0
    cnt_false = 0
    hit_known = 0
    hit_top1 = 0

    index = int(option.locthr*100)

    if option.camrelu:
        dirname = os.path.join(
            'train_log', option.logdir, 'result_camrelu', str(index))
    else:
        dirname = os.path.join(
            'train_log', option.logdir, 'result_norelu', str(index))

    if not os.path.isdir(dirname):
        mkdir_p(dirname)

    for inp, outp in pred.get_result():
        images, labels, bbox = inp

        if gradcam:
            wrongs, top5, convmaps, grads_val = outp
            batch = wrongs.shape[0]
            if option.chlast:
                NUMBER, HEIGHT, WIDTH, CHANNEL = np.shape(convmaps)
            else:
                NUMBER, CHANNEL, HEIGHT, WIDTH = np.shape(convmaps)
            if not option.chlast:
                grads_val = np.transpose(grads_val, [0, 2, 3, 1])
            W = np.mean(grads_val, axis=(1, 2))
            if option.chlast:
                convmaps = np.transpose(convmaps, [0, 3, 1, 2])
        else:
            wrongs, top5, convmaps, W = outp
            batch = wrongs.shape[0]
            NUMBER, CHANNEL, HEIGHT, WIDTH = np.shape(convmaps)

        for i in range(batch):
            gxa = int(bbox[i][0][0])
            gya = int(bbox[i][0][1])
            gxb = int(bbox[i][1][0])
            gyb = int(bbox[i][1][1])

            # generating heatmap
            weight = W[i]   # c x 1

            convmap = convmaps[i, :, :, :]  # c x h x w
            mergedmap = np.matmul(weight, convmap.reshape((CHANNEL, -1))). \
                            reshape(HEIGHT, WIDTH)
            if option.camrelu: mergedmap = np.maximum(mergedmap, 0)
            mergedmap = cv2.resize(mergedmap,
                            (option.final_size, option.final_size))
            heatmap = viz.intensity_to_rgb(mergedmap, normalize=True)
            blend = images[i] * 0.5 + heatmap * 0.5

            # initialization for boundary box
            bbox_img = images[i]
            bbox_img = bbox_img.astype('uint8')
            heatmap = heatmap.astype('uint8')
            blend = blend.astype('uint8')

            # thresholding heatmap
            # For computation efficiency, we revise this part by directly using mergedmap.
            gray_heatmap = cv2.cvtColor(heatmap, cv2.COLOR_RGB2GRAY)
            th_value = np.max(gray_heatmap) * option.locthr

            _, thred_gray_heatmap = cv2.threshold(
                gray_heatmap, int(th_value), 255, cv2.THRESH_TOZERO)
            # note: the 3-value unpacking matches OpenCV 3.x;
            # OpenCV 4.x findContours returns only (contours, hierarchy)
            _, contours, _ = cv2.findContours(
                thred_gray_heatmap, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

            # calculate bbox coordinates
            rect = []
            for c in contours:
                x, y, w, h = cv2.boundingRect(c)
                rect.append([x, y, w, h])

            if not rect:
                estimated_box = [0, 0, 1, 1]  # dummy box when no contour is found
            else:
                x, y, w, h = large_rect(rect)
                estimated_box = [x, y, x + w, y + h]
                cv2.rectangle(bbox_img, (x, y), (x + w, y + h), (0, 255, 0), 2)


            cv2.rectangle(bbox_img, (gxa, gya), (gxb, gyb), (0, 0, 255), 2)
            gt_box = [gxa, gya, gxb, gyb]

            IOU_ = bb_IOU(estimated_box, gt_box)

            if IOU_ >= 0.5:
                hit_known += 1

            if IOU_ >= 0.5 and not wrongs[i]:
                hit_top1 += 1

            if wrongs[i]:
                cnt_false += 1

            concat = np.concatenate((bbox_img, heatmap, blend), axis=1)
            classname = meta[meta_labels[labels[i]]].split(',')[0]

            if cnt < 500:
                subdir = 'result_camrelu' if option.camrelu else 'result_norelu'
                cv2.imwrite(
                    'train_log/{}/{}/{}/cam{}-{}.jpg'.format(
                        option.logdir, subdir, index, cnt, classname),
                    concat)

            cnt += 1
            if cnt == valnum:
                subdir = 'result_camrelu' if option.camrelu else 'result_norelu'
                fname = 'train_log/{}/{}/{}/Loc.txt'.format(
                    option.logdir, subdir, index)
                acc_known = hit_known / cnt
                acc_top1 = hit_top1 / cnt
                top1_acc = 1 - cnt_false / cnt
                if option.camrelu:
                    print("\nGRADCAM (use relu)")
                else:
                    print("\nCAM (do not use relu)")
                print('Flag: {}\nCAM Threshold: {}\nGT-known Loc: {}'
                      '\nTop-1 Loc: {}\nTop-1 Acc: {}'.format(
                          flag, option.locthr, acc_known, acc_top1, top1_acc))
                with open(fname, 'w') as f:
                    f.write('GT-known Loc: {}\nTop-1 Loc: {}\nTop-1 Acc: {}'.format(
                        acc_known, acc_top1, top1_acc))
                return
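The localization accounting above hinges on `bb_IOU`. A minimal sketch consistent with how it is called here, assuming corner-format [x1, y1, x2, y2] boxes (an assumption, since its definition is not shown):

def bb_IOU(boxA, boxB):
    # intersection rectangle between the two boxes
    xa, ya = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
    xb, yb = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])
    inter = max(0, xb - xa) * max(0, yb - ya)
    areaA = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    areaB = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    return inter / float(areaA + areaB - inter)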
Example #11
def predict_unlabeled(model,
                      model_path,
                      nr_visualize=100,
                      output_dir='output_patch_samples'):
    """Predict the pseudo label information of unlabeled data."""

    assert cfg.EVAL.PSEUDO_INFERENCE, 'set cfg.EVAL.PSEUDO_INFERENCE=True'
    df, dataset_size = get_eval_unlabeled_dataflow(cfg.DATA.TRAIN,
                                                   return_size=True)
    df.reset_state()
    predcfg = PredictConfig(
        model=model,
        session_init=SmartInit(model_path),
        input_names=['image'],  # ['image', 'gt_boxes', 'gt_labels'],
        output_names=[
            'generate_{}_proposals/boxes'.format(
                'fpn' if cfg.MODE_FPN else 'rpn'),
            'generate_{}_proposals/scores'.format(
                'fpn' if cfg.MODE_FPN else 'rpn'),
            'fastrcnn_all_scores',
            'output/boxes',
            'output/scores',  # score of the labels
            'output/labels',
        ])
    pred = OfflinePredictor(predcfg)

    if os.path.isdir(output_dir):
        if os.path.isfile(os.path.join(output_dir, 'pseudo_data.npy')):
            os.remove(os.path.join(output_dir, 'pseudo_data.npy'))
        if not os.path.isdir(os.path.join(output_dir, 'vis')):
            os.makedirs(os.path.join(output_dir, 'vis'))
        else:
            shutil.rmtree(os.path.join(output_dir, 'vis'))
            fs.mkdir_p(output_dir + '/vis')
    else:
        fs.mkdir_p(output_dir)
        fs.mkdir_p(output_dir + '/vis')
    logger.warning('-' * 100)
    logger.warning('Write to {}'.format(output_dir))
    logger.warning('-' * 100)

    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df), nr_visualize):
            img, img_id = dp  # dp['image'], dp['img_id']
            rpn_boxes, rpn_scores, all_scores, \
                final_boxes, final_scores, final_labels = pred(img)
            outs = {
                'proposals_boxes': rpn_boxes,  # (?,4)
                'proposals_scores': rpn_scores,  # (?,)
                'boxes': final_boxes,
                'scores': final_scores,
                'labels': final_labels
            }
            ratios = [10, 10]  # [top 20% as background, bottom 20% as background]
            bg_ind, fg_ind = custom.find_bg_and_fg_proposals(all_scores,
                                                             ratios=ratios)

            bg_viz = draw_predictions(img, rpn_boxes[bg_ind],
                                      all_scores[bg_ind])

            fg_viz = draw_predictions(img, rpn_boxes[fg_ind],
                                      all_scores[fg_ind])

            results = [
                DetectionResult(*args)
                for args in zip(final_boxes, final_scores, final_labels,
                                [None] * len(final_labels))
            ]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches([bg_viz, fg_viz, final_viz], 2, 2)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            assert cv2.imwrite('{}/vis/{:03d}.png'.format(output_dir, idx),
                               viz)
            pbar.update()
    logger.info('Write {} samples to {}'.format(nr_visualize, output_dir))

    # Run parallel inference over the whole unlabeled data
    pseudo_preds = collections.defaultdict(list)

    num_tower = max(cfg.TRAIN.NUM_GPUS, 1)
    graph_funcs = MultiTowerOfflinePredictor(predcfg, list(
        range(num_tower))).get_predictors()
    dataflows = [
        get_eval_unlabeled_dataflow(cfg.DATA.TRAIN,
                                    shard=k,
                                    num_shards=num_tower)
        for k in range(num_tower)
    ]

    all_results = multithread_predict_dataflow(dataflows, graph_funcs)

    for result in tqdm.tqdm(all_results):
        img_id = result['image_id']
        outs = {
            'proposals_boxes':
            result['proposal_box'].astype(np.float16),  # (?,4)
            'proposals_scores':
            result['proposal_score'].astype(np.float16),  # (?,)
            # 'frcnn_all_scores': result['frcnn_score'].astype(np.float16),
            'boxes': result['bbox'].astype(np.float16),  # (?,4)
            'scores': result['score'].astype(np.float16),  # (?,)
            'labels': result['category_id'].astype(np.float16)  # (?,)
        }
        pseudo_preds[img_id] = outs
    logger.warning('Writing to {}'.format(
        os.path.join(output_dir, 'pseudo_data.npy')))
    try:
        dd.io.save(os.path.join(output_dir, 'pseudo_data.npy'), pseudo_preds)
    except RuntimeError:
        logger.error('Save failed. Check reasons manually...')
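`dd.io.save` writes the pseudo labels as an HDF5 file via deepdish (the `dd` alias is inferred from the call above). A minimal sketch of reading the result back, assuming that import:

import deepdish as dd

pseudo = dd.io.load('output_patch_samples/pseudo_data.npy')
for img_id, outs in pseudo.items():
    print(img_id, outs['boxes'].shape, outs['scores'].shape)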
Example #12
# Author: Yuxin Wu <*****@*****.**>

import glob
import os
import sys

import cv2
import numpy as np

from tensorpack.utils.fs import mkdir_p
from runner import get_runner, get_parallel_runner
from model import colorize
from calibr import load_camera_from_calibr

if __name__ == '__main__':
    indir = sys.argv[1]  # renamed from `dir` to avoid shadowing the builtin
    undistdir = sys.argv[2]
    outdir = sys.argv[3]
    mkdir_p(undistdir)
    mkdir_p(outdir)
    runner, _ = get_runner('../data/cpm.npy')

    C0, C1, d0, d1 = load_camera_from_calibr(
        '../calibr-1211/camchain-homeyihuaDesktopCPM3D_kalibrfinal3.yaml')
    for f in sorted(glob.glob(os.path.join(indir, '*.jpg'))):
        im = cv2.imread(f, cv2.IMREAD_COLOR)

        im = cv2.undistort(im, C0.K, d0)

        cv2.imwrite(os.path.join(undistdir, os.path.basename(f)), im)

        im = cv2.resize(im, (368, 368))
        out = runner(im)
        np.save(os.path.join(outdir, os.path.basename(f)), out)
Example #13
def main():
    parser = argparse.ArgumentParser(description='CortexML')

    parser.add_argument("--arch",
                        type=str,
                        default="small",
                        help="Architecture to use")
    parser.add_argument("--dataset",
                        type=str,
                        default="MNIST",
                        help="The dataset you want to use",
                        choices=datasets.DATASETS.keys())
    parser.add_argument("--epochs",
                        type=int,
                        default=None,
                        help="Number of training epochs")
    parser.add_argument("--quant",
                        type=str,
                        default="affine",
                        choices=["affine", "symmetric", "none"])
    parser.add_argument('--no-cache', action='store_true')
    parser.add_argument("--weight-clusters", type=int, default=0)
    parser.add_argument("--batch",
                        type=int,
                        default=32,
                        help="Batch size per GPU to use")
    parser.add_argument("--checkpoint-dir", type=str, default="./checkpoints")
    parser.add_argument("--log-dir", type=str, default="./train_log")
    parser.add_argument("--lr",
                        type=str,
                        default=None,
                        help="Learning rate or learning rate schedule")
    parser.add_argument(
        "--post-quantize",
        action='store_true',
        help="Load a pretrained model and quantize it for the specified number of epochs")
    args = parser.parse_args()

    logger.set_logger_dir(args.log_dir, action='n')
    fs.mkdir_p(args.checkpoint_dir)

    # 1. Train the model (if it doesn't exist, or if retraining was requested)
    if (args.no_cache or args.post_quantize
            or not os.path.exists(args.checkpoint_dir + "/checkpoint")):
        print("Training model (no cached checkpoint, --no-cache, or --post-quantize)...")
        train(args.checkpoint_dir,
              model_name=args.arch,
              dataset=args.dataset,
              num_epochs=args.epochs,
              quant_type=args.quant,
              batch_size_per_gpu=args.batch,
              lr=args.lr,
              post_quantize_only=args.post_quantize)
        print("Model training complete.")
        K.clear_session()
    else:
        print(
            "Using a model in {}. Delete the corresponding folder to start afresh."
            .format(args.checkpoint_dir))

    if args.quant == "none":
        return

    # 2. Evaluate the model after training
    # TODO

    # 3. Convert quantized model into a simplified protobuf graphdef
    protobuf_file = args.checkpoint_dir + "/compact_graph.pb"
    print("Exporting quantized model as a protobuf file:", protobuf_file)
    predictor_tensors = export_eval_protobuf_model(args.checkpoint_dir,
                                                   model_name=args.arch,
                                                   dataset=args.dataset,
                                                   quant_type=args.quant,
                                                   output_file=protobuf_file,
                                                   batch_size=args.batch)
    inputs, outputs, input_shapes = predictor_tensors

    # 4. Convert the model into tflite format
    tflite_model_file = args.checkpoint_dir + "/quantized_model.tflite"
    print("Converting quantized model into a tflite file:", tflite_model_file)
    create_tflite_model(protobuf_file, inputs, outputs, input_shapes,
                        tflite_model_file)
    if args.weight_clusters > 0:
        cluster_tflite_model_weights(tflite_model_file, args.weight_clusters)
        tflite_model_file = args.checkpoint_dir + "/clustered_quantized_model.tflite"

    evaluate_tflite_model(tflite_model_file, dataset=args.dataset)
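`evaluate_tflite_model` is project-specific, but running the exported file boils down to TensorFlow's TFLite Interpreter. A minimal sketch with a zero batch standing in for real data (the path follows the script above; this is an illustration, not the project's evaluator):

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="./checkpoints/quantized_model.tflite")
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]
# feed a dummy batch with the model's expected shape and dtype
interpreter.set_tensor(inp['index'], np.zeros(inp['shape'], dtype=inp['dtype']))
interpreter.invoke()
print(interpreter.get_tensor(out['index']).shape)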
Example #14
 def __init__(self, dir=None):
     self.dir = dir
     mkdir_p(self.dir)
     f = os.path.join(self.dir, 'synsets.txt')
     self.caffepb = None
Example #15
def get_log_dir(option):
    threshold_idx = int(option.cam_threshold * 100)
    dirname = ospj('train_log', option.log_dir, 'result', str(threshold_idx))
    if not os.path.isdir(dirname):
        mkdir_p(dirname)
    return dirname, threshold_idx
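A hypothetical usage sketch -- `option` only needs the two attributes read above, and `ospj` is assumed to alias `os.path.join`:

from argparse import Namespace

option = Namespace(cam_threshold=0.2, log_dir='resnet_cam')
dirname, threshold_idx = get_log_dir(option)
# dirname == 'train_log/resnet_cam/result/20', threshold_idx == 20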