Example #1
	def get_templates(self, dir):
		'''
		templates				= EasyDict holding the collected template data
		templates.files			= list of template file paths
		templates.filenames		= list of template file names
		templates.folderfiles	= list of template paths relative to dir
		templates.transferNames	= list of transfer names derived from those relative paths
		'''
		templates = EasyDict()
		templates_files = []
		templates_filenames = []
		templates_folders = []
		templates_folderfiles = []
		templates_transferNames = []

		for root, dirs, filenames in os.walk(dir):
			for filename in filenames:
				if self.is_template(filename):
					path = os.path.join(root, filename)
					folderfile = path[(len(dir)+1):len(path)]
					transferNames = folderfile.replace("/", "_").replace("_", "-")

					templates_files.append(path)
					templates_filenames.append(filename)
					templates_folderfiles.append(folderfile)
					templates_transferNames.append(transferNames)

		templates.files = templates_files
		templates.filenames = templates_filenames
		templates.folderfiles = templates_folderfiles
		templates.transferNames = templates_transferNames
		
		return templates
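
A minimal usage sketch (the theme object and directory path here are hypothetical, assumed to expose the get_templates method above); the returned EasyDict allows attribute access instead of key lookups:

templates = theme.get_templates("/path/to/theme")
for path, transfer_name in zip(templates.files, templates.transferNames):
    print(transfer_name, "->", path)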
Example #2
def main():
    args = parser()
    update_config(args.cfg)

    # Use just the first GPU for demo
    context = [mx.gpu(int(config.gpus[0]))]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Get image dimensions
    width, height = Image.open(args.im_path).size

    # Pack image info
    roidb = [{'image': args.im_path, 'width': width, 'height': height, 'flipped': False}]

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)

    # Create the model and initialize the weights
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix, config.TEST.TEST_EPOCH,
                                        convert=True, process=True)

    # Get the symbol definition
    sym_def = eval('{}.{}'.format(config.symbol, config.symbol))

    # Pack db info
    db_info = EasyDict()
    db_info.name = 'coco'
    db_info.result_path = 'data/demo'

    # Categories the detector was trained for:
    db_info.classes = [u'BG', u'person', u'bicycle', u'car', u'motorcycle', u'airplane',
                       u'bus', u'train', u'truck', u'boat', u'traffic light', u'fire hydrant',
                       u'stop sign', u'parking meter', u'bench', u'bird', u'cat', u'dog', u'horse', u'sheep', u'cow',
                       u'elephant', u'bear', u'zebra', u'giraffe', u'backpack', u'umbrella', u'handbag', u'tie',
                       u'suitcase', u'frisbee', u'skis', u'snowboard', u'sports ball', u'kite', u'baseball bat',
                       u'baseball glove', u'skateboard', u'surfboard', u'tennis racket', u'bottle', u'wine glass',
                       u'cup', u'fork', u'knife', u'spoon', u'bowl', u'banana', u'apple', u'sandwich', u'orange',
                       u'broccoli', u'carrot', u'hot dog', u'pizza', u'donut', u'cake', u'chair', u'couch',
                       u'potted plant', u'bed', u'dining table', u'toilet', u'tv', u'laptop', u'mouse', u'remote',
                       u'keyboard', u'cell phone', u'microwave', u'oven', u'toaster', u'sink', u'refrigerator', u'book',
                       u'clock', u'vase', u'scissors', u'teddy bear', u'hair drier', u'toothbrush']
    db_info.num_classes = len(db_info.classes)

    # Perform detection for each scale in parallel
    p_args = []
    for s in config.TEST.SCALES:
        p_args.append([s, context, config, sym_def, roidb, db_info, arg_params, aux_params])
    pool = Pool(len(config.TEST.SCALES))
    all_detections = pool.map(scale_worker, p_args)

    tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
    all_detections = tester.aggregate(all_detections, vis=True, cache_name=None, vis_path='./data/demo/',
                                          vis_name='demo_detections')
Example #3
def index():
    ctx = EasyDict()
    ctx.para = tp.generate_paragraph()

    degrade_res = tp.degrade_latin(ctx.para)

    ctx.para_rendered = degrade_res.html_representation

    ctx.para_mod = degrade_res.text

    return render_template_string('''
<style>
/* text diff styling */
div.box {
    width: 600px;
    border: 1px solid gray;
    padding: 0.5em;
}

span.altered {
    background: orange;
    text-decoration: underline;
    color: green;
}

span.deleted {
    text-decoration: line-through;
    color: red;
}

</style>

<table class="box">
<tr>
<td>
{{ para }}

</td>

<td>
{{ para_mod }}

</td>

<td>
{{ para_rendered|safe }}

</td>
</tr>
</table>
    
''', **ctx)
Example #4
    def build(self, **opts):
        """
        Build items filter.

        :rtype: ItemsFilter
        """
        self.opts = EasyDict(opts)
        self.built_list = self._build()
        return self
Example #5
    def get_templates(self, dir):
        """
		templates				= EasyDict holding the collected template data
		templates.files			= list of template file paths
		templates.filenames		= list of template file names
		templates.folderfiles	= list of template paths relative to dir
		templates.transferNames	= list of transfer names derived from those relative paths
		"""
        templates = EasyDict()
        templates_files = []
        templates_filenames = []
        templates_folders = []
        templates_folderfiles = []
        templates_transferNames = []

        for root, dirs, filenames in os.walk(dir):
            for filename in filenames:
                if self.is_template(filename):
                    # if root == dir:
                    # 	folder = "root"
                    # else:
                    # 	folder = os.path.basename(root).replace("/", "_").replace("_", "-")

                    # print "%s -------------- root = %s ------------ DIR = %s -------- Folder = %s" % (filename, root, dir, folder)
                    path = os.path.join(root, filename)
                    folderfile = path[(len(dir) + 1) : len(path)]
                    transferNames = folderfile.replace("/", "_").replace("_", "-")

                    templates_files.append(path)
                    templates_filenames.append(filename)
                    # templates_folders.append(folder)
                    templates_folderfiles.append(folderfile)
                    templates_transferNames.append(transferNames)

        templates.files = templates_files
        templates.filenames = templates_filenames
        # templates.folders = templates_folders
        templates.folderfiles = templates_folderfiles
        templates.transferNames = templates_transferNames

        return templates
Example #6
    def setUp(self):
        super(RCNNProposalTest, self).setUp()

        self._num_classes = 3
        self._image_shape = (900, 1440)
        self._config = EasyDict({
            'class_max_detections': 100,
            'class_nms_threshold': 0.6,
            'total_max_detections': 300,
            'min_prob_threshold': 0.0,
        })

        self._equality_delta = 1e-03

        self._shared_model = RCNNProposal(self._num_classes, self._config)
        tf.reset_default_graph()
Example #7
	def get_templates(self, dir):
		'''
		templates					= EasyDict holding the collected template data
		templates.files				= list of template file paths
		templates.filenames			= list of template file names
		templates.folders			= list of containing folder names ("root" for the top level)
		templates.basepaths			= list of template paths relative to dir
		templates.transferNamesUS	= list of relative paths with "/" replaced by "_"
		templates.transferNames		= list of transfer names with "_" replaced by "-"
		'''
		templates = EasyDict()
		templates_files = []
		templates_filenames = []
		templates_folders = []
		templates_basepaths = []
		templates_transfer_filename_underscores = []
		templates_transfer_filename_dashes = []

		for root, dirs, filenames in os.walk(dir):
			for filename in filenames:
				if root == self.THEME_DIR:
					folder = "root"
				else:
					folder = os.path.basename(root).replace("/", "-")
				path = os.path.join(root, filename)
				basepath = path[(len(dir)+1):len(path)]
				transfer_filename_underscore = basepath.replace("/", "_")
				transfer_filename_dash = transfer_filename_underscore.replace("_", "-")

				if self.is_template(filename):
					templates_files.append(path)
					templates_filenames.append(filename)
					templates_folders.append(folder)
					templates_basepaths.append(basepath)
					templates_transfer_filename_underscores.append(transfer_filename_underscore)
					templates_transfer_filename_dashes.append(transfer_filename_dash)

		templates.files = templates_files
		templates.folders = templates_folders
		templates.basepaths = templates_basepaths

		templates.filenames = templates_filenames
		templates.transferNamesUS = templates_transfer_filename_underscores
		templates.transferNames = templates_transfer_filename_dashes
		
		return templates
Example #8
	def get_templates(self, dir):
		'''
		templates							= EasyDict holding the collected template data
		templates.files						= list of .html template file paths
		templates.filenames					= list of template file names
		templates.folders					= list of containing folder names
		templates.basepaths					= list of template paths relative to dir
		templates.transferNameUnderscores	= list of relative paths with "/" replaced by "_"
		templates.transferName				= list of transfer names with "_" replaced by "-"
		'''
		templates = EasyDict()
		templates_files = []
		templates_filenames = []
		templates_folders = []
		templates_basepaths = []
		templates_transfer_filename_underscores = []
		templates_transfer_filename_dashes = []

		for root, dirs, filenames in os.walk(dir):
			for filename in filenames:
				folder = os.path.basename(root)
				path = os.path.join(root, filename)
				basepath = path[(len(dir)+1):len(path)]
				transfer_filename_underscore = basepath.replace("/", "_")
				transfer_filename_dash = transfer_filename_underscore.replace("_", "-")

				if filename.lower().endswith('.html'):
					templates_files.append(path)
					templates_filenames.append(filename)
					templates_folders.append(folder)
					templates_basepaths.append(basepath)
					templates_transfer_filename_underscores.append(transfer_filename_underscore)
					templates_transfer_filename_dashes.append(transfer_filename_dash)

		templates.files = templates_files
		templates.filenames = templates_filenames
		templates.folders = templates_folders
		templates.basepaths = templates_basepaths
		templates.transferNameUnderscores = templates_transfer_filename_underscores
		templates.transferName = templates_transfer_filename_dashes
		
		return templates
Example #9
File: b.py Project: liuwei16/demo
        elif self.reduction == 'mean':
            return torch.mean(loss)
        else:
            raise ValueError(
                'unrecognized option, expect reduction to be one of none, mean, sum'
            )


# timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
logger = Logger(log_file_name=args.work_path + '/logs_18_cos200_lsr.txt',
                log_level=logging.DEBUG,
                logger_name="CIFAR").get_log()
with open(args.work_path + '/config.yaml') as f:
    config = yaml.load(f, Loader=yaml.FullLoader)
# print(type(config))
config = EasyDict(config)
# print(type(config))
# logger.info(config)
net = get_model(config)
# print(count_parameters(net))
device = 'cuda' if config.use_gpu else 'cpu'
net.to(device)
# define loss and optimizer
# criterion = nn.CrossEntropyLoss()
criterion = LSR()
optimizer = torch.optim.SGD(net.parameters(),
                            config.lr_scheduler.base_lr,
                            momentum=config.optimize.momentum,
                            weight_decay=config.optimize.weight_decay,
                            nesterov=config.optimize.nesterov)
# resume from a checkpoint
Example #10
class RCNNProposalTest(tf.test.TestCase):

    def setUp(self):
        super(RCNNProposalTest, self).setUp()

        self._num_classes = 3
        self._image_shape = (900, 1440)
        self._config = EasyDict({
            'class_max_detections': 100,
            'class_nms_threshold': 0.6,
            'total_max_detections': 300,
            'min_prob_threshold': 0.0,
        })

        self._equality_delta = 1e-03

        self._shared_model = RCNNProposal(self._num_classes, self._config)
        tf.reset_default_graph()

    def _run_rcnn_proposal(self, model, proposals, bbox_pred, cls_prob,
                           image_shape=None):
        if image_shape is None:
            image_shape = self._image_shape
        rcnn_proposal_net = model(proposals, bbox_pred, cls_prob, image_shape)
        with self.test_session() as sess:
            return sess.run(rcnn_proposal_net)

    def _compute_tf_graph(self, graph):
        with self.test_session() as sess:
            return sess.run(graph)

    def _get_bbox_pred(self, proposed_boxes, gt_boxes_per_class):
        """Computes valid bbox_pred from proposals and gt_boxes for each class.

        Args:
            proposed_boxes: Tensor with shape (num_proposals, 5).
            gt_boxes_per_class: Tensor holding the ground truth boxes for each
                class. Has shape (num_classes, num_gt_boxes_per_class, 4).

        Returns:
            A tensor with shape (num_proposals, num_classes * 4), holding the
            correct bbox_preds.
        """

        def bbox_encode(gt_boxes):
            return encode(
                proposed_boxes, gt_boxes
            )
        bbox_pred_tensor = tf.map_fn(
            bbox_encode, gt_boxes_per_class,
            dtype=tf.float32
        )
        # We need to explicitly unstack the tensor so that tf.concat works
        # properly.
        bbox_pred_list = tf.unstack(bbox_pred_tensor)
        return tf.concat(bbox_pred_list, 1)

    def _check_proposals_are_clipped(self, proposals, image_shape):
        """Asserts that no proposals exceed the image boundaries.
        """
        for proposal in proposals:
            self.assertLess(proposal[0], image_shape[1])
            self.assertLess(proposal[1], image_shape[0])
            self.assertLess(proposal[2], image_shape[1])
            self.assertLess(proposal[3], image_shape[0])
            for i in range(4):
                self.assertGreaterEqual(proposal[i], 0)

    def testBackgroundFilter(self):
        """Tests that we're not returning an object when a proposal is background.

        This includes two sub-tests. One case in which there is a foreground
        proposal, and one in which all proposals are background. We use the
        same proposed_boxes and gt_boxes, but change the cls_prob.
        """

        proposed_boxes = tf.constant([
            (85, 500, 730, 590),
            (50, 500, 70, 530),
            (700, 570, 740, 598),
        ])
        gt_boxes_per_class = tf.constant([
            [(101, 101, 201, 249)],
            [(200, 502, 209, 532)],
            [(86, 571, 743, 599)],
        ])
        bbox_pred = self._get_bbox_pred(proposed_boxes, gt_boxes_per_class)
        cls_prob_one_foreground = tf.constant([
            (0., .3, .3, .4),
            (1., 0., 0., 0.),
            (.35, .3, .2, .15),
        ])
        cls_prob_all_background = tf.constant([
            (.4, 0., .3, .3),
            (.8, .1, .1, 0.),
            (.7, .05, .2, .05),
        ])

        proposal_prediction_one_foreground = self._run_rcnn_proposal(
            self._shared_model,
            proposed_boxes,
            bbox_pred,
            cls_prob_one_foreground,
        )
        proposal_prediction_all_background = self._run_rcnn_proposal(
            self._shared_model,
            proposed_boxes,
            bbox_pred,
            cls_prob_all_background,
        )
        # Assertion for 'one foreground' case.
        # This assertion has two purposes:
        #     1. checking that we only get one object.
        #     2. checking that that object has the same box as class 2.
        # We take this to mean we're correctly ignoring the two proposals
        # where 'background' is the highest probability class.
        self.assertAllClose(
            proposal_prediction_one_foreground['objects'],
            self._compute_tf_graph(gt_boxes_per_class)[2],
            atol=self._equality_delta
        )
        # Assertion for 'all background' case.
        self.assertEqual(
            len(proposal_prediction_all_background['objects']), 0
        )

    def testNMSFilter(self):
        """Tests that we're applying NMS correctly.
        """

        # The first two boxes have a very high IoU between them. One of them
        # should be filtered by the NMS filter.
        proposed_boxes = tf.constant([
            (85, 500, 730, 590),
            (50, 500, 740, 570),
            (700, 570, 740, 598),
        ])
        gt_boxes_per_class = tf.constant([
            [(101, 101, 201, 249)],
            [(200, 502, 209, 532)],
            [(86, 571, 743, 599)],
        ])
        bbox_pred = self._get_bbox_pred(proposed_boxes, gt_boxes_per_class)
        cls_prob = tf.constant([
            (0., .1, .3, .6),
            (.1, .2, .25, .45),
            (.2, .3, .25, .25),
        ])

        proposal_prediction = self._run_rcnn_proposal(
            self._shared_model,
            proposed_boxes,
            bbox_pred,
            cls_prob,
        )
        labels = proposal_prediction['proposal_label']

        # Assertions
        self.assertEqual(proposal_prediction['objects'].shape[0], 2)
        self.assertIn(0, labels)
        self.assertIn(2, labels)

    def testImageClipping(self):
        """Tests that we're clipping images correctly.

        We test two image shapes, (1440, 900) and (900, 1440). Note we pass
        shapes as (height, width).
        """

        proposed_boxes = tf.constant([
            (1300, 800, 1435, 870),
            (10, 1, 30, 7),
            (2, 870, 80, 898),
        ])
        gt_boxes_per_class = tf.constant([
            [(1320, 815, 1455, 912)],
            [(5, -8, 31, 8)],
            [(-120, 910, 78, 1040)],
        ])
        bbox_pred = self._get_bbox_pred(proposed_boxes, gt_boxes_per_class)
        cls_prob = tf.constant([
            (0., 1., 0., 0.),
            (.2, .25, .3, .25),
            (.45, 0., 0., .55),
        ])

        shape1 = (1440, 900)
        shape2 = (900, 1440)

        proposal_prediction_shape1 = self._run_rcnn_proposal(
            self._shared_model,
            proposed_boxes,
            bbox_pred,
            cls_prob,
            image_shape=shape1,
        )
        proposal_prediction_shape2 = self._run_rcnn_proposal(
            self._shared_model,
            proposed_boxes,
            bbox_pred,
            cls_prob,
            image_shape=shape2,
        )
        # Assertions
        self._check_proposals_are_clipped(
            proposal_prediction_shape1['objects'],
            shape1,
        )
        self._check_proposals_are_clipped(
            proposal_prediction_shape2['objects'],
            shape2,
        )

    def testBboxPred(self):
        """Tests that we're using bbox_pred correctly.
        """

        proposed_boxes = tf.constant([
            (200, 315, 400, 370),
            (56, 0, 106, 4),
            (15, 15, 20, 20),
        ])

        gt_boxes_per_class = tf.constant([
            [(0, 0, 1, 1)],
            [(5, 5, 10, 10)],
            [(15, 15, 20, 20)],
        ])
        bbox_pred = self._get_bbox_pred(proposed_boxes, gt_boxes_per_class)

        cls_prob = tf.constant([
            (0., 1., 0., 0.),
            (.2, .25, .3, .25),
            (.45, 0., 0., .55),
        ])

        proposal_prediction = self._run_rcnn_proposal(
            self._shared_model,
            proposed_boxes,
            bbox_pred,
            cls_prob,
        )

        objects = self._compute_tf_graph(
            tf.squeeze(gt_boxes_per_class, axis=1)
        )
        # We need to sort the objects by `cls_prob` from high to low score.
        cls_prob = self._compute_tf_graph(cls_prob)
        # Ignoring background prob get the reverse argsort for the max of each
        # object.
        decreasing_idxs = cls_prob[:, 1:].max(axis=1).argsort()[::-1]
        # Sort by indexing.
        objects_sorted = objects[decreasing_idxs]

        self.assertAllClose(
            proposal_prediction['objects'],
            objects_sorted,
            atol=self._equality_delta
        )

    def testLimits(self):
        """Tests that we're respecting the limits imposed by the config.
        """

        limits_config = self._config.copy()
        limits_config['class_max_detections'] = 2
        limits_config['total_max_detections'] = 3
        limits_config = EasyDict(limits_config)
        limits_num_classes = 2
        limits_model = RCNNProposal(limits_num_classes, limits_config)

        proposed_boxes = tf.constant([
            (0, 0, 1, 1),  # class 0
            (5, 5, 10, 10),  # class 1
            (15, 15, 20, 20),  # class 1
            (25, 25, 30, 30),  # class 0
            (35, 35, 40, 40),
            (38, 40, 65, 65),
            (70, 50, 90, 90),  # class 0
            (95, 95, 100, 100),
            (105, 105, 110, 110),  # class 1
        ])
        # All zeroes for our bbox_pred.
        bbox_pred = tf.constant([[0.] * limits_num_classes * 4] * 9)
        cls_prob = tf.constant([
            (0., 1., 0.),
            (0., .2, .8),
            (0., .45, .55),
            (0., .55, .45),
            (1., 0., 0.),
            (1., 0., 0.),
            (0., .95, .05),
            (1., 0., 0.),
            (0., .495, .505),
        ])

        proposal_prediction = self._run_rcnn_proposal(
            limits_model,
            proposed_boxes,
            bbox_pred,
            cls_prob,
        )
        labels = proposal_prediction['proposal_label']
        num_class0 = labels[labels == 0].shape[0]
        num_class1 = labels[labels == 1].shape[0]

        self.assertLessEqual(num_class0, limits_config.class_max_detections)
        self.assertLessEqual(num_class1, limits_config.class_max_detections)
        num_total = labels.shape[0]
        self.assertLessEqual(num_total, limits_config.total_max_detections)
Example #11
def annotate():
    ctx = EasyDict()
    ctx.text = document_text_list[2]

    return render_template('annotate.html', **ctx)
Example #12
import torch
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
from easydict import EasyDict
from _main_base import main
import os

#---
# config
#---
cfg = EasyDict()

# class
cfg.CLASS_LABEL = ['akahara', 'madara']
cfg.CLASS_NUM = len(cfg.CLASS_LABEL)

# model
cfg.INPUT_HEIGHT = 64
cfg.INPUT_WIDTH = 64
cfg.INPUT_CHANNEL = 3

cfg.GPU = False
cfg.DEVICE = torch.device(
    "cuda" if cfg.GPU and torch.cuda.is_available() else "cpu")

cfg.MODEL_SAVE_PATH = 'models/ZFNet_{}.pt'
cfg.MODEL_SAVE_INTERVAL = 200
cfg.ITERATION = 1000
cfg.MINIBATCH = 8
cfg.OPTIMIZER = torch.optim.SGD
Example #13
    def __init__(self, **opts):
        self.opts = EasyDict(opts)
        self._client = None
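
A minimal usage sketch, assuming this is the same constructor as the CuratorInvoke class in Example #27 and that options such as host and port are among the keyword arguments (the values below are hypothetical):

invoker = CuratorInvoke(host='localhost', port=9200, use_ssl=False)
print(invoker.opts.host, invoker.opts.port)  # EasyDict exposes the keyword options as attributes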
Example #14
import numpy as np
from easydict import EasyDict

__C = EasyDict()
cfg = __C

__C.TRAIN = EasyDict()
__C.TEST = EasyDict()

__C.ANCHORS = [(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892),
               (9.47112, 4.84053), (11.2364, 10.0071)]

__C.STRIDE = 32

__C.CLASS_NUM = 20

__C.SATURATION = 1.5
__C.EXPOSURE = 1.5
__C.HUE = 0.1

__C.JITTER = 0.3

__C.INPUT_SIZE = (416, 416)
__C.TEST_SIZE = (416, 416)

__C.DEBUG = False

####################################
# LOSS
####################################
__C.COORD_SCALE = 1.0
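
A minimal usage sketch, assuming the module above is importable as config.py; the __C/cfg aliasing lets other modules import and read (or extend) the same shared EasyDict:

from config import cfg

print(cfg.STRIDE, cfg.INPUT_SIZE)  # attribute access on the shared config
cfg.TRAIN.BATCH_SIZE = 16          # hypothetical field added to the (initially empty) TRAIN section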
Example #15
import numpy as np
from easydict import EasyDict
import random

opt = EasyDict()
'''
the range of the side length of the block resonator
Please fill the range here.
'''
opt.a_range = [50, 100]
'''
sep: from 1/80 a  to 1/5 a
'''
opt.gap_min_ratio = 1.0 / 80
opt.gap_max_ratio = 1.0 / 5

opt.gap_min_ratio_tight = 1.0 / 50
opt.gap_max_ratio_tight = 1.0 / 20
'''
shift: three level of shift [0.5, 0.2, 0.0] * a'''
opt.shift_range = [0.5, 0.2, 0.0]

opt.w = 6
opt.gap_len = 2.4

DATA_CONST = opt


# ======================================================================================================================
def log_uniform(l, r):
    l, r = np.log(l), np.log(r)
Example #16
    from trainers.utils import Logger, bcolors, logits_to_onehot, init_weights

    with open(args.model_config_file) as f:
        model_config = json.load(f)
    module = importlib.import_module(args.model_module)
    model = getattr(module, "get_model")(model_config).to(args.device)

    # load pretrained weights
    checkpoint_data = torch.load(args.model_weights_file)
    model.load_state_dict(checkpoint_data["model"])
    model.eval()

    # load dataset
    data_config = EasyDict(
        dict(data_root=args.data_root,
             label_map_file=args.label_map_file,
             augment_data=None,
             preprocess=None,
             target_size=(256, 256)))
    val_dataset = StandardDataset(data_config, split="val")
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.num_workers)

    # evaluation
    progress_bar = tqdm(enumerate(val_loader), total=len(val_loader))
    metric_progress = []
    for step, batch in progress_bar:
        images = batch["image"].to(args.device)
        masks, onehot_masks = batch["mask"].to(
            args.device), batch["onehot_mask"].to(args.device)
Example #17
"""Network parameters."""
from easydict import EasyDict

pretrain_config = EasyDict({
    # LR
    "base_lr": 0.0009,
    "warmup_step": 30000,
    "warmup_ratio": 1 / 3.0,
    "total_epoch": 10,
})
finetune_config = EasyDict({
    # LR
    "base_lr": 0.0005,
    "warmup_step": 300,
    "warmup_ratio": 1 / 3.0,
    "total_epoch": 50,
})

# use for low case number
config = EasyDict({
    "img_width":
    960,
    "img_height":
    576,
    "keep_ratio":
    False,
    "flip_ratio":
    0.0,
    "photo_ratio":
    0.0,
    "expand_ratio":
Example #18
def main():
    global args, config

    args = parser.parse_args()

    with open(args.config) as rPtr:
        config = EasyDict(yaml.load(rPtr, Loader=yaml.FullLoader))

    config.save_path = os.path.dirname(args.config)

    # Random seed
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    np.random.seed(config.seed)
    random.seed(config.seed)

    # Datasets
    train_transform = transforms.Compose([
        transforms.RandomCrop((32, 32), padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262))
    ])
    val_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.491, 0.482, 0.447), (0.247, 0.243, 0.262))
    ])

    trainset = Datasets.CIFAR10(root='data',
                                train=True,
                                download=True,
                                transform=train_transform)
    trainloader = Data.DataLoader(trainset,
                                  batch_size=config.batch_size,
                                  shuffle=True,
                                  num_workers=config.workers)

    testset = Datasets.CIFAR10(root='data',
                               train=False,
                               download=True,
                               transform=val_transform)
    testloader = Data.DataLoader(testset,
                                 batch_size=config.batch_size,
                                 shuffle=False,
                                 num_workers=config.workers)

    # Model
    model = resnet32()
    model = model.cuda()

    # Optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=config.lr_scheduler.base_lr,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)

    # LR scheduler
    lr_scheduler = CosineAnnealing(optimizer,
                                   len(trainloader) * config.max_iter)

    global PCA, Writer
    PCA = PerClassAccuracy(num_classes=config.num_classes)
    Writer = SummaryWriter(config.save_path + '/events')
    for iter_idx in range(config.max_iter):
        train(model, iter_idx, criterion, lr_scheduler, optimizer, trainloader)
        val(model, iter_idx, criterion, testloader)

    Writer.close()
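
For reference, a minimal runnable sketch of why config.lr_scheduler.base_lr works in the snippet above: EasyDict converts nested mappings into nested EasyDicts. The YAML keys and values here are placeholders chosen to match the accesses above, not the project's actual config:

import yaml
from easydict import EasyDict

demo_yaml = """
seed: 42
batch_size: 128
workers: 4
momentum: 0.9
weight_decay: 0.0005
max_iter: 200
num_classes: 10
lr_scheduler:
    base_lr: 0.1
"""
cfg = EasyDict(yaml.load(demo_yaml, Loader=yaml.FullLoader))
print(cfg.lr_scheduler.base_lr)  # nested mappings become nested EasyDicts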
Example #19
if __name__ == "__main__":
    # load det results in one frame
    det_file = sys.argv[1]
    ret = defaultdict(list)
    with open(det_file, 'r') as f:
      lines = f.readlines()
      for line in lines:
        line = line.strip().split()
        item = {
                'fid': int(line[0]),
                'class_index': int(line[1]),
                'score': float(line[2]),
                'bbox': list(map(float, line[3:]))
                }
        item = EasyDict(item)
        ret[item.fid].append(item)
    print(len(ret.keys()))
    sorted_det_keys = sorted(ret.keys())
    print (sorted_det_keys)

    ## open the output file
    output = open(sys.argv[2], 'w')

    ## load model 
    tracknet = goturn_net.TRACKNET(BATCH_SIZE, train = False)
    tracknet.build()

    sess = tf.Session()
    init = tf.global_variables_initializer()
    init_local = tf.local_variables_initializer()
Example #20
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import yaml

from easydict import EasyDict

config = EasyDict()
config.NUM_GPUS = 1
config.OUTPUT_DIR = ""
config.MODEL = "baseline"

config.DATALOADER = EasyDict()
config.DATALOADER.WORKERS = 2
config.DATALOADER.SHUFFLE = True
config.DATALOADER.LIDAR_DATA_PATH = "/home/fatih/LidarLabelsCameraViewTest"
config.DATALOADER.CAMERA_DATA_PATH = "/home/fatih/SegmentedInputTest"
config.DATALOADER.PIX2PIX = False
config.DATALOADER.RGB_PATH = "/SPACE/DATA/KITTI_Data/KITTI_raw_data/kitti/2011_09_26"
config.DATALOADER.SEGMENTED_PATH = "/home/fatih/Inputs/CameraData"

config.LIDAR_GENERATOR = EasyDict()
# Base Learning rate for optimizer
config.LIDAR_GENERATOR.BASE_LR = 0.0006
# Change learning rate in each step_size number of iterations by multiplying it with gamma
config.LIDAR_GENERATOR.STEP_SIZE = 5
config.LIDAR_GENERATOR.STEP_GAMMA = 0.1
config.LIDAR_GENERATOR.PIXEL_LAMBDA = 0.2
Example #21
from easydict import EasyDict

__C = EasyDict()

cfg = __C

__C.MULTI = 0.1
Example #22
PRETRAIN_FILE = ""
# distributed training
IS_DISTRIBUTION = False

# for debug
# MAX_STEPS = 10
# BATCH_SIZE = 31
# SAVE_STEPS = 2
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([Resize(size=IMAGE_SIZE), PerImageStandardization()])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
step_per_epoch = int(50000 / BATCH_SIZE)
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.01, 0.001, 0.0001, 0.00001],
    "boundaries":
    [step_per_epoch * 50, step_per_epoch * 100, step_per_epoch * 150],
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0005

# dataset
Example #23
def main():
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir)
    print(f'save dir :{save_dir}')
    sys.stdout = Logger(os.path.join(save_dir, 'train.log'))

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    model = model_selection(model_name=model_name, num_classes=1)

    fnet = FNet(model.num_ftrs).to(device)

    if model_path is not None:
        state_dict = torch.load(model_path)
        model.load_state_dict(state_dict)
        print(f'resume model from {model_path}')
    else:
        print('No model found, initializing random model.')
    if fnet_path is not None:
        state_dict = torch.load(fnet_path)
        fnet.load_state_dict(state_dict)
        print(f'resume model from {fnet_path}')
    else:
        print('No fnet_model found, initializing random model.')

    model = model.to(device)
    if parallel:
        model = nn.DataParallel(model)

    if parallel:
        optimizer = torch.optim.Adam(model.module.params(),
                                     lr=learning_rate,
                                     betas=(beta1, beta2))
    else:
        optimizer = torch.optim.Adam(model.params(),
                                     lr=learning_rate,
                                     betas=(beta1, beta2))

    optimizer_fnet = torch.optim.Adam(fnet.params(),
                                      lr=plr,
                                      betas=(beta1, beta2))

    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=step_size,
                                    gamma=gamma)
    scheduler_fnet = lr_scheduler.StepLR(optimizer_fnet,
                                         step_size=step_size,
                                         gamma=gamma)

    criterion = torch.nn.BCELoss().to(device)
    criterion_oc = CompactLoss().to(device)

    _preproc = get_transform(input_size)['train']
    df_train_dataset = FFpp(split='train',
                            frame_nums=frame_nums,
                            transform=_preproc,
                            detect_name=detect_name,
                            compress=compress,
                            type="Deepfakes",
                            pair=True,
                            original_path=ffpp_original_path,
                            fake_path=ffpp_fake_path)
    f2f_train_dataset = FFpp(split='train',
                             frame_nums=frame_nums,
                             transform=_preproc,
                             detect_name=detect_name,
                             compress=compress,
                             type='Face2Face',
                             pair=True,
                             original_path=ffpp_original_path,
                             fake_path=ffpp_fake_path)
    fs_train_dataset = FFpp(split='train',
                            frame_nums=frame_nums,
                            transform=_preproc,
                            detect_name=detect_name,
                            compress=compress,
                            type='FaceSwap',
                            pair=True,
                            original_path=ffpp_original_path,
                            fake_path=ffpp_fake_path)
    nt_train_dataset = FFpp(split='train',
                            frame_nums=frame_nums,
                            transform=_preproc,
                            detect_name=detect_name,
                            compress=compress,
                            type='NeuralTextures',
                            pair=True,
                            original_path=ffpp_original_path,
                            fake_path=ffpp_fake_path)

    datasetlist = [
        df_train_dataset, f2f_train_dataset, fs_train_dataset, nt_train_dataset
    ]
    # if test_index<len(datasetlist):
    #     del datasetlist[test_index]

    _preproc = get_transform(input_size)['test']

    cele_test_dataset = CeleDF(train=False,
                               frame_nums=frame_nums,
                               transform=_preproc,
                               data_root=celebdf_path)  #98855)
    df_test_dataset = FFpp(split='test',
                           frame_nums=frame_nums,
                           transform=_preproc,
                           detect_name=detect_name,
                           compress=compress,
                           type="Deepfakes")
    f2f_test_dataset = FFpp(split='test',
                            frame_nums=frame_nums,
                            transform=_preproc,
                            detect_name=detect_name,
                            compress=compress,
                            type='Face2Face')
    fs_test_dataset = FFpp(split='test',
                           frame_nums=frame_nums,
                           transform=_preproc,
                           detect_name=detect_name,
                           compress=compress,
                           type='FaceSwap')
    nt_test_dataset = FFpp(split='test',
                           frame_nums=frame_nums,
                           transform=_preproc,
                           detect_name=detect_name,
                           compress=compress,
                           type='NeuralTextures')
    dfdc_test_dataset = DFDCDetection(root=dfdc_path,
                                      train=False,
                                      frame_nums=frame_nums,
                                      transform=_preproc)
    df_test_dataloader = data.DataLoader(df_test_dataset,
                                         batch_size=2,
                                         shuffle=False,
                                         num_workers=8)
    f2f_test_dataloader = data.DataLoader(f2f_test_dataset,
                                          batch_size=2,
                                          shuffle=False,
                                          num_workers=8)
    fs_test_dataloader = data.DataLoader(fs_test_dataset,
                                         batch_size=2,
                                         shuffle=False,
                                         num_workers=8)
    nt_test_dataloader = data.DataLoader(nt_test_dataset,
                                         batch_size=2,
                                         shuffle=False,
                                         num_workers=8)
    cele_test_dataloader = data.DataLoader(cele_test_dataset,
                                           batch_size=2,
                                           shuffle=True,
                                           num_workers=8)
    dfdc_test_dataloader = data.DataLoader(dfdc_test_dataset,
                                           batch_size=3,
                                           shuffle=True,
                                           num_workers=8)

    model.train()

    best_acc = 0.
    best_loss = 1000.
    domain_best_acc = 0.
    domain_best_loss = 1000.
    celedf_best_acc = 0.
    celedf_best_loss = 1000.
    dfdc_best_acc = 0.
    dfdc_best_loss = 1000.

    for epoch in range(epochs):
        '''
        randomly shuffle all the source domains and split them into training domains and a meta domain
        '''
        copydatalist = copy.deepcopy(datasetlist)
        random.seed(epoch)
        random.shuffle(copydatalist)
        meta_dataset = copydatalist[2].cat(copydatalist[2], randomseed=epoch)

        copydatalist[0].set_meta_type(copydatalist[2].type)
        copydatalist[1].set_meta_type(copydatalist[2].type)
        train_dataset = copydatalist[0].cat(copydatalist[1])

        epoch_size = len(train_dataset) // batch_size
        print(
            f"train dataset is:{copydatalist[0].type},{copydatalist[1].type},meta dataset is:{meta_dataset.type}"
        )
        train_dataloader = data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=8,
                                           worker_init_fn=worker_init_fn)

        train(model, optimizer, fnet, optimizer_fnet, train_dataloader, None,
              criterion_oc, epoch, epoch_size, device)
        #train2(model,optimizer,train_dataloader,criterion,epoch,epoch_size,device,meta_dataloader=None)

        scheduler.step()
        scheduler_fnet.step()

        if (epoch + 1) % test_interval == 0:
            '''
            Test the model on several test sets.
            '''

            df_metrics = test(df_test_dataloader, model, device)
            f2f_metrics = test(f2f_test_dataloader, model, device)
            fs_metrics = test(fs_test_dataloader, model, device)
            nt_metrics = test(nt_test_dataloader, model, device)
            celedf_metrics = test(cele_test_dataloader, model, device)
            dfdc_metrics = test(dfdc_test_dataloader, model, device)

            metrics_list = [df_metrics, f2f_metrics, fs_metrics, nt_metrics]
            avg_metrics = EasyDict()
            all_avg_metrics = EasyDict()
            avg_metrics.acc = (df_metrics.acc + f2f_metrics.acc +
                               fs_metrics.acc + nt_metrics.acc) / 4
            avg_metrics.loss = (df_metrics.loss + f2f_metrics.loss +
                                fs_metrics.loss + nt_metrics.loss) / 4
            avg_metrics.auc = (df_metrics.auc + f2f_metrics.auc +
                               fs_metrics.auc + nt_metrics.auc) / 4
            avg_metrics.eer = (df_metrics.eer + f2f_metrics.eer +
                               fs_metrics.eer + nt_metrics.eer) / 4

            all_avg_metrics.acc = (df_metrics.acc + f2f_metrics.acc +
                                   fs_metrics.acc + nt_metrics.acc +
                                   celedf_metrics.acc + dfdc_metrics.acc) / 6
            all_avg_metrics.loss = (
                df_metrics.loss + f2f_metrics.loss + fs_metrics.loss +
                nt_metrics.loss + celedf_metrics.loss + dfdc_metrics.loss) / 6
            all_avg_metrics.auc = (df_metrics.auc + f2f_metrics.auc +
                                   fs_metrics.auc + nt_metrics.auc +
                                   celedf_metrics.auc + dfdc_metrics.auc) / 6
            all_avg_metrics.eer = (df_metrics.eer + f2f_metrics.eer +
                                   fs_metrics.eer + nt_metrics.eer +
                                   celedf_metrics.eer + dfdc_metrics.eer) / 6

            print(
                f"df acc:{df_metrics.acc:.5f},loss:{df_metrics.loss:.3f},auc:{df_metrics.auc:.3f},eer:{df_metrics.eer:.3f}"
            )
            print(
                f"f2f acc:{f2f_metrics.acc:.3f},loss:{f2f_metrics.loss:.3f},auc:{f2f_metrics.auc:.3f},eer:{f2f_metrics.eer:.3f}"
            )
            print(
                f"fs acc:{fs_metrics.acc:.3f},loss:{fs_metrics.loss:.3f},auc:{fs_metrics.auc:.3f},eer:{fs_metrics.eer:.3f}"
            )
            print(
                f"nt acc:{nt_metrics.acc:.3f},loss:{nt_metrics.loss:.3f},auc:{nt_metrics.auc:.3f},eer:{nt_metrics.eer:.3f}"
            )
            print(
                f"avg acc:{avg_metrics.acc:.3f},loss:{avg_metrics.loss:.3f},auc:{avg_metrics.auc:.3f},eer:{avg_metrics.eer:.3f}"
            )
            print(
                f"celedf acc:{celedf_metrics.acc:.3f},loss:{celedf_metrics.loss:.3f},auc:{celedf_metrics.auc:.3f},eer:{celedf_metrics.eer:.3f}"
            )
            print(
                f"dfdc acc:{dfdc_metrics.acc:.3f},loss:{dfdc_metrics.loss:.3f},auc:{dfdc_metrics.auc:.3f},eer:{dfdc_metrics.eer:.3f}"
            )
            print(
                f"all_avg acc:{all_avg_metrics.acc:.3f},loss:{all_avg_metrics.loss:.3f},auc:{all_avg_metrics.auc:.3f},eer:{all_avg_metrics.eer:.3f}"
            )

            best_acc, best_loss = updatebest(avg_metrics, best_acc, best_loss,
                                             "avg", model, fnet)
            #domain_best_acc,domain_best_loss = updatebest(metrics_list[test_index],domain_best_acc,domain_best_loss,"domain",model,pnet)
            celedf_best_acc, celedf_best_loss = updatebest(
                celedf_metrics, celedf_best_acc, celedf_best_loss, "celedf",
                model, fnet)
            dfdc_best_acc, dfdc_best_loss = updatebest(dfdc_metrics,
                                                       dfdc_best_acc,
                                                       dfdc_best_loss, "dfdc",
                                                       model, fnet)
            save_checkpoint(model.state_dict(),
                            fpath=f'{save_dir}/{model_name}_lastepoch.pth')
            save_checkpoint(
                fnet.state_dict(),
                fpath=f'{save_dir}/{model_name}_pnet_lastepoch.pth')

    print(f'save dir :{save_dir} done!!!')
Example #24
from easydict import EasyDict

configer = EasyDict()

configer.ckptdir = './ckpt'
configer.logdir = './log'

configer.inputsize = (3, 112, 96)  # (C, H, W)
configer.batchsize = 128
configer.n_epoch = 50
configer.valid_freq = 0

configer.lrbase = 0.001
configer.adjstep = [32, 44]
configer.gamma = 0.1

configer.cuda = True
Example #25
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', default=0)
    parser.add_argument('-m', '--modelpath', default=None)
    args = parser.parse_args()

    # chainer config for demo
    gpu = args.gpu
    chainer.cuda.get_device_from_id(gpu).use()
    chainer.global_config.train = False
    chainer.global_config.enable_backprop = False

    # load config
    cfgpath = osp.join(filepath, 'cfg', 'demo.yaml')
    with open(cfgpath, 'r') as f:
        config = EasyDict(yaml.load(f, Loader=yaml.FullLoader))

    min_size = config.min_size
    max_size = config.max_size
    score_thresh = 1e-3
    nms_thresh = config.nms_thresh
    mask_merge_thresh = config.mask_merge_thresh
    binary_thresh = config.binary_thresh

    # load label_names
    n_class = len(coco_label_names)

    # load model
    model = fcis.models.FCISResNet101(n_class)
    modelpath = args.modelpath
    if modelpath is None:
        modelpath = model.download('coco')
    chainer.serializers.load_npz(modelpath, model)
    model.to_gpu(gpu)

    dataset = fcis.datasets.coco.COCOInstanceSegmentationDataset(
        split='minival2014',
        use_crowded=True,
        return_crowded=True,
        return_area=True)

    print('start')
    start = time.time()

    def inference_generator(model, dataset):
        for i in range(0, len(dataset)):
            img, gt_bbox, gt_whole_mask, gt_label, gt_crowded, gt_area = \
                dataset[i]
            _, H, W = img.shape
            size = (H, W)
            gt_mask = fcis.utils.whole_mask2mask(gt_whole_mask, gt_bbox)

            # prediction
            outputs = model.predict([img], min_size, max_size, score_thresh,
                                    nms_thresh, mask_merge_thresh,
                                    binary_thresh)
            pred_bbox = outputs[0][0]
            pred_whole_mask = outputs[1][0]
            pred_mask = fcis.utils.whole_mask2mask(pred_whole_mask, pred_bbox)
            pred_label = outputs[2][0]
            pred_score = outputs[3][0]

            if i % 100 == 0:
                print('{} / {}, avg iter/sec={:.2f}'.format(
                    i, len(dataset), (i + 1) / (time.time() - start)))

            yield i, size, pred_bbox, pred_mask, pred_label, pred_score, \
                gt_bbox, gt_mask, gt_label, gt_crowded, gt_area

    generator = inference_generator(model, dataset)
    results = eval_instance_segmentation_coco(generator)

    keys = [
        'ap/iou=0.50:0.95/area=all/maxDets=100',
        'ap/iou=0.50/area=all/maxDets=100',
        'ap/iou=0.75/area=all/maxDets=100',
        'ap/iou=0.50:0.95/area=small/maxDets=100',
        'ap/iou=0.50:0.95/area=medium/maxDets=100',
        'ap/iou=0.50:0.95/area=large/maxDets=100',
    ]
    print('================================')
    for key in keys:
        print('m{}={}'.format(key, results['m' + key]))
        for i, label_name in enumerate(coco_label_names):
            if i == 0:
                continue
            try:
                print('{}/{:s}={}'.format(key, label_name,
                                          results[key][i - 1]))
            except IndexError:
                print('{}/{:s}={}'.format(key, label_name, np.nan))
        print('================================')
Example #26
def analyse(dicom_file):
    """计算给定胸部横切照片的Haller指数
    
    Args:
        dicom_file (str): 胸部横切dicom文件
    
    Returns:
        tuple: haller_index (Haller指数), figure_image(带辅助线的照片) 注:如果plot为Fasle, 将只返回Haller指数
    """
    # ------------------------------------------------------------------------- #
    #        Read the pixel data from the DICOM file
    # ------------------------------------------------------------------------- #

    ds = pydicom.dcmread(dicom_file)
    img = cv2.convertScaleAbs(ds.pixel_array, alpha=(255.0 / 65535.0))
    hu = get_pixels_hu(ds)
    origin_img = set_dicom_window_width_center(hu, 360, 30)
    # ------------------------------------------------------------------------- #
    #        Extract sternum points and tissue contour points
    # ------------------------------------------------------------------------- #
    contours, rib_contours = segment(img)

    # Sort all contours by their number of points (ascending)
    contours = sorted(contours, key=lambda x: len(x))

    # ------------------------------------------------------------------------- #
    #        Find the outer chest contour and its key points
    # ------------------------------------------------------------------------- #
    # Find the outer chest contour (the contour with the largest area is the outer chest wall)
    out_contour, out_contour_area = find_outer_contour(contours)
    out_contour, (cx, cy) = sort_clockwise(out_contour)

    # Find the highest and lowest points of the outer chest contour
    out_contour_bottom = find_boundary_point(out_contour, "bottom")
    out_contour_top = find_boundary_point(out_contour, "top")

    # Filter out all contours below the lowest point of the outer contour
    contours = filter_contours(contours,
                               y_max=out_contour_bottom[1] + 1,
                               mode="all")

    # ------------------------------------------------------------------------- #
    #        Find the inner chest-cavity contours and their key points
    # ------------------------------------------------------------------------- #
    # Find the inner chest-cavity contours
    inner_contours = find_inner_contour(contours, out_contour_area)

    # Find the lowest points of the left and right chest contours; lowest_1 is the left one, lowest_2 the right one
    lowest_1 = find_boundary_point(inner_contours[0], position="bottom")
    lowest_2 = find_boundary_point(inner_contours[1], position="bottom")

    # Swap if necessary so that 1 is the left chest and 2 is the right chest
    inner_contours[0], inner_contours[1] = (
        inner_contours[0],
        inner_contours[1]) if lowest_1[0] < lowest_2[0] else (
            inner_contours[1], inner_contours[0])
    inner_contours[0], _ = sort_clockwise(inner_contours[0])
    inner_contours[1], _ = sort_clockwise(inner_contours[1])

    lowest_1, lowest_2 = (
        lowest_1, lowest_2) if lowest_1[0] < lowest_2[0] else (lowest_2,
                                                               lowest_1)

    # ------------------------------------------------------------------------- #
    #        Rotate the image and its contours (straighten them so they are horizontal)
    # ------------------------------------------------------------------------- #
    # Rotate the image about the left lowest point so that the line between the two lowest points is parallel to the X axis
    dy = lowest_2[1] - lowest_1[1]
    dx = lowest_2[0] - lowest_1[0]

    angle = np.arctan(dy / dx) / math.pi * 180

    if abs(angle) <= 15:
        # Rotate to straighten the chest CT
        matrix = cv2.getRotationMatrix2D((lowest_1[0], lowest_1[1]), angle,
                                         1.0)
        img = cv2.warpAffine(img, matrix, (img.shape[0], img.shape[1]))
        origin_img = cv2.warpAffine(origin_img, matrix,
                                    (img.shape[0], img.shape[1]))

        inner_contours = [
            rotate_contours(contour, matrix) for contour in inner_contours
        ]
        out_contour = rotate_contours(out_contour, matrix)
        rib_contours = [
            rotate_contours(contour, matrix) for contour in rib_contours
        ]

    inner_left_top_point = find_boundary_point(inner_contours[0], "top")
    inner_right_top_point = find_boundary_point(inner_contours[1], "top")

    # Find the protruding points of the outer chest wall and the concave point of the outer contour
    left_top = find_boundary_point(filter_contour_points(out_contour,
                                                         x_max=cx,
                                                         y_max=cy),
                                   position="top")
    right_top = find_boundary_point(filter_contour_points(out_contour,
                                                          x_min=cx,
                                                          y_max=cy),
                                    position="top")

    out_contour_left = find_boundary_point(out_contour, "left")
    out_contour_right = find_boundary_point(out_contour, "right")

    mid_bottom = find_boundary_point(filter_contour_points(out_contour,
                                                           x_min=left_top[0],
                                                           x_max=right_top[0],
                                                           y_max=cy),
                                     position="bottom")

    # ------------------------------------------------------------------------- #
    #        Find the outermost points of the left and right chest and compute a (the line between the left and right inner chest-cavity boundaries)
    # ------------------------------------------------------------------------- #
    left_chest_leftmost = find_boundary_point(inner_contours[0],
                                              position="left")
    right_chest_rightmost = find_boundary_point(inner_contours[1],
                                                position="right")

    # ------------------------------------------------------------------------- #
    #        Filter and sort the rib-related contours
    # ------------------------------------------------------------------------- #
    rib_contours = filter_contours(rib_contours,
                                   y_max=out_contour_bottom[1] - 5,
                                   y_min=min(left_top[1], right_top[1]) + 5,
                                   x_min=out_contour_left[0] + 1,
                                   x_max=out_contour_right[0] - 1,
                                   mode="all")
    rib_contours = sorted(rib_contours, key=lambda x: len(x))
    rib_contours_all_in_one = np.concatenate(rib_contours)

    # ------------------------------------------------------------------------- #
    #        Find the spine and sternum/rib contours and their key points (here vertebra = sternum/ribs, sternum = spine)
    # ------------------------------------------------------------------------- #
    # Find the points of the left and right chest contours closest to the middle
    left_chest_rightmost = find_boundary_point(inner_contours[0],
                                               position="right")
    right_chest_leftmost = find_boundary_point(inner_contours[1],
                                               position="left")

    # Filter out rib contours that have too few points
    rib_contours = [i for i in rib_contours if len(i) > 15]

    rib_contours = filter_contours(rib_contours,
                                   x_min=lowest_1[0],
                                   x_max=lowest_2[0],
                                   mode='exist')

    # Use the midpoint of the left and right outermost points as the boundary between the upper and lower chest
    demarcation_point = (left_chest_leftmost[1] + right_chest_rightmost[1]
                         ) / 2  # some rib contours cross the midline, so the boundary is meant to sit about 10 pixels above it

    # Use this boundary line to split the rib contours into an upper and a lower part
    bottom_rib_contours = filter_contours(rib_contours,
                                          y_min=demarcation_point,
                                          y_max=out_contour_bottom[1],
                                          x_min=left_chest_leftmost[0],
                                          x_max=right_chest_rightmost[0],
                                          mode="exist")

    # # For the lower sternum, take the top 3 contours from the set
    # if len(bottom_rib_contours) >= 3:
    #     bottom_rib_contours = bottom_rib_contours[-3:]

    # Use the outer chest-wall depression point, shifted downward, as the sternum/rib point
    tmp_points = mid_bottom.copy()

    # Merge the upper and lower sternum contours
    vertebra_contour = filter_contours(rib_contours,
                                       y_max=tmp_points[1] + 70,
                                       y_min=mid_bottom[1],
                                       mode="all")
    vertebra_contour = filter_contours(vertebra_contour,
                                       x_min=left_top[0],
                                       x_max=right_top[0],
                                       mode="all")
    if len(vertebra_contour) > 0:  # if spine points were found, use them; otherwise fall back to the depression point
        vertebra_contour = sorted(vertebra_contour, key=lambda x: len(x))[-1:]
        top_vertebra_point = find_boundary_point(
            np.concatenate(vertebra_contour), "bottom")
        if top_vertebra_point[1] - mid_bottom[1] < 10:
            tmp_points[1] += 30
            vertebra_contour = tmp_points.reshape(1, 1, -1)
            vertebra_avaliable = False
        else:
            vertebra_contour = np.concatenate(vertebra_contour)
            vertebra_avaliable = True
    else:
        tmp_points[1] += 30
        vertebra_contour = tmp_points.reshape(1, 1, -1)
        vertebra_avaliable = False

    bottom_rib_contours = [c for c in bottom_rib_contours if len(c) > 40]
    sternum_contour = np.concatenate(bottom_rib_contours)
    sternum_contour = filter_contour_points(sternum_contour,
                                            x_min=left_top[0] + 10,
                                            x_max=right_top[0] - 10,
                                            y_min=mid_bottom[1] + 30)

    # Find the topmost point of the spine and the bottommost point of the sternum
    top_vertebra_point = find_boundary_point(vertebra_contour, "bottom")
    bottom_sternum_point = find_boundary_point(sternum_contour, "top")

    # Use a few rules to decide whether the spine position is usable
    sternum_avaliable = True

    # ------------------------------------------------------------------------- #
    #        Determine the left and right points for the Haller index
    # ------------------------------------------------------------------------- #

    # If the left and right points are too far apart vertically, use the rib points for the left-right line
    if abs(left_chest_leftmost[1] - right_chest_rightmost[1]) > 30:
        # Find the leftmost and rightmost points of the surrounding ribs
        rib_contours_all_in_one = filter_contour_points(
            rib_contours_all_in_one,
            x_min=out_contour_left[0],
            x_max=out_contour_right[0])
        left_rib_point = find_boundary_point(rib_contours_all_in_one, "left")
        left_rib_point[0] = left_rib_point[0] + 20
        right_rib_point = find_boundary_point(rib_contours_all_in_one, "right")
        right_rib_point[0] = right_rib_point[0] - 20

        left_chest_leftmost = left_rib_point
        right_chest_rightmost = right_rib_point

    # ------------------------------------------------------------------------- #
    #       Collect the useful points and contours into a single dictionary
    # ------------------------------------------------------------------------- #
    result_dict = EasyDict({
        "img": origin_img,  # CT影像像素值
        "left_chest_leftmost": left_chest_leftmost,  # 左侧胸腔最外侧的点
        "right_chest_rightmost": right_chest_rightmost,  # 右侧胸腔最外侧的点
        "top_vertebra_point": top_vertebra_point,  # 胸肋骨最靠近胸腔的点(只包含中间部分)
        "bottom_sternum_point": bottom_sternum_point,  #  脊椎骨最靠近胸腔的点
        "vertebra": vertebra_contour,  # 胸肋骨(中间部分)
        "vertebra_avaliable": vertebra_avaliable,  # 胸肋骨是否可用
        "sternum": sternum_contour,  # 脊椎骨
        "sternum_avaliable": sternum_avaliable,
        "left_chest": inner_contours[0],  # 左胸腔轮廓
        "right_chest": inner_contours[1],  # 右胸腔轮廓
        "out_contour": out_contour,  # 外轮廓
        "mid_bottom": mid_bottom,  # 外轮廓中间凹陷点
        "out_contour_top": out_contour_top,  # 外胸廓高点 (y轴方向最高)
        "left_top": left_top,
        "right_top": right_top,
        "inner_contour": None
    })

    return result_dict
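
A downstream step would use the landmarks in result_dict to compute the Haller index (transverse chest width divided by the sternum-to-vertebra distance). A minimal sketch of that follow-up, assuming each landmark is an (x, y) array as the filter calls above suggest; this helper is not part of the original snippet:

def compute_haller_index(res):
    # transverse diameter: horizontal span between the left and right chest points
    width = abs(res.right_chest_rightmost[0] - res.left_chest_leftmost[0])
    # anteroposterior diameter: vertical gap between the sternum and vertebra landmarks
    depth = abs(res.bottom_sternum_point[1] - res.top_vertebra_point[1])
    return width / depth if depth > 0 else float("nan")
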
Example #27
0
class CuratorInvoke(object):
    def __init__(self, **opts):
        self.opts = EasyDict(opts)
        self._client = None

    @property
    def client(self):
        if not self._client:
            o = self.opts
            self._client = get_client(**({
                'host': o.host,
                'port': o.port,
                'url_prefix': o.url_prefix,
                'http_auth': o.http_auth,
                'use_ssl': o.use_ssl,
                'master_only': o.master_only,
                'timeout': o.timeout
            }))
        return self._client

    def fetch(self, act_on, on_nofilters_showall=False):
        """
        Forwarder method to indices/snapshots selector.
        """
        if act_on not in ['indices', 'snapshots', 'cluster']:
            raise ValueError('invalid argument: ' + act_on)

        if act_on == 'indices':
            return curator.IndexList(self.client)
        elif act_on == 'snapshots':
            return curator.SnapshotList(self.client)
        else:
            return []

    def command_kwargs(self, command):
        """
        Return kwargs dict for a specific command options or return empty dict.
        """
        opts = defaultdict(lambda: None, self.opts)

        kwargs = []

        # Get the available action specific options from curator
        dict_list = options.action_specific(command)

        for d in dict_list:
            for k in d:
                kwargs.append(str(k))

        # Define each of the action specific options using values from the opts dict
        command_kwargs = dict()
        for key in kwargs:
            command_kwargs[key] = opts[key]

        return compact_dict(command_kwargs)

    def run(self, act_on, command):
        """Invoke command which acts on indices and perform an api call.
        """
        kwargs = self.command_kwargs(command)

        config = {
            'action': command,
            'filters': json.loads('[' + self.opts.filters + ']')
        }

        if command == 'alias':
            kwargs['warn_if_no_indices'] = self.opts.get(
                'warn_if_no_indices', False)
            if self.opts.add is not None:
                config['add'] = json.loads('{"filters": [' + self.opts.add +
                                           ']}')
            if self.opts.remove is not None:
                config['remove'] = json.loads('{"filters": [' +
                                              self.opts.remove + ']}')
        elif command == 'reindex':
            kwargs['request_body'] = json.loads(
                self.opts.get('request_body', ''))

        config['options'] = kwargs

        process_action(self.client, config, **kwargs)

        return True

    def _get_filters_from_json(self, fn):
        """Read JSON-formatted filters from the specified file
        """
        filters = '{"filtertype": "none"}'
        name = os.path.expanduser(fn)
        if os.path.exists(name):
            with open(name, 'r') as f:
                json_data = f.read().rstrip()
            if len(json_data) > 0:
                filters = json_data

        return filters

    def invoke(self, command=None, act_on=None):
        """Invoke command through translating it to curator api call.
        """
        if act_on is None:
            raise ValueError(
                "act_on must be one of 'indices', 'snapshots' or 'cluster'")

        # If no filters are passed in opts, then try reading them from file opts.curator_json
        if self.opts.filters is None:
            self.opts.filters = self._get_filters_from_json(
                self.opts.curator_json)

        return self.run(act_on, command)
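
A hypothetical usage sketch of the class above; the option names mirror the keys read in `client` and `invoke`, and the filter string follows curator's JSON filter format (all values here are illustrative, not from the original snippet):

opts = {
    'host': 'localhost', 'port': 9200, 'url_prefix': '', 'http_auth': None,
    'use_ssl': False, 'master_only': False, 'timeout': 30,
    'filters': '{"filtertype": "pattern", "kind": "prefix", "value": "logstash-"}',
    'curator_json': '~/.curator.json',
}
invoker = CuratorInvoke(**opts)
invoker.invoke(command='delete_indices', act_on='indices')
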
Example #28
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', default=0)
    parser.add_argument('-m', '--modelpath', default=None)
    args = parser.parse_args()

    # chainer config for demo
    gpu = args.gpu
    chainer.cuda.get_device_from_id(gpu).use()
    chainer.global_config.train = False
    chainer.global_config.enable_backprop = False

    # load config
    cfgpath = osp.join(filepath, 'cfg', 'demo.yaml')
    with open(cfgpath, 'r') as f:
        config = EasyDict(yaml.load(f))

    min_size = config.min_size
    max_size = config.max_size
    score_thresh = 1e-3
    nms_thresh = config.nms_thresh
    mask_merge_thresh = config.mask_merge_thresh
    binary_thresh = config.binary_thresh
    min_drop_size = config.min_drop_size
    iter2 = True

    # load label_names
    n_class = len(voc_label_names)

    # load model
    model = fcis.models.FCISResNet101(
        n_class,
        ratios=(0.5, 1.0, 2.0),
        anchor_scales=(8, 16, 32),
        rpn_min_size=16)
    modelpath = args.modelpath
    if modelpath is None:
        modelpath = model.download('voc')
    chainer.serializers.load_npz(modelpath, model)
    model.to_gpu(gpu)

    dataset = fcis.datasets.sbd.SBDInstanceSegmentationDataset(split='val')

    print('start')
    start = time.time()

    def inference_generator(model, dataset):
        for i in range(0, len(dataset)):
            img, gt_bbox, gt_whole_mask, gt_label = dataset[i]
            _, H, W = img.shape
            size = (H, W)
            gt_whole_mask = gt_whole_mask.astype(bool)
            # suppress background
            gt_label = gt_label - 1
            gt_mask = fcis.utils.whole_mask2mask(
                gt_whole_mask, gt_bbox)
            del gt_whole_mask

            # prediction
            outputs = model.predict(
                [img], min_size, max_size, score_thresh,
                nms_thresh, mask_merge_thresh, binary_thresh,
                min_drop_size, iter2=iter2)
            del img
            pred_bbox = outputs[0][0]
            pred_whole_mask = outputs[1][0]
            pred_label = outputs[2][0]
            pred_score = outputs[3][0]
            # suppress background
            pred_label = pred_label - 1
            pred_whole_mask = pred_whole_mask.astype(bool)
            pred_mask = fcis.utils.whole_mask2mask(
                pred_whole_mask, pred_bbox)
            del pred_whole_mask

            if (i + 1) % 100 == 0:
                print('{} / {}, avg iter/sec={:.2f}'.format(
                    (i + 1), len(dataset), (i + 1) / (time.time() - start)))
            yield size, pred_bbox, pred_mask, pred_label, pred_score, \
                gt_bbox, gt_mask, gt_label, None

    generator = inference_generator(model, dataset)

    iou_thresh = (0.5, 0.7)
    results = eval_instance_segmentation_voc(
        generator, iou_thresh, use_07_metric=True)

    print('================================')
    print('iou_thresh={}'.format(0.5))
    print('mAP@0.5={}'.format(results['map0.5']))
    for i, label_name in enumerate(voc_label_names):
        if i == 0:
            continue
        try:
            print('AP@0.5/{:s}={}'.format(
                label_name, results['ap0.5'][i - 1]))
        except IndexError:
            print('AP@0.5/{:s}={}'.format(
                label_name, np.nan))
    print('================================')
    print('iou_thresh={}'.format(0.7))
    print('mAP@0.7={}'.format(results['map0.7']))
    for i, label_name in enumerate(voc_label_names):
        if i == 0:
            continue
        try:
            print('AP@0.7/{:s}={}'.format(
                label_name, results['ap0.7'][i - 1]))
        except IndexError:
            print('AP@0.7/{:s}={}'.format(
                label_name, np.nan))
    print('================================')
Example #29
0
from lib.transform import Resize, Compose
from lib.transform import ToTensor, ToGridCellOffset
from lib._parser_config import parser_config

logging.basicConfig(
    format='[%(levelname)s] %(asctime)s:%(message)s', level=logging.DEBUG)

parser = argparse.ArgumentParser(description='test object-detection of single stage.')
parser.add_argument('--config', type=str, default='cfgs/yolo.yaml',
                    help='configuration file')
args = parser.parse_args()


if __name__ == '__main__':
    with open(args.config) as rptr:
        config = EasyDict(yaml.load(rptr))
    config = parser_config(config)

    torch.manual_seed(config.SEED)
    torch.cuda.manual_seed(config.SEED)
    np.random.seed(config.SEED)
    random.seed(config.SEED)

    rs_ = Resize(size=(448, 448))
    tt_ = ToTensor()
    img_trans = Compose([rs_, tt_])

    dataloader = MakeDataLoader(
        dataset=VOCDataset(config, phase='test',
                           img_transform=img_trans),
        batch_size=config.TEST.BATCH_SIZE,
Example #30
0
def get_config_from_json(json_file):
    with open(json_file, 'r') as config_file:
        config_dict = json.load(config_file)
    config = EasyDict(config_dict)

    return config, config_dict
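
A small usage sketch of the helper above (the file name and keys are illustrative only):

# config.json might contain: {"exp_name": "baseline", "batch_size": 32}
config, config_dict = get_config_from_json('config.json')
print(config.exp_name, config.batch_size)   # attribute-style access via EasyDict
print(config_dict['batch_size'])             # plain dict access
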
Example #31
0
def test_training():
    """Test only no error raised."""
    config = EasyDict()

    config.NETWORK_CLASS = Darknet
    config.DATASET_CLASS = Dummy

    config.IS_DEBUG = False
    config.IMAGE_SIZE = [28, 14]
    config.BATCH_SIZE = 2
    config.TEST_STEPS = 1
    config.MAX_STEPS = 2
    config.SAVE_CHECKPOINT_STEPS = 1
    config.KEEP_CHECKPOINT_MAX = 5
    config.SUMMARISE_STEPS = 1
    config.IS_PRETRAIN = False
    config.TASK = Tasks.CLASSIFICATION

    # network model config
    config.NETWORK = EasyDict()
    config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
    config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
    config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE
    config.NETWORK.BATCH_SIZE = config.BATCH_SIZE

    # dataset config
    config.DATASET = EasyDict()
    config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)
    config.DATASET.BATCH_SIZE = config.BATCH_SIZE

    environment.init("test_darknet")
    prepare_dirs(recreate=True)
    start_training(config, profile_step=1)
class ItemsFilter(object):
    """
    Class implements curator indices/snapshot filtering by name.
    Supported opts: newer_than, older_than, suffix, prefix, regex, timestring.
    """

    def build(self, **opts):
        """
        Build items filter.

        :rtype: ItemsFilter
        """
        self.opts = EasyDict(opts)
        self.built_list = self._build()
        return self


    def get_timebased(self):
        """
        Get timebased specific filters.

        :rtype: tuple(newer_than, older_than)
        """
        result = {}
        for f in self.built_list:
            if f.get('method', None) in ('newer_than', 'older_than'):
                result[f['method']] = f
                if len(result) == 2: break

        return (result.get('newer_than', None), result.get('older_than', None))


    @property
    def all_items(self):
        return self.opts.get('all_{0}'.format(self.act_on), None)


    @property
    def filter_list(self):
        return self.built_list


    @property
    def closed_timerange(self):
        """
        Closed time range specified, newer_than and older_than both present.
        """
        return len(filter(None, self.get_timebased())) == 2


    def apply(self, working_list, act_on):
        """
        Apply filters to a working list of indices/snapshots and
        return resulting list.
        """
        self.act_on = act_on
        result_list = self._apply_closed_timerange(working_list)

        if self.all_items:
            logger.info('Matching all {0}. Ignoring parameters other than exclude.'.format(self.act_on))

        # Closed time range couldn't be applied
        if result_list is None:
            result_list = working_list

        # Apply filters one by one (if any) from the list.
        for f in self.built_list:
            is_timebased = f.get('method', None) in ('newer_than', 'older_than')
            # Don't apply timebased filters for a closed time range.
            if self.closed_timerange and is_timebased:
                continue
            # When all items are selected, ignore filters other than exclude
            if self.all_items and not 'exclude' in f:
                continue

            logger.debug('Applying filter: {0}'.format(f))
            result_list = apply_filter(result_list, **f)

        return result_list


    def _apply_closed_timerange(self, working_list):
        """
        Apply separated filtering for a closed time range.
        In case filtering is not applied None is returned.
        """
        if self.closed_timerange:
            newer_than, older_than = self.get_timebased()
            if newer_than['value'] < older_than['value']:
                print 'ERROR: Wrong time period: newer_than parameter must be > older_than.'
                sys.exit(1)
            if not self.all_items:
                # We don't apply time range filtering in case of all_* options.
                logger.debug('Applying time range filters, result will be intersection\n'\
                             'newer_than: {0}\nolder_than: {1}'.format(newer_than, older_than))
                newer_range = set(apply_filter(working_list, **newer_than))
                older_range = set(apply_filter(working_list, **older_than))
                result_list = list(newer_range & older_range)
                return result_list


    def _build(self):
        """
        Build filters according to the filtering parameters.

        :rtype: list
        """
        opts = self.opts
        filter_list = []

        # No timestring parameter while range parameters are given
        if not opts.timestring and any(( xstr(opts.newer_than),
                                         xstr(opts.older_than) )):
            print 'ERROR: Parameters newer_than/older_than require timestring to be given'
            sys.exit(1)
        # Timestring used alone without newer_than/older_than
        if opts.timestring is not None and not all(( xstr(opts.newer_than),
                                                     xstr(opts.older_than) )):
            f = api.filter.build_filter(kindOf='timestring',
                                        value=opts.timestring)
            if f: filter_list.append(f)

        # Timebase filtering
        timebased = zip(('newer_than', 'older_than'), (opts.newer_than,
                                                       opts.older_than))
        for opt, value in timebased:
            if value is None: continue
            f = api.filter.build_filter(kindOf=opt, value=value,
                                        timestring=opts.timestring,
                                        time_unit=opts.time_unit)
            if f: filter_list.append(f)

        # Add filtering based on suffix|prefix|regex
        patternbased = zip(('suffix', 'prefix', 'regex'),
                           (opts.suffix, opts.prefix, opts.regex))

        for opt, value in patternbased:
            if value is None: continue
            f = api.filter.build_filter(kindOf=opt, value=value)
            if f: filter_list.append(f)

        # Add exclude filter
        patterns = filter(None, (opts.exclude or '').split(','))
        for pattern in patterns:
            f = {'pattern':  pattern, 'exclude': True}
            filter_list.append(f)

        return filter_list
Example #33
0
from easydict import EasyDict

BerryKeys = EasyDict()

# Contains some auxiliary variables such as 'p' in Dropout layer
BerryKeys.AUX_INPUTS = "aux_inputs"

# Contains all the layer activations/outputs for easy access
BerryKeys.LAYER_OUTPUTS = "layer_outputs"
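
These keys presumably name shared collections that layers write into; a minimal sketch of that pattern (the registry dict and the entry strings are illustrative, not part of Berry):

collections = {}
collections.setdefault(BerryKeys.LAYER_OUTPUTS, []).append("conv1/relu:0")
collections.setdefault(BerryKeys.AUX_INPUTS, []).append("dropout/keep_prob:0")
print(collections[BerryKeys.LAYER_OUTPUTS])
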
class MetricSegment(EvaluationMetric):
    configs = [
        EasyDict(
            name='SegEval',
            thresh_p=None,
            thresh_sIoU=np.linspace(0.25, 0.75, 11, endpoint=True),
            thresh_segsize=500,
            thresh_instsize=100,
        ),
        EasyDict(
            name='SegEval-AnomalyTrack',
            thresh_p=None,
            thresh_sIoU=np.linspace(0.25, 0.75, 11, endpoint=True),
            thresh_segsize=500,
            thresh_instsize=100,
        ),
        EasyDict(
            name='SegEval-ObstacleTrack',
            thresh_p=None,
            thresh_sIoU=np.linspace(0.25, 0.75, 11, endpoint=True),
            thresh_segsize=50,
            thresh_instsize=10,
        )
    ]

    @property
    def name(self):
        return self.cfg.name

    def vis_frame(self,
                  fid,
                  dset_name,
                  method_name,
                  mask_roi,
                  anomaly_p,
                  image=None,
                  **_):
        segmentation = np.copy(anomaly_p)
        segmentation[anomaly_p > self.cfg.thresh_p] = 1
        segmentation[anomaly_p <= self.cfg.thresh_p] = 0
        h, w = mask_roi.shape[:2]
        canvas = image.copy() if image is not None else np.zeros(
            (h, w, 3), dtype=np.uint8)
        heatmap_color = adapt_img_data(segmentation)
        canvas[mask_roi] = canvas[mask_roi] // 2 + heatmap_color[mask_roi] // 2
        imwrite(
            DIR_OUTPUTS / f'vis_SegPred' / method_name / dset_name /
            f'{fid}.webp', canvas)

    def process_frame(self,
                      label_pixel_gt: np.ndarray,
                      anomaly_p: np.ndarray,
                      fid: str = None,
                      dset_name: str = None,
                      method_name: str = None,
                      visualize: bool = True,
                      **_):
        """
        @param label_pixel_gt: HxW uint8
            0 = in-distribution / road
            1 = anomaly / obstacle
            255 = void / ignore
        @param anomaly_p: HxW float16
            heatmap of per-pixel anomaly detection, value from 0 to 1
        @param visualize: bool
            saves an image with segment predictions
        """

        mask_roi = label_pixel_gt < 255
        anomaly_gt, anomaly_pred = default_instancer(anomaly_p, label_pixel_gt,
                                                     self.cfg.thresh_p,
                                                     self.cfg.thresh_segsize,
                                                     self.cfg.thresh_instsize)

        results = segment_metrics(anomaly_gt, anomaly_pred,
                                  self.cfg.thresh_sIoU)

        if visualize and fid is not None and dset_name is not None and method_name is not None:
            self.vis_frame(fid=fid,
                           dset_name=dset_name,
                           method_name=method_name,
                           mask_roi=mask_roi,
                           anomaly_p=anomaly_p,
                           **_)

        return results

    def aggregate(self, frame_results: list, method_name: str,
                  dataset_name: str):

        sIoU_gt_mean = sum(np.sum(r.sIoU_gt) for r in frame_results) / sum(
            len(r.sIoU_gt) for r in frame_results)
        sIoU_pred_mean = sum(np.sum(r.sIoU_pred) for r in frame_results) / sum(
            len(r.sIoU_pred) for r in frame_results)
        ag_results = {
            "tp_mean": 0.,
            "fn_mean": 0.,
            "fp_mean": 0.,
            "f1_mean": 0.,
            "sIoU_gt": sIoU_gt_mean,
            "sIoU_pred": sIoU_pred_mean
        }
        print("Mean sIoU GT   :", sIoU_gt_mean)
        print("Mean sIoU PRED :", sIoU_pred_mean)
        for t in self.cfg.thresh_sIoU:
            tp = sum(r["tp_" + str(int(t * 100))] for r in frame_results)
            fn = sum(r["fn_" + str(int(t * 100))] for r in frame_results)
            fp = sum(r["fp_" + str(int(t * 100))] for r in frame_results)
            f1 = (2 * tp) / (2 * tp + fn + fp)
            if t in [0.25, 0.50, 0.75]:
                ag_results["tp_" + str(int(t * 100))] = tp
                ag_results["fn_" + str(int(t * 100))] = fn
                ag_results["fp_" + str(int(t * 100))] = fp
                ag_results["f1_" + str(int(t * 100))] = f1
            print("---sIoU thresh =", t)
            print("Number of TPs  :", tp)
            print("Number of FNs  :", fn)
            print("Number of FPs  :", fp)
            print("F1 score       :", f1)
            ag_results["tp_mean"] += tp
            ag_results["fn_mean"] += fn
            ag_results["fp_mean"] += fp
            ag_results["f1_mean"] += f1

        ag_results["tp_mean"] /= len(self.cfg.thresh_sIoU)
        ag_results["fn_mean"] /= len(self.cfg.thresh_sIoU)
        ag_results["fp_mean"] /= len(self.cfg.thresh_sIoU)
        ag_results["f1_mean"] /= len(self.cfg.thresh_sIoU)
        print("---sIoU thresh averaged")
        print("Number of TPs  :", ag_results["tp_mean"])
        print("Number of FNs  :", ag_results["fn_mean"])
        print("Number of FPs  :", ag_results["fp_mean"])
        print("F1 score       :", ag_results["f1_mean"])

        seg_info = ResultsInfo(
            method_name=method_name,
            dataset_name=dataset_name,
            **ag_results,
        )

        return seg_info

    def persistence_path_data(self, method_name, dataset_name):
        return DIR_OUTPUTS / self.name / 'data' / f'{self.name}Results_{method_name}_{dataset_name}.hdf5'

    def save(self,
             aggregated_result,
             method_name: str,
             dataset_name: str,
             path_override: Path = None):
        out_path = path_override or self.persistence_path_data(
            method_name, dataset_name)
        aggregated_result.save(out_path)

    def load(self,
             method_name: str,
             dataset_name: str,
             path_override: Path = None):
        out_path = path_override or self.persistence_path_data(
            method_name, dataset_name)
        return ResultsInfo.from_file(out_path)

    def fields_for_table(self):
        return [
            'sIoU_gt', 'sIoU_pred', 'fn_25', 'fp_25', 'f1_25', 'fn_50',
            'fp_50', 'f1_50', 'fn_75', 'fp_75', 'f1_75', 'f1_mean'
        ]

    def init(self, method_name, dataset_name):
        self.get_thresh_p_from_curve(method_name, dataset_name)

    def get_thresh_p_from_curve(self, method_name, dataset_name):
        out_path = DIR_OUTPUTS / "PixBinaryClass" / 'data' / f'PixClassCurve_{method_name}_{dataset_name}.hdf5'
        pixel_results = hdf5_read_hierarchy_from_file(out_path)
        if "best_f1_threshold" in pixel_results.keys():
            self.cfg.thresh_p = pixel_results.best_f1_threshold
        else:
            prc = pixel_results.curve_precision
            rec = pixel_results.curve_recall
            f1_scores = (2 * prc * rec) / (prc + rec)
            ix = np.nanargmax(f1_scores)
            self.cfg.thresh_p = float(pixel_results.thresholds[ix])
Example #35
0
"""
"""
import os
from easydict import EasyDict as ED


_BASE_DIR = os.path.dirname(os.path.abspath(__file__))

Cfg = ED()
Cfg.batch = 2
Cfg.subdivisions = 1
Cfg.width = 608
Cfg.height = 608
Cfg.channels = 3
Cfg.momentum = 0.949
Cfg.decay = 0.0005
Cfg.angle = 0
Cfg.saturation = 1.5
Cfg.exposure = 1.5
Cfg.hue = .1

Cfg.learning_rate = 0.00261
Cfg.burn_in = 1000
Cfg.max_batches = 500500
Cfg.steps = [60000, 80000]
Cfg.policy = Cfg.steps
Cfg.scales = .1, .1

Cfg.cutmix = 0
Cfg.mosaic = 1
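
In the darknet convention, steps and scales describe a piecewise learning-rate schedule: the base rate is multiplied by the matching scale once training passes each boundary (burn_in warm-up is ignored here). A sketch under that assumption, not part of the original config:

def learning_rate_at(step, cfg=Cfg):
    lr = cfg.learning_rate
    for boundary, scale in zip(cfg.steps, cfg.scales):
        if step >= boundary:
            lr *= scale
    return lr

# e.g. learning_rate_at(70000) -> 0.00261 * 0.1, after the first boundary
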
Example #36
0
    def testClippingOfProposals(self):
        """
        Test clipping of proposals before and after NMS
        """
        # Before NMS
        gt_boxes = np.array([
            [0, 0, 10, 12],
            [10, 10, 20, 22],
            [10, 10, 20, 22],
            [30, 25, 39, 39],
        ])
        all_anchors = np.array([
            [-20, -10, 12, 6],
            [2, -10, 20, 20],
            [0, 0, 12, 16],
            [2, -10, 20, 2],
        ])
        rpn_cls_prob = np.array([
            [0.3, 0.7],
            [0.4, 0.6],
            [0.3, 0.7],
            [0.1, 0.9],
        ])

        rpn_bbox_pred = np.array([  # This is set to zeros so when decode is
            [0, 0, 0, 0],  # applied in RPNProposal the anchors don't
            [0, 0, 0, 0],  # change, leaving us with unclipped
            [0, 0, 0, 0],  # proposals.
            [0, 0, 0, 0],
        ])
        config = EasyDict(self.config)
        config['clip_after_nms'] = False
        results_before = self._run_rpn_proposal(all_anchors,
                                                rpn_cls_prob,
                                                config,
                                                gt_boxes=gt_boxes,
                                                rpn_bbox_pred=rpn_bbox_pred)
        im_size = tf.placeholder(tf.float32, shape=(2, ))
        proposals = tf.placeholder(
            tf.float32, shape=(results_before['proposals_unclipped'].shape))
        clip_bboxes_tf = clip_boxes(proposals, im_size)

        with self.test_session() as sess:
            clipped_proposals = sess.run(
                clip_bboxes_tf,
                feed_dict={
                    proposals: results_before['proposals_unclipped'],
                    im_size: self.im_size
                })

        # Check we clip proposals right after filtering the invalid area ones.
        self.assertAllEqual(results_before['proposals'], clipped_proposals)

        # Checks all NMS proposals have values inside the image boundaries
        nms_proposals = results_before['nms_proposals'][:, 1:]
        self.assertTrue((nms_proposals >= 0).all())
        self.assertTrue(
            (nms_proposals < np.array(self.im_size + self.im_size)).all())

        # After NMS
        config['clip_after_nms'] = True
        results_after = self._run_rpn_proposal(all_anchors,
                                               rpn_cls_prob,
                                               config,
                                               gt_boxes=gt_boxes,
                                               rpn_bbox_pred=rpn_bbox_pred)
        im_size = tf.placeholder(tf.float32, shape=(2, ))
        proposals = tf.placeholder(
            tf.float32, shape=(results_after['proposals_unclipped'].shape))
        clip_bboxes_tf = clip_boxes(proposals, im_size)

        with self.test_session() as sess:
            clipped_proposals = sess.run(
                clip_bboxes_tf,
                feed_dict={
                    proposals: results_after['proposals_unclipped'],
                    im_size: self.im_size
                })

        # Check we don't clip proposals in the beginning of the function.
        self.assertAllEqual(results_after['proposals'],
                            results_after['proposals_unclipped'])

        nms_proposals = results_after['nms_proposals'][:, 1:]
        # Checks all NMS proposals have values inside the image boundaries
        self.assertTrue((nms_proposals >= 0).all())
        self.assertTrue(
            (nms_proposals < np.array(self.im_size + self.im_size)).all())
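
The test relies on clip_boxes to clamp (x_min, y_min, x_max, y_max) proposals to the image extent. A NumPy sketch of that operation, assuming im_size is (height, width); this is independent of the TensorFlow implementation exercised above:

import numpy as np

def clip_boxes_np(boxes, im_size):
    height, width = im_size
    boxes = boxes.copy().astype(float)
    boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, width - 1)   # x_min, x_max
    boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, height - 1)  # y_min, y_max
    return boxes
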
Example #37
0
def main():
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    # Use just the first GPU for demo
    context = [mx.gpu(int(config.gpus[0]))]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Get image dimensions
    width, height = Image.open(args.im_path).size

    # Pack image info
    roidb = [{'image': args.im_path, 'width': width, 'height': height, 'flipped': False}]

    # Creating the Logger
    logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)

    # Pack db info
    db_info = EasyDict()
    db_info.name = 'coco'
    db_info.result_path = 'data/demo'

    # Categories the detector trained for:
    db_info.classes = [u'BG', u'person', u'bicycle', u'car', u'motorcycle', u'airplane',
                       u'bus', u'train', u'truck', u'boat', u'traffic light', u'fire hydrant',
                       u'stop sign', u'parking meter', u'bench', u'bird', u'cat', u'dog', u'horse', u'sheep', u'cow',
                       u'elephant', u'bear', u'zebra', u'giraffe', u'backpack', u'umbrella', u'handbag', u'tie',
                       u'suitcase', u'frisbee', u'skis', u'snowboard', u'sports\nball', u'kite', u'baseball\nbat',
                       u'baseball glove', u'skateboard', u'surfboard', u'tennis racket', u'bottle', u'wine\nglass',
                       u'cup', u'fork', u'knife', u'spoon', u'bowl', u'banana', u'apple', u'sandwich', u'orange',
                       u'broccoli', u'carrot', u'hot dog', u'pizza', u'donut', u'cake', u'chair', u'couch',
                       u'potted plant', u'bed', u'dining table', u'toilet', u'tv', u'laptop', u'mouse', u'remote',
                       u'keyboard', u'cell phone', u'microwave', u'oven', u'toaster', u'sink', u'refrigerator', u'book',
                       u'clock', u'vase', u'scissors', u'teddy bear', u'hair\ndrier', u'toothbrush']
    db_info.num_classes = len(db_info.classes)

    # Create the model
    sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
    sym_inst = sym_def(n_proposals=400, test_nbatch=1)
    sym = sym_inst.get_symbol_rcnn(config, is_train=False)
    test_iter = MNIteratorTest(roidb=roidb, config=config, batch_size=1, nGPUs=1, threads=1,
                               crop_size=None, test_scale=config.TEST.SCALES[0],
                               num_classes=db_info.num_classes)
    # Create the module
    shape_dict = dict(test_iter.provide_data_single)
    sym_inst.infer_shape(shape_dict)
    mod = mx.mod.Module(symbol=sym,
                        context=context,
                        data_names=[k[0] for k in test_iter.provide_data_single],
                        label_names=None)
    mod.bind(test_iter.provide_data, test_iter.provide_label, for_training=False)

    # Initialize the weights
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix, config.TEST.TEST_EPOCH,
                                        convert=True, process=True)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # Create the tester
    tester = Tester(mod, db_info, roidb, test_iter, cfg=config, batch_size=1)

    # Sequentially do detection over scales
    # NOTE: if you want to perform detection on multiple images consider using main_test which is parallel and faster
    all_detections= []
    for s in config.TEST.SCALES:
        # Set tester scale
        tester.set_scale(s)
        # Perform detection
        all_detections.append(tester.get_detections(vis=False, evaluate=False, cache_name=None))

    # Aggregate results from multiple scales and perform NMS
    tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
    file_name, out_extension = os.path.splitext(os.path.basename(args.im_path))
    all_detections = tester.aggregate(all_detections, vis=True, cache_name=None, vis_path='./data/demo/',
                                          vis_name='{}_detections'.format(file_name), vis_ext=out_extension)
    return all_detections
Example #38
0
def degrade_latin(para,
        om_frac   = 0.1,
        com_frac  = 0.1,
        max_N_om  = 5,
        max_N_com = 5,
        ):
    '''
    'latin' because this tokenizes using str.split

    takes a (str)paragraph
    returns a (str)paragraph'
    with possible degradations (errors):
    - omissions (deletions)
    - commissions (alterations)

    arguments:
    om_frac, com_frac:
        fraction of items to alter,
        where the basis is the number of TOKENS
                        
    max_N_om, max_N_com:
        maximum whole number count of
        tokens to alter
    '''

    buf = para.split()
    ntoken_ = len(buf)

    # a convenience rendered version
    html_rep = copy.copy(buf)
    result = EasyDict(
        omission_index_list = [],
        commission_index_list = [],
    )

    OM_LIMIT = int(math.ceil(om_frac*ntoken_))
    COM_LIMIT = int(math.ceil(com_frac*ntoken_))

    # run omissions first
    ilist = range(ntoken_)
    random.shuffle(ilist)
    result.omission_index_list = list(z.take(min(OM_LIMIT, max_N_om), ilist))
    result.omission_index_list.sort()

    for i in reversed(result.omission_index_list):
        del buf[i]
        html_rep[i] = '<span class="deleted">%s</span>'%html_rep[i]

    # THIS HAS CHANGED!
    ntoken_ = len(buf)

    # create new index -> original index mapping
    imapping = dict((i,i) for i in range(ntoken_))
    for i_deleted in result.omission_index_list:
        for i_inc in range(i_deleted, ntoken_):
            imapping[i_inc] += 1

    # then run commissions
    ilist = range(ntoken_)
    random.shuffle(ilist)
    com_idx_list = z.take(min(COM_LIMIT, max_N_com), ilist)
    for i in reversed(sorted(com_idx_list)):
        token = buf[i]
        j_degrade = random.randint(0, len(token)-1)
        while True:
            ch = random.choice(string.ascii_lowercase)
            if ch != token[j_degrade]:
                break
        buf[i] = token[:j_degrade]+ch+token[j_degrade+1:]
        original_index = imapping[i]
        #result.commission_index_list.append(original_index)
        html_rep[original_index] = \
                token[:j_degrade] + \
                '<span class="altered">%s</span>'%ch + \
                token[j_degrade+1:]

    result.text = ' '.join(buf)
    result.html_representation = ' '.join(html_rep)
    return result
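
A usage sketch of the function above (note the snippet is Python 2 style: range(...) is shuffled in place, and z is assumed to be a toolz-like module providing take):

para = "lorem ipsum dolor sit amet consectetur adipiscing elit"
out = degrade_latin(para, om_frac=0.2, com_frac=0.2)
print(out.text)                   # degraded paragraph
print(out.omission_index_list)    # token indices that were dropped
print(out.html_representation)    # markup highlighting deletions and alterations
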
Example #39
0
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.

import os
from easydict import EasyDict
import numpy as np

_C = EasyDict()
cfg = _C
# data augument config
_C.expand_prob = 0.5
_C.expand_max_ratio = 2
_C.hue_prob = 0.5
_C.hue_delta = 18
_C.contrast_prob = 0.5
_C.contrast_delta = 0.5
_C.saturation_prob = 0.5
_C.saturation_delta = 0.5
_C.brightness_prob = 0.5
_C.brightness_delta = 0.125
_C.data_anchor_sampling_prob = 0.5
_C.min_face_size = 1.0
_C.apply_distort = True
_C.apply_expand = False
_C.img_mean = np.array([104., 117., 123.])[:, np.newaxis,
                                           np.newaxis].astype('float32')
_C.resize_width = 320
_C.resize_height = 320
_C.scale = 1 / 127.0
_C.anchor_sampling = True
_C.filter_min_face = True
Example #40
0
def main():
    parser = buildArgParse()
    args = parser.parse_args()

    loguruInitialize(args.log_dir, args.data_workspace.split('/')[2])
    logger.info("------------------- Main start ------------------")

    use_gpu = torch.cuda.is_available()

    train_dataset, val_dataset, num_classes, attr_name, loss_weight = utils.GetDataset(
        args.data_workspace, args.dataset)

    # _model = model.inception_iccv(pretrained=True, num_classes=num_classes)
    # _model = model.PartBaseConvolution()
    _model = model.TopBDNet(num_classes=num_classes,
                            neck=True,
                            double_bottleneck=True,
                            drop_bottleneck_features=True)

    criterion = model.WeightedBinaryCrossEntropy(loss_weight)

    if args.optimizer == 'adam':
        optimizer = torch.optim.Adam(params=_model.parameters(),
                                     lr=args.lr,
                                     betas=(0.9, 0.999),
                                     weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.SGD(params=_model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

    state = EasyDict()
    state.batch_size = args.batch_size
    state.lr = args.lr
    state.workers = args.workers
    state.epoch_step = args.epoch_step
    state.save_model_path = args.save_model_path
    state.use_gpu = use_gpu
    state.print_freq = args.print_freq
    # Workaround
    state.attr_name = attr_name
    state.attr_num = num_classes

    engine = framework.TrainingEngine(state, )
    engine.learning(_model, criterion, train_dataset, val_dataset, optimizer)
Example #41
0
from easydict import EasyDict
import os

config = EasyDict()

config.DEBUG = True

config.DATABASE_PATH = '/home/stephen/dataset/Reuters'

config.ROOT = '..' + os.sep

# WEIGHT_TYPE supports three weighting schemes: tf-idf, wf and ntf
config.WEIGHT_TYPE = 'wf-idf'

config.PICKLE_PROTOCOL = 0

config.ID_HTML_FILE = './data/id_html.pkl'

config.TIERED_INDEX_FILE = './data/tiered_index.pkl'

config.PARA_TOP_K = 10

config.THRESHOLD = [5, 1]

config.STOP_WORDS = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now']
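
For reference, the weighting schemes named in WEIGHT_TYPE follow the usual IR definitions, with wf being the sublinearly scaled term frequency; a sketch under that assumption (this helper is not part of the original config):

import math

def wf_idf(tf, df, n_docs):
    wf = 1 + math.log(tf) if tf > 0 else 0               # sublinear term-frequency weight
    idf = math.log(n_docs / float(df)) if df else 0.0    # inverse document frequency
    return wf * idf
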
Example #42
0
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import yaml
from easydict import EasyDict
import pickle
import anchors_box as abx
import tensorflow.contrib.slim as slim

from rcnn import roi_pool_features, rcnn_targets, rcnn_loss, rcnn_proposals

from model import resnet_v1_101_base, resnet_rcnn, base_network, get_rpn_preds, smooth_l1_loss, get_rpn_proposals, get_rpn_targets, rcnn_network
#generate_anchors, generate_anchors_reference, get_box_dim_center, get_box_from_deltas, clip_boxes
config = EasyDict(yaml.load(open('base_config.yml', 'r')))

dataset = pickle.load(open('../pickle_data/train_data_boxes.p', 'rb'))
images = dataset['images']
boxes = dataset['boxes']
debug_mode = True
num_epochs = 3
learning_rate = 1e-3
base_size = 64
aspect_ratios = [0.5, 1, 2, 3]
scales = [0.5, 1, 1.5]
num_anchors = 12
num_classes = 4
feature_map_shape = (1, 14, 14, 128)
MEAN = [123.68, 116.78, 103.94]
anchors_stride = config.model.anchors.stride  #16
anchors_stride = 16  #16
restore_model = False
Example #43
0
def main(config="config/config.py", experiment_name="default", world_size=1, local_rank=-1):
    """Main function for the training script.

    KeywordArgs:
        config (str): Path to config file.
        experiment_name (str): Custom name for the experiment, only used in tensorboard.
        world_size (int): Number of total subprocesses in distributed training. 
        local_rank: Rank of the process. Should not be manually assigned. 0-N for ranks in distributed training (only process 0 will print info and perform testing). -1 for single training. 
    """

    ## Get config
    cfg = cfg_from_file(config)

    ## Collect distributed(or not) information
    cfg.dist = EasyDict()
    cfg.dist.world_size = world_size
    cfg.dist.local_rank = local_rank
    is_distributed = local_rank >= 0 # local_rank < 0 -> single training
    is_logging     = local_rank <= 0 # only log and test with main process
    is_evaluating  = local_rank <= 0

    ## Set up the writer only in the logging process (local_rank <= 0)
    recorder_dir = os.path.join(cfg.path.log_path, experiment_name + f"config={config}")
    if is_logging: # writer exists only in the main process (single training or rank 0)
        ## Clean up the dir if it exists before
        if os.path.isdir(recorder_dir):
            os.system("rm -r {}".format(recorder_dir))
            print("clean up the recorder directory of {}".format(recorder_dir))
        writer = SummaryWriter(recorder_dir)

        ## Record config object using pprint
        import pprint

        formatted_cfg = pprint.pformat(cfg)
        writer.add_text("config.py", formatted_cfg.replace(' ', '&nbsp;').replace('\n', '  \n')) # add space for markdown style in tensorboard text
    else:
        writer = None

    ## Set up GPU and distribution process
    gpu = min(local_rank if is_distributed else cfg.trainer.gpu, torch.cuda.device_count() - 1)
    torch.backends.cudnn.benchmark = getattr(cfg.trainer, 'cudnn', False)
    torch.cuda.set_device(gpu)
    if is_distributed:
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
    print(local_rank)
 
    ## define datasets and dataloader.
    dataset_train = DATASET_DICT[cfg.data.train_dataset](cfg)
    dataset_val = DATASET_DICT[cfg.data.val_dataset](cfg, "validation")

    dataloader_train = DataLoader(dataset_train, num_workers=cfg.data.num_workers,
                                  batch_size=cfg.data.batch_size, collate_fn=dataset_train.collate_fn, shuffle=local_rank<0, drop_last=True,
                                  sampler=torch.utils.data.DistributedSampler(dataset_train, num_replicas=world_size, rank=local_rank, shuffle=True) if local_rank >= 0 else None)
    dataloader_val = DataLoader(dataset_val, num_workers=cfg.data.num_workers,
                                batch_size=cfg.data.batch_size, collate_fn=dataset_val.collate_fn, shuffle=False, drop_last=True)

    ## Create the model
    detector = DETECTOR_DICT[cfg.detector.name](cfg.detector)

    ## Load old model if needed
    old_checkpoint = getattr(cfg.path, 'pretrained_checkpoint', None)
    if old_checkpoint is not None:
        state_dict = torch.load(old_checkpoint, map_location='cpu')
        detector.load_state_dict(state_dict)

    ## Convert to cuda
    if is_distributed:
        detector = torch.nn.SyncBatchNorm.convert_sync_batchnorm(detector)
        detector = torch.nn.parallel.DistributedDataParallel(detector.cuda(), device_ids=[gpu], output_device=gpu)
    else:
        detector = detector.cuda()
    detector.train()

    ## Record basic information of the model
    if is_logging:
        string1 = detector.__str__().replace(' ', '&nbsp;').replace('\n', '  \n')
        writer.add_text("model structure", string1) # add space for markdown style in tensorboard text
        num_parameters = get_num_parameters(detector)
        print(f'number of trained parameters of the model: {num_parameters}')
    
    ## define optimizer and weight decay
    optimizer = optimizers.build_optimizer(cfg.optimizer, detector)

    ## define scheduler
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, cfg.trainer.max_epochs, cfg.optimizer.lr_target)
    scheduler_config = getattr(cfg, 'scheduler', None)
    scheduler = schedulers.build_scheduler(scheduler_config, optimizer)
    is_iter_based = getattr(scheduler_config, "is_iter_based", False)

    ## define loss logger
    training_loss_logger =  LossLogger(writer, 'train') if is_logging else None

    ## training pipeline
    if 'training_func' in cfg.trainer:
        training_dection = PIPELINE_DICT[cfg.trainer.training_func]
    else:
        raise KeyError

    ## Get evaluation pipeline
    if 'evaluate_func' in cfg.trainer:
        evaluate_detection = PIPELINE_DICT[cfg.trainer.evaluate_func]
        print("Found evaluate function {}".format(cfg.trainer.evaluate_func))
    else:
        evaluate_detection = None
        print("Evaluate function not found")


    ## timer is used to estimate eta
    timer = Timer()

    print('Num training images: {}'.format(len(dataset_train)))

    global_step = 0

    for epoch_num in range(cfg.trainer.max_epochs):
        ## Start training for one epoch
        detector.train()
        if training_loss_logger:
            training_loss_logger.reset()
        for iter_num, data in enumerate(dataloader_train):
            training_dection(data, detector, optimizer, writer, training_loss_logger, global_step, cfg)

            global_step += 1

            if is_iter_based:
                scheduler.step()

            if is_logging and global_step % cfg.trainer.disp_iter == 0:
                ## Log loss, print out and write to tensorboard in main process
                log_str = 'Epoch: {} | Iteration: {}  | Running loss: {:1.5f} | eta:{}'.format(
                        epoch_num, iter_num, training_loss_logger.loss_stats['total_loss'].avg,
                        timer.compute_eta(global_step, len(dataloader_train) * cfg.trainer.max_epochs))
                print(log_str, end='\r')
                writer.add_text("training_log/train", log_str, global_step)
                training_loss_logger.log(global_step)

        if not is_iter_based:
            scheduler.step()

        ## save model in main process if needed
        if is_logging:
            torch.save(detector.module.state_dict() if is_distributed else detector.state_dict(), os.path.join(
                cfg.path.checkpoint_path, '{}_latest.pth'.format(
                    cfg.detector.name)
                )
            )
        if is_logging and (epoch_num + 1) % cfg.trainer.save_iter == 0:
            torch.save(detector.module.state_dict() if is_distributed else detector.state_dict(), os.path.join(
                cfg.path.checkpoint_path, '{}_{}.pth'.format(
                    cfg.detector.name,epoch_num)
                )
            )

        ## test model in main process if needed
        if is_evaluating and evaluate_detection is not None and cfg.trainer.test_iter > 0 and (epoch_num + 1) % cfg.trainer.test_iter == 0:
            print("\n/**** start testing after training epoch {} ******/".format(epoch_num))
            evaluate_detection(cfg, detector.module if is_distributed else detector, dataset_val, writer, epoch_num)
            print("/**** finish testing after training epoch {} ******/".format(epoch_num))

        if is_distributed:
            torch.distributed.barrier() # wait until all processes finish the epoch

        if is_logging:
            writer.flush()
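
As the docstring notes, local_rank stays at -1 for a plain single-GPU run and is assigned per process by the distributed launcher; a sketch of the two call patterns (argument values are illustrative):

# single training: local_rank defaults to -1, so is_distributed is False
main(config="config/config.py", experiment_name="baseline")

# distributed training: the launcher spawns world_size processes, each with its own rank
# main(config="config/config.py", world_size=4, local_rank=rank)
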
Example #44
0
# for debug
# MAX_STEPS = 10
# BATCH_SIZE = 31
# SAVE_STEPS = 2
# TEST_STEPS = 10
# SUMMARISE_STEPS = 2
# IS_DEBUG = True

PRE_PROCESSOR = Sequence([
    Resize(size=IMAGE_SIZE),
    DivideBy255()
])
POST_PROCESSOR = None

NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
step_per_epoch = int(50000 / BATCH_SIZE)
NETWORK.LEARNING_RATE_KWARGS = {
    "values": [0.01, 0.001, 0.0001, 0.00001],
    "boundaries": [step_per_epoch * 50, step_per_epoch * 100, step_per_epoch * 150],
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
    'bit': 2,
Example #45
0
import os
from easydict import EasyDict

"""
This file contains the setting for running ActivityNet related experiments.
"""

ANET_CFG = EasyDict()
ANET_CFG.ANET_HOME = os.getenv("ANET_HOME",None)
if ANET_CFG.ANET_HOME is None:
    raise ValueError("To use this package, "
                     "set the environmental variable \"ANET_HOME\" to the root director of the codebase")


# Version and other macro settings

ANET_CFG.DB_VERSIONS = {
    '1.2': 'data/activity_net.v1-2.min.json',
    '1.3': 'data/activity_net.v1-3.min.json'
}

# Force the leaf node to be included in the label list
ANET_CFG.FORCE_INCLUDE = {"1.3": [], "1.2": []}

# Acceptable extension of the video files
ANET_CFG.ACC_EXT = {'.mp4', '.webm', '.avi', '.mkv'}

# File name pattern of the video files
ANET_CFG.SRC_ID_LEN = 11 # length of youtube IDs

# Max length of video, -1 for unlimited
# )

config = EasyDict({
    'mode': 'fed',
    'data': {
        'train_df_path': 'train_resized.csv',
        'val_df_path': 'val_resized.csv',
        'img_size': (256, 256, 3),
        'batch_size': 10,
        'n_classes': 12,
        'client_column': 'shard_non_iid',
    },
    'train': {
        'learning_rate': 1e-3,
        'epochs': 5,
        'client_fraction': 0.2,
        'num_clients': 10,
        'num_rounds': 1000,
        'decay': 0.99
    },
    'log': {
        'path': './results/02-fed-non-iid',
        'update_freq': 5
    },

    #     'resume': {
    #         'path': './results/01-fed-non-iid'
    #     }
})

# In[98]:
Example #47
0
from easydict import EasyDict

__C = EasyDict()

cfg = __C

__C.MULTI = 0.1

__C.URL = 'http://0.0.0.0:5000/fulldetect'
Example #48
0
def main():
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)

    # Use just the first GPU for demo
    context = [mx.gpu(int(config.gpus[0]))]

    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)

    # Get image dimensions
    width, height = Image.open(args.im_path).size

    # Pack image info
    roidb = [{
        'image': args.im_path,
        'width': width,
        'height': height,
        'flipped': False
    }]

    # Creating the Logger
    print config.output_path
    logger, output_path = create_logger(config.output_path, args.cfg,
                                        config.dataset.image_set)

    # Pack db info
    db_info = EasyDict()
    db_info.name = 'coco'
    db_info.result_path = 'data/demo'

    # Categories the detector trained for:
    db_info.classes = [u'BG', u'person', u'bicycle', u'car', u'motorcycle']

    db_info.num_classes = len(db_info.classes)
    print db_info.num_classes

    # Create the model
    sym_def = eval('{}.{}'.format(config.symbol, config.symbol))
    sym_inst = sym_def(n_proposals=400, test_nbatch=1)
    sym = sym_inst.get_symbol_rpn_ugly(config, is_train=False)
    test_iter = MNIteratorTest(roidb=roidb,
                               config=config,
                               batch_size=1,
                               nGPUs=1,
                               threads=1,
                               crop_size=None,
                               test_scale=config.TEST.SCALES[0],
                               num_classes=db_info.num_classes)
    # Create the module
    shape_dict = dict(test_iter.provide_data_single)
    sym_inst.infer_shape(shape_dict)
    mod = mx.mod.Module(
        symbol=sym,
        context=context,
        data_names=[k[0] for k in test_iter.provide_data_single],
        label_names=None)
    mod.bind(test_iter.provide_data,
             test_iter.provide_label,
             for_training=False)

    # Initialize the weights
    print output_path, args.save_prefix, config.TEST.TEST_EPOCH
    model_prefix = os.path.join(output_path, args.save_prefix)
    arg_params, aux_params = load_param(model_prefix,
                                        config.TEST.TEST_EPOCH,
                                        convert=True,
                                        process=True)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # Create the tester
    tester = Tester(mod, db_info, roidb, test_iter, cfg=config, batch_size=1)

    # Sequentially do detection over scales
    # NOTE: if you want to perform detection on multiple images consider using main_test which is parallel and faster
    all_detections = []
    #    config.TEST.SCALES = [(1400, 2000), (800, 1280), (480, 512)]
    #    config.TEST.VALID_RANGES = [(-1, -1), (32, 180), (75, -1)]
    print config.TEST.SCALES, config.TEST.VALID_RANGES
    for s in config.TEST.SCALES:
        # Set tester scale
        tester.set_scale(s)
        # Perform detection
        #all_detections.append(tester.get_detections(vis=False, evaluate=False, cache_name=None))
        all_detections.append(
            tester.extract_proposals(vis=False, cache_name='./'))

    # Aggregate results from multiple scales and perform NMS
    tester = Tester(None, db_info, roidb, None, cfg=config, batch_size=1)
    file_name, out_extension = os.path.splitext(os.path.basename(args.im_path))
    #    all_detections = tester.aggregate(all_detections, vis=True, cache_name=None, vis_path='./data/demo/',
    #                                          vis_name='{}_detections'.format(file_name), vis_ext=out_extension)
    all_detections = tester.aggregate_rpn(
        all_detections,
        vis=True,
        cache_name=None,
        vis_path='./data/demo/',
        vis_name='{}_detections'.format(file_name),
        vis_ext=out_extension)
    return all_detections