Example #1
import os
from datetime import datetime

import numpy as np
import torch

from configs.mrcnn_config import init_config
from configs.config import Config
from configs.args import args
from datasets.data_generator import DataGenerator
from tensorboardX import SummaryWriter
# Maskrcnn_logger (the SummaryWriter wrapper used below) is part of the project;
# its import path is not shown in this excerpt.

# set random seed
np.random.seed(1)
torch.manual_seed(1)
torch.random.manual_seed(1)
torch.cuda.random.manual_seed_all(1)
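
# Optional (not in the original snippet): for fully reproducible runs, cuDNN's
# algorithm selection can additionally be pinned.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False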

# initialize the configuration
if not isinstance(args.extra_config_fns, (list, tuple)):
    args.extra_config_fns = [args.extra_config_fns]
config_fns = [args.base_config_fn]
config_fns.extend(args.extra_config_fns)
init_config(config_fns, args, is_display=True)
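
# Quick check (not part of the original): the merged configuration is exposed
# through the attribute-style Config object; these fields are used further
# below in these examples.
print('log name:', Config.LOG.NAME)
print('batch size:', Config.TRAINING.BATCH_SIZE)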



# Specify the log path and checkpoint location.
# A ./log dir is created in the project root; every experiment's training
# information is saved under ./log.
# Each experiment has its unique name: log_name + TIMESTAMP.
# Checkpoints are saved in ./log/log_name_TIMESTAMP/checkpoints.
TIMESTAMP = "_{0:%Y-%m-%d-%H-%M-%S/}".format(datetime.now())
log_dir = os.path.join('./log', Config.LOG.NAME)
log_dir += TIMESTAMP
writer = SummaryWriter(log_dir)
mrcnn_writer = Maskrcnn_logger(writer)
checkpoint_dir = os.path.join(log_dir, 'checkpoints')
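
# A minimal sketch (not part of the original example) of how the checkpoint
# directory prepared above is typically used; the dict contents below are
# placeholders, not the project's actual training state.
os.makedirs(checkpoint_dir, exist_ok=True)
dummy_state = {'epoch': 0, 'model_state_dict': {}, 'optimizer_state_dict': {}}
torch.save(dummy_state, os.path.join(checkpoint_dir, 'ckpt_epoch_0.pth'))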
Example #2
import os

import torch

from configs.mrcnn_config import init_config
from configs.config import Config
from configs.args import args
from libs.networks.model_component.anchors import generate_pyramid_anchors
from datasets.wisdom.wisdomDataset import ImageDataset
from datasets.data_generator import DataGenerator
# MaskRCNN (instantiated further below) comes from the project's model package;
# its exact import path is not shown in this excerpt.

if __name__ == '__main__':

    # Configurations: the base config plus a dataset-specific config
    # conf_fns = ['./configs/base_config.yml', './datasets/wisdom/wisdomConfig.yml']
    conf_fns = [
        './configs/base_config.yml', './datasets/wisdom/wisdomInference.yml'
    ]
    init_config(conf_fns)
    dataset_train = ImageDataset('train')
    anchors = generate_pyramid_anchors(
        Config.RPN.ANCHOR.SCALES, Config.RPN.ANCHOR.RATIOS,
        Config.BACKBONE.SHAPES, Config.BACKBONE.STRIDES,
        Config.RPN.ANCHOR.STRIDE, Config.TRAINING.BATCH_SIZE).to(Config.DEVICE)
    train_set = DataGenerator(dataset_train,
                              augmentation=None,
                              anchors=anchors[0])
    print(anchors[0].shape)
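
    # Not in the original: peek at the first few generated anchors. In the
    # usual Mask R-CNN convention each row is a (y1, x1, y2, x2) box, but
    # verify against this project's anchors implementation.
    print(anchors[0][:5])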
    for step, inputs in enumerate(train_set):
        print('train set load', len(train_set))
        print('train inputs',
              '\nimage:', inputs[0].shape,
              '\nimage_metas:', inputs[1],
              '\nrpn_match:', inputs[2].shape,
              '\nrpn_bbox.shape:', inputs[3].shape,
              '\ngt_class_ids:', inputs[4][:20],
              '\ngt_boxes:', inputs[5].shape,
              '\ngt_mask.shape:', inputs[6].shape)

# Pretrained-weight loading, presumably the body of MaskRCNN.load_pretrain
# (it is invoked as net.load_pretrain(...) below); the signature is assumed.
def load_pretrain(self, filepath):
    # Head layers whose shapes depend on the number of classes; skip their
    # weights when loading a generic pretrained checkpoint.
    EXCLUDE = ['classifier.linear_class.weight',
               'classifier.linear_class.bias',
               'classifier.linear_bbox.weight',
               'classifier.linear_bbox.bias',
               'mask.conv5.weight',
               'mask.conv5.bias']
    if os.path.exists(filepath):
        state_dict = torch.load(filepath)
        # drop the excluded keys, then load non-strictly so the skipped heads
        # keep their freshly initialized weights
        state_dict = {key: value for key, value in state_dict.items()
                      if key not in EXCLUDE}
        self.load_state_dict(state_dict, strict=False)


if __name__ == "__main__":
    from configs.mrcnn_config import init_config
    init_config(['./configs/base_config.yml'])
    net = MaskRCNN(backbone='resnet50')
    net.load_pretrain('./pretrained_model/resnet50_imagenet.pth')

    B = Config.TRAINING.BATCH_SIZE
    B, H, W = 2, 768, 576  # overrides the configured batch size for this quick test
    num_anchors = 1000
    num_pos_roi = 128
    num_neg_roi = 128
    images = torch.randn((B, 3, H, W))
    rpn_match = torch.zeros((B, num_anchors))
    rpn_match[:, :num_pos_roi] = 1
    rpn_match[:, -num_neg_roi:] = -1
    rpn_bbox = torch.randn((B, num_anchors, 4))
    gt_class_ids = torch.ones((B, num_anchors))
    gt_boxes = torch.ones((B, num_anchors, 4))
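
    # Quick sanity check (not in the original excerpt): count the positive,
    # negative, and neutral labels in the synthetic rpn_match tensor above.
    print('positive anchors:', (rpn_match == 1).sum().item())
    print('negative anchors:', (rpn_match == -1).sum().item())
    print('neutral anchors:', (rpn_match == 0).sum().item())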