示例#1
0
 def __init__(self, args, config):
     """Set up the detection demo: resolve cache/model directories, load the
     trained DNN from the latest checkpoint, move it to GPU when available,
     and prepare video capture, writer and preprocessing transforms.

     Args:
         args: parsed command-line namespace (colors, thickness, keys, ...).
         config: configparser.ConfigParser with model/image/transform sections.
     """
     self.args = args
     self.config = config
     self.cache_dir = utils.get_cache_dir(config)
     self.model_dir = utils.get_model_dir(config)
     # category names come from the cache dir when it exists, else from config
     self.category = utils.get_category(config, self.cache_dir if os.path.exists(self.cache_dir) else None)
     self.draw_bbox = utils.visualize.DrawBBox(self.category, colors=args.colors, thickness=args.thickness)
     self.anchors = torch.from_numpy(utils.get_anchors(config)).contiguous()
     # 'image size' config value is "H W"
     self.height, self.width = tuple(map(int, config.get('image', 'size').split()))
     # locate the most recent checkpoint in the model directory
     self.path, self.step, self.epoch = utils.train.load_model(self.model_dir)
     # map_location keeps GPU-trained weights loadable on CPU-only machines
     state_dict = torch.load(self.path, map_location=lambda storage, loc: storage)
     self.dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), self.anchors, len(self.category))
     self.dnn.load_state_dict(state_dict)
     self.inference = model.Inference(config, self.dnn, self.anchors)
     self.inference.eval()
     if torch.cuda.is_available():
         self.inference.cuda()
     # log the model's parameter footprint in human-readable form
     logging.info(humanize.naturalsize(sum(var.cpu().numpy().nbytes for var in self.inference.state_dict().values())))
     self.create_cap()
     self.create_cap_size()
     self.writer = self.create_writer()
     self.keys = set(args.keys)
     # preprocessing pipeline: resize -> image transform -> tensor transform
     self.resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
     self.transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
     self.transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
示例#2
0
 def get_loader(self):
     """Build the evaluation DataLoader from the cached, pickled phases.

     Reads the phases listed in config [eval] phase, loads their pickle
     caches, and wraps them with the eval-time collate function.
     """
     phases = self.config.get('eval', 'phase').split()
     pickle_paths = [os.path.join(self.cache_dir, phase + '.pkl') for phase in phases]
     dataset = utils.data.Dataset(utils.data.load_pickles(pickle_paths))
     logging.info('num_examples=%d' % len(dataset))
     image_size = tuple(map(int, self.config.get('image', 'size').split()))
     # worker count: explicit config value, falling back to the CPU count
     try:
         num_workers = self.config.getint('data', 'workers')
     except configparser.NoOptionError:
         num_workers = multiprocessing.cpu_count()
     resize = transform.parse_transform(self.config, self.config.get('transform', 'resize_eval'))
     image_transform = transform.get_transform(self.config, self.config.get('transform', 'image_test').split())
     tensor_transform = transform.get_transform(self.config, self.config.get('transform', 'tensor').split())
     collate = utils.data.Collate(resize, [image_size], transform_image=image_transform, transform_tensor=tensor_transform)
     return torch.utils.data.DataLoader(dataset,
                                        batch_size=self.args.batch_size,
                                        num_workers=num_workers,
                                        collate_fn=collate)
示例#3
0
def main():
    """Load config/logging, run the exported Caffe2 model on a test image,
    and log a checksum of the result for cross-framework comparison."""
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        # safe_load: a logging config needs no arbitrary-object YAML tags,
        # and yaml.load without an explicit Loader is deprecated/unsafe
        logging.config.dictConfig(yaml.safe_load(f))
    model_dir = utils.get_model_dir(config)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    resize = transform.parse_transform(config, config.get('transform', 'resize_test'))
    transform_image = transform.get_transform(config, config.get('transform', 'image_test').split())
    transform_tensor = transform.get_transform(config, config.get('transform', 'tensor').split())
    # load image
    image_bgr = cv2.imread('image.jpg')
    image_resized = resize(image_bgr, height, width)
    image = transform_image(image_resized)
    tensor = transform_tensor(image).unsqueeze(0)
    # Caffe2: deserialize the exported init/predict nets and build a predictor
    init_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'init_net.pb'), 'rb') as f:
        init_net.ParseFromString(f.read())
    predict_net = caffe2_pb2.NetDef()
    with open(os.path.join(model_dir, 'predict_net.pb'), 'rb') as f:
        predict_net.ParseFromString(f.read())
    p = workspace.Predictor(init_net, predict_net)
    results = p.run([tensor.numpy()])
    logging.info(utils.abs_mean(results[0]))
    # tobytes() replaces the deprecated ndarray.tostring() alias
    logging.info(hashlib.md5(results[0].tobytes()).hexdigest())
示例#4
0
 def __init__(self, args, config):
     """Set up pose-estimation inference: load the model (Caffe2 export or
     native PyTorch checkpoint), choose drawing helpers, and prepare the
     video capture plus preprocessing transforms.

     Args:
         args: parsed command-line namespace (debug, caffe, colors,
             thickness, keys, ...).
         config: configparser.ConfigParser with model/image/transform
             sections.
     """
     self.args = args
     self.config = config
     self.device = torch.device(
         'cuda' if torch.cuda.is_available() else 'cpu')
     self.cache_dir = utils.get_cache_dir(config)
     self.model_dir = utils.get_model_dir(config)
     _, self.num_parts = utils.get_dataset_mappers(config)
     self.limbs_index = utils.get_limbs_index(config)
     if args.debug is None:
         # normal mode: draw the clustered keypoint results
         self.draw_cluster = utils.visualize.DrawCluster(
             colors=args.colors, thickness=args.thickness)
     else:
         # debug mode: visualize one feature map selected by a spec like
         # "<stage><name><channel>", e.g. "0parts3"
         self.draw_feature = utils.visualize.DrawFeature()
         s = re.search('(-?[0-9]+)([a-z]+)(-?[0-9]+)', args.debug)
         stage = int(s.group(1))
         name = s.group(2)
         channel = int(s.group(3))
         self.get_feature = lambda outputs: outputs[stage][name][0][channel]
     self.height, self.width = tuple(
         map(int,
             config.get('image', 'size').split()))
     if args.caffe:
         # run through the exported Caffe2 predictor instead of PyTorch
         init_net = caffe2_pb2.NetDef()
         with open(os.path.join(self.model_dir, 'init_net.pb'), 'rb') as f:
             init_net.ParseFromString(f.read())
         predict_net = caffe2_pb2.NetDef()
         with open(os.path.join(self.model_dir, 'predict_net.pb'),
                   'rb') as f:
             predict_net.ParseFromString(f.read())
         p = workspace.Predictor(init_net, predict_net)
         # the predictor returns a flat [parts0, limbs0, parts1, limbs1, ...]
         # list; pair them up into per-stage dicts to mirror the PyTorch API
         self.inference = lambda tensor: [{
             'parts': torch.from_numpy(parts),
             'limbs': torch.from_numpy(limbs)
         } for parts, limbs in zip(
             *[iter(p.run([tensor.detach().cpu().numpy()]))] * 2)]
     else:
         # native PyTorch path: restore the checkpoint and wrap in Inference
         self.step, self.epoch, self.dnn, self.stages = self.load()
         self.inference = model.Inference(config, self.dnn, self.stages)
         self.inference.eval()
         if torch.cuda.is_available():
             self.inference.cuda()
         # log the parameter footprint in human-readable form
         logging.info(
             humanize.naturalsize(
                 sum(var.cpu().numpy().nbytes
                     for var in self.inference.state_dict().values())))
     self.cap = self.create_cap()
     self.keys = set(args.keys)
     # preprocessing pipeline: resize -> image transform -> tensor transform
     self.resize = transform.parse_transform(
         config, config.get('transform', 'resize_test'))
     self.transform_image = transform.get_transform(
         config,
         config.get('transform', 'image_test').split())
     self.transform_tensor = transform.get_transform(
         config,
         config.get('transform', 'tensor').split())
def main():
    """Load the configured DNN checkpoint, run it on a test image, and print
    per-tensor checksums (state dict, inputs, output) for parity debugging."""
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        # safe_load: a logging config needs no arbitrary-object YAML tags,
        # and yaml.load without an explicit Loader is deprecated/unsafe
        logging.config.dictConfig(yaml.safe_load(f))
    cache_dir = utils.get_cache_dir(config)
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(
        config, cache_dir if os.path.exists(cache_dir) else None)
    anchors = utils.get_anchors(config)
    anchors = torch.from_numpy(anchors).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    # map_location keeps GPU-trained weights loadable on CPU-only machines
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(
        config, state_dict), anchors, len(category))
    dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    resize = transform.parse_transform(config,
                                       config.get('transform', 'resize_test'))
    transform_image = transform.get_transform(
        config,
        config.get('transform', 'image_test').split())
    transform_tensor = transform.get_transform(
        config,
        config.get('transform', 'tensor').split())
    # load image
    image_bgr = cv2.imread('image.jpg')
    image_resized = resize(image_bgr, height, width)
    image = transform_image(image_resized)
    tensor = transform_tensor(image).unsqueeze(0)
    # Checksum: one line per parameter so two runs can be diffed easily;
    # tobytes() replaces the deprecated ndarray.tostring() alias
    for key, var in dnn.state_dict().items():
        a = var.cpu().numpy()
        print('\t'.join(
            map(str, [
                key, a.shape,
                utils.abs_mean(a),
                hashlib.md5(a.tobytes()).hexdigest()
            ])))
    # torch.no_grad() replaces the removed Variable(volatile=True) idiom
    with torch.no_grad():
        output = dnn(tensor).data
    for key, a in [
        ('image_bgr', image_bgr),
        ('image_resized', image_resized),
        ('tensor', tensor.cpu().numpy()),
        ('output', output.cpu().numpy()),
    ]:
        print('\t'.join(
            map(str, [
                key, a.shape,
                utils.abs_mean(a),
                hashlib.md5(a.tobytes()).hexdigest()
            ])))
示例#6
0
def get_stereo_transform():
    """Compute the rigid transform between the left and right endoscope
    camera frames from previously saved chessboard correspondences.

    Returns:
        TL_R: the transform mapping left-camera points into the
            right-camera frame, as produced by transform.get_transform.
    """
    endoscope_chesspts = list(read_camera.load_all('../calibration/endoscope_chesspts.p'))
    camera_info = list(read_camera.load_all('../camera_data/camera_info.p'))
    left_chesspts = np.matrix(list(read_camera.load_all('../camera_data/left_chesspts'))[0])
    right_chesspts = np.matrix(list(read_camera.load_all('../camera_data/right_chesspts'))[0])

    # Lift the 2D chessboard points to 3D with z=0. Size the zero column from
    # the data rather than hard-coding 25 rows, so boards with a different
    # number of corners work too (hstack requires matching row counts).
    z = np.zeros((left_chesspts.shape[0], 1))
    left_chesspts = np.hstack((left_chesspts, z))
    right_chesspts = np.hstack((right_chesspts, z))

    TL_R = transform.get_transform("Left Camera", "Right Camera", left_chesspts, right_chesspts, verbose=False)
    return TL_R
示例#7
0
def main():
    """Train Mask R-CNN on the PennFudan-style person dataset for 10 epochs,
    saving a checkpoint and evaluating on the test split after each epoch."""
    # train on the GPU or on the CPU, if a GPU is not available
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    # our dataset has two classes only - background and person
    num_classes = 2
    # use our dataset and defined transformations
    dataset = PennFudanDataset('C:\\zhulei\\maskRcnn\\data\\train',
                               get_transform(train=True))
    dataset_test = PennFudanDataset('C:\\zhulei\\maskRcnn\\data\\test',
                                    get_transform(train=False))

    # split the dataset in train and test set
    # indices = torch.randperm(len(dataset)).tolist()
    # dataset = torch.utils.data.Subset(dataset, indices[:-50])
    # dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])

    # define training and validation data loaders
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=2,
                                              shuffle=True,
                                              num_workers=4,
                                              collate_fn=utils.collate_fn)

    data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                                   batch_size=1,
                                                   shuffle=False,
                                                   num_workers=4,
                                                   collate_fn=utils.collate_fn)

    # get the model using our helper function
    model = get_model_instance_segmentation(num_classes)

    # move model to the right device
    model.to(device)

    # construct an optimizer over the trainable parameters only
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)
    # and a learning rate scheduler: decay LR by 10x every 3 epochs
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)

    # let's train it for 10 epochs
    num_epochs = 10
    save_dir = "C:\\zhulei\\maskRcnn"
    # create the checkpoint directory up front so the first torch.save
    # cannot fail with FileNotFoundError when 'models' does not exist yet
    os.makedirs(os.path.join(save_dir, 'models'), exist_ok=True)

    for epoch in range(num_epochs):
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model,
                        optimizer,
                        data_loader,
                        device,
                        epoch,
                        print_freq=10)
        # update the learning rate
        lr_scheduler.step()
        # save the model
        torch.save(
            model.state_dict(),
            os.path.join(save_dir, 'models', '_epoch-' + str(epoch) + '.pth'))
        # evaluate on the test dataset
        evaluate(model, data_loader_test, device=device)

    print("That's it!")
示例#8
0
from dataloader import PennFudanDataset
from PIL import Image
import os
import numpy as np
import matplotlib as mpl
import numpy
from models.detection.rpn import AnchorGenerator
import models
from model import get_instance_segmentation_model
from transform import get_transform

# Script setup: pin the visible GPU, build train/test datasets and loaders.
# NOTE(review): `torch` and `utils` are used below but not imported in the
# visible import block — presumably imported elsewhere; confirm.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# use the PennFudan dataset and defined transformations
dataset = PennFudanDataset(root='./patch_data/', transforms=get_transform(train=True))
dataset_test = PennFudanDataset(root='./patch_data_test/same_tissue/', transforms=get_transform(train=False))

# define training and validation data loaders
# num_workers=0 keeps loading in the main process (no worker subprocesses)
data_loader = torch.utils.data.DataLoader(
    dataset, batch_size=4, shuffle=True, num_workers=0,
    collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
    dataset_test, batch_size=1, shuffle=True, num_workers=0,
    collate_fn=utils.collate_fn)

# the dataset has two classes only - background and person
num_classes = 2

# get the model using the helper function
#bone: 'resnet50'/'mobilenet_v2'/'googlenet'/'densenet121'/'resnet50'/'shufflenet_v2_x1_0'/'inception_v3'/'squeezenet1_0'/
示例#9
0
def main():
    """Main training entry point.

    Parses command-line arguments into the global CFG, prepares log/model
    directories, loads and splits the data, builds the model/optimizer/
    scheduler, and launches training through Learner.
    """

    ### header
    parser = argparse.ArgumentParser()

    # path
    parser.add_argument('--root-path', default=CFG.root_path, help="root path")
    parser.add_argument('--log-path', default=CFG.log_path, help="log path")
    parser.add_argument('--model-path',
                        default=CFG.model_path,
                        help="model path")
    parser.add_argument('--pretrained-path', help='pretrained path')

    # image
    parser.add_argument('--transform-version',
                        default=0,
                        type=int,
                        help="image transform version ex) 0, 1, 2 ...")
    parser.add_argument('--image-size',
                        default=64,
                        type=int,
                        help="image size(64)")

    # model
    parser.add_argument('--model-name',
                        default=CFG.model_name,
                        help=f"model name({CFG.model_name})")
    parser.add_argument('--backbone-name',
                        default=CFG.backbone_name,
                        help=f"backbone name({CFG.backbone_name})")

    # learning
    parser.add_argument('--batch-size',
                        default=CFG.batch_size,
                        type=int,
                        help=f"batch size({CFG.batch_size})")
    parser.add_argument('--learning-rate',
                        default=CFG.learning_rate,
                        type=float,
                        help=f"learning rate({CFG.learning_rate})")
    parser.add_argument('--num-epochs',
                        default=CFG.num_epochs,
                        type=int,
                        help=f"number of epochs({CFG.num_epochs})")

    # etc
    parser.add_argument("--seed",
                        default=CFG.seed,
                        type=int,
                        help=f"seed({CFG.seed})")
    parser.add_argument("--workers",
                        default=CFG.workers,
                        type=int,
                        help=f"number of workers({CFG.workers})")
    parser.add_argument("--debug", action="store_true", help="debug mode")
    # fix: choices was [list(range(...))] — a list containing a list — so no
    # single fold value could ever match; flatten it and add type=int so the
    # command-line string is converted before the membership test
    parser.add_argument("--val-fold",
                        default=CFG.val_fold,
                        type=int,
                        choices=list(range(0, CFG.n_folds)),
                        help=f"fold number for validation({CFG.val_fold})")

    args = parser.parse_args()

    # path
    CFG.root_path = args.root_path
    CFG.model_path = args.model_path
    CFG.log_path = args.log_path
    CFG.pretrained_path = args.pretrained_path

    # image
    CFG.transform_version = args.transform_version
    CFG.image_size = args.image_size

    # model
    CFG.model_name = args.model_name
    CFG.backbone_name = args.backbone_name

    # learning
    CFG.batch_size = args.batch_size
    CFG.learning_rate = args.learning_rate
    CFG.num_epochs = args.num_epochs

    # etc
    CFG.seed = args.seed
    CFG.workers = args.workers
    CFG.debug = args.debug

    # get device
    CFG.device = get_device()

    # get version from the invocation path, e.g. "v1/train.py" -> "v1"
    # NOTE(review): assumes sys.argv[0] has exactly one directory component
    _, version, _ = sys.argv[0].split('/')
    CFG.version = version

    # update log path: log_path/version/exp_<id> (or a debug sandbox)
    if not CFG.debug:
        CFG.log_path = os.path.join(CFG.log_path, CFG.version)
        os.makedirs(CFG.log_path, exist_ok=True)
        CFG.log_path = os.path.join(
            CFG.log_path, f'exp_{get_exp_id(CFG.log_path, prefix="exp_")}')
        os.makedirs(CFG.log_path, exist_ok=True)
    else:
        # NOTE(review): "debug" is joined twice, yielding .../debug/debug —
        # kept as-is (other tooling may rely on this layout); confirm
        CFG.log_path = os.path.join(CFG.log_path, "debug")
        os.makedirs(CFG.log_path, exist_ok=True)
        CFG.log_path = os.path.join(CFG.log_path, "debug")
        os.makedirs(CFG.log_path, exist_ok=True)

    # update model path: same layout as the log path
    if not CFG.debug:
        CFG.model_path = os.path.join(CFG.model_path, version)
        os.makedirs(CFG.model_path, exist_ok=True)
        CFG.model_path = os.path.join(
            CFG.model_path, f'exp_{get_exp_id(CFG.model_path, prefix="exp_")}')
        os.makedirs(CFG.model_path, exist_ok=True)
    else:
        CFG.model_path = os.path.join(CFG.model_path, "debug")
        os.makedirs(CFG.model_path, exist_ok=True)
        CFG.model_path = os.path.join(CFG.model_path, "debug")
        os.makedirs(CFG.model_path, exist_ok=True)

    # dump the effective config (skip dunder attributes) for reproducibility;
    # context manager so the CFG.json handle is closed deterministically
    pprint({k: v for k, v in dict(CFG.__dict__).items() if '__' not in k})
    with open(os.path.join(CFG.log_path, 'CFG.json'), "w") as f:
        json.dump({k: v
                   for k, v in dict(CFG.__dict__).items() if '__' not in k},
                  f)
    print()

    ### Seed all
    seed_everything(CFG.seed)

    ### Data Related
    # load raw data
    print("Load Raw Data")
    train_df, test_df, ss_df = load_data(CFG)

    # preprocess data
    print("Preprocess Data")
    train_df = preprocess_data(CFG, train_df)

    # split data
    print("Split Data")
    train_df, valid_df = split_data(CFG, train_df)

    # get transform
    print("Get Transforms")
    train_transforms, test_transforms = get_transform(CFG)

    # get dataset
    print("Get Dataset")
    trn_data = DFDDataset(CFG, train_df, train_transforms)
    val_data = DFDDataset(CFG, valid_df, test_transforms)

    ### Model related
    # get learner
    learner = Learner(CFG)
    learner.name = f"model.fold_{CFG.val_fold}"
    if CFG.pretrained_path:
        print("Load Pretrained Model")
        print(f"... Pretrained Info - {CFG.pretrained_path}")
        learner.load(CFG.pretrained_path, "model_state_dict")
        # fix: was CFG.deivce (typo) which raised AttributeError here
        model = learner.best_model.to(CFG.device)
    else:
        print("Load Model")
        model = get_model(CFG)
        model = model.to(CFG.device)

    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)

    # get optimizer
    optimizer = optim.Adam(model.parameters(), lr=CFG.learning_rate)

    # get scheduler: halve the LR after 2 epochs without val-loss improvement
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode='min',
                                                           patience=2,
                                                           verbose=False,
                                                           factor=0.5)

    ### train related
    # train model
    learner.train(trn_data, val_data, model, optimizer, scheduler)
示例#10
0
def get_transform(config, sequence):
    """Forward to transform.get_transform; thin module-level convenience."""
    composed = transform.get_transform(config, sequence)
    return composed
示例#11
0
def main():
    """Inference entry point: load the trained fold model(s), predict on the
    test set (optionally with TTA), and write rounded + raw submission CSVs.
    """
    parser = argparse.ArgumentParser()
    # path
    parser.add_argument('--root-path', default=CFG.root_path, help="root path")
    parser.add_argument('--save-path', default=CFG.save_path, help="save path")
    parser.add_argument('--sub-name',
                        default=CFG.sub_name,
                        help="submission name")

    # learning
    parser.add_argument('--batch-size',
                        default=CFG.batch_size,
                        type=int,
                        help=f"batch size({CFG.batch_size})")
    parser.add_argument("--workers",
                        default=CFG.workers,
                        type=int,
                        help=f"number of workers({CFG.workers})")
    parser.add_argument("--seed",
                        default=CFG.seed,
                        type=int,
                        help=f"seed({CFG.seed})")

    # version
    parser.add_argument('--version', type=int)
    parser.add_argument('--exp-id', type=int)

    # etc
    parser.add_argument('--tta', action='store_true', default=False)

    args = parser.parse_args()

    CFG.root_path = args.root_path
    CFG.save_path = args.save_path
    CFG.sub_name = args.sub_name

    CFG.batch_size = args.batch_size
    CFG.workers = args.workers
    CFG.seed = args.seed

    CFG.model_path = f"./model/v{args.version}/exp_{args.exp_id}/"
    CFG.log_path = f"./log/v{args.version}/exp_{args.exp_id}/"

    CFG.tta = args.tta

    # get device
    CFG.device = get_device()

    # load train environment saved by the training run; context manager so
    # the file handle is closed deterministically (was a bare open())
    with open(os.path.join(CFG.log_path, 'CFG.json'), 'r') as f:
        env = json.load(f)
    for k, v in env.items():
        setattr(CFG, k, v)

    # best scores for the submission filename (fold 0 only: loop breaks)
    loss, metric = 0, 0
    for fold in range(CFG.n_folds):
        fn = os.path.join(CFG.log_path, f"log.fold_{fold}.csv")
        score = pd.read_csv(fn).sort_values("val_metric",
                                            ascending=False).iloc[0]
        loss += score['val_loss']  #/ CFG.n_folds
        metric += score['val_metric']  #/ CFG.n_folds
        break

    CFG.sub_name = f"submission." \
                   f"ver_{args.version}." \
                   f"exp_{args.exp_id}." \
                   f"loss_{loss:.4f}." \
                   f"metric_{metric:.4f}.csv"

    if CFG.tta:
        CFG.sub_name = "tta." + CFG.sub_name

    pprint({k: v for k, v in dict(CFG.__dict__).items() if '__' not in k})
    print()

    ### seed all
    seed_everything(CFG.seed)

    ### Data related logic
    # load data
    print("Load Raw Data")
    _, test_df, ss_df = load_data(CFG)
    print()

    # get transform
    print("Get Transform")
    _, test_transforms = get_transform(CFG)
    print()

    # dataset
    tst_data = DFDDataset(CFG, test_df, test_transforms)

    # if tta: rebuild the test dataset with the TTA transforms
    if CFG.tta:
        test_transforms = get_tta_transform(CFG)
        tst_data = DFDDataset(CFG, test_df, test_transforms)

    final_preds = np.zeros(test_df.shape[0])

    # folds (fold 0 only: loop breaks at the bottom)
    for fold in range(CFG.n_folds):
        print(f"========== Fold: {fold} ==========")
        # load learner
        print("Load Model")
        model_name = f'model.fold_{fold}.best.pt'
        learner = Learner(CFG)
        learner.load(os.path.join(CFG.model_path, model_name),
                     "model_state_dict")

        # prediction
        if not CFG.tta:
            test_preds = torch.sigmoid(
                learner.predict(tst_data).view(-1)).numpy()

        else:
            # average 4 stochastic TTA passes.
            # fix: this branch predicted from the undefined name `tta_data`
            # (NameError at runtime); the TTA dataset is tst_data, which was
            # rebuilt above with the TTA transforms
            test_preds = np.zeros(test_df.shape[0])
            for _ in range(4):
                test_preds += torch.sigmoid(
                    learner.predict(tst_data).view(-1)).numpy() / 4

        final_preds += test_preds  #/ CFG.n_folds
        print()
        break

    # strip storage prefixes/suffixes so paths match the sample submission
    image_path = [
        path.replace(".npy", "").replace("./input/test_face_margin/", "")
        for path in test_df['image_path'].values
    ]
    print(final_preds.max(), final_preds.min())
    # rounded (hard-label) submission
    test_df = pd.DataFrame({"path": image_path, "y": np.round(final_preds)})
    test_df = test_df.set_index("path")
    ss_df = test_df.loc[ss_df['path']].reset_index()[['path', 'y']]
    ss_df.to_csv(os.path.join(CFG.save_path, f"{CFG.sub_name}"), index=False)
    print(ss_df.head())

    # raw-probability submission, written alongside under a "raw." prefix
    test_df = pd.DataFrame({"path": image_path, "y": final_preds})
    test_df = test_df.set_index("path")
    ss_df = test_df.loc[ss_df['path']].reset_index()[['path', 'y']]
    ss_df.to_csv(os.path.join(CFG.save_path, f"raw.{CFG.sub_name}"),
                 index=False)
    print(ss_df.head())
示例#12
0
from dataloader import PennFudanDataset
from PIL import Image
import os
import numpy as np
import matplotlib as mpl
import numpy
from color import pre_to_img
from transform import get_transform
from model import get_instance_segmentation_model

# Script setup: pin the visible GPU and build the test dataset/loader.
# NOTE(review): `torch` and `utils` are used below but not imported in the
# visible import block — presumably imported elsewhere; confirm.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# use the PennFudan dataset and defined transformations
dataset_test = PennFudanDataset(root='./patch_data_test/same_tissue/',
                                transforms=get_transform(train=False))

# batch_size=1 with num_workers=0: single-image, main-process loading
data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                               batch_size=1,
                                               shuffle=True,
                                               num_workers=0,
                                               collate_fn=utils.collate_fn)

# put the model in evaluation mode


def get_counts(sequence):
    counts = {}
    for i in range(sequence.shape[0]):
        for x in sequence[i]:
            if x in counts:
示例#13
0
def main():
    """Melanoma inference entry point: average predictions over all fold
    models (optionally with TTA) and write the submission CSV."""
    parser = argparse.ArgumentParser()
    # path
    parser.add_argument('--root-path', default=CFG.root_path, help="root path")
    parser.add_argument('--save-path', default=CFG.save_path, help="save path")
    parser.add_argument('--sub-name',
                        default=CFG.sub_name,
                        help="submission name")

    # learning
    parser.add_argument('--batch-size',
                        default=CFG.batch_size,
                        type=int,
                        help=f"batch size({CFG.batch_size})")
    parser.add_argument("--workers",
                        default=CFG.workers,
                        type=int,
                        help=f"number of workers({CFG.workers})")
    parser.add_argument("--seed",
                        default=CFG.seed,
                        type=int,
                        help=f"seed({CFG.seed})")

    # version
    parser.add_argument('--version', type=int)
    parser.add_argument('--exp-id', type=int)

    # etc
    parser.add_argument('--tta', action='store_true', default=False)

    args = parser.parse_args()

    CFG.root_path = args.root_path
    CFG.save_path = args.save_path
    CFG.sub_name = args.sub_name

    CFG.batch_size = args.batch_size
    CFG.workers = args.workers
    CFG.seed = args.seed

    CFG.model_path = f"./model/v{args.version}/exp_{args.exp_id}/"
    CFG.log_path = f"./log/v{args.version}/exp_{args.exp_id}/"

    CFG.tta = args.tta

    # get device
    CFG.device = get_device()

    # load train environment saved by the training run; context manager so
    # the file handle is closed deterministically (was a bare open())
    with open(os.path.join(CFG.log_path, 'CFG.json'), 'r') as f:
        env = json.load(f)
    for k, v in env.items():
        setattr(CFG, k, v)

    # fold-averaged best (lowest val-loss) scores for the submission filename
    loss, metric = 0, 0
    for fold in range(CFG.n_folds):
        fn = os.path.join(CFG.log_path, f"log.fold_{fold}.csv")
        score = pd.read_csv(fn).sort_values("val_loss", ascending=True).iloc[0]
        loss += score['val_loss'] / CFG.n_folds
        metric += score['val_metric'] / CFG.n_folds

    CFG.sub_name = f"submission." \
                   f"ver_{args.version}." \
                   f"exp_{args.exp_id}." \
                   f"loss_{loss:.4f}." \
                   f"metric_{metric:.4f}.csv"

    if CFG.tta:
        CFG.sub_name = "tta." + CFG.sub_name

    pprint({k: v for k, v in dict(CFG.__dict__).items() if '__' not in k})
    print()

    ### seed all
    seed_everything(CFG.seed)

    ### Data related logic
    # load data
    print("Load Raw Data")
    _, test_df = load_data(CFG)
    print()

    # preprocess data
    print("Preprocess Data")
    test_df = preprocess_data(CFG, test_df)
    print()

    # get transform
    print("Get Transform")
    _, test_transforms = get_transform(CFG)
    print()

    # dataset
    tst_data = MelanomaDataset(CFG, test_df, test_transforms)

    # if tta: random flips make each pass stochastic, averaged below
    tta_transforms = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    tta_data = MelanomaDataset(CFG, test_df, tta_transforms)

    final_preds = np.zeros(test_df.shape[0])

    # folds: average each fold model's sigmoid outputs
    for fold in range(CFG.n_folds):
        print(f"========== Fold: {fold} ==========")
        # load learner
        print("Load Model")
        model_name = f'model.fold_{fold}.best.pt'
        learner = Learner(CFG)
        learner.load(os.path.join(CFG.model_path, model_name),
                     "model_state_dict")

        # prediction
        if not CFG.tta:
            test_preds = torch.sigmoid(
                learner.predict(tst_data).view(-1)).numpy()

        else:
            # average 4 stochastic TTA passes
            test_preds = np.zeros(test_df.shape[0])
            for _ in range(4):
                test_preds += torch.sigmoid(
                    learner.predict(tta_data).view(-1)).numpy() / 4

        final_preds += test_preds / CFG.n_folds
        print()

    # align predictions with the sample submission's image order
    ss_df = pd.read_csv(
        os.path.join(CFG.root_path, "melanoma-external-malignant-256",
                     "sample_submission.csv"))
    test_df['target'] = final_preds
    test_df.set_index("image_name", inplace=True)
    ss_df = test_df.loc[ss_df['image_name']].reset_index()[[
        'image_name', 'target'
    ]]
    ss_df.to_csv(os.path.join(CFG.save_path, f"{CFG.sub_name}"), index=False)

    print(ss_df.head())
示例#14
0
def main():
    """Evaluate a Mask R-CNN person-segmentation model on the PennFudan test
    set with COCO metrics, then run a dVRK PSM2 chessboard calibration /
    motion routine.

    NOTE(review): this function fuses two unrelated workflows (torchvision
    COCO evaluation and PyKDL/dVRK robot control) and references names that
    are not defined anywhere in this file (``psm2``, ``pos``, ``rot``,
    ``z_upper``) — presumably module-level globals; confirm before running.
    """
    # train on the GPU or on the CPU, if a GPU is not available
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    # our dataset has two classes only - background and person
    num_classes = 2

    # get the model using our helper function
    model = get_model_instance_segmentation(num_classes)

    # load pretrain_dict
    # NOTE(review): hard-coded absolute Windows checkpoint path; torch.load
    # with no map_location assumes the checkpoint's original device is usable
    pretrain_dict = torch.load(
        os.path.join("C:\\zhulei\\maskRcnn\\models", "_epoch-9.pth"))
    model.load_state_dict(pretrain_dict)

    # move model to the right device
    model.to(device)

    # use our dataset and defined transformations
    dataset_test = PennFudanDataset('C:\\zhulei\\maskRcnn\\data\\test',
                                    get_transform(train=False))
    data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                                   batch_size=1,
                                                   shuffle=False,
                                                   num_workers=1,
                                                   collate_fn=utils.collate_fn)

    # remember the thread count so it can be restored after evaluation
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    # build the COCO ground-truth API and evaluator for this dataset
    coco = get_coco_api_from_dataset(data_loader_test.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader_test, 100,
                                                  header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        # synchronize so model_time measures the forward pass accurately
        torch.cuda.synchronize()
        model_time = time.time()

        outputs = model(image)

        # draw/overlay the predicted instances for the first image
        instance_segmentation_api(image[0], outputs)

        # visualization (kept for reference)
        # for img in image:
        #     Image.fromarray((img.mul(255).permute(1, 2, 0).byte().cpu().numpy())[0])
        # print(outputs[0]['masks'].shape)
        # for i in range(99):
        #     result = Image.fromarray(outputs[0]['masks'][i, 0].mul(255).byte().cpu().numpy())
        #     result.show()

        # move predictions to CPU before handing them to the evaluator
        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        # map image_id -> prediction dict, as CocoEvaluator.update expects
        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    # NOTE(review): robot-control code starts here; hard-coded start pose
    pos2 = PyKDL.Vector(-0.0972128, -0.0170138, -0.106974)
    sideways = PyKDL.Rotation(-0.453413, 0.428549, -0.781513, -0.17203,
                              0.818259, 0.548505, 0.874541, 0.383143,
                              -0.297286)
    """ Move to arbitrary start position (near upper left corner) & release anything gripper is
	holding. """
    home(psm2, pos2, sideways)
    """ Get PSM and endoscope calibration data (25 corresponding chess points) """
    psm2_calibration_data = list(
        transform.load_all('utils/psm2_recordings.txt'))
    psm2_calibration_matrix = transform.fit_to_plane(
        transform.psm_data_to_matrix(psm2_calibration_data))
    endoscope_calibration_matrix = transform.fit_to_plane(
        np.matrix(
            list(read_camera.load_all('camera_data/endoscope_chesspts.p'))[0]))

    world = transform.generate_world()

    # transform endoscope-frame chess points into the PSM2 frame
    TE_2 = transform.get_transform("Endoscope", "PSM2",
                                   endoscope_calibration_matrix,
                                   psm2_calibration_matrix)
    psme_2 = transform.transform_data("Endoscope", "PSM2",
                                      endoscope_calibration_matrix, TE_2,
                                      psm2_calibration_matrix)
    pprint.pprint(psme_2)
    """ Move to chessboard corner, descend, come up,and go to next. """
    move_to(psm2, psme_2.tolist(), z_upper)

    home(psm2, pos, rot)
示例#16
0
def main():
    """Inference entry point for the melanoma classifier.

    Parses CLI overrides, restores the training-time configuration from the
    experiment's ``CFG.json``, runs per-fold prediction (optionally with TTA,
    SWA weights, or a 5-epoch snapshot ensemble), averages the fold
    probabilities, and writes a submission CSV named after the run's mean
    validation loss/metric.

    Fixes vs. the original:
    - removed dead stores ("case 1" assignments that were immediately
      overwritten by "case 2"; only the ``sub_1`` head's output was used)
    - dropped placeholder-less f-string prefixes (``f"model_state_dict"``,
      ``f"{CFG.sub_name}"``)
    - removed commented-out code
    """
    parser = argparse.ArgumentParser()

    # path
    parser.add_argument('--root-path', default=CFG.root_path, help="root path")
    parser.add_argument('--save-path', default=CFG.save_path, help="save path")
    parser.add_argument('--sub-name',
                        default=CFG.sub_name,
                        help="submission name")

    # learning
    parser.add_argument('--batch-size',
                        default=CFG.batch_size,
                        type=int,
                        help=f"batch size({CFG.batch_size})")
    parser.add_argument("--workers",
                        default=CFG.workers,
                        type=int,
                        help=f"number of workers({CFG.workers})")
    parser.add_argument("--seed",
                        default=CFG.seed,
                        type=int,
                        help=f"seed({CFG.seed})")

    # version (identifies which trained experiment to load)
    parser.add_argument('--version', type=int)
    parser.add_argument('--exp-id', type=int)

    # etc
    parser.add_argument('--tta', action='store_true', default=False)
    parser.add_argument('--use-swa', action='store_true', default=False)
    parser.add_argument('--use-snapshot', action='store_true', default=False)

    args = parser.parse_args()

    CFG.root_path = args.root_path
    CFG.save_path = args.save_path
    CFG.sub_name = args.sub_name

    CFG.batch_size = args.batch_size
    CFG.workers = args.workers
    CFG.seed = args.seed

    CFG.model_path = f"./model/v{args.version}/exp_{args.exp_id}/"
    CFG.log_path = f"./log/v{args.version}/exp_{args.exp_id}/"

    CFG.tta = args.tta
    CFG.use_swa = args.use_swa
    CFG.use_snapshot = args.use_snapshot

    # get device
    CFG.device = get_device()

    # restore every setting saved at training time for this experiment
    env = json.load(open(os.path.join(CFG.log_path, 'CFG.json'), 'r'))
    for k, v in env.items():
        setattr(CFG, k, v)

    # fold-averaged best validation loss/metric, embedded in the file name
    loss, metric = 0, 0
    for fold in range(CFG.n_folds):
        fn = os.path.join(CFG.log_path, f"log.fold_{fold}.csv")
        score = pd.read_csv(fn).sort_values("val_metric",
                                            ascending=False).iloc[0]
        loss += score['val_loss'] / CFG.n_folds
        metric += score['val_metric'] / CFG.n_folds

    CFG.sub_name = f"submission." \
                   f"ver_{args.version}." \
                   f"exp_{args.exp_id}." \
                   f"loss_{loss:.4f}." \
                   f"metric_{metric:.4f}.csv"

    if CFG.tta:
        CFG.sub_name = "tta." + CFG.sub_name

    if CFG.use_swa:
        CFG.sub_name = "swa." + CFG.sub_name

    if CFG.use_snapshot:
        CFG.sub_name = "snapshot." + CFG.sub_name

    pprint({k: v for k, v in dict(CFG.__dict__).items() if '__' not in k})
    print()

    ### seed all
    seed_everything(CFG.seed)

    ### Data related logic
    # load data
    print("Load Raw Data")
    _, test_df = load_data(CFG)
    print()

    # preprocess data
    print("Preprocess Data")
    test_df = preprocess_data(CFG, test_df)
    print()

    # get transform
    print("Get Transform")
    if CFG.tta:
        # version 99 selects the augmented (TTA) transform set; in this mode
        # get_transform returns a single transform, not a (train, test) pair
        CFG.transform_version = 99
        test_transforms = get_transform(CFG)
    else:
        _, test_transforms = get_transform(CFG)
    print()

    # dataset
    tst_data = MelanomaDataset(CFG, test_df, test_transforms)

    final_preds = np.zeros(test_df.shape[0])

    # folds
    for fold in range(CFG.n_folds):
        print(f"========== Fold: {fold} ==========")

        if CFG.use_snapshot:
            # snapshot ensemble: average the 5 epochs with the best
            # "sub_1_score" recorded in this fold's training log
            fn = os.path.join(CFG.log_path, f"log.fold_{fold}.csv")
            log = pd.read_csv(fn).rename({
                "Unnamed: 0": "epoch"
            }, axis=1).sort_values("sub_1_score", ascending=False)
            print(log.head(5), '\n')
            epochs = log['epoch'].values.tolist()[:5]

            test_preds = np.zeros(test_df.shape[0])
            for epoch in epochs:
                print(f"***** EPOCH: {epoch} *****")
                model_name = f'model.fold_{fold}.epoch_{epoch}.pt'
                learner = Learner(CFG)
                learner.load(os.path.join(CFG.model_path, model_name),
                             "model_state_dict")

                # only the auxiliary sub_1 head's output is used; the raw
                # sigmoid predictions were a dead store in the original
                _, sub_1 = learner.predict(tst_data)
                test_preds += sub_1.view(-1).numpy() / len(epochs)

        else:
            # load learner
            print("Load Model")
            if CFG.use_swa:
                model_name = f'model.fold_{fold}.swa.pt'
            else:
                model_name = f'model.fold_{fold}.best.pt'
            learner = Learner(CFG)
            learner.load(os.path.join(CFG.model_path, model_name),
                         "model_state_dict")

            # prediction
            if not CFG.tta:
                # as above, only the sub_1 head is used ("case 2")
                _, sub_1 = learner.predict(tst_data)
                test_preds = sub_1.view(-1).numpy()
            else:
                # 4 stochastic TTA passes, averaged
                test_preds = np.zeros(test_df.shape[0])
                for _ in range(4):
                    test_preds += torch.sigmoid(
                        learner.predict(tst_data)[0].view(-1)).numpy() / 4

        final_preds += test_preds / CFG.n_folds
        print()

    # align fold-averaged predictions with the sample-submission row order
    ss_df = pd.read_csv(
        os.path.join(CFG.root_path, "melanoma-external-malignant-256",
                     "sample_submission.csv"))
    test_df['target'] = final_preds
    test_df.set_index("image_name", inplace=True)
    ss_df = test_df.loc[ss_df['image_name']].reset_index()[[
        'image_name', 'target'
    ]]
    ss_df.to_csv(os.path.join(CFG.save_path, CFG.sub_name), index=False)

    print(ss_df.head())
示例#17
0
    device = "cuda"
    train_dir = "../data/train_20k"
    test_dir = "../data/test_20k"
    save_path = "../data/models/200_iter{}.bin"
    best_path = "../data/models/200_best.bin"

    model = QRN18(target_classes=target_classes)
    model.double()
    model.to(device)

    # Lets add some transformation to make our model translation and scale invarient
    to_tensor = torchvision.transforms.ToTensor()

    train_dataset = QaidaDataset(train_dir,
                                 transform=get_transform(mode="train"),
                                 max_classes=400)
    # Train dataloader should shuffle images
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=train_batch_size,
                                  shuffle=True,
                                  num_workers=4)

    # Test dataset do not need transformations
    test_dataset = QaidaDataset(test_dir,
                                transform=get_transform(mode="test"),
                                max_classes=400)
    # Test dataloader should not shuffle images
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=test_batch_size,
                                 shuffle=False,
示例#18
0
def main():
    """Training entry point.

    Parses CLI overrides into the global ``CFG``, creates versioned
    log/model directories, saves the effective config as JSON, then trains
    one model per cross-validation fold with apex mixed precision and
    optional stochastic weight averaging.

    Fixes vs. the original:
    - ``CFG.deivce`` typo corrected to ``CFG.device`` (raised
      ``AttributeError`` whenever ``--pretrained-path`` was supplied)
    - dropped placeholder-less f-string prefixes (``f"Get Model"``,
      ``f"model_state_dict"``)
    """

    ### header
    parser = argparse.ArgumentParser()

    # path
    parser.add_argument('--root-path', default=CFG.root_path, help="root path")
    parser.add_argument('--log-path', default=CFG.log_path, help="log path")
    parser.add_argument('--model-path',
                        default=CFG.model_path,
                        help="model path")
    parser.add_argument('--pretrained-path', help='pretrained path')

    # image
    parser.add_argument('--transform-version',
                        default=0,
                        type=int,
                        help="image transform version ex) 0, 1, 2 ...")
    parser.add_argument('--image-size',
                        default=256,
                        type=int,
                        help="image size(256)")

    # model
    parser.add_argument('--model-name',
                        default=CFG.model_name,
                        help=f"model name({CFG.model_name})")
    parser.add_argument('--backbone-name',
                        default=CFG.backbone_name,
                        help=f"backbone name({CFG.backbone_name})")
    parser.add_argument('--dropout',
                        default=CFG.dropout,
                        type=float,
                        help=f"dropout({CFG.dropout})")
    parser.add_argument('--weight-decay',
                        default=CFG.weight_decay,
                        type=float,
                        help=f"weight decay({CFG.weight_decay})")

    # learning
    parser.add_argument('--batch-size',
                        default=CFG.batch_size,
                        type=int,
                        help=f"batch size({CFG.batch_size})")
    parser.add_argument('--learning-rate',
                        default=CFG.learning_rate,
                        type=float,
                        help=f"learning rate({CFG.learning_rate})")
    parser.add_argument('--num-epochs',
                        default=CFG.num_epochs,
                        type=int,
                        help=f"number of epochs({CFG.num_epochs})")
    parser.add_argument("--swa",
                        action="store_true",
                        help="do stochastic weight averaging")

    # etc
    parser.add_argument("--seed",
                        default=CFG.seed,
                        type=int,
                        help=f"seed({CFG.seed})")
    parser.add_argument("--workers",
                        default=CFG.workers,
                        type=int,
                        help=f"number of workers({CFG.workers})")
    parser.add_argument("--debug", action="store_true", help="debug mode")

    args = parser.parse_args()

    # path
    CFG.root_path = args.root_path
    CFG.model_path = args.model_path
    CFG.log_path = args.log_path
    CFG.pretrained_path = args.pretrained_path

    # image
    CFG.transform_version = args.transform_version
    CFG.image_size = args.image_size

    # model
    CFG.model_name = args.model_name
    CFG.backbone_name = args.backbone_name
    CFG.dropout = args.dropout
    CFG.weight_decay = args.weight_decay

    # learning
    CFG.batch_size = args.batch_size
    CFG.learning_rate = args.learning_rate
    CFG.num_epochs = args.num_epochs
    CFG.swa = args.swa

    # etc
    CFG.seed = args.seed
    CFG.workers = args.workers
    CFG.debug = args.debug

    # get device
    CFG.device = get_device()

    # get version from the script's directory name
    # NOTE(review): assumes the script is invoked as <dir>/<version>/<file>.py
    # (exactly two '/' in argv[0]) — brittle; confirm invocation convention
    _, version, _ = sys.argv[0].split('/')
    CFG.version = version

    # update log path: <log>/<version>/exp_<id> (or <log>/debug/debug)
    if not CFG.debug:
        CFG.log_path = os.path.join(CFG.log_path, CFG.version)
        os.makedirs(CFG.log_path, exist_ok=True)
        CFG.log_path = os.path.join(
            CFG.log_path, f'exp_{get_exp_id(CFG.log_path, prefix="exp_")}')
        os.makedirs(CFG.log_path, exist_ok=True)
    else:
        # NOTE(review): "debug" is joined twice on purpose here, mirroring
        # the exp-dir nesting of the non-debug branch
        CFG.log_path = os.path.join(CFG.log_path, "debug")
        os.makedirs(CFG.log_path, exist_ok=True)
        CFG.log_path = os.path.join(CFG.log_path, "debug")
        os.makedirs(CFG.log_path, exist_ok=True)

    # update model path: same layout as the log path
    if not CFG.debug:
        CFG.model_path = os.path.join(CFG.model_path, version)
        os.makedirs(CFG.model_path, exist_ok=True)
        CFG.model_path = os.path.join(
            CFG.model_path, f'exp_{get_exp_id(CFG.model_path, prefix="exp_")}')
        os.makedirs(CFG.model_path, exist_ok=True)
    else:
        CFG.model_path = os.path.join(CFG.model_path, "debug")
        os.makedirs(CFG.model_path, exist_ok=True)
        CFG.model_path = os.path.join(CFG.model_path, "debug")
        os.makedirs(CFG.model_path, exist_ok=True)

    # show and persist the effective config (re-loaded later at inference)
    pprint({k: v for k, v in dict(CFG.__dict__).items() if '__' not in k})
    json.dump({k: v
               for k, v in dict(CFG.__dict__).items() if '__' not in k},
              open(os.path.join(CFG.log_path, 'CFG.json'), "w"))
    print()

    ### seed all
    seed_everything(CFG.seed)

    ### data related
    # load data
    print("Load Raw Data")
    data_df, test_df = load_data(CFG)

    # preprocess data
    print("Preprocess Data")
    data_df = preprocess_data(CFG, data_df)

    # split data into train with valid
    print("Split Data")
    data_df = split_data(CFG, data_df)

    # get transform
    print("Get Transform")
    train_transforms, test_transforms = get_transform(CFG)

    # train test split
    for fold in range(CFG.n_folds):
        print(f"\nValidation Fold: {fold}")
        train_df = data_df[data_df['fold'] != fold].reset_index(drop=True)
        valid_df = data_df[data_df['fold'] == fold].reset_index(drop=True)
        print(
            f"... Train Shape: {train_df.shape}, Valid Shape: {valid_df.shape}"
        )

        # dataset
        trn_data = MelanomaDataset(CFG, train_df, train_transforms)
        val_data = MelanomaDataset(CFG, valid_df, test_transforms)

        ### model related
        # get learner
        learner = Learner(CFG)
        learner.name = f"model.fold_{fold}"
        if CFG.pretrained_path:
            print("Load Pretrained Model")
            print(f"... Pretrained Info - {CFG.pretrained_path}")
            learner.load(CFG.pretrained_path, "model_state_dict")

        # get model
        if CFG.pretrained_path:
            print("Get Model")
            # BUG FIX: was ``CFG.deivce`` (typo), which raised
            # AttributeError whenever --pretrained-path was supplied
            model = learner.best_model.to(CFG.device)
        else:
            print("Get Model")
            model = get_model(CFG)
            model = model.to(CFG.device)

        # get optimizer: exempt biases and BatchNorm parameters (detected via
        # their 'running_mean' buffers in the state dict) from weight decay
        module_names = list(model.state_dict())
        no_decay = ['bias']
        for m in module_names:
            if 'running_mean' in m:
                no_decay.append(m.split('.running_mean')[0])
        param_optimizer = list(model.named_parameters())
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay':
            CFG.weight_decay
        }, {
            'params':
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0
        }]
        optimizer = optim.AdamW(optimizer_grouped_parameters,
                                CFG.learning_rate)

        # apex mixed precision; must wrap before DataParallel
        model, optimizer = amp.initialize(model, optimizer, verbosity=0)

        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)

        if CFG.swa:
            optimizer = torchcontrib.optim.SWA(optimizer)

        # get scheduler: cosine annealing with warmup over all epochs
        T = 1
        scheduler = CosineAnnealingLRWarmup(optimizer,
                                            T_min=0,
                                            T_max=CFG.num_epochs // T)

        ### train related
        # train model
        learner.train(trn_data, val_data, model, optimizer, scheduler)
        print()
示例#19
0
            except EOFError:
                return


def print_cache(lst, heading):
    """Pretty-print *lst* beneath *heading*, separated by a '---' rule."""
    for banner_line in (heading, '---'):
        print(banner_line)
    pprint.pprint(lst)


if __name__ == '__main__':

    endoscope_chesspts = list(load_all('camera_data/endoscope_chesspts.p'))
    # camera_info = list(load_all('camera_data/camera_info.p'))
    left_chesspts = np.matrix(list(load_all('camera_data/left_chesspts'))[0])
    right_chesspts = np.matrix(list(load_all('camera_data/right_chesspts'))[0])

    z = np.zeros((25, 1))
    left_chesspts = np.hstack((left_chesspts, z))
    right_chesspts = np.hstack((right_chesspts, z))

    print_cache(endoscope_chesspts, "ENDOSCOPE CHESSPOINTS")
    # print_cache(camera_info, "CAMERA INFO")
    print_cache(left_chesspts, "LEFT CHESSPOINTS")
    print_cache(right_chesspts, "RIGHT CHESSPOINTS")

    TL_R = transform.get_transform("Left Camera", "Right Camera",
                                   left_chesspts, right_chesspts)
    L_R = transform.transform_data("Left Camera", "Right Camera",
                                   left_chesspts, TL_R, right_chesspts)