Example #1
import torch
import builder
import trainer

import os
import time
import argparse
from opts import opts


opts = opts().parse()
torch.set_default_tensor_type('torch.DoubleTensor' if opts.usedouble else 'torch.FloatTensor')
Builder = builder.Builder(opts)

Model = Builder.Model()
Optimizer = Builder.Optimizer(Model)
Loss = Builder.Loss()
Metrics = Builder.Metric()
TrainDataLoader, ValDataLoader = Builder.DataLoaders()
Epoch = Builder.Epoch()


Model = Model.to(opts.gpuid)

# opts.saveDir = os.path.join(opts.saveDir, os.path.join(opts.model, 'logs_{}'.format(datetime.datetime.now().isoformat())))
File = os.path.join(opts.saveDir, 'log.txt')

Trainer = trainer.Trainer(Model, Optimizer, Loss, Metrics, File, None, opts)

if opts.test:
	Trainer.test(ValDataLoader)
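All of the examples on this page revolve around an opts wrapper whose definition is not shown here. As a rough, assumed sketch only (the real classes define many more flags plus helpers such as update_dataset_info_and_set_heads), the interface the snippets rely on looks like:

import argparse

class opts(object):
    # Minimal sketch of the option wrapper assumed by these examples.
    def __init__(self):
        self.parser = argparse.ArgumentParser()
        # Illustrative flags only; each project defines its own set.
        self.parser.add_argument('--usedouble', action='store_true')
        self.parser.add_argument('--gpuid', type=int, default=0)
        self.parser.add_argument('--saveDir', default='./exp')
        self.parser.add_argument('--test', action='store_true')

    def parse(self, args=None):
        # args=None reads sys.argv; a list of strings can be passed instead,
        # as in opts().init('{} --load_model {}'.format(...).split(' ')).
        return self.parser.parse_args(args)

    def init(self, args=None):
        return self.parse(args)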
Example #2
def error_bound_saliency(opt, img_id, loc=None, error_bound=0.1):

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    Detector = detector_factory[opt.task]

    ### simply run the detector and save the objectness heat map and the detection results
    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)
    # use the FeatureExtractor to register a hook that captures the activation values
    # to find the names of target_layers, see model.named_modules()
    feature_extractor = FeatureExtractor(detector.model, target_layers='hm')
    detector.model = feature_extractor
    feature_extractor.eval()
    img_info = dataset.coco.loadImgs(ids=[img_id])[0]
    img_path = os.path.join(dataset.img_dir, img_info['file_name'])
    detector.run(img_path)

    ### get saliency mask
    ### Note: because the input image is usually resized and padded, we compute the mask on the resized image
    ### for the error we use the mean squared error (see loss_fn below)

    ## gradually grow a rect centered at the given image coordinate until the error drops below the bound
    debug_dir = detector.opt.debug_dir
    scale = 1.0
    debugger = Debugger(dataset=detector.opt.dataset,
                        ipynb=(detector.opt.debug == 3),
                        theme=detector.opt.debugger_theme)

    image_org = cv2.imread(img_path)
    image, meta, resized_img = pre_process(detector,
                                           image_org,
                                           scale,
                                           mask=None,
                                           return_resized_img=True)
    _, _, h, w = image.size()
    down_sample_rate = h / feature_extractor.target_val.size(2)
    # get the loc[center_h,center_w] on the resized image and corresponding [fh,fw] on feature map
    if loc is None:  # if loc [center_h,center_w] is not specified, use the location of the max value
        ind = torch.argmax(feature_extractor.target_val[0].sum(dim=0))
        fh = ind // feature_extractor.target_val.size(3)
        fw = ind % feature_extractor.target_val.size(3)
        center_h = fh * down_sample_rate
        center_w = fw * down_sample_rate
        val = feature_extractor.target_val[0, :, fh, fw]
        print([center_h, center_w])
    else:
        center_h, center_w = loc
        fh = int(center_h / down_sample_rate)
        fw = int(center_w / down_sample_rate)
        val = feature_extractor.target_val[0, :, fh, fw]

    loss_fn = lambda x: torch.mean(torch.pow((x - val), 2))
    area_increment = np.prod(image.size()) / 1000.0
    area = 0
    ratio = 1.0  # aspect ratio (w/h) of the grown rect
    error = 1e10
    mask = np.zeros([h, w])  # [H,W]
    while (error > error_bound):
        print("it:{} error:{}".format(area // area_increment, error))
        area += area_increment
        bh = np.sqrt(area / ratio)
        bw = area / bh
        mask = np.zeros([h, w])
        hmin, hmax = max(int(center_h - bh / 2),
                         0), min(int(center_h + bh / 2) + 1, h - 1)
        wmin, wmax = max(int(center_w - bw / 2),
                         0), min(int(center_w + bw / 2) + 1, w - 1)
        mask[hmin:hmax, wmin:wmax] = 1
        image_masked, _ = pre_process(detector, image_org, 1.0, mask)
        image_masked = image_masked.to(opt.device)
        with torch.no_grad():
            feature_extractor(image_masked)
        error = loss_fn(feature_extractor.target_val[0, :, fh, fw])
    print("it:{} error:{}".format(area // area_increment, error))
    # draw the rect mask on resized_image and save
    rect_mask_img_save_name = 'rect_mask_{:.1f}'.format(scale)
    debugger.add_blend_img(resized_img,
                           debugger.gen_colormap(mask[np.newaxis, :, :]),
                           rect_mask_img_save_name)
    kernel_hmin, kernel_hmax = max(
        int(center_h - down_sample_rate / 2),
        0), min(int(center_h + down_sample_rate / 2) + 1, h - 1)
    kernel_wmin, kernel_wmax = max(
        int(center_w - down_sample_rate / 2),
        0), min(int(center_w + down_sample_rate / 2) + 1, w - 1)
    debugger.imgs[rect_mask_img_save_name][kernel_hmin:kernel_hmax,
                                           kernel_wmin:kernel_wmax] = [
                                               255, 0, 0
                                           ]  # blue in BGR

    ## get saliency superpixel
    rect_img = resized_img[hmin:hmax, wmin:wmax]
    segments = slic(rect_img, n_segments=30)  #[hmin:hmax, wmin:wmax]
    un_removed_superpixel = list(np.unique(segments))
    rect_segment_mask = np.ones_like(segments)
    while (error < error_bound):
        # find the superpixel whose removal leads to the lowest error
        lowest_error = 1e10
        lowest_error_ind = -1
        for i in un_removed_superpixel:
            mask = np.zeros([h, w])
            mask[hmin:hmax, wmin:wmax] = rect_segment_mask * (segments != i)
            image_masked, _ = pre_process(detector, image_org, 1.0, mask)
            image_masked = image_masked.to(opt.device)
            with torch.no_grad():
                feature_extractor(image_masked)
            cur_error = loss_fn(feature_extractor.target_val[0, :, fh, fw])
            if cur_error < lowest_error:
                lowest_error = cur_error
                lowest_error_ind = i
        if not lowest_error < error_bound:
            break
        else:
            un_removed_superpixel.remove(lowest_error_ind)
            error = lowest_error
            rect_segment_mask = rect_segment_mask * (segments !=
                                                     lowest_error_ind)
            print("error={} remaining super pixel:{}".format(
                error, len(un_removed_superpixel)))

    # draw the segmentation saliency mask on resized_image and save
    mask = np.zeros([h, w])
    mask[hmin:hmax, wmin:wmax] = rect_segment_mask

    inp_image = resized_img * mask[:, :, np.newaxis].astype(np.uint8)
    debugger.add_img(inp_image, 'masked_img')
    mask_img_save_name = 'mask_{:.1f}'.format(scale)
    debugger.add_blend_img(resized_img,
                           debugger.gen_colormap(mask[np.newaxis, :, :]),
                           mask_img_save_name)
    debugger.imgs[mask_img_save_name][kernel_hmin:kernel_hmax,
                                      kernel_wmin:kernel_wmax] = [255, 0,
                                                                  0]  # blue
    debugger.save_all_imgs(debug_dir, prefix='{}'.format(opt.img_id))

    opt.prefix = '{}masked'.format(opt.img_id)
    detector.run(inp_image)
    return
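A hedged usage sketch for the function above (img_id=42 and error_bound=0.1 are made-up illustrative values): assuming opt has been parsed as in the surrounding examples, the call below grows a rectangular mask at the heat-map peak until the masked activation error drops below the bound.

opt = opts().parse()
# loc=None lets the function pick the location of the maximum heat-map value
error_bound_saliency(opt, img_id=42, loc=None, error_bound=0.1)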
Example #3
#CenterNet
import sys
CENTERNET_PATH = 'CENTERNET_ROOT/CenterNet/src/lib/'
sys.path.insert(0, CENTERNET_PATH)
from detectors.detector_factory import detector_factory
from opts import opts

MODEL_PATH = './CenterNet/models/ctdet_coco_dla_2x.pth'
ARCH = 'dla_34'

#MODEL_PATH = './CenterNet/models/ctdet_coco_resdcn18.pth'
#ARCH = 'resdcn_18'

TASK = 'ctdet'  # or 'multi_pose' for human pose estimation
opt = opts().init('{} --load_model {} --arch {}'.format(
    TASK, MODEL_PATH, ARCH).split(' '))

#input_type
opt.input_type = 'vid'  # video : vid,  webcam : webcam, ip camera : ipcam

#------------------------------
# for video
opt.vid_path = 'MOT16-11.mp4'
#------------------------------
# for webcam  (webcam device index is required)
opt.webcam_ind = 0
#------------------------------
# for ip camera (a camera URL is required; this is the Dahua URL format)
opt.ipcam_url = 'rtsp://{0}:{1}@IPAddress:554/cam/realmonitor?channel={2}&subtype=1'
# ipcamera camera number
opt.ipcam_no = 8
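The snippet above only sets options; the code that consumes them is not shown. A minimal sketch (assumed, not taken from the CenterNet demo) of how the three input types could map to an OpenCV capture source, with 'user' and 'password' as placeholder credentials for the Dahua URL template:

import cv2

def open_capture(opt):
    if opt.input_type == 'vid':
        return cv2.VideoCapture(opt.vid_path)
    if opt.input_type == 'webcam':
        return cv2.VideoCapture(opt.webcam_ind)
    if opt.input_type == 'ipcam':
        return cv2.VideoCapture(opt.ipcam_url.format('user', 'password', opt.ipcam_no))
    raise ValueError('unknown input_type: {}'.format(opt.input_type))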
Example #4
def main():
    opt = opts().parse()
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))

    if opt.loadModel != 'none':
        model = torch.load(opt.loadModel).cuda()
    else:
        model = HourglassNet3D(opt.nStack, opt.nModules, opt.nFeats,
                               opt.nRegModules).cuda()

    criterion = torch.nn.MSELoss().cuda()
    optimizer = torch.optim.RMSprop(model.parameters(),
                                    opt.LR,
                                    alpha=ref.alpha,
                                    eps=ref.epsilon,
                                    weight_decay=ref.weightDecay,
                                    momentum=ref.momentum)

    if opt.ratio3D < ref.eps:
        if not opt.allSYN:
            print('Using MPII as validation set')
            val_loader = torch.utils.data.DataLoader(MPII(opt,
                                                          'val',
                                                          returnMeta=True),
                                                     batch_size=1,
                                                     shuffle=False,
                                                     num_workers=int(
                                                         ref.nThreads))
        else:
            print('Using SYN2D as validation set')
            val_loader = torch.utils.data.DataLoader(Syn2D(opt,
                                                           'val',
                                                           returnMeta=True),
                                                     batch_size=1,
                                                     shuffle=False,
                                                     num_workers=int(
                                                         ref.nThreads))
    else:
        print('Using 3D dataset as validation set')
        if opt.useSyn and not opt.allSYN:
            val_loader = torch.utils.data.DataLoader(Synthetic(opt, 'val'),
                                                     batch_size=1,
                                                     shuffle=False,
                                                     num_workers=int(
                                                         ref.nThreads))
        elif opt.allSYN:
            val_loader = torch.utils.data.DataLoader(Syn3D(opt, 'val'),
                                                     batch_size=1,
                                                     shuffle=False,
                                                     num_workers=int(
                                                         ref.nThreads))
        else:
            val_loader = torch.utils.data.DataLoader(H36M(opt, 'val'),
                                                     batch_size=1,
                                                     shuffle=False,
                                                     num_workers=int(
                                                         ref.nThreads))

    if opt.test:
        val(0, opt, val_loader, model, criterion)
        return

    train_loader = torch.utils.data.DataLoader(
        Fusion(opt, 'train'),
        batch_size=opt.trainBatch,
        shuffle=True if opt.DEBUG == 0 else False,
        num_workers=int(ref.nThreads))

    for epoch in range(1, opt.nEpochs + 1):
        loss_train, acc_train, mpjpe_train, loss3d_train = train(
            epoch, opt, train_loader, model, criterion, optimizer)
        logger.scalar_summary('loss_train', loss_train, epoch)
        logger.scalar_summary('acc_train', acc_train, epoch)
        logger.scalar_summary('mpjpe_train', mpjpe_train, epoch)
        logger.scalar_summary('loss3d_train', loss3d_train, epoch)

        print(
            'Epoch {}: loss_train = {}, acc_train = {}, mpjpe_train = {}, loss3d_train = {}'
            .format(epoch, loss_train, acc_train, mpjpe_train, loss3d_train))
        if epoch % opt.valIntervals == 0:
            loss_val, acc_val, mpjpe_val, loss3d_val = val(
                epoch, opt, val_loader, model, criterion)
            logger.scalar_summary('loss_val', loss_val, epoch)
            logger.scalar_summary('acc_val', acc_val, epoch)
            logger.scalar_summary('mpjpe_val', mpjpe_val, epoch)
            logger.scalar_summary('loss3d_val', loss3d_val, epoch)
            torch.save(model,
                       os.path.join(opt.saveDir, 'model_{}.pth'.format(epoch)))
            logger.write(
                '{:8f} {:8f} {:8f} {:8f} {:8f} {:8f} {:8f} {:8f} \n'.format(
                    loss_train, acc_train, mpjpe_train, loss3d_train, loss_val,
                    acc_val, mpjpe_val, loss3d_val))
        else:
            logger.write('{:8f} {:8f} {:8f} {:8f} \n'.format(
                loss_train, acc_train, mpjpe_train, loss3d_train))
        adjust_learning_rate(optimizer, epoch, opt.dropLR, opt.LR)
    logger.close()
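adjust_learning_rate is called at the end of every epoch but is not defined in this snippet. A plausible minimal version, assumed from the opt.dropLR and opt.LR arguments above, is a step decay that divides the base LR by 10 every dropLR epochs:

def adjust_learning_rate(optimizer, epoch, dropLR, LR):
    # Assumed helper: LR/10 after dropLR epochs, LR/100 after 2*dropLR, and so on.
    lr = LR * (0.1 ** (epoch // dropLR))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr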
Example #5
    # self.default_resolution = [opt.input_h, opt.input_w]
    self.cat_ids = {i: i for i in range(1, self.num_categories + 1)}

    self.images = None
    # load image list and coco
    super().__init__(opt, split, ann_path, img_dir)

    self.num_samples = len(self.images)
    print('Loaded Custom dataset {} samples'.format(self.num_samples))
  
  def __len__(self):
    return self.num_samples

  def run_eval(self, results, save_dir):
    pass

if __name__ == '__main__':
  sys.path.append('/u/jozhang/code/motion3d/external/CenterTrack/src/lib')
  from opts import opts
  opt = opts().parse()
  opt.data_dir = Path('/scratch/cluster/jozhang/datasets/TAO')
  opt.tracking = True
  opt.not_max_crop = True
  opt.output_w = 256
  opt.output_h = 256
  opt.num_classes = 833
  dataset = TAODataset(opt, 'train')
  opt = opts().update_dataset_info_and_set_heads(opt, TAODataset)
  sample = dataset[6]
  print(sample)
Example #6
def load_model(model, model_path, optimizer=None, resume=False, 
               lr=None, lr_step=None):
  opt = opts().init()
  start_epoch = 0
  checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
  print('loaded {}, epoch {}'.format(model_path, checkpoint['epoch']))
  state_dict_ = checkpoint['state_dict']
  state_dict = {}
  
  # convert data_parallal to model
  for k in state_dict_:
    if k.startswith('module') and not k.startswith('module_list'):
      state_dict[k[7:]] = state_dict_[k]
    else:
      state_dict[k] = state_dict_[k]
  model_state_dict = model.state_dict()

  # check loaded parameters and created model parameters
  msg = 'If you see this, your model does not fully load the ' + \
        'pre-trained weight. Please make sure ' + \
        'you have correctly specified --arch xxx ' + \
        'or set the correct --num_classes for your own dataset.'
  for k in state_dict:
    if k in model_state_dict:
      if state_dict[k].shape != model_state_dict[k].shape:
        print('Skip loading parameter {}, required shape{}, '\
              'loaded shape{}. {}'.format(
          k, model_state_dict[k].shape, state_dict[k].shape, msg))
        state_dict[k] = model_state_dict[k]
    else:
      print('Drop parameter {}.'.format(k) + msg)
  for k in model_state_dict:
    if not (k in state_dict):
      print('No param {}.'.format(k) + msg)
      state_dict[k] = model_state_dict[k]
  model.load_state_dict(state_dict, strict=False)

  # resume optimizer parameters
  if optimizer is not None and resume:
    if 'optimizer' in checkpoint:
      optimizer.load_state_dict(checkpoint['optimizer'])
      start_epoch = checkpoint['epoch']
      start_lr = lr
      for step in lr_step:
        if start_epoch >= step:
          start_lr *= 0.1
      for param_group in optimizer.param_groups:
        param_group['lr'] = start_lr
      print('Resumed optimizer with start lr', start_lr)
    else:
      print('No optimizer parameters in checkpoint.')
  
  if opt.exp_wo:
    # load an example image and run it through the model
    model, input_batch = load_ex_image(model, opt.exp_wo_dim)
    model.eval()
    with torch.no_grad():
      output = model(input_batch)
    # export the weights and debug outputs
    weights_outputs_exporter(model, input_batch, opt.exp_wo_dim)

  if optimizer is not None:
    return model, optimizer, start_epoch
  else:
    return model
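Hypothetical usage of the loader above, assuming torch and a CenterNet-style create_model are already imported (the architecture, head dict, checkpoint name, and LR schedule are illustrative, not from this snippet): resuming restores both the weights and a step-decayed learning rate.

model = create_model('dla_34', {'hm': 80, 'wh': 2, 'reg': 2}, 256)
optimizer = torch.optim.Adam(model.parameters(), lr=1.25e-4)
model, optimizer, start_epoch = load_model(
    model, 'ctdet_coco_dla_2x.pth', optimizer,
    resume=True, lr=1.25e-4, lr_step=[90, 120])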
Example #7
def prefetch_test(opt):
    if not opt.not_set_cuda_env:
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus_str
    Dataset = dataset_factory[opt.test_dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)

    split = "val" if not opt.trainval else "test"
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    if opt.load_results != "":
        load_results = json.load(open(opt.load_results, "r"))
        for img_id in load_results:
            for k in range(len(load_results[img_id])):
                if load_results[img_id][k]["class"] - 1 in opt.ignore_loaded_cats:
                    load_results[img_id][k]["score"] = -1
    else:
        load_results = {}

    data_loader = torch.utils.data.DataLoader(
        PrefetchDataset(opt, dataset, detector.pre_process),
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True,
    )

    results = {}
    num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
    bar = Bar("{}".format(opt.exp_id), max=num_iters)
    time_stats = ["tot", "load", "pre", "net", "dec", "post", "merge", "track"]
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    if opt.use_loaded_results:
        for img_id in data_loader.dataset.images:
            results[img_id] = load_results["{}".format(img_id)]
        num_iters = 0
    for ind, (img_id, pre_processed_images) in enumerate(data_loader):
        if ind >= num_iters:
            break
        if opt.tracking and ("is_first_frame" in pre_processed_images):
            if "{}".format(int(img_id.numpy().astype(np.int32)[0])) in load_results:
                pre_processed_images["meta"]["pre_dets"] = load_results[
                    "{}".format(int(img_id.numpy().astype(np.int32)[0]))
                ]
            else:
                print()
                print("No pre_dets for", int(img_id.numpy().astype(np.int32)[0]), ". Use empty initialization.")
                pre_processed_images["meta"]["pre_dets"] = []
            detector.reset_tracking()
            print("Start tracking video", int(pre_processed_images["video_id"]))
        if opt.public_det:
            if "{}".format(int(img_id.numpy().astype(np.int32)[0])) in load_results:
                pre_processed_images["meta"]["cur_dets"] = load_results[
                    "{}".format(int(img_id.numpy().astype(np.int32)[0]))
                ]
            else:
                print("No cur_dets for", int(img_id.numpy().astype(np.int32)[0]))
                pre_processed_images["meta"]["cur_dets"] = []

        ret = detector.run(pre_processed_images)
        results[int(img_id.numpy().astype(np.int32)[0])] = ret["results"]

        Bar.suffix = "[{0}/{1}]|Tot: {total:} |ETA: {eta:} ".format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td
        )
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + "|{} {tm.val:.3f}s ({tm.avg:.3f}s) ".format(t, tm=avg_time_stats[t])
        if opt.print_iter > 0:
            if ind % opt.print_iter == 0:
                print("{}/{}| {}".format(opt.task, opt.exp_id, Bar.suffix))
        else:
            bar.next()
    bar.finish()
    if opt.save_results:
        print(
            "saving results to", opt.save_dir + "/save_results_{}{}.json".format(opt.test_dataset, opt.dataset_version)
        )
        json.dump(
            _to_list(copy.deepcopy(results)),
            open(opt.save_dir + "/save_results_{}{}.json".format(opt.test_dataset, opt.dataset_version), "w"),
        )
    dataset.run_eval(results, opt.save_dir)
Example #8
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch,
                           model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()


if __name__ == '__main__':
    opt = opts().parse('--exp_id {} --batch_size {} --lr_step {}'.format(
        "coco_resdcn18_2w5", 8, '3,6,10').split(' '))
    main(opt)
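To make the step schedule above concrete: with the lr_step '3,6,10' passed in __main__ and an assumed base opt.lr of 1.25e-4 (the snippet does not show the default), lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1)) gives:

# epoch 3  -> 1.25e-4 * 0.1 ** 1 = 1.25e-5
# epoch 6  -> 1.25e-4 * 0.1 ** 2 = 1.25e-6
# epoch 10 -> 1.25e-4 * 0.1 ** 3 = 1.25e-7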
Example #9
def prefetch_test(opt):
    if not opt.not_set_cuda_env:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Dataset = dataset_factory[opt.test_dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)

    split = 'val' if not opt.trainval else 'test'
    if split == 'val':
        split = opt.val_split
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    if opt.load_results != '':
        load_results = json.load(open(opt.load_results, 'r'))
        for img_id in load_results:
            for k in range(len(load_results[img_id])):
                if load_results[img_id][k][
                        'class'] - 1 in opt.ignore_loaded_cats:
                    load_results[img_id][k]['score'] = -1
    else:
        load_results = {}

    data_loader = torch.utils.data.DataLoader(PrefetchDataset(
        opt, dataset, detector.pre_process),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=1,
                                              pin_memory=True)

    results = {}
    num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge', 'track']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    if opt.use_loaded_results:
        for img_id in data_loader.dataset.images:
            results[img_id] = load_results['{}'.format(img_id)]
        num_iters = 0
    for ind, (img_id, pre_processed_images) in enumerate(data_loader):
        if ind >= num_iters:
            break
        if opt.tracking and ('is_first_frame' in pre_processed_images):
            if '{}'.format(int(img_id.numpy().astype(
                    np.int32)[0])) in load_results:
                pre_processed_images['meta']['pre_dets'] = \
                  load_results['{}'.format(int(img_id.numpy().astype(np.int32)[0]))]
            else:
                print()
                print('No pre_dets for',
                      int(img_id.numpy().astype(np.int32)[0]),
                      '. Use empty initialization.')
                pre_processed_images['meta']['pre_dets'] = []
            detector.reset_tracking()
            print('Start tracking video',
                  int(pre_processed_images['video_id']))
        if opt.public_det:
            if '{}'.format(int(img_id.numpy().astype(
                    np.int32)[0])) in load_results:
                pre_processed_images['meta']['cur_dets'] = \
                  load_results['{}'.format(int(img_id.numpy().astype(np.int32)[0]))]
            else:
                print('No cur_dets for',
                      int(img_id.numpy().astype(np.int32)[0]))
                pre_processed_images['meta']['cur_dets'] = []

        ret = detector.run(pre_processed_images)
        results[int(img_id.numpy().astype(np.int32)[0])] = ret['results']

        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
                t, tm=avg_time_stats[t])
        if opt.print_iter > 0:
            if ind % opt.print_iter == 0:
                print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
        else:
            bar.next()
    bar.finish()
    if opt.save_results:
        print(
            'saving results to',
            opt.save_dir + '/save_results_{}{}.json'.format(
                opt.test_dataset, opt.dataset_version))
        json.dump(
            _to_list(copy.deepcopy(results)),
            open(
                opt.save_dir + '/save_results_{}{}.json'.format(
                    opt.test_dataset, opt.dataset_version), 'w'))
    dataset.run_eval(results,
                     opt.save_dir,
                     n_plots=opt.eval_n_plots,
                     render_curves=opt.eval_render_curves)
Example #10
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            print(time_str)
            if cv2.waitKey(1) == 27:
                return  # esc to quit
    else:
        if os.path.isdir(opt.demo):
            image_names = []
            ls = os.listdir(opt.demo)
            for file_name in sorted(ls):
                ext = file_name[file_name.rfind('.') + 1:].lower()
                if ext in image_ext:
                    image_names.append(os.path.join(opt.demo, file_name))
        else:
            image_names = [opt.demo]

        for (image_name) in image_names:
            ret = detector.run(image_name)
            time_str = ''
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            print(time_str)


if __name__ == '__main__':
    opt = opts().init(numclasses=1)
    debugger.coco_class_name = [
        'face',
    ]
    demo(opt)
Example #11
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    if not opt.not_set_cuda_env:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
    logger = Logger(opt)

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
    optimizer = get_optimizer(opt, model)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model, opt,
                                                   optimizer)

    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up train data...')
    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    # for each epoch, record scale
    bestmota = 0
    bestepoch = 0

    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch, model,
                   optimizer)
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            # with torch.no_grad():
            #     log_dict_val, preds = trainer.val(epoch, val_loader)
            #     if opt.eval_val:
            #         val_loader.dataset.run_eval(preds, opt.save_dir)
            # for k, v in log_dict_val.items():
            #     logger.scalar_summary('val_{}'.format(k), v, epoch)
            #     logger.write('{} {:8f} | '.format(k, v))
            valset = '17halfval'
            mota, motp = prefetch_test(opt, valset)
            if mota > bestmota:
                bestmota = mota
                bestepoch = epoch
            print('mota = {}, motp = {}, bestmota = {}, bestepoch = {}'.format(
                mota, motp, bestmota, bestepoch))

        logger.write('\n')
        if epoch in opt.save_point:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
Example #12
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(
            model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)

    print('Setting up data...')
    val_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'val', opt.test_on_subset),
        batch_size=opt.batch_size * 2,
        shuffle=False,
        num_workers=opt.num_workers,
        pin_memory=True
    )

    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'train'),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True
    )

    num_iters = len(train_loader) if opt.num_iters < 0 else opt.num_iters

    if opt.lr_type == 'STEP':
        scheduler_config = {
            'type': 'STEP',
            'lr_steps': [step * num_iters for step in opt.lr_step],
            'lr_mults': opt.lr_mults,
            'base_lr': opt.lr,
            'warmup_steps': opt.warmup_step,
            'warmup_lr': opt.warmup_lr,
            'max_iter': opt.num_epochs * num_iters,
            'last_iter': start_epoch * num_iters - 1
        }
    elif opt.lr_type == 'COSINE':
        scheduler_config = {
            'type': 'COSINE',
            'base_lr': opt.lr,
            'warmup_steps': opt.warmup_step,
            'warmup_lr': opt.warmup_lr,
            'min_lr': 0.0,
            'max_iter': opt.num_epochs * num_iters,
            'last_iter': start_epoch * num_iters - 1
        }
    else:
        raise ValueError("lr_type should be STEP or COSINE.")

    lr_scheduler = get_scheduler(optimizer=optimizer, config=scheduler_config)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer, logger, lr_scheduler=lr_scheduler)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    if opt.test:
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'

        # FIXME: dataloader created inside the epoch loop
        # regenerate the dataloader every epoch to randomly re-sample frames
        train_loader = torch.utils.data.DataLoader(
            Dataset(opt, 'train'),
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=opt.num_workers,
            pin_memory=True,
            drop_last=True
        )

        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_average/{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_average/{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'),
                           epoch, model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                       epoch, model, optimizer)
        logger.write('\n')
    logger.close()
Example #13
        results[img_id] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(
                t, avg_time_stats[t].avg)
        if ind % 1000 == 0:
            bar.next()
    bar.finish()
    val_dataset.run_eval(results=results, save_dir='./output/')


if __name__ == "__main__":
    opt = opts()
    opt.task = "ddd"
    opt = opt.init()

    ctx = [mx.gpu(int(i)) for i in opt.gpus_str.split(',') if i.strip()]
    ctx = ctx if ctx else [mx.cpu()]
    print("Using Devices: ", ctx)
    """ 1. network """
    print('Creating model...')
    print("Using network architecture: ", opt.arch)
    model = create_model(opt.arch, opt.heads, opt.head_conv, ctx=ctx)

    opt.cur_epoch = 0
    if opt.flag_finetune:
        model = load_model(model, opt.pretrained_path, ctx=ctx)
        opt.cur_epoch = int(opt.pretrained_path.split('.')[0][-4:])
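        # Hypothetical example: with opt.pretrained_path = 'model_0120.params',
        # split('.')[0][-4:] yields '0120', so opt.cur_epoch becomes 120.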
Example #14
def main():
   config = {'crash':
                {'longOpt': 'crash',
                 'type': bool,
                 'value': False,
                 'desc': "Crash the target (and leave innocuous core file)"},
             'host':
                {'shortOpt': 'h',
                 'arg': 'host',
                 'type': str,
                 'value': '127.0.0.1',
                 'desc': "Target host (hostname or IP)"},
             'imtaBase':
                {'longOpt': 'imta',
                 'arg': 'address',
                 'type': long,
                 'desc': "Address of libimta.so in target process"},
             'nAttempts':
                {'shortOpt': 'n',
                 'arg': 'number',
                 'type': int,
                 'value': 1,
                 'desc': "Number of times to attempt exploitation"},
             'os':
                {'longOpt': 'os',
                 'arg': 'version',
                 'type': str,
                 'desc': "OS of target"},
             'port':
                {'shortOpt': 'p',
                 'arg': 'port',
                 'type': int,
                 'value': 25,
                 'desc': "Target port"},
             'recipient':
                {'shortOpt': 'r',
                 'arg': 'recipient',
                 'type': str,
                 'value': 'svcadmin',
                 'desc': "Email recipient"},
             'sender':
                {'shortOpt': 's',
                 'arg': 'sender',
                 'type': str,
                 'desc': "Email sender"},
             'stackBase':
                {'longOpt': 'stack',
                 'arg': 'address',
                 'type': long,
                 'desc': "Address of thread stack in target process"},
             'touch':
                {'longOpt': 'touch',
                 'type': bool,
                 'value': False,
                 'desc': "Touch the target"},
             'version':
                {'longOpt': 'version',
                 'arg': 'version',
                 'type': str,
                 'desc': "Version of target"}
            }

   options = opts.opts(sys.argv[0], config, targets.list, systems.list)
   args = options.parseCommandLine(sys.argv[1:])

   nAttempts = options.get('nAttempts')
   if (nAttempts < 1):
      usage(sys.argv[0])
      sys.exit(1)

   target = None
   version = options.get('version')   
   if (None != version):
      target = targets.factory(version)
      if (None == target):
         print "Unsupported version: \"%s\"." % version
         print "Please specify a different version or allow it to be " \
               "set automatically.\n"
         targets.list()
         sys.exit(1)

   osInstance = None
   os = options.get('os')
   stackBase = options.get('stackBase')
   if (None != os):
      osInstance = systems.factory(os, stackBase)
      if (None == osInstance):
         print "Unsupported operating system: \"%s\"." % os
         print "Please specify a different version or allow it to be " \
               "set automatically.\n"
         systems.list()
         sys.exit(1)

   host = options.get('host')
   port = options.get('port')
   sd, banner = smtpUtils.connect(host, port, True)
   try:
      version = re.compile(r"\((.*)\)").search(banner).group(1)
   except AttributeError, err:
      print "Target banner: \"%s\"" % banner.strip()
      print "Target banner doesn't parse."
      sys.exit(1)
Example #15
            cv2.imshow('input', img)
            ret = detector.run(img)
            time_str = ''
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            print(time_str)
            if cv2.waitKey(1) == 27:
                return  # esc to quit
    else:
        if os.path.isdir(opt.demo):
            image_names = []
            ls = os.listdir(opt.demo)
            for file_name in sorted(ls):
                ext = file_name[file_name.rfind('.') + 1:].lower()
                if ext in image_ext:
                    image_names.append(os.path.join(opt.demo, file_name))
        else:
            image_names = [opt.demo]

        for (image_name) in image_names:
            ret = detector.run(image_name)
            time_str = ''
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            print(time_str)


if __name__ == '__main__':
    opt = opts().init()
    demo(opt)
Example #16
def prefetch_test(opt):
    show_image = True
    if not opt.not_set_cuda_env:
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus_str
    Dataset = dataset_factory[opt.test_dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)

    split = "val" if not opt.trainval else "test"
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    if opt.load_results != "":
        load_results = json.load(open(opt.load_results, "r"))
        for img_id in load_results:
            for k in range(len(load_results[img_id])):
                if load_results[img_id][k][
                        "class"] - 1 in opt.ignore_loaded_cats:
                    load_results[img_id][k]["score"] = -1
    else:
        load_results = {}

    data_loader = torch.utils.data.DataLoader(
        PrefetchDataset(opt, dataset, detector.pre_process),
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True,
    )

    results = {}
    num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
    bar = Bar("{}".format(opt.exp_id), max=num_iters)
    time_stats = ["tot", "load", "pre", "net", "dec", "post", "merge", "track"]
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    if opt.use_loaded_results:
        for img_id in data_loader.dataset.images:
            results[img_id] = load_results["{}".format(img_id)]
        num_iters = 0
    final_results = []
    out_path = ""

    if opt.dataset == "nuscenes":
        ret = {
            "meta": {
                "use_camera": True,
                "use_lidar": False,
                "use_radar": False,
                "use_map": False,
                "use_external": False,
            },
            "results": {},
        }

    for ind, (img_id, pre_processed_images,
              img_info) in enumerate(data_loader):
        if ind >= num_iters:
            break

        if opt.dataset == "nuscenes":
            sample_token = img_info["sample_token"][0]
            sensor_id = img_info["sensor_id"].numpy().tolist()[0]

        if opt.tracking and ("is_first_frame" in pre_processed_images):
            if "{}".format(int(img_id.numpy().astype(
                    np.int32)[0])) in load_results:
                pre_processed_images["meta"]["pre_dets"] = load_results[
                    "{}".format(int(img_id.numpy().astype(np.int32)[0]))]
            else:
                print(
                    "No pre_dets for",
                    int(img_id.numpy().astype(np.int32)[0]),
                    ". Use empty initialization.",
                )
                pre_processed_images["meta"]["pre_dets"] = []
            if len(final_results) > 0 and not opt.dataset == "nuscenes":
                write_results(out_path, final_results, opt.dataset)
                final_results = []
            img0 = pre_processed_images["image"][0].numpy()
            h, w, _ = img0.shape
            detector.img_height = h
            detector.img_width = w
            if opt.dataset == "nuscenes":
                save_video_name = os.path.join(
                    opt.dataset + "_videos/",
                    "MOT" + str(int(pre_processed_images["video_id"])) + "_" +
                    str(int(img_info["sensor_id"])) + ".avi",
                )
            elif opt.dataset == "kitti_tracking":
                save_video_name = os.path.join(
                    opt.dataset + "_videos/",
                    "KITTI_" + str(int(pre_processed_images["video_id"])) +
                    ".avi",
                )
            else:
                save_video_name = os.path.join(
                    opt.dataset + "_videos/",
                    "MOT" + str(int(pre_processed_images["video_id"])) +
                    ".avi",
                )
            results_dir = opt.dataset + "_results"
            if not os.path.exists(opt.dataset + "_videos/"):
                os.mkdir(opt.dataset + "_videos/")
            if not os.path.exists(results_dir):
                os.mkdir(results_dir)
            for video in dataset.coco.dataset["videos"]:
                video_id = video["id"]
                file_name = video["file_name"]
                if (pre_processed_images["video_id"] == video_id
                        and not opt.dataset == "nuscenes"):
                    out_path = os.path.join(results_dir,
                                            "{}.txt".format(file_name))
                    break

            detector.reset_tracking(opt)
            vw = cv2.VideoWriter(save_video_name,
                                 cv2.VideoWriter_fourcc("M", "J", "P", "G"),
                                 10, (w, h))
            print("Start tracking video",
                  int(pre_processed_images["video_id"]))
        if opt.public_det:
            if "{}".format(int(img_id.numpy().astype(
                    np.int32)[0])) in load_results:
                pre_processed_images["meta"]["cur_dets"] = load_results[
                    "{}".format(int(img_id.numpy().astype(np.int32)[0]))]
            else:
                print("No cur_dets for",
                      int(img_id.numpy().astype(np.int32)[0]))
                pre_processed_images["meta"]["cur_dets"] = []

        online_targets = detector.run(pre_processed_images,
                                      image_info=img_info)
        online_tlwhs = []
        online_ids = []
        online_ddd_boxes = []
        sample_results = []

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            if tlwh[2] * tlwh[3] > min_box_area:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)

                if opt.dataset == "nuscenes":
                    online_ddd_boxes.append(t.org_ddd_box)
                    class_name = t.classe
                    if class_name in _cycles:
                        att = id_to_attribute[np.argmax(nuscenes_att[0:2]) + 1]
                    elif class_name in _pedestrians:
                        att = id_to_attribute[np.argmax(nuscenes_att[2:5]) + 3]
                    elif class_name in _vehicles:
                        att = id_to_attribute[np.argmax(nuscenes_att[5:8]) + 6]

                    ddd_box = t.ddd_bbox.copy()
                    ddd_box_submission = t.ddd_submission.tolist()
                    translation, size, rotation = (
                        ddd_box_submission[:3],
                        ddd_box_submission[3:6],
                        ddd_box_submission[6:],
                    )

                    result = {
                        "sample_token": sample_token,
                        "translation": translation,
                        "size": size,
                        "rotation": rotation,
                        "velocity": [0, 0],
                        "detection_name": t.classe,
                        "attribute_name": att,
                        "detection_score": t.score,
                        "tracking_name": t.classe,
                        "tracking_score": t.score,
                        "tracking_id": tid,
                        "sensor_id": sensor_id,
                        "det_id": -1,
                    }
                    sample_results.append(result.copy())

        if opt.dataset == "nuscenes":
            if sample_token in ret["results"]:

                ret["results"][sample_token] = (ret["results"][sample_token] +
                                                sample_results)
            else:
                ret["results"][sample_token] = sample_results

        final_results.append((pre_processed_images["frame_id"].cpu().item(),
                              online_tlwhs, online_ids))
        if show_image:
            img0 = pre_processed_images["image"][0].numpy()

            if opt.dataset == "nuscenes":
                online_im = plot_tracking_ddd(
                    img0,
                    online_tlwhs,
                    online_ddd_boxes,
                    online_ids,
                    frame_id=pre_processed_images["frame_id"],
                    calib=img_info["calib"],
                )
            else:
                online_im = plot_tracking(
                    img0,
                    online_tlwhs,
                    online_ids,
                    frame_id=pre_processed_images["frame_id"],
                )
            vw.write(online_im)

    if not opt.dataset == "nuscenes" and len(final_results) > 0:
        write_results(out_path, final_results, opt.dataset)
        final_results = []
    if opt.dataset == "nuscenes":
        for sample_token in ret["results"].keys():
            confs = sorted([
                (-d["detection_score"], ind)
                for ind, d in enumerate(ret["results"][sample_token])
            ])
            ret["results"][sample_token] = [
                ret["results"][sample_token][ind]
                for _, ind in confs[:min(500, len(confs))]
            ]

        json.dump(ret, open(results_dir + "/results.json", "w"))
Example #17
def main():
   config = {'atimeout':
                {'longOpt': 'atimeout',
                 'arg': 'seconds',
                 'type': int,
                 'default': 30,
                 'desc': "Authentication timeout (in seconds)"},
             'cip':
                {'longOpt': 'cip',
                 'arg': 'IPAddress',
                 'type': str,
                 'default': '127.0.0.1',
                 'desc': "Callback IP address"},
             'clport':
                {'longOpt': 'clport',
                 'arg': 'port',
                 'type': int,
                 'desc': "Local callback port"},
             'cport':
                {'longOpt': 'cport',
                 'arg': 'port',
                 'type': int,
                 'desc': "Callback port"},
             'ctimeout':
                {'longOpt': 'ctimeout',
                 'arg': 'seconds',
                 'type': int,
                 'default': 30,
                 'desc': "Callback timeout (in seconds)"},
             'domain':
                {'longOpt': 'domain',
                 'arg': 'domainName',
                 'type': str,
                 'desc': "Domain name of sender"},
             'exec':
                {'longOpt': 'exec',
                 'arg': 'filename',
                 'type': str,
                 'desc': "File to exec on successful upload"},
             'recipient':
                {'longOpt': 'recipient',
                 'arg': 'emailAddress',
                 'type': str,
                 'default': 'root',
                 'desc': "Email recipient"},
             'target':
                {'longOpt': 'target',
                 'arg': 'target',
                 'type': str,
                 'desc': "Target OS"},
             'tip':
                {'longOpt': 'tip',
                 'arg': 'IPAddress',
                 'type': str,
                 'default': '127.0.0.1',
                 'desc': "Target IP address"},
             'tmpnam':
                {'longOpt': 'tmpnam',
                 'arg': 'filename',
                 'type': str,
                 'desc': "Remote name of the uploaded file "
                         "(of the form /tmp/fileXXXXXX)"},
             'tport':
                {'longOpt': 'tport',
                 'arg': 'port',
                 'type': int,
                 'default': 25,
                 'desc': "Target port"},
             'upload':
                {'longOpt': 'upload',
                 'arg': 'filename',
                 'type': str,
                 'desc': "File to upload"}
            }

   parms = opts.opts(sys.argv[0], config, targets.list)
   args = parms.parseCommandLine(sys.argv[1:])

   status = doExploit(parms)
   if (-1 == status):
      return 1      
   return status
Example #18
def main():
    opt = opts().parse()
    if opt.loadModel == '':
        opt.loadModel = '../models/Pascal3D-cpu.pth'
    model = torch.load(opt.loadModel)
    img = cv2.imread(opt.demo)
    s = max(img.shape[0], img.shape[1]) * 1.0
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.])
    img = Crop(img, c, s, 0, ref.inputRes).astype(np.float32).transpose(
        2, 0, 1) / 256.
    input = torch.from_numpy(img.copy()).float()
    input = input.view(1, input.size(0), input.size(1), input.size(2))
    input_var = torch.autograd.Variable(input).float()
    if opt.GPU > -1:
        model = model.cuda(opt.GPU)
        input_var = input_var.cuda(opt.GPU)

    output = model(input_var)
    hm = output[-1].data.cpu().numpy()

    debugger = Debugger()
    img = (input[0].numpy().transpose(1, 2, 0) * 256).astype(np.uint8).copy()
    inp = img.copy()
    star = (cv2.resize(hm[0, 0], (ref.inputRes, ref.inputRes)) * 255)
    star[star > 255] = 255
    star[star < 0] = 0
    star = np.tile(star, (3, 1, 1)).transpose(1, 2, 0)
    trans = 0.8
    star = (trans * star + (1. - trans) * img).astype(np.uint8)

    ps = parseHeatmap(hm[0], thresh=0.1)
    canonical, pred, color, score = [], [], [], []
    for k in range(len(ps[0])):
        x, y, z = ((hm[0, 1:4, ps[0][k], ps[1][k]] + 0.5) *
                   ref.outputRes).astype(np.int32)
        dep = ((hm[0, 4, ps[0][k], ps[1][k]] + 0.5) * ref.outputRes).astype(
            np.int32)
        canonical.append([x, y, z])
        pred.append([ps[1][k], ref.outputRes - dep, ref.outputRes - ps[0][k]])
        score.append(hm[0, 0, ps[0][k], ps[1][k]])
        color.append((1.0 * x / ref.outputRes, 1.0 * y / ref.outputRes,
                      1.0 * z / ref.outputRes))
        cv2.circle(img, (ps[1][k] * 4, ps[0][k] * 4), 4, (255, 255, 255), -1)
        cv2.circle(img, (ps[1][k] * 4, ps[0][k] * 4), 2,
                   (int(z * 4), int(y * 4), int(x * 4)), -1)

    pred = np.array(pred).astype(np.float32)
    canonical = np.array(canonical).astype(np.float32)

    pointS = canonical * 1.0 / ref.outputRes
    pointT = pred * 1.0 / ref.outputRes
    R, t, s = horn87(pointS.transpose(), pointT.transpose(), score)

    rotated_pred = s * np.dot(
        R, canonical.transpose()).transpose() + t * ref.outputRes

    debugger.addImg(inp, 'inp')
    debugger.addImg(star, 'star')
    debugger.addImg(img, 'nms')
    debugger.addPoint3D(canonical / ref.outputRes - 0.5, c=color, marker='^')
    debugger.addPoint3D(pred / ref.outputRes - 0.5, c=color, marker='x')
    debugger.addPoint3D(rotated_pred / ref.outputRes - 0.5,
                        c=color,
                        marker='*')

    debugger.showAllImg(pause=True)
    debugger.show3D()
Example #19
from models.model import create_model, load_model
from opts import opts

import torch
from torch.autograd import Variable
from pytorch2caffe import pytorch_to_caffe as p2c

name = 'CenterNet_shelf_res18_510_80'
img_dim = 510  # (384, 512)
num_classes = 80  # VOC=21 COCO=81
dataset = {
    'default_resolution': [img_dim, img_dim],
    'num_classes': num_classes,
    'mean': [0.408, 0.447, 0.470],
    'std': [0.289, 0.274, 0.278],
    'dataset': 'coco+oi'
}

opt = opts().init(dataset=dataset)
net = create_model(opt.arch, opt.heads, opt.head_conv)
# net = load_model(net, opt.load_model)
net.eval()

input = Variable(torch.ones([1, 3, img_dim, img_dim]))
p2c.trans_net(net, input, name, True)
p2c.save_prototxt('../caffe_models/{}.prototxt'.format(name))
p2c.save_caffemodel('../caffe_models/{}.caffemodel'.format(name))

print('PyTorch model "{}" conversion completed.'.format(name))
Example #20
def normal_inference(opt, drop_last=False):
    # added to specify gpu id; the gpus arg in the provided code does not work 
    torch.cuda.set_device(1)
    opt.gpus = [1]
    
    torch.backends.cudnn.benchmark = True

    Dataset = switch_dataset[opt.dataset]
    opt = opts().update_dataset(opt, Dataset)

    dataset = Dataset(opt, 'train') # test
    detector = MOCDetector(opt)
    prefetch_dataset = PrefetchDataset(opt, dataset, detector.pre_process) # check existing detection (skipping those that have been detected)
    total_num = len(prefetch_dataset)
    data_loader = torch.utils.data.DataLoader(
        prefetch_dataset,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.num_workers,
        pin_memory=opt.pin_memory,
        drop_last=drop_last,
        worker_init_fn=worker_init_fn)

    num_iters = len(data_loader)

    bar = Bar(opt.exp_id, max=num_iters)

    print('inference chunk_sizes:', opt.chunk_sizes)
    print(len(data_loader))
    for iter, data in enumerate(data_loader):
        # only run on every 1000th batch (quick visualization pass)
        if iter % 1000 != 0:
            continue

        print("Current iter: {}".format(iter))
        outfile = data['outfile']

        # Grad-CAM: hook the target layer and generate the class activation mask
        grad_cam = GradCam(detector, target_layer=1)
        cam = grad_cam.generate_cam(data['images'], target_class=0)

        vis_clip = True
        if vis_clip:
            for ii in range(len(data['images'])):
                original_image = data['images'][ii].clone()
                image_temp = original_image.numpy().squeeze().transpose(1,2,0)
                image_temp = ((image_temp * opt.std + opt.mean) * 255).astype(np.uint8)
                image_temp = cv2.cvtColor(image_temp, cv2.COLOR_BGR2RGB)
                #plt.imshow(image_temp)
                #plt.show()
                # visualize the CAM on the middle frame of the clip
                if ii == len(data['images']) // 2:
                    if len(cam) != 1:
                        visualize_class_activation_images(Image.fromarray(image_temp), cam[ii])
                    else:
                        visualize_class_activation_images(Image.fromarray(image_temp), cam[0])
        
        # original detection + result-dumping path, disabled while visualizing CAMs
        '''
        detections = detector.run(data)

        for i in range(len(outfile)):
            with open(outfile[i], 'wb') as file:
                pickle.dump(detections[i], file)

        Bar.suffix = 'inference: [{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            iter, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        bar.next()
        '''
    #bar.finish()
    return total_num
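
# The core of Grad-CAM, for reference (a minimal sketch of the idea behind the
# GradCam helper used above; `activations` and `gradients` are assumed to be
# captured by forward/backward hooks on the target layer):
def grad_cam_sketch(activations, gradients):
    # activations, gradients: tensors of shape [C, H, W] for one image
    weights = gradients.mean(dim=(1, 2))                 # pool grads per channel
    cam = (weights[:, None, None] * activations).sum(0)  # weighted channel sum
    cam = torch.clamp(cam, min=0)                        # keep positive evidence
    return (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)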
Exemplo n.º 21
0
def main():
    start_time = datetime.now()
    # strftime drops the microseconds and yields the string the logs expect
    start_time_str = start_time.strftime("%Y-%m-%d %H:%M:%S")
    args = opts()
    from trainer import train, validate
    #     if args.ablation == '':
    #         from trainer import train, validate
    #     elif args.ablation == 'baseline':
    #         from trainer_baseline import train, validate
    #     elif args.ablation == 'wo_taskt':
    #         from trainer_wo_taskt import train, validate
    #     elif args.ablation == 'wo_Mst':
    #         from trainer_wo_Mst import train, validate
    #     elif args.ablation == 'wo_confusion':
    #         from trainer_wo_confusion import train, validate
    #     elif args.ablation == 'wo_category_confusion':
    #         from trainer_wo_category_confusion import train, validate

    # Fix the shuffled order of every epoch so repeated training runs do not
    # diverge much (reaching the same epoch reproduces the same model).
    # A labmate says it is usually fine not to fix the seed; he rarely does.
    #     if args.seed != 666:
    #         if torch.cuda.is_available():
    #             torch.cuda.manual_seed(args.seed)
    #             torch.manual_seed(args.seed)
    #         else:
    #             torch.manual_seed(args.seed)
    #     else:
    #         if torch.cuda.is_available():
    #             torch.cuda.manual_seed(666)
    #             torch.manual_seed(args.seed)
    #         else:
    #             torch.manual_seed(666)

    # init models, multi GPU
    #     model = nn.DataParallel(resnet(args)) # multi-GPU
    feature_extractor = nn.DataParallel(Extractor(args))
    class_classifier = nn.DataParallel(
        Class_classifier(2048, num_classes=args.num_classes)
    )  # 512 for ResNet-18/34, 2048 for ResNet-50
    domain_classifier = nn.DataParallel(
        Domain_classifier(2048, hidden_size=128))
    #     print(id(model.module))
    #     check_model([3, 200, 200], Extractor(args))

    if torch.cuda.is_available():
        #         model = model.cuda()
        feature_extractor = feature_extractor.cuda()
        class_classifier = class_classifier.cuda()
        domain_classifier = domain_classifier.cuda()

    # optimizer for multi gpu
    optimizer = torch.optim.SGD(
        [{
            'params': feature_extractor.module.parameters(),
            'name': 'pre-trained'
        }, {
            'params': class_classifier.module.parameters(),
            'name': 'new-added'
        }, {
            'params': domain_classifier.module.parameters(),
            'name': 'new-added'
        }],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
        nesterov=True)
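
    # The per-group 'name' tags make it easy to treat pre-trained and newly
    # added parameters differently later, e.g. (hypothetical adjustment):
    # for group in optimizer.param_groups:
    #     if group['name'] == 'pre-trained':
    #         group['lr'] = args.lr * 0.1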

    best_prec1 = 0
    if args.resume:
        if os.path.isfile(args.resume):
            print("==> loading checkpoints '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['model_state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            raise ValueError('The file to be resumed is not exited',
                             args.resume)

    train_loader_source, train_loader_target, val_loader_target = generate_dataloader(
        args)

    print('Begin training')
    print(len(train_loader_source), len(train_loader_target))
    train_loader_source_batches = enumerate(train_loader_source)
    train_loader_target_batches = enumerate(train_loader_target)
    if torch.cuda.is_available():
        criterion_y = nn.CrossEntropyLoss().cuda()
        criterion_d = nn.CrossEntropyLoss().cuda()  # not used in this code
    else:
        criterion_y = nn.CrossEntropyLoss()
        criterion_d = nn.CrossEntropyLoss()

    writer = SummaryWriter(log_dir=args.log)
    #     for epoch in range(args.start_epoch, args.epochs):
    epoch = args.start_epoch
    epochs_has_not_been_improved = 0
    maximum_gap = 0
    while epoch < args.epochs:
        # train for one epoch
        #         pred1_acc_train, loss = train(train_loader_source, train_loader_source_batches, train_loader_target,
        #                                       train_loader_target_batches, model, criterion_y, criterion_d, optimizer_C, optimizer_G, epoch, args)
        #         pred1_acc_train, loss_C, loss_G = train(train_loader_source, train_loader_source_batches, train_loader_target, train_loader_target_batches, model, criterion_y, criterion_d, optimizer_C, optimizer_G, epoch, args)
        #         pred1_acc_train, loss_C, loss_G, new_epoch_flag = train(train_loader_source, train_loader_source_batches, train_loader_target, train_loader_target_batches, model, criterion_y, criterion_d, optimizer_C, optimizer_G, epoch, args)
        #         train_loader_source_batches, train_loader_target_batches, epoch, pred1_acc_train, loss_C, loss_G, new_epoch_flag = train(train_loader_source, train_loader_source_batches, train_loader_target, train_loader_target_batches, model, criterion_y, criterion_d, optimizer_C, optimizer_G, epoch, args)
        # ------------- not yet updated (start); may contain errors -------------
        # ------------- not yet updated (end); may contain errors -------------

        train_loader_source_batches, train_loader_target_batches, epoch, pred1_acc_train, loss_C, loss_G, new_epoch_flag = train(
            train_loader_source, train_loader_source_batches,
            train_loader_target, train_loader_target_batches,
            feature_extractor, class_classifier, domain_classifier,
            criterion_y, criterion_d, optimizer, epoch, args)

        if new_epoch_flag:
            # TODO: check whether skipping the two statements below causes errors
            #             train_loader_source_batches = enumerate(train_loader_source)
            #             (inputs_source, labels_source) = train_loader_source_batches.__next__()[1]

            # evaluate on the val data
            if epoch % args.test_freq == (args.test_freq - 1):
                #                 prec1, _ = validate(None, val_loader_target, model, criterion_y, criterion_d, epoch, args)
                prec1, _ = validate(None, val_loader_target, feature_extractor,
                                    class_classifier, domain_classifier,
                                    criterion_y, criterion_d, epoch, args)

                is_best = prec1 > best_prec1
                if is_best:
                    epochs_has_not_been_improved = 0
                    best_prec1 = prec1
                    with open(os.path.join(args.log, 'log.txt'), 'a') as fp:
                        fp.write('\nTarget_T1 acc: %.3f' % best_prec1)
                else:
                    epochs_has_not_been_improved += 1

                writer.add_scalars('data/scalar_group', {
                    'pred1_acc_valid': prec1,
                    'best_prec1': best_prec1
                }, epoch)

                # updating the maximum distance between current and best
                current_gap = best_prec1 - prec1
                if current_gap > maximum_gap:
                    maximum_gap = current_gap

                save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'arch': args.arch,
                        # 'model_state_dict': model.state_dict(),
                        'feature_extractor_state_dict':
                            feature_extractor.state_dict(),
                        'class_classifier_state_dict':
                            class_classifier.state_dict(),
                        'domain_classifier_state_dict':
                            domain_classifier.state_dict(),
                        'best_prec1': best_prec1,
                        'optimizer': optimizer.state_dict()
                    }, is_best, args, epoch + 1)

    writer.close()

    end_time = datetime.now()
    end_time_str = end_time.strftime("%Y-%m-%d %H:%M:%S")
    through_time = end_time - start_time
    through_time_str = time_delta2str(through_time)

    with open(os.path.join(args.result, 'overview.txt'), 'a') as fp:
        fp.write('%s: \nbest_prec1:%.2f%%, epochs_has_not_been_improved:%d, '
                 'maximum distance between current and best:%.2f%%\n'
                 'start at %s, finish at %s, it takes %s\n' %
                 (args.log.split('/')[1], best_prec1,
                  epochs_has_not_been_improved, maximum_gap, start_time_str,
                  end_time_str, through_time_str))
Exemplo n.º 22
0
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    if not opt.not_set_cuda_env:
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus_str
    opt.device = torch.device("cuda" if opt.gpus[0] >= 0 else "cpu")
    logger = Logger(opt)

    print("Creating model...")
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
    optimizer = get_optimizer(opt, model)
    start_epoch = 0
    if opt.load_model != "":
        model, optimizer, start_epoch = load_model(model, opt.load_model, opt,
                                                   optimizer)

    for param in model.parameters():
        param.requires_grad = True
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    if opt.val_intervals < opt.num_epochs or opt.test:
        print("Setting up validation data...")
        val_loader = torch.utils.data.DataLoader(
            Dataset(opt, "val"),
            batch_size=1,
            shuffle=False,
            num_workers=1,
            pin_memory=True,
        )

        if opt.test:
            _, preds = trainer.val(0, val_loader)
            val_loader.dataset.run_eval(preds, opt.save_dir)
            return

    print("Setting up train data...")
    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, "train"),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True,
    )

    print("Starting training...")
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        save_model(
            os.path.join(opt.save_dir, "model_{}.pth".format(epoch)),
            epoch,
            model,
            optimizer,
        )
        mark = epoch if opt.save_all else "last"
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write("epoch: {} |".format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary("train_{}".format(k), v, epoch)
            logger.write("{} {:8f} | ".format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(
                os.path.join(opt.save_dir, "model_{}.pth".format(mark)),
                epoch,
                model,
                optimizer,
            )
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
                if opt.eval_val:
                    val_loader.dataset.run_eval(preds, opt.save_dir)
            for k, v in log_dict_val.items():
                logger.scalar_summary("val_{}".format(k), v, epoch)
                logger.write("{} {:8f} | ".format(k, v))
        else:
            save_model(os.path.join(opt.save_dir, "model_last.pth"), epoch,
                       model, optimizer)
        logger.write("\n")
        # note: this re-saves model_{epoch}.pth unconditionally; the original
        # guard `if epoch in opt.save_point:` was commented out upstream
        save_model(
            os.path.join(opt.save_dir, "model_{}.pth".format(epoch)),
            epoch,
            model,
            optimizer,
        )
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print("Drop LR to", lr)
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr
    logger.close()
Exemplo n.º 23
0
# class_name = ['__background__', 'bus', 'car', 'others', 'van']
class_name = ['__background__', 'object']

# os.environ['CUDA_VISIBLE_DEVICES'] = '1'

base_dir = 'YOUR_BASE_DIR'
exp_id = 'EXP_ID'
model_name = 'model_best.pth'
MODEL_PATH = os.path.join(base_dir, exp_id, model_name)
seg_dir = 'changedetection-raw'  # only relevant if you want segmentation masks
base_seg_dir = os.path.join(base_dir, exp_id, seg_dir)
TASK = 'ctdet'
# --seg_weight 1
# opt = opts().init('{} --load_model {} --arch hourglass --seg_weight 1 --dataset uadetrac1on10_b --gpu 1 --keep_res'.format(TASK, MODEL_PATH).split(' '))
opt = opts().init('{} --load_model {} --arch hourglass --seg_weight 1 --dataset uav --gpu 1 --keep_res'.format(TASK, MODEL_PATH).split(' '))
detector = detector_factory[opt.task](opt)

SPLIT = 'uav-test'

if SPLIT == 'test':
    source_lines = open('/store/datasets/UA-Detrac/test-tf-all.csv', 'r').readlines()
    target_file = open(os.path.join(base_dir, exp_id, 'ua-test.csv'), 'w')
elif SPLIT == 'train1on10':
    source_lines = open('/store/datasets/UA-Detrac/train-tf.csv', 'r').readlines() # + open('/store/datasets/UA-Detrac/val-tf-all.csv', 'r').readlines()
    target_file = open(os.path.join(base_dir, exp_id, 'ua-train1on10.csv'), 'w')
elif SPLIT == 'trainval':
    source_lines = open('/store/datasets/UA-Detrac/train-tf-all.csv', 'r').readlines() + open('/store/datasets/UA-Detrac/val-tf-all.csv', 'r').readlines()
    target_file = open(os.path.join(base_dir, exp_id, 'ua-trainval.csv'), 'w')
elif SPLIT == 'uav-test':
    source_lines = open('/store/datasets/UAV/val.csv', 'r').readlines()
Exemplo n.º 24
0
        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush'
    ]

    MODEL_PATH = "/content/CenterTrack/models/coco_tracking.pth"
    TASK = "tracking"
    opt = opts().init("{} --load_model {}".format(TASK, MODEL_PATH).split(' '))
    detector = Detector(opt)

    import cv2
    name = '00000.ppm'
    #   img = cv2.imread(f'/content/CenterTrack/samples/images/{name}')

    img = cv2.imread(
        f'/content/CenterTrack/samples/images/FullIJCNN2013/{name}')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    import time
    start_time = time.time()
    results = detector.run(img)['results']
    end_time = time.time()
    print('time:', end_time - start_time)
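    # Note: the first detector.run() also pays one-time costs (CUDA context
    # creation, cudnn autotuning), so run a warm-up call before timing when
    # benchmarking.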
Exemplo n.º 25
0
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test

    print('Setting up data...')
    Dataset = get_dataset(opt.dataset, opt.task)
    with open(opt.data_cfg) as f:
        data_config = json.load(f)
    trainset_paths = data_config['train']
    dataset_root = data_config['root']
    transforms = T.Compose([T.ToTensor()])
    dataset = Dataset(opt, dataset_root, trainset_paths, (1088, 608), augment=True, transforms=transforms)
    opt = opts().update_dataset_info_and_set_heads(opt, dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0

    # Get dataloader

    train_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True
    )

    print('Starting training...')
    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(
            model, opt.load_model, trainer.optimizer, opt.resume, opt.lr, opt.lr_step)
    best = 1e10  # unused here: this training loop never runs validation
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))

        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                       epoch, model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                       epoch, model, optimizer)
            lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        if epoch % 5 == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                       epoch, model, optimizer)
    logger.close()
Exemplo n.º 26
0
def test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

    val_mAP = []
    iteration_list = []
    checkpoints_path = opt.save_dir
    for model_name in os.listdir(checkpoints_path):
        if "pth" not in model_name or "final" in model_name:
            continue
        iteration = int(model_name.split(".")[0].split('_')[1])
        iteration_list.append(iteration)
    iteration_list = sorted(iteration_list)
    for iteration in iteration_list:
        model_name = "model_{:07d}.pth".format(iteration)
        opt.load_model = os.path.join(checkpoints_path, model_name)

        Dataset = dataset_factory[opt.dataset]
        opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
        print(opt)
        Logger(opt)
        Detector = detector_factory[opt.task]
        split = 'val' if not opt.trainval else 'test'
        dataset = Dataset(opt, split)
        detector = Detector(opt)

        results = {}
        num_iters = len(dataset)
        bar = Bar('{}'.format(opt.exp_id), max=num_iters)
        time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
        avg_time_stats = {t: AverageMeter() for t in time_stats}
        for ind in range(num_iters):
            img_id = dataset.images[ind]
            img_info = dataset.coco.loadImgs(ids=[img_id])[0]
            img_path = os.path.join(dataset.img_dir, img_info['file_name'])

            if opt.task == 'ddd':
                ret = detector.run(img_path, img_info['calib'])
            else:
                ret = detector.run(img_path)
            results[img_id] = ret['results']
            Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
                ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
            for t in avg_time_stats:
                avg_time_stats[t].update(ret[t])
                Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(
                    t, avg_time_stats[t].avg)
            bar.next()
        bar.finish()
        dataset.run_eval(results, opt.save_dir)

        gt_label_path = "data/kitti/training/label_2/"
        pred_label_path = os.path.join(opt.save_dir, 'results')
        pred_annos, image_ids = kitti.get_label_annos(pred_label_path,
                                                      return_ids=True)
        gt_annos = kitti.get_label_annos(gt_label_path, image_ids=image_ids)
        result, ret_dict = kitti_eval(gt_annos, pred_annos,
                                      ["Car", "Pedestrian", "Cyclist"])

        if ret_dict is not None:
            mAP_3d_moderate = ret_dict["KITTI/Car_3D_moderate_strict"]
            val_mAP.append(mAP_3d_moderate)
            with open(os.path.join(checkpoints_path, "val_mAP.json"),
                      'w') as file_object:
                json.dump(val_mAP, file_object)
            with open(
                    os.path.join(
                        checkpoints_path, 'epoch_result_{:07d}_{}.txt'.format(
                            iteration, round(mAP_3d_moderate, 2))), "w") as f:
                f.write(result)
Exemplo n.º 27
0
def main():
    opt = opts().parse()
    model = Joint(opt)
    model.train()
Exemplo n.º 28
0
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)
    if opt.task == 'ddd_sun':
        task = 'ddd'
    else:
        print('task {} is not ddd_sun'.format(opt.task))
        task = opt.task
    Trainer = train_factory[task]
    trainer = Trainer(opt, model, optimizer)  # builds the model wrapper and the losses
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')
    # TODO: build a proper val split separately later
    val_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'val'),
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True)

    # if opt.test:
    #     _, preds = trainer.val(0, val_loader)
    #     val_loader.dataset.run_eval(preds, opt.save_dir)
    #     return

    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)
    #visdom part
    # vis = visdom.Visdom()
    # vis_title = 'CenterNet SUNRGBD 3D Object Detection'
    # vis_legend = ['Train Loss']

    #textwindow = vis.text("Hello Pytorch")
    #plot = vis.line(Y = torch.tensor([0]), X = torch.tensor([0]))

    # iter_plot = vis.create_vis_plot(vis, 'Iteration', 'Total Loss', vis_title, vis_legend)
    # hm_plot = vis.create_vis_plot(vis, 'Iteration', 'hm Loss', vis_title, vis_legend)
    # dep_plot = vis.create_vis_plot(vis, 'Iteration', 'dep Loss', vis_title, vis_legend)
    # dim_plot = vis.create_vis_plot(vis, 'Iteration', 'dim Loss', vis_title, vis_legend)
    # rot_plot = vis.create_vis_plot(vis, 'Iteration', 'rot Loss', vis_title, vis_legend)
    # wh_plot = vis.create_vis_plot(vis, 'Iteration', 'wh Loss', vis_title, vis_legend)
    # off_plot = vis.create_vis_plot(vis, 'Iteration', 'off Loss', vis_title, vis_legend)
    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)

        logger.write('epoch: {} |'.format(epoch))
        # the training losses are logged here; k ranges over loss, hm_loss,
        # dep_loss, dim_loss, rot_loss, wh_loss, off_loss

        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))

        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch,
                           model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
Exemplo n.º 29
0
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(
            model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')
    val_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'val'),
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True
    )

    if opt.test:
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'train'),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True
    )

    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'),
                           epoch, model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                       epoch, model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                       epoch, model, optimizer)
            lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
Exemplo n.º 30
0
    parser.add_argument('--generic_video', type=str, default=None,
                        help='path to save video with tracking')
    parser.add_argument('--save_framerate', type=int, default=10,
                        help='framerate of generic video')

    # Model
    parser.add_argument('--model_path', type=str,
                        default='/content/drive/My Drive/Projects/CenterTrack/exp/tracking/football/model_last.pth',
                        help='path to model')
    parser.add_argument('--centertrack_path', type=str,
                        default='/content/drive/My Drive/Projects/CenterTrack/src/lib',
                        help='path to src/lib')

    args = parser.parse_args()


    # Import model and load weights
    sys.path.insert(0, args.centertrack_path)
    from detector import Detector
    from opts import opts
    opt = opts().init(['tracking', '--save_video', '--load_model', args.model_path,
                        '--num_classes', args.num_classes,
                        '--input_h', args.input_h, '--input_w', args.input_w,
                        '--dataset', 'custom', '--debug', '4'])
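    # Note: opts().init() expects argv-style string tokens; if num_classes,
    # input_h or input_w were parsed as ints upstream, wrap them in str() here.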
    detector = Detector(opt)


    # Process input data
    process_video(args.video, args.save_csv, args.generic_video)

Exemplo n.º 31
0
        # b_max = torch.max(pc[:,5,:b_N], dim=1)[0]
        # b_max = torch.max(b_max, dim=0)[0]
        

    mean /= num_batches
    var /= num_batches
    std = torch.sqrt(var)
    print('mean: ', mean)
    print('std: ', std)
    print('max_rcs: ', max_rcs)

    with open("pc_stats.txt","w") as f:
        f.write("mean:" + ','.join([str(m) for m in mean.tolist()]))
        f.write("\nstd:" + ','.join([str(s) for s in std.tolist()]))

    # pre-calculated results:
    # pc_mean = np.array([407.1079, 255.5098, 41.8673, 1.8379, 36.2591, 6.6596, 
    #                 -0.4997, -0.8596, 0.0240, 0.0165, 1.0, 3.0, 19.5380, 
    #                 19.6979, 0.0, 1.0233, 16.4908, 3.0]).reshape(18,1)
    # pc_std = np.array([166.4957, 16.4833, 25.2681, 1.3892, 25.9181, 6.7230, 
    #                 2.7588, 2.0672, 1.4910, 0.5632, 0.0, 0.0, 0.7581, 
    #                 1.02991, 0.0, 0.2116, 0.5794, 0.0]).reshape(18,1)


if __name__ == '__main__':
    # intended flags (the original argv list was never split into tokens, and
    # opts().parse() reads sys.argv anyway):
    # '--pointcloud --nuscenes_att --velocity --dataset nuscenes --batch_size 10'
    opt = opts().parse()
    opt.pointcloud = True
    opt.dataset = 'nuscenes'
    opt.batch_size = 50
    main(opt)
Exemplo n.º 32
0
    def __init__(self, camera_stream, obstacle_tracking_stream, flags,
                 camera_setup):
        from dataset.dataset_factory import get_dataset
        from model.model import create_model, load_model
        from opts import opts
        from utils.tracker import Tracker

        camera_stream.add_callback(self.on_frame_msg,
                                   [obstacle_tracking_stream])
        self._flags = flags
        self._logger = erdos.utils.setup_logging(self.config.name,
                                                 self.config.log_file_name)
        self._csv_logger = erdos.utils.setup_csv_logging(
            self.config.name + '-csv', self.config.csv_log_file_name)
        self._camera_setup = camera_setup
        # TODO(ionel): Might have to filter labels when running with a coco
        # and a nuscenes model.
        num_classes = {
            'kitti_tracking': 3,
            'coco': 90,
            'mot': 1,
            'nuscenes': 10
        }
        # Other flags:
        # 1) --K ; max number of output objects.
        # 2) --fix_short ; resizes the height of the image to fix short, and
        # the width such the aspect ratio is maintained.
        # 3) --pre_hm ; pre heat map.
        # 4) --input_w; str(camera_setup.width)
        # 5) --input_h; str(camera_setup.height)
        args = [
            'tracking', '--load_model', flags.center_track_model_path,
            '--dataset', flags.center_track_model, '--test_focal_length',
            str(int(camera_setup.get_focal_length())), '--out_thresh',
            str(flags.obstacle_detection_min_score_threshold), '--pre_thresh',
            str(flags.obstacle_detection_min_score_threshold), '--new_thresh',
            str(flags.obstacle_detection_min_score_threshold),
            '--track_thresh',
            str(flags.obstacle_detection_min_score_threshold), '--max_age',
            str(flags.obstacle_track_max_age), '--num_classes',
            str(num_classes[flags.center_track_model]), '--tracking',
            '--hungarian'
        ]
        opt = opts().init(args)
        gpu = True
        opt.device = torch.device('cuda' if gpu else 'cpu')
        self.opt = opt
        self.model = create_model(opt.arch, opt.heads, opt.head_conv, opt=opt)
        self.model = load_model(self.model, opt.load_model, opt)
        self.model = self.model.to(self.opt.device)
        self.model.eval()

        self.trained_dataset = get_dataset(opt.dataset)
        self.mean = np.array(self.trained_dataset.mean,
                             dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(self.trained_dataset.std,
                            dtype=np.float32).reshape(1, 1, 3)
        self.rest_focal_length = self.trained_dataset.rest_focal_length \
            if self.opt.test_focal_length < 0 else self.opt.test_focal_length
        self.flip_idx = self.trained_dataset.flip_idx
        self.cnt = 0
        self.pre_images = None
        self.pre_image_ori = None
        self.tracker = Tracker(opt)
Exemplo n.º 33
0
import datetime

import torchvision.datasets as datasets

import ref
import cv2
import numpy as np
from datasets.Fusion import Fusion

from utils.logger import Logger
from opts import opts
from train import train, validate, test
from optim_latent import initLatent, stepLatent, getY
from model import getModel
from utils.utils import collate_fn_cat

from datasets.chairs_modelnet import ChairsModelNet as SourceDataset
args = opts().parse()
if args.targetDataset == 'Redwood':
  from datasets.chairs_Redwood import ChairsRedwood as TargetDataset
elif args.targetDataset == 'ShapeNet':
  from datasets.chairs_Annotatedshapenet import ChairsShapeNet as TargetDataset
elif args.targetDataset == 'RedwoodRGB':
  from datasets.chairs_RedwoodRGB import ChairsRedwood as TargetDataset
elif args.targetDataset == '3DCNN':
  from datasets.chairs_3DCNN import Chairs3DCNN as TargetDataset
else:
  raise Exception("No target dataset {}".format(args.targetDataset))

splits = ['train', 'valSource', 'valTarget']

def main():
  now = datetime.datetime.now()
Exemplo n.º 34
0
def build_check():
    global BUILD_KILL, WAIT_TILL_CLEAR
    while True:
        # pending: make this thread wait until all runs are safely shut down
        build = check_for_new_build(get_current_build())
        BUILD_KILL = True
        time.sleep(4)
        k = time.time()
        while WAIT_TILL_CLEAR:
            # bail out if the apps have not cleared within 500 seconds
            assert (time.time() - k) < 500
            print('waiting to clear all the apps')
            time.sleep(2)
        

if __name__ == '__main__':
    opts, args = opts()
    master_conf = opts.conf
    smokes_mode = opts.build
    simple_harness = opts.plain
    path_to_build = opts.path
    curr = opts.current
    if not simple_harness:
        if curr:
            build = get_current_build(path_to_build)
        else:
            build = check_for_new_build(get_current_build(path_to_build),
                                        path_to_build)
        th = threading.Thread(target=build_check)
        th.daemon = True
        th.start()
    while True:
        try: