Example #1
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model_image, self.model_point = create_model(opt)
        self.model_image = load_model(self.model_image, opt.load_model,
                                      'image_model')
        self.model_image = self.model_image.to(opt.device)
        self.model_image.eval()

        self.model_point = load_model(self.model_point, opt.load_model,
                                      'point_model')
        self.model_point = self.model_point.to(opt.device)
        self.model_point.eval()

        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales
        self.opt = opt
        self.pause = True
        self.image_path = ' '
        self.max_objs = 32
        const = torch.Tensor([[-1, 0], [0, -1], [-1, 0], [0, -1], [-1, 0],
                              [0, -1], [-1, 0], [0, -1], [-1, 0], [0, -1],
                              [-1, 0], [0, -1], [-1, 0], [0, -1], [-1, 0],
                              [0, -1]])
        self.const = const.unsqueeze(0).unsqueeze(0)
        self.const = self.const.to(self.opt.device)
Example #2
def teacher_train(cfg, start_epoch):
    torch.manual_seed(cfg.SEED)
    device = torch.device('cuda' if cfg.GPU[0] >= 0 else 'cpu')
    if start_epoch == 1:
        train_log = open(os.path.join(cfg.LOG_DIR, "train_log.csv"), 'w')
        train_log_title = "epoch,total_loss,hm_loss,wh_loss"
        val_log = open(os.path.join(cfg.LOG_DIR, "val_log.csv"), 'w')
        val_log_title = "epoch,precision,recall\n"
        if cfg.USE_OFFSET:
            train_log_title += ",offset_loss\n"
        else:
            train_log_title += "\n"
        train_log.write(train_log_title)
        train_log.flush()
        val_log.write(val_log_title)
        val_log.flush()
    else:
        train_log = open(os.path.join(cfg.LOG_DIR, "train_log.csv"), 'a')
        val_log = open(os.path.join(cfg.LOG_DIR, "val_log.csv"), 'a')

    print('Creating model...')
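    # teacher: a previously trained res_18 checkpoint; student: a litnet model trained from scratch (or resumed)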
    teacher = create_model(cfg, 'res_18')
    teacher = load_model(teacher, 'log/weights/model_last_res.pth')
    model = create_model(cfg, 'litnet')
    if start_epoch != 1:
        model = load_model(
            model, 'log/weights/model_epoch_{}.pth'.format(start_epoch - 1))
    optimizer = torch.optim.Adam(model.parameters(), cfg.LR)

    trainer = TeacherTrainer(cfg, teacher, model, optimizer)
    trainer.set_device(cfg.GPU, device)
    print('Setting up data...')
    train_loader = DataLoader(TrainCircleDataset(cfg),
                              batch_size=cfg.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.NUM_WORKERS,
                              pin_memory=True,
                              drop_last=True)
    val_loader = ValCircleDataset()
    print('Starting training...')
    epoch = start_epoch
    for epoch in range(start_epoch, start_epoch + cfg.NUM_EPOCHS):
        trainer.train(epoch, train_loader, train_log)
        model_path = os.path.join(cfg.WEIGHTS_DIR,
                                  'model_epoch_{}.pth'.format(epoch))
        save_model(model_path, epoch, model, optimizer)
        trainer.val(epoch, model_path, val_loader, val_log, cfg)

    save_model(os.path.join(cfg.WEIGHTS_DIR, 'model_last.pth'), epoch, model,
               optimizer)
Example #3
    def __init__(self, options):
        if options.gpus[0] >= 0:
            try:
                self.ctx = mx.gpu()
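                # allocate a tiny array to verify that the GPU is actually usable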
                _ = nd.zeros((1, ), ctx=self.ctx)
            except mx.base.MXNetError:
                print("No GPU available. Use CPU instead.")
                self.ctx = mx.cpu()
        else:
            self.ctx = mx.cpu()

        print("Creating model...")
        self.model = create_model(options.arch,
                                  options.heads,
                                  options.head_conv,
                                  ctx=self.ctx)
        if options.load_model_path != '':
            self.model = load_model(self.model,
                                    options.load_model_path,
                                    ctx=self.ctx)

        self.mean = np.array(options.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(options.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = options.num_classes
        self.scales = options.test_scales
        self.opt = options
        self.pause = True
Example #4
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv,
                                  num_gnn_layers=opt.num_gnn_layers,
                                  gnn_type=opt.gnn_type,
                                  use_residual=opt.use_residual,
                                  return_pre_gnn_layer_outputs=opt.return_pre_gnn_layer_outputs,
                                  heads_share_params=opt.heads_share_params,
                                  omit_gnn=opt.omit_gnn,
                                  use_roi_align=opt.use_roi_align,
                                  viz_attention=opt.viz_attention)

        self.model = load_model(self.model, opt.load_model, distributed=True, copy_head_weights=False)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        # self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
        self.viz_attention = opt.viz_attention
Example #5
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = 128
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
Example #6
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        # input = torch.randn(1, 3, 640, 640, requires_grad=True)
        # input=input.to(opt.device)
        # out = self.model(input)
        # torch.onnx.export(self.model,  # model being run
        #                   input,  # model input (or a tuple for multiple inputs)
        #                   "./test.onnx",  # where to save the model (can be a file or file-like object)
        #                   export_params=True,  # store the trained parameter weights inside the model file
        #                   opset_version=9
        #                   )

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
Example #7
    def __init__(self, opt, frame_rate=30):             # frame rate of the input video
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)           # create the model
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.tracked_stracks = []  # type: list[STrack]         # tracks currently being followed
        self.lost_stracks = []  # type: list[STrack]            # tracks that have been lost
        self.removed_stracks = []  # type: list[STrack]         # tracks that have been removed

        self.frame_id = 0
        self.det_thresh = opt.conf_thres                        # detection box threshold, set equal to the tracking confidence threshold
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)            # scaled by the real frame rate of the input video
        self.max_time_lost = self.buffer_size                   # a track is considered lost after going unmatched for self.buffer_size consecutive frames
        self.max_per_image = 128
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()         # predicts each target's position and velocity in the current frame from the previous frame
Example #8
def prepare_data_csv():
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    arcface = load_model('resnet50', pretrained=True)
    mtcnn = MTCNN(device=device)

    imgdataset = ImageFolder('./data/facebank')
    imgs = []
    transf = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    labels = []
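    # detect, align, and embed every image in the facebank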
    for img, label in imgdataset:
        boxes, landmarks = mtcnn.detect_faces(img)
        align = align_crop(img, landmarks, imgsize=112)
        align = transf(align)
        imgs.append(align)
        labels.append(label)
    imgs = torch.stack(imgs)

    embs = arcface(imgs.to(device)).data.cpu().numpy()

    idx_to_class = {}
    for a in imgdataset.class_to_idx:
        idx_to_class[imgdataset.class_to_idx[a]] = a
    name_labels = [idx_to_class[i] for i in labels]

    people = pd.DataFrame(embs)
    people['id'] = name_labels
    people.to_csv('./data/people.csv', index=False)
Example #9
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch,
                                  opt.heads,
                                  opt.head_conv,
                                  number_stacks=opt.number_stacks,
                                  fsm=opt.fsm,
                                  drmc=opt.drmc,
                                  drmr=opt.drmr,
                                  only_ls=opt.only_ls)
        self.model = load_model(self.model, opt.load_model, test=True)
        self.model = self.model.to(opt.device)
        self.model.eval()
        if opt.gene_guided_grad_cam:
            self.act_maps = []
            self.register_hooks()
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        # modified by condi
        # self.max_per_image = 100
        self.max_per_image = opt.K
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales
        self.opt = opt
        self.pause = True
Example #10
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        # convert to onnx
        # input_names = ["input0"]
        # output_names = ["hm", "wh", "id", "reg"]
        # inputs = torch.randn(1, 3, 480, 640).to('cpu')
        # torch_out = torch.onnx._export(self.model, inputs, 'pruned.onnx', export_params=True, verbose=False,
        #                                input_names=input_names, output_names=output_names)
        # print("export onnx sucess")

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
Example #11
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
        self.roi_align = RoIAlign(7, 7)
        cfg = get_config()
        cfg.merge_from_file(
            "/home/hongwei/track-human/FairMOT/src/lib/tracker/deep_configs/yolov3.yaml"
        )
        cfg.merge_from_file(
            "/home/hongwei/track-human/FairMOT/src/lib/tracker/deep_configs/deep_sort.yaml"
        )
        self.detector = build_detector(cfg, True)
Example #12
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        # self.tb = SummaryWriter(
        #     log_dir='/home/wanghongwei/WorkSpace/source/detect/CenterNet/log/', comment='dla')
        self.model = create_model(opt.arch,
                                  opt.heads,
                                  opt.head_conv,
                                  vis_graph=opt.vis_graph)
        try:
            self.model = load_model(self.model, opt.load_model)
            self.model = self.model.to(opt.device)
            self.model.eval()
        except Exception as e:
            print(e)
            print('maybe the model is None!!')
            raise e

        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales
        self.opt = opt
        self.pause = True
Example #13
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if epoch > 100:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
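            # decay the learning rate by a factor of 10 at each milestone in opt.lr_step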
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
Example #14
def train(model,
          training_parameters,
          model_choice,
          target,
          model_name_suffix=''):
    optimizer = tf.keras.optimizers.Adam(1e-2)
    log_joint_pdf = get_log_joint_pdf(training_parameters['name'])

    # Early stopping
    best_loss = 1e20
    last_improvement = 0
    max_consecutive_no_improvement = 15000
    min_epoch_checkpoint = 1
    checkpoint_tol = 0.02
    saved_checkpoint = False

    # Monitor training loss for visualisation
    loss_monitor = []
    for epoch in range(1, training_parameters['epochs']):
        loss = compute_apply_gradients(model, optimizer, log_joint_pdf)

        # checkpoint only when the relative improvement exceeds checkpoint_tol and the warm-up period has passed
        if loss < best_loss:
            if ((best_loss - loss) / np.abs(best_loss) > checkpoint_tol
                    and epoch > min_epoch_checkpoint):
                print(
                    f"    - CHECKPOINT for epoch {epoch + 1}, current best loss {loss}"
                )
                save_model(model,
                           model_choice,
                           target,
                           model_name_suffix=model_name_suffix)
                best_loss = loss
                last_improvement = 0
                saved_checkpoint = True

        else:
            last_improvement += 1
        if last_improvement >= max_consecutive_no_improvement:
            print(f"    - STOPPED after {epoch} epochs")
            break

        if epoch % 100 == 0:
            print(f"Epoch {epoch}, loss: {loss}")
            loss_monitor.append(loss)

    plt.figure()
    plt.plot(loss_monitor, color='slategrey')
    plt.xlabel('Epochs (x100)')
    plt.ylabel('-ELBO(q)')

    if saved_checkpoint:
        # reload the best checkpoint, using the same suffix it was saved with
        model = load_model(model_choice,
                           training_parameters,
                           model_name_suffix=model_name_suffix)

    return model
Example #15
    def __init__(self, opt):
        self.model = create_model(opt.arch,
                                  opt.heads,
                                  opt.head_conv,
                                  opt.num_layers,
                                  training=False,
                                  channel_last=opt.channel_last)
        if opt.checkpoint != '':
            extension = os.path.splitext(opt.checkpoint)[1]
            assert (
                extension == '.h5' or extension == ".protobuf"
            ), "incorrect file extension, should be either .h5 or .protobuf"
            load_model(self.model, opt.checkpoint, clear=True)

        self.mean = opt.mean
        self.std = opt.std
        self.max_per_image = 128
        self.opt = opt
        self.pause = True
Example #16
 def __init__(self, model='resnet50', device=None):
     if device is None:
         self.device = torch.device(
             'cuda:0' if torch.cuda.is_available() else 'cpu')
     else:
         self.device = device
     self.mtcnn = MTCNN(device=self.device)
     self.net = load_model(model, device=self.device, pretrained=True)
     # self.vec, self.name = load_vec(pd.read_csv(facevec))
     self.vec, self.name = load_data_from_database()
Example #17
    def __init__(self, opt):
        torch.manual_seed(opt.seed)
        torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
        Dataset = get_dataset(opt.dataset, opt.task)
        opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
        self.opt = opt
        print(opt)

        self.logger = Logger(opt)

        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
        opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

        print('Creating model...')
        model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = model

        optimizer = torch.optim.Adam(model.parameters(), opt.lr)
        self.optimizer = optimizer

        start_epoch = 0
        if opt.load_model != '':
            model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                       optimizer, opt.resume,
                                                       opt.lr, opt.lr_step)

        Trainer = train_factory[opt.task]
        trainer = Trainer(opt, model, optimizer)
        trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
        self.trainer = trainer

        print('Setting up data...')
        val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=1,
                                                 pin_memory=True)
        self.val_loader = val_loader

        if opt.test:
            _, preds = trainer.val(0, val_loader)
            val_loader.dataset.run_eval(preds, opt.save_dir)
            return

        train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.num_workers,
                                                   pin_memory=True,
                                                   drop_last=True)
        self.train_loader = train_loader

        self.best = 1e10
Example #18
 def __init__(self,
              model_path,
              conf_thres=0.4,
              model_name='dla_34',
              device='cpu'):
     heads = {'hm': 1, 'wh': 4, 'id': 128, 'reg': 2}
     head_conv = 256
     self.device = device
     self.conf_thres = conf_thres
     self.model = create_model(model_name, heads, head_conv)
     self.model = load_model(self.model, model_path)
     self.model = self.model.to(device)
     self.model.eval()
Example #19
def train(args, device_id):
    """ Starts training pipeline given CLI args """
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    logger.info('Device ID %d' % device_id)
    logger.info('Device %s' % device)
    seed(args.seed, device_id)

    def train_iter_fct():
        return data.load(args, 'train', device)

    if args.train_from != '':
        logger.info("Training from checkpoint %s", args.train_from)
        model, optim = load_model(args,
                                  device,
                                  load_bert=True,
                                  checkpoint=args.train_from)
    else:
        logger.info("Training without checkpoint")
        model, optim = load_model(args, device, load_bert=True)

    logger.info(model)
    trainer = build_trainer(args, device_id, model, optim)
    trainer.train(train_iter_fct, args.train_steps)
Example #20
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch, opt.num_classes)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)

        self.num_classes = opt.num_classes
        self.opt = opt
        self.pause = True
Example #21
    def __init__(self, cfg):
    
        print('Creating model...')
        self.model = create_model(cfg.MODEL.NAME, cfg.MODEL.HEAD_CONV, cfg)
        self.model = load_model(self.model, cfg.TEST.MODEL_PATH)
        self.model = self.model.to(torch.device('cuda'))
        self.model.eval()

        self.mean = np.array(cfg.DATASET.MEAN, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(cfg.DATASET.STD, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = cfg.MODEL.NUM_CLASSES
        self.scales = cfg.TEST.TEST_SCALES
        self.cfg = cfg
        self.pause = True
Example #22
def demo(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    opt.device = torch.device('cuda')

    print('Loading images...')
    if os.path.isdir(opt.demo):
        image_names = []
        ls = os.listdir(opt.demo)
        for file_name in sorted(ls):
            ext = file_name[file_name.rfind('.') + 1:].lower()
            if ext in image_ext:
                image_names.append(os.path.join(opt.demo, file_name))
    else:
        image_names = [opt.demo]

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    model = load_model(model, opt.load_model)
    model = model.to(opt.device)

    for (image_name) in image_names:
        image = cv2.imread(image_name)
        original_images = pre_process(image)
        original_images = original_images.to(opt.device)
        images = original_images.clone().detach()
        images.requires_grad = True
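        # iteratively perturb the input with signed gradients to suppress the detector's top heatmap score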
        for i in range(20):
            hm = model(images)[-1]['hm'].sigmoid_()
            scores = _topk(hm, K=1)
            loss = torch.sum(scores)
            if loss > 0:
                print(loss)
                model.zero_grad()
                loss.backward()
                grad = images.grad.data.sign()
                images = torch.clamp(images - 0.4 * grad, -1, 1)
                # detach and re-enable gradients so .grad is populated on the next backward pass
                images = images.detach().requires_grad_(True)
            else:
                break
        perturb = (images - original_images).detach().cpu().squeeze(0).numpy()
        perturb = perturb.transpose(1, 2, 0)
        perturb = perturb * np.array(opt.std, dtype=np.float32).reshape(
            1, 1, 3).astype(np.float32) * 255
        perturb = (perturb.astype(np.int16) + 128).clip(0, 255)
        perturb = cv2.resize(perturb, (image.shape[1], image.shape[0]))
        perturb = perturb - 128
        adv_image = (image + perturb).clip(0, 255).astype(np.uint8)
        cv2.imwrite(os.path.join('results', os.path.basename(image_name)), adv_image)
Example #23
    def __init__(self, model_path, cfg):
        super(Detector, self).__init__()
        self.cfg = cfg
        print('Creating model...')
        self.model = create_model(self.cfg, 'res_18')
        self.model = load_model(self.model, model_path)
        self.model = self.model.to(self.cfg.DEVICE)
        self.model.eval()

        self.mean = np.array(self.cfg.DATA_MEAN,
                             dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(self.cfg.DATA_STD,
                            dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = cfg.K
        self.scales = self.cfg.TEST_SCALES
        self.pause = True
Example #24
  def __init__(self, sess, model_saved_path, labels_path, fix_ratio, **_):
    with open(labels_path) as f:
      self.labels = f.read().splitlines()
    self.image_shape = [600, 600, 3]  # TODO

    self.opt = opt = opts().parse('ctdet --dataset bdd --keep_res'.split())  # TODO: others than bdd
    Dataset = get_dataset(opt.dataset, opt.task)
    opts.update_dataset_info_and_set_heads(None, opt, Dataset)
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    model = load_model(model, model_saved_path)
    # TODO: loss
    # TODO: model_with_loss
    self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    self.model = model.to(self.device)  # TODO: model_with_loss
    self.model.eval()  # TODO: model_with_loss
    self.fix_ratio = fix_ratio
Example #25
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)

        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales
        self.opt = opt
        self.pause = True
Example #26
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')  # can be multi gpu
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)  # load pretrain
        self.model = self.model.to(opt.device)
        self.model.eval()  # set BN, Dropout constant

        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = opt.K  # 100
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales
        self.opt = opt
        self.pause = True
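Example #27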
  def __init__(self, opt):
    if opt.gpus[0] >= 0:
      opt.device = torch.device('cuda')
    else:
      opt.device = torch.device('cpu')
    
    print('Creating model...')
    self.model = create_model(opt.arch, opt.heads, opt.head_conv)
    self.model = load_model(self.model, "/nfs4/ajaym/Downloads/cendeep_sort_pytorch-master/centernet/models/ctdet_coco_dla_2x.pth")
    self.model = self.model.to(opt.device)
    self.model.eval()

    self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
    self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
    self.max_per_image = 100
    self.num_classes = opt.num_classes
    self.scales = opt.test_scales
    self.opt = opt
    self.pause = True
Example #28
    def __init__(self):
        logging.info('Creating model...')
        self.model = create_model(arch, heads, head_conv)
        self.model = load_model(self.model, model_path)
        self.model = self.model.to(device)
        self.model.eval()
        logging.info('model loaded.')

        self.mean = np.array(mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = num_classes
        self.scales = test_scales
        self.pad = pad
        self.mean = mean
        self.std = std
        self.down_ratio = down_ratio
        self.input_shape = input_shape
        self.K = K_outputs
        self.pause = True
Example #29
 def __init__(self, num_layers, opt):
     super().__init__()
     heads = {'hm': 80, 'wh': 4}
     model = get_hardnet(num_layers=num_layers,
                         heads=opt.heads,
                         head_conv=opt.head_conv,
                         trt=True)
     if opt.load_model:
         model = load_model(model, opt.load_model)
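     # fold BatchNorm layers into the preceding convolutions to speed up inference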
     model = fuse_bn_recursively(model)
     model.v2_transform()
     self.model = model
     mean = np.array(opt.mean, dtype=np.float32).reshape(1, 3, 1, 1)
     std = np.array(opt.std, dtype=np.float32).reshape(1, 3, 1, 1)
     self.mean = nn.Parameter(torch.from_numpy(mean))
     self.std = nn.Parameter(torch.from_numpy(std))
     self.max_per_image = 100
     self.num_classes = opt.num_classes
     self.scales = opt.test_scales
     self.opt = opt
Example #30
def main(opt):

    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    model, optimizer, start_epoch = load_model(model, opt.load_model,
                                               optimizer, opt.resume, opt.lr,
                                               opt.lr_step)

    print(opt)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')
    val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True)

    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    best = 1e10
    train_stat = trainer.wta_stat(start_epoch, train_loader)
    val_stat = trainer.wta_stat(start_epoch, val_loader)
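Example #31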
def main():
    # parse options 
    op = Options()
    opt = op.parse()

    # initialize train or test working dir
    trainer_dir = "trainer_" + opt.name
    opt.model_dir = os.path.join(opt.dir, trainer_dir, "Train") 
    opt.data_dir = os.path.join(opt.dir, trainer_dir, "Data") 
    opt.test_dir = os.path.join(opt.dir, trainer_dir, "Test") 
    
    if not os.path.exists(opt.data_dir):
        os.makedirs(opt.data_dir)
    if opt.mode == "Train":
        if not os.path.exists(opt.model_dir):        
            os.makedirs(opt.model_dir)
        log_dir = opt.model_dir 
        log_path = log_dir + "/train.log"
    if opt.mode == "Test":
        if not os.path.exists(opt.test_dir):
            os.makedirs(opt.test_dir)
        log_dir = opt.test_dir
        log_path = log_dir + "/test.log"

    # save options to disk
    util.opt2file(opt, log_dir+"/opt.txt")
    
    # log setting 
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)
    
    # load train or test data
    data_loader = MultiLabelDataLoader(opt)
    if opt.mode == "Train":
        train_set = data_loader.GetTrainSet()
        val_set = data_loader.GetValSet()
    elif opt.mode == "Test":
        test_set = data_loader.GetTestSet()

    num_classes = data_loader.GetNumClasses()
    rid2name = data_loader.GetRID2Name()
    id2rid = data_loader.GetID2RID()
    opt.class_num = len(num_classes)

    # load model
    model = load_model(opt, num_classes)

    # define loss function
    criterion = nn.CrossEntropyLoss(weight=opt.loss_weight) 
    
    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        criterion = criterion.cuda(opt.devices[0])
        cudnn.benchmark = True
    
    # Train model
    if opt.mode == "Train":
        train(model, criterion, train_set, val_set, opt, (rid2name, id2rid))
    # Test model
    elif opt.mode == "Test":
        test(model, criterion, test_set, opt)
Example #32
def main():
    # parse options 
    op = Options()
    opt = op.parse()

    # special setting
    opt.shuffle = False
    opt.batch_size = 1
    opt.load_thread = 1

    # initialize train or test working dir
    test_dir = os.path.join(opt.classify_dir , opt.name)
    opt.model_dir = opt.dir + "/trainer_" + opt.name + "/Train/"
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)

    # save options to disk
    opt2file(opt, os.path.join(test_dir, "opt.txt"))
    
    # log setting 
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(test_dir + "/deploy.log", 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    logging.getLogger().setLevel(logging.INFO)
    
    # load label  
    if opt.label_file == "":
        opt.label_file = opt.dir + "/label.txt"
    rid2name, id2rid, rid2id = load_label(opt.label_file)
    num_classes = [len(rid2name[index])-2 for index in range(len(rid2name))]
        
    # load transformer
    transformer = get_transformer(opt) 

    # load model
    model = load_model(opt, num_classes)
    model.eval()
    
    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        cudnn.benchmark = True
    
    l = open(test_dir + "/classify_res_data.txt", 'w')
    with open(opt.classify_dir + "/data.txt") as data:
        for num, line in enumerate(data):
            logging.info(str(num+1))
            line = json.loads(line)
            input_tensor = load_image(line["image_file"], line["box"], opt, transformer) 
            input_tensor = input_tensor.unsqueeze(0)
            if opt.cuda:
                input_tensor = input_tensor.cuda(opt.devices[0])
            outputs = model(Variable(input_tensor, volatile=True)) 
            if not isinstance(outputs, list):
                outputs = [outputs]
            line["classify_res"] = list() 
            for index, out in enumerate(outputs):
                out = out.cpu()
                #print "out:", out
                softmax = F.softmax(out, dim=1).data.squeeze()
                #print "softmax:", softmax 
                probs, ids = softmax.sort(0, True)
                classify_res = {}
                for i in range(len(probs)):
                    classify_res[rid2name[index][id2rid[index][ids[i]]]] = probs[i]
                classify_res["max_score"] = probs[0]
                classify_res["best_label"] = rid2name[index][id2rid[index][ids[0]]]
                line["classify_res"].append(classify_res)
            l.write(json.dumps(line, separators=(',', ':'))+'\n')
    l.close()
    logging.info("classification done")