Example #1

def main():
    global args
    args = arg_parse()
    cfg_from_file(args.cfg_file)
    bgr_means = cfg.TRAIN.BGR_MEAN
    dataset_name = cfg.DATASETS.DATA_TYPE
    batch_size = cfg.TEST.BATCH_SIZE
    num_workers = args.num_workers
    if cfg.DATASETS.DATA_TYPE == 'VOC':
        trainvalDataset = VOCDetection
        top_k = 200
    else:
        trainvalDataset = COCODetection
        top_k = 300
    dataroot = cfg.DATASETS.DATAROOT
    if cfg.MODEL.SIZE == '300':
        size_cfg = cfg.SMALL
    else:
        size_cfg = cfg.BIG
    valSet = cfg.DATASETS.VAL_TYPE
    num_classes = cfg.MODEL.NUM_CLASSES
    save_folder = args.save_folder
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    cfg.TRAIN.TRAIN_ON = False
    net = SSD(cfg)

    checkpoint = torch.load(args.weights)
    state_dict = checkpoint['model']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
    detector = Detect(cfg)
    ValTransform = BaseTransform(size_cfg.IMG_WH, bgr_means, (2, 0, 1))
    val_dataset = trainvalDataset(dataroot, valSet, ValTransform, "val")
    val_loader = data.DataLoader(val_dataset,
                                 batch_size,
                                 shuffle=False,
                                 num_workers=num_workers,
                                 collate_fn=detection_collate)
    thresh = cfg.TEST.CONFIDENCE_THRESH
    eval_net(val_dataset,
             val_loader,
             net,
             detector,
             cfg,
             ValTransform,
             top_k,
             thresh=thresh,
             batch_size=batch_size)
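
The module-prefix handling above recurs in every example that follows: checkpoints saved from a torch.nn.DataParallel model store parameters as `module.xxx`, which a bare model will reject. A minimal self-contained sketch of the idiom (the helper name strip_module_prefix is ours, not the repo's):

from collections import OrderedDict

def strip_module_prefix(state_dict):
    # drop the 'module.' prefix that nn.DataParallel prepends to parameter names
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:] if k.startswith('module.') else k
        new_state_dict[name] = v
    return new_state_dict

# usage: net.load_state_dict(strip_module_prefix(torch.load(path)['model']))
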
Example #2

def main():
    global args
    args = arg_parse()
    ssh_run_param(args)
    cfg_from_file(args.cfg_file)
    bgr_means = cfg.TRAIN.BGR_MEAN
    dataset_name = cfg.DATASETS.DATA_TYPE
    batch_size = cfg.TEST.BATCH_SIZE
    num_workers = args.num_workers
    if cfg.DATASETS.DATA_TYPE == 'VOC':
        trainvalDataset = VOCDetection
        classes = VOC_CLASSES
        top_k = 200
    else:
        trainvalDataset = COCODetection
        classes = COCO_CLASSES
        top_k = 300
    valSet = cfg.DATASETS.VAL_TYPE
    num_classes = cfg.MODEL.NUM_CLASSES
    save_folder = args.save_folder
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    cfg.TRAIN.TRAIN_ON = False
    net = SSD(cfg)

    checkpoint = torch.load(args.weights)
    state_dict = checkpoint['model']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)

    detector = Detect(cfg)
    img_wh = cfg.TEST.INPUT_WH
    ValTransform = BaseTransform(img_wh, bgr_means, (2, 0, 1))
    input_folder = args.images
    thresh = cfg.TEST.CONFIDENCE_THRESH
    for item in os.listdir(input_folder)[2:3]:  # NOTE: this slice processes only a single directory entry
        img_path = os.path.join(input_folder, item)
        print(img_path)
        img = cv2.imread(img_path)
        dets = im_detect(img, net, detector, ValTransform, thresh)
        draw_img = draw_rects(img, dets, classes)
        out_img_name = "output_" + item
        save_path = os.path.join(save_folder, out_img_name)
        cv2.imwrite(save_path, draw_img)
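
Example #2 feeds raw os.listdir output straight into cv2.imread, so non-image files would yield None and crash the detector. A hedged variant that filters by extension and skips unreadable files (im_detect and draw_rects are the repo helpers used above):

import os
import cv2

IMG_EXTS = ('.jpg', '.jpeg', '.png', '.bmp')

def detect_folder(input_folder, save_folder, net, detector, transform, classes, thresh):
    for item in sorted(os.listdir(input_folder)):
        if not item.lower().endswith(IMG_EXTS):
            continue  # skip annotations, hidden files, etc.
        img = cv2.imread(os.path.join(input_folder, item))
        if img is None:
            continue  # unreadable or corrupt image
        dets = im_detect(img, net, detector, transform, thresh)
        cv2.imwrite(os.path.join(save_folder, 'output_' + item),
                    draw_rects(img, dets, classes))
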
Example #3

def main():
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    # args = arg_parse()
    cfg_from_file(args.cfg_file)
    save_folder = args.save_folder
    args.num_workers = args.workers
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    args.batch_size = cfg.TRAIN.BATCH_SIZE

    ngpus_per_node = args.ngpu
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker,
                 nprocs=ngpus_per_node,
                 args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
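
mp.spawn injects each child process's rank as the first positional argument, so main_worker is effectively invoked as main_worker(gpu, ngpus_per_node, args). A standalone illustration of that calling convention:

import torch.multiprocessing as mp

def worker(rank, nprocs, tag):
    # rank (0..nprocs-1) is supplied by mp.spawn; nprocs and tag come from args=(...)
    print('[{}] process {}/{}'.format(tag, rank, nprocs))

if __name__ == '__main__':
    mp.spawn(worker, nprocs=2, args=(2, 'demo'))
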
Example #4

def main():
    global args
    args = arg_parse()
    cfg_from_file(args.cfg_file)
    save_folder = args.save_folder
    batch_size = cfg.TRAIN.BATCH_SIZE
    bgr_means = cfg.TRAIN.BGR_MEAN
    p = 0.6
    gamma = cfg.SOLVER.GAMMA
    momentum = cfg.SOLVER.MOMENTUM
    weight_decay = cfg.SOLVER.WEIGHT_DECAY
    size = cfg.MODEL.SIZE
    thresh = cfg.TEST.CONFIDENCE_THRESH
    if cfg.DATASETS.DATA_TYPE == 'VOC':
        trainvalDataset = VOCDetection
        top_k = 1000
    else:
        trainvalDataset = COCODetection
        top_k = 1000
    dataset_name = cfg.DATASETS.DATA_TYPE
    dataroot = cfg.DATASETS.DATAROOT
    trainSet = cfg.DATASETS.TRAIN_TYPE
    valSet = cfg.DATASETS.VAL_TYPE
    num_classes = cfg.MODEL.NUM_CLASSES
    start_epoch = args.resume_epoch
    epoch_step = cfg.SOLVER.EPOCH_STEPS
    end_epoch = cfg.SOLVER.END_EPOCH
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    net = SSD(cfg)
    print(net)
    if cfg.MODEL.SIZE == '300':
        size_cfg = cfg.SMALL
    else:
        size_cfg = cfg.BIG
    optimizer = optim.SGD(
        net.parameters(),
        lr=cfg.SOLVER.BASE_LR,
        momentum=momentum,
        weight_decay=weight_decay)
    if args.resume_net is not None:
        checkpoint = torch.load(args.resume_net)
        state_dict = checkpoint['model']
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            head = k[:7]
            if head == 'module.':
                name = k[7:]  # remove `module.`
            else:
                name = k
            new_state_dict[name] = v
        net.load_state_dict(new_state_dict)
        optimizer.load_state_dict(checkpoint['optimizer'])
        print('Loading resume network...')
    if args.ngpu > 1:
        net = torch.nn.DataParallel(net)
    net.cuda()
    cudnn.benchmark = True

    criterion = list()
    if cfg.MODEL.REFINE:
        detector = Detect(cfg)
        arm_criterion = RefineMultiBoxLoss(cfg, 2)
        odm_criterion = RefineMultiBoxLoss(cfg, cfg.MODEL.NUM_CLASSES)
        criterion.append(arm_criterion)
        criterion.append(odm_criterion)
    else:
        detector = Detect(cfg)
        ssd_criterion = MultiBoxLoss(cfg)
        criterion.append(ssd_criterion)
    
    TrainTransform = preproc(size_cfg.IMG_WH, bgr_means, p)
    ValTransform = BaseTransform(size_cfg.IMG_WH, bgr_means, (2, 0, 1))

    val_dataset = trainvalDataset(dataroot, valSet, ValTransform, dataset_name)
    val_loader = data.DataLoader(
        val_dataset,
        batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        collate_fn=detection_collate)

    for epoch in range(start_epoch + 1, end_epoch + 1):
        train_dataset = trainvalDataset(dataroot, trainSet, TrainTransform,
                                        dataset_name)
        epoch_size = len(train_dataset)
        train_loader = data.DataLoader(
            train_dataset,
            batch_size,
            shuffle=True,
            num_workers=args.num_workers,
            collate_fn=detection_collate)
        train(train_loader, net, criterion, optimizer, epoch, epoch_step,
              gamma, end_epoch, cfg)
        if (epoch % 5 == 0) or (epoch % 2 == 0 and epoch >= 60):
            save_checkpoint(net, epoch, size, optimizer)
        if (epoch >= 2 and epoch % 2 == 0):
            eval_net(
                val_dataset,
                val_loader,
                net,
                detector,
                cfg,
                ValTransform,
                top_k,
                thresh=thresh,
                batch_size=batch_size)
    save_checkpoint(net, end_epoch, size, optimizer)
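
The resume branch above expects a checkpoint dict with 'model' and 'optimizer' keys. A sketch of the matching save side, inferred from that loading code (the repo's actual save_checkpoint may store additional fields):

import torch

def save_checkpoint_sketch(net, optimizer, epoch, path):
    # mirror the keys the resume branch reads: checkpoint['model'] and checkpoint['optimizer']
    torch.save({
        'epoch': epoch,
        'model': net.state_dict(),
        'optimizer': optimizer.state_dict(),
    }, path)
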
Example #5

def main_worker(gpu, ngpus_per_node, args):
    global best_map
    ## deal with args
    args.gpu = gpu
    cfg_from_file(args.cfg_file)
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

    # distributed cfgs
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    torch.cuda.set_device(args.gpu)
    net = SSD(cfg)
    # print(net)
    if args.resume_net is not None:
        checkpoint = torch.load(args.resume_net)
        state_dict = checkpoint['model']
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            head = k[:7]
            if head == 'module.':
                name = k[7:]  # remove `module.`
            else:
                name = k
            new_state_dict[name] = v
        net.load_state_dict(new_state_dict)

        print('Loading resume network...')

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            # print(args.gpu)
            torch.cuda.set_device(args.gpu)
            net.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            net = torch.nn.parallel.DistributedDataParallel(
                net, device_ids=[args.gpu])
        else:
            net.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            net = torch.nn.parallel.DistributedDataParallel(net)
    elif args.gpu is not None:
        # torch.cuda.set_device(args.gpu)
        net = net.cuda(args.gpu)

    # args = arg_parse()

    batch_size = args.batch_size
    print("batch_size = ", batch_size)
    bgr_means = cfg.TRAIN.BGR_MEAN
    p = 0.6

    gamma = cfg.SOLVER.GAMMA
    momentum = cfg.SOLVER.MOMENTUM
    weight_decay = cfg.SOLVER.WEIGHT_DECAY
    size = cfg.MODEL.SIZE  # size =300
    thresh = cfg.TEST.CONFIDENCE_THRESH
    if cfg.DATASETS.DATA_TYPE == 'VOC':
        trainvalDataset = VOCDetection
        top_k = 1000
    else:
        trainvalDataset = COCODetection
        top_k = 1000
    dataset_name = cfg.DATASETS.DATA_TYPE
    dataroot = cfg.DATASETS.DATAROOT
    trainSet = cfg.DATASETS.TRAIN_TYPE
    valSet = cfg.DATASETS.VAL_TYPE
    num_classes = cfg.MODEL.NUM_CLASSES
    start_epoch = args.resume_epoch
    epoch_step = cfg.SOLVER.EPOCH_STEPS
    end_epoch = cfg.SOLVER.END_EPOCH
    args.num_workers = args.workers

    # optimizer

    optimizer = optim.SGD(net.parameters(),
                          lr=cfg.SOLVER.BASE_LR,
                          momentum=momentum,
                          weight_decay=weight_decay)

    if cfg.MODEL.SIZE == '300':
        size_cfg = cfg.SMALL
    else:
        size_cfg = cfg.BIG
    # if args.resume_net is not None:
    #     checkpoint = torch.load(args.resume_net)
    #     optimizer.load_state_dict(checkpoint['optimizer'])

    cudnn.benchmark = True

    # deal with criterion
    criterion = list()
    if cfg.MODEL.REFINE:
        detector = Detect(cfg)
        arm_criterion = RefineMultiBoxLoss(cfg, 2)
        odm_criterion = RefineMultiBoxLoss(cfg, cfg.MODEL.NUM_CLASSES)
        arm_criterion.cuda(args.gpu)
        odm_criterion.cuda(args.gpu)
        criterion.append(arm_criterion)
        criterion.append(odm_criterion)
    else:
        detector = Detect(cfg)
        ssd_criterion = MultiBoxLoss(cfg)
        criterion.append(ssd_criterion)

    # deal with dataset
    TrainTransform = preproc(size_cfg.IMG_WH, bgr_means, p)
    ValTransform = BaseTransform(size_cfg.IMG_WH, bgr_means, (2, 0, 1))

    val_dataset = trainvalDataset(dataroot, valSet, ValTransform, dataset_name)
    val_loader = data.DataLoader(val_dataset,
                                 batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers * ngpus_per_node,
                                 collate_fn=detection_collate)
    # deal with training dataset
    train_dataset = trainvalDataset(dataroot, trainSet, TrainTransform,
                                    dataset_name)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.num_workers,
                                               collate_fn=detection_collate,
                                               pin_memory=True,
                                               sampler=train_sampler)
    ## set net in training phase
    net.train()

    for epoch in range(start_epoch + 1, end_epoch + 1):
        if args.distributed:
            train_sampler.set_epoch(epoch)

        # train_loader = data.DataLoader(train_dataset, batch_size, shuffle=True, num_workers=args.num_workers,
        #                               collate_fn=detection_collate)

        # Training
        train(train_loader, net, criterion, optimizer, epoch, epoch_step,
              gamma, end_epoch, cfg, args)

        if epoch % 10 == 0:
            #print("here",args.rank % ngpus_per_node)
            ## validation the model
            eval_net(val_dataset,
                     val_loader,
                     net,
                     detector,
                     cfg,
                     ValTransform,
                     args,
                     top_k,
                     thresh=thresh,
                     batch_size=cfg.TEST.BATCH_SIZE)

        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            if (epoch % 10 == 0) or (epoch % 5 == 0 and epoch >= 60):
                save_name = os.path.join(
                    args.save_folder,
                    cfg.MODEL.TYPE + "_epoch_{}_rank_{}_{}".format(
                        str(epoch), str(args.rank), str(size)) + '.pth')
                save_checkpoint(net, epoch, size, optimizer, batch_size,
                                save_name)
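
Example #5 divides batch_size and workers by ngpus_per_node because, with one process per GPU, each DistributedDataParallel replica consumes only its shard of the global batch. The arithmetic, with illustrative numbers:

# global settings requested on the command line
total_batch_size = 64
total_workers = 8
ngpus_per_node = 4

# what each single-GPU process should use
per_gpu_batch = total_batch_size // ngpus_per_node   # 16 samples per step per GPU
per_gpu_workers = total_workers // ngpus_per_node    # 2 loader workers per process
assert per_gpu_batch * ngpus_per_node == total_batch_size
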
Example #6

# NOTE: the enclosing function definition is missing from the source fragment;
# the signature below is reconstructed from the fragment's free variables and
# is an assumption, not the repo's original code.
def prepare_config(method_name, model_name, dataset_name, gpu_id,
                   cfg_file=None, evaluate=False, detect=False):
    if not evaluate and not detect:
        if cfg_file is None:
            cfg_file = osp.join('configs', method_name, 'default.yml')
            print('No config file given, '
                  'using default config: {:s}'.format(cfg_file))

        check_if_exist('Config', cfg_file)

        extra_cfg = ('METHOD_NAME {:s} MODEL_NAME {:s} '
                     'DATASET_NAME {:s} GPU_ID {:d}'.format(
                         method_name, model_name, dataset_name, gpu_id))

        set_cfgs = extra_cfg.split()

        # Update config
        cfg_from_file(cfg_file)
        cfg_from_list(set_cfgs)

        # Set and create output dir
        cfg.OUTPUT_DIR = osp.join(cfg.OUTPUT_DIR, cfg.DATASET_NAME,
                                  cfg.METHOD_NAME, cfg.MODEL_NAME)
        make_if_not_exist(cfg.OUTPUT_DIR)

        # Get classes from label map
        label_map_file = osp.join(
            cfg.DATA_DIR, cfg.DATASET_NAME,
            '{}_labelmap.prototxt'.format(cfg.DATASET_NAME))

        cfg.CLASSES = get_classnames_from_labelmap(label_map_file)
        cfg.NUM_CLASSES = len(cfg.CLASSES)
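
get_classnames_from_labelmap above reads class names from a Caffe-style labelmap.prototxt. A rough standalone stand-in, assuming each entry carries a display_name field (the repo's actual parser may differ):

import re

def get_classnames_from_labelmap(label_map_file):
    # collect every display_name: "..." entry, in file order
    with open(label_map_file) as f:
        return re.findall(r'display_name\s*:\s*"([^"]+)"', f.read())
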
Example #7

def main():
    global args
    args = arg_parse()
    cfg_from_file(args.cfg_file)
    st = time.time()  # overall start time; used by the final "detect time" print
    bgr_means = cfg.TRAIN.BGR_MEAN
    dataset_name = cfg.DATASETS.DATA_TYPE
    batch_size = cfg.TEST.BATCH_SIZE
    num_workers = args.num_workers
    if cfg.DATASETS.DATA_TYPE == 'VOC':
        trainvalDataset = VOCDetection
        classes = VOC_CLASSES
        top_k = 200
    elif cfg.DATASETS.DATA_TYPE == 'JUGG':
        trainvalDataset = COCOJUGGDetection
        top_k = 200
    else:
        trainvalDataset = COCODetection
        classes = COCO_CLASSES
        top_k = 300
    valSet = cfg.DATASETS.VAL_TYPE
    num_classes = cfg.MODEL.NUM_CLASSES
    save_folder = args.save_folder
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    cfg.TRAIN.TRAIN_ON = False
    net = SSD(cfg)

    checkpoint = torch.load(args.weights)
    state_dict = checkpoint['model']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)

    detector = Detect(cfg)
    img_wh = cfg.TEST.INPUT_WH
    print(img_wh)
    ValTransform = BaseTransform(img_wh, bgr_means, (2, 0, 1))
    input_folder = args.images
    thresh = cfg.TEST.CONFIDENCE_THRESH
    all_items = os.listdir(input_folder)
    # for VOC debugging: override the directory listing with the VOC test split
    all_items = get_voc_test()
    # validation
    if cfg.DATASETS.DATA_TYPE == 'JUGG':
        num_classes -= 1
        val_ann_file = '/workspace/mnt/group/blademaster/jianglielin/datasets/juggdet_train_test/juggdet_0830/Lists/annotations/juggdet_0503_test_0711.json'
        val_dataset = trainvalDataset(cfg.DATASETS.DATAROOT, valSet,
                                      val_ann_file, ValTransform, dataset_name)
        all_items = []
        for img_id in val_dataset.ids:
            all_items.append(img_id.split('/')[-1])
    else:
        val_dataset = trainvalDataset(cfg.DATASETS.DATAROOT, valSet,
                                      ValTransform, dataset_name)
    batch_size = 1
    imgs = []
    img_infos = []
    img_names = []
    all_boxes = [[[] for _ in range(len(all_items))]
                 for _ in range(num_classes)]
    idx = 0
    all_fps_time = 0.0
    all_forward_time = 0.0
    all_detect_time = 0.0
    all_nms_time = 0.0
    net.eval()
    t1 = time.time()
    for i in range(len(all_items)):
        item = all_items[i]
        img_path = os.path.join(input_folder, item)
        #print(img_path)
        img = cv2.imread(img_path,
                         cv2.IMREAD_COLOR + cv2.IMREAD_IGNORE_ORIENTATION)
        img_resize, img_info = pre_process(img, img_wh)
        imgs.append(img_resize)
        img_infos.append(img_info)
        img_names.append(item)
        if len(imgs) == batch_size or i == len(all_items) - 1:
            if idx % 50 == 0:
                print('idx:', idx)
            dets, fps_time, forward_time, detect_time, nms_time = im_detect_batch(
                imgs, img_infos, net, detector, thresh, num_classes)
            all_fps_time += fps_time
            all_forward_time += forward_time
            all_detect_time += detect_time
            all_nms_time += nms_time

            for k in range(len(imgs)):
                img_idx = idx * batch_size + k  # global image index across batches
                for j in range(1, num_classes):
                    all_boxes[j][img_idx] = dets[j][k]
            idx += 1
            imgs = []
            img_infos = []
    t2 = time.time()
    all_inference_time = t2 - t1
    print('all fps time: {:.3f}s, per_image: {:.3f}s\n'
          'all inference time: {:.3f}s, per_image: {:.3f}s\n'
          'all forward time: {:.3f}s, per_image: {:.3f}s\n'
          'all post-process time: {:.3f}s, per_image: {:.3f}s\n'
          'all nms time: {:.3f}s, per_image: {:.3f}s'.format(
              all_fps_time, all_fps_time / len(all_items), all_inference_time,
              all_inference_time / len(all_items), all_forward_time,
              all_forward_time / len(all_items), all_detect_time,
              all_detect_time / len(all_items), all_nms_time,
              all_nms_time / len(all_items)))

    date = time.strftime('%Y-%m-%d-%H-%M', time.localtime())
    eval_save_folder = os.path.join("./eval_batch_demo/", date)
    if not os.path.exists(eval_save_folder):
        os.makedirs(eval_save_folder)
    det_file = os.path.join(eval_save_folder, 'detections_demo.pkl')
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
    print('Evaluating detections')
    val_dataset.evaluate_detections(all_boxes, eval_save_folder)
    print("detect time: ", time.time() - st)
Example #8

def main():
    global args
    args = arg_parse()
    cfg_from_file(args.cfg_file)
    bgr_means = cfg.TRAIN.BGR_MEAN
    dataset_name = cfg.DATASETS.DATA_TYPE
    batch_size = cfg.TEST.BATCH_SIZE
    num_workers = args.num_workers
    if cfg.DATASETS.DATA_TYPE == 'VOC':
        trainvalDataset = VOCDetection
        classes = VOC_CLASSES
        top_k = 200
    elif cfg.DATASETS.DATA_TYPE == 'CHECKOUT':
        trainvalDataset = CheckoutDetection
        classes = CHECKOUT_CLASSES
        top_k = 50
    else:
        trainvalDataset = COCODetection
        classes = COCO_CLASSES
        top_k = 300
    valSet = cfg.DATASETS.VAL_TYPE
    num_classes = cfg.MODEL.NUM_CLASSES
    save_folder = args.save_folder
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    cfg.TRAIN.TRAIN_ON = False
    net = SSD(cfg)

    checkpoint = torch.load(args.weights, map_location='cpu')
    state_dict = checkpoint['model']
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
    net.cuda()

    detector = Detect(cfg)
    img_wh = cfg.TEST.INPUT_WH
    ValTransform = BaseTransform(img_wh, bgr_means, (2, 0, 1))
    thresh = cfg.TEST.CONFIDENCE_THRESH

    video = cv2.VideoCapture(args.video)
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    writer = cv2.VideoWriter('output.avi', fourcc, 30.0, (1000, 1000), True)

    while True:
        _, img = video.read()
        if img is None:
            break
        dets = im_detect(img, net, detector, ValTransform, thresh)
        draw_img = draw_rects(img, dets, classes)
        resized = cv2.resize(draw_img, (1000, 1000),
                             interpolation=cv2.INTER_NEAREST)
        # cv2.imshow('image', resized)
        # cv2.waitKey(10)
        writer.write(resized)

    video.release()
    writer.release()
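
cv2.VideoWriter only accepts frames whose size matches the one given at construction, which is why every frame above is resized to (1000, 1000) before write(); mismatched frames are typically dropped silently. A self-contained round trip with dummy frames:

import cv2
import numpy as np

fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = cv2.VideoWriter('demo.avi', fourcc, 30.0, (1000, 1000), True)
for _ in range(30):  # one second of black frames at 30 fps
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)
    writer.write(cv2.resize(frame, (1000, 1000)))  # must match the writer's frame size
writer.release()  # finalize the AVI container
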