Exemplo n.º 1
0
    # distributed training
    args.distributed = args.world_size > 1
    if args.distributed:
        import re, socket
        rank = int(re.search('192.168.0.(.*)', socket.gethostname()).group(1))
        logging.info(
            "Distributed Training (rank = {}), world_size = {}, backend = `{}'"
            .format(rank, args.world_size, args.backend))
        dist.init_process_group(backend=args.backend,
                                init_method=args.dist_url,
                                rank=rank,
                                group_name=args.task_name,
                                world_size=args.world_size)

    # load dataset related configuration
    dataset_cfg = dataset.get_config(name=args.dataset)

    # creat model with all parameters initialized
    net, input_conf = get_symbol(
        name=args.network,
        pretrained=args.pretrained_2d if args.resume_epoch < 0 else None,
        print_net=True if args.distributed else False,
        **dataset_cfg)

    # training
    kwargs = {}
    kwargs.update(dataset_cfg)
    kwargs.update({'input_conf': input_conf})
    kwargs.update(vars(args))
    train_model(sym_net=net, **kwargs)
    # set device states
    print('Can use cuda: ' + str(torch.cuda.is_available()))
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus  # before using torch
    assert torch.cuda.is_available(), "CUDA is not available"
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)

    # load dataset related configuration
    dataset_cfg = dataset.get_config(name=args.dataset)

    # creat model with all parameters initialized
    assert (not args.fine_tune or not args.resume_epoch < 0), \
            "args: `resume_epoch' must be defined for fine tuning"
    net, input_conf = get_symbol(
        name=args.network,
        print_net=False,  # True if args.distributed else False,
        **dataset_cfg)

    # training
    kwargs = {}
    kwargs.update(dataset_cfg)
    kwargs.update({'input_conf': input_conf})
    kwargs.update(vars(args))
    args.short_cycles = False
    args.long_cycles = False
    train_model(sym_net=net,
                name='UCF101',
                net_name=args.network,
                dataset_location=args.dataset,
                enable_long_cycles=args.long_cycles,
                enable_short_cycles=args.short_cycles,
Exemplo n.º 3
0
    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)
    logging.info("Start evaluation with args:\n" +
                 json.dumps(vars(args), indent=4, sort_keys=True))

    # set device states
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus  # before using torch
    assert torch.cuda.is_available(), "CUDA is not available"

    # load dataset related configuration
    dataset_cfg = dataset.get_config(name=args.dataset)

    # creat model
    dark = args.is_dark
    sym_net_rgb, input_config = get_symbol(name=args.network_rgb,
                                           is_dark=dark,
                                           **dataset_cfg)
    sym_net_flow, input_config = get_symbol(name=args.network_flow,
                                            is_dark=dark,
                                            **dataset_cfg)

    # network
    if torch.cuda.is_available():
        cudnn.benchmark = True
        sym_net_rgb = torch.nn.DataParallel(sym_net_rgb).cuda()
        sym_net_flow = torch.nn.DataParallel(sym_net_flow).cuda()
        criterion = torch.nn.CrossEntropyLoss().cuda()
    else:
        sym_net_rgb = torch.nn.DataParallel(sym_net_rgb)
        sym_net_flow = torch.nn.DataParallel(sym_net_flow)
        criterion = torch.nn.CrossEntropyLoss()
    args = parser.parse_args()
    args = autofill(args)

    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)
    logging.info("Start evaluation with args:\n" +
                 json.dumps(vars(args), indent=4, sort_keys=True))

    # set device states
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpus)  # before using torch
    assert torch.cuda.is_available(), "CUDA is not available"

    # load dataset related configuration
    dataset_cfg = dataset.get_config(name=args.dataset)

    # creat model
    sym_net, input_config = get_symbol(name=args.network, **dataset_cfg)

    # network
    if torch.cuda.is_available():
        cudnn.benchmark = True
        sym_net = torch.nn.DataParallel(sym_net).cuda()
        criterion = torch.nn.CrossEntropyLoss().cuda()
    else:
        sym_net = torch.nn.DataParallel(sym_net)
        criterion = torch.nn.CrossEntropyLoss()
    net = static_model(net=sym_net,
                       criterion=criterion,
                       model_prefix=args.model_prefix)
    net.load_checkpoint(epoch=args.load_epoch)

    # data iterator:
Exemplo n.º 5
0
    logging.info("Start training with args:\n" +
                 json.dumps(vars(args), indent=4, sort_keys=True))

    # set device states
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus # before using torch
    assert torch.cuda.is_available(), "CUDA is not available"
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)



    # load dataset related configuration
    dataset_cfg = dataset.get_config(name=args.dataset)

    # creat model with all parameters initialized
    net, input_conf = get_symbol(name=args.network,
                     pretrained=args.pretrained_2d if args.resume_epoch < 0 else None,
                     modality = args.modality,
                     drop_out = args.drop_out,
                     arch_estimator = args.arch_estimator,
                     arch_d = args.arch_d,
                     print_net= False,
                     **dataset_cfg)

    # training
    kwargs = {}
    kwargs.update(dataset_cfg)
    kwargs.update({'input_conf': input_conf})
    kwargs.update(vars(args))
    train_model(args.network, sym_net=net, optim = args.optimizer, **kwargs)
Exemplo n.º 6
0
def search_result(video_path):
    """Search the video database for clips similar to one query video.

    The query is read from ``./static/data/<video_path>``; a feature is
    extracted with the retrieval network and ranked against the
    pre-computed database features.  When the query's label exists in the
    database, the ranking is scored with AP@10, AP@50 and AP@200.

    Parameters
    ----------
    video_path : str
        Query video path, relative to ``./static/data/``.

    Returns
    -------
    dict
        Per query-clip results: ``{"result": top-N names, "AP": [AP@10,
        AP@50, AP@200]}`` plus timing fields attached to the first clip of
        each batch.  The same dict is also written to ``q_r.json``.
    """
    video_path = "./static/data/" + video_path
    b_time = time.time()

    # set args
    args = parser.parse_args()
    args = autofill(args)

    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)
    logging.info("Start evaluation with args:\n" +
                 json.dumps(vars(args), indent=4, sort_keys=True))

    # set device states
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpus)  # before using torch
    assert torch.cuda.is_available(), "CUDA is not available"

    # load dataset related configuration
    dataset_cfg = dataset.get_config(name=args.dataset)

    # create model
    sym_net, input_config = get_symbol(name=args.network, use_flow=False, **dataset_cfg)

    # network
    if torch.cuda.is_available():
        cudnn.benchmark = True
        sym_net = torch.nn.DataParallel(sym_net).cuda()
        criterion = torch.nn.CrossEntropyLoss().cuda()
    else:
        sym_net = torch.nn.DataParallel(sym_net)
        criterion = torch.nn.CrossEntropyLoss()
    net = static_model(net=sym_net,
                       criterion=criterion,
                       model_prefix=args.model_prefix)
    net.load_checkpoint(epoch=args.load_epoch)
    m_time = time.time()

    # pre-computed database: name -> label mapping plus the feature matrix
    dict_name_label = get_name_label()
    Video_list, feature_list = get_feature_dict()
    all_feature = np.array(feature_list)
    d_time = time.time()

    # stage the query video and its extracted frames under ./query/
    get_query(video_path)
    extract_query_frame()
    data_root = "./query/"
    query_names = os.listdir(data_root + "videos")
    txt_path = "./query/list_cvt/search.txt"
    if os.path.exists(txt_path):
        os.remove(txt_path)
    # one "<index>\t0\t<name>" line per query clip, as VideoIter expects
    with open(txt_path, "w") as f:
        for idx, name in enumerate(query_names):
            f.write(str(idx) + "\t" + "0" + "\t" + name + "\n")

    normalize = transforms.Normalize(mean=input_config['mean'], std=input_config['std'])
    val_sampler = sampler.RandomSampling(num=args.clip_length,
                                         interval=args.frame_interval,
                                         speed=[1.0, 1.0])
    val_loader = VideoIter(video_prefix=os.path.join(data_root, 'videos'),
                           frame_prefix=os.path.join(data_root, 'frames'),
                           txt_list=os.path.join(data_root, 'list_cvt', 'search.txt'),
                           sampler=val_sampler,
                           force_color=True,
                           video_transform=transforms.Compose([
                               transforms.Resize((256, 256)),
                               transforms.CenterCrop((224, 224)),
                               transforms.ToTensor(),
                               normalize,
                           ]),
                           name='test',
                           return_item_subpath=True
                           )

    # NOTE(review): shuffle=True is unusual for an evaluation pass — confirm.
    eval_iter = torch.utils.data.DataLoader(val_loader,
                                            batch_size=args.batch_size,
                                            shuffle=True,
                                            num_workers=1,  # change this part accordingly
                                            pin_memory=True)

    net.net.eval()
    pr_time = time.time()

    total_round = 1  # change this part accordingly if you do not want an inf loop

    for i_round in range(total_round):
        dict_q_r = {}
        with torch.no_grad():  # inference only; skip autograd bookkeeping
            for data, target, video_subpath in eval_iter:
                batch_start_time = time.time()
                feature = net.get_feature(data)
                feature = feature.detach().cpu().numpy()

                for clip_feature, subpath in zip(feature, video_subpath):
                    dict_info = {}
                    topN_re = get_top_N(Video_list, all_feature, args.topN, clip_feature)
                    dict_info["result"] = topN_re
                    if subpath in dict_name_label:
                        ground_truth = dict_name_label[subpath]
                        tmp_AP10 = cal_AP(topN_re[:10], ground_truth)
                        tmp_AP50 = cal_AP(topN_re[:50], ground_truth)
                        tmp_AP200 = cal_AP(topN_re[:200], ground_truth)
                    else:
                        print("video is not in the database, AP=0")
                        tmp_AP10 = tmp_AP50 = tmp_AP200 = 0
                    print(subpath, str(tmp_AP10), str(tmp_AP50), str(tmp_AP200))
                    dict_info["AP"] = [tmp_AP10, tmp_AP50, tmp_AP200]
                    dict_q_r[subpath] = dict_info
                batch_end_time = time.time()
                # timings are attached to the first clip of each batch
                dict_q_r[video_subpath[0]]["time"] = batch_end_time - batch_start_time + pr_time - d_time
                dict_q_r[video_subpath[0]]["lmtime"] = m_time - b_time
                dict_q_r[video_subpath[0]]["datatime"] = d_time - m_time
                # fix: close the output file instead of leaking the handle
                with open("q_r.json", "w") as out_file:
                    json.dump(dict_q_r, out_file)

    return dict_q_r
Exemplo n.º 7
0
    args = autofill(args)

    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)
    logging.info("Start evaluation with args:\n" +
                 json.dumps(vars(args), indent=4, sort_keys=True))

    # set device states
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpus)  # before using torch
    assert torch.cuda.is_available(), "CUDA is not available"

    # load dataset related configuration
    dataset_cfg = dataset.get_config(name=args.dataset)

    # creat model
    sym_net, input_config = get_symbol(name=args.network,
                                       modality=args.modality,
                                       arch_estimator=args.arch_estimator,
                                       **dataset_cfg)

    # network
    if torch.cuda.is_available():
        cudnn.benchmark = True
        sym_net = torch.nn.DataParallel(sym_net).cuda()
        criterion = torch.nn.CrossEntropyLoss().cuda()
    else:
        sym_net = torch.nn.DataParallel(sym_net)
        criterion = torch.nn.CrossEntropyLoss()
    net = static_model(net=sym_net,
                       criterion=criterion,
                       model_prefix=args.model_prefix,
                       criterion2=torch.nn.MSELoss().cuda()
                       if args.modality == 'flow+mp4' else None)
Exemplo n.º 8
0
	set_logger(log_file=args.log_file, debug_mode=args.debug_mode)  #进行日志捕捉
	#打印
	logging.info("Using pytorch {} ({})".format(torch.__version__, torch.__path__))
	logging.info("Start training with args:\n" + json.dumps(vars(args), indent=4, sort_keys=True))

	# set device states
	os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus # before using torch  
	assert torch.cuda.is_available(), "CUDA is not available"
	torch.manual_seed(args.random_seed)
	torch.cuda.manual_seed(args.random_seed)  #防止gpu不可用
 
	# load dataset related configuration
	dataset_cfg = dataset.get_config(name=args.dataset)   #返回一个字典config['num_classes'] = 6 

	# creat model with all parameters initialized
	net, input_conf = get_symbol(name=args.network, pretrained=True, **dataset_cfg)  #config['num_classes'] = 6
    
	'''
	net:class RESNET18(nn.Module);
	input_conf:
    config['mean'] = [0.43216, 0.394666, 0.37645] 
	config['std'] = [0.22803, 0.22145, 0.216989] 


    '''
	# training
	kwargs = {}
	kwargs.update(dataset_cfg)      
	kwargs.update({'input_conf': input_conf})
    
	'''
Exemplo n.º 9
0
    # set args
    args = parser.parse_args()

    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)
    logging.info("Cudnn Version: {}".format(cudnn.version()))
    cudnn.benchmark = True
    logging.info("Start evaluation with args:\n" +
                 json.dumps(vars(args), indent=4, sort_keys=True))

    # set device states
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpus)  # before using torch
    assert torch.cuda.is_available(), "CUDA is not available"

    # creat model
    sym_net, input_config = get_symbol(name=args.network, num_classes=101)

    # network
    if torch.cuda.is_available():
        sym_net = sym_net.cuda()
    net = static_model(net=sym_net)

    # main loop
    with torch.no_grad():
        net.net.eval()
        sum_batch_elapse = 0.
        sum_batch_inst = 0
        if args.network == 'RESNET18_C3D':
            data = torch.autograd.Variable(torch.randn(args.batch_size,3,args.clip_length,112,112).float().cuda(), \
                                            requires_grad=False)
        else:
Exemplo n.º 10
0
            if cv2.waitKey(1) & 0xff == ord('q'):
                break

        cv2.destroyAllWindows()


if __name__ == '__main__':

    # NOTE(review): this __main__ block looks like two scripts merged — a
    # hard-coded camera demo (up to P.start()) followed by an args-driven
    # evaluation setup that reuses variable names from a different file.

    # args = parser.parse_args()

    gpus = '0'
    network = 'mfnet_3d'
    video_name = 0  # presumably selects the default capture device — TODO confirm
    model_dir = './models/zhinanche.pth'
    label_dir = './labels/70mai-zh.txt'

    # class names come from the label file; class count follows from it
    label = transform_label(label_dir)
    num_classes = len(label)

    # create model
    sym_net, input_config = get_symbol(name=network, num_classes=num_classes)

    net = make_net(gpus, model_dir, sym_net)

    # unbounded queue shared between the capture and preview workers
    frame_holder_queue = Queue(maxsize=0)

    C = cross_net(video_name, label, frame_holder_queue)
    P = preview_video(frame_holder_queue)
    C.start()
    P.start()
    # NOTE(review): `args` is referenced before assignment here — the
    # parse_args() call above is commented out, so this line would raise
    # NameError at runtime; confirm which script this section belongs to.
    args = autofill(args)

    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)
    logging.info("Start evaluation with args:\n" +
                 json.dumps(vars(args), indent=4, sort_keys=True))

    # set device states
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpus)  # before using torch
    assert torch.cuda.is_available(), "CUDA is not available"

    # load dataset related configuration
    dataset_cfg = dataset.get_config(name=args.dataset)

    # create model
    sym_net, input_config = get_symbol(name=args.network,
                                       use_flow=args.use_flow,
                                       **dataset_cfg)

    # network
    if torch.cuda.is_available():
        cudnn.benchmark = True
        sym_net = torch.nn.DataParallel(sym_net).cuda()
        criterion = torch.nn.CrossEntropyLoss().cuda()
    else:
        sym_net = torch.nn.DataParallel(sym_net)
        criterion = torch.nn.CrossEntropyLoss()
    net = static_model(net=sym_net,
                       criterion=criterion,
                       model_prefix=args.model_prefix)
    net.load_checkpoint(epoch=args.load_epoch)
Exemplo n.º 12
0
    import metric
    from lr_scheduler import MultiFactorScheduler
    import data.dataiter_factory as dataiter_factory

    logging.getLogger().setLevel(logging.DEBUG)

    resume = False
    pretained = False

    ## -----------------
    resnet18 = torchvision.models.resnet18()
    import network
    logging.info(network.__file__)
    from network import symbol_builder
    sym_c3d, net_cfg = symbol_builder.get_symbol(name="c3d", num_classes=101)

    # settings for the optimization
    optimizer = torch.optim.SGD(sym_c3d.parameters(),
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=0.005)

    # initializatioln the dynamic model
    net = model(net=sym_c3d,
                optimizer=optimizer,
                criterion=torch.nn.CrossEntropyLoss().cuda())

    # load the pretained model
    if resume:
        net.load_checkpoint(epoch=load_epoch)
Exemplo n.º 13
0
    args = autofill(args)

    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)
    logging.info("Start evaluation with args:\n" +
                 json.dumps(vars(args), indent=4, sort_keys=True))

    # set device states
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus  # before using torch
    assert torch.cuda.is_available(), "CUDA is not available"

    # load dataset related configuration
    dataset_cfg = dataset.get_config(name=args.dataset)

    # creat model
    sym_net, input_config = get_symbol(name=args.network,
                                       DA_method=None,
                                       **dataset_cfg)

    # network
    if torch.cuda.is_available():
        sym_net = torch.nn.DataParallel(sym_net).cuda()
        criterion = torch.nn.CrossEntropyLoss().cuda()
        criterion_domain = torch.nn.CrossEntropyLoss().cuda()
    else:
        sym_net = torch.nn.DataParallel(sym_net)
        criterion = torch.nn.CrossEntropyLoss()
        criterion_domain = torch.nn.CrossEntropyLoss()
    net = static_model(net=sym_net,
                       criterion=criterion,
                       criterion_domain=criterion_domain,
                       DA_method=None,
Exemplo n.º 14
0
    args = parser.parse_args()
    args = autofill(args)

    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)
    logging.info("Using pytorch {} ({})".format(torch.__version__,
                                                torch.__path__))
    logging.info("Start training with args:\n" +
                 json.dumps(vars(args), indent=4, sort_keys=True))

    # set device states
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus  # before using torch
    assert torch.cuda.is_available(), "CUDA is not available"
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)

    # load dataset related configuration
    dataset_cfg = dataset.get_config(name=args.dataset)

    # creat model with all parameters initialized
    net, input_conf = get_symbol(name=args.network,
                                 pretrained=True,
                                 DA_method=args.DA_method,
                                 **dataset_cfg)

    # training
    kwargs = {}
    kwargs.update(dataset_cfg)
    kwargs.update({'input_conf': input_conf})
    kwargs.update(vars(args))
    train_da_model(sym_net=net, **kwargs)
Exemplo n.º 15
0
        rank = int(re.search('192.168.0.(.*)', socket.gethostname()).group(1))
        logging.info(
            "Distributed Training (rank = {}), world_size = {}, backend = `{}'"
            .format(rank, args.world_size, args.backend))
        dist.init_process_group(backend=args.backend,
                                init_method=args.dist_url,
                                rank=rank,
                                group_name=args.task_name,
                                world_size=args.world_size)

    # load dataset related configuration
    dataset_cfg = dataset.get_config(name=args.dataset)

    # creat model with all parameters initialized
    net, input_conf = get_symbol(
        name=args.network,
        use_flow=args.use_flow,
        pretrained=args.pretrained_2d if args.resume_epoch < 0 else None,
        print_net=True if args.distributed else False,
        sample_duration=args.clip_length,
        dyn_mode=args.dyn_mode,
        get_fea=args.get_fea,
        **dataset_cfg)

    # training
    kwargs = {}
    kwargs.update(dataset_cfg)
    kwargs.update({'input_conf': input_conf})
    kwargs.update(vars(args))
    train_model(sym_net=net, **kwargs)