def load_models(feature_extractor_path,
                ad_model_path,
                features_method='c3d',
                device='cuda'):
    """
    Loads both feature extractor and anomaly detector from the given paths
    :param feature_extractor_path: path of the features extractor weights to load
    :param ad_model_path: path of the anomaly detector weights to load
    :param features_method: name of the model to use for features extraction
    :param device: device to use for the models
    :return: anomaly_detector, feature_extractor
    """
    assert path.exists(feature_extractor_path), f"Missing feature extractor weights: {feature_extractor_path}"
    assert path.exists(ad_model_path), f"Missing anomaly detector weights: {ad_model_path}"

    if features_method == 'c3d':
        logging.info(
            f"Loading feature extractor from {feature_extractor_path}")
        feature_extractor = C3D(pretrained=feature_extractor_path)

    else:
        raise NotImplementedError(
            f"Features extraction method {features_method} not implemented")

    feature_extractor = feature_extractor.to(device).eval()

    logging.info(f"Loading anomaly detector from {ad_model_path}")
    anomaly_detector = TorchModel.load_model(
        model_path=ad_model_path).to(device).eval()

    return anomaly_detector, feature_extractor
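# A minimal usage sketch for load_models(); both checkpoint paths below are
# placeholders, not files shipped with the snippet:
anomaly_detector, feature_extractor = load_models(
    feature_extractor_path='pretrained/c3d.pickle',
    ad_model_path='exps/models/epoch_80000.pt',
    features_method='c3d',
    device='cuda')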
Example #2
def c3d_extraction(video_path, device=None):

    batch_size = 1
    train_frame_interval = 2
    clip_length = 16

    single_load = True  # should not matter
    home = os.getcwd()
    pretrained_3d = home + "/c3d-pytorch-master/c3d.pickle"

    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load clips
    print("building train loader")
    train_loader = VideoIterTrain(dataset_path=None,
                                  annotation_path=video_path,
                                  clip_length=clip_length,
                                  frame_stride=train_frame_interval,
                                  video_transform=build_transforms(),
                                  name='train',
                                  return_item_subpath=False,
                                  single_load=single_load)
    print("train loader done, building train iterator")
    train_iter = torch.utils.data.DataLoader(
        train_loader,
        batch_size=batch_size,
        shuffle=False,
        num_workers=32,  # adjust to your machine
        pin_memory=True)

    # Process the clips with C3D
    print("Now loading the data into the C3D network")
    network = C3D(pretrained=pretrained_3d)
    network.to(device)
    network.eval()  # inference only: disable dropout

    # features_dir is expected to be a module-level constant
    if not os.path.exists(features_dir):
        os.mkdir(features_dir)

    features_writer = FeaturesWriter()

    dir_list = []

    for i_batch, (data, target, sampled_idx, dirs,
                  vid_names) in tqdm(enumerate(train_iter)):
        with torch.no_grad():
            outputs = network(data.to(device))

            for i, (dir_name, vid_name, start_frame) in enumerate(
                    zip(dirs, vid_names,
                        sampled_idx.cpu().numpy())):
                dir_list.append([dir_name, vid_name])
                dir_name = os.path.join(features_dir, dir_name)
                features_writer.write(feature=outputs[i],
                                      video_name=vid_name,
                                      idx=start_frame,
                                      dir=dir_name)

    features_writer.dump()
    return dir_list
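# A minimal usage sketch for c3d_extraction(); the annotation path is a
# placeholder and the module-level features_dir constant must already be defined:
dir_list = c3d_extraction(video_path='annotations/train_list.txt')
print(f"Wrote features for {len(dir_list)} clips under {features_dir}")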
Example #3
    def __init__(self,
                 train_video_list,
                 test_video_list,
                 use_gpu=True,
                 batch_size=30,
                 new_width=171,
                 new_height=128,
                 crop_size=112,
                 length=16):
        # Preparing data
        train_dataset = UCF101(video_list=train_video_list,
                               subset='train',
                               length=length,
                               new_width=new_width,
                               new_height=new_height,
                               transforms=transforms.Compose([
                                   SubtractMean([102, 98, 90]),
                                   RandomCrop(crop_size),
                                   RandomHorizontalFlip()
                               ]))
        train_loader = DataLoader(dataset=train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  collate_fn=collate)
        test_dataset = UCF101(video_list=test_video_list,
                              subset='test',
                              length=length,
                              new_width=new_width,
                              new_height=new_height,
                              transforms=transforms.Compose([
                                  SubtractMean([102, 98, 90]),
                                  RandomCrop(crop_size)
                              ]))
        test_loader = DataLoader(dataset=test_dataset,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 collate_fn=collate)
        self.datasets = (train_dataset, test_dataset)
        self.loaders = (train_loader, test_loader)

        # Preparing network
        self.model = C3D(n_classes=101)
        if use_gpu:
            self.model.cuda()
        self.model = nn.DataParallel(self.model)
        # Print some logs
        conf = {
            'train_video_list': os.path.abspath(train_video_list),
            'test_video_list': os.path.abspath(test_video_list),
            'use_gpu': use_gpu,
            'batch_size': batch_size,
            'resize': (new_width, new_height),
            'crop': crop_size,
            'clip_length': length,
            'mean': [102, 98, 90]  # match the SubtractMean transform above
        }
        self.conf = conf
        logger.info('Set up a trainer with parameters:')
        logger.info('{}'.format(self.conf))
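# A minimal instantiation sketch; the enclosing class name (Trainer) and the
# list paths are assumptions, since only __init__ is shown above:
# trainer = Trainer(train_video_list='ucf101_train_list.txt',
#                   test_video_list='ucf101_test_list.txt',
#                   use_gpu=True, batch_size=30)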
Example #4
def c3d_single_extraction(input_clips_f, c3d_network=None):
    input_clips = torch.from_numpy(np.array(input_clips_f))
    transforms = build_transforms()
    input_clips = transforms(input_clips)

    random_seed = 1
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)

    # Remember whether the network is built here, so we can return it
    # (and the device) to the caller for reuse on later calls.
    network_created = c3d_network is None
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if network_created:
        print("Now building the C3D network")
        c3d_network = C3D()
        home = os.getcwd()
        os.chdir(home[0:-len('GUI_stuff')])
        c3d_network.load_state_dict(torch.load('c3d.pickle'))
        os.chdir(home)
        c3d_network.cuda()
        c3d_network.eval()

    X = input_clips.unsqueeze(0)  # add batch dimension

    with torch.no_grad():
        c3d_outputs = c3d_network(X.cuda())

        features_writer = FeaturesWriter()
        start_frame = 0
        vid_name = 'test_single_run_output'
        # c3d_outputs[0]: single clip, so no need to loop over the results
        features_writer.write(feature=c3d_outputs[0],
                              video_name=vid_name,
                              start_frame=start_frame,
                              dir="test")

        avg_segments = features_writer.dump_NO_save()

    if network_created:
        return c3d_outputs, avg_segments, device, c3d_network
    else:
        return c3d_outputs, avg_segments
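# A minimal usage sketch with dummy frames; the clip shape (16 RGB frames)
# follows the C3D convention and is an assumption about build_transforms():
dummy_clip = [np.random.randint(0, 255, (128, 171, 3), dtype=np.uint8)
              for _ in range(16)]
outputs, segments, device, net = c3d_single_extraction(dummy_clip)
# Reuse the loaded network on later calls to skip re-loading the weights:
outputs2, segments2 = c3d_single_extraction(dummy_clip, c3d_network=net)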
def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    args = parser.parse_args()
    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)

    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    cudnn.benchmark = True

    train_loader = VideoIter(dataset_path=args.dataset_path,
                             annotation_path=args.annotation_path,
                             clip_length=args.clip_length,
                             frame_stride=args.frame_interval,
                             video_transform=build_transforms(),
                             name='Features extraction')

    train_iter = torch.utils.data.DataLoader(
        train_loader,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,  # adjust to your machine
        pin_memory=True)

    # Loading network
    if args.feature_extractor == 'c3d':
        network = C3D(pretrained=args.pretrained_c3d)
    elif args.feature_extractor == 'resnet':
        network = resnet(200)
        network.load_state_dict(
            torch.load('network/r3d200_K_200ep.pth')['state_dict'])
    else:
        raise NotImplementedError(
            f"Feature extractor {args.feature_extractor} not implemented")
    network = network.to(device)
    network.eval()  # inference only: disable dropout

    if not path.exists(args.save_dir):
        mkdir(args.save_dir)

    features_writer = FeaturesWriter()

    for i_batch, (data, target, sampled_idx, dirs,
                  vid_names) in tqdm(enumerate(train_iter)):
        with torch.no_grad():
            outputs = network(data.to(device)).detach().cpu().numpy()

            for i, (dir_name, vid_name, start_frame) in enumerate(
                    zip(dirs, vid_names,
                        sampled_idx.cpu().numpy())):
                dir_name = path.join(args.save_dir, dir_name)
                features_writer.write(feature=outputs[i],
                                      video_name=vid_name,
                                      idx=start_frame,
                                      dir=dir_name)

    features_writer.dump()
def load_model(model_path):
    model = C3D(n_classes=101)
    model.cuda()
    model = nn.DataParallel(model)
    try:
        model.module.load_state_dict(torch.load(model_path))
        model.eval()  # propagates to model.module
    except Exception as e:
        logger.error(e)
        print('Cannot load model {}'.format(model_path))
        raise
    print('Loaded model {}'.format(model_path))
    return model
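# A minimal usage sketch; the checkpoint path is a placeholder and the input
# shape follows the C3D convention (batch, channels, frames, height, width):
model = load_model('checkpoints/c3d_ucf101.pth')
with torch.no_grad():
    scores = model(torch.randn(1, 3, 16, 112, 112).cuda())  # (1, 101) logits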
def main():
	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	args = get_args()
	register_logger(log_file=args.log_file)

	cudnn.benchmark = True

	data_loader = VideoIter(dataset_path=args.dataset_path,
							clip_length=args.clip_length,
							frame_stride=args.frame_interval,
							video_transform=build_transforms(),
							return_label=False)

	data_iter = torch.utils.data.DataLoader(data_loader,
											batch_size=args.batch_size,
											shuffle=False,
											num_workers=args.num_workers,
											pin_memory=True)

	network = C3D(pretrained=args.pretrained_3d)
	if device.type != 'cpu':
		network = torch.nn.DataParallel(network)
	network = network.to(device)
	network.eval()  # inference only: disable dropout

	if not path.exists(args.save_dir):
		mkdir(args.save_dir)

	features_writer = FeaturesWriter(num_videos=data_loader.video_count)
	loop_i = 0
	with torch.no_grad():
		for data, clip_idxs, dirs, vid_names in data_iter:
			outputs = network(data.to(device)).detach().cpu().numpy()

			for i, (dir_name, vid_name, clip_idx) in enumerate(zip(dirs, vid_names, clip_idxs)):
				if loop_i == 0:
					logging.info(f"Video {features_writer.dump_count} / {features_writer.num_videos} : Writing clip {clip_idx} of video {vid_name}")

				loop_i += 1
				loop_i %= args.log_every

				dir_name = path.join(args.save_dir, dir_name)
				features_writer.write(feature=outputs[i],
									  video_name=vid_name,
									  idx=clip_idx,
									  dir=dir_name)

	features_writer.dump()
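# A minimal invocation sketch; the script name and flag names are assumptions
# inferred from the attributes that get_args() must provide:
#   python extract_features.py --dataset_path ./videos --clip_length 16 \
#       --frame_interval 1 --batch_size 4 --num_workers 4 \
#       --pretrained_3d ./c3d.pickle --save_dir ./features --log_every 100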
Example #8
def load_feature_extractor(features_method, feature_extractor_path, device):
    assert path.exists(feature_extractor_path)
    logging.info(f"Loading feature extractor from {feature_extractor_path}")

    model = None
    if features_method == 'c3d':
        model = C3D(pretrained=feature_extractor_path)
    elif features_method == 'i3d':
        model = InceptionI3d(400, in_channels=3)
        model.replace_logits(157)
        model.load_state_dict(torch.load(feature_extractor_path))
    elif features_method == 'mfnet':
        model = MFNET_3D()
        model.load_state(state_dict=feature_extractor_path)
    else:
        raise NotImplementedError(
            f"Features extraction method {features_method} not implemented")

    return model.to(device)
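# A minimal usage sketch; the I3D weight path is a placeholder:
extractor = load_feature_extractor('i3d', 'pretrained/i3d_charades.pt',
                                   torch.device('cuda'))
extractor.eval()  # the loader moves the model to the device but does not set eval mode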
def network_setup(ad_model_dir='/home/peter/Documents/actionID/AnomalyDetectionCVPR2018-Pytorch-master/short_60_low_mem/exps/model'):
    device = torch.device("cuda" if torch.cuda.is_available()
                          else "cpu")

    # pretrained_3d is expected to be a module-level constant
    c3d_network = C3D(pretrained=pretrained_3d)
    c3d_network.to(device)

    print("starting the single AD network")
    # prediction of AD with the pretrained network
    AD_network = AnomalyDetector()
    AD_network.to(device)
    net = static_model(net=AD_network,
                       criterion=RegularizedLoss(AD_network, custom_objective).cuda(),
                       model_prefix=ad_model_dir,
                       )
    model_path = net.get_checkpoint_path(20000)
    net.load_checkpoint(pretrain_path=model_path, epoch=20000)
    net.net.to(device)

    return device, c3d_network, net
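# A minimal usage sketch; the default checkpoint directory above is
# machine-specific, so point ad_model_dir at your own exps/model folder:
device, c3d_network, ad_net = network_setup(ad_model_dir='exps/model')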
Example #10
def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    args = parser.parse_args()
    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)

    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    cudnn.benchmark = True

    train_loader = VideoIterTrain(
        dataset_path=args.dataset_path,
        annotation_path=args.annotation_path,
        clip_length=args.clip_length,
        frame_stride=args.train_frame_interval,
        video_transform=build_transforms(),
        name='train',
        return_item_subpath=False,
    )

    train_iter = torch.utils.data.DataLoader(
        train_loader,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=32,  # adjust to your machine
        pin_memory=True)

    val_loader = VideoIterTrain(
        dataset_path=args.dataset_path,
        annotation_path=args.annotation_path_test,
        clip_length=args.clip_length,
        frame_stride=args.val_frame_interval,
        video_transform=build_transforms(),
        name='val',
        return_item_subpath=False,
    )

    val_iter = torch.utils.data.DataLoader(
        val_loader,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=32,  # adjust to your machine
        pin_memory=True)

    network = C3D(pretrained=args.pretrained_3d)
    network.to(device)
    network.eval()  # inference only: disable dropout

    # features_dir is expected to be a module-level constant
    if not path.exists(features_dir):
        mkdir(features_dir)

    def extract_features(data_iter):
        # The train and val passes are identical, so share one helper.
        features_writer = FeaturesWriter()
        for i_batch, (data, target, sampled_idx, dirs,
                      vid_names) in tqdm(enumerate(data_iter)):
            with torch.no_grad():
                outputs = network(data.to(device))

                for i, (dir_name, vid_name, start_frame) in enumerate(
                        zip(dirs, vid_names,
                            sampled_idx.cpu().numpy())):
                    dir_name = path.join(features_dir, dir_name)
                    features_writer.write(feature=outputs[i],
                                          video_name=vid_name,
                                          start_frame=start_frame,
                                          dir=dir_name)
        features_writer.dump()

    extract_features(train_iter)
    extract_features(val_iter)