Example #1
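All of the snippets below omit their imports. A plausible common header is sketched here; the repository-local module paths are assumptions, not confirmed locations:

import os
import sys
import logging
from os import path, mkdir

import numpy as np
import torch
import torch.backends.cudnn as cudnn
from tqdm import tqdm

# Repository-local modules (assumed locations):
# from c3d import C3D
# from features_writer import FeaturesWriter
# from video_iterator import VideoIter, VideoIterTrain, SingleVideoIter
# from transforms import build_transforms
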
def c3d_extraction(video_path, device=None):

    batch_size = 1
    train_frame_interval = 2
    clip_length = 16

    single_load = True  # should not matter
    pretrained_3d = os.path.join(os.getcwd(), "c3d-pytorch-master", "c3d.pickle")

    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load clips
    print("Building the train loader")
    train_loader = VideoIterTrain(dataset_path=None,
                                  annotation_path=video_path,
                                  clip_length=clip_length,
                                  frame_stride=train_frame_interval,
                                  video_transform=build_transforms(),
                                  name='train',
                                  return_item_subpath=False,
                                  single_load=single_load)
    print("train loader done, train_iter now")
    train_iter = torch.utils.data.DataLoader(
        train_loader,
        batch_size=batch_size,
        shuffle=False,
        num_workers=32,  # tune this to your machine
        pin_memory=True)

    # Process the clips with C3D
    print("Now feeding the data to the C3D network")
    network = C3D(pretrained=pretrained_3d)
    network.to(device)

    # NOTE: features_dir is assumed to be defined at module level.
    if not os.path.exists(features_dir):
        os.mkdir(features_dir)

    features_writer = FeaturesWriter()

    dir_list = []

    for i_batch, (data, target, sampled_idx, dirs,
                  vid_names) in tqdm(enumerate(train_iter)):
        with torch.no_grad():
            outputs = network(data.to(device))

            for i, (dir, vid_name, start_frame) in enumerate(
                    zip(dirs, vid_names,
                        sampled_idx.cpu().numpy())):
                dir_list.append([dir, vid_name])
                dir = os.path.join(features_dir, dir)
                features_writer.write(feature=outputs[i],
                                      video_name=vid_name,
                                      idx=start_frame,
                                      dir=dir)

    features_writer.dump()
    return dir_list
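A hypothetical call; the annotation path is illustrative, and the module-level features_dir must be defined beforehand:

dir_list = c3d_extraction("annotations/train.txt")
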
Example #2
def c3d_single_extraction(input_clips_f, c3d_network=None):
    # Convert the list of frames to a tensor and apply the video transforms.
    input_clips = torch.from_numpy(np.array(input_clips_f))
    transforms = build_transforms()
    input_clips = transforms(input_clips)

    random_seed = 1
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    network_created_here = c3d_network is None
    if network_created_here:
        print("Now building the C3D network")
        c3d_network = C3D()
        # Assumes the current directory ends in 'GUI_stuff'; step up to find
        # the pretrained weights, then restore the working directory.
        home = os.getcwd()
        os.chdir(home[0:-len('GUI_stuff')])
        c3d_network.load_state_dict(torch.load('c3d.pickle'))
        os.chdir(home)
        c3d_network.to(device)
        c3d_network.eval()

    X = input_clips.unsqueeze(0)  # add a batch dimension

    with torch.no_grad():
        c3d_outputs = c3d_network(X.to(device))

        features_writer = FeaturesWriter()
        start_frame = 0
        vid_name = 'test_single_run_output'
        # Single clip, so use c3d_outputs[0] directly; no need to loop over a batch.
        features_writer.write(feature=c3d_outputs[0],
                              video_name=vid_name,
                              start_frame=start_frame,
                              dir="test")

        avg_segments = features_writer.dump_NO_save()  # like dump(), but returns the segments instead of saving them

    if network_created_here:
        return c3d_outputs, avg_segments, device, c3d_network
    else:
        return c3d_outputs, avg_segments
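A hypothetical single-clip call with dummy frames; whether build_transforms() accepts raw uint8 frames like these is an assumption:

# Illustrative only: 16 zero-valued RGB frames stand in for real video frames.
frames = [np.zeros((240, 320, 3), dtype=np.uint8) for _ in range(16)]
c3d_outputs, avg_segments, device, net = c3d_single_extraction(frames)
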
Example #3

def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    args = parser.parse_args()
    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)

    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    cudnn.benchmark = True

    train_loader = VideoIter(dataset_path=args.dataset_path,
                             annotation_path=args.annotation_path,
                             clip_length=args.clip_length,
                             frame_stride=args.frame_interval,
                             video_transform=build_transforms(),
                             name='Features extraction')

    train_iter = torch.utils.data.DataLoader(
        train_loader,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,  # tune this to your machine
        pin_memory=True)

    # Loading network
    # Load the chosen feature extractor; the r3d200 weights only fit the
    # resnet variant, so load them inside that branch.
    if args.feature_extractor == 'c3d':
        network = C3D(pretrained=args.pretrained_c3d)
    elif args.feature_extractor == 'resnet':
        network = resnet(200)
        network.load_state_dict(
            torch.load('network/r3d200_K_200ep.pth')['state_dict'])
    network = network.to(device)

    if not path.exists(args.save_dir):
        mkdir(args.save_dir)

    features_writer = FeaturesWriter()

    for i_batch, (data, target, sampled_idx, dirs,
                  vid_names) in tqdm(enumerate(train_iter)):
        with torch.no_grad():
            outputs = network(data.to(device)).detach().cpu().numpy()

            for i, (dir, vid_name, start_frame) in enumerate(
                    zip(dirs, vid_names,
                        sampled_idx.cpu().numpy())):
                dir = path.join(args.save_dir, dir)
                features_writer.write(feature=outputs[i],
                                      video_name=vid_name,
                                      idx=start_frame,
                                      dir=dir)

    features_writer.dump()
Example #4

def features_extraction(video_path,
                        model,
                        device,
                        batch_size=1,
                        frame_stride=1,
                        clip_length=16,
                        n_segments=32,
                        bar=None):
    """
    Extracts features of the video. The returned features will be returned after averaging over the required number of
    video segments.
    :param video_path: path of the video to predict
    :param model: model to use for feature extraction
    :param device: device to use for loading data
    :param batch_size: batch size to use for loading data
    :param frame_stride: interval between frames to load
    :param clip_length: number of frames to use for loading each video sample
    :param n_segments: how many chunks the video should be divided into
    :return: features list (n_segments, feature_dim), usually (32, 4096) as in the original paper
    """
    data_loader = SingleVideoIter(
        clip_length=clip_length,
        frame_stride=frame_stride,
        video_path=video_path,
        video_transform=build_transforms(mode=args.feature_method),  # NOTE: relies on a module-level args
        return_label=False)
    data_iter = torch.utils.data.DataLoader(
        data_loader,
        batch_size=batch_size,
        shuffle=False,
        num_workers=0,  # tune this to your machine
        pin_memory=True)

    logging.info("Extracting features...")
    features = torch.tensor([])

    if bar is not None:
        bar.setRange(0, len(data_iter))

    with torch.no_grad():
        for i, data in tqdm(enumerate(data_iter)):
            outputs = model(data.to(device)).detach().cpu()
            features = torch.cat([features, outputs])

            if bar is not None:
                bar.setValue(i + 1)

    features = features.numpy()
    return to_segments(features, n_segments)
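to_segments itself is not shown in any of these snippets. A minimal sketch of the per-segment averaging it presumably performs (an illustration, not the repository's implementation), assuming the video yields at least n_segments clip features:

import numpy as np

def to_segments(features, n_segments=32):
    # Split the (n_clips, feature_dim) array into n_segments contiguous
    # chunks and average each chunk, giving (n_segments, feature_dim).
    chunks = np.array_split(np.asarray(features), n_segments, axis=0)
    return np.stack([chunk.mean(axis=0) for chunk in chunks])
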
Example #5
def get_features_loader(dataset_path, clip_length, frame_interval, batch_size,
                        num_workers, mode):
    data_loader = VideoIter(dataset_path=dataset_path,
                            clip_length=clip_length,
                            frame_stride=frame_interval,
                            video_transform=build_transforms(mode),
                            return_label=False)

    data_iter = torch.utils.data.DataLoader(data_loader,
                                            batch_size=batch_size,
                                            shuffle=False,
                                            num_workers=num_workers,
                                            pin_memory=True)

    return data_loader, data_iter
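A hypothetical call to get_features_loader; every path and parameter here is illustrative:

data_loader, data_iter = get_features_loader(dataset_path="data/videos",
                                             clip_length=16,
                                             frame_interval=1,
                                             batch_size=4,
                                             num_workers=2,
                                             mode="c3d")
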
Example #6

def main():
	device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

	args = get_args()
	register_logger(log_file=args.log_file)

	cudnn.benchmark = True

	data_loader = VideoIter(dataset_path=args.dataset_path,
							clip_length=args.clip_length,
							frame_stride=args.frame_interval,
							video_transform=build_transforms(),
							return_label=False)

	data_iter = torch.utils.data.DataLoader(data_loader,
											batch_size=args.batch_size,
											shuffle=False,
											num_workers=args.num_workers,
											pin_memory=True)

	network = C3D(pretrained=args.pretrained_3d)
	if device.type != 'cpu':
		network = torch.nn.DataParallel(network)
	network = network.to(device)

	if not path.exists(args.save_dir):
		mkdir(args.save_dir)

	features_writer = FeaturesWriter(num_videos=data_loader.video_count)
	loop_i = 0
	with torch.no_grad():
		for data, clip_idxs, dirs, vid_names in data_iter:
			outputs = network(data.to(device)).detach().cpu().numpy()

			for i, (dir, vid_name, clip_idx) in enumerate(zip(dirs, vid_names, clip_idxs)):
				if loop_i == 0:
					logging.info(f"Video {features_writer.dump_count} / {features_writer.num_videos} : Writing clip {clip_idx} of video {vid_name}")

				loop_i += 1
				loop_i %= args.log_every

				dir = path.join(args.save_dir, dir)
				features_writer.write(feature=outputs[i],
									  video_name=vid_name,
									  idx=clip_idx,
									  dir=dir)

	features_writer.dump()
Example #7

    # Methods from the GUI's Window class (the class definition itself is not
    # part of this snippet).
    def handle_errors(self):
        self.playBtn.setEnabled(False)
        self.label.setText("Error: " + self.mediaPlayer.errorString())

    def plot(self):
        ax = self.graphWidget.axes
        ax.clear()
        # ax.set_xlim(0, self.mediaPlayer.duration())
        ax.set_ylim(-0.1, 1.1)
        ax.plot(self.y_pred, '*-', linewidth=7)
        self.graphWidget.draw()


if __name__ == '__main__':
    args = get_args()

    anomaly_detector, feature_extractor = load_models(
        args.feature_extractor,
        args.ad_model,
        features_method=args.feature_method,
        device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    )

    transforms = build_transforms(mode=args.feature_method)

    app = QApplication(sys.argv)
    window = Window(args.clip_length, transforms)
    # window.run()

    sys.exit(app.exec_())
Example #8
def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    args = parser.parse_args()
    set_logger(log_file=args.log_file, debug_mode=args.debug_mode)

    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    cudnn.benchmark = True

    train_loader = VideoIterTrain(
        dataset_path=args.dataset_path,
        annotation_path=args.annotation_path,
        clip_length=args.clip_length,
        frame_stride=args.train_frame_interval,
        video_transform=build_transforms(),
        name='train',
        return_item_subpath=False,
    )

    train_iter = torch.utils.data.DataLoader(
        train_loader,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=32,  # tune this to your machine
        pin_memory=True)

    val_loader = VideoIterTrain(
        dataset_path=args.dataset_path,
        annotation_path=args.annotation_path_test,
        clip_length=args.clip_length,
        frame_stride=args.val_frame_interval,
        video_transform=build_transforms(),
        name='val',
        return_item_subpath=False,
    )

    val_iter = torch.utils.data.DataLoader(
        val_loader,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=32,  # tune this to your machine
        pin_memory=True)

    network = C3D(pretrained=args.pretrained_3d)
    network.to(device)

    # NOTE: features_dir is assumed to be defined at module level.
    if not path.exists(features_dir):
        mkdir(features_dir)

    features_writer = FeaturesWriter()

    for i_batch, (data, target, sampled_idx, dirs,
                  vid_names) in tqdm(enumerate(train_iter)):
        with torch.no_grad():
            outputs = network(data.to(device))

            for i, (dir, vid_name, start_frame) in enumerate(
                    zip(dirs, vid_names,
                        sampled_idx.cpu().numpy())):
                dir = path.join(features_dir, dir)
                features_writer.write(feature=outputs[i],
                                      video_name=vid_name,
                                      start_frame=start_frame,
                                      dir=dir)

    features_writer.dump()

    features_writer = FeaturesWriter()
    for i_batch, (data, target, sampled_idx, dirs,
                  vid_names) in tqdm(enumerate(val_iter)):
        with torch.no_grad():
            outputs = network(data.to(device))

            for i, (dir, vid_name, start_frame) in enumerate(
                    zip(dirs, vid_names,
                        sampled_idx.cpu().numpy())):
                dir = path.join(features_dir, dir)
                features_writer.write(feature=outputs[i],
                                      video_name=vid_name,
                                      start_frame=start_frame,
                                      dir=dir)

    features_writer.dump()
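
FeaturesWriter itself never appears in these snippets, and the calls disagree on its signature (some pass idx=, others start_frame=), so the exact interface varies across versions. A minimal sketch of the behavior the snippets rely on, with the buffering scheme and on-disk format as pure assumptions:

import os
from collections import defaultdict

class FeaturesWriter:
    # Sketch only; the real class may differ substantially.
    def __init__(self, num_videos=None):
        self.num_videos = num_videos
        self.dump_count = 0
        self._store = defaultdict(dict)  # (dir, video_name) -> {clip_idx: feature}

    def write(self, feature, video_name, idx, dir):
        # Buffer one feature vector per clip, keyed by its video.
        self._store[(dir, video_name)][int(idx)] = feature

    def dump(self):
        # One text file per video, one clip feature per line (assumed format).
        for (dir_, video_name), clips in self._store.items():
            os.makedirs(dir_, exist_ok=True)
            with open(os.path.join(dir_, video_name + ".txt"), "w") as f:
                for idx in sorted(clips):
                    f.write(" ".join(str(float(x)) for x in clips[idx]) + "\n")
            self.dump_count += 1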