def AD_sigle_perdiction(model_dir, c3d_features, lengths=16, device=None, network=None):
    """Predict per-frame anomaly scores for a single video from its pre-extracted C3D features."""
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if network is None:
        print("Starting the single AD network")
        # Prediction with the pretrained anomaly-detection network
        network = AnomalyDetector()
        network.to(device)
        net = static_model(net=network,
                           criterion=RegularizedLoss(network, custom_objective).cuda(),
                           model_prefix=model_dir)
        model_path = net.get_checkpoint_path(20000)
        net.load_checkpoint(pretrain_path=model_path, epoch=20000)
        net.net.to(device)
    else:
        net = network

    # No annotation file or batch loading (FeaturesLoaderVal / DataLoader) is needed here:
    # the C3D features of a single video are passed in directly.
    c3d_features = torch.from_numpy(c3d_features)
    features = c3d_features.to(device)  # a tensor of 32 4096-D segment features

    with torch.no_grad():
        outputs = net.predict(features)[0]  # (batch_size, 32)
        outputs = outputs.reshape(1, 32)
        for vid_len, output in zip([lengths], outputs.cpu().numpy()):
            # Spread each of the 32 segment scores over its share of the frames.
            segments_len = vid_len // 32
            y_pred = np.zeros(vid_len)
            for i in range(32):
                segment_start_frame = i * segments_len
                segment_end_frame = (i + 1) * segments_len
                y_pred[segment_start_frame:segment_end_frame] = output[i]

    return y_pred
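# Usage sketch for AD_sigle_perdiction (a minimal example, not part of the original
# module): the feature file name, checkpoint directory and frame count below are
# assumed placeholder values.
def _demo_AD_sigle_perdiction():
    feats = np.load("demo_c3d_features.npy")  # assumed shape: (32, 4096) C3D segment features of one video
    scores = AD_sigle_perdiction(model_dir="./exps/model", c3d_features=feats, lengths=960)
    print(scores.shape)  # (960,) -> one anomaly score per frame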
def load_anomaly_detector(ad_model_path, device):
    assert path.exists(ad_model_path)
    logging.info(f"Loading anomaly detector from {ad_model_path}")

    anomaly_detector = pw.System(model=AnomalyDetector(), device=device)
    anomaly_detector.load_model_state(ad_model_path)
    return anomaly_detector.model.eval()
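# Usage sketch for load_anomaly_detector (illustrative only; the weights path is
# an assumed placeholder).
def _demo_load_anomaly_detector():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    ad_model = load_anomaly_detector("exps/ad_model.weights", device)
    print(ad_model)  # the AnomalyDetector module, in eval mode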
def load_models(feature_extractor_path, ad_model_path, features_method='c3d', device='cuda'):
    """
    Loads both the feature extractor and the anomaly detector from the given paths
    :param feature_extractor_path: path of the feature extractor weights to load
    :param ad_model_path: path of the anomaly detector weights to load
    :param features_method: name of the model to use for feature extraction
    :param device: device to use for the models
    :return: anomaly_detector, feature_extractor
    """
    assert path.exists(feature_extractor_path)
    assert path.exists(ad_model_path)

    feature_extractor, anomaly_detector = None, None

    if features_method == 'c3d':
        logging.info(f"Loading feature extractor from {feature_extractor_path}")
        feature_extractor = C3D(pretrained=feature_extractor_path)
    else:
        raise NotImplementedError(f"Features extraction method {features_method} not implemented")

    feature_extractor = feature_extractor.to(device).eval()

    logging.info(f"Loading anomaly detector from {ad_model_path}")
    anomaly_detector = pw.System(model=AnomalyDetector(), device=device)
    anomaly_detector.load_model_state(ad_model_path)
    anomaly_detector = anomaly_detector.model.eval()

    return anomaly_detector, feature_extractor
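# Usage sketch for load_models (illustrative only; both weight paths are assumed
# placeholders). The returned models are already in eval mode.
def _demo_load_models():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    anomaly_detector, feature_extractor = load_models(
        feature_extractor_path="pretrained/c3d.pickle",  # assumed path
        ad_model_path="exps/ad_model.weights",           # assumed path
        features_method='c3d',
        device=device)
    print(type(feature_extractor).__name__, type(anomaly_detector).__name__)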
def network_setup(ad_model_dir='/home/peter/Documents/actionID/AnomalyDetectionCVPR2018-Pytorch-master/short_60_low_mem/exps/model'):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # C3D feature extractor; `pretrained_3d` (the path to the C3D weights) is
    # expected to be defined at module level.
    c3d_network = C3D(pretrained=pretrained_3d)
    c3d_network.to(device)

    print("Starting the single AD network")
    # Prediction with the pretrained anomaly-detection network
    AD_network = AnomalyDetector()
    AD_network.to(device)
    net = static_model(net=AD_network,
                       criterion=RegularizedLoss(AD_network, custom_objective).cuda(),
                       model_prefix=ad_model_dir)
    model_path = net.get_checkpoint_path(20000)
    net.load_checkpoint(pretrain_path=model_path, epoch=20000)
    net.net.to(device)

    return device, c3d_network, net
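# Usage sketch combining network_setup with AD_sigle_perdiction: the already-loaded
# detector is reused through the `network` argument so the checkpoint is not read
# twice. The checkpoint directory, feature file and frame count are assumed
# placeholders.
def _demo_network_setup():
    device, c3d_network, net = network_setup(ad_model_dir="./exps/model")
    feats = np.load("demo_c3d_features.npy")  # assumed shape: (32, 4096)
    scores = AD_sigle_perdiction(model_dir="./exps/model", c3d_features=feats,
                                 lengths=960, device=device, network=net)
    print(scores.shape)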
""" train_iter = torch.utils.data.DataLoader(train_loader, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True) """ eval_iter = torch.utils.data.DataLoader(val_loader, batch_size=args.batch_size, shuffle=True, num_workers=1, pin_memory=True) """ iter_seed = torch.initial_seed() + 100 network = AnomalyDetector() net = model( net=network, criterion=RegularizedLoss(network, custom_objective).to(device), model_prefix=args.model_dir, step_callback_freq=5, save_checkpoint_freq=args.save_frequency, opt_batch_size=args.batch_size, # optional, 60 in the paper ) if torch.cuda.is_available(): net.net.cuda() torch.cuda.manual_seed(args.random_seed) net.net = torch.nn.DataParallel(net.net).cuda() """ In the original paper:
help="path to test annotation") parser.add_argument('--random-seed', type=int, default=1, help='random seed (default: 1)') parser.add_argument('--model-dir', type=str, default="./exps/model", help="set model dir.") parser.add_argument('--classid_path', type=str, default="ClassIDs.txt", help="path to ClassIDs") if __name__ == "__main__": args = parser.parse_args() # load network device = torch.device("cuda" if torch.cuda.is_available() else "cpu") torch.manual_seed(args.random_seed) torch.cuda.manual_seed(args.random_seed) network = AnomalyDetector() network.to(device) net = static_model(net=network, criterion=RegularizedLoss(network, custom_objective).to(device), model_prefix=args.model_dir) model_path = net.get_checkpoint_path(20000) net.load_checkpoint(pretrain_path=model_path, epoch=20000) net.net.to(device) # enable cudnn tune cudnn.benchmark = True # load test data data_loader = FeaturesLoader(features_path=args.features_path, annotation_path=args.annotation_path_test, classid_path=args.classid_path)
device = get_torch_device()
cudnn.benchmark = True  # enable the cudnn autotuner

# Data loader
train_loader = FeaturesDatasetWrapper(features_path=args.features_path,
                                      annotation_path=args.annotation_path)
train_iter = torch.utils.data.DataLoader(train_loader,
                                         batch_size=args.batch_size,
                                         num_workers=0,
                                         pin_memory=True)

# Model
if args.checkpoint is not None and path.exists(args.checkpoint):
    model = TorchModel.load_model(args.checkpoint)
else:
    network = AnomalyDetector(args.feature_dim)
    model = TorchModel(network)

# Training parameters
"""
In the original paper:
lr = 0.01
epsilon = 1e-8
"""
optimizer = torch.optim.Adadelta(network.parameters(), lr=args.lr_base, eps=1e-8)
criterion = RegularizedLoss(network, custom_objective)

# Callbacks
def AD_perdiction(model_dir, dir_list, device=None):
    """Predict per-frame anomaly scores over pre-extracted features using the validation data loader."""
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Prediction with the pretrained anomaly-detection network
    network = AnomalyDetector()
    network.to(device)
    net = static_model(net=network,
                       criterion=RegularizedLoss(network, custom_objective).cuda(),
                       model_prefix=model_dir)
    model_path = net.get_checkpoint_path(20000)
    net.load_checkpoint(pretrain_path=model_path, epoch=20000)
    net.net.to(device)

    # Note: `video_path` and `features_dir` are expected to be module-level variables.
    from annotation_methods import annotatate_file
    annotation_path = annotatate_file(video_path, dir_list, normal=[True, True],
                                      file_name="Demo_anmotation")

    # Run the video features through the network
    data_loader = FeaturesLoaderVal(features_path=features_dir,
                                    annotation_path=annotation_path)
    data_iter = torch.utils.data.DataLoader(data_loader,
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=1,  # 4, change this part accordingly
                                            pin_memory=True)
    print("Loading of data done")

    for features, start_end_couples, feature_subpaths, lengths in tqdm(data_iter):
        # features is a batch where each item is a tensor of 32 4096-D features
        features = features.to(device)
        with torch.no_grad():
            outputs = net.predict(features)[0]  # (batch_size, 32)
            outputs = outputs.reshape(outputs.shape[0], 32)
            for vid_len, couples, output in zip(lengths, start_end_couples, outputs.cpu().numpy()):
                # Ground truth: mark the annotated anomalous frame ranges.
                y_true = np.zeros(vid_len)
                segments_len = vid_len // 32
                for couple in couples:
                    if couple[0] != -1:
                        y_true[couple[0]:couple[1]] = 1

                # Prediction: spread each of the 32 segment scores over its frames.
                y_pred = np.zeros(vid_len)
                for i in range(32):
                    segment_start_frame = i * segments_len
                    segment_end_frame = (i + 1) * segments_len
                    y_pred[segment_start_frame:segment_end_frame] = output[i]

                print(y_true)
                print(y_pred)

    print("Prediction finished")
    return y_pred
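# Usage sketch for AD_perdiction (illustrative only). It relies on the module-level
# `video_path` and `features_dir` variables noted in the function; the model
# directory and directory list below are assumed placeholders.
def _demo_AD_perdiction():
    scores = AD_perdiction(model_dir="./exps/model", dir_list=["demo_video"])
    print(scores[:10])  # per-frame anomaly scores of the last processed video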
if __name__ == "__main__": args = get_args() register_logger(log_file=args.log_file) os.makedirs(args.exps_dir, exist_ok=True) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") cudnn.benchmark = True # enable cudnn tune train_loader = FeaturesDatasetWrapper(features_path=args.features_path, annotation_path=args.annotation_path) train_iter = torch.utils.data.DataLoader(train_loader, batch_size=args.batch_size, num_workers=0, # 4, # change this part accordingly pin_memory=True) network = AnomalyDetector() system = pw.System(model=network, device=device) """ In the original paper: lr = 0.01 epsilon = 1e-8 """ optimizer = torch.optim.Adadelta(network.parameters(), lr=args.lr_base, eps=1e-8) loss_wrapper = pw.loss_wrappers.GenericPointWiseLossWrapper(RegularizedLoss(network, custom_objective)) system.train( loss_wrapper, optimizer, train_data_loader=train_iter,
# Optimizations
device = get_torch_device()
cudnn.benchmark = True  # enable the cudnn autotuner

# Data loader
train_loader = FeaturesLoader(features_path=args.features_path,
                              annotation_path=args.annotation_path)
train_iter = torch.utils.data.DataLoader(train_loader,
                                         batch_size=args.batch_size,
                                         num_workers=8,
                                         pin_memory=True)

# Model
if args.checkpoint is not None and path.exists(args.checkpoint):
    model = TorchModel.load_model(args.checkpoint)
else:
    network = AnomalyDetector(args.feature_dim)
    model = TorchModel(network)

model = model.to(device).train()

# Training parameters
"""
In the original paper:
lr = 0.01
epsilon = 1e-8
"""
optimizer = torch.optim.Adadelta(model.parameters(), lr=args.lr_base, eps=1e-8)
criterion = RegularizedLoss(network, custom_objective).to(device)

# Callbacks
tb_writer = SummaryWriter(log_dir=tb_dir)