import cv2
import numpy as np
import torch
from tqdm import tqdm

def AD_perdiction(model_dir, dir_list, features_dir, video_parth, device=None):

    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available()
                              else "cpu")

    # prediction of AD with the pretrained network
    network = AnomalyDetector()
    network.to(device)
    net = static_model(net=network,
                       criterion=RegularizedLoss(network, custom_objective).to(device),
                       model_prefix=model_dir,
                       )
    model_path = net.get_checkpoint_path(20000)
    net.load_checkpoint(pretrain_path=model_path, epoch=20000)
    net.net.to(device)

    from annotation_methods import annotatate_file
    annotation_path = annotatate_file(video_parth, dir_list, normal=True, file_name="Demo_anmotation")

    # running the extracted video features through the network
    data_loader = FeaturesLoaderVal(features_path=features_dir,
                                    annotation_path=annotation_path)

    data_iter = torch.utils.data.DataLoader(data_loader,
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=1,  # 4, # change this part accordingly
                                            pin_memory=True)
    print("loading of data done")

    for features, start_end_couples, feature_subpaths, lengths in tqdm(data_iter):
        # features is a batch where each item is a tensor of 32 4096D features
        print(features.shape)
        features = features.to(device)
        with torch.no_grad():
            outputs = net.predict(features)[0]  # (batch_size, 32)
            outputs = outputs.reshape(outputs.shape[0], 32)
            for vid_len, couples, output in zip(lengths, start_end_couples, outputs.cpu().numpy()):
                y_true = np.zeros(vid_len)
                segments_len = vid_len // 32
                for couple in couples:
                    if couple[0] != -1:
                        y_true[couple[0]: couple[1]] = 1
                y_pred = np.zeros(vid_len)
                for i in range(32):
                    segment_start_frame = i * segments_len
                    segment_end_frame = (i + 1) * segments_len
                    y_pred[segment_start_frame: segment_end_frame] = output[i]

    # y_true / y_pred hold the values of the last video processed in the loop above
    print(y_true)
    print(y_pred)
    return y_pred
def get_video_length(vid_name):
    video_path = vid_name + '.mp4'
    cap = cv2.VideoCapture(video_path)
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.release()
    return length
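
# A minimal usage sketch for the two helpers above; every path and file name
# here is a placeholder/assumption, not taken from the original project.
vid_name = "./videos/Demo_video"                  # hypothetical video path, without the .mp4 suffix
y_pred = AD_perdiction(model_dir="./exps/model",  # hypothetical checkpoint prefix
                       dir_list=["Demo_video"],
                       features_dir="./features",
                       video_parth=vid_name)
print(len(y_pred), get_video_length(vid_name))    # per-frame scores vs. actual frame count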


if __name__ == "__main__":
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available()
                          else "cpu")
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)

    data_loader = FeaturesLoaderVal(features_path=args.features_path,
                                    annotation_path=args.annotation_path)

    data_iter = torch.utils.data.DataLoader(data_loader,
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=1,  # 4, # change this part accordingly
                                            pin_memory=True)

    network = AnomalyDetector()
    network.to(device)
    net = static_model(net=network,
                       criterion=RegularizedLoss(network, custom_objective).to(device),
                       model_prefix=args.model_dir,
                       )
    model_path = net.get_checkpoint_path(20000)
    net.load_checkpoint(pretrain_path=model_path, epoch=20000)
Example #3
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--features_path', type=str,
                        help="path to the extracted features")
    parser.add_argument('--features_file', type=str,
                        help="name of the features file")
    parser.add_argument('--annotation_path', default="Test_Annotation.txt",
                        help="path to annotations")
    parser.add_argument('--model_path', type=str, default="./exps/model.weights",
                        help="path to the trained model weights")
    parser.add_argument('--extractor_type', type=str, default="c3d")
    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()
    device = torch.device("cuda" if torch.cuda.is_available()
                          else "cpu")

    data_loader = FeaturesLoaderVal(features_path=args.features_path,
                                    features_file_name=args.features_file,
                                    annotation_path=args.annotation_path,
                                    extractor_type=args.extractor_type)

    data_iter = torch.utils.data.DataLoader(data_loader,
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=0,  # 4, # change this part accordingly
                                            pin_memory=True)

    model = TorchModel.load_model(args.model_path).to(device).eval()

    # enable cudnn tune
    cudnn.benchmark = True

    y_trues = torch.tensor([])
    y_preds = torch.tensor([])
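
# The example is cut off here. A common continuation (an assumption, not code from
# the original) runs data_iter through the loaded model, spreads each 32-segment
# score vector over the video's frames exactly as in AD_perdiction above, collects
# everything into y_trues / y_preds, and scores them with sklearn's roc_auc_score.
from sklearn.metrics import roc_auc_score

with torch.no_grad():
    for features, start_end_couples, feature_subpaths, lengths in data_iter:
        features = features.to(device)
        outputs = model(features).reshape(-1, 32).cpu()   # assumed output: one score per segment
        for vid_len, couples, output in zip(lengths, start_end_couples, outputs):
            vid_len = int(vid_len)
            segment_len = vid_len // 32
            y_true = torch.zeros(vid_len)
            y_pred = torch.zeros(vid_len)
            for couple in couples:
                if couple[0] != -1:                        # (-1, -1) marks "no anomalous interval"
                    y_true[int(couple[0]): int(couple[1])] = 1
            for i in range(32):
                y_pred[i * segment_len: (i + 1) * segment_len] = output[i]
            y_trues = torch.cat([y_trues, y_true])
            y_preds = torch.cat([y_preds, y_pred])

print("ROC AUC:", roc_auc_score(y_trues.numpy(), y_preds.numpy()))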