def AD_sigle_perdiction(model_dir, c3d_features, lengths=16, device=None, network=None):
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available()
                          else "cpu")
    if network is None:
        print("starting the single AD network")
        # prediction of AD with the pretrained network
        network = AnomalyDetector()
        network.to(device)
        net = static_model(net=network,
                           criterion=RegularizedLoss(network, custom_objective).to(device),
                           model_prefix=model_dir,
                           )
        model_path = net.get_checkpoint_path(20000)
        net.load_checkpoint(pretrain_path=model_path, epoch=20000)
        net.net.to(device)
    else:
        net = network

    # No need for annotation or batch loading of the C3D features here; the caller
    # passes the feature array for a single video directly. The original batch-loading
    # path is kept below for reference.

    # from annotation_methods import annotatate_file
    # annotation_path = annotatate_file(video_path, dir_list, normal=True, file_name="Demo_anmotation")

    # data_loader = FeaturesLoaderVal(features_path=features_dir,
    #                                 annotation_path=annotation_path)
    # data_iter = torch.utils.data.DataLoader(data_loader,
    #                                         batch_size=1,
    #                                         shuffle=False,
    #                                         num_workers=1,  # adjust to the machine
    #                                         pin_memory=True)
    # print("loading of data done")
    # for features, start_end_couples, feature_subpaths, lengths in tqdm(data_iter):
    #     # features is a batch where each item is a tensor of 32 4096-D features
    c3d_features = torch.from_numpy(c3d_features)
    #print(c3d_features.shape)
    features = c3d_features.to(device)
    with torch.no_grad():
        input_var = torch.autograd.Variable(features)
        outputs = net.predict(input_var)[0]  # (batch_size, 32)
        outputs = outputs.reshape(1, 32)
        for vid_len, output in zip([lengths], outputs.cpu().numpy()):
            y_true = np.zeros(vid_len)
            segments_len = vid_len // 32
            # ground-truth filling from the annotation couples is skipped in single-video mode:
            # for couple in couples:
            #     if couple[0] != -1:
            #         y_true[couple[0]: couple[1]] = 1
            y_pred = np.zeros(vid_len)
            for i in range(32):
                segment_start_frame = i * segments_len
                segment_end_frame = (i + 1) * segments_len
                y_pred[segment_start_frame: segment_end_frame] = output[i]

    #print(y_true)
    #print(y_pred)
    #print("it is over")
    return y_pred
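
# Hypothetical usage sketch (not part of the original snippet): score one video's C3D
# feature bag with AD_sigle_perdiction. The feature array and frame count below are
# placeholders; a real caller would pass the (32, 4096) output of the C3D extractor
# and the true number of frames.
def _demo_single_prediction(model_dir="./exps/model"):
    dummy_features = np.random.rand(32, 4096).astype(np.float32)  # stand-in for real C3D features
    frame_scores = AD_sigle_perdiction(model_dir, dummy_features, lengths=480)
    print(frame_scores.shape)  # (480,) -- one anomaly score per frame
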
def network_setup(ad_model_dir='/home/peter/Documents/actionID/AnomalyDetectionCVPR2018-Pytorch-master/short_60_low_mem/exps/model'):
    device = torch.device("cuda" if torch.cuda.is_available()
                          else "cpu")

    c3d_network = C3D(pretrained=pretrained_3d)  # pretrained_3d is expected to be defined at module level
    c3d_network.to(device)

    print("staring the sigle AD networl")
    # pediction of AD with pertrain network
    AD_network = AnomalyDetector()
    AD_network.to(device)
    net = static_model(net=AD_network,
                       criterion=RegularizedLoss(AD_network, custom_objective).to(device),
                       model_prefix=ad_model_dir,
                       )
    model_path = net.get_checkpoint_path(20000)
    net.load_checkpoint(pretrain_path=model_path, epoch=20000)
    net.net.to(device)

    return device, c3d_network, net
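
# Hypothetical usage sketch (not in the original source): load the networks once with
# network_setup() and reuse them across calls by passing the wrapped model back into
# AD_sigle_perdiction via its `network` argument, so the checkpoint is not reloaded per
# video. Assumes the module-level `pretrained_3d` used by network_setup is configured.
def _demo_reuse_networks(model_dir="./exps/model"):
    device, c3d_network, ad_net = network_setup(ad_model_dir=model_dir)
    dummy_features = np.random.rand(32, 4096).astype(np.float32)  # stand-in for real C3D features
    return AD_sigle_perdiction(model_dir, dummy_features, lengths=480,
                               device=device, network=ad_net)
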
Example #3
parser.add_argument('--model-dir', type=str, default="./exps/model",
                    help="set model dir.")
parser.add_argument('--classid_path', type=str, default="ClassIDs.txt",
                    help="path to ClassIDs")

if __name__ == "__main__":
    args = parser.parse_args()

    # load network
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    network = AnomalyDetector()
    network.to(device)
    net = static_model(net=network,
                       criterion=RegularizedLoss(network, custom_objective).to(device),
                       model_prefix=args.model_dir)
    model_path = net.get_checkpoint_path(20000)
    net.load_checkpoint(pretrain_path=model_path, epoch=20000)
    net.net.to(device)

    # enable cudnn tune
    cudnn.benchmark = True

    # load test data
    data_loader = FeaturesLoader(features_path=args.features_path,
                                 annotation_path=args.annotation_path_test,
                                 classid_path=args.classid_path)
    data_iter = torch.utils.data.DataLoader(data_loader,
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=1,  # adjust to the machine
                                            pin_memory=True)
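
    # A hedged sketch of the evaluation loop that typically follows (not the original
    # continuation of this example); the exact items yielded by FeaturesLoader are assumed.
    for batch in data_iter:
        features = batch[0].to(device)  # assumed: first item is the (1, 32, 4096) feature tensor
        with torch.no_grad():
            scores = net.predict(features)[0].reshape(-1, 32)
        print(scores.cpu().numpy())
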
def AD_perdiction(model_dir, dir_list, device=None):

    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # prediction of AD with the pretrained network
    network = AnomalyDetector()
    network.to(device)
    net = static_model(
        net=network,
        criterion=RegularizedLoss(network, custom_objective).to(device),
        model_prefix=model_dir,
    )
    model_path = net.get_checkpoint_path(20000)
    net.load_checkpoint(pretrain_path=model_path, epoch=20000)
    net.net.to(device)

    from annotation_methods import annotatate_file
    # video_path and features_dir are expected to be defined at module level
    annotation_path = annotatate_file(video_path,
                                      dir_list,
                                      normal=[True, True],
                                      file_name="Demo_anmotation")

    # run the video features through the network
    data_loader = FeaturesLoaderVal(features_path=features_dir,
                                    annotation_path=annotation_path)

    data_iter = torch.utils.data.DataLoader(
        data_loader,
        batch_size=1,
        shuffle=False,
        num_workers=1,  # increase (e.g. to 4) to suit the machine
        pin_memory=True)
    print("it is over")

    for features, start_end_couples, feature_subpaths, lengths in tqdm(
            data_iter):
        # features is a batch where each item is a tensor of 32 4096D features
        features = features.to(device)
        with torch.no_grad():
            input_var = torch.autograd.Variable(features)
            outputs = net.predict(input_var)[0]  # (batch_size, 32)
            outputs = outputs.reshape(outputs.shape[0], 32)
            for vid_len, couples, output in zip(lengths, start_end_couples,
                                                outputs.cpu().numpy()):
                y_true = np.zeros(vid_len)
                segments_len = vid_len // 32
                for couple in couples:
                    if couple[0] != -1:
                        y_true[couple[0]:couple[1]] = 1
                y_pred = np.zeros(vid_len)
                for i in range(32):
                    segment_start_frame = i * segments_len
                    segment_end_frame = (i + 1) * segments_len
                    y_pred[segment_start_frame:segment_end_frame] = output[i]

    print(y_true)
    print(y_pred)
    print("it is over")
    return y_pred
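
# Hypothetical usage sketch (paths and directory names are placeholders): AD_perdiction
# reads the module-level globals `video_path` and `features_dir`, so set them before
# calling it.
def _demo_ad_prediction(model_dir="./exps/model"):
    global video_path, features_dir
    video_path = "./demo_videos"        # assumed layout, not from the original repo
    features_dir = "./demo_features"
    return AD_perdiction(model_dir, dir_list=["demo_clip"])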