def Test(dataset, Recmodel, epoch, w=None, multicore=0):
    u_batch_size = world.config['test_u_batch_size']
    dataset: utils.BasicDataset
    testDict: dict = dataset.testDict
    Recmodel: model.LightGCN
    # eval mode with no dropout
    Recmodel = Recmodel.eval()
    max_K = max(world.topks)
    if multicore == 1:
        pool = multiprocessing.Pool(CORES)
    results = {'precision': np.zeros(len(world.topks)),
               'recall': np.zeros(len(world.topks)),
               'ndcg': np.zeros(len(world.topks))}
    with torch.no_grad():
        users = list(testDict.keys())
        try:
            assert u_batch_size <= len(users) / 10
        except AssertionError:
            print(f"test_u_batch_size is too big for this dataset, try a small one {len(users) // 10}")
        users_list = []
        rating_list = []
        groundTrue_list = []
        auc_record = []
        # ratings = []
        total_batch = (len(users) + u_batch_size - 1) // u_batch_size  # ceil division
        for batch_users in utils.minibatch(users, batch_size=u_batch_size):
            allPos = dataset.getUserPosItems(batch_users)
            groundTrue = [testDict[u] for u in batch_users]
            batch_users_gpu = torch.Tensor(batch_users).long()
            batch_users_gpu = batch_users_gpu.to(world.device)

            rating = Recmodel.getUsersRating(batch_users_gpu)
            #rating = rating.cpu()
            exclude_index = []
            exclude_items = []
            for range_i, items in enumerate(allPos):
                exclude_index.extend([range_i] * len(items))
                exclude_items.extend(items)
            rating[exclude_index, exclude_items] = -(1 << 10)  # mask training positives
            _, rating_K = torch.topk(rating, k=max_K)
            rating = rating.cpu().numpy()
            aucs = [utils.AUC(rating[i], dataset, test_data)
                    for i, test_data in enumerate(groundTrue)]
            auc_record.extend(aucs)
            del rating
            users_list.append(batch_users)
            rating_list.append(rating_K.cpu())
            groundTrue_list.append(groundTrue)
        assert total_batch == len(users_list)
        X = zip(rating_list, groundTrue_list)
        if multicore == 1:
            pre_results = pool.map(test_one_batch, X)
        else:
            pre_results = []
            for x in X:
                pre_results.append(test_one_batch(x))
        for result in pre_results:
            results['recall'] += result['recall']
            results['precision'] += result['precision']
            results['ndcg'] += result['ndcg']
        results['recall'] /= float(len(users))
        results['precision'] /= float(len(users))
        results['ndcg'] /= float(len(users))
        results['auc'] = np.mean(auc_record)
        if world.tensorboard and w is not None:
            w.add_scalars(f'Test/Recall@{world.topks}',
                          {str(world.topks[i]): results['recall'][i] for i in range(len(world.topks))}, epoch)
            w.add_scalars(f'Test/Precision@{world.topks}',
                          {str(world.topks[i]): results['precision'][i] for i in range(len(world.topks))}, epoch)
            w.add_scalars(f'Test/NDCG@{world.topks}',
                          {str(world.topks[i]): results['ndcg'][i] for i in range(len(world.topks))}, epoch)
        if multicore == 1:
            pool.close()
        print(results)
        return results
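Note: utils.AUC above is called as utils.AUC(rating[i], dataset, test_data), but its body is not part of this excerpt. A minimal sketch consistent with that call site, assuming dataset.m_items holds the item count and that unmasked scores are non-negative (e.g. sigmoid outputs), so the -(1 << 10) training-positive mask can be filtered out:

import numpy as np
from sklearn.metrics import roc_auc_score

def AUC(all_item_scores, dataset, test_data):
    # Binary relevance over the whole catalogue: 1 for held-out test items.
    labels = np.zeros((dataset.m_items,))
    labels[test_data] = 1
    # Drop the items masked to -(1 << 10) (training positives) before scoring.
    kept = all_item_scores >= 0
    return roc_auc_score(labels[kept], all_item_scores[kept])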
def test(dataset, Recmodel, epoch, w=None, multicore=0, best_results=None):
    u_batch_size = args.test_batch
    testDict = dataset.testDict
    Recmodel = Recmodel.eval()  # eval mode with no dropout
    max_K = max(args.topks)

    if multicore == 1:
        pool = multiprocessing.Pool(args.n_cores)
    results = {
        'precision': np.zeros(len(args.topks)),
        'recall': np.zeros(len(args.topks)),
        'ndcg': np.zeros(len(args.topks))
    }

    with torch.no_grad():
        users = list(testDict.keys())
        try:
            assert u_batch_size <= len(users) / 10
        except AssertionError:
            print(
                f"test_u_batch_size is too big for this dataset, try a smaller one, e.g. {len(users) // 10}"
            )
        users_list, rating_list, groundTrue_list, auc_record = [], [], [], []
        t0 = time.time()
        total_batch = (len(users) + u_batch_size - 1) // u_batch_size  # ceil division
        for batch_users in utils.minibatch(users, batch_size=u_batch_size):
            allPos = dataset.getUserPosItems(batch_users,
                                             dataset.UserItemNet)  # **********
            groundTrue = [testDict[u] for u in batch_users]
            batch_users_gpu = torch.Tensor(batch_users).long()
            batch_users_gpu = batch_users_gpu.to(args.device)

            rating = Recmodel.getUsersRating(dataset.graph[args.test_graph],
                                             batch_users_gpu)  # **********
            exclude_index = []
            exclude_items = []
            for range_i, items in enumerate(allPos):
                exclude_index.extend([range_i] * len(items))
                exclude_items.extend(items)
            rating[exclude_index, exclude_items] = -(1 << 10)
            _, rating_K = torch.topk(rating, k=max_K)
            rating = rating.cpu().numpy()
            aucs = [
                utils.AUC(rating[i], dataset, test_data)
                for i, test_data in enumerate(groundTrue)
            ]
            auc_record.extend(aucs)
            del rating
            users_list.append(batch_users)
            rating_list.append(rating_K.cpu())
            groundTrue_list.append(groundTrue)
        assert total_batch == len(users_list)
        X = zip(rating_list, groundTrue_list)
        if multicore == 1:
            pre_results = pool.map(test_one_batch, X)
        else:
            pre_results = []
            for x in X:
                pre_results.append(test_one_batch(x))
        for result in pre_results:
            results['recall'] += result['recall']
            results['precision'] += result['precision']
            results['ndcg'] += result['ndcg']
        results['recall'] /= float(len(users))
        results['precision'] /= float(len(users))
        results['ndcg'] /= float(len(users))
        results['auc'] = np.mean(auc_record)
        if args.tensorboard and w is not None:
            w.add_scalars(
                f'Test/Recall@{args.topks}', {
                    str(args.topks[i]): results['recall'][i]
                    for i in range(len(args.topks))
                }, epoch)
            w.add_scalars(
                f'Test/Precision@{args.topks}', {
                    str(args.topks[i]): results['precision'][i]
                    for i in range(len(args.topks))
                }, epoch)
            w.add_scalars(
                f'Test/NDCG@{args.topks}', {
                    str(args.topks[i]): results['ndcg'][i]
                    for i in range(len(args.topks))
                }, epoch)
        if multicore == 1:
            pool.close()

        print('time consumption: %.2f' % (time.time() - t0))
        print('recall:', results['recall'], 'precision:', results['precision'],
              'ndcg:', results['ndcg'], 'auc:', results['auc'])

        if best_results is None or results['recall'][0] > best_results['recall'][0]:
            return results
        else:
            return best_results
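A hypothetical driver loop for this variant, relying on the None guard above on the first epoch (train_one_epoch and args.epochs are illustrative names, not from the source):

best_results = None
for epoch in range(args.epochs):
    train_one_epoch(dataset, Recmodel, epoch)  # hypothetical training step
    best_results = test(dataset, Recmodel, epoch, w=None,
                        multicore=0, best_results=best_results)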
Example #3
def evaluate(test_dataloader,
             generator,
             labels_list,
             videos,
             loss_function,
             dataset,
             test_bboxes,
             frame_height=256,
             frame_width=256,
             is_visual=False,
             mask_labels_path=None,
             save_path=None,
             labels_dict=None):
    # init per-video PSNR accumulators
    psnr_list = {}
    roi_psnr_list = {}
    for key in videos.keys():
        psnr_list[key] = []
        roi_psnr_list[key] = []

    video_num = 0
    frame_index = 0
    label_length = videos[sorted(videos.keys())[video_num]]['length']
    bboxes_list = sorted(os.listdir(test_bboxes))
    bboxes = np.load(os.path.join(test_bboxes, bboxes_list[0]),
                     allow_pickle=True)

    WIDTH, HEIGHT = 640, 360
    # save visualization results
    if is_visual:
        # anomaly mask annotations
        mask_labels = sorted(glob.glob(os.path.join(mask_labels_path, "*")))
        mask_label_list = np.load(mask_labels[0], allow_pickle=True)
        #TODO
        if dataset == 'avenue':
            WIDTH = 640
            HEIGHT = 360
        elif dataset == 'ped2':
            HEIGHT = 240
            WIDTH = 360
        else:
            raise ValueError("no dataset")

        if not os.path.exists(save_path):
            os.makedirs(save_path)
        if not os.path.exists("{}/video".format(save_path)):
            os.makedirs("{}/video".format(save_path))
        if not os.path.exists("{}/psnr".format(save_path)):
            os.makedirs("{}/psnr".format(save_path))

        fourcc = cv2.VideoWriter_fourcc(*'DIVX')
        out = cv2.VideoWriter("{}/video/{}_01.avi".format(save_path,
                                                          dataset), fourcc,
                              25.0, (frame_width * 3 + 20, frame_height))

    # test
    generator.eval()
    for k, imgs in enumerate(tqdm(test_dataloader, desc='test', leave=False)):
        if k == label_length - 4 * (video_num + 1):
            video_num += 1
            label_length += videos[sorted(videos.keys())[video_num]]['length']
            frame_index = 0
            bboxes = np.load(os.path.join(test_bboxes, bboxes_list[video_num]),
                             allow_pickle=True)

            if is_visual:
                out = cv2.VideoWriter(
                    "{}/video/{}_{}.avi".format(
                        save_path, dataset,
                        sorted(videos.keys())[video_num]), fourcc, 25.0,
                    (frame_width * 3 + 20, frame_height))
                mask_label_list = np.load(mask_labels[video_num],
                                          allow_pickle=True)
        imgs = imgs.cuda()
        input = imgs[:, :-1, ]
        target = imgs[:, -1, ]

        # print(input.data.shape)

        outputs = generator(input)  #[c, h, w]
        # print(outputs.data.shape)

        # mse roi
        roi_mse_imgs = roi_mse(outputs, target, loss_function,
                               bboxes[frame_index], frame_height / HEIGHT,
                               frame_width / WIDTH)
        roi_psnr_list[sorted(videos.keys())[video_num]].append(
            utils.psnr(roi_mse_imgs))

        # mse frame
        mse_imgs = loss_function(outputs, target).item()
        psnr_list[sorted(videos.keys())[video_num]].append(
            utils.psnr(mse_imgs))

        if is_visual:
            ################ show predict frame ######################
            real_frame = target.squeeze().data.cpu().numpy().transpose(1, 2, 0)
            predict_frame = outputs.squeeze().data.cpu().numpy().transpose(
                1, 2, 0)
            # diff = cv2.absdiff(real_frame, predict_frame)
            diff = (real_frame - predict_frame)**2
            diff = np.uint8(
                (diff[:, :, 0] + diff[:, :, 1] + diff[:, :, 2]) * 255.0)
            diff = cv2.cvtColor(diff, cv2.COLOR_GRAY2BGR)
            diff = draw_bbox(diff, bboxes[frame_index], 255.0 / HEIGHT,
                             255.0 / WIDTH)

            real_frame = np.uint8(real_frame * 255.0)
            predict_frame = np.uint8(predict_frame * 255.0)

            # add mask label to real img
            mask = np.uint8(mask_label_list[frame_index] * 255.0)
            mask = cv2.resize(mask, (256, 256))
            mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
            mask[:, :, 1] = np.zeros([256, 256])
            mask[:, :, 0] = np.zeros([256, 256])
            real_frame = cv2.addWeighted(real_frame, 1, mask, 0.6, 0)

            compare = np.concatenate(
                [real_frame,
                 np.zeros([256, 20, 3]), predict_frame, diff],
                axis=1)
            # cv2.imshow("real_frame", real_frame)
            # cv2.imshow("diff", diff)
            # cv2.imshow("compare", np.uint8(compare) )
            # cv2.waitKey(1)
            compare = np.uint8(compare)
            # add text
            cv2.putText(
                compare,
                "video: {}, frame:{}, psnr: {}".format(video_num + 1,
                                                       frame_index,
                                                       utils.psnr(mse_imgs)),
                (256 * 2 + 20, 15), cv2.FONT_HERSHEY_COMPLEX, 0.4,
                (200, 255, 255), 1)
            # print("putText")
            out.write(compare)

        frame_index += 1

    # Measuring the abnormality score and the AUC
    # after consideration: normalization should still be done globally, across all videos
    anomaly_score_total_list = []
    roi_anomaly_score_total_list = []
    for video_name in sorted(videos.keys()):
        # anomaly_score_total_list += utils.anomaly_score_list(psnr_list[video_name])
        # roi_anomaly_score_total_list += utils.anomaly_score_list(roi_psnr_list[video_name])
        anomaly_score_total_list += psnr_list[video_name]
        roi_anomaly_score_total_list += roi_psnr_list[video_name]

        if is_visual:
            plt.figure(figsize=(10, 2))
            # plot the PSNR curve
            plt.plot(psnr_list[video_name])
            plt.xlabel('t')
            plt.ylabel('psnr')

            min_ = min(list(psnr_list[video_name]))
            max_ = max(list(psnr_list[video_name]))

            # shade the ground-truth anomaly intervals
            plt.fill_between(np.linspace(0, len(labels_dict[video_name][0]),
                                         len(labels_dict[video_name][0])),
                             np.array(min_),
                             (max_ - min_) * labels_dict[video_name][0] + min_,
                             facecolor='r',
                             alpha=0.3)
            plt.savefig("{}/psnr/{}_{}_frame_psnr.jpg".format(
                save_path, dataset, video_name))
            print(video_name)

            plt.figure(figsize=(10, 2))
            # plot the ROI PSNR curve
            plt.plot(roi_psnr_list[video_name])
            plt.xlabel('t')
            plt.ylabel('psnr')

            min_ = min(list(roi_psnr_list[video_name]))
            max_ = max(list(roi_psnr_list[video_name]))

            # shade the ground-truth anomaly intervals
            plt.fill_between(np.linspace(0, len(labels_dict[video_name][0]),
                                         len(labels_dict[video_name][0])),
                             np.array(min_),
                             (max_ - min_) * labels_dict[video_name][0] + min_,
                             facecolor='r',
                             alpha=0.3)
            plt.savefig("{}/psnr/{}_{}_roi_psnr.jpg".format(
                save_path, dataset, video_name))

    anomaly_score_total_list = utils.anomaly_score_list(
        anomaly_score_total_list)
    roi_anomaly_score_total_list = utils.anomaly_score_list(
        roi_anomaly_score_total_list)

    # TODO
    anomaly_score_total_list = np.asarray(anomaly_score_total_list)
    frame_AUC = utils.AUC(anomaly_score_total_list,
                          np.expand_dims(1 - labels_list, 0))

    roi_anomaly_score_total_list = np.asarray(roi_anomaly_score_total_list)
    roi_AUC = utils.AUC(roi_anomaly_score_total_list,
                        np.expand_dims(1 - labels_list, 0))
    # print('AUC: ' + str(accuracy * 100) + '%')

    return frame_AUC, roi_AUC
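The helpers utils.psnr, utils.anomaly_score_list and the two-argument utils.AUC used above are not shown. Plausible sketches, assuming frames scaled to [0, 1] and the usual min-max normalization from frame-prediction anomaly detection:

import math
import numpy as np
from sklearn import metrics

def psnr(mse):
    # PSNR of a [0, 1]-scaled frame; higher means a better reconstruction.
    return 10 * math.log10(1 / mse)

def anomaly_score_list(psnr_list):
    # min-max normalize a video's PSNR values into [0, 1] regularity scores
    lo, hi = min(psnr_list), max(psnr_list)
    return [(p - lo) / (hi - lo) for p in psnr_list]

def AUC(anomaly_scores, labels):
    # labels arrive as np.expand_dims(1 - labels_list, 0), so 1 marks normal
    # frames, which should coincide with high (normalized) PSNR
    return metrics.roc_auc_score(np.squeeze(labels, axis=0),
                                 np.squeeze(anomaly_scores))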
Example #4
def train(config):
    #### set the save and log path ####
    svname = args.name
    if svname is None:
        svname = config['train_dataset_type'] + '_' + config[
            'generator'] + '_' + config['flow_model']

    if args.tag is not None:
        svname += '_' + args.tag
    save_path = os.path.join('./save', svname)
    utils.set_save_path(save_path)
    utils.set_log_path(save_path)
    writer = SummaryWriter(os.path.join(save_path, 'tensorboard'))
    yaml.dump(config,
              open(os.path.join(save_path, 'classifier_config.yaml'), 'w'))

    #### make datasets ####
    # train
    train_folder = config['dataset_path'] + config[
        'train_dataset_type'] + "/training/frames"
    test_folder = config['dataset_path'] + config[
        'train_dataset_type'] + "/testing/frames"

    # Loading dataset
    train_dataset_args = config['train_dataset_args']
    test_dataset_args = config['test_dataset_args']
    train_dataset = VadDataset(train_folder,
                               transforms.Compose([
                                   transforms.ToTensor(),
                               ]),
                               resize_height=train_dataset_args['h'],
                               resize_width=train_dataset_args['w'],
                               time_step=train_dataset_args['t_length'] - 1)

    test_dataset = VadDataset(test_folder,
                              transforms.Compose([
                                  transforms.ToTensor(),
                              ]),
                              resize_height=test_dataset_args['h'],
                              resize_width=test_dataset_args['w'],
                              time_step=test_dataset_args['t_length'] - 1)

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=train_dataset_args['batch_size'],
        shuffle=True,
        num_workers=train_dataset_args['num_workers'],
        drop_last=True)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=test_dataset_args['batch_size'],
                                 shuffle=False,
                                 num_workers=test_dataset_args['num_workers'],
                                 drop_last=False)

    # for test---- prepare labels
    labels = np.load('./data/frame_labels_' + config['test_dataset_type'] +
                     '.npy')
    if config['test_dataset_type'] == 'shanghai':
        labels = np.expand_dims(labels, 0)
    videos = OrderedDict()
    videos_list = sorted(glob.glob(os.path.join(test_folder, '*')))
    labels_list = []
    label_length = 0
    psnr_list = {}
    for video in sorted(videos_list):
        # video_name = video.split('/')[-1]

        # windows
        video_name = os.path.split(video)[-1]
        videos[video_name] = {}
        videos[video_name]['path'] = video
        videos[video_name]['frame'] = glob.glob(os.path.join(video, '*.jpg'))
        videos[video_name]['frame'].sort()
        videos[video_name]['length'] = len(videos[video_name]['frame'])
        # skip the first 4 frames of each video: with t_length frames per
        # sample, the first t_length - 1 frames get no prediction
        labels_list = np.append(
            labels_list,
            labels[0][4 + label_length:videos[video_name]['length'] +
                      label_length])
        label_length += videos[video_name]['length']
        psnr_list[video_name] = []

    # Model setting
    num_unet_layers = 4
    discriminator_num_filters = [128, 256, 512, 512]

    # for gradient loss
    alpha = 1
    # for int loss
    l_num = 2
    pretrain = False

    if config['generator'] == 'cycle_generator_convlstm':
        ngf = 64
        netG = 'resnet_6blocks'
        norm = 'instance'
        no_dropout = False
        init_type = 'normal'
        init_gain = 0.02
        gpu_ids = []
        generator = define_G(train_dataset_args['c'], train_dataset_args['c'],
                             ngf, netG, norm, not no_dropout, init_type,
                             init_gain, gpu_ids)
    elif config['generator'] == 'unet':
        # generator = UNet(n_channels=train_dataset_args['c']*(train_dataset_args['t_length']-1),
        #                  layer_nums=num_unet_layers, output_channel=train_dataset_args['c'])
        generator = PreAE(train_dataset_args['c'], train_dataset_args['t_length'],
                          **config['model_args'])
    else:
        raise Exception('The generator is not implemented')

    # generator = torch.load('save/avenue_cycle_generator_convlstm_flownet2_0103/generator-epoch-199.pth')

    discriminator = PixelDiscriminator(train_dataset_args['c'],
                                       discriminator_num_filters,
                                       use_norm=False)
    # discriminator = torch.load('save/avenue_cycle_generator_convlstm_flownet2_0103/discriminator-epoch-199.pth')

    # if not pretrain:
    #     generator.apply(weights_init_normal)
    #     discriminator.apply(weights_init_normal)

    # if use flownet
    # if config['flow_model'] == 'flownet2':
    #     flownet2SD_model_path = 'flownet2/FlowNet2_checkpoint.pth.tar'
    #     flow_network = FlowNet2(args).eval()
    #     flow_network.load_state_dict(torch.load(flownet2SD_model_path)['state_dict'])
    # elif config['flow_model'] == 'liteflownet':
    #     lite_flow_model_path = 'liteFlownet/network-sintel.pytorch'
    #     flow_network = Network().eval()
    #     flow_network.load_state_dict(torch.load(lite_flow_model_path))

    # note: the value range differs from the source version and should be adjusted
    lam_int = 1.0 * 2
    lam_gd = 1.0 * 2
    # here we use no flow loss
    lam_op = 0  # 2.0
    lam_adv = 0.05
    adversarial_loss = Adversarial_Loss()
    discriminate_loss = Discriminate_Loss()
    gd_loss = Gradient_Loss(alpha, train_dataset_args['c'])
    op_loss = Flow_Loss()
    int_loss = Intensity_Loss(l_num)
    step = 0

    utils.log('initializing the model with Generator-Unet {} layers, '
              'PixelDiscriminator with filters {}'.format(
                  num_unet_layers, discriminator_num_filters))

    g_lr = 0.0002
    d_lr = 0.00002
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=g_lr)
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=d_lr)

    # # optimizer setting
    # params_encoder = list(generator.encoder.parameters())
    # params_decoder = list(generator.decoder.parameters())
    # params = params_encoder + params_decoder
    # optimizer, lr_scheduler = utils.make_optimizer(
    #     params, config['optimizer'], config['optimizer_args'])
    #
    # loss_func_mse = nn.MSELoss(reduction='none')

    # parallelize across GPUs if available
    if torch.cuda.is_available():
        generator.cuda()
        discriminator.cuda()
        # # if use flownet
        # flow_network.cuda()
        adversarial_loss.cuda()
        discriminate_loss.cuda()
        gd_loss.cuda()
        op_loss.cuda()
        int_loss.cuda()

    if config.get('_parallel'):
        generator = nn.DataParallel(generator)
        discriminator = nn.DataParallel(discriminator)
        # if use flownet
        # flow_network = nn.DataParallel(flow_network)
        adversarial_loss = nn.DataParallel(adversarial_loss)
        discriminate_loss = nn.DataParallel(discriminate_loss)
        gd_loss = nn.DataParallel(gd_loss)
        op_loss = nn.DataParallel(op_loss)
        int_loss = nn.DataParallel(int_loss)

    # Training
    utils.log('Start training')
    max_accuracy = 0
    save_epoch = 5 if config.get('save_epoch') is None else config['save_epoch']
    for epoch in range(config['epochs']):

        generator.train()
        for j, imgs in enumerate(
                tqdm(train_dataloader, desc='train', leave=False)):
            imgs = imgs.cuda()
            input = imgs[:, :-1, ]
            input_last = input[:, -1, ]
            target = imgs[:, -1, ]
            # input = input.view(input.shape[0], -1, input.shape[-2],input.shape[-1])

            # only for debug
            # input0=imgs[:, 0,]
            # input1 = imgs[:, 1, ]
            # gt_flow_esti_tensor = torch.cat([input0, input1], 1)
            # flow_gt = batch_estimate(gt_flow_esti_tensor, flow_network)[0]
            # objectOutput = open('./out_train.flo', 'wb')
            # np.array([80, 73, 69, 72], np.uint8).tofile(objectOutput)
            # np.array([flow_gt.size(2), flow_gt.size(1)], np.int32).tofile(objectOutput)
            # np.array(flow_gt.detach().cpu().numpy().transpose(1, 2, 0), np.float32).tofile(objectOutput)
            # objectOutput.close()
            # break

            # ------- update optim_G --------------
            outputs = generator(input)
            # pred_flow_tensor = torch.cat([input_last, outputs], 1)
            # gt_flow_tensor = torch.cat([input_last, target], 1)
            # flow_pred = batch_estimate(pred_flow_tensor, flow_network)
            # flow_gt = batch_estimate(gt_flow_tensor, flow_network)

            # to use FlowNet2SD, uncomment the flow-estimation block below and the flow loss

            # #### if use flownet ####
            # pred_flow_esti_tensor = torch.cat([input_last.view(-1,3,1,input.shape[-2],input.shape[-1]),
            #                                    outputs.view(-1,3,1,input.shape[-2],input.shape[-1])], 2)
            # gt_flow_esti_tensor = torch.cat([input_last.view(-1,3,1,input.shape[-2],input.shape[-1]),
            #                                  target.view(-1,3,1,input.shape[-2],input.shape[-1])], 2)

            # flow_gt=flow_network(gt_flow_esti_tensor*255.0)
            # flow_pred=flow_network(pred_flow_esti_tensor*255.0)
            ##############################
            # g_op_loss = op_loss(flow_pred, flow_gt) ## flow loss
            g_op_loss = 0
            g_adv_loss = adversarial_loss(discriminator(outputs))

            g_int_loss = int_loss(outputs, target)
            g_gd_loss = gd_loss(outputs, target)
            g_loss = lam_adv * g_adv_loss + lam_gd * g_gd_loss + lam_op * g_op_loss + lam_int * g_int_loss

            optimizer_G.zero_grad()
            g_loss.backward()
            optimizer_G.step()

            train_psnr = utils.psnr_error(outputs, target)

            # ----------- update optim_D -------
            optimizer_D.zero_grad()
            d_loss = discriminate_loss(discriminator(target),
                                       discriminator(outputs.detach()))
            d_loss.backward()
            optimizer_D.step()
            # break
        # lr_scheduler.step()

        utils.log('----------------------------------------')
        utils.log('Epoch:' + str(epoch + 1))
        utils.log('----------------------------------------')
        utils.log("g_loss: {} d_loss {}".format(g_loss, d_loss))
        utils.log('\t gd_loss {}, op_loss {}, int_loss {} ,'.format(
            g_gd_loss, g_op_loss, g_int_loss))
        utils.log('\t train psnr{}'.format(train_psnr))

        # Testing
        utils.log('Evaluation of ' + config['test_dataset_type'])
        for video in sorted(videos_list):
            # video_name = video.split('/')[-1]
            video_name = os.path.split(video)[-1]
            psnr_list[video_name] = []

        generator.eval()
        video_num = 0
        # label_length += videos[videos_list[video_num].split('/')[-1]]['length']
        label_length = videos[os.path.split(
            videos_list[video_num])[1]]['length']
        for k, imgs in enumerate(
                tqdm(test_dataloader, desc='test', leave=False)):
            if k == label_length - 4 * (video_num + 1):
                video_num += 1
                label_length += videos[os.path.split(
                    videos_list[video_num])[1]]['length']
            imgs = imgs.cuda()
            input = imgs[:, :-1, ]
            target = imgs[:, -1, ]
            # input = input.view(input.shape[0], -1, input.shape[-2], input.shape[-1])

            outputs = generator(input)
            mse_imgs = int_loss((outputs + 1) / 2, (target + 1) / 2).item()
            # psnr_list[videos_list[video_num].split('/')[-1]].append(utils.psnr(mse_imgs))
            psnr_list[os.path.split(videos_list[video_num])[1]].append(
                utils.psnr(mse_imgs))

        # Measuring the abnormality score and the AUC
        anomaly_score_total_list = []
        for video in sorted(videos_list):
            # video_name = video.split('/')[-1]
            video_name = os.path.split(video)[1]
            anomaly_score_total_list += utils.anomaly_score_list(
                psnr_list[video_name])

        anomaly_score_total_list = np.asarray(anomaly_score_total_list)
        accuracy = utils.AUC(anomaly_score_total_list,
                             np.expand_dims(1 - labels_list, 0))

        utils.log('The result of ' + config['test_dataset_type'])
        utils.log('AUC: ' + str(accuracy * 100) + '%')

        # Save the model
        if epoch % save_epoch == 0 or epoch == config['epochs'] - 1:
            # torch.save(model, os.path.join(
            #     save_path, 'model-epoch-{}.pth'.format(epoch)))

            torch.save(
                generator,
                os.path.join(save_path,
                             'generator-epoch-{}.pth'.format(epoch)))
            torch.save(
                discriminator,
                os.path.join(save_path,
                             'discriminator-epoch-{}.pth'.format(epoch)))

        if accuracy > max_accuracy:
            max_accuracy = accuracy
            torch.save(generator, os.path.join(save_path, 'generator-max'))
            torch.save(discriminator,
                       os.path.join(save_path, 'discriminator-max'))

        utils.log('----------------------------------------')

    utils.log('Training is finished')
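Intensity_Loss and Gradient_Loss are imported elsewhere in this project; below is a sketch of the common formulations from future-frame-prediction work, under the assumption that l_num is the intensity-norm exponent and alpha the gradient-norm exponent (the channels argument is kept only for interface parity):

import torch
import torch.nn as nn

class Intensity_Loss(nn.Module):
    # mean l_num-norm distance between predicted and real frames
    def __init__(self, l_num):
        super().__init__()
        self.l_num = l_num

    def forward(self, gen_frames, gt_frames):
        return torch.mean(torch.abs((gen_frames - gt_frames) ** self.l_num))

class Gradient_Loss(nn.Module):
    # penalizes mismatch between the spatial gradients of the two frames
    def __init__(self, alpha, channels):
        super().__init__()
        self.alpha = alpha  # channels unused in this sketch

    def forward(self, gen_frames, gt_frames):
        def gradients(x):
            dx = torch.abs(x[:, :, :, :-1] - x[:, :, :, 1:])
            dy = torch.abs(x[:, :, :-1, :] - x[:, :, 1:, :])
            return dx, dy

        gen_dx, gen_dy = gradients(gen_frames)
        gt_dx, gt_dy = gradients(gt_frames)
        return (torch.mean(torch.abs(gt_dx - gen_dx) ** self.alpha) +
                torch.mean(torch.abs(gt_dy - gen_dy) ** self.alpha))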
Example #5
def runModel(datagen, model, optimizer, class_wts, process, batch_size,
             n_batches, loss_wts):
    '''
    process : 'trn', 'val' or 'tst'
    '''
    running_loss = 0
    pred_list = []
    label_list = []
    soft_pred_list = []
    all_file_list = []
    with trange(n_batches, desc=process, ncols=100) as t:
        for m in range(n_batches):
            data, labels, filenames = datagen.__next__()
            labels_one_hot = utils.get_one_hot(labels).cuda()
            if process == 'trn':
                optimizer.zero_grad()
                model.train()
                pred, aux_pred = model.forward(data)
                pred = F.softmax(pred, 1)
                aux_pred = F.softmax(aux_pred, 1)
                loss = 0
                for i in range(2):
                    loss += (loss_wts[0] * utils.weightedBCE(
                                 class_wts[i], pred[:, i],
                                 labels_one_hot[:, i])
                             + loss_wts[1] * utils.weightedBCE(
                                 class_wts[i], aux_pred[:, i],
                                 labels_one_hot[:, i]))
                # stop in the debugger before backprop if the loss diverged
                if torch.isnan(loss):
                    pdb.set_trace()
                loss.backward()
                optimizer.step()
            elif process in ('val', 'tst'):
                model.eval()
                with torch.no_grad():
                    pred = F.softmax(model.forward(data), 1)
                    loss = utils.weightedBCE(class_wts[0], pred[:, 0],
                                             labels_one_hot[:, 0])\
                        + utils.weightedBCE(class_wts[1], pred[:, 1],
                                            labels_one_hot[:, 1])
            running_loss += loss.item()  # accumulate a float, not the graph
            hard_pred = torch.argmax(pred, 1)
            pred_list.append(hard_pred.cpu())
            soft_pred_list.append(pred.detach().cpu())
            label_list.append(labels.cpu())
            all_file_list += filenames
            t.set_postfix(loss=running_loss /
                          (float(m + 1) * batch_size))
            t.update()
        finalLoss = running_loss / (float(m + 1) * batch_size)
        # if process != 'trn':
        #     pred_list, soft_pred_list, label_list = utils.test_time_aug(
        #                                                     all_file_list,
        #                                                     soft_pred_list,
        #                                                     label_list, 3)
        acc = utils.globalAcc(pred_list, label_list)
        if not isinstance(pred_list, torch.Tensor):
            f1 = sklearn.metrics.f1_score(torch.cat(label_list),
                                          torch.cat(pred_list),
                                          labels=None)
        else:
            f1 = sklearn.metrics.f1_score(label_list, pred_list, labels=None)
        auroc, auprc, fpr_tpr_arr, precision_recall_arr = utils.AUC(
            soft_pred_list, label_list)
        metrics = Metrics(finalLoss, acc, f1, auroc, auprc, fpr_tpr_arr,
                          precision_recall_arr)
        utils.save_preds(soft_pred_list, pred_list, label_list, all_file_list,
                         args.savename, process)
        return metrics
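utils.AUC is unpacked into four values here. A sketch consistent with that, assuming soft_pred_list holds per-batch (N, 2) softmax tensors and class 1 is the positive class:

import numpy as np
import sklearn.metrics
import torch

def AUC(soft_pred_list, label_list):
    scores = torch.cat(soft_pred_list)[:, 1].numpy()  # P(positive class)
    labels = torch.cat(label_list).numpy()
    fpr, tpr, _ = sklearn.metrics.roc_curve(labels, scores)
    prec, rec, _ = sklearn.metrics.precision_recall_curve(labels, scores)
    auroc = sklearn.metrics.auc(fpr, tpr)
    auprc = sklearn.metrics.auc(rec, prec)
    return auroc, auprc, np.stack([fpr, tpr]), np.stack([prec, rec])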
Example #6
def getMetrics(self, soft_pred_list, pred_list, label_list):
    acc = utils.globalAcc(pred_list, label_list)
    F1 = sklearn.metrics.f1_score(label_list, pred_list, labels=None)
    auc, auprc, auc_params, prc_params = utils.AUC(soft_pred_list,
                                                   label_list)
    return acc, F1, auc, auprc
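utils.globalAcc is likewise not shown; a minimal sketch that accepts either lists of batch tensors or pre-concatenated tensors, matching both call sites above:

import torch

def globalAcc(pred_list, label_list):
    # epoch-level accuracy over all predictions (assumed helper)
    if not isinstance(pred_list, torch.Tensor):
        pred_list, label_list = torch.cat(pred_list), torch.cat(label_list)
    return (pred_list == label_list).float().mean().item()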
Example #7
def run(inv_resize_factor, rep):

    tf.reset_default_graph()

    folder = str(inv_resize_factor) + "_INbreast_patches"

    x = tf.placeholder(tf.float32, [None, 36, 36, 1])
    phase_train = tf.placeholder(tf.bool, [])
    y = tf.placeholder(tf.float32, [None, 2])

    logits = cnn_models.detector36(x, phase_train).out

    pred = tf.nn.softmax(logits)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)
    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    results = []

    train_sets = [
        train_batcher("/home/eduardo/tese/data/" + folder + "/tr_neg_0.npy"),
        train_batcher("/home/eduardo/tese/data/" + folder + "/tr_pos_0.npy")
    ]

    test_sets = [
        test_batcher("/home/eduardo/tese/data/" + folder + "/va_neg_0.npy"),
        test_batcher("/home/eduardo/tese/data/" + folder + "/va_pos_0.npy")
    ]

    test_sets[0].images = np.concatenate(
        (test_sets[0].images,
         np.load("/home/eduardo/tese/data/" + folder + "/te_neg_0.npy")))

    test_sets[1].images = np.concatenate(
        (test_sets[1].images,
         np.load("/home/eduardo/tese/data/" + folder + "/te_pos_0.npy")))

    number_of_iter = 10000

    train_loss, train_acc = 0, 0
    test_loss, test_acc = 0, 0

    board = tensorboard_wrapper("heyou")

    saver = tf.train.Saver()

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        #writer = tf.summary.FileWriter("/home/eduardo/tese/logs/tensorflow/ex"+str(inv_resize_factor)+"_"+str(rep), graph=sess.graph)

        saver.restore(sess, "/home/eduardo/tese/models/last_model-1638")

        training_iter = 0
        for ite in range(number_of_iter):

            batchx = np.concatenate((train_sets[0].next_batch(16),
                                     train_sets[1].next_batch(16)))[:, :, :,
                                                                    np.newaxis]
            batchy = np.zeros((32, 2))
            batchy[0:16, 0] = 1
            batchy[16:32, 1] = 1
            training_iter += 1
            #test1,test2 = (sess.run( [logits,modelt],feed_dict={x:batchx,y:batchy}))
            l1, _, l2 = sess.run([loss, train_step, accuracy],
                                 feed_dict={
                                     x: batchx,
                                     y: batchy,
                                     phase_train: True
                                 })
            #print(test2)
            train_loss += l1
            train_acc += l2

            sys.stdout.write("\r \x1b[K Iteration: " + str(ite))
            sys.stdout.flush()

            if train_sets[1].finished():
                train_loss /= training_iter
                train_acc /= training_iter

                count_preds = 0
                preds = np.zeros(test_sets[0].size() + test_sets[1].size())
                labels = np.ones(test_sets[0].size() + test_sets[1].size())
                labels[0:test_sets[0].size()] = 0

                for set_ in range(2):
                    test_sets[set_].reset()
                    running = True

                    while running:
                        batchx, weights = test_sets[set_].next_batch(128)
                        batchy = np.zeros((batchx.shape[0], 2))
                        batchy[:, set_] = 1

                        l1, l2, preds_local = sess.run(
                            [loss, accuracy, pred],
                            feed_dict={
                                x: batchx[:, :, :, np.newaxis],
                                y: batchy,
                                phase_train: False
                            })
                        preds[count_preds:count_preds +
                              batchx.shape[0]] = preds_local[:, 1]
                        count_preds += batchx.shape[0]

                        test_loss += l1 * weights / 2
                        test_acc += l2 * weights / 2
                        running = not test_sets[set_].finished()

                test_auc, _, _ = utils.AUC(preds, labels)
                print("\n", "{:1.3f}".format(train_loss),
                      "{:1.3f}".format(train_acc), "{:1.3f}".format(test_loss),
                      "{:1.3f}".format(test_acc), "{:1.3f}".format(test_auc),
                      "\n")

                board.add_summary(
                    sess,
                    [train_loss, train_acc, test_loss, test_acc, test_auc],
                    ite)

                results = results + [
                    train_loss, train_acc, test_loss, test_acc, test_auc
                ]
                train_loss, train_acc = 0, 0
                test_loss, test_acc = 0, 0
                training_iter = 0

                saver.save(sess,
                           "/home/eduardo/tese/models/last_model",
                           global_step=ite)

    pickle.dump(
        results,
        open(
            "/home/eduardo/tese/logs/experiment_results" +
            str(inv_resize_factor) + "_" + str(rep), "wb"))
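In this TensorFlow example utils.AUC returns three values (test_auc, _, _). A sketch matching that shape, assuming the extra returns are the ROC curve points:

import sklearn.metrics

def AUC(preds, labels):
    # ROC-AUC plus the raw curve, e.g. for plotting
    fpr, tpr, _ = sklearn.metrics.roc_curve(labels, preds)
    return sklearn.metrics.auc(fpr, tpr), fpr, tpr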
Example #8
def test(dataset, testDict, Recmodel):
    u_batch_size = args.test_batch
    Recmodel = Recmodel.eval()  # eval mode with no dropout
    max_K = max(args.topks)

    results = {'precision': np.zeros(len(args.topks)),
               'recall': np.zeros(len(args.topks)),
               'ndcg': np.zeros(len(args.topks))}

    with torch.no_grad():
        users = list(testDict.keys())
        users_list, rating_list, groundTrue_list, auc_record = [], [], [], []
        total_batch = (len(users) + u_batch_size - 1) // u_batch_size  # ceil division

        for batch_users in utils.minibatch(users, batch_size=u_batch_size):
            allPos = dataset.getUserPosItems(batch_users)  # batch user pos in train
            groundTrue = [testDict[u] for u in batch_users]  # batch user pos in test
            batch_users_gpu = torch.Tensor(batch_users).long()
            batch_users_gpu = batch_users_gpu.to(args.device)
            rating = Recmodel.getUsersRating(dataset.graph, batch_users_gpu)  # batch user rating / have used sigmoid

            # mask pos in train
            exclude_index = []
            exclude_items = []
            for range_i, items in enumerate(allPos):
                exclude_index.extend([range_i] * len(items))
                exclude_items.extend(items)
            rating[exclude_index, exclude_items] = -(1 << 10)

            # get top k rating item index
            _, rating_K = torch.topk(rating, k=max_K)
            rating_K = rating_K.cpu().numpy()

            # compute auc
            rating = rating.cpu().numpy()
            aucs = [utils.AUC(rating[i], test_data) for i, test_data in enumerate(groundTrue)]
            auc_record.extend(aucs)

            # store batch
            del rating
            users_list.append(batch_users)
            rating_list.append(rating_K)
            groundTrue_list.append(groundTrue)

        # compute metric
        assert total_batch == len(users_list)
        pre_results = []
        for rl, gt in zip(rating_list, groundTrue_list):
            batch_ks_result = np.zeros((3, len(args.topks)), dtype=np.float32)
            for i, k in enumerate(args.topks):
                # test_batch returns (precision, recall, ndcg) at this cutoff
                batch_ks_result[:, i] = np.array(utils.test_batch(rl[:, :k], gt),
                                                 dtype=np.float32)
            pre_results.append(batch_ks_result)
        for result in pre_results:
            results['precision'] += result[0]
            results['recall'] += result[1]
            results['ndcg'] += result[2]
        results['precision'] /= float(len(users))
        results['recall'] /= float(len(users))
        results['ndcg'] /= float(len(users))
        results['auc'] = np.mean(auc_record)

        return results
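utils.test_batch above must return batch-summed (precision, recall, ndcg) for one cutoff k, since the caller divides the accumulated sums by the total user count. A sketch under that assumption:

import numpy as np

def test_batch(topk_items, ground_true):
    # topk_items: (batch, k) array of recommended item ids
    # ground_true: list of per-user positive-item lists (assumed non-empty)
    k = topk_items.shape[1]
    hits = np.array([[item in set(gt) for item in row]
                     for row, gt in zip(topk_items, ground_true)],
                    dtype=np.float32)
    precision = hits.sum() / k
    recall = (hits.sum(axis=1) /
              np.array([len(gt) for gt in ground_true])).sum()
    # binary-relevance NDCG: DCG of the hit vector over the ideal DCG
    discounts = 1.0 / np.log2(np.arange(2, k + 2))
    dcg = (hits * discounts).sum(axis=1)
    idcg = np.array([discounts[:min(len(gt), k)].sum() for gt in ground_true])
    return precision, recall, (dcg / idcg).sum()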