def features(self, split="train"):
        """Extract flattened network features for every sample of *split* and
        dump them to ``<features_dir>/<split>.pkl``.

        Each record is ``[index, label, label, feature, feature]`` — the label
        and feature are intentionally stored twice so the layout matches the
        five-field records produced by ``vis`` (idx, label, cluster, logits,
        l2norm).
        """
        Tools.print()
        Tools.print("Vis ...")

        # Pick the data loader matching the requested split (default: train).
        if split == "val":
            loader = self.val_loader
        elif split == "test":
            loader = self.test_loader
        else:
            loader = self.train_loader

        self.net.eval()
        feature_list = []
        for image_transform, label, idx in tqdm(loader):
            out = self.net(self.to_cuda(image_transform))
            out = out.view(out.shape[0], -1)
            for i in range(len(idx)):
                feature_list.append([
                    int(idx[i]),
                    int(label[i]),
                    int(label[i]),
                    np.array(out[i].cpu().detach().numpy()),
                    np.array(out[i].cpu().detach().numpy()),
                ])

        Tools.write_to_pkl(
            os.path.join(self.config.features_dir, "{}.pkl".format(split)),
            feature_list)
# --- Example #2 ---
    def vis(self, split="train"):
        """Run the IC model over *split*, pickling one record per sample
        (index, label, cluster id, logits, l2norm features) and saving each
        image as a PNG into a folder named after its predicted cluster."""
        Tools.print()
        Tools.print("Vis ...")

        # Pick the data loader matching the requested split (default: train).
        if split == "val":
            loader = self.val_loader
        elif split == "test":
            loader = self.test_loader
        else:
            loader = self.train_loader

        self.ic_model.eval()
        feature_list = []
        for image_transform, image, label, idx in tqdm(loader):
            ic_out_logits, ic_out_l2norm = self.ic_model(self.to_cuda(image_transform))

            # NCHW float tensor -> HWC uint8 array so PIL can save it.
            image_data = np.asarray(image.permute(0, 2, 3, 1) * 255, np.uint8)
            cluster_id = np.asarray(torch.argmax(ic_out_logits, -1).cpu())
            for i in range(len(idx)):
                record = [int(idx[i]), int(label[i]), int(cluster_id[i]),
                          np.array(ic_out_logits[i].cpu().detach().numpy()),
                          np.array(ic_out_l2norm[i].cpu().detach().numpy())]
                feature_list.append(record)

                # One sub-directory per cluster id; file name encodes label+index.
                result_path = Tools.new_dir(os.path.join(Config.vis_dir, split, str(cluster_id[i])))
                Image.fromarray(image_data[i]).save(os.path.join(result_path, "{}_{}.png".format(label[i], idx[i])))

        Tools.write_to_pkl(os.path.join(Config.vis_dir, "{}.pkl".format(split)), feature_list)
# --- Example #3 ---
    def save_person_result(self, result_path, thr=0.90):
        """Relabel confident "person" predictions in the validation set and
        persist the updated image list.

        For every sample whose ground-truth label is not 1 but which the
        network classifies as class 1 with softmax confidence above ``thr``,
        the first field of its image-list entry is set to 1.

        Args:
            result_path: destination pickle path for the updated list.
            thr: confidence threshold for flipping a label; the default 0.90
                matches the previously hard-coded value, so existing callers
                are unaffected.
        """
        self.net.eval()
        with torch.no_grad():
            count = 0
            # Deep-copy the per-image entries so the dataset itself is untouched.
            images_list = [list(one) for one in
                           self.data_loader_person_val_all.dataset.images_list]
            for i, (inputs, labels, indexes) in tqdm(
                    enumerate(self.data_loader_person_val_all),
                    total=len(self.data_loader_person_val_all)):
                logits = self.net(inputs.float().cuda()).detach().cpu()
                logits = torch.softmax(logits, dim=1)
                net_out = torch.argmax(logits, dim=1).numpy()
                labels = labels.numpy()

                for out_one, logit_one, label_one, index_one in zip(
                        net_out, logits, labels, indexes):
                    # Flip only confident predictions that contradict the label.
                    if label_one != 1 and out_one == 1 and logit_one[1] > thr:
                        images_list[index_one][0] = 1
                        count += 1
        Tools.write_to_pkl(result_path, _data=images_list)
        Tools.print("change num = {}".format(count))
    def eval_mlc_cam_1(self):
        """Run the multi-label-classification net over the CAM dataset and
        write one pickle per image holding its labels, the labels chosen for
        CAM generation, and the generated CAMs.

        Output: for each image, ``<mlc_cam_pkl_dir>/<path-under-Data/DET/>``
        with the ``.JPEG`` suffix replaced by ``.pkl``.
        """
        net = self.config.Net(num_classes=self.config.mlc_num_classes).cuda()

        # Only the third split returned here (the CAM dataset) is needed.
        _, _, dataset_cam = DatasetUtil.get_dataset_by_type(
            DatasetUtil.dataset_type_mlc,
            image_size=self.config.mlc_size,
            data_root=self.config.data_root_path,
            return_image_info=True,
            sampling=self.config.sampling)
        data_loader_cam = DataLoader(dataset_cam,
                                     self.config.mlc_batch_size,
                                     shuffle=False,
                                     num_workers=16)

        Tools.print("Load model form {}".format(self.config.model_file_name))
        self.load_model(net=net, model_file_name=self.config.model_file_name)

        net.eval()
        with torch.no_grad():
            for _, (inputs, labels,
                    image_paths) in tqdm(enumerate(data_loader_cam),
                                         total=len(data_loader_cam)):
                inputs_cuda = inputs.float().cuda()
                # is_vis=True makes the net also return the feature maps.
                logits, out_features = net.forward(inputs_cuda, is_vis=True)
                logits = logits.detach().cpu().numpy()

                # Label selection strategy: pick which classes get a CAM.
                label_for_cam = self.label_select_strategy(
                    logits=logits,
                    image_labels=labels.numpy(),
                    thr=self.config.top_k_thr)
                # Generate CAMs from the classifier head weights and features.
                cam_list = self.generate_cam(net.head_linear.weight,
                                             features=out_features,
                                             indexes=label_for_cam)

                for input_index, input_one in enumerate(inputs):
                    image_path_one = image_paths[input_index]
                    # Mirror the path below "Data/DET/" inside the pkl dir.
                    now_name = image_path_one.split("Data/DET/")[1]
                    result_filename = Tools.new_dir(
                        os.path.join(self.config.mlc_cam_pkl_dir, now_name))

                    label_one = labels[input_index].numpy()
                    label_for_cam_one = label_for_cam[input_index]
                    cam_one = cam_list[input_index]
                    Tools.write_to_pkl(_path=result_filename.replace(
                        ".JPEG", ".pkl"),
                                       _data={
                                           "label": label_one,
                                           "image_path": image_path_one,
                                           "label_for_cam": label_for_cam_one,
                                           "cam": cam_one
                                       })
                    pass

                pass
            pass

        pass
def deal(cam_path, voc12_root, result_file="train_ss.pkl"):
    """Filter the image list by CAM mask statistics and pickle the survivors
    to ``<cam_path>/<result_file>``."""
    image_info_list = read_image_info(voc12_root)
    Tools.print("{}".format(len(image_info_list)))

    result_image_info_list = get_train_image_info(image_info_list, cam_path=cam_path)

    result_path = os.path.join(cam_path, result_file)
    Tools.write_to_pkl(result_path, result_image_info_list)
    Tools.print("{} {}".format(len(result_image_info_list), result_path))
# --- Example #6 ---
 def main(self, save=True):
     """Crawl the danmaku pipeline (mid data -> video aids -> cids ->
     danmaku list) and optionally pickle the result to ``self.save_path``.

     Returns the list of fetched danmaku.
     """
     Tools.print("begin get_mid_data")
     self.get_mid_data()  # return value was never used; kept for side effects
     Tools.print("begin get_video_aid_list")
     aid_list = self.get_video_aid_list()
     Tools.print("begin get_video_cid_list")
     cid_list = self.get_video_cid_list(aid_list)
     Tools.print("begin get_video_danmaku_list")
     danmaku_list = self.get_video_danmaku_list(cid_list)
     if save:
         Tools.print("begin save to {}".format(self.save_path))
         Tools.write_to_pkl(Tools.new_dir(self.save_path), danmaku_list)
     return danmaku_list
    def main(self):
        """Embed features in 2-D with t-SNE (result cached as a pickle) and
        save the scatter plot to ``self.config.result_png``."""
        tsne = TSNE(n_components=2)

        Tools.print("begin to fit_transform {}".format(self.config.result_png))
        if os.path.exists(self.config.result_pkl):
            # Reuse a previously computed embedding.
            Tools.print("exist pkl, and now to load")
            result = Tools.read_from_pkl(self.config.result_pkl)
        else:
            Tools.print("not exist pkl, and now to fit")
            data, label = self.deal_feature()
            result = {"fit": tsne.fit_transform(data), "label": label}
            Tools.write_to_pkl(self.config.result_pkl, result)

        Tools.print("begin to embedding")
        fig = self.plot_embedding(result["fit"], result["label"])

        Tools.print("begin to save")
        plt.savefig(self.config.result_png)
        # plt.show(fig)
# --- Example #8 ---
    # NOTE(review): fragment — the enclosing function (presumably
    # get_train_image_info) and `result_image_info_list` start before this
    # excerpt. The loop filters images by the class-pixel distribution of
    # their CAM segmentation masks.
    for index, image_one in tqdm(enumerate(image_info_list),
                                 total=len(image_info_list)):
        # The CAM mask mirrors the image path under cam_path (PNG vs JPEG).
        cam_name = os.path.join(
            cam_path,
            image_one[1].split("Data/DET/")[1].replace(".JPEG", ".png"))
        im = np.asarray(Image.open(cam_name))
        size = im.shape[0] * im.shape[1]
        # Flatten in place so Counter sees one value per pixel.
        # NOTE(review): np.asarray can return a non-owning, read-only array;
        # ndarray.resize raises ValueError in that case — confirm whether a
        # copy (np.array) or reshape is needed here.
        im.resize((size, ))
        counter = collections.Counter(im.tolist())
        keys = sorted(list(counter.keys()))
        now_list = [counter[key] / size for key in keys]

        # Keep the image only when pixel value 0 is present and covers
        # between 16% and 96% of the mask.
        if not (keys[0] != 0 or now_list[0] > 0.96 or now_list[0] < 0.16):
            result_image_info_list.append(image_one)
            pass
        pass
    return result_image_info_list


# Stand-alone run: filter the training image list by CAM mask statistics
# and pickle the surviving entries next to the CAM results.
cam_path = "/media/ubuntu/4T/ALISURE/USS/ConTa/pseudo_mask/result/2/sem_seg"
voc12_root = "/media/ubuntu/4T/ALISURE/Data/L2ID/data"

image_info_list = read_image_info(voc12_root)
Tools.print("{}".format(len(image_info_list)))

result_image_info_list = get_train_image_info(image_info_list, cam_path=cam_path)

result_pkl = os.path.join(cam_path, "train_ss.pkl")
Tools.write_to_pkl(result_pkl, result_image_info_list)
Tools.print("{} {}".format(len(result_image_info_list), result_pkl))
                          int(_bnd_box[2].text), int(_bnd_box[3].text))
            # NOTE(review): fragment — `now_info`, `object_name` and the
            # bounding box are built earlier in this (truncated) method.
            # Map the object name to its numeric label id and metadata.
            label_id = name_to_label_id[object_name]
            label_info = label_info_dict[label_id]

            # Each object record: [name, box, numeric id, label metadata].
            now_info["object"].append(
                [object_name, object_box, label_id, label_info])
            pass
        return now_info

    pass


if __name__ == '__main__':
    # Parse every DET training annotation XML and pickle the parsed records.
    data_root = "/media/ubuntu/4T/ALISURE/Data/L2ID/data"
    result_path = Tools.new_dir(
        os.path.join(data_root, "deal", "label_info_list.pkl"))

    # Class-name lookup tables consumed by DealData.parse_xml.
    name_to_label_id, label_info_dict = DealData.get_class_name(
        os.path.join(data_root, "meta_det.mat"))
    xml_files = DealData.get_all_xml(
        os.path.join(data_root, "ILSVRC2017_DET/ILSVRC/Annotations/DET/train"))

    label_info_list = [DealData.parse_xml(xml_file=xml_file)
                       for xml_file in tqdm(xml_files, total=len(xml_files))]

    Tools.write_to_pkl(_path=result_path, _data=label_info_list)
from alisuretool.Tools import Tools
from deal_data_0_global_info import get_data_root_path


if __name__ == '__main__':
    # Attach the on-disk image path to every parsed annotation record and
    # drop records that contain no objects.
    data_root = get_data_root_path()
    image_root_path = os.path.join(data_root, "ILSVRC2017_DET/ILSVRC/Data/DET/train")

    label_info_path = os.path.join(data_root, "deal", "label_info_list.pkl")
    label_info_list = Tools.read_from_pkl(label_info_path)
    image_info_path = os.path.join(data_root, "deal", "image_info_list2.pkl")

    image_info_list = []
    for i, label_info in tqdm(enumerate(label_info_list), total=len(label_info_list)):
        source = label_info["folder"]
        # The 2013 snapshot stores images under e.g. "ILSVRC2013_train/<folder>".
        if "2013" in label_info["source"]:
            source = os.path.join("{}_train".format(label_info["source"].replace("_", "")), source)
        image_path = os.path.join(image_root_path, source, "{}.JPEG".format(label_info["filename"]))

        if not os.path.exists(image_path):
            Tools.print(image_path)  # report missing files but keep going

        label_info["image_path"] = image_path
        image_info_list.append(label_info)

    image_info_list = [one for one in image_info_list if len(one["object"]) > 0]
    Tools.write_to_pkl(_path=image_info_path, _data=image_info_list)
# --- Example #11 ---
    # NOTE(review): fragment — the enclosing function's signature (and the
    # `is_png` name used below) begins before this excerpt.
    dataset_name = MyDataset.dataset_name_omniglot

    data_root = MyDataset.get_data_root(dataset_name=dataset_name,
                                        is_png=is_png)
    Tools.print(data_root)

    # Features are stored beside the data root, in "<data_root>_feature".
    features_save_path = Tools.new_dir("{}_feature".format(data_root))
    Tools.print(features_save_path)
    pass


if __name__ == '__main__':
    # Cluster the extracted training features with k-means and store, for
    # every sample, the id of the cluster it was assigned to.
    features_dict = Tools.read_from_pkl(
        os.path.join(Config.features_save_path, "train_features.pkl"))
    data_list, features = features_dict["info"], np.asarray(
        features_dict["feature"])

    k_means = KMeans(k=512)
    images_lists = k_means.cluster(features)

    # Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; plain
    # `int` keeps the previous behavior (platform default integer dtype).
    cluster_result = np.zeros(shape=len(data_list), dtype=int)
    for cluster_id, cluster_image in enumerate(images_lists):
        # `cluster_image` is the list of sample indices in this cluster.
        cluster_result[cluster_image] = cluster_id
        pass
    Tools.write_to_pkl(
        os.path.join(Config.features_save_path, "train_cluster.pkl"), {
            "info": data_list,
            "cluster": cluster_result
        })
    pass
    # NOTE(review): fragment — `data_root` is defined before this excerpt.
    Tools.print(data_root)

    # Features are stored beside the data root, in "<data_root>_feature".
    features_save_path = Tools.new_dir("{}_feature".format(data_root))
    Tools.print(features_save_path)
    pass


if __name__ == '__main__':
    # Extract features for the train/val/test splits and pickle each split
    # together with its source data list.
    data_train, data_val, data_test = MyDataset.get_data_all(Config.data_root)

    ext_features = ExtFeatures()

    features_train = ext_features.run_features(data_list=data_train)
    Tools.write_to_pkl(
        os.path.join(Config.features_save_path, "train_features.pkl"), {
            "info": data_train,
            "feature": features_train
        })

    features_val = ext_features.run_features(data_list=data_val)
    Tools.write_to_pkl(
        os.path.join(Config.features_save_path, "val_features.pkl"), {
            "info": data_val,
            "feature": features_val
        })

    # NOTE(review): the final write below is truncated in this excerpt.
    features_test = ext_features.run_features(data_list=data_test)
    Tools.write_to_pkl(
        os.path.join(Config.features_save_path, "test_features.pkl"), {
            "info": data_test,
            "feature": features_test
import os
import numpy as np
from glob import glob
from tqdm import tqdm
from alisuretool.Tools import Tools
from deal_data_0_global_info import get_data_root_path, get_project_path


if __name__ == '__main__':
    # Merge the person-classifier results into the per-image label lists:
    # images flagged by the classifier get the extra label id 124.
    data_root = get_data_root_path()
    image_info_path = os.path.join(data_root, "deal", "image_info_list2.pkl")
    person_pkl = os.path.join(data_root, "deal", "person2.pkl")
    result_image_info_path = os.path.join(data_root, "deal", "image_info_list_change_person2.pkl")

    image_info_list = Tools.read_from_pkl(image_info_path)
    person_info_list = Tools.read_from_pkl(person_pkl)

    result_image_info_list = []
    for i, (image_info, person_info) in tqdm(enumerate(zip(image_info_list, person_info_list)), total=len(image_info_list)):
        image_path = image_info["image_path"]
        # Sanity check: the two pickles must be aligned one-to-one.
        if not os.path.exists(image_path) or image_path != person_info[1]:
            Tools.print(image_path)

        labels = [one[2] for one in image_info["object"]] + ([124] if person_info[0] == 1 else [])
        result_image_info_list.append([list(set(labels)), image_path])

    Tools.write_to_pkl(_path=result_image_info_path, _data=result_image_info_list)