Example #1
    def inference_ss_train(self, model_file_name=None, data_loader=None, save_path=None):
        if model_file_name is not None:
            Tools.print("Load model form {}".format(model_file_name), txt_path=self.config.ss_save_result_txt)
            self.load_model(model_file_name)
            pass

        final_save_path = Tools.new_dir("{}_final".format(save_path))

        self.net.eval()
        with torch.no_grad():
            for i, (inputs, labels, image_info_list) in tqdm(enumerate(data_loader), total=len(data_loader)):
                assert len(image_info_list) == 1

                # Label
                basename = image_info_list[0].split("Data/DET/")[1]
                final_name = os.path.join(final_save_path, basename.replace(".JPEG", ".png"))
                final_name = Tools.new_dir(final_name)
                if os.path.exists(final_name):
                    continue

                # Prediction
                outputs = self.net(inputs[0].float().cuda()).cpu()
                preds = outputs.max(dim=1)[1].numpy()

                if save_path:
                    Image.open(image_info_list[0]).convert("RGB").save(
                        Tools.new_dir(os.path.join(save_path, basename)))
                    DataUtil.gray_to_color(np.asarray(preds[0], dtype=np.uint8)).save(
                        os.path.join(save_path, basename.replace(".JPEG", ".png")))
                    Image.fromarray(np.asarray(preds[0], dtype=np.uint8)).save(final_name)
                    pass
                pass
            pass

        pass
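
A minimal usage sketch for the inference routine above, assuming a runner object that exposes inference_ss_train and a PyTorch DataLoader built with batch size 1 (the assert above requires exactly one image per batch); the dataset object, runner instance, and paths below are hypothetical:

from torch.utils.data import DataLoader

# Hypothetical invocation; dataset_val, runner, and the checkpoint path are placeholders.
data_loader_val = DataLoader(dataset_val, batch_size=1, shuffle=False, num_workers=8)
runner.inference_ss_train(model_file_name="../models/ss_example.pth",
                          data_loader=data_loader_val,
                          save_path=Tools.new_dir("../vis/ss_train_infer"))
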
class Config(object):
    gpu_id = 3
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    num_workers = 8

    num_way = 5
    num_shot = 1
    batch_size = 64

    val_freq = 10
    episode_size = 15
    test_episode = 600

    hid_dim = 64
    z_dim = 64

    matching_net = MatchingNet(hid_dim=hid_dim, z_dim=z_dim)

    # ic
    ic_out_dim = 512
    ic_ratio = 1

    learning_rate = 0.01
    loss_fsl_ratio = 1.0
    loss_ic_ratio = 1.0

    train_epoch = 2100
    first_epoch, t_epoch = 500, 200
    adjust_learning_rate = RunnerTool.adjust_learning_rate1

    ###############################################################################################
    is_png = True
    # is_png = False

    # resnet = resnet18
    resnet = resnet34

    # modify_head = False
    modify_head = True
    ###############################################################################################

    model_name = "{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}{}{}".format(
        gpu_id, train_epoch, batch_size, num_way, num_shot, first_epoch, t_epoch, ic_out_dim, ic_ratio,
        loss_fsl_ratio, loss_ic_ratio, "_head" if modify_head else "", "_png" if is_png else "")
    Tools.print(model_name)

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"
    data_root = os.path.join(data_root, "miniImageNet_png") if is_png else data_root
    Tools.print(data_root)

    _root_path = "../models_mn/two_ic_ufsl_2net_res_sgd_acc_duli"
    mn_dir = Tools.new_dir("{}/{}_mn.pkl".format(_root_path, model_name))
    ic_dir = Tools.new_dir("{}/{}_ic.pkl".format(_root_path, model_name))
    pass
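
RunnerTool.adjust_learning_rate1 is not shown in this excerpt; the sketch below is only one plausible reading of the first_epoch/t_epoch parameters (keep the base rate for first_epoch epochs, then decay by 10x every t_epoch epochs), not the repository's actual schedule:

# Assumed step-decay schedule; the real RunnerTool.adjust_learning_rate1 may differ.
def adjust_learning_rate_sketch(optimizer, epoch, base_lr, first_epoch, t_epoch):
    if epoch < first_epoch:
        lr = base_lr
    else:
        lr = base_lr * (0.1 ** (1 + (epoch - first_epoch) // t_epoch))
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr
    return lr
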
Example #3
class Config(object):
    gpu_id = 2
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    num_workers = 8
    episode_size = 15
    test_episode = 600
    test_avg_num = 5

    feature_encoder, relation_network = CNNEncoder(), RelationNetwork()

    # model_path = "../models/two_ic_ufsl_2net_res_sgd_acc"
    # model_fe_name = "0_2100_64_5_1_500_200_512_1_1.0_1.0_fe_5way_1shot.pkl"
    # model_rn_name = "0_2100_64_5_1_500_200_512_1_1.0_1.0_rn_5way_1shot.pkl"

    model_path = "../models/two_ic_ufsl_2net_res_sgd_acc_duli"
    model_fe_name = "2_2100_64_5_1_500_200_512_1_1.0_1.0_fe_5way_1shot.pkl"
    model_rn_name = "2_2100_64_5_1_500_200_512_1_1.0_1.0_rn_5way_1shot.pkl"

    fe_dir = Tools.new_dir(os.path.join(model_path, model_fe_name))
    rn_dir = Tools.new_dir(os.path.join(model_path, model_rn_name))

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"

    pass
Example #4
class Config(object):
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"

    num_workers = 8

    num_way = 5
    num_shot = 1
    batch_size = 64
    test_avg_num = 5

    MEAN_PIXEL = [x / 255.0 for x in [120.39586422, 115.59361427, 104.54012653]]
    STD_PIXEL = [x / 255.0 for x in [70.68188272, 68.27635443, 72.54505529]]

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"

    # model_path = "ic_fsl"
    # model_fe_name = "2_64_5_1_fe_5way_1shot.pkl"
    # model_rn_name = "2_64_5_1_rn_5way_1shot.pkl"
    # fe_dir = Tools.new_dir("../models/{}/{}".format(model_path, model_fe_name))
    # rn_dir = Tools.new_dir("../models/{}/{}".format(model_path, model_rn_name))

    model_path = "fsl_old"
    model_fe_name = "1_fe_5way_1shot.pkl"
    model_rn_name = "1_rn_5way_1shot.pkl"
    fe_dir = Tools.new_dir("../models/{}/{}".format(model_path, model_fe_name))
    rn_dir = Tools.new_dir("../models/{}/{}".format(model_path, model_rn_name))
    pass
Example #5
    def __init__(self, split="all", class_id=None, is_l2norm=True, result_dir_name=None, png_name=None, sample=None):
        self.title = "vis"

        self.split = split
        self.is_l2norm = is_l2norm
        self.class_id = class_id
        self.sample = sample
        current_time = datetime.now().strftime('%b%d_%H-%M-%S')
        self.vis_dir = "/home/test/Documents/kbzhao/IC/IC_result/3_resnet_34_64_512_1_2100_500_200"
        self.result_pkl = Tools.new_dir(os.path.join(self.vis_dir, "fig_final_new", "{}_{}{}.pkl".format(
            self.split, "l2norm" if self.is_l2norm else "logits",
            "" if png_name is None else "_{}".format(png_name))))
        self.result_png = self.result_pkl[:-4] + f"{current_time}.png"

        split_path = os.path.split(self.result_png)
        self.result_png = Tools.new_dir(os.path.join(split_path[0], result_dir_name, split_path[1]))

        if self.split == "all":
            self.feature_dir_list = [os.path.join(self.vis_dir, "train.pkl"),
                                     os.path.join(self.vis_dir, "test.pkl"),
                                     os.path.join(self.vis_dir, "val.pkl")]
        else:
            self.feature_dir = os.path.join(self.vis_dir, "{}.pkl".format(self.split))
            pass
        pass
class Config(object):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    # train_epoch = 300
    train_epoch = 180
    learning_rate = 0.001
    num_workers = 8

    val_freq = 10

    num_way = 5
    num_shot = 1
    batch_size = 64

    episode_size = 15
    test_episode = 600

    feature_encoder, relation_network = CNNEncoder(), RelationNetwork()
    # feature_encoder, relation_network = CNNEncoder1(), RelationNetwork1()

    model_name = "{}_{}_{}_{}".format(train_epoch, batch_size, num_way,
                                      num_shot)

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"

    fe_dir = Tools.new_dir("../models/fsl/{}_fe_{}way_{}shot.pkl".format(
        model_name, num_way, num_shot))
    rn_dir = Tools.new_dir("../models/fsl/{}_rn_{}way_{}shot.pkl".format(
        model_name, num_way, num_shot))
    pass
Example #7
class Config(object):
    gpu_id = 2
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    num_workers = 8
    batch_size = 8

    resnet, vggnet, net_name = resnet18, None, "resnet_18"
    # resnet, vggnet, net_name = resnet34, None, "resnet_34"
    # resnet, vggnet, net_name = resnet50, None, "resnet_50"
    # resnet, vggnet, net_name = None, vgg16_bn, "vgg16_bn"

    modify_head = False
    # modify_head = True

    # ic
    ic_out_dim = 512

    ic_dir = "../cub/models/ic_res_xx/2_resnet_18_64_512_1_2100_500_200_False_ic.pkl"
    vis_dir = Tools.new_dir(
        "../vis/CUB/ic_res_xx/2_resnet_18_64_512_1_2100_500_200_False_ic")

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/UFSL/CUB'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/UFSL/CUB'
    else:
        data_root = "F:\\data\\CUB"

    data_root = os.path.join(data_root, "CUBSeg")
    vis_dir = Tools.new_dir(
        "../vis/CUBSeg/ic_res_xx/2_resnet_18_64_512_1_2100_500_200_False_ic")

    Tools.print(data_root)
    pass
class Config(object):
    gpu_id = 0
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    num_workers = 16

    num_way = 5
    num_shot = 1
    # batch_size = 64

    val_freq = 20
    episode_size = 15
    test_episode = 600

    ic_ratio = 1
    knn = 50

    learning_rate = 0.01
    loss_fsl_ratio = 1.0
    loss_ic_ratio = 1.0

    ###############################################################################################
    # resnet = resnet18
    resnet = resnet34

    # modify_head = False
    modify_head = True

    # matching_net, net_name, batch_size = MatchingNet(hid_dim=64, z_dim=64), "conv4", 64
    matching_net, net_name, batch_size = ResNet12Small(avg_pool=True, drop_rate=0.1), "resnet12", 32

    ic_times = 2

    ic_out_dim = 512

    train_epoch = 1200
    first_epoch, t_epoch = 400, 200
    adjust_learning_rate = RunnerTool.adjust_learning_rate1
    ###############################################################################################

    model_name = "{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}{}".format(
        gpu_id, net_name, train_epoch, batch_size, num_way, num_shot, first_epoch, t_epoch,
        ic_out_dim, ic_ratio, loss_fsl_ratio, loss_ic_ratio, ic_times, "_head" if modify_head else "")

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/UFSL/CUB'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/UFSL/CUB'
    else:
        data_root = "F:\\data\\CUB"
    data_root = os.path.join(data_root, "CUBSeg")

    _root_path = "../cub/models_mn/two_ic_ufsl_2net_res_sgd_acc_duli"
    mn_dir = Tools.new_dir("{}/{}_mn.pkl".format(_root_path, model_name))
    ic_dir = Tools.new_dir("{}/{}_ic.pkl".format(_root_path, model_name))

    Tools.print(model_name)
    Tools.print(data_root)
    pass
    def inference_crf(self, dataset, logits_path):
        logit_file_path = Tools.new_dir("{}_logit".format(logits_path))
        crf_file_path = Tools.new_dir("{}_crf".format(logits_path))
        crf_final_file_path = Tools.new_dir("{}_crf_final".format(logits_path))

        postprocessor = DenseCRF()
        n_jobs = multiprocessing.cpu_count()

        def process(i):
            image_info, label_info = dataset.__getitem__(i)
            label = Image.fromarray(np.zeros_like(np.asarray(Image.open(image_info)))).convert("L") \
                if label_info == 1 else Image.open(label_info)

            basename = os.path.basename(image_info)
            im = Image.open(image_info)
            logit = np.load(
                os.path.join(logit_file_path, basename.replace(".jpg",
                                                               ".npy")))

            ori_size = (im.size[1], im.size[0])
            crf_size = (logit.shape[1], logit.shape[2])

            logit_tensor = torch.FloatTensor(logit)[None, ...]
            logit_tensor = self._up_to_target(logit_tensor,
                                              target_size=crf_size)
            prob_one = F.softmax(logit_tensor, dim=1)[0].numpy()

            prob_crf = postprocessor(
                np.array(im.resize((crf_size[1], crf_size[0]))), prob_one)
            prob_crf_resize = self._up_to_target(
                torch.FloatTensor(prob_crf)[None, ...], target_size=ori_size)
            result = np.argmax(prob_crf_resize[0].numpy(), axis=0)

            # save
            im.save(os.path.join(crf_file_path, basename))
            DataUtil.gray_to_color(np.asarray(label, dtype=np.uint8)).save(
                os.path.join(crf_file_path, basename.replace(".jpg",
                                                             "_l.png")))
            DataUtil.gray_to_color(np.asarray(result, dtype=np.uint8)).save(
                os.path.join(crf_file_path, basename.replace(".jpg", ".png")))
            Image.fromarray(np.asarray(result, dtype=np.uint8)).save(
                os.path.join(crf_final_file_path,
                             basename.replace(".jpg", ".png")))

            return result, np.array(label)

        results = joblib.Parallel(n_jobs=n_jobs,
                                  verbose=10,
                                  pre_dispatch="all")([
                                      joblib.delayed(process)(i)
                                      for i in range(len(dataset))
                                  ])

        metrics = StreamSegMetrics(self.config.ss_num_classes)
        for preds, targets in results:
            metrics.update(targets, preds)
        Tools.print("{}".format(metrics.to_str(metrics.get_results())))
        Tools.print()
        pass
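
DenseCRF itself is not defined in this excerpt; it is used above as a callable that takes an RGB image array plus per-class softmax probabilities and returns refined probabilities. A minimal sketch of such a postprocessor built on pydensecrf (parameter values are illustrative, not taken from the repository):

import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax


class DenseCRFSketch(object):
    """Assumed interface: __call__(image HxWx3 uint8, prob CxHxW float) -> CxHxW float."""

    def __init__(self, iter_max=10, pos_xy_std=3, pos_w=3, bi_xy_std=80, bi_rgb_std=13, bi_w=10):
        self.iter_max = iter_max
        self.pos_xy_std, self.pos_w = pos_xy_std, pos_w
        self.bi_xy_std, self.bi_rgb_std, self.bi_w = bi_xy_std, bi_rgb_std, bi_w
        pass

    def __call__(self, image, prob):
        c, h, w = prob.shape
        d = dcrf.DenseCRF2D(w, h, c)
        d.setUnaryEnergy(np.ascontiguousarray(unary_from_softmax(prob)))
        d.addPairwiseGaussian(sxy=self.pos_xy_std, compat=self.pos_w)
        d.addPairwiseBilateral(sxy=self.bi_xy_std, srgb=self.bi_rgb_std,
                               rgbim=np.ascontiguousarray(image), compat=self.bi_w)
        q = d.inference(self.iter_max)
        return np.array(q).reshape((c, h, w))
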
Example #10
    def _set_dir(model_name, dataset_name, gpu_id, out_dir):
        _file_name = "{}_{}_GPU{}_{}".format(
            model_name, dataset_name, gpu_id,
            time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y'))
        root_log_dir = Tools.new_dir("{}/logs/{}".format(out_dir, _file_name))
        root_ckpt_dir = Tools.new_dir("{}/checkpoints/{}".format(
            out_dir, _file_name))
        return root_log_dir, root_ckpt_dir
    def __init__(self,
                 run_name,
                 batch_size,
                 max_epochs,
                 data,
                 net,
                 has_weight_decay=True,
                 log_dir='./logs',
                 model_dir="./models",
                 model_name="./c3d_ucf_model",
                 pre_train=None):
        self.run_name = run_name
        self.model_name = model_name
        self.model_dir = Tools.new_dir(os.path.join(model_dir, self.run_name))
        self.checkpoint_path = os.path.join(self.model_dir, self.model_name)
        self.log_dir = Tools.new_dir(os.path.join(log_dir, self.run_name))
        self.pre_train = pre_train

        self.data = data
        self.batch_size = batch_size
        self.max_epochs = max_epochs
        self.has_weight_decay = has_weight_decay

        # Input
        _shape = (batch_size, self.data.FRAMES, self.data.CROP_SIZE,
                  self.data.CROP_SIZE, self.data.CHANNELS)
        self.images_placeholder = tf.placeholder(tf.float32, _shape)
        self.labels_placeholder = tf.placeholder(tf.int64,
                                                 shape=(self.batch_size, ))

        # Net
        self.logits = net(self.images_placeholder, 0.5, self.data.NUM_CLASSES)

        # Output
        self.pred = tf.argmax(self.logits, 1)
        self.loss = self.cal_loss(self.labels_placeholder, self.logits,
                                  self.has_weight_decay)
        self.accuracy = self.cal_acc(self.pred, self.labels_placeholder)

        # learning rate
        self.now_epoch = tf.Variable(0, trainable=False, name="epoch")
        self.add_epoch_op = tf.assign_add(self.now_epoch, 1)
        self.learning_rate = tf.train.piecewise_constant(
            self.now_epoch, [10, 20], [0.0001, 0.00005, 0.00001])

        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(
            self.loss)
        self.summary_op = tf.summary.merge_all()

        # Sess
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
            allow_growth=True)))
        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver(var_list=tf.global_variables(),
                                    max_to_keep=5)
        self.summary_writer = tf.summary.FileWriter(self.log_dir,
                                                    self.sess.graph)
        pass
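
A minimal sketch of one training epoch against the graph built above; self.data.NUM_BATCHES and self.data.next_batch are hypothetical helpers assumed to yield arrays matching the placeholders:

    def train_one_epoch(self):
        for _ in range(self.data.NUM_BATCHES):  # hypothetical attribute
            images, labels = self.data.next_batch(self.batch_size)  # hypothetical helper
            self.sess.run([self.train_op, self.loss, self.accuracy],
                          feed_dict={self.images_placeholder: images,
                                     self.labels_placeholder: labels})
        # advance the epoch counter that drives the piecewise-constant learning rate
        return self.sess.run(self.add_epoch_op)
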
class Config(object):
    multi_gpu = True

    gpu_id = "0,1,2,3" if multi_gpu else "0"
    # gpu_id = "1,2,3" if multi_gpu else "0"
    gpu_num = len(gpu_id.split(","))

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    num_workers = 32

    num_way = 5
    num_shot = 1

    val_freq = 10
    episode_size = 15
    test_episode = 600

    # ic
    ic_out_dim = 512
    ic_ratio = 1

    learning_rate = 0.01

    train_epoch = 1500
    first_epoch, t_epoch = 500, 200
    adjust_learning_rate = RunnerTool.adjust_learning_rate1

    ###############################################################################################
    # ic_net, net_name = EncoderC4(), "ICConv4"
    ic_net, net_name = EncoderResNet12(), "ICResNet12"
    # ic_net, net_name = EncoderResNet34(), "ICResNet34"

    # matching_net, batch_size, net_name = EncoderC4(), 64, net_name + "MNConv4"
    matching_net, batch_size, net_name = EncoderResNet12(), 16, net_name + "MNResNet12"
    # matching_net, batch_size, net_name = EncoderResNet34(), 16, net_name + "MNRResNet34"
    ###############################################################################################
    batch_size = batch_size * gpu_num

    model_name = "{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_png".format(
        gpu_id.replace(",", ""), net_name, train_epoch, batch_size, num_way,
        num_shot, first_epoch, t_epoch, ic_out_dim, ic_ratio)
    Tools.print(model_name)

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"
    data_root = os.path.join(data_root, "miniImageNet_png")
    Tools.print(data_root)

    _root_path = "../models_abl/mn/two_ic_ufsl_1net_2net_duli"
    mn_dir = Tools.new_dir("{}/{}_mn.pkl".format(_root_path, model_name))
    ic_dir = Tools.new_dir("{}/{}_ic.pkl".format(_root_path, model_name))
    pass
class Config(object):
    multi_gpu = False

    gpu_id = "1,2,3" if multi_gpu else "0"
    # gpu_id = "1,2,3" if multi_gpu else "1"
    gpu_num = len(gpu_id.split(","))

    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    num_workers = 16

    num_way = 10
    num_way_test = 5
    num_shot = 1

    val_freq = 5
    episode_size = 15
    test_episode = 600

    # ic
    ic_out_dim = 2048
    ic_ratio = 1

    learning_rate = 0.01

    train_epoch = 1600
    first_epoch, t_epoch = 1000, 300
    adjust_learning_rate = RunnerTool.adjust_learning_rate1

    ###############################################################################################
    ic_net, net_name = EncoderC4(), "ICConv4"
    matching_net, batch_size, net_name = EncoderC4(), 64, net_name + "MNConv4"
    ###############################################################################################
    batch_size = batch_size * gpu_num

    model_name = "{}_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
        gpu_id.replace(",", ""), net_name, train_epoch, batch_size,
        num_way, num_shot, first_epoch, t_epoch, ic_out_dim, ic_ratio)
    Tools.print(model_name)

    dataset_name = "omniglot_single"
    # dataset_name = "omniglot_rot"
    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/UFSL/{}'.format(dataset_name)
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/UFSL/{}'.format(dataset_name)
        if not os.path.isdir(data_root):
            data_root = '/home/ubuntu/Dataset/Partition1/ALISURE/Data/UFSL/{}'.format(dataset_name)
    else:
        data_root = "F:\\data\\{}".format(dataset_name)
    Tools.print(data_root)

    _root_path = "../omniglot/mn/two_ic_ufsl_1net_2net_duli"
    mn_dir = Tools.new_dir("{}/{}_mn.pkl".format(_root_path, model_name))
    ic_dir = Tools.new_dir("{}/{}_ic.pkl".format(_root_path, model_name))
    pass
Example #14
class Config(object):
    gpu_id = 0
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    num_workers = 16

    num_way = 5
    num_shot = 1
    batch_size = 64

    val_freq = 10
    episode_size = 15
    test_episode = 600

    feature_encoder, relation_network = CNNEncoder(), RelationNetwork()

    # ic
    ic_out_dim = 512
    # ic_out_dim = 2560
    ic_ratio = 1

    learning_rate = 0.01
    # loss_fsl_ratio = 10.0
    # loss_ic_ratio = 0.1
    loss_fsl_ratio = 1.0
    loss_ic_ratio = 1.0

    # train_epoch = 500
    # first_epoch, t_epoch = 300, 150
    # adjust_learning_rate = RunnerTool.adjust_learning_rate2

    train_epoch = 2100
    first_epoch, t_epoch = 500, 200
    adjust_learning_rate = RunnerTool.adjust_learning_rate1

    model_name = "{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
        gpu_id, train_epoch, batch_size, num_way, num_shot, first_epoch,
        t_epoch, ic_out_dim, ic_ratio, loss_fsl_ratio, loss_ic_ratio)

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"

    _root_path = "../models/two_ic_ufsl_2net_res_sgd_acc"
    fe_dir = Tools.new_dir("{}/{}_fe_{}way_{}shot.pkl".format(
        _root_path, model_name, num_way, num_shot))
    rn_dir = Tools.new_dir("{}/{}_rn_{}way_{}shot.pkl".format(
        _root_path, model_name, num_way, num_shot))
    ic_dir = Tools.new_dir("{}/{}_ic_{}way_{}shot.pkl".format(
        _root_path, model_name, num_way, num_shot))
    pass
Example #15
def train(config):

    voc_runner = VOCRunner(config=config)

    if config.only_inference_ss:
        dataset_ss_inference_train, dataset_ss_inference_val, dataset_ss_inference_test = DatasetUtil.get_dataset_by_type(
            DatasetUtil.dataset_type_ss_voc_val_scale,
            config.ss_size,
            scales=config.scales,
            data_root=config.data_root_path)
        data_loader_ss_inference_train = DataLoader(dataset_ss_inference_train,
                                                    1,
                                                    False,
                                                    num_workers=8)
        data_loader_ss_inference_val = DataLoader(dataset_ss_inference_val,
                                                  1,
                                                  False,
                                                  num_workers=8)
        data_loader_ss_inference_test = DataLoader(dataset_ss_inference_test,
                                                   1,
                                                   False,
                                                   num_workers=8)

        voc_runner.inference_ss(model_file_name=config.model_file_name,
                                data_loader=data_loader_ss_inference_train,
                                save_path=Tools.new_dir(
                                    os.path.join(config.eval_save_path,
                                                 "train")))
        voc_runner.inference_ss(model_file_name=config.model_file_name,
                                data_loader=data_loader_ss_inference_val,
                                save_path=Tools.new_dir(
                                    os.path.join(config.eval_save_path,
                                                 "val")))
        voc_runner.inference_ss(model_file_name=config.model_file_name,
                                data_loader=data_loader_ss_inference_test,
                                save_path=Tools.new_dir(
                                    os.path.join(config.eval_save_path,
                                                 "test")))
        return

    if config.only_eval_ss:
        voc_runner.eval_ss(
            epoch=0,
            model_file_name=
            "../../../WSS_Model_VOC/5_DeepLabV3PlusResNet101_21_100_18_5_513/ss_90.pth"
        )
        return

    if config.only_train_ss:
        voc_runner.train_ss(start_epoch=0, model_file_name=None)
        return

    pass
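
The train routine above only touches a handful of Config fields; a hypothetical stub listing those fields (values are placeholders, not the repository's defaults):

# Placeholder Config stub for the train() entry point above; values are illustrative only.
class Config(object):
    only_inference_ss = True      # run multi-scale inference on train/val/test and return
    only_eval_ss = False          # evaluate a saved checkpoint and return
    only_train_ss = False         # train the segmentation network and return

    ss_size = 513                                # assumed input size
    scales = (0.5, 0.75, 1.0, 1.25, 1.5)         # assumed multi-scale factors
    data_root_path = "/path/to/VOC"              # placeholder
    model_file_name = "/path/to/ss_model.pth"    # placeholder
    eval_save_path = "/path/to/eval_out"         # placeholder
    pass
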
Example #16
class Config(object):
    gpu_id = 0
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    num_workers = 8
    num_way = 5
    num_shot = 1
    val_freq = 10
    episode_size = 15
    test_episode = 600
    learning_rate = 0.01
    loss_fsl_ratio = 1.0
    loss_ic_ratio = 1.0
    train_epoch = 2100
    first_epoch, t_epoch = 500, 200
    adjust_learning_rate = RunnerTool.adjust_learning_rate1
    ic_out_dim = 512
    ic_ratio = 1

    ##############################################################################################################
    is_png = True
    # is_png = False

    # has_l2norm = True
    has_l2norm = False

    resnet, resnet_name = resnet18, "resnet18"
    # resnet, resnet_name = resnet34, "resnet34"

    # proto_net, proto_name, batch_size = ProtoNet(hid_dim=64, z_dim=64, has_norm=has_l2norm), "ProtoNet", 64
    proto_net, proto_name, batch_size = ProtoNetLarge(has_norm=has_l2norm), "ProtoNetLarge", 32
    ##############################################################################################################

    model_name = "{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}{}{}".format(
        gpu_id, train_epoch, first_epoch, t_epoch, batch_size, num_way, num_shot,
        ic_out_dim, ic_ratio, loss_fsl_ratio, loss_ic_ratio, resnet_name, proto_name,
        "_l2norm" if has_l2norm else "", "_png" if is_png else "")
    Tools.print(model_name)

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"
    data_root = os.path.join(data_root, "miniImageNet_png") if is_png else data_root
    Tools.print(data_root)

    _root_path = "../models_pn/ufsl_abl"
    pn_dir = Tools.new_dir("{}/{}_pn_{}_{}.pkl".format(_root_path, model_name, num_way, num_shot))
    ic_dir = Tools.new_dir("{}/{}_ic_{}_{}.pkl".format(_root_path, model_name, num_way, num_shot))
    pass
class Config(object):
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"

    learning_rate = 0.01
    num_workers = 8

    num_way = 5
    num_shot = 1
    batch_size = 64

    val_freq = 10
    episode_size = 15
    test_episode = 600

    # ic
    ic_in_dim = 64
    ic_out_dim = 512
    ic_ratio = 1

    loss_fsl_ratio = 10.0
    loss_ic_ratio = 0.1

    train_epoch = 600
    first_epoch, t_epoch = 300, 150
    adjust_learning_rate = RunnerTool.adjust_learning_rate2

    feature_encoder, relation_network = CNNEncoder(), RelationNetwork()
    # feature_encoder, relation_network = CNNEncoder1(), RelationNetwork1()

    model_name = "2_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
        train_epoch, batch_size, first_epoch, t_epoch, ic_in_dim, ic_out_dim,
        ic_ratio, loss_fsl_ratio, loss_ic_ratio)

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"

    fe_dir = Tools.new_dir(
        "../models/two_ic_fsl_sgd/{}_fe_{}way_{}shot.pkl".format(
            model_name, num_way, num_shot))
    rn_dir = Tools.new_dir(
        "../models/two_ic_fsl_sgd/{}_rn_{}way_{}shot.pkl".format(
            model_name, num_way, num_shot))
    ic_dir = Tools.new_dir(
        "../models/two_ic_fsl_sgd/{}_ic_{}way_{}shot.pkl".format(
            model_name, num_way, num_shot))
    pass
class Config(object):
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"

    num_workers = 8
    batch_size = 64
    val_freq = 2

    learning_rate = 0.001
    learning_rate_small = 0.001

    num_way = 5
    num_shot = 1

    episode_size = 15
    test_episode = 600

    feature_encoder, relation_network = CNNEncoder(), RelationNetwork()

    fe_pretrain = "../models/ic/1_64_512_1_500_200_0.01_fe.pkl"
    ic_pretrain = "../models/ic/1_64_512_1_500_200_0.01_ic.pkl"

    # ic
    ic_in_dim = 64
    ic_out_dim = 512
    ic_ratio = 2

    train_epoch = 300
    loss_fsl_ratio = 10.0
    loss_ic_ratio = 0.1

    model_name = "4_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
        train_epoch, batch_size, num_way, num_shot, ic_in_dim, ic_out_dim,
        ic_ratio, loss_fsl_ratio, loss_ic_ratio)

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"

    root_path = "../models/two_ic_ufsl_pretrain"
    fe_dir = Tools.new_dir("{}/{}_fe_{}way_{}shot.pkl".format(
        root_path, model_name, num_way, num_shot))
    rn_dir = Tools.new_dir("{}/{}_rn_{}way_{}shot.pkl".format(
        root_path, model_name, num_way, num_shot))
    ic_dir = Tools.new_dir("{}/{}_ic_{}way_{}shot.pkl".format(
        root_path, model_name, num_way, num_shot))
    pass
def process_folder(all_png, out_dir):
    label_dict = utils.get_label_dict()
    folders = utils.get_ordered_folders()
    val_ground_dict = utils.get_val_ground_dict()

    labels_searched = []
    for folder in folders:
        labels_searched.append(label_dict[folder])
    labels_list = []
    images = []
    for image_index, image_name in enumerate(all_png):
        if image_index % 1000 == 0:
            Tools.print("{} {}".format(image_index, len(all_png)))

        basename = os.path.basename(image_name)
        label = val_ground_dict[basename[:-4]]
        if label not in labels_searched:
            continue
        try:
            img = imageio.imread(image_name)
            r = img[:, :, 0].flatten()
            g = img[:, :, 1].flatten()
            b = img[:, :, 2].flatten()
        except Exception:
            Tools.print("Can't process image {}".format(basename))
            with open("log_img2np_val.txt", "a") as f:
                f.write("Couldn't read: {}\n".format(image_name))
            continue
        arr = np.array(list(r) + list(g) + list(b), dtype=np.uint8)
        images.append(arr)
        labels_list.append(label)
        pass

    data_val = np.row_stack(images)

    # Can add some kind of data splitting
    d_val = {'data': data_val, 'labels': labels_list}
    Tools.new_dir(out_dir)
    pickle.dump(d_val, open(os.path.join(out_dir, 'val_data'), 'wb'))

    y_test = d_val['labels']
    count = np.zeros([1000])

    for i in y_test:
        count[i - 1] += 1
    for i in range(1000):
        Tools.print('%d : %d' % (i, count[i]))
    Tools.print('SUM: %d' % len(y_test))
    pass
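
Since each row of 'data' is the red, green, and blue planes flattened and concatenated, reading the pickle back and recovering an image looks roughly like this (assuming all images share a single H x W, e.g. 84 x 84; adjust to the real size used when the images were prepared):

import os
import pickle
import numpy as np

with open(os.path.join(out_dir, "val_data"), "rb") as f:  # out_dir as passed to process_folder
    d_val = pickle.load(f)

h = w = 84                                       # assumed image size
row = d_val["data"][0]                           # shape (3 * h * w,), dtype uint8
image = row.reshape(3, h, w).transpose(1, 2, 0)  # back to H x W x 3
label = d_val["labels"][0]
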
class Config(object):
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    num_workers = 8
    batch_size = 64
    val_freq = 10

    learning_rate = 0.001

    num_way = 5
    num_shot = 1

    episode_size = 15
    test_episode = 600

    # ic
    ic_in_dim = 256
    ic_out_dim = 512
    ic_ratio = 1

    feature_encoder, relation_network = VGGEncoder(), VGGRelationNetwork()
    ic_model = VGGICModel(in_dim=ic_in_dim, out_dim=ic_out_dim)

    train_epoch = 600

    loss_fsl_ratio = 20.0
    loss_ic_ratio = 0.1

    model_name = "1_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
        train_epoch, batch_size, num_way, num_shot, ic_in_dim, ic_out_dim,
        ic_ratio, loss_fsl_ratio, loss_ic_ratio)

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"

    fe_dir = Tools.new_dir(
        "../models/two_ic_fsl_large/{}_fe_{}way_{}shot.pkl".format(
            model_name, num_way, num_shot))
    rn_dir = Tools.new_dir(
        "../models/two_ic_fsl_large/{}_rn_{}way_{}shot.pkl".format(
            model_name, num_way, num_shot))
    ic_dir = Tools.new_dir(
        "../models/two_ic_fsl_large/{}_ic_{}way_{}shot.pkl".format(
            model_name, num_way, num_shot))
    pass
Example #21
class Config(object):
    gpu_id = 3
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    num_workers = 24
    batch_size = 64
    val_freq = 10

    learning_rate = 0.001

    num_way = 5
    num_shot = 1

    episode_size = 15
    test_episode = 600

    feature_encoder, relation_network = CNNEncoder(), RelationNetwork()

    # ic
    ic_in_dim = 64
    ic_out_dim = 512
    ic_ratio = 1

    train_epoch = 900
    loss_fsl_ratio = 10.0
    loss_ic_ratio = 0.1

    model_name = "{}_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
        gpu_id, train_epoch, batch_size, num_way, num_shot, ic_in_dim,
        ic_out_dim, ic_ratio, loss_fsl_ratio, loss_ic_ratio)

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"

    fe_dir = Tools.new_dir(
        "../models/two_ic_ufsl_acc/{}_fe_{}way_{}shot.pkl".format(
            model_name, num_way, num_shot))
    rn_dir = Tools.new_dir(
        "../models/two_ic_ufsl_acc/{}_rn_{}way_{}shot.pkl".format(
            model_name, num_way, num_shot))
    ic_dir = Tools.new_dir(
        "../models/two_ic_ufsl_acc/{}_ic_{}way_{}shot.pkl".format(
            model_name, num_way, num_shot))
    pass
Example #22
class Config(object):
    gpu_id = 1
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    num_workers = 8
    batch_size = 8

    # resnet, vggnet, net_name = resnet18, None, "resnet_18"
    resnet, vggnet, net_name = resnet34, None, "resnet_34"
    # resnet, vggnet, net_name = resnet50, None, "resnet_50"
    # resnet, vggnet, net_name = None, vgg16_bn, "vgg16_bn"

    # modify_head = False
    modify_head = True

    is_png = True
    # is_png = False

    # ic
    ic_out_dim = 512

    ic_dir = "../models/ic_res_xx/3_resnet_34_64_512_1_2100_500_2000.01_ic.pkl"

    vis_dir = Tools.new_dir("../vis/miniImagenet/ic_res_xx/3_resnet_34_64_512_1_2100_500_200")

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"
    data_root = os.path.join(data_root, "miniImageNet_png") if is_png else data_root
    Tools.print(data_root)

    pass
Example #23
    def vis(self, split="train"):
        Tools.print()
        Tools.print("Vis ...")

        loader = self.test_loader if split == "test" else self.train_loader
        loader = self.val_loader if split == "val" else loader

        feature_list = []
        self.ic_model.eval()
        for image_transform, image, label, idx in tqdm(loader):
            ic_out_logits, ic_out_l2norm = self.ic_model(self.to_cuda(image_transform))

            image_data = np.asarray(image.permute(0, 2, 3, 1) * 255, np.uint8)
            cluster_id = np.asarray(torch.argmax(ic_out_logits, -1).cpu())
            for i in range(len(idx)):
                feature_list.append([int(idx[i]), int(label[i]), int(cluster_id[i]),
                                     np.array(ic_out_logits[i].cpu().detach().numpy()),
                                     np.array(ic_out_l2norm[i].cpu().detach().numpy())])

                result_path = Tools.new_dir(os.path.join(Config.vis_dir, split, str(cluster_id[i])))
                Image.fromarray(image_data[i]).save(os.path.join(result_path, "{}_{}.png".format(label[i], idx[i])))
                pass
            pass

        Tools.write_to_pkl(os.path.join(Config.vis_dir, "{}.pkl".format(split)), feature_list)
        pass
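
Each entry written above is [idx, label, cluster_id, logits, l2norm]; a small sketch of reading the pickle back to inspect cluster assignments, assuming Tools.write_to_pkl stores a plain pickle file:

import os
import pickle
from collections import Counter

with open(os.path.join(Config.vis_dir, "train.pkl"), "rb") as f:
    feature_list = pickle.load(f)

# count how many samples each cluster id received
cluster_sizes = Counter(cluster_id for _, _, cluster_id, _, _ in feature_list)
print(cluster_sizes.most_common(10))
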
Example #24
class Config(object):
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"

    start_epoch = 0
    max_epoch = 1600
    learning_rate = 0.01
    first_epoch, t_epoch = 200, 100
    low_dim = 512
    ratio = 1
    # ratio = 2

    has_entropy = False
    # has_entropy = True

    batch_size = 64
    resume = False
    pre_train = None
    name = "{}_{}_{}_{}_{}_{}_{}".format(max_epoch, batch_size, low_dim, ratio, first_epoch, t_epoch, has_entropy)
    checkpoint_path = Tools.new_dir("../models_ic/{}/ckpt.t7".format(name))

    if "Linux" in platform.platform():
        data_root = "/mnt/4T/Data/data/CIFAR"
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/cifar'
    else:
        data_root  ="F:\\data\\cifar10"
    pass
Example #25
    def __init__(self, data_root_path='/mnt/4T/Data/cifar/cifar-10',
                 batch_size=64, image_size=224, sp_size=8, train_print_freq=100, test_print_freq=50,
                 root_ckpt_dir="./ckpt2/norm3", num_workers=8, use_gpu=True, gpu_id="1"):
        self.train_print_freq = train_print_freq
        self.test_print_freq = test_print_freq

        self.device = gpu_setup(use_gpu=use_gpu, gpu_id=gpu_id)
        self.root_ckpt_dir = Tools.new_dir(root_ckpt_dir)

        self.train_dataset = MyDataset(data_root_path=data_root_path,
                                       is_train=True, image_size=image_size, sp_size=sp_size)
        self.test_dataset = MyDataset(data_root_path=data_root_path,
                                      is_train=False, image_size=image_size, sp_size=sp_size)

        self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True,
                                       num_workers=num_workers, collate_fn=self.train_dataset.collate_fn)
        self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False,
                                      num_workers=num_workers, collate_fn=self.test_dataset.collate_fn)

        self.model = MyGCNNet().to(self.device)
        # self.lr_s = [[0, 0.001], [25, 0.001], [50, 0.0002], [75, 0.00004]]
        self.lr_s = [[0, 0.01], [10, 0.001], [20, 0.0001], [30, 0.00001]]
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr_s[0][0], weight_decay=0.0)
        # self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr_s[0][0], momentum=0.9, weight_decay=5e-4)
        self.loss_class = nn.CrossEntropyLoss().to(self.device)

        Tools.print("Total param: {}".format(self._view_model_param(self.model)))
        pass
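
self.lr_s above is a list of [start_epoch, lr] pairs; a small sketch of how a per-epoch learning rate could be picked from it and pushed into the optimizer (not necessarily the repository's exact helper):

# Assumed helper: use the lr whose start_epoch is the largest one not exceeding epoch.
def lr_for_epoch(lr_s, epoch):
    lr = lr_s[0][1]
    for start_epoch, value in lr_s:
        if epoch >= start_epoch:
            lr = value
    return lr


def set_lr(optimizer, lr):
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr
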
Example #26
    def __init__(self, gcn_model=GCNNet, data_root_path='/mnt/4T/Data/cifar/cifar-10',
                 ve_model_file_name="./ckpt/norm3/epoch_7.pkl", cos_sim_th=0.5,
                 root_ckpt_dir="./ckpt2/norm3", num_workers=8, use_gpu=True, gpu_id="1"):
        self.device = gpu_setup(use_gpu=use_gpu, gpu_id=gpu_id)
        _image_size = 32
        _sp_size = 4
        _sp_ve_size = 6
        _cos_sim_th = cos_sim_th
        _VEModel = EmbeddingNetCIFARSmallNorm3
        self.root_ckpt_dir = Tools.new_dir(root_ckpt_dir)

        self.train_dataset = MyDataset(data_root_path=data_root_path, is_train=True, cos_sim_th=_cos_sim_th,
                                       ve_model_file_name=ve_model_file_name, VEModel=_VEModel,
                                       image_size=_image_size, sp_size=_sp_size, sp_ve_size=_sp_ve_size)
        self.test_dataset = MyDataset(data_root_path=data_root_path, is_train=False, cos_sim_th=_cos_sim_th,
                                      ve_model_file_name=ve_model_file_name, VEModel=_VEModel,
                                      image_size=_image_size, sp_size=_sp_size, sp_ve_size=_sp_ve_size)

        self.train_loader = DataLoader(self.train_dataset, batch_size=64, shuffle=True,
                                       num_workers=num_workers, collate_fn=self.train_dataset.collate_fn)
        self.test_loader = DataLoader(self.test_dataset, batch_size=64, shuffle=False,
                                      num_workers=num_workers, collate_fn=self.test_dataset.collate_fn)

        self.model = gcn_model().to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.005, weight_decay=0.0)
        self.loss = nn.CrossEntropyLoss().to(self.device)
        pass
    def train_eval_ss(self, data_loader, epoch=0, model_file_name=None):
        if model_file_name is not None:
            Tools.print("Load model form {}".format(model_file_name), txt_path=self.config.ss_save_result_txt)
            self.load_model(model_file_name)
            pass

        self.net.eval()
        with torch.no_grad():
            for i, (inputs, labels, image_paths) in tqdm(enumerate(data_loader), total=len(data_loader)):
                inputs = inputs.float().cuda()
                outputs = self.net(inputs)
                preds = outputs.detach().max(dim=1)[1].cpu().numpy()

                for j, (pred_one, label_one, image_path) in enumerate(zip(preds, labels, image_paths)):
                    im = Image.open(image_path)
                    now_name = image_path.split("Data/DET/")[1]
                    result_filename = Tools.new_dir(os.path.join(self.config.ss_train_eval_save_dir, now_name))
                    im.save(result_filename)

                    DataUtil.gray_to_color(np.asarray(pred_one, dtype=np.uint8)).resize(im.size).save(
                        result_filename.replace(".JPEG", "_new.png"))
                    DataUtil.gray_to_color(np.asarray(label_one, dtype=np.uint8)).resize(im.size).save(
                        result_filename.replace(".JPEG", "_old.png"))
                    pred_one[(pred_one != label_one) & (label_one != 255)] = 255
                    DataUtil.gray_to_color(np.asarray(pred_one, dtype=np.uint8)).resize(im.size).save(
                        result_filename.replace(".JPEG", ".png"))
                    pass
                pass
            pass
        pass
    def __init__(self):
        self.gpu_id = "1, 2, 3"
        # self.gpu_id = "0, 1, 2, 3"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpu_id)

        # Flow control
        self.only_train = True  # whether to train
        self.only_eval = False
        self.only_train_debug = False
        self.is_supervised = True

        # Train
        self.sampling = False

        # Eval
        self.model_pth = None
        self.model_eval_dir = None
        self.model_pth = "../../../WSS_Model_My/DSS/5_DualNet_20_50_32_5_224/final_50.pth"
        self.model_eval_dir = "../../../WSS_Model_My/DEval/5_DualNet_20_50_32_5_224"

        # Debug
        self.model_resume_pth = "../../../WSS_Model_My/DSS/4_DualNet_20_100_32_5_224/50.pth"

        self.has_class = True
        self.has_cam = True
        self.has_ss = True

        # Pseudo labels
        if self.is_supervised:
            self.train_label_path = None
        else:
            self.train_label_path = "/mnt/4T/ALISURE/USS/ConTa/pseudo_mask_voc/result/2/sem_seg/train_aug"

        self.num_classes = 20
        self.lr = 0.001
        self.epoch_num = 80
        self.milestones = [40, 60]
        self.save_epoch_freq = 5
        self.eval_epoch_freq = 5

        # self.input_size = 352
        # self.input_size_val = 352
        # self.batch_size = 4 * len(self.gpu_id.split(","))
        self.input_size = 224
        self.input_size_val = 224
        self.batch_size = 8 * len(self.gpu_id.split(","))

        # Network
        self.Net, self.met_name = DualNet, "DualNet"
        self.data_root_path = self.get_data_root_path()

        run_name = "6"
        self.model_name = "{}_{}_{}_{}_{}_{}_{}_{}".format(
            run_name, self.met_name, self.num_classes, self.epoch_num,
            self.batch_size, self.save_epoch_freq, self.input_size, self.input_size_val)
        Tools.print(self.model_name)

        self.model_dir = "../../../WSS_Model_My/DSS/{}".format(self.model_name)
        self.save_result_txt = Tools.new_dir("{}/result.txt".format(self.model_dir))
        pass
class Config(object):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    # train_epoch = 300
    train_epoch = 180
    learning_rate = 0.0005
    num_workers = 8

    val_freq = 10

    num_way = 5
    num_shot = 1
    batch_size = 64

    episode_size = 15
    test_episode = 600

    z_dim = 512

    proto_net = ProtoResNet(low_dim=z_dim)
    pn_pretrain = "../models/ic_res_no_val/1_32_512_1_500_200_0.01_ic.pkl"

    model_name = "new_0_{}_{}_{}_{}".format(train_epoch, batch_size, z_dim, learning_rate)

    if "Linux" in platform.platform():
        data_root = '/mnt/4T/Data/data/miniImagenet'
        if not os.path.isdir(data_root):
            data_root = '/media/ubuntu/4T/ALISURE/Data/miniImagenet'
    else:
        data_root = "F:\\data\\miniImagenet"

    root_path = "../models_pn/fsl_res_pretrain_sgd"
    pn_dir = Tools.new_dir("{}/{}_pn_{}way_{}shot.pkl".format(root_path, model_name, num_way, num_shot))
    pass
    def __init__(self, data_root_path, batch_size=64, image_size=320, sp_size=4, pool_ratio=2, train_print_freq=100,
                 test_print_freq=50, root_ckpt_dir="./ckpt2", num_workers=8, use_gpu=True, gpu_id="1", is_sgd=False):
        self.train_print_freq = train_print_freq
        self.test_print_freq = test_print_freq

        self.root_ckpt_dir = Tools.new_dir(root_ckpt_dir)
        self.device = gpu_setup(use_gpu=use_gpu, gpu_id=gpu_id)

        self.train_dataset = MyDataset(data_root_path=data_root_path, is_train=True,
                                       image_size=image_size, sp_size=sp_size, pool_ratio=pool_ratio)
        self.test_dataset = MyDataset(data_root_path=data_root_path, is_train=False,
                                      image_size=image_size, sp_size=sp_size, pool_ratio=pool_ratio)

        self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True,
                                       num_workers=num_workers, collate_fn=self.train_dataset.collate_fn)
        self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False,
                                      num_workers=num_workers, collate_fn=self.test_dataset.collate_fn)

        self.model = MyGCNNet().to(self.device)

        if is_sgd:
            self.lr_s = [[0, 0.01], [50, 0.001], [80, 0.0001]]
            self.optimizer = torch.optim.SGD(self.model.parameters(),
                                             lr=self.lr_s[0][0], momentum=0.9, weight_decay=5e-4)
        else:
            self.lr_s = [[0, 0.001], [50, 0.0003], [75, 0.0001]]
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr_s[0][0], weight_decay=0.0)
            pass

        self.loss_class = nn.BCELoss().to(self.device)

        Tools.print("Total param: {}".format(self._view_model_param(self.model)))
        pass