Beispiel #1
0
        args.data, args.type, args.kmeans, datetime.now()))
    """ normal training data """
    # train_dataset = dataloaders.MvtecLoader(train_path)
    # train_loader = DataLoader(train_dataset, batch_size=args.train_batch)
    """ weight sampling with noise patch in training data """
    # Training samples carry per-sample weights (exposed by the noise-patch
    # dataloader); the WeightedRandomSampler draws according to those weights,
    # one draw per sample (num_samples == len(samples_weights)).
    train_dataset = dataloaders.NoisePatchDataloader(train_path, label_name,
                                                     left_i_path, left_j_path)
    samples_weights = torch.from_numpy(train_dataset.samples_weights)
    # Sampler weights are cast to double precision before use.
    sampler = WeightedRandomSampler(samples_weights.type('torch.DoubleTensor'),
                                    len(samples_weights))
    train_loader = DataLoader(train_dataset,
                              batch_size=args.train_batch,
                              num_workers=1,
                              sampler=sampler)

    # Test images and their ground-truth defect masks; batch_size=1 and
    # shuffle=False keep image/mask pairs aligned by iteration order.
    test_dataset = dataloaders.MvtecLoader(test_path)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
    mask_dataset = dataloaders.MaskLoader(defect_gt_path)
    mask_loader = DataLoader(mask_dataset, batch_size=1, shuffle=False)

    # Held-out evaluation split with matching masks.
    eval_dataset = dataloaders.MvtecLoader(eval_path)
    eval_loader = DataLoader(eval_dataset, batch_size=1, shuffle=False)
    eval_mask_dataset = dataloaders.MaskLoader(eval_mask_path)
    eval_mask_loader = DataLoader(eval_mask_dataset,
                                  batch_size=1,
                                  shuffle=False)

    scratch_model = nn.DataParallel(scratch_model).to(device)
    # NOTE(review): args.pretrain is compared against the literal string
    # 'True' (argparse delivers strings), so only exactly `--pretrain True`
    # triggers checkpoint loading -- confirm this is intended.
    if (args.pretrain == 'True'):
        scratch_model.load_state_dict(
            torch.load('models/{}/{}/exp6_128_5.ckpt'.format(
### Command-line configuration for the clustering / evaluation script.
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='bottle')
parser.add_argument('--kmeans', type=int, default=128)
parser.add_argument('--type', type=str, default='all')
parser.add_argument('--index', type=int, default=30)
parser.add_argument('--image_size', type=int, default=256)
parser.add_argument('--patch_size', type=int, default=16)
parser.add_argument('--dim_reduction', type=str, default='PCA')
args = parser.parse_args()

### Randomly-initialised ResNet-50 with one output per k-means cluster,
### wrapped for multi-GPU execution.
scratch_model = nn.Sequential(
    resnet.resnet50(pretrained=False, num_classes=args.kmeans))
scratch_model = nn.DataParallel(scratch_model).cuda()

### Defect-free training images (batch size 1, deterministic order).
train_path = f"{ROOT}/dataset/{args.data}/train_resize/good/"
train_dataset = dataloaders.MvtecLoader(train_path)
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False)

### Test split covering every defect type.
test_path = f"{ROOT}/dataset/{args.data}/test_resize/all/"
test_dataset = dataloaders.MvtecLoader(test_path)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

### Defect-free test split.
test_good_path = f"{ROOT}/dataset/{args.data}/test_resize/good/"
test_good_dataset = dataloaders.MvtecLoader(test_good_path)
test_good_loader = DataLoader(test_good_dataset, batch_size=1, shuffle=False)

### Ground-truth defect masks for the "all" test split.
mask_path = f"{ROOT}/dataset/{args.data}/ground_truth_resize/all/"
mask_dataset = dataloaders.MaskLoader(mask_path)
mask_loader = DataLoader(mask_dataset, batch_size=1, shuffle=False)

## Models
    """ set parameters """
    parser = argparse.ArgumentParser()
    parser.add_argument('--kmeans', type=int, default=128)
    parser.add_argument('--data', type=str, default='bottle')
    parser.add_argument('--index', type=int, default=30)
    # NOTE(review): argparse `type=bool` is a known pitfall -- bool('False')
    # is True, so any non-empty command-line value enables resume; only the
    # default is reliable. Consider a string flag or store_true instead.
    parser.add_argument('--resume', type=bool, default=True)
    parser.add_argument('--image_size', type=int, default=1024)
    parser.add_argument('--patch_size', type=int, default=64)
    args = parser.parse_args()

    global_index = args.index
    test_data = args.data

    ### DataSet for all defect type
    test_all_path = f"{ ROOT }/dataset/{ args.data }/test_resize/all/"
    test_all_dataset = dataloaders.MvtecLoader(test_all_path)
    test_all_loader = DataLoader(test_all_dataset, batch_size=1, shuffle=False)

    # Defect-free test split.
    test_good_path = f"{ ROOT }/dataset/{ args.data }/test_resize/good/"
    test_good_dataset = dataloaders.MvtecLoader(test_good_path)
    test_good_loader = DataLoader(test_good_dataset,
                                  batch_size=1,
                                  shuffle=False)

    # Ground-truth masks matching the "all" test split.
    mask_path = f"{ ROOT }/dataset/{ args.data }/ground_truth_resize/all/"
    mask_dataset = dataloaders.MaskLoader(mask_path)
    mask_loader = DataLoader(mask_dataset, batch_size=1, shuffle=False)

    print("----- defect -----")
    # Resume from a cached feature pickle when present.
    # (Fragment is truncated here: the isfile(...) call is cut off mid-expression.)
    if args.resume and os.path.isfile(
            f"{ ROOT }/Results/testing_multiMap/artificial/{ args.data }/all/128_img_all_feature_{ args.index }_Origin.pickle"
            for j in range(16):
                if ((i==0 or i ==15) and (j<=4 or j>=11)) or ((i==1 or i ==14) and (j<=2 or j>=13)) or ((i==2 or i==12) and (j<=1 or j>=14)) or ((i==3 or i==4 or i==5 or i==10 or i==11) and (j==0 or j ==15)):
                    white_training_features.append( training_features[k*256 + i*16 + j, :] )
                    white_training_index.append(k*256 + i*16 + j)
                else:
                    other_training_features.append( training_features[i*256 + j*16 + k, :] )

    white_training_index = np.array(white_training_index)
    white_training_features = np.array(white_training_features)
    other_training_features = np.array(other_training_features)

    """ Load testing features """
    # Category name may carry a suffix (e.g. "bottle_x"); strip it to locate
    # the dataset directory.
    test_normal_path = f"{ ROOT }/dataset/{ args.data.split('_')[0] }/test_resize/good/"
    test_all_path = f"{ ROOT }/dataset/{ args.data.split('_')[0] }/test_resize/all/"

    test_normal_dataset = dataloaders.MvtecLoader(test_normal_path)
    test_normal_loader = DataLoader(test_normal_dataset, batch_size=1, shuffle=False)

    test_all_dataset = dataloaders.MvtecLoader(test_all_path)
    # BUG FIX: this loader was built from test_normal_dataset, so the "all"
    # loader silently iterated the defect-free split; wrap test_all_dataset.
    test_all_loader = DataLoader(test_all_dataset, batch_size=1, shuffle=False)

    # Accumulators for border-patch features of each split.
    test_normal_white_features = []
    test_all_white_features = []

    # Collect border ("white frame") patch features from the defect-free
    # test split. (Fragment truncated below: the if-body is not visible.)
    for idx, img in tqdm(test_normal_loader):
        img = img.cuda()
        idx = idx[0].item()  # unwrap the 1-element index tensor

        # 16x16 patch grid per image; the condition selects the same border
        # cells as the training-feature split earlier in the file.
        for i in range(16):
            for j in range(16):
                if ((i==0 or i ==15) and (j<=4 or j>=11)) or ((i==1 or i ==14) and (j<=2 or j>=13)) or ((i==2 or i==12) and (j<=1 or j>=14)) or ((i==3 or i==4 or i==5 or i==10 or i==11) and (j==0 or j ==15)):
Beispiel #5
0
        # Ensure the per-category SSL test-label directory exists.
        # os.makedirs(..., exist_ok=True) replaces the isdir-then-makedirs
        # pattern: it is race-free and avoids duplicating the path literal.
        os.makedirs(
            f"{ ROOT }/preprocessData/label/ssl/{ args.dim_reduction }/{ args.data }/test/",
            exist_ok=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_path = f"{ ROOT }/preprocess_SSL/SSL/KNN/exp3/{ args.data }/2048_2000.pth"

    # Pretrained self-supervised feature extractor, used in inference mode only.
    pretrain_model = ssl_model.Model()
    pretrain_model = nn.DataParallel(pretrain_model).cuda()
    pretrain_model.load_state_dict(torch.load(model_path))
    pretrain_model.eval()

    # Deterministic, one-image-at-a-time iteration over the training split.
    train_dataset = dataloaders.MvtecLoader(path)
    train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False)

    img_index_list = []
    """ kmeans version """

    image_list = []
    # (Fragment truncated below: the per-image patch extraction body is cut off.)
    for idx, img in tqdm(train_loader):
        img = img.to(device)
        idx = idx[0].item()  # unwrap the 1-element index tensor

        patch_index_list = []

        # Patches per image side; assumes image_size is divisible by
        # patch_size -- int() truncates otherwise. TODO confirm.
        chunk_num = int(args.image_size / args.patch_size)

        patch_list = []
Beispiel #6
0
    parser.add_argument('--fine_tune_epoch', type=int, default=0)
    args = parser.parse_args()

    print('data: ', args.data)
    print('patch size: ', args.patch_size)
    print('image size: ', args.image_size)
    """" """
    patch_list = []
    patch_i = []
    patch_j = []

    model = model.to(device)
    # if args.fine_tune_epoch != 0:
    #     model.load_state_dict(torch.load(f"/train-data2/corn/fine-tune-models/{ args.data.split('_')[0] }/{ args.fine_tune_epoch }.ckpt"))
    """ Load dataset """
    # Defect-free training images and the all-defect test split, iterated
    # one image at a time in deterministic order.
    train_dataset = dataloaders.MvtecLoader(
        f"{ ROOT }/dataset/{ args.data }/train_resize/good/")
    train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False)

    test_dataset = dataloaders.MvtecLoader(
        f"{ ROOT }/dataset/{ args.data }/test_resize/all/")
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

    # (Fragment truncated below: the loop body after the jitter draws is cut off.)
    for idx, img in tqdm(train_loader):
        model.train()
        # Walk the patch grid; jitter each patch origin by a random offset in
        # [-patch_size/2, patch_size/2] along each axis.
        for i in range(int(args.image_size / args.patch_size)):
            for j in range(int(args.image_size / args.patch_size)):
                noise_i = random.randint(-1 * int(args.patch_size / 2),
                                         int(args.patch_size / 2))
                noise_j = random.randint(-1 * int(args.patch_size / 2),
                                         int(args.patch_size / 2))
Beispiel #7
0
# Keep only pretrained weights whose keys also exist in the current model,
# then merge them into the model's own state dict.
pre_model = {k: v for k, v in pre_model.items() if k in model_dict}
model_dict.update(pre_model)
# BUG FIX: load the merged model_dict, not the filtered pre_model -- loading
# the partial checkpoint alone raises "Missing key(s)" for every parameter
# absent from the checkpoint and discards the merge done above.
model.load_state_dict(model_dict)
""" save chunks of training datas to fit the corresponding kmeans """

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default='bottle')
    args = parser.parse_args()

    print('data: ', args.data)
    """ load dataset """
    # Defect-free training images, iterated one at a time in fixed order.
    train_dataset = dataloaders.MvtecLoader(ROOT + '/dataset/' + args.data +
                                            '/train_resize/good/')
    train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False)
    model = model.to(device)
    model.train()
    patch_list = []
    patch_i = []
    patch_j = []

    # (Fragment truncated below: only the start of the jitter logic is visible.)
    for idx, img in tqdm(train_loader):
        # 16x16 grid of 64-px patches over a 1024-px image.
        for i in range(16):
            for j in range(16):
                # Random jitter of up to half a patch (32 px) per axis.
                noise_i = random.randint(-32, 32)
                noise_j = random.randint(-32, 32)

                # Drop the vertical jitter when the shifted patch would fall
                # outside the 1024-px image bounds.
                if (i * 64 + 64 + noise_i > 1024 or i * 64 + noise_i < 0):
                    noise_i = 0