# train_dataset = dataloaders.MvtecLoader(train_path)
# train_loader = DataLoader(train_dataset, batch_size=args.train_batch)

""" weight sampling with noise patch in training data """
train_dataset = dataloaders.NoisePatchDataloader(train_path, label_name, left_i_path, left_j_path)
samples_weights = torch.from_numpy(train_dataset.samples_weights)
# Oversample training patches according to their per-sample weights.
# .double() replaces the deprecated string-form .type('torch.DoubleTensor')
# call; both produce a float64 tensor, as WeightedRandomSampler expects.
sampler = WeightedRandomSampler(samples_weights.double(), len(samples_weights))
train_loader = DataLoader(train_dataset, batch_size=args.train_batch, num_workers=1, sampler=sampler)

# Test images and the corresponding ground-truth masks (defect_gt_path).
test_dataset = dataloaders.MvtecLoader(test_path)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
mask_dataset = dataloaders.MaskLoader(defect_gt_path)
mask_loader = DataLoader(mask_dataset, batch_size=1, shuffle=False)

# Evaluation images and their ground-truth masks.
eval_dataset = dataloaders.MvtecLoader(eval_path)
eval_loader = DataLoader(eval_dataset, batch_size=1, shuffle=False)
eval_mask_dataset = dataloaders.MaskLoader(eval_mask_path)
eval_mask_loader = DataLoader(eval_mask_dataset, batch_size=1, shuffle=False)

# Wrap for multi-GPU execution and move to the target device.
scratch_model = nn.DataParallel(scratch_model).to(device)

# NOTE(review): args.pretrain is compared against the literal string 'True',
# so it is presumably declared as a str argparse option — confirm in the parser.
if (args.pretrain == 'True'):
    scratch_model.load_state_dict(
        torch.load('models/{}/{}/exp6_128_5.ckpt'.format(
            args.model, args.data)))
    print("--- Load pretrain model ---")
scratch_model = nn.DataParallel(scratch_model).cuda()

# Training set: defect-free ("good") resized images.
train_path = "{}/dataset/{}/train_resize/good/".format(ROOT, args.data)
train_dataset = dataloaders.MvtecLoader(train_path)
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False)

### DataSet for all defect type
test_path = "{}/dataset/{}/test_resize/all/".format(ROOT, args.data)
test_dataset = dataloaders.MvtecLoader(test_path)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

# Defect-free test images, kept separate from the defect test set above.
test_good_path = "{}/dataset/{}/test_resize/good/".format(ROOT, args.data)
test_good_dataset = dataloaders.MvtecLoader(test_good_path)
test_good_loader = DataLoader(test_good_dataset, batch_size=1, shuffle=False)

# Ground-truth masks matching the "all" defect test set.
mask_path = "{}/dataset/{}/ground_truth_resize/all/".format(ROOT, args.data)
mask_dataset = dataloaders.MaskLoader(mask_path)
mask_loader = DataLoader(mask_dataset, batch_size=1, shuffle=False)

## Models
# Self-supervised (SSL) feature extractor checkpoint; loaded once and frozen.
model_path = f"{ ROOT }/preprocess_SSL/SSL/KNN/exp3/{ args.data }/2048_2000.pth"
# Fix RNG seeds so model initialisation / sampling is reproducible.
torch.manual_seed(0)
np.random.seed(0)
pretrain_model = ssl_model.Model()
pretrain_model = nn.DataParallel(pretrain_model).cuda()
pretrain_model.load_state_dict(torch.load(model_path))
pretrain_model.eval()  # inference mode only — weights are not trained here

## Clusters
# NOTE: this statement continues past the end of this chunk.
kmeans_path = "{}/preprocessData/kmeans/{}/{}/ssl_{}_100_128.pickle".format(
""" Summary Writer """ writer = SummaryWriter(log_dir=f"{ RESULT_PATH }/Artificial_{args.data}_mask_{ args.with_mask }_patch_{ args.patch_size }_type_{ args.type }_kmeans_{ args.kmeans }_{ datetime.now() }") """ weight sampling with noise patch in training data """ train_dataset = dataloaders.NoisePatchDataloader(train_path, label_name, left_i_path, left_j_path) samples_weights = torch.from_numpy(train_dataset.samples_weights) sampler = WeightedRandomSampler(samples_weights.type('torch.DoubleTensor'), len(samples_weights)) train_loader = DataLoader(train_dataset, batch_size=args.train_batch, num_workers=1, sampler=sampler) # testing set (normal data) test_dataset = dataloaders.MvtecLoader(test_path) test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False) # testing set (defect data) eval_dataset = dataloaders.MvtecLoader(eval_path) eval_loader = DataLoader(eval_dataset, batch_size=1, shuffle=False) eval_mask_dataset = dataloaders.MaskLoader(eval_mask_path) eval_mask_loader = DataLoader(eval_mask_dataset, batch_size=1, shuffle=False) ## Cluster Center Features center_features_path = f"{ ROOT }/preprocessData/cluster_center/artificial/{ args.data }/{ args.kmeans }.pickle" cluster_features = pickle.load(open(center_features_path, "rb")) scratch_model = nn.DataParallel(scratch_model).to(device) epoch_num = 0 """ training config """ criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(scratch_model.parameters(), lr = args.lr) iter_count = 1