import numpy as np
import torch
from torch.utils.data import DataLoader
# ImageList / ImageList_idx datasets and image_train() / image_test() transforms
# are expected to be defined elsewhere in the repository; a hedged sketch of what
# they are assumed to look like follows the first loader below.


def data_load(args):
    ## prepare data: labeled / unlabeled target splits for semi-supervised adaptation
    dsets = {}
    dset_loaders = {}
    train_bs = args.batch_size // 2
    dsets["target_l"] = ImageList_idx(open(args.tl_dset_path).readlines(), transform=image_train())
    dsets["target_u"] = ImageList_idx(open(args.tu_dset_path).readlines(), transform=image_train())
    dsets["test"] = ImageList_idx(open(args.tu_dset_path).readlines(), transform=image_test())
    dset_loaders["target_l"] = DataLoader(dsets["target_l"], batch_size=train_bs, shuffle=True,
                                          num_workers=args.worker, drop_last=False)
    dset_loaders["target_u"] = DataLoader(dsets["target_u"], batch_size=train_bs, shuffle=True,
                                          num_workers=args.worker, drop_last=False)
    dset_loaders["test"] = DataLoader(dsets["test"], batch_size=train_bs * 3, shuffle=False,
                                      num_workers=args.worker, drop_last=False)
    return dset_loaders

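# The loaders above and below rely on list-file datasets and torchvision-style
# transforms defined elsewhere in the repo. What follows is a minimal sketch of
# those assumed helpers, inferred from how they are called (lines of
# "image_path label", datasets that also return the sample index); it is an
# assumption, not the repository's actual implementation.
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms


def image_train(resize_size=256, crop_size=224):
    # assumed training transform: resize, random crop/flip, ImageNet normalization
    return transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.RandomCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])


def image_test(resize_size=256, crop_size=224):
    # assumed evaluation transform: resize, center crop, ImageNet normalization
    return transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])


class ImageList_idx(Dataset):
    # assumed dataset over "path label" lines that also returns the sample index
    def __init__(self, image_list, transform=None):
        self.imgs = [(l.split()[0], int(l.split()[1])) for l in image_list]
        self.transform = transform

    def __getitem__(self, index):
        path, target = self.imgs[index]
        img = Image.open(path).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, target, index

    def __len__(self):
        return len(self.imgs)


class ImageList(ImageList_idx):
    # assumed variant that returns only (image, label), without the index
    def __getitem__(self, index):
        img, target, _ = super().__getitem__(index)
        return img, target
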
def data_load(args):
    ## prepare data: target / test loaders; for non-'uda' settings, target labels are
    ## remapped to source-class indices and target-only classes collapse to one
    ## "unknown" label equal to len(label_map_s)
    dsets = {}
    dset_loaders = {}
    train_bs = args.batch_size
    txt_tar = open(args.t_dset_path).readlines()
    txt_test = open(args.test_dset_path).readlines()

    if not args.da == 'uda':
        label_map_s = {}
        for i in range(len(args.src_classes)):
            label_map_s[args.src_classes[i]] = i

        new_tar = []
        for i in range(len(txt_tar)):
            rec = txt_tar[i]
            reci = rec.strip().split(' ')
            if int(reci[1]) in args.tar_classes:
                if int(reci[1]) in args.src_classes:
                    line = reci[0] + ' ' + str(label_map_s[int(reci[1])]) + '\n'
                    new_tar.append(line)
                else:
                    line = reci[0] + ' ' + str(len(label_map_s)) + '\n'
                    new_tar.append(line)
        txt_tar = new_tar.copy()
        txt_test = txt_tar.copy()

    dsets["target"] = ImageList_idx(txt_tar, transform=image_train())
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, shuffle=True,
                                        num_workers=args.worker, drop_last=False)
    dsets["test"] = ImageList_idx(txt_test, transform=image_test())
    dset_loaders["test"] = DataLoader(dsets["test"], batch_size=train_bs * 3, shuffle=False,
                                      num_workers=args.worker, drop_last=False)
    return dset_loaders

def data_load(args):
    dsets = {}
    dset_loaders = {}
    train_bs = args.batch_size
    txt_tar = open(args.t_dset_path).readlines()
    txt_test = open(args.test_dset_path).readlines()

    dsets['target'] = ImageList_idx(txt_tar, transform=image_train())
    dset_loaders['target'] = DataLoader(dsets['target'], batch_size=train_bs, shuffle=True,
                                        num_workers=args.worker, drop_last=False)
    dsets['test'] = ImageList_idx(txt_test, transform=image_test())
    dset_loaders['test'] = DataLoader(dsets['test'], batch_size=train_bs * 3, shuffle=False,
                                      num_workers=args.worker, drop_last=False)
    return dset_loaders

def data_load(args):
    ## prepare data: source train/val split plus target and test loaders
    dsets = {}
    dset_loaders = {}
    train_bs = args.batch_size
    txt_src = open(args.s_dset_path).readlines()
    txt_tar = open(args.t_dset_path).readlines()
    txt_test = open(args.test_dset_path).readlines()

    if args.trte == "val":
        # hold out 30% of the source list for validation
        dsize = len(txt_src)
        tr_size = int(0.7 * dsize)
        print(dsize, tr_size, dsize - tr_size)
        tr_txt, te_txt = torch.utils.data.random_split(txt_src, [tr_size, dsize - tr_size])
    else:
        tr_txt = txt_src
        te_txt = txt_src

    dsets["source_tr"] = ImageList(tr_txt, transform=image_train())
    dset_loaders["source_tr"] = DataLoader(dsets["source_tr"], batch_size=train_bs, shuffle=True,
                                           num_workers=args.worker, drop_last=False)
    dsets["source_te"] = ImageList(te_txt, transform=image_test())
    dset_loaders["source_te"] = DataLoader(dsets["source_te"], batch_size=train_bs, shuffle=True,
                                           num_workers=args.worker, drop_last=False)
    dsets["target"] = ImageList_idx(txt_tar, transform=image_train())
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, shuffle=True,
                                        num_workers=args.worker, drop_last=False)
    dsets["test"] = ImageList(txt_test, transform=image_test())
    dset_loaders["test"] = DataLoader(dsets["test"], batch_size=train_bs * 3, shuffle=False,
                                      num_workers=args.worker, drop_last=False)
    return dset_loaders

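# Note on the 'val' branch above (also used by the variants below):
# torch.utils.data.random_split is applied to a plain Python list of
# "path label" lines. That works because random_split only needs __len__ and
# __getitem__, and it returns Subset views that stay indexable, so
# ImageList(...) can consume them directly. A tiny illustrative check, with
# made-up filenames:
import torch

lines = ['a.jpg 0\n', 'b.jpg 1\n', 'c.jpg 0\n', 'd.jpg 1\n', 'e.jpg 2\n']
tr, te = torch.utils.data.random_split(lines, [4, 1])
print(len(tr), len(te), tr[0])  # 4 1 and one random line, e.g. 'c.jpg 0\n'
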
def data_load(args):
    ## prepare data: single source list, multiple target lists merged into one
    ## target set that is also reused as the test set
    dsets = {}
    dset_loaders = {}
    train_bs = args.batch_size
    txt_src = open(args.s_dset_path).readlines()
    txt_tar = []
    for i in range(len(args.t_dset_path)):
        tmp = open(args.t_dset_path[i]).readlines()
        txt_tar.extend(tmp)
    txt_test = txt_tar.copy()

    if args.trte == 'val':
        # hold out 10% of the source list for validation
        dsize = len(txt_src)
        tr_size = int(0.9 * dsize)
        print(dsize, tr_size, dsize - tr_size)
        tr_txt, te_txt = torch.utils.data.random_split(txt_src, [tr_size, dsize - tr_size])
    else:
        tr_txt = txt_src
        te_txt = txt_src

    dsets['source_tr'] = ImageList(tr_txt, transform=image_train())
    dset_loaders['source_tr'] = DataLoader(dsets['source_tr'], batch_size=train_bs, shuffle=True,
                                           num_workers=args.worker, drop_last=False)
    dsets['source_te'] = ImageList(te_txt, transform=image_test())
    dset_loaders['source_te'] = DataLoader(dsets['source_te'], batch_size=train_bs, shuffle=True,
                                           num_workers=args.worker, drop_last=False)
    dsets['target'] = ImageList_idx(txt_tar, transform=image_train())
    dset_loaders['target'] = DataLoader(dsets['target'], batch_size=train_bs, shuffle=True,
                                        num_workers=args.worker, drop_last=False)
    dsets['test'] = ImageList(txt_test, transform=image_test())
    dset_loaders['test'] = DataLoader(dsets['test'], batch_size=train_bs * 2, shuffle=False,
                                      num_workers=args.worker, drop_last=False)
    return dset_loaders

def data_load(args):
    ## prepare data: per-class source split, where the first 3 samples of each
    ## class form the validation set and the rest are used for training
    dsets = {}
    dset_loaders = {}
    train_bs = args.batch_size
    txt_src = open(args.s_dset_path).readlines()
    txt_tar = open(args.t_dset_path).readlines()
    txt_test = open(args.test_dset_path).readlines()

    count = np.zeros(args.class_num)
    tr_txt = []
    te_txt = []
    for i in range(len(txt_src)):
        line = txt_src[i]
        reci = line.strip().split(' ')
        if count[int(reci[1])] < 3:
            count[int(reci[1])] += 1
            te_txt.append(line)
        else:
            tr_txt.append(line)

    dsets["source_tr"] = ImageList(tr_txt, transform=image_train())
    dset_loaders["source_tr"] = DataLoader(dsets["source_tr"], batch_size=train_bs, shuffle=True,
                                           num_workers=args.worker, drop_last=False)
    dsets["source_te"] = ImageList(te_txt, transform=image_test())
    dset_loaders["source_te"] = DataLoader(dsets["source_te"], batch_size=train_bs, shuffle=True,
                                           num_workers=args.worker, drop_last=False)
    dsets["target"] = ImageList_idx(txt_tar, transform=image_train())
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, shuffle=True,
                                        num_workers=args.worker, drop_last=False)
    dsets["target_te"] = ImageList(txt_tar, transform=image_test())
    dset_loaders["target_te"] = DataLoader(dsets["target_te"], batch_size=train_bs, shuffle=False,
                                           num_workers=args.worker, drop_last=False)
    dsets["test"] = ImageList(txt_test, transform=image_test())
    dset_loaders["test"] = DataLoader(dsets["test"], batch_size=train_bs * 2, shuffle=False,
                                      num_workers=args.worker, drop_last=False)
    return dset_loaders

def data_load(args):
    ## prepare data: multiple target lists merged into one target/test set;
    ## note that txt_src is read here but never used in this variant
    dsets = {}
    dset_loaders = {}
    train_bs = args.batch_size
    txt_src = open(args.s_dset_path).readlines()
    txt_tar = []
    for i in range(len(args.t_dset_path)):
        tmp = open(args.t_dset_path[i]).readlines()
        txt_tar.extend(tmp)
    txt_test = txt_tar.copy()

    dsets["target"] = ImageList_idx(txt_tar, transform=image_train())
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, shuffle=True,
                                        num_workers=args.worker, drop_last=False)
    dsets["test"] = ImageList(txt_test, transform=image_test())
    dset_loaders["test"] = DataLoader(dsets["test"], batch_size=train_bs * 2, shuffle=False,
                                      num_workers=args.worker, drop_last=False)
    return dset_loaders

def data_load(args):
    ## prepare data: source train/val split plus target and test loaders; for
    ## non-'uda' settings, source and target labels are remapped to source-class
    ## indices, with target-only classes collapsed to one "unknown" label
    dsets = {}
    dset_loaders = {}
    train_bs = args.batch_size
    txt_src = open(args.s_dset_path).readlines()
    txt_tar = open(args.t_dset_path).readlines()
    txt_test = open(args.test_dset_path).readlines()

    if not args.da == 'uda':
        label_map_s = {}
        for i in range(len(args.src_classes)):
            label_map_s[args.src_classes[i]] = i

        new_src = []
        for i in range(len(txt_src)):
            rec = txt_src[i]
            reci = rec.strip().split(' ')
            if int(reci[1]) in args.src_classes:
                line = reci[0] + ' ' + str(label_map_s[int(reci[1])]) + '\n'
                new_src.append(line)
        txt_src = new_src.copy()

        new_tar = []
        for i in range(len(txt_tar)):
            rec = txt_tar[i]
            reci = rec.strip().split(' ')
            if int(reci[1]) in args.tar_classes:
                if int(reci[1]) in args.src_classes:
                    line = reci[0] + ' ' + str(label_map_s[int(reci[1])]) + '\n'
                    new_tar.append(line)
                else:
                    line = reci[0] + ' ' + str(len(label_map_s)) + '\n'
                    new_tar.append(line)
        txt_tar = new_tar.copy()
        txt_test = txt_tar.copy()

    if args.trte == 'val':
        # hold out 10% of the source list for validation
        dsize = len(txt_src)
        tr_size = int(0.9 * dsize)
        print(dsize, tr_size, dsize - tr_size)
        tr_txt, te_txt = torch.utils.data.random_split(txt_src, [tr_size, dsize - tr_size])
    else:
        tr_txt = txt_src
        te_txt = txt_src

    dsets['source_tr'] = ImageList(tr_txt, transform=image_train())
    dset_loaders['source_tr'] = DataLoader(dsets['source_tr'], batch_size=train_bs, shuffle=True,
                                           num_workers=args.worker, drop_last=False)
    dsets['source_te'] = ImageList(te_txt, transform=image_test())
    dset_loaders['source_te'] = DataLoader(dsets['source_te'], batch_size=train_bs, shuffle=True,
                                           num_workers=args.worker, drop_last=False)
    dsets['target'] = ImageList_idx(txt_tar, transform=image_train())
    dset_loaders['target'] = DataLoader(dsets['target'], batch_size=train_bs, shuffle=True,
                                        num_workers=args.worker, drop_last=False)
    dsets['test'] = ImageList_idx(txt_test, transform=image_test())
    dset_loaders['test'] = DataLoader(dsets['test'], batch_size=train_bs * 3, shuffle=False,
                                      num_workers=args.worker, drop_last=False)
    return dset_loaders
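
# A hedged usage sketch for the last variant above. The attribute names mirror
# what the function reads from `args`; the paths and values are placeholders
# chosen for illustration, not files shipped with any dataset.
import argparse

if __name__ == '__main__':
    args = argparse.Namespace(
        batch_size=64,
        worker=4,
        da='uda',                              # 'uda' skips the class-remapping branch
        trte='val',                            # 'val' holds out 10% of the source list
        s_dset_path='data/source_list.txt',    # placeholder: lines of "image_path label"
        t_dset_path='data/target_list.txt',    # placeholder
        test_dset_path='data/target_list.txt', # placeholder
        src_classes=None,                      # only consulted when da != 'uda'
        tar_classes=None,
    )
    loaders = data_load(args)
    # the target loader is built on ImageList_idx, so each batch carries indices
    inputs, labels, idx = next(iter(loaders['target']))
    print(inputs.shape, labels.shape, idx.shape)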