def load_data(root_path, src, tar, batch_size):
    kwargs = {'num_workers': 1, 'pin_memory': True}
    loader_src = data_loader.load_training(root_path, src, batch_size, kwargs)
    loader_tar = data_loader.load_training(root_path, tar, batch_size, kwargs)
    loader_tar_test = data_loader.load_testing(
        root_path, tar, batch_size, kwargs)
    return loader_src, loader_tar, loader_tar_test
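The data_loader module itself is not included in these snippets. For reference, a minimal sketch of a module compatible with the load_training/load_testing calls above might look like the following; it assumes ImageFolder-style class sub-directories under root_path + directory, and the transforms and loader options are illustrative rather than the original implementation.

# Illustrative sketch of a compatible data_loader module (assumptions noted above).
import torch
from torchvision import datasets, transforms


def load_training(root_path, directory, batch_size, kwargs):
    transform = transforms.Compose([
        transforms.Resize([256, 256]),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()])
    data = datasets.ImageFolder(root=root_path + directory, transform=transform)
    return torch.utils.data.DataLoader(data, batch_size=batch_size,
                                       shuffle=True, drop_last=True, **kwargs)


def load_testing(root_path, directory, batch_size, kwargs):
    transform = transforms.Compose([
        transforms.Resize([224, 224]),
        transforms.ToTensor()])
    data = datasets.ImageFolder(root=root_path + directory, transform=transform)
    return torch.utils.data.DataLoader(data, batch_size=batch_size,
                                       shuffle=False, **kwargs)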
Example #2
def load_data():
    source_train_loader = data_loader.load_training(args.root_path,
                                                    args.source_dir,
                                                    args.batch_size, kwargs)
    target_train_loader = data_loader.load_training(args.root_path,
                                                    args.test_dir,
                                                    args.batch_size, kwargs)
    target_test_loader = data_loader.load_testing(args.root_path,
                                                  args.test_dir,
                                                  args.batch_size, kwargs)
    return source_train_loader, target_train_loader, target_test_loader
source_name = "amazon"
target_name = "webcam"

cuda = not no_cuda and torch.cuda.is_available()

torch.manual_seed(seed)
if cuda:
    torch.cuda.manual_seed(seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}

source_loader = data_loader.load_training(root_path, source_name, batch_size,
                                          kwargs)
target_train_loader = data_loader.load_training(root_path, target_name,
                                                batch_size, kwargs)
target_test_loader = data_loader.load_testing(root_path, target_name,
                                              batch_size, kwargs)

len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
len_source_loader = len(source_loader)
len_target_loader = len(target_train_loader)
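# len(loader.dataset) counts samples while len(loader) counts batches per epoch;
# the training loop typically uses these to align the source and target iterators.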


def load_pretrain(model):
    url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'
    pretrained_dict = model_zoo.load_url(url)
    model_dict = model.state_dict()
    for k, v in model_dict.items():
        # Skip the newly added classifier head; for every other parameter, drop the
        # leading sub-module prefix (e.g. "sharedNet.") so the key matches the
        # torchvision ResNet-50 state dict.
        if "cls_fc" not in k:
            model_dict[k] = pretrained_dict[k[k.find(".") + 1:]]
    model.load_state_dict(model_dict)
Example #4
l2_decay = 5e-4
root_path = "./dataset/"
source_name = "amazon"
target_name = "webcam"

cuda = not no_cuda and torch.cuda.is_available()

torch.manual_seed(seed)
if cuda:
    torch.cuda.manual_seed(seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}

source_loader = data_loader.load_training(root_path, source_name, batch_size, kwargs)
target_train_loader = data_loader.load_training(root_path, target_name, batch_size, kwargs)
target_test_loader = data_loader.load_testing(root_path, target_name, batch_size, kwargs)

len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
len_source_loader = len(source_loader)
len_target_loader = len(target_train_loader)

def load_pretrain(model):
    url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'
    pretrained_dict = model_zoo.load_url(url)
    model_dict = model.state_dict()
    for k, v in model_dict.items():
        if "cls_fc" not in k and "domain_fc" not in k:
            model_dict[k] = pretrained_dict[k[k.find(".") + 1:]]
    model.load_state_dict(model_dict)
    return model
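
A hypothetical usage of load_pretrain, assuming a DAN/DDC-style network whose backbone parameters live under a sub-module (e.g. sharedNet) and whose new heads are named cls_fc/domain_fc; the model class and class count below are illustrative:

# Hypothetical usage; DANNet and num_classes are illustrative, not from the snippet.
model = DANNet(num_classes=31)
model = load_pretrain(model)  # backbone now carries ImageNet ResNet-50 weights
if cuda:
    model.cuda()
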
def get_optimizer(model_name, learning_rate):
    # Signature matches the call in __main__ below; relies on the module-level model,
    # MOMENTUM and L2_DECAY. The newly added nfc layer gets 10x the base learning rate.
    optimizer = None
    if model_name == 'alexnet':
        optimizer = optim.SGD(params=[
            {'params': model.features.parameters()},
            {'params': model.classifier.parameters()},
            {'params': model.nfc.parameters(), 'lr': learning_rate * 10}
        ], lr=learning_rate, momentum=MOMENTUM, weight_decay=L2_DECAY)
    elif model_name == 'resnet':
        optimizer = optim.SGD(params=[
            {'params': model.features.parameters()},
            {'params': model.nfc.parameters(), 'lr': learning_rate * 10}
        ], lr=learning_rate, momentum=MOMENTUM, weight_decay=L2_DECAY)
    assert optimizer is not None
    return optimizer

if __name__ == '__main__':
    torch.manual_seed(10)
    root_dir = 'data/OFFICE31/'
    src, tar = 'amazon', 'webcam'
    model_name = 'alexnet'
    data_src, data_tar = data_loader.load_training(root_dir, src, BATCH_SIZE_SRC), \
                         data_loader.load_testing(root_dir, tar, BATCH_SIZE_TAR)
    print('Source:{}, target:{}'.format(src, tar))

    model = load_model(model_name).to(DEVICE)
    lrs = LEARNING_RATE
    for e in tqdm(range(1, N_EPOCH + 1)):
        tqdm.write('learning rate: ' + str(lrs))
        optimizer = get_optimizer(model_name, learning_rate=lrs)
        train(e, model, optimizer, data_src)
        test(e, model, data_tar)
        lrs = lr_decay(1e-4, N_EPOCH, e)
Example #6
from models import DANNet
from train_test import train, test
from data_loader import load_training, load_testing


# GPU setup
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

# Hyperparameters
batch_size = 256
num_classes = 6
epochs = 10

# Load the data
source_loader = load_training(root_path='../UCI HAR Dataset',
                              directory='/train/Inertial Signals', batch_size=batch_size)
target_train_loader = load_training(root_path='../UCI HAR Dataset',
                                    directory='/test/Inertial Signals', batch_size=batch_size)
target_test_loader = load_testing(root_path='../UCI HAR Dataset',
                                  directory='/test/Inertial Signals')

# Build the model
model = DANNet(num_classes=num_classes).to(device)
print(model)

loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
 
# Train and test
print('Begin train')
for epoch in range(1, epochs+1):
    train(source_loader, target_train_loader, model, optimizer, loss_fn, device, epochs, epoch)
    test(target_test_loader, model, device)
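
The train and test helpers above come from the repository's train_test module and are not shown here. As a rough illustration of what a DAN-style training step typically does (a source classification loss plus an MMD penalty between source and target features), a minimal sketch follows; the model's forward signature, the linear-kernel MMD, and the weight lam are assumptions, not the original code.

def mmd_linear(f_src, f_tgt):
    # Linear-kernel MMD between two batches of features (illustrative choice).
    delta = f_src.mean(0) - f_tgt.mean(0)
    return delta.dot(delta)


def train_sketch(source_loader, target_loader, model, optimizer, loss_fn, device, lam=0.5):
    # Assumes model(x) returns (logits, features); the real DANNet forward may differ.
    model.train()
    for (xs, ys), (xt, _) in zip(source_loader, target_loader):
        xs, ys, xt = xs.to(device), ys.to(device), xt.to(device)
        optimizer.zero_grad()
        logits_src, feat_src = model(xs)
        _, feat_tgt = model(xt)
        loss = loss_fn(logits_src, ys) + lam * mmd_linear(feat_src, feat_tgt)
        loss.backward()
        optimizer.step()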
Example #7
def get_optimizer(model_name, learning_rate):
    # Relies on the module-level model, MOMENTUM and L2_DECAY; the new nfc layer
    # gets 10x the base learning rate.
    optimizer = None
    if model_name == 'resnet':
        optimizer = optim.SGD(params=[{
            'params': model.features.parameters()
        }, {
            'params': model.nfc.parameters(),
            'lr': learning_rate * 10
        }],
                              lr=learning_rate,
                              momentum=MOMENTUM,
                              weight_decay=L2_DECAY)
    assert optimizer is not None
    return optimizer


if __name__ == '__main__':
    torch.manual_seed(10)
    root_dir = '../../../../data/Original_images/'
    src, tar = 'amazon/images', 'webcam/images'
    #    root_dir = 'C:/Users/beansprouts/Desktop/office31_raw_image/Original_images/'
    #    src, tar = 'amazon/images', 'webcam/images'
    #model_name = 'alexnet'
    model_name = 'resnet'
    data_src, data_tar = data_loader.load_training(root_dir, src, BATCH_SIZE_SRC), \
                         data_loader.load_testing(root_dir, tar, BATCH_SIZE_TAR)
    print('Source:{}, target:{}'.format(src, tar))

    model = load_model(model_name).to(DEVICE)

    model.load_state_dict(torch.load('../../../../data/models/params.pkl'))

    test(model=model, data_tar=data_tar)
Example #8
l2_decay = 5e-4
# l2_decay = 1e-4

# src_dir = './source_datasets_UN.mat'
# tar_dir = './target_datasets_UN.mat'

src_dir = './source_datasets_Eq_shuffle_HHT.mat'
tar_dir = './target_datasets_Eq_shuffle_HHT.mat'

src_name = "source_data_train"
tgt_train_name = "target_data_train"
tgt_test_name = "target_data_test"

src_loader = data_loader.load_training(src_dir, src_name, batch_size)
tgt_train_loader = data_loader.load_training(tar_dir, tgt_train_name, batch_size)
tgt_test_loader = data_loader.load_testing(tar_dir, tgt_test_name, batch_size)

src_dataset_len = len(src_loader.dataset)
tgt_train_dataset_len = len(tgt_train_loader.dataset)
tgt_test_dataset_len = len(tgt_test_loader.dataset)
src_loader_len = len(src_loader)
tgt_loader_len = len(tgt_train_loader)

print('Number of source-domain training samples: %d' % src_dataset_len)
print('Number of target-domain training samples: %d' % tgt_train_dataset_len)
print('Number of target-domain test samples: %d' % tgt_test_dataset_len)

print('Number of source-domain batches: %d' % src_loader_len)
print('Number of target-domain batches: %d' % tgt_loader_len)

# Set the number of iterations, and display the loss after each full pass over the data (every log_interval batches)
Example #9
# RSTL
# UCM WHU AID RSSCN7
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

source_loader = data_loader.load_training(args.root_path, args.source_dir,
                                          args.batch_size, kwargs)
target_train_loader = data_loader.load_training(args.root_path, args.test_dir,
                                                args.batch_size, kwargs)
target_test_loader = data_loader.load_testing(args.root_path, args.test_dir,
                                              args.batch_size, kwargs)

len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
len_source_loader = len(source_loader)
len_target_loader = len(target_train_loader)


def train(epoch, model):
    # Anneal the learning rate: lr / (1 + 10*p)^0.75, where p = (epoch - 1) / epochs
    LEARNING_RATE = args.lr / math.pow(
        (1 + 10 * (epoch - 1) / args.epochs), 0.75)
    print('learning rate: {:.4f}'.format(LEARNING_RATE))

    optimizer = torch.optim.SGD([
        {
            'params': model.sharedNet.parameters()
Example #10
target_path_train = "/home/fujiahui/Desktop/openset_caffe/office-31/D_target/images"  # split into training and test sets
target_path_test = "/home/fujiahui/Desktop/openset_caffe/office-31/D_target/images"


cuda = not no_cuda and torch.cuda.is_available()

torch.manual_seed(seed)
if cuda:
    torch.cuda.manual_seed(seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}

source_loader = data_loader.load_training(source_path, batch_size, kwargs)

target_train_loader = data_loader.load_training(target_path_train, batch_size, kwargs)
target_test_loader = data_loader.load_testing(target_path_test, batch_size, kwargs)

len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
print(len_source_dataset)
print(len_target_dataset)
len_source_loader = len(source_loader)
len_target_loader = len(target_train_loader)
print(len_source_loader)
print(len_target_loader)
# Load the pre-trained model


def load_pretrain(model, pre_trained):
    model_dict = model.state_dict()
from Weight import Weight
from Config import *
import time
import numpy as np
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_id

cuda = not no_cuda and torch.cuda.is_available()
#torch.manual_seed(seed)
#if cuda:
#    torch.cuda.manual_seed(seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
target_test_name = "DE_6205_1HP_3C_inner_inner7"
#source_loader = data_loader.load_training(root_path, source_name, 1500, kwargs)
#target_train_loader = data_loader.load_training(root_path, target_train_name, 1500, kwargs)
target_test_loader = data_loader.load_testing(root_path, target_test_name, 300, kwargs)

#len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
#len_source_loader = len(source_loader)
len_target_loader = len(target_test_loader)
#print(len_target_dataset)
#print(len_target_loader)
num_perclass = int(len_target_dataset / class_num)
print(num_perclass)


def Full_Probability_Evaluation(output_softmax):
    Evaluation_index = np.zeros(len_target_dataset)
    output_np = output_softmax.cpu().numpy()
    #print(output_np)
Example #12
def load_data(label, image):
    data_loader.pre_distribution_data(label, image)
    E = data_loader.load_testing(root_path, target_name, batch_size, kwargs)
    return E
Example #13
# Training settings
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id

cuda = torch.cuda.is_available()
if cuda:
    torch.cuda.manual_seed(opt.seed)

# Dataloader

kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}

source_loader = data_loader.load_training(opt.root_path, opt.source,
                                          opt.batch_size, kwargs)
target_train_loader = data_loader.load_training(opt.root_path, opt.target,
                                                opt.batch_size, kwargs)
target_test_loader = data_loader.load_testing(opt.root_path, opt.target,
                                              opt.batch_size, kwargs)

len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
len_source_loader = len(source_loader)
len_target_loader = len(target_train_loader)
nclasses = len(source_loader.dataset.classes)

###################################


# Training routine for one epoch
def train(epoch, model):

    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    loss_class = torch.nn.CrossEntropyLoss()