Example #1
def load_data(root_path, src, tar, batch_size):
    kwargs = {'num_workers': 1, 'pin_memory': True}
    loader_src = data_loader.load_training(root_path, src, batch_size, kwargs)
    loader_tar = data_loader.load_training(root_path, tar, batch_size, kwargs)
    loader_tar_test = data_loader.load_testing(
        root_path, tar, batch_size, kwargs)
    return loader_src, loader_tar, loader_tar_test
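
All of the examples on this page call data_loader.load_training / data_loader.load_testing. A minimal sketch of what such a module might look like, assuming an ImageFolder-style layout (root_path/&lt;directory&gt;/&lt;class&gt;/*.jpg); the actual implementations vary between repositories:

import torch
from torchvision import datasets, transforms

def load_training(root_path, directory, batch_size, kwargs):
    # Standard augmentation for domain-adaptation training.
    transform = transforms.Compose([
        transforms.Resize([256, 256]),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()])
    data = datasets.ImageFolder(root=root_path + directory, transform=transform)
    return torch.utils.data.DataLoader(data, batch_size=batch_size,
                                       shuffle=True, drop_last=True, **kwargs)

def load_testing(root_path, directory, batch_size, kwargs):
    transform = transforms.Compose([
        transforms.Resize([224, 224]),
        transforms.ToTensor()])
    data = datasets.ImageFolder(root=root_path + directory, transform=transform)
    return torch.utils.data.DataLoader(data, batch_size=batch_size,
                                       shuffle=False, **kwargs)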
Example #2
def load_data():
    source_train_loader = data_loader.load_training(args.root_path,
                                                    args.source_dir,
                                                    args.batch_size, kwargs)
    target_train_loader = data_loader.load_training(args.root_path,
                                                    args.test_dir,
                                                    args.batch_size, kwargs)
    target_test_loader = data_loader.load_testing(args.root_path,
                                                  args.test_dir,
                                                  args.batch_size, kwargs)
    return source_train_loader, target_train_loader, target_test_loader
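
Example #2 assumes that args and kwargs already exist at module scope; they typically come from an argparse block like the one shown in Example #10. A hypothetical minimal setup:

import argparse
import torch

parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str, default='./dataset/')
parser.add_argument('--source_dir', type=str, default='amazon')
parser.add_argument('--test_dir', type=str, default='webcam')
parser.add_argument('--batch_size', type=int, default=32)
args = parser.parse_args()
kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}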
Example #3
seed = 8
log_interval = 10
l2_decay = 5e-4
root_path = "./dataset/"
source_name = "amazon"
target_name = "webcam"

cuda = not no_cuda and torch.cuda.is_available()

torch.manual_seed(seed)
if cuda:
    torch.cuda.manual_seed(seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}

source_loader = data_loader.load_training(root_path, source_name, batch_size,
                                          kwargs)
target_train_loader = data_loader.load_training(root_path, target_name,
                                                batch_size, kwargs)
target_test_loader = data_loader.load_testing(root_path, target_name,
                                              batch_size, kwargs)

len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
len_source_loader = len(source_loader)
len_target_loader = len(target_train_loader)


def load_pretrain(model):
    url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'
    pretrained_dict = model_zoo.load_url(url)
    model_dict = model.state_dict()
Example #4
seed = 8
log_interval = 10
l2_decay = 5e-4
root_path = "./dataset/"
source_name = "amazon"
target_name = "webcam"

cuda = not no_cuda and torch.cuda.is_available()

torch.manual_seed(seed)
if cuda:
    torch.cuda.manual_seed(seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}

source_loader = data_loader.load_training(root_path, source_name, batch_size, kwargs)
target_train_loader = data_loader.load_training(root_path, target_name, batch_size, kwargs)
target_test_loader = data_loader.load_testing(root_path, target_name, batch_size, kwargs)

len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
len_source_loader = len(source_loader)
len_target_loader = len(target_train_loader)

def load_pretrain(model):
    url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'
    pretrained_dict = model_zoo.load_url(url)
    model_dict = model.state_dict()
    for k, v in model_dict.items():
        if "cls_fc" not in k and "domain_fc" not in k:
            model_dict[k] = pretrained_dict[k[k.find(".") + 1:]]
    model.load_state_dict(model_dict)
    return model
Example #5
        optimizer = optim.SGD(params=[
            {'params': model.features.parameters()},
            {'params': model.classifier.parameters()},
            {'params': model.nfc.parameters(), 'lr': learning_rate * 10}
        ], lr=learning_rate, momentum=MOMENTUM, weight_decay=L2_DECAY)
    elif model_name == 'resnet':
        optimizer = optim.SGD(params=[
            {'params': model.features.parameters()},
            {'params': model.nfc.parameters(), 'lr': learning_rate * 10}
        ], lr=learning_rate, momentum=MOMENTUM, weight_decay=L2_DECAY)
    assert optimizer is not None
    return optimizer

if __name__ == '__main__':
    torch.manual_seed(10)
    root_dir = 'data/OFFICE31/'
    src, tar = 'amazon', 'webcam'
    model_name = 'alexnet'
    data_src, data_tar = data_loader.load_training(root_dir, src, BATCH_SIZE_SRC), \
                         data_loader.load_testing(root_dir, tar, BATCH_SIZE_TAR)
    print('Source:{}, target:{}'.format(src, tar))

    model = load_model(model_name).to(DEVICE)
    lrs = LEARNING_RATE
    for e in tqdm(range(1, N_EPOCH + 1)):
        tqdm.write('learning rate: ' + str(lrs))
        optimizer = get_optimizer(model_name, learning_rate=lrs)
        train(e, model, optimizer, data_src)
        test(e, model, data_tar)
        lrs = lr_decay(1e-4, N_EPOCH, e)
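
The lr_decay helper called above is not shown. A plausible implementation, assuming it follows the same inverse-decay schedule lr_p = lr_0 / (1 + 10p)^0.75 that Example #10 computes explicitly:

import math

def lr_decay(init_lr, total_epochs, epoch):
    # p is the training progress in [0, 1].
    p = (epoch - 1) / total_epochs
    return init_lr / math.pow(1 + 10 * p, 0.75)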
Example #6
seed = 8
log_interval = 10
l2_decay = 5e-4
root_path = "/data/zhuyc/OFFICE31/"
src_name = "amazon"
tgt_name = "dslr"

cuda = not no_cuda and torch.cuda.is_available()

torch.manual_seed(seed)
if cuda:
    torch.cuda.manual_seed(seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}

src_loader = data_loader.load_training(root_path, src_name, batch_size, kwargs)
tgt_train_loader = data_loader.load_training(root_path, tgt_name, batch_size,
                                             kwargs)
tgt_test_loader = data_loader.load_testing(root_path, tgt_name, batch_size,
                                           kwargs)

src_dataset_len = len(src_loader.dataset)
tgt_dataset_len = len(tgt_test_loader.dataset)
src_loader_len = len(src_loader)
tgt_loader_len = len(tgt_train_loader)


def train(model):
    src_iter = iter(src_loader)
    tgt_iter = iter(tgt_train_loader)
    correct = 0
Example #7
import torch
from torch import nn, optim

from models import DANNet
from train_test import train, test
from data_loader import load_training, load_testing


# GPU setup
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

# Hyperparameter settings
batch_size = 256
num_classes = 6
epochs = 10

# Load the data
source_loader = load_training(root_path='../UCI HAR Dataset', directory='/train/Inertial Signals', batch_size=256)
target_train_loader = load_training(root_path='../UCI HAR Dataset', directory='/test/Inertial Signals', batch_size=256)
target_test_loader = load_testing(root_path='../UCI HAR Dataset', directory='/test/Inertial Signals')

# Build the model
model = DANNet(num_classes=num_classes).to(device)
print(model)

loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
 
# Train / test loop
print('Begin train')
for epoch in range(1, epochs+1):
    train(source_loader, target_train_loader, model, optimizer, loss_fn, device, epochs, epoch)
    test(target_test_loader, model, device)
Example #8
        optimizer = optim.SGD(params=[{
            'params': model.features.parameters()
        }, {
            'params': model.nfc.parameters(),
            'lr': learning_rate * 10
        }],
                              lr=learning_rate,
                              momentum=MOMENTUM,
                              weight_decay=L2_DECAY)
    assert optimizer is not None
    return optimizer


if __name__ == '__main__':
    torch.manual_seed(10)
    root_dir = '../../../../data/Original_images/'
    src, tar = 'amazon/images', 'webcam/images'
    #    root_dir = 'C:/Users/beansprouts/Desktop/office31_raw_image/Original_images/'
    #    src, tar = 'amazon/images', 'webcam/images'
    #model_name = 'alexnet'
    model_name = 'resnet'
    data_src, data_tar = data_loader.load_training(root_dir, src, BATCH_SIZE_SRC), \
                         data_loader.load_testing(root_dir, tar, BATCH_SIZE_TAR)
    print('Source:{}, target:{}'.format(src, tar))

    model = load_model(model_name).to(DEVICE)

    model.load_state_dict(torch.load('../../../../data/models/params.pkl'))

    test(model=model, data_tar=data_tar)
Example #9
# momentum = 0.9
momentum = 2
l2_decay = 5e-4
# l2_decay = 1e-4

# src_dir = './source_datasets_UN.mat'
# tar_dir = './target_datasets_UN.mat'

src_dir = './source_datasets_Eq_shuffle_HHT.mat'
tar_dir = './target_datasets_Eq_shuffle_HHT.mat'

src_name = "source_data_train"
tgt_train_name = "target_data_train"
tgt_test_name = "target_data_test"

src_loader = data_loader.load_training(src_dir, src_name, batch_size)
tgt_train_loader = data_loader.load_training(tar_dir, tgt_train_name, batch_size)
tgt_test_loader = data_loader.load_testing(tar_dir, tgt_test_name, batch_size)

src_dataset_len = len(src_loader.dataset)
tgt_train_dataset_len = len(tgt_train_loader.dataset)
tgt_test_dataset_len = len(tgt_test_loader.dataset)
src_loader_len = len(src_loader)
tgt_loader_len = len(tgt_train_loader)

print('Source-domain training set size: %d' % src_dataset_len)
print('Target-domain training set size: %d' % tgt_train_dataset_len)
print('Target-domain test set size: %d' % tgt_test_dataset_len)

print('Source-domain batch count: %d' % src_loader_len)
print('Target-domain batch count: %d' % tgt_loader_len)
Example #10
    type=str,
    default="/data/sihan.zhu/transfer learning/deep/dataset/RSTL/")
parser.add_argument('--source_dir', type=str, default="UCM")
parser.add_argument('--test_dir', type=str, default="RSSCN7")
# RSTL
# UCM WHU AID RSSCN7
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

source_loader = data_loader.load_training(args.root_path, args.source_dir,
                                          args.batch_size, kwargs)
target_train_loader = data_loader.load_training(args.root_path, args.test_dir,
                                                args.batch_size, kwargs)
target_test_loader = data_loader.load_testing(args.root_path, args.test_dir,
                                              args.batch_size, kwargs)

len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
len_source_loader = len(source_loader)
len_target_loader = len(target_train_loader)


def train(epoch, model):
    LEARNING_RATE = args.lr / math.pow(
        (1 + 10 * (epoch - 1) / args.epochs), 0.75)
    print('learning rate: {:.4f}'.format(LEARNING_RATE))
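
The snippet stops right after computing the schedule. In DAN-style scripts the per-epoch optimizer is then rebuilt from it, roughly as below (a sketch; the parameter-group names sharedNet and cls_fc are assumptions, not shown in the snippet):

    # Hypothetical continuation of train(): backbone at LEARNING_RATE / 10,
    # the freshly initialized classifier head at the full LEARNING_RATE.
    optimizer = torch.optim.SGD([
        {'params': model.sharedNet.parameters()},
        {'params': model.cls_fc.parameters(), 'lr': LEARNING_RATE},
    ], lr=LEARNING_RATE / 10, momentum=0.9, weight_decay=5e-4)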
Example #11
# Training settings
iteration = 10000
lr = 0.00000003
LEARNING_RATE = 0.01

root_path = "dataset"
src_name = "src"
tgt_name = "dslr"
src_path = 'dataset/raw_pkl/30001_40000.pkl'
tgt_path = 'dataset/raw_pkl/40001_50000.pkl'
checkpoint_path = 'dataset/checkpoint'  # model checkpoint location
tf.device('/gpu:1')  # note: tf.device only takes effect as a context manager ("with tf.device('/gpu:1'):")

# Load the data into three variables (source domain, target domain, test); see the data_loader module for usage
f_src = data_loader.load_training(src_path)
f_tgt = data_loader.load_training(tgt_path)

# Transform the four data splits (src)
src_x_train, src_y_train, src_x_test, src_y_test = data_loader.transform(f_src)
tgt_x_train, tgt_y_train, tgt_x_test, tgt_y_test = data_loader.transform(f_tgt)

sess = tf.compat.v1.Session()
saver = tf.compat.v1.train.import_meta_graph('dataset/model.ckpt.meta')
saver.restore(sess, tf.train.latest_checkpoint('dataset'))
graph = tf.compat.v1.get_default_graph()

### restore
temp_output = graph.get_tensor_by_name('l2/Relu:0')
tf_x = graph.get_tensor_by_name('input_placeholder_x:0')
tf_y = graph.get_tensor_by_name('ouput_placeholder_y:0')  # 'ouput' spelling must match the name saved in the graph
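
With the graph restored, features can be pulled from the recovered tensor, e.g. (assuming src_x_train matches the placeholder's shape):

src_features = sess.run(temp_output, feed_dict={tf_x: src_x_train})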
Example #12
source_name = "W"
target_name = "D"
source_path = "/home/fujiahui/Desktop/openset_caffe/office-31/W_source/images"
target_path_train = "/home/fujiahui/Desktop/openset_caffe/office-31/D_target/images"  # split into training and test sets
target_path_test = "/home/fujiahui/Desktop/openset_caffe/office-31/D_target/images"


cuda = not no_cuda and torch.cuda.is_available()

torch.manual_seed(seed)
if cuda:
    torch.cuda.manual_seed(seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}

source_loader = data_loader.load_training(source_path, batch_size, kwargs)

target_train_loader = data_loader.load_training(target_path_train, batch_size, kwargs)
target_test_loader = data_loader.load_testing(target_path_test, batch_size, kwargs)

len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
print(len_source_dataset)
print(len_target_dataset)
len_source_loader = len(source_loader)
len_target_loader = len(target_train_loader)
print(len_source_loader)
print(len_target_loader)
# load the pretrained model

def train(model):
    for epoch in range(10):
        model.train()
        for class_name in class_name_list:
            source1_loader, number_of_images1 = data_loader.load_training(
                root1, class_name, batch_size, kwargs)
            source2_loader, number_of_images2 = data_loader.load_training(
                root2, class_name, batch_size, kwargs)
            source3_loader, number_of_images3 = data_loader.load_training(
                root3, class_name, batch_size, kwargs)
            target_train_loader, number_of_target = data_loader.load_training(
                root_target, class_name, batch_size, kwargs)
            print(number_of_images1, number_of_images2, number_of_images3,
                  number_of_target)
            iteration_num = min(number_of_images1, number_of_images2,
                                number_of_images3, number_of_target)
            # LEARNING_RATE = lr / math.pow((1 + 10 * (i - 1) / (iteration)), 0.75)
            LEARNING_RATE = lr
            # if (i - 1) % 100 == 0:
            #     print("learning rate:", LEARNING_RATE)

            optimizer = torch.optim.SGD([
                {
                    'params': model.sharedNet.parameters()
                },
                {
                    'params': model.cls_fc_son1.parameters(),
                    'lr': LEARNING_RATE
                },
                {
                    'params': model.cls_fc_son2.parameters(),
                    'lr': LEARNING_RATE
                },
                {
                    'params': model.cls_fc_son3.parameters(),
                    'lr': LEARNING_RATE
                },
                {
                    'params': model.sonnet1.parameters(),
                    'lr': LEARNING_RATE
                },
                {
                    'params': model.sonnet2.parameters(),
                    'lr': LEARNING_RATE
                },
                {
                    'params': model.sonnet3.parameters(),
                    'lr': LEARNING_RATE
                },
            ],
                                        lr=LEARNING_RATE / 10,
                                        momentum=momentum,
                                        weight_decay=l2_decay)

            for i in range(iteration_num // batch_size):
                print("i", i, "/", "iteration_num:",
                      iteration_num // batch_size, " ", class_name)
                try:
                    source_data, source_label = next(source1_iter)
                    # print(source_data.size(), source_label.size())
                    # print(source_label)
                except Exception:
                    source1_iter = iter(source1_loader)
                    source_data, source_label = next(source1_iter)
                try:
                    target_data, __ = next(target_iter)
                except Exception:
                    target_iter = iter(target_train_loader)
                    target_data, __ = next(target_iter)
                if cuda:
                    source_data, source_label = source_data.cuda(
                    ), source_label.cuda()
                    target_data = target_data.cuda()
                source_data, source_label = Variable(source_data), Variable(
                    source_label)
                target_data = Variable(target_data)
                optimizer.zero_grad()

                cls_loss, mmd_loss, l1_loss = model(source_data,
                                                    target_data,
                                                    source_label,
                                                    mark=1)
                gamma = 2 / (1 + math.exp(-10 * (i) / (iteration))) - 1
                loss = cls_loss + gamma * (mmd_loss + l1_loss)
                loss.backward()
                optimizer.step()

                if i % log_interval == 0:
                    print(
                        'Train source1 iter: {}\tLoss: {:.6f}\tsoft_Loss: {:.6f}\tmmd_Loss: {:.6f}\tl1_Loss: {:.6f}'
                        .format(i, loss.item(), cls_loss.item(),
                                mmd_loss.item(), l1_loss.item()))

                try:
                    source_data, source_label = next(source2_iter)
                except Exception:
                    source2_iter = iter(source2_loader)
                    source_data, source_label = next(source2_iter)
                try:
                    target_data, __ = next(target_iter)
                except Exception:
                    target_iter = iter(target_train_loader)
                    target_data, __ = next(target_iter)
                if cuda:
                    source_data, source_label = source_data.cuda(
                    ), source_label.cuda()
                    target_data = target_data.cuda()
                source_data, source_label = Variable(source_data), Variable(
                    source_label)
                target_data = Variable(target_data)
                optimizer.zero_grad()

                cls_loss, mmd_loss, l1_loss = model(source_data,
                                                    target_data,
                                                    source_label,
                                                    mark=2)
                gamma = 2 / (1 + math.exp(-10 * (i) / (iteration))) - 1
                loss = cls_loss + gamma * (mmd_loss + l1_loss)
                loss.backward()
                optimizer.step()

                if i % log_interval == 0:
                    print(
                        'Train source2 iter: {}\tLoss: {:.6f}\tsoft_Loss: {:.6f}\tmmd_Loss: {:.6f}\tl1_Loss: {:.6f}'
                        .format(i, loss.item(), cls_loss.item(),
                                mmd_loss.item(), l1_loss.item()))

                try:
                    source_data, source_label = next(source3_iter)
                except Exception:
                    source3_iter = iter(source3_loader)
                    source_data, source_label = next(source3_iter)
                try:
                    target_data, __ = next(target_iter)
                except Exception:
                    target_iter = iter(target_train_loader)
                    target_data, __ = next(target_iter)
                if cuda:
                    source_data, source_label = source_data.cuda(
                    ), source_label.cuda()
                    target_data = target_data.cuda()
                source_data, source_label = Variable(source_data), Variable(
                    source_label)
                target_data = Variable(target_data)
                optimizer.zero_grad()

                cls_loss, mmd_loss, l1_loss = model(source_data,
                                                    target_data,
                                                    source_label,
                                                    mark=3)
                gamma = 2 / (1 + math.exp(-10 * (i) / (iteration))) - 1
                loss = cls_loss + gamma * (mmd_loss + l1_loss)
                loss.backward()
                optimizer.step()

                if i % log_interval == 0:
                    print(
                        'Train source3 iter: {}\tLoss: {:.6f}\tsoft_Loss: {:.6f}\tmmd_Loss: {:.6f}\tl1_Loss: {:.6f}'
                        .format(i, loss.item(), cls_loss.item(),
                                mmd_loss.item(), l1_loss.item()))
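
The try/except blocks above repeat the same "restart the DataLoader when it is exhausted" pattern for each of the three sources. A small helper that captures the pattern once (a sketch, not part of the original code):

def next_batch(iterator, loader):
    # Return the next batch, restarting the loader when the pass over it ends.
    try:
        return next(iterator), iterator
    except StopIteration:
        iterator = iter(loader)
        return next(iterator), iterator

Inside the loop this would be called as, e.g., (source_data, source_label), source1_iter = next_batch(source1_iter, source1_loader), after initializing source1_iter = iter(source1_loader) once before the loop.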
Example #14
print(opt)

###################################

# Training settings
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id

cuda = torch.cuda.is_available()
if cuda:
    torch.cuda.manual_seed(opt.seed)

# Dataloader

kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}

source_loader = data_loader.load_training(opt.root_path, opt.source,
                                          opt.batch_size, kwargs)
target_train_loader = data_loader.load_training(opt.root_path, opt.target,
                                                opt.batch_size, kwargs)
target_test_loader = data_loader.load_testing(opt.root_path, opt.target,
                                              opt.batch_size, kwargs)

len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
len_source_loader = len(source_loader)
len_target_loader = len(target_train_loader)
nclasses = len(source_loader.dataset.classes)

###################################


# Per-epoch training loop (the snippet ends here; a sketch follows)
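
A plausible continuation of the loop (a sketch; opt.epochs and the train/test signatures are assumptions, not taken from the source):

# Hypothetical epoch loop; model and optimizer construction are omitted above.
for epoch in range(1, opt.epochs + 1):
    train(epoch, model, source_loader, target_train_loader, optimizer)
    test(model, target_test_loader)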