def main():
    args = get_argument()
    config = Configs(args.model_index)
    net = build_small_model(config.ins_norm, config.lambda_div != 0)

    if config.mode in [0, -1] and not args.test_only:
        config.dump_to_file(os.path.join(config.save_path, 'exp_config.txt'))

        train_data_loader = get_lmdb_loader(config.source_lmdb,
                                            config.target_lmdb,
                                            'data',
                                            'label',
                                            batch_size=config.batch_size)
        val_data_loader = get_lmdb_loader(config.source_lmdb,
                                          config.target_lmdb,
                                          'vdata',
                                          'vlabel',
                                          batch_size=config.batch_size)
        test_data_loader = get_lmdb_loader(config.source_lmdb,
                                           config.target_lmdb,
                                           'tdata',
                                           'tlabel',
                                           batch_size=config.batch_size)

        net_list = [net]
        for _ in range(1, args.num_net):
            net_list.append(
                build_small_model(config.ins_norm, config.lambda_div != 0))

        trainer = Trainer(net_list, train_data_loader, val_data_loader,
                          test_data_loader, config)

        trainer.train_all()

    elif config.mode == 1 or args.test_only:
        tdata, tlabel, tdata_test, tdata_test_label = get_svhn(
            'D:/workspace/dataset/digits/SVHN/')
        #tdata, tlabel, tdata_test, tdata_test_label = get_mnist('D:/workspace/DA/dataset/MNIST/')
        test_data_loader = get_data_loader(tdata_test,
                                           tdata_test_label[:, 0],
                                           tdata_test,
                                           tdata_test_label[:, 0],
                                           shuffle=False,
                                           batch_size=32)

        load_weights(net, config.checkpoint, config.gpu)

        trainer = Trainer([net], None, None, test_data_loader, config)
        print('acc', trainer.val_(trainer.nets[0], 0, 0, 'test'))
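
# Assumed entry point (not shown in the excerpt): run main() when the script is executed directly.
if __name__ == '__main__':
    main()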
Example #2
import time
from data.data_loader import get_data_loader
from models.models import create_model
from option_parser import TrainingOptionParser
from utils.visualizer import Visualizer
import torch

parser = TrainingOptionParser()
opt = parser.parse_args()

data_loader = get_data_loader(opt)

print("[INFO] small batch size : {}".format(opt.small_batch_size))
print("[INFO] large batch size : {}".format(opt.large_batch_size))
print("[INFO] total batch size : {}".format(opt.large_batch_size *
                                            opt.small_batch_size))

model = create_model(opt)
visualizer = Visualizer(opt)
max_int = 999999999
large_batch_clock = time.time()
validated_before = list()

for _ in range(max_int):
    for i, data in enumerate(data_loader):
        # data : dict
        small_batch_clock = time.time()
        one_hot_labels = torch.zeros(opt.small_batch_size,
                                     opt.label_size,
                                     out=torch.LongTensor())
        for j, n in enumerate(data['label']):
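            # Assumed continuation (the original excerpt is truncated here): mark class n
            # for sample j so the integer labels become one-hot vectors.
            one_hot_labels[j][n] = 1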
Example #3
            print('\n\nlabel>>>\n', label)
            print('\n\nimg_id>>>\n', name)

            _, pred = torch.max(out.data, 1)
            _, label = torch.max(label.data, 1)

            print('\n\noutput-max>>>\n', pred)

            total_correct += (pred == label).sum().item()

    total_acc = 100. * total_correct / len(test_data_loader.dataset)

    print('\nTEST RESULT ===> ACC : %d/%d (%3.4f%%)\n' %
          (total_correct, len(test_data_loader.dataset), total_acc))


if __name__ == "__main__":

    opt = args  # `args` is expected to come from a module-level argument parser not shown in this excerpt
    print(opt)
    data_dir = opt.data_dir
    print('data_dir is : {}'.format(data_dir))

    training_data_loader, validation_data_loader, test_data_loader = get_data_loader(
        opt)

    if opt.mode == 'train':
        run_train(opt, training_data_loader, validation_data_loader)
    else:
        test_model(opt, test_data_loader)
Example #4
from data.data_loader import get_data_loader
from models.models import create_model
from option_parser import TestingOptionParser
from scipy import misc
import numpy as np
import torch
import os

parser = TestingOptionParser()
opt = parser.parse_args()
opt.batch_size = opt.repeat_generation
opt.gpu_ids = []

data_loader = get_data_loader(opt)

model = create_model(opt)
total_steps = 0
model.load(opt.epoch)
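# Note: opt.gpu_ids is forced to [] above, so the line below always resolves to the CPU FloatTensor here.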
Tensor = torch.cuda.FloatTensor if opt.gpu_ids else torch.FloatTensor
single_input = Tensor(
    1,
    opt.input_channel,
    opt.height,
    opt.width
)
repeated_input = Tensor(
    opt.batch_size,
    opt.input_channel,
    opt.height,
    opt.width
)
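
# A minimal usage sketch (assumed; not part of the original excerpt): copy one sample into
# single_input, then tile it across the batch dimension so repeated_input holds
# opt.repeat_generation copies of the same image for repeated generation.
# torch.randn stands in for a real sample taken from data_loader.
single_input.copy_(torch.randn(1, opt.input_channel, opt.height, opt.width))
repeated_input.copy_(single_input.expand_as(repeated_input))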