Esempio n. 1
0
        # Another choice for Office-home:
        # width = 1024
        # srcweight = 3
        # is_cen = True
    else:
        # Unrecognized dataset name: -1 acts as a sentinel width
        # (presumably the model substitutes its own default — confirm in MDD).
        width = -1

    # MDD model (likely Margin Disparity Discrepancy) on a ResNet-50 backbone;
    # class_num / srcweight / is_cen are chosen per-dataset in the branch above.
    model_instance = MDD(base_net='ResNet50',
                         width=width,
                         use_gpu=True,
                         class_num=class_num,
                         srcweight=srcweight)

    # Training loaders for the source and target domains, plus an evaluation
    # loader over the same target file list. NOTE(review): is_train=False
    # presumably disables shuffling/augmentation inside load_images — verify.
    train_source_loader = load_images(source_file,
                                      batch_size=32,
                                      is_cen=is_cen)
    train_target_loader = load_images(target_file,
                                      batch_size=32,
                                      is_cen=is_cen)
    test_target_loader = load_images(target_file,
                                     batch_size=32,
                                     is_train=False)

    # One relative LR per parameter group, taken from the model; used to
    # scale per-group learning rates during scheduling.
    param_groups = model_instance.get_parameter_list()
    group_ratios = [group['lr'] for group in param_groups]

    # This script only supports plain SGD.
    assert cfg.optim.type == 'sgd', 'Optimizer type not supported!'

    optimizer = torch.optim.SGD(param_groups, **cfg.optim.params)
Esempio n. 2
0
            "Source label distribution: {}".format(source_label_distribution))
        print(
            "Target label distribution: {}".format(target_label_distribution))
        print("True weights : {}".format(true_weights[:, 0].cpu().numpy()))
    else:
        # No ground-truth class-reweighting information available: fall back
        # to None placeholders (the prints below will simply show "None").
        true_weights = None
        source_label_distribution = None
        target_label_distribution = None
        print(
            "Source label distribution: {}".format(source_label_distribution))
        print(
            "Target label distribution: {}".format(target_label_distribution))
        print("True weights : {}".format(true_weights))


    # Source-domain training loader (centering/cropping controlled by is_cen).
    train_source_loader = load_images(source_file, batch_size=args.batch_size, resize_size=resize_size, crop_size=crop_size, \
                                      is_cen=is_cen, root_folder=args.root_folder)

    if use_ssda:
        # Semi-supervised DA: split each target batch between unlabeled and
        # labeled samples so the combined size stays args.batch_size.
        train_target_loader = load_images(target_file, batch_size=args.batch_size-args.labeled_tgt_batch_size, \
                                                resize_size=resize_size, crop_size=crop_size, is_cen=is_cen, \
                                                root_folder=args.root_folder)
        train_labeled_target_loader = load_images(labeled_target_file, batch_size=args.labeled_tgt_batch_size, \
                                                resize_size=resize_size, crop_size=crop_size, is_cen=is_cen, \
                                                root_folder=args.root_folder)
    else:
        # Unsupervised DA: every target sample is treated as unlabeled.
        train_target_loader = load_images(target_file, batch_size=args.batch_size, resize_size=resize_size, \
                                          crop_size=crop_size, is_cen=is_cen, root_folder=args.root_folder)
        train_labeled_target_loader = None

    # Evaluation loader over the source test split (fixed batch size of 16).
    test_source_loader = load_images(source_test_file, batch_size=16,  resize_size=resize_size, crop_size=crop_size, \
                                    is_train=False, is_cen=is_cen, root_folder=args.root_folder)
Esempio n. 3
0
        # Another choice for Office-home:
        # width = 1024
        # srcweight = 3
        # is_cen = True
    else:
        # Unrecognized dataset name: -1 acts as a sentinel width
        # (presumably the model substitutes its own default — confirm in DANN).
        width = -1

    # DANN model on a ResNet-50 backbone; class_num / srcweight / is_cen are
    # chosen per-dataset in the branch above.
    model_instance = DANN(base_net='ResNet50',
                          width=width,
                          use_gpu=True,
                          class_num=class_num,
                          srcweight=srcweight)

    # Source loader with split_noisy=False; the "noisy" loader is then just an
    # alias of the clean one — both names refer to the SAME loader object.
    train_source_clean_loader = load_images(source_file,
                                            batch_size=32,
                                            is_cen=is_cen,
                                            split_noisy=False)
    train_source_noisy_loader = train_source_clean_loader
    # Target training loader plus an evaluation loader over the same target
    # file list (is_train=False for the eval pass).
    train_target_loader = load_images(target_file,
                                      batch_size=32,
                                      is_cen=is_cen)
    test_target_loader = load_images(target_file,
                                     batch_size=32,
                                     is_train=False)

    # One relative LR per parameter group, taken from the model; used to
    # scale per-group learning rates during scheduling.
    param_groups = model_instance.get_parameter_list()
    group_ratios = [group['lr'] for group in param_groups]

    # This script only supports plain SGD.
    assert cfg.optim.type == 'sgd', 'Optimizer type not supported!'

    optimizer = torch.optim.SGD(param_groups, **cfg.optim.params)
Esempio n. 4
0
def train(config):
    """Train an MDD domain-adaptation model driven by a configuration dict.

    Keys read from ``config``: 'source_path', 'target_path', 'batch_size',
    'resize_size', 'is_cen', 'base_net', 'width', 'class_num', 'srcweight',
    'optim' ({'type': 'sgd', 'params': ...}), 'lr_scheduler'
    ({'type': 'inv', 'gamma', 'decay_rate'}), 'init_lr', 'max_iter',
    'eval_iter', 'output_path'.

    Side effects: prints progress/evaluation results and saves model
    checkpoints under config['output_path'].

    Raises:
        AssertionError: if the optimizer type is not 'sgd' or the scheduler
            type is not 'inv'.
    """
    # Data loaders: train on source + target images; evaluate on the target
    # list with is_train=False (eval transform/ordering inside load_images).
    train_source_loader = load_images(config['source_path'],
                                      batch_size=config['batch_size'],
                                      resize_size=config['resize_size'],
                                      is_cen=config['is_cen'])
    train_target_loader = load_images(config['target_path'],
                                      batch_size=config['batch_size'],
                                      resize_size=config['resize_size'],
                                      is_cen=config['is_cen'])
    test_target_loader = load_images(config['target_path'],
                                     batch_size=config['batch_size'],
                                     resize_size=config['resize_size'],
                                     is_cen=config['is_cen'],
                                     is_train=False)

    model_instance = MDD(base_net=config['base_net'],
                         width=config['width'],
                         use_gpu=True,
                         class_num=config['class_num'],
                         srcweight=config['srcweight'])

    # One relative LR per parameter group; used by the scheduler to scale
    # per-group learning rates each iteration.
    param_groups = model_instance.get_parameter_list()
    group_ratios = [group['lr'] for group in param_groups]

    assert config['optim']['type'] == 'sgd', 'Optimizer type not supported!'
    optimizer = torch.optim.SGD(param_groups, **config['optim']['params'])

    assert config['lr_scheduler']['type'] == 'inv', 'Scheduler type not supported!'
    lr_scheduler = INVScheduler(gamma=config['lr_scheduler']['gamma'],
                                decay_rate=config['lr_scheduler']['decay_rate'],
                                init_lr=config['init_lr'])

    model_instance.set_train(True)

    print("start train...")
    iter_num = 0
    epoch = 0
    total_progress_bar = tqdm.tqdm(desc='Train iter', total=config['max_iter'])

    while iter_num < config['max_iter']:
        for (datas, datat) in tqdm.tqdm(
                zip(train_source_loader, train_target_loader),
                total=min(len(train_source_loader),
                          len(train_target_loader)),
                desc='Train epoch = {}'.format(epoch),
                ncols=80, leave=False):
            # BUGFIX: the original only tested max_iter between epochs, so
            # training could overshoot the budget by up to one full epoch.
            if iter_num >= config['max_iter']:
                break

            inputs_source, labels_source = datas
            inputs_target, labels_target = datat  # target labels unused (unsupervised)

            # Anneal the learning rate; iter_num / 5 slows the inv schedule.
            optimizer = lr_scheduler.next_optimizer(group_ratios, optimizer, iter_num / 5)
            optimizer.zero_grad()

            # NOTE(review): Variable is a no-op wrapper on modern PyTorch;
            # kept for compatibility with the rest of this codebase.
            if model_instance.use_gpu:
                inputs_source, inputs_target, labels_source = Variable(inputs_source).cuda(), Variable(
                    inputs_target).cuda(), Variable(labels_source).cuda()
            else:
                inputs_source, inputs_target, labels_source = Variable(inputs_source), Variable(
                    inputs_target), Variable(labels_source)

            train_batch(model_instance, inputs_source, labels_source, inputs_target, optimizer)

            # Periodic evaluation + checkpointing (skip the very first iter).
            if iter_num % config['eval_iter'] == 0 and iter_num != 0:
                eval_result = evaluate(model_instance,
                                       test_target_loader,)
                print(eval_result)
                acc = round(eval_result['accuracy'], 2)
                print(acc)

                # BUGFIX: str(acc*100) could emit float artifacts such as
                # "87.00000000000001"; round once more for a clean filename.
                torch.save(model_instance.c_net.state_dict(),
                           os.path.join(config["output_path"],
                                        str(round(acc * 100, 2)) + "_model.pth.tar"))
            iter_num += 1
            total_progress_bar.update(1)

        epoch += 1
    print('finish train')
Esempio n. 5
0
        # Another choice for Office-home:
        # width = 1024
        # srcweight = 3
        # is_cen = True
    else:
        # Unrecognized dataset name: -1 acts as a sentinel width
        # (presumably the model substitutes its own default — confirm in MDD).
        width = -1

    # MDD model (likely Margin Disparity Discrepancy) on a ResNet-50 backbone;
    # class_num / srcweight / is_cen come from the per-dataset branch above.
    model_instance = MDD(base_net='ResNet50',
                         width=width,
                         use_gpu=True,
                         class_num=class_num,
                         srcweight=srcweight)

    # NOTE(review): both loaders read the SAME source_file — this variant
    # trains and evaluates on the source domain only; confirm intentional.
    # drop_last=True discards the final partial batch during training.
    train_source_loader = load_images(source_file,
                                      batch_size=32,
                                      is_cen=is_cen,
                                      drop_last=True)
    test_source_loader = load_images(source_file,
                                     batch_size=32,
                                     is_train=False)

    # One relative LR per parameter group, taken from the model; used to
    # scale per-group learning rates during scheduling.
    param_groups = model_instance.get_parameter_list()
    group_ratios = [group['lr'] for group in param_groups]

    # This script only supports plain SGD.
    assert cfg.optim.type == 'sgd', 'Optimizer type not supported!'

    optimizer = torch.optim.SGD(param_groups, **cfg.optim.params)

    # Only the 'inv' (inverse-decay) LR schedule is supported.
    assert cfg.lr_scheduler.type == 'inv', 'Scheduler type not supported!'
    lr_scheduler = INVScheduler(gamma=cfg.lr_scheduler.gamma,
                                decay_rate=cfg.lr_scheduler.decay_rate,