Example #1
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

train_loader = DataLoader(get_dataset(dsize=args.size),
                          batch_size=args.batch_size,
                          shuffle=True)
test_loader = DataLoader(get_dataset(root="../data/test", dsize=args.size),
                         batch_size=args.batch_size,
                         shuffle=True)

model = VAE(args)
model.apply(weights_init)  # nn.Module.apply takes the init function itself
if args.cuda:
    model.cuda()

optimizer = optim.Adam(model.parameters(), lr=1e-4)

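# Select the reconstruction term of the VAE objective from the CLI flag;
# the KL term is always added via KLLoss below.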
if args.rec_loss == "ssim":
    rec_loss = SSIMLoss(method=args.ssim_method)
elif args.rec_loss == "bce":
    rec_loss = nn.BCELoss(reduction='mean' if args.size_average else 'sum')
elif args.rec_loss == "l1":
    rec_loss = nn.L1Loss(reduction='mean' if args.size_average else 'sum')
else:
    rec_loss = None
kl_loss = KLLoss(size_average=args.size_average)


def loss_function(x, x_rec, mu, logvar):
    # Minimal completion (the body is truncated in the source): combine the
    # configured reconstruction term with the KL term defined above.
    rec = rec_loss(x_rec, x) if rec_loss is not None else 0.0
    return rec + kl_loss(mu, logvar)


def main():
    CUDA = False
    if torch.cuda.is_available():
        CUDA = True
        print('cuda available')
        torch.backends.cudnn.benchmark = True
    config = config_process(parser.parse_args())
    print(config)

    with open('pkl/task_1_train.pkl', 'rb') as f:
        task_1_train = pkl.load(f)
    # with open('pkl/task_1_test.pkl', 'rb') as g:
    #     task_1_test = pkl.load(g)
    # task_1_test.pkl actually holds both test and val examples ("testval");
    # it is split 500 / rest just below.
    with open('pkl/task_1_test.pkl', 'rb') as g:
        task_1_testval = pkl.load(g)

    task_1_test = task_1_testval[:500]
    task_1_val = task_1_testval[500:]
    ###### task 1: test + val
    ###### task 0: seen training data and unseen test data
    examples, labels, class_map = image_load(config['class_file'],
                                             config['image_label'])
    ###### task 0: seen test data
    examples_0, labels_0, class_map_0 = image_load(config['class_file'],
                                                   config['test_seen_classes'])

    datasets = split_byclass(config, examples, labels,
                             np.loadtxt(config['attributes_file']), class_map)
    datasets_0 = split_byclass(config, examples_0, labels_0,
                               np.loadtxt(config['attributes_file']),
                               class_map)
    print('loaded task 0 train: {}, task 1 as test: {}'.format(
        len(datasets[0][0]), len(datasets[0][1])))
    print('loaded task 0 test data: {}'.format(len(datasets_0[0][0])))

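    # datasets[0][3] / datasets[0][4] hold the seen- / unseen-class attribute
    # matrices; normalize them for use as class embeddings below.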
    train_attr = F.normalize(datasets[0][3])
    test_attr = F.normalize(datasets[0][4])

    best_cfg = config
    best_cfg['n_classes'] = datasets[0][3].size(0)
    best_cfg['n_train_lbl'] = datasets[0][3].size(0)
    best_cfg['n_test_lbl'] = datasets[0][4].size(0)

    task_1_train_set = grab_data(best_cfg, task_1_train, datasets[0][2], True)
    task_1_test_set = grab_data(best_cfg, task_1_test, datasets[0][2], False)
    task_1_val_set = grab_data(best_cfg, task_1_val, datasets[0][2], False)
    task_0_seen_test_set = grab_data(best_cfg, datasets_0[0][0],
                                     datasets_0[0][2], False)

    base_model = models.__dict__[config['arch']](pretrained=True)
    if config['arch'].startswith('resnet'):
        FE_model = nn.Sequential(*list(base_model.children())[:-1])
    else:
        print('untested')
        raise NotImplementedError

    print('load pretrained FE_model')
    ####### task id 'softmax'
    FE_path = './ckpts/{}_{}_{}_task_id_{}_finetune_{}_{}'.format(
        config['dataset'], config['softmax_method'], config['arch'],
        config['task_id'], config['finetune'], 'checkpoint.pth')

    FE_model.load_state_dict(torch.load(FE_path)['state_dict_FE'])
    for name, para in FE_model.named_parameters():
        para.requires_grad = False

    vae = VAE(encoder_layer_sizes=config['encoder_layer_sizes'],
              latent_size=config['latent_size'],
              decoder_layer_sizes=config['decoder_layer_sizes'],
              num_labels=config['num_labels'])

    vae_path = './ckpts/{}_{}_{}_{}_task_id_{}_finetune_{}_{}'.format(
        config['dataset'], config['method'], config['softmax_method'],
        config['arch'], config['task_id'], config['finetune'], 'ckpt.pth')
    vae.load_state_dict(torch.load(vae_path))
    for name, para in vae.named_parameters():
        para.requires_grad = False

    FE_model.eval()
    vae.eval()
    # print(vae)
    if CUDA:
        FE_model = FE_model.cuda()
        vae = vae.cuda()

    #seen
    task_1_real_train = get_prev_feat(FE_model, task_1_train_set, CUDA)
    # task_0_real_val = get_prev_feat(FE_model, task_0_val_set, CUDA)

    print('loaded real train/val features and labels')
    print('...GENERATING fake features...')
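    # Synthesize config['syn_num'] fake features for each of the 150 seen
    # (task 0) classes from their normalized attribute vectors.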
    task_0_fake = generate_syn_feature(150, vae, train_attr, config['syn_num'],
                                       config)

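    # Mix synthetic seen-class features with real task-1 features; task-1
    # labels are shifted by the 150 seen classes so the label spaces do not
    # collide.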
    train_X = torch.cat((task_0_fake[0].cuda(), task_1_real_train[0].cuda()))
    train_Y = torch.cat(
        (task_0_fake[1].cuda(), task_1_real_train[1].cuda() + 150))
    # train_X = task_0_fake[0].cuda()
    # train_Y = task_0_fake[1].cuda()
    test_Dataset = PURE(train_X, train_Y)

    test_dataloader = torch.utils.data.DataLoader(test_Dataset,
                                                  batch_size=256,
                                                  shuffle=True)

    test_loss_net = nn.Linear(in_features=2048, out_features=200).cuda()
    test_loss_net_optimizer = torch.optim.Adam(test_loss_net.parameters())

    print('...TRAIN test set CLASSIFIER...')
    # train_syn_val(test_loss_net,task_0_val_set, test_dataloader, test_loss_net_optimizer, 200)
    best_loss_net = train_syn_val(config, FE_model, test_loss_net,
                                  task_1_val_set, test_dataloader,
                                  test_loss_net_optimizer, 200)
    test_loss_net = copy.deepcopy(best_loss_net)
    print('\n...TESTING... GZSL: 200 labels')
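    # GZSL evaluation: the classifier's 200 outputs cover the 150 seen
    # (task 0) and 50 unseen (task 1) CUB classes.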

    # test_loss_net = torch.load('hist_files/vae_time_2_doulbe_distill_loss_net.pth')
    # torch.save(test_loss_net,'vae_time_2_loss_net.pth')
    test_0_acc, test_0_top1, _, true_labels_0, pred_labels_0 = test(
        config,
        FE_model,
        test_loss_net,
        task_0_seen_test_set,
        CUDA,
        0,
        eval_mode=0,
        print_sign=1)
    test_1_acc, test_1_top1, _, true_labels_1, pred_labels_1 = test(
        config,
        FE_model,
        test_loss_net,
        task_1_test_set,
        CUDA,
        1,
        eval_mode=1,
        print_sign=1)
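    # H: harmonic mean of seen (task 0) and unseen (task 1) accuracy, the
    # standard GZSL metric; OM: a 3:1 weighted mean that favors the seen task.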
    H = 2 * test_0_acc * test_1_acc / (test_0_acc + test_1_acc)
    print(H)
    OM = (3 * test_0_acc + test_1_acc) / 4
    print(OM)
    if not os.path.exists('results'):
        os.makedirs('results')

    file_name = '{}_{}_{}_{}.txt'.format(config['dataset'], config['arch'],
                                         config['method'], config['task_id'])
    with open('results/' + file_name, 'a') as fp:
        print(best_cfg, file=fp)
        print(
            'task B: {:.3f}, task A: {:.3f}, H = {:.3f}, OM = {:.3f}\n'.format(
                test_0_acc, test_1_acc, H, OM),
            file=fp)
Example #3
def main():
    # Read the CUB attribute names ("<index> <name>" per line).
    file_name = 'data/CUB/attributes.txt'
    with open(file_name, 'r') as f:
        content = f.readlines()
    atts = []
    for item in content:
        atts.append(item.strip().split(' ')[1])

    CUDA = False
    if torch.cuda.is_available():
        CUDA = True
        print('cuda available')
        torch.backends.cudnn.benchmark = True
    config = config_process(parser.parse_args())
    print(config)

    # pkl_name='./pkl/{}_{}_{}_{}_task_id_{}_finetune_{}_{}'.format(
    #             config['dataset'], config['method'], config['softmax_method'],config['arch'],
    #             config['task_id'], config['finetune'], '.pkl')
    #
    # with open(pkl_name,'rb') as f:
    #     feat_dict=pkl.load(f)

    with open('pkl/task_0_train.pkl', 'rb') as f:
        task_0_train = pkl.load(f)

    with open('pkl/task_1_train.pkl', 'rb') as f:
        task_1_train = pkl.load(f)

    ###### task 0: seen training data and unseen test data
    examples, labels, class_map = image_load(config['class_file'],
                                             config['image_label'])
    ###### task 0: seen test data
    examples_0, labels_0, class_map_0 = image_load(config['class_file'],
                                                   config['test_seen_classes'])

    datasets = split_byclass(config, examples, labels,
                             np.loadtxt(config['attributes_file']), class_map)
    datasets_0 = split_byclass(config, examples_0, labels_0,
                               np.loadtxt(config['attributes_file']),
                               class_map)
    print('loaded task 0 train: {}, task 1 as test: {}'.format(
        len(datasets[0][0]), len(datasets[0][1])))
    print('loaded task 0 test data: {}'.format(len(datasets_0[0][0])))

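    # One-hot conditioning: row i of the 312x312 identity stands for CUB
    # attribute i, so features are synthesized per attribute, not per class.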
    classes_text_embedding = torch.eye(312, dtype=torch.float32)
    test_attr = classes_text_embedding[:, :]

    train_attr = F.normalize(datasets[0][3])
    # test_attr=F.normalize(datasets[0][4])

    best_cfg = config
    best_cfg['n_classes'] = datasets[0][3].size(0)
    best_cfg['n_train_lbl'] = datasets[0][3].size(0)
    best_cfg['n_test_lbl'] = datasets[0][4].size(0)

    task_0_train_set = grab_data(best_cfg, task_0_train, datasets[0][2], True)
    task_1_train_set = grab_data(best_cfg, task_1_train, datasets[0][2], False)

    base_model = models.__dict__[config['arch']](pretrained=True)
    if config['arch'].startswith('resnet'):
        FE_model = nn.Sequential(*list(base_model.children())[:-1])
    else:
        print('untested')
        raise NotImplementedError

    print('load pretrained FE_model')
    FE_path = './ckpts/{}_{}_{}_task_id_{}_finetune_{}_{}'.format(
        config['dataset'], config['softmax_method'], config['arch'],
        config['task_id'], config['finetune'], 'checkpoint.pth')

    FE_model.load_state_dict(torch.load(FE_path)['state_dict_FE'])
    for name, para in FE_model.named_parameters():
        para.requires_grad = False

    vae = VAE(encoder_layer_sizes=config['encoder_layer_sizes'],
              latent_size=config['latent_size'],
              decoder_layer_sizes=config['decoder_layer_sizes'],
              num_labels=config['num_labels'])
    vae2 = VAE(encoder_layer_sizes=config['encoder_layer_sizes'],
               latent_size=config['latent_size'],
               decoder_layer_sizes=config['decoder_layer_sizes'],
               num_labels=config['num_labels'])
    vae_path = './ckpts/{}_{}_{}_{}_task_id_{}_finetune_{}_{}'.format(
        config['dataset'], 'vae', 'softmax_distill', config['arch'], 1,
        config['finetune'], 'ckpt.pth')

    vae2_path = './ckpts/{}_{}_{}_{}_task_id_{}_finetune_{}_{}'.format(
        config['dataset'], 'vae_distill', 'softmax_distill', config['arch'], 1,
        config['finetune'], 'ckpt.pth')

    vae.load_state_dict(torch.load(vae_path))
    vae2.load_state_dict(torch.load(vae2_path))
    for name, para in vae.named_parameters():
        para.requires_grad = False
    for name, para in vae2.named_parameters():
        para.requires_grad = False

    if CUDA:
        FE_model = FE_model.cuda()
        vae = vae.cuda()
        vae2 = vae2.cuda()

    ATTR_NUM = 312
    SYN_NUM = config['syn_num']
    attr_feat, attr_lbl = generate_syn_feature(ATTR_NUM, vae, test_attr,
                                               SYN_NUM, config)
    attr_feat2, attr_lbl2 = generate_syn_feature(ATTR_NUM, vae2, test_attr,
                                                 SYN_NUM, config)

    # Make sure the output directory exists before dumping.
    os.makedirs('attr_tsne_data', exist_ok=True)
    with open('attr_tsne_data/attr_vae_time_2_fe_distill.pkl', 'wb') as f:
        pkl.dump(attr_feat, f)
    with open('attr_tsne_data/attr_vae_time_2_double_distill.pkl', 'wb') as g:
        pkl.dump(attr_feat2, g)

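    # t-SNE of both VAEs' synthetic per-attribute features: 'X' markers for
    # vae, 'o' markers for vae2.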
    colors = cm.rainbow(np.linspace(0, 1, ATTR_NUM))
    fig = plt.figure(figsize=(16, 9))
    tsne = TSNE(n_components=2)

    feat = torch.cat((attr_feat, attr_feat2))
    # scikit-learn's TSNE expects a CPU NumPy array, not a torch tensor.
    tsne_results = tsne.fit_transform(feat.cpu().numpy())
    color_ind = colors[attr_lbl]
    ax = fig.add_subplot(1, 1, 1)

    for i in range(ATTR_NUM):
        ax.scatter(tsne_results[i * SYN_NUM:(i + 1) * SYN_NUM, 0],
                   tsne_results[i * SYN_NUM:(i + 1) * SYN_NUM, 1],
                   label=atts[i],
                   c=np.tile(colors[i].reshape(1, -1), (SYN_NUM, 1)),
                   s=20,
                   marker='X')

    result = tsne_results[ATTR_NUM * SYN_NUM:, :]
    for j in range(ATTR_NUM):
        ax.scatter(result[j * SYN_NUM:(j + 1) * SYN_NUM, 0],
                   result[j * SYN_NUM:(j + 1) * SYN_NUM, 1],
                   label=atts[j],
                   c=np.tile(colors[j].reshape(1, -1), (SYN_NUM, 1)),
                   s=20,
                   marker='o')
    # ax.scatter(tsne_results[:, 0], tsne_results[:, 1],c=color_ind, s=20, marker='X')
    plt.legend()
    plt.show()


def main():
    CUDA = False
    if torch.cuda.is_available():
        CUDA = True
        print('cuda available')
        torch.backends.cudnn.benchmark = True
    config = config_process(parser.parse_args())
    print(config)

    with open('pkl/task_1_train.pkl', 'rb') as f:
        task_1_train = pkl.load(f)
    with open('pkl/task_1_test.pkl', 'rb') as g:
        task_1_test = pkl.load(g)

    ###### task 0: seen training data and unseen test data
    examples, labels, class_map = image_load(config['class_file'],
                                             config['image_label'])
    ###### task 0: seen test data
    examples_0, labels_0, class_map_0 = image_load(config['class_file'],
                                                   config['test_seen_classes'])

    datasets = split_byclass(config, examples, labels,
                             np.loadtxt(config['attributes_file']), class_map)
    datasets_0 = split_byclass(config, examples_0, labels_0,
                               np.loadtxt(config['attributes_file']),
                               class_map)
    print('loaded task 0 train: {}, task 1 as test: {}'.format(
        len(datasets[0][0]), len(datasets[0][1])))
    print('loaded task 0 test data: {}'.format(len(datasets_0[0][0])))

    test_attr = F.normalize(datasets[0][4])

    best_cfg = config
    best_cfg['n_classes'] = datasets[0][3].size(0)
    best_cfg['n_train_lbl'] = datasets[0][3].size(0)
    best_cfg['n_test_lbl'] = datasets[0][4].size(0)

    task_1_train_set = grab_data(best_cfg, task_1_train, datasets[0][2], True)
    # task_1_test_set = grab_data(best_cfg, task_1_test, datasets[0][2], False)

    base_model = models.__dict__[config['arch']](pretrained=False)
    if config['arch'].startswith('resnet'):
        FE_model = nn.Sequential(*list(base_model.children())[:-1])
    else:
        print('untested')
        raise NotImplementedError

    ###### if finetune == False,
    print('load pretrained FE_model')
    FE_path = './ckpts/{}_{}_{}_task_id_{}_finetune_{}_{}'.format(
        config['dataset'], config['softmax_method'], config['arch'],
        config['task_id'], config['finetune'], 'checkpoint.pth')

    FE_model.load_state_dict(torch.load(FE_path)['state_dict_FE'])
    for name, para in FE_model.named_parameters():
        para.requires_grad = False

    vae = VAE(encoder_layer_sizes=config['encoder_layer_sizes'],
              latent_size=config['latent_size'],
              decoder_layer_sizes=config['decoder_layer_sizes'],
              num_labels=config['num_labels'])

    vae_path = './ckpts/{}_{}_{}_task_id_{}_finetune_{}_{}'.format(
        config['dataset'], 'vae', config['arch'], 0, config['finetune'],
        'ckpt.pth')
    vae.load_state_dict(torch.load(vae_path))

    print(vae)
    if CUDA:
        FE_model = FE_model.cuda()
        vae = vae.cuda()
    FE_model.eval()
    optimizer = torch.optim.Adam(vae.parameters(), lr=config['lr'])

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                config['step'],
                                                gamma=0.1,
                                                last_epoch=-1)
    criterion = loss_fn
    print('loaded real train/val features and labels')

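    # Fine-tune the loaded task-0 VAE on task-1 features extracted by the
    # frozen FE_model, conditioned on the unseen-class attributes (test_attr).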
    for epoch in range(config['epoch']):
        print('\n epoch: %d' % epoch)
        print('...TRAIN...')
        print_learning_rate(optimizer)
        ### train_attr--->test_attr, task_0_train_set---> task_1_train_set
        train(epoch, FE_model, vae, task_1_train_set, optimizer, criterion,
              test_attr, CUDA)
        scheduler.step()

    vae_ckpt_name = './ckpts/{}_{}_{}_{}_task_id_{}_finetune_{}_{}'.format(
        config['dataset'], config['method'], config['softmax_method'],
        config['arch'], config['task_id'], config['finetune'], 'ckpt.pth')
    torch.save(vae.state_dict(), vae_ckpt_name)
Example #5
    parser.add_argument('--use-trained',
                        type=bool,
                        default=False,
                        metavar='UT',
                        help='load pretrained model (default: False)')

    args = parser.parse_args()

    batch_loader = BatchLoader()
    parameters = Parameters(batch_loader.vocab_size)

    vae = VAE(parameters.vocab_size, parameters.embed_size,
              parameters.latent_size, parameters.decoder_rnn_size,
              parameters.decoder_rnn_num_layers)
    if args.use_trained:
        vae.load_state_dict(t.load('trained_VAE'))
    if args.use_cuda:
        vae = vae.cuda()

    optimizer = Adam(vae.parameters(), args.learning_rate)

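    # Each iteration samples a fresh batch and optimizes the reconstruction
    # cross-entropy (plus an auxiliary-decoder term) together with the KLD.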
    for iteration in range(args.num_iterations):
        # Train step
        input, decoder_input, target = batch_loader.next_batch(
            args.batch_size, 'train', args.use_cuda)
        target = target.view(-1)

        logits, aux_logits, kld = vae(args.dropout, input, decoder_input)

        logits = logits.view(-1, batch_loader.vocab_size)
        cross_entropy = F.cross_entropy(logits, target, reduction='sum')

        aux_logits = aux_logits.view(-1, batch_loader.vocab_size)