def get_features(mol_short='inception_v3'):
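    """Extract features with Inception-v3 on the test set and report the
    average per-image inference time."""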
    ################################################################
    # Arguments
    ################################################################
    ae_args = train_args()
    cuda = ae_args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if ae_args.cuda else {}
    # global ae_args, cuda, device, kwargs

    start_time = time.time()
    args = ae_args
    model_name = 'model/%s_%s%s_model-%s.pkl' % (mol_short, args.model,
                                                 '' if args.fea_c is None else
                                                 args.fea_c, args.dataset)
    evaluation_dir = 'res/evaluation_pic/%s_%s%s-%s' % (
        mol_short, args.model, '' if args.fea_c is None else args.fea_c,
        args.dataset)
    if os.path.exists(model_name) and args.load_model:
        print('Loading model ...')
        mol = torch.load(model_name).to(device)
    else:
        print('Init model ...')
        mol = inception_v3_features(pretrained=True, training=False).to(device)

    # train_loader = getDataLoader(args, kwargs)
    test_loader = getDataLoader(args, kwargs, train='test')

    check_dir_exists(['res/', 'model', 'res/evaluation_pic', evaluation_dir])
    t_per_img = []
    for epoch in range(1):
        step_time = time.time()
        for step, (x, y) in enumerate(test_loader):
            b_x = Variable(x, volatile=True).cuda() if cuda else Variable(x)

            t0 = time.time()
            features = mol(b_x)
            t_tmp = (time.time() - t0) / len(b_x) * 1000
            t_per_img.append(t_tmp)
            # Trimmed mean (drop fastest/slowest batch) once enough samples exist; avoids NaN early on
            t_mean = (np.mean(sorted(t_per_img)[1:-1])
                      if len(t_per_img) > 2 else np.mean(t_per_img))
            print('cost %.6fms per image this batch. '
                  'cost %.6fms per image till now.' % (t_tmp, t_mean))

    print('Finished. Total time cost %.2f s' % (time.time() - start_time))

# ==================== Example 2 ====================

    def __init__(self, name='inception_finetune_experiments'):
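        """Parse training arguments, pick the device, and set up the model,
        log and summary directories, the TensorBoard writer and file logging."""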
        self.args = train_args()

        self._get_gpu()
        self.cuda = torch.cuda.is_available() and self.args.gpu != -2
        self.device = torch.device("cuda" if self.cuda else "cpu")
        self.kwargs = {'num_workers': 6, 'pin_memory': True}

        self.mol_dir = 'model'

        self.log_dir = os.path.join('log')
        self.log_file = os.path.join(self.log_dir, 'log_%s.log' % name)
        self.summary_dir = os.path.join('summary', self.args.name)
        check_dir_exists([
            self.mol_dir, self.log_dir,
            os.path.dirname(self.summary_dir), self.summary_dir
        ])
        self.writer = SummaryWriter(self.summary_dir)
        logging.basicConfig(filename=self.log_file, level=logging.INFO)

# ==================== Example 3 ====================

def get_features(mol_short='resnet50'):
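    """Time per-image inference of the ResNet-50 model (fc features) on the
    test set."""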
    ################################################################
    # Arguments
    ################################################################
    ae_args = train_args()
    cuda = ae_args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if ae_args.cuda else {}
    # global ae_args, cuda, device, kwargs

    start_time = time.time()
    args = ae_args
    model_name = 'model/%s_%s%s_model-%s.pkl' % (
        mol_short, args.model, '' if args.fea_c is None else args.fea_c, args.dataset)
    evaluation_dir = 'res/evaluation_pic/%s_%s%s-%s' % (
        mol_short, args.model, '' if args.fea_c is None else args.fea_c, args.dataset)
    if os.path.exists(model_name) and args.load_model:
        print('Loading model ...')
        mol = torch.load(model_name).to(device)
    else:
        print('Init model ...')
        mol = ResNet().to(device)

    test_loader = getDataLoader(args, kwargs, train='test')
    loss_class = nn.CrossEntropyLoss().to(device)

    check_dir_exists(['res/', 'model', 'res/evaluation_pic', evaluation_dir])

    total, correct, top5correct, loss_total = 0, 0, 0, 0
    t_per_img = []
    for epoch in range(1):
        step_time = time.time()
        for step, (x, y) in enumerate(test_loader):
            t0 = time.time()
            b_x = Variable(x, volatile=True).cuda() if cuda else Variable(x)
            prob_class = mol.get_fc_features(b_x, return_both=False)
            t_tmp = (time.time() - t0) / len(b_x) * 1000
            t_per_img.append(t_tmp)
            t_mean = (np.mean(sorted(t_per_img)[1:-1])
                      if len(t_per_img) > 2 else np.mean(t_per_img))  # trimmed mean; avoids NaN early on
            print('cost %.6fms per image this batch. '
                  'cost %.6fms per image till now.' % (t_tmp, t_mean))


def sample_cover_label(mol_short='inception_v3'):
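    """Label the cover samples with a pretrained Inception-v3 and dump the
    top-10 ImageNet predictions (label name, class id, probability) for each
    sample to a JSON file."""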

    ################################################################
    # Arguments
    ################################################################
    args = train_args()
    cuda = args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

    start_time = time.time()
    out_path = 'res/evaluation_pic/sample_cover_label_%s.json' % mol_short
    print('Init model ...')
    mol = inception_v3(pretrained=True, training=False).to(device)
    cover_sample_loader = getDataLoader(args, kwargs, train='cover_sample')
    labels = read_imagenet_label_name(os.path.dirname(args.dataset_dir))

    mol.eval()
    out = {}
    for step, (x, y) in enumerate(cover_sample_loader):
        x = x.cuda() if cuda else x
        label = [y[1][i] for i in range(len(y[0]))]

        prob_class = mol(x)
        top5pre = prob_class.topk(10, 1, True, True)
        top5pre_label = top5pre[1].tolist()
        top5pre_prob = top5pre[0].tolist()
        for i in range(len(label)):
            out[label[i]] = [[
                labels[top5pre_label[i][j]], top5pre_label[i][j],
                top5pre_prob[i][j]
            ] for j in range(len(top5pre_prob[i]))]
    out = {k: out[k] for k in sorted(out.keys())}
    print(out)
    with open(out_path, 'w') as f:
        json.dump(out, f)
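
# NOTE: the indented block below is a fragment of a training loop whose
# enclosing function is not included in this snippet.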
            if step % 10 == 0:
                if os.path.exists(model_name):
                    shutil.copy2(model_name,
                                 model_name.split('.pkl')[0] + '_back.pkl')
                torch.save(mol, model_name)
                total_tmp = total if total != 0 else 1
                print(
                    '[Training] Epoch:', epoch, 'Step:', step, '|',
                    'Time cost %.2f s; Classification error %.6f; '
                    'Accuracy %.3f%%; Top5 Accuracy %.3f%%' %
                    (time.time() - step_time, torch.mean(loss.data),
                     correct * 100 / total_tmp, top5correct * 100 / total_tmp))
                correct, total, top5correct = 0, 0, 0
                step_time = time.time()

            cnt += 1
    pass


def train_decoder(args):
    pass


def train(args):
    pass


if __name__ == '__main__':
    args = train_args()
    get_main_function(args.main_fn)(args)

# ==================== Example 6 ====================

def train():
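    """Train the VAE: optimize the reconstruction (BCE) + KL-divergence loss,
    periodically saving input/reconstruction image grids and the model."""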
    ################################################################
    # Arguments
    ################################################################
    vae_args = train_args()
    cuda = vae_args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if vae_args.cuda else {}

    start_time = time.time()
    args = vae_args
    model_name = 'model/VAE_%s%s_model-%s.pkl' % (
        args.model, '' if args.fea_c is None else args.fea_c, args.dataset)
    pic_dir = 'res/VAE_%s%s-%s/' % (args.model, '' if args.fea_c is None else
                                    args.fea_c, args.dataset)
    if os.path.exists(model_name) and args.load_model:
        print('Loading model ...')
        if cuda:
            vae = torch.load(model_name).to(device)
        else:
            vae = torch.load(model_name, map_location='cpu')
    else:
        vae = init_model(args.model).to(device)

    train_loader = getDataLoader(args, kwargs)
    optimizer = torch.optim.Adam(list(vae.parameters()), lr=args.lr)

    check_dir_exists(['res/', 'model', pic_dir])
    loss_val = None

    for epoch in range(args.epoch):
        step_time = time.time()
        for step, (x, y) in enumerate(train_loader):
            b_x = Variable(x).cuda() if cuda else Variable(x)
            # batch y, shape (batch, 32*32*3)
            b_y = b_x.detach().cuda() if cuda else b_x.detach()

            decoded, mu, std = vae(b_x)

            if step % 100 == 0:
                img_to_save = torch.cat([b_x.data, decoded.data])
                save_image(img_to_save,
                           '%s/%s-%s.jpg' % (pic_dir, epoch, step))
            # io.imsave('.xxx.jpg',img_to_save[0])

            loss, bce, kld = loss_function(decoded, b_y, mu,
                                           std)  # reconstruction (BCE) + KL divergence
            optimizer.zero_grad()  # clear gradients for this training step
            loss.backward()  # backpropagation, compute gradients
            optimizer.step()

            loss_val = (0.99 * loss_val + 0.01 * loss.item()
                        if loss_val is not None else loss.item())

            if step % 10 == 0:
                torch.save(vae, model_name)
                print(
                    'Epoch:', epoch, 'Step:', step, '|',
                    'train loss %.6f; KLD %.6f; BCE %.6f; Time cost %.2f s' %
                    (loss_val, kld, bce, time.time() - step_time))
                step_time = time.time()
    print('Finished. Total time cost %.2f s' % (time.time() - start_time))
def train(mol_short='inception_v3'):
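    """Evaluate the Inception-v3 model: run cover evaluation, then measure
    test loss and top-1/top-5 accuracy on the test set, checkpointing the
    model periodically."""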
    ################################################################
    # Arguments
    ################################################################
    ae_args = train_args()
    cuda = ae_args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if ae_args.cuda else {}
    # global ae_args, cuda, device, kwargs

    start_time = time.time()
    args = ae_args
    model_name = 'model/%s_%s%s_model-%s.pkl' % (mol_short, args.model,
                                                 '' if args.fea_c is None else
                                                 args.fea_c, args.dataset)
    evaluation_dir = 'res/evaluation_pic/%s_%s%s-%s' % (
        mol_short, args.model, '' if args.fea_c is None else args.fea_c,
        args.dataset)
    if os.path.exists(model_name) and args.load_model:
        print('Loading model ...')
        mol = torch.load(model_name).to(device)
    else:
        print('Init model ...')
        mol = inception_v3(pretrained=True, training=False).to(device)
    # mol.eval()

    # train_loader = getDataLoader(args, kwargs)
    test_loader = getDataLoader(args, kwargs, train='test')
    # cover_loader = getDataLoader(args, kwargs, train='cover')
    cover_val_loader = getDataLoader(args, kwargs, train='cover_validation')
    cover_sample_loader = getDataLoader(args, kwargs, train='cover_sample')

    loss_class = nn.CrossEntropyLoss().to(device)

    check_dir_exists(['res/', 'model', 'res/evaluation_pic', evaluation_dir])

    # Evaluation
    # check_dir_exists([os.path.join(evaluation_dir, 'cos'), os.path.join(evaluation_dir, 'distance')])
    evaluate_cover(cover_val_loader, cover_sample_loader, mol, cuda,
                   evaluation_dir, args)
    # encode_accuracy, encode_top5accuracy, fc_accuracy, fc_top5accuracy = evaluate_labeled_data(test_loader, mol, cuda)
    # print('Encode accuracy:', np.mean(encode_accuracy))
    # print('Encode top5 accuracy:', np.mean(encode_top5accuracy))
    # print('Fc accuracy:', np.mean(fc_accuracy))
    # print('Fc top5 accuracy:', np.mean(fc_top5accuracy))

    total, correct, top5correct, loss_total = 0, 0, 0, 0
    for epoch in range(1):
        step_time = time.time()
        for step, (x, y) in enumerate(test_loader):
            b_x = Variable(x, volatile=True).cuda() if cuda else Variable(x)
            label = Variable(
                torch.Tensor([y[2][i] for i in range(len(y[0]))]).long())
            label = label.cuda() if cuda else label

            prob_class = mol(b_x)

            loss = loss_class(prob_class, label)  # cross-entropy classification loss

            _, predicted = torch.max(prob_class.data, 1)
            total += label.size(0)
            correct += (predicted == label).sum().item()
            top5pre = prob_class.topk(5, 1, True, True)
            top5pre = top5pre[1].t()
            top5correct += top5pre.eq(label.view(
                1, -1).expand_as(top5pre)).sum().item()

            loss_total += loss.item() * label.size(0)

            if step % 10 == 0:
                torch.save(mol, model_name)
                print(
                    'Epoch:', epoch, 'Step:', step, '|',
                    'test loss %.6f; Time cost %.2f s; Classification error %.6f; '
                    'Top1 Accuracy %.3f; Top5 Accuracy %.3f' %
                    (loss_total / total, time.time() - step_time, loss,
                     correct * 100 / total, top5correct * 100 / total))
                step_time = time.time()
    print('Finished. Total time cost %.2f s' % (time.time() - start_time))

# ==================== Example 8 ====================

def train(mol_short='VGGClass', main_model=VGGClass):
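    """Train the classifier head of the VGG-based model (plus small_features
    unless fea_c == 512), logging loss/accuracy to TensorBoard and running
    cover and labelled-data evaluation every few epochs."""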
    ################################################################
    # Arguments
    ################################################################
    ae_args = train_args()
    cuda = ae_args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if ae_args.cuda else {}
    # global ae_args, cuda, device, kwargs
    args = ae_args
    mol_short = mol_short if args.name == '' else mol_short + '_' + args.name

    log_dir = 'log/log_%s_%s%s_model-%s/' %\
              (mol_short, args.model, '' if args.fea_c is None else args.fea_c, args.dataset)
    writer = SummaryWriter(log_dir)

    start_time = time.time()
    model_name = 'model/%s_%s%s_model-%s.pkl' % (mol_short, args.model,
                                                 '' if args.fea_c is None else
                                                 args.fea_c, args.dataset)
    print('[Model] model name is', model_name)
    pic_dir = 'res/%s_%s%s-%s/' % (mol_short, args.model,
                                   '' if args.fea_c is None else args.fea_c,
                                   args.dataset)
    evaluation_dir = 'res/evaluation_pic/%s_%s%s-%s' % (
        mol_short, args.model, '' if args.fea_c is None else args.fea_c,
        args.dataset)
    if os.path.exists(model_name) and args.load_model:
        print('Loading model ...')
        if cuda:
            mol = torch.load(model_name).to(device)
        else:
            mol = torch.load(model_name, map_location='cpu')
    else:
        print('Init model ...')
        mol = main_model(args.fea_c).to(device)

    print('Prepare data loader ...')
    train_loader = getDataLoader(args, kwargs, p=args.imgnet_p)
    test_loader = getDataLoader(args, kwargs, train='test')
    # small_test_loader = getDataLoader(args, kwargs, train=False, p=10)
    # cover_loader = getDataLoader(args, kwargs, train='cover')
    cover_val_loader = getDataLoader(args, kwargs, train='cover_validation')
    cover_sample_loader = getDataLoader(args, kwargs, train='cover_sample')

    # Optimizer & Loss function
    if args.fea_c == 512:
        optimizer1 = torch.optim.Adam(list(mol.classification.parameters()),
                                      lr=args.lr)
    else:
        optimizer1 = torch.optim.Adam(list(mol.classification.parameters()) +
                                      list(mol.small_features.parameters()),
                                      lr=args.lr)

    loss_class = nn.CrossEntropyLoss().to(device)
    loss_val = None

    # Check directories
    check_dir_exists([
        'res/', 'model', pic_dir, log_dir, 'res/evaluation_pic', evaluation_dir
    ])

    total, correct, top5correct, cnt = 0, 0, 0, 0
    print('Start training ...')
    for epoch in range(args.epoch):
        # Evaluation cover
        if epoch % 5 == 0 and epoch != 0:
            mol.eval()
            eval_dir = os.path.join(evaluation_dir, 'epoch%d' % epoch)
            evaluate_cover(cover_val_loader, cover_sample_loader, mol, cuda,
                           eval_dir, args)

            encode_accuracy, encode_top5accuracy, fc_accuracy, fc_top5accuracy = evaluate_labeled_data(
                test_loader, mol, cuda)
            writer.add_scalar('test/encode_feature_accuracy',
                              np.mean(encode_accuracy), epoch)
            writer.add_scalar('test/encode_feature_top5accuracy',
                              np.mean(encode_top5accuracy), epoch)
            writer.add_scalar('test/fc_feature_accuracy', np.mean(fc_accuracy),
                              epoch)
            writer.add_scalar('test/fc_feature_top5accuracy',
                              np.mean(fc_top5accuracy), epoch)

        if epoch != 0:
            # Testing classifier
            mol.eval()
            test_acc, test_top5acc = test(test_loader, mol, cuda, 'Full')
            writer.add_scalar('test/class_accuracy', test_acc, epoch)
            writer.add_scalar('test/class_top5accuracy', test_top5acc, epoch)

        step_time = time.time()
        mol.train()
        print('######### Training with %d batches total ##########' %
              len(train_loader))
        for step, (x, y) in enumerate(train_loader):
            b_x = Variable(x).cuda() if cuda else Variable(x)
            label = Variable(
                torch.Tensor([y[2][i] for i in range(len(y[0]))]).long())
            label = label.cuda() if cuda else label

            prob_class = mol(b_x)

            loss = loss_class(prob_class, label)
            writer.add_scalar('train/loss_classifier', loss, cnt)

            optimizer1.zero_grad()
            loss.backward()
            optimizer1.step()

            _, predicted = torch.max(prob_class.data, 1)
            total += label.size(0)
            correct += (predicted == label).sum().item()
            top5pre = prob_class.topk(5, 1, True, True)
            top5pre = top5pre[1].t()
            top5correct += top5pre.eq(label.view(
                1, -1).expand_as(top5pre)).sum().item()
            writer.add_scalar('train/accuracy', correct / total, cnt)
            writer.add_scalar('train/top5_accuracy', top5correct / total, cnt)

            loss_val = (0.99 * loss_val + 0.01 * loss.item()
                        if loss_val is not None else loss.item())

            if step % 10 == 0:
                if os.path.exists(model_name):
                    shutil.copy2(model_name,
                                 model_name.split('.pkl')[0] + '_back.pkl')
                torch.save(mol, model_name)
                print(
                    '[Training] Epoch:', epoch, 'Step:', step, '|',
                    'train loss %.6f; Time cost %.2f s; Accuracy %.3f%%; Top5 Accuracy %.3f%%'
                    % (loss.item(), time.time() - step_time,
                       correct * 100 / total, top5correct * 100 / total))
                correct, total, top5correct = 0, 0, 0
                step_time = time.time()

            cnt += 1

    print('Finished. Total time cost %.2f s' % (time.time() - start_time))
    writer.export_scalars_to_json(os.path.join(log_dir, 'all_scalars.json'))
    writer.close()

# ==================== Example 9 ====================

def train():
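    """Evaluate the VGG classifier on the test set: per-image inference time,
    test loss and top-1/top-5 accuracy (the optimization steps are commented
    out)."""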
    ################################################################
    # Arguments
    ################################################################
    ae_args = train_args()
    cuda = ae_args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if ae_args.cuda else {}
    # global ae_args, cuda, device, kwargs

    start_time = time.time()
    args = ae_args
    model_name = 'model/vgg_classifier_%s%s_model-%s.pkl' % (
        args.model, '' if args.fea_c is None else args.fea_c, args.dataset)
    evaluation_dir = 'res/evaluation_pic/vgg_%s%s-%s' % (
        args.model, '' if args.fea_c is None else args.fea_c, args.dataset)
    if os.path.exists(model_name) and args.load_model:
        print('Loading model ...')
        vgg = torch.load(model_name).to(device)
    else:
        print('Init model ...')
        vgg = VGGNet(args.fea_c).to(device)

    # train_loader = getDataLoader(args, kwargs)
    test_loader = getDataLoader(args, kwargs, train='test')
    # cover_loader = getDataLoader(args, kwargs, train='cover')
    cover_val_loader = getDataLoader(args, kwargs, train='cover_validation')
    cover_sample_loader = getDataLoader(args, kwargs, train='cover_sample')

    optimizer = torch.optim.Adam(list(vgg.parameters()), lr=args.lr)
    loss_class = nn.CrossEntropyLoss().to(device)

    check_dir_exists(['res/', 'model', 'res/evaluation_pic', evaluation_dir])

    # # Evaluation
    # vgg.eval()
    # check_dir_exists([os.path.join(evaluation_dir, 'cos'), os.path.join(evaluation_dir, 'distance')])
    # # evaluate_cover(cover_val_loader, cover_sample_loader, vgg, cuda, evaluation_dir, args)
    # fc_accuracy, fc_top5accuracy = evaluate_labeled_data(test_loader, vgg, cuda, both=False)
    # # print('Encode accuracy:', np.mean(encode_accuracy))
    # # print('Encode top5 accuracy:', np.mean(encode_top5accuracy))
    # print('Fc accuracy:', np.mean(fc_accuracy))
    # print('Fc top5 accuracy:', np.mean(fc_top5accuracy))

    total, correct, top5correct, loss_total = 0, 0, 0, 0
    t_per_img = []
    for epoch in range(1):
        step_time = time.time()
        for step, (x, y) in enumerate(test_loader):
            b_x = Variable(x, volatile=True).cuda() if cuda else Variable(x)
            label = Variable(
                torch.Tensor([y[2][i] for i in range(len(y[0]))]).long())
            label = label.cuda() if cuda else label

            t0 = time.time()
            prob_class = vgg(b_x)
            t_tmp = (time.time() - t0) / len(b_x) * 1000
            t_per_img.append(t_tmp)
            # Trimmed mean (drop fastest/slowest batch) once enough samples exist; avoids NaN early on
            t_mean = (np.mean(sorted(t_per_img)[1:-1])
                      if len(t_per_img) > 2 else np.mean(t_per_img))
            print('cost %.6fms per image this batch. '
                  'cost %.6fms per image till now.' % (t_tmp, t_mean))

            loss = loss_class(prob_class, label)  # cross-entropy classification loss
            # optimizer.zero_grad()  # clear gradients for this training step
            # loss.backward()
            # optimizer.step()

            _, predicted = torch.max(prob_class.data, 1)
            total += label.size(0)
            correct += (predicted == label).sum().item()
            top5pre = prob_class.topk(5, 1, True, True)
            top5pre = top5pre[1].t()
            top5correct += top5pre.eq(label.view(
                1, -1).expand_as(top5pre)).sum().item()

            loss_total += loss.item() * label.size(0)

            if step % 10 == 0:
                torch.save(vgg, model_name)
                print(
                    'Epoch:', epoch, 'Step:', step, '|',
                    'test loss %.6f; Time cost %.2f s; Classification error %.6f; '
                    'Top1 Accuracy %.3f; Top5 Accuracy %.3f' %
                    (loss_total / total, time.time() - step_time, loss.item(),
                     correct * 100 / total, top5correct * 100 / total))
                step_time = time.time()

            loss = None
    print('Finished. Total time cost %.2f s' % (time.time() - start_time))

# ==================== Example 10 ====================

def test():
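    """Measure the autoencoder's reconstruction (MSE) loss on the ImageNet
    validation and training subsets and on the cover validation set, saving
    sample reconstructions along the way."""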
    ################################################################
    # Arguments
    ################################################################
    ae_args = train_args()
    cuda = ae_args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if ae_args.cuda else {}
    # global ae_args, cuda, device, kwargs
    args = ae_args

    log_dir = 'log/log_AE_%s%s_model-%s/' %\
              (args.model, '' if args.fea_c is None else args.fea_c, args.dataset)
    writer = SummaryWriter(log_dir)

    start_time = time.time()
    model_name = 'model/AE_%s%s_model-%s.pkl' % (
        args.model, '' if args.fea_c is None else args.fea_c, args.dataset)
    print('Model name is:', model_name)
    pic_dir = 'res/AE_%s%s-%s/' % (args.model, '' if args.fea_c is None else
                                   args.fea_c, args.dataset)
    if os.path.exists(model_name) and args.load_model:
        print('Loading model ...')
        mol = torch.load(model_name).to(device)
    else:
        print('Init model ...')
        mol = AutoEncoder(args.fea_c).to(device)

    print('Prepare data loader ...')
    test_loader = getDataLoader(args, kwargs, train='test', p=0.2)
    train_loader = getDataLoader(args, kwargs, train='train', p=0.05)
    # test_loader = getDataLoader(args, kwargs, train='test')
    cover_val_loader = getDataLoader(args,
                                     kwargs,
                                     train='cover_validation',
                                     p=0.2)

    step_time = time.time()

    loss_val = []
    print(
        '######### Testing with %d batches total of imagenet val ##########' %
        len(test_loader))
    for step, (x, y) in enumerate(test_loader):
        b_x = Variable(x).cuda() if cuda else Variable(x)
        # batch y, shape (batch, 32*32*3)
        b_y = b_x.detach().cuda() if cuda else b_x.detach()

        _, decoded = mol(b_x)
        loss_tmp = F.mse_loss(decoded, b_y)
        loss_val.append(loss_tmp.item())

        if step % 500 == 0:
            img_to_save = decoded.data
            save_image(img_to_save,
                       '%s/imagenet_val_step%s.jpg' % (pic_dir, step))

        if step % 10 == 0:
            print('[Testing] Step %d; Decoder loss = %.5f; time cost %.2fs' %
                  (step, np.mean(loss_val), time.time() - step_time))
            step_time = time.time()

    print('ImageNet val decoder loss = %.4f' % np.mean(loss_val))

    loss = []
    print(
        '######### Testing with %d batches total of imagenet train ##########'
        % len(train_loader))
    for step, (x, y) in enumerate(train_loader):
        b_x = Variable(x).cuda() if cuda else Variable(x)
        # batch y, shape (batch, 32*32*3)
        b_y = b_x.detach().cuda() if cuda else b_x.detach()

        _, decoded = mol(b_x)
        loss_tmp = F.mse_loss(decoded, b_y)
        loss.append(loss_tmp.item())

        if step % 500 == 0:
            img_to_save = decoded.data
            save_image(img_to_save,
                       '%s/imagenet_train_step%s.jpg' % (pic_dir, step))

        if step % 10 == 0:
            print('[Testing] Step %d; Decoder loss = %.5f; time cost %.2fs' %
                  (step, np.mean(loss), time.time() - step_time))
            step_time = time.time()

    print('ImageNet train decoder loss = %.4f' % np.mean(loss))

    loss_cover = []
    print('######### Testing with %d batches total of cover val##########' %
          len(cover_val_loader))
    for step, (x, y) in enumerate(cover_val_loader):
        b_x = Variable(x).cuda() if cuda else Variable(x)
        # batch y, shape (batch, 32*32*3)
        b_y = b_x.detach().cuda() if cuda else b_x.detach()

        _, decoded = mol(b_x)
        loss_tmp = F.mse_loss(decoded, b_y)
        loss_cover.append(loss_tmp.item())

        if step % 500 == 0:
            img_to_save = decoded.data
            save_image(img_to_save,
                       '%s/cover_val_step%s.jpg' % (pic_dir, step))

        if step % 10 == 0:
            print('[Testing] Step %d; Decoder loss = %.5f; time cost %.2fs' %
                  (step, np.mean(loss_cover), time.time() - step_time))
            step_time = time.time()

    print(
        "0.25 ImageNet train decoder loss = %.4f; Cover val decoder loss = %.4f"
        % (np.mean(loss), np.mean(loss_cover)))

# ==================== Example 11 ====================

def train():
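    """Train the autoencoder with an MSE reconstruction loss, using separate
    optimizers for (small_features + decoder) and the pretrained features
    (lower learning rate), logging to TensorBoard and checkpointing."""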
    ################################################################
    # Arguments
    ################################################################
    ae_args = train_args()
    cuda = ae_args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if ae_args.cuda else {}
    # global ae_args, cuda, device, kwargs
    args = ae_args

    log_dir = 'log/log_AE_%s%s_model-%s/' %\
              (args.model, '' if args.fea_c is None else args.fea_c, args.dataset)
    writer = SummaryWriter(log_dir)

    start_time = time.time()
    model_name = 'model/AE_%s%s_model-%s.pkl' % (
        args.model, '' if args.fea_c is None else args.fea_c, args.dataset)
    pic_dir = 'res/AE_%s%s-%s/' % (args.model, '' if args.fea_c is None else
                                   args.fea_c, args.dataset)
    if os.path.exists(model_name) and args.load_model:
        print('Loading model ...')
        mol = torch.load(model_name).to(device)
    else:
        print('Init model ...')
        mol = AutoEncoder(args.fea_c).to(device)

    print('Prepare data loader ...')
    train_loader = getDataLoader(args, kwargs, train=True)
    test_loader = getDataLoader(args, kwargs, train=False)

    optimizer1 = torch.optim.Adam(list(mol.small_features.parameters()) +
                                  list(mol.decoder.parameters()),
                                  lr=args.lr)
    optimizer2 = torch.optim.Adam(list(mol.features.parameters()),
                                  lr=args.lr / 10)
    loss_decoder = nn.MSELoss()

    check_dir_exists(['res/', 'model', pic_dir, log_dir])

    total, correct, top5correct, cnt = 0, 0, 0, 0
    print('Start training ...')
    for epoch in range(args.epoch):
        # Testing
        test_acc, test_top5acc, test_loss = test_feature(
            test_loader, mol, cuda, 'Full')
        writer.add_scalar('test/accuracy', np.mean(test_acc), epoch)
        writer.add_scalar('test/top5accuracy', np.mean(test_top5acc), epoch)
        writer.add_scalar('test/loss_decoder', test_loss, epoch)

        step_time = time.time()
        for step, (x, y) in enumerate(train_loader):
            b_x = Variable(x).cuda() if cuda else Variable(x)
            # batch y, shape (batch, 32*32*3)
            b_y = b_x.detach().cuda() if cuda else b_x.detach()

            encoded, decoded = mol(b_x)

            if step % 100 == 0:
                img_to_save = decoded.data
                save_image(img_to_save,
                           '%s/%s-%s.jpg' % (pic_dir, epoch, step))

            loss = loss_decoder(decoded, b_y)
            writer.add_scalar('train/loss_decoder', loss, cnt)

            optimizer1.zero_grad()
            optimizer2.zero_grad()
            loss.backward()
            optimizer1.step()
            optimizer2.step()

            if step % 50 == 0:
                if os.path.exists(model_name):
                    shutil.copy2(model_name,
                                 model_name.split('.pkl')[0] + '_back.pkl')
                torch.save(mol, model_name)
                print(
                    '[Training] Epoch:', epoch, 'Step:', step, '|',
                    'train loss %.6f; Time cost %.2f s; Decoder error %.6f' %
                    (loss.item(), time.time() - step_time, loss.item()))
                step_time = time.time()

            cnt += 1

    print('Finished. Total time cost %.2f s' % (time.time() - start_time))
    writer.export_scalars_to_json(os.path.join(log_dir, 'all_scalars.json'))
    writer.close()