Example #1
def main(args):
    # prepare the source data and target data

    src_train_dataloader = utils.get_train_loader('MNIST')
    src_test_dataloader = utils.get_test_loader('MNIST')
    tgt_train_dataloader = utils.get_train_loader('MNIST_M')
    tgt_test_dataloader = utils.get_test_loader('MNIST_M')

    if args.plot:
        print('Images from training on source domain:')
        utils.displayImages(src_train_dataloader)

        print('Images from test on target domain:')
        utils.displayImages(tgt_test_dataloader)

    # init models
    feature_extractor = models.Extractor()
    class_classifier = models.Class_classifier()
    domain_classifier = models.Domain_classifier()

    if params.use_gpu:
        feature_extractor.cuda()
        class_classifier.cuda()
        domain_classifier.cuda()

    # init criterions
    class_criterion = nn.NLLLoss()
    domain_criterion = nn.NLLLoss()
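    # NLLLoss expects log-probabilities, so both classifiers presumably end in log_softmax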

    # init optimizer
    optimizer = optim.SGD([{
        'params': feature_extractor.parameters()
    }, {
        'params': class_classifier.parameters()
    }, {
        'params': domain_classifier.parameters()
    }],
                          lr=0.01,
                          momentum=0.9)

    for epoch in range(params.epochs):
        print('Epoch: {}'.format(epoch))
        train.train(args.training_mode, feature_extractor, class_classifier,
                    domain_classifier, class_criterion, domain_criterion,
                    src_train_dataloader, tgt_train_dataloader, optimizer,
                    epoch)
        test.test(feature_extractor, class_classifier, domain_classifier,
                  src_test_dataloader, tgt_test_dataloader)

    if args.plot:
        visualizePerformance(feature_extractor, class_classifier,
                             domain_classifier, src_test_dataloader,
                             tgt_test_dataloader)
Example #2
File: main.py Project: hccngu/MLADA
def main():

    # make_print_to_file(path='/results')

    args = parse_args()

    print_args(args)

    set_seed(args.seed)

    # load data
    train_data, val_data, test_data, vocab = loader.load_dataset(args)

    args.id2word = vocab.itos

    # initialize model
    model = {}
    model["G"], model["D"] = get_embedding(vocab, args)
    model["clf"] = get_classifier(model["G"].ebd_dim, args)

    if args.mode == "train":
        # train model on train_data, early stopping based on val_data
        train(train_data, val_data, model, args)

    # val_acc, val_std, _ = test(val_data, model, args,
    #                                         args.val_episodes)

    test_acc, test_std, drawn_data = test(test_data, model, args,
                                          args.test_episodes)

    # path_drawn = args.path_drawn_data
    # with open(path_drawn, 'w') as f_w:
    #     json.dump(drawn_data, f_w)
    #     print("store drawn data finished.")

    # file_path = r'../data/attention_data.json'
    # Print_Attention(file_path, vocab, model, args)

    if args.result_path:
        directory = args.result_path[:args.result_path.rfind("/")]
        if not os.path.exists(directory):
            os.makedirs(directory)

        result = {
            "test_acc": test_acc,
            "test_std": test_std,
            # "val_acc": val_acc,
            # "val_std": val_std
        }

        for attr, value in sorted(args.__dict__.items()):
            result[attr] = value

        with open(args.result_path, "wb") as f:
            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
Example #3
def main():
    model_path = './models/hscnn_5layer_dim10_356.pkl'
    cfg = build_config('config.ini')
    result_path = './out'

    # Model
    net_type = cfg.get('Train', 'net_type')
    if net_type == 'n16_64':
        model = resblock.resblock(resblock.conv_relu_res_relu_block, 16, 3, 31)
    elif net_type == 'n16_256':
        model = resblock_256.resblock(resblock_256.conv_relu_res_relu_block,
                                      16, 3, 31)
    elif net_type == 'n14':
        model = resblock.resblock(resblock.conv_relu_res_relu_block, 14, 3, 31)
    else:
        raise RuntimeError('unsupported net type: %s' % net_type)
    save_point = torch.load(model_path)
    model_param = save_point['state_dict']
    model.load_state_dict(model_param)

    test_data = build_dataset(cfg, type="test", kfold_th=4, kfold=4)
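    # presumably the 4th of 4 folds is held out as the test split here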
    model = model.cuda()
    model.eval()

    # test
    rmse, rmse_g, rrmse, rrmse_g = test(model, test_data)
    print(
        f'{bfill("rmse")}:{rmse:9.4f}, {bfill("rmse_g")}:{rmse_g:9.4f}, {bfill("rrmse")}:{rrmse:9.4f}, {bfill("rrmse_g")}:{rrmse_g:9.4f}'
    )

    # generate spe imgs
    limit = cfg.getint('Train', 'spectrum_limit')
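    # spectrum_limit is assumed to be the intensity ceiling used to normalize the
    # training data; predictions are rescaled by it below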
    with torch.no_grad():
        for i, (images,
                labels) in enumerate(test_data.get_dataloader(1, False)):
            out = model(images.cuda())
            img_res = out.cpu().numpy() * limit
            # shape from [1,C,H,W] to [C,H,W]
            img_res = np.squeeze(img_res, axis=0)
            # clamp values to the valid [0, limit] range for image data
            img_res_limits = np.minimum(img_res, limit)
            img_res_limits = np.maximum(img_res_limits, 0)
            # shape from [C,H,W] to [H,W,C]
            arr = img_res_limits.transpose(1, 2, 0)
            rgb_name = last_path_name(test_data.rgb_names[i])
            rgb_name = rgb_name[0:rgb_name.rfind('_')]
            mkdir(f'{result_path}/{rgb_name}_ms')
            for ch in tqdm(range(31), desc=f'generating {rgb_name} spe files'):
                img_arr = arr[:, :, ch]
                img = Image.fromarray(img_arr.astype(np.uint16), mode="I;16")
                img.save(
                    f'{result_path}/{rgb_name}_ms/{rgb_name}_ms_{str(ch + 1).zfill(2)}.png',
                    'png')
Example #4
def train(train_data, val_data, model, args):
    '''
        Train the model
        Use val_data to do early stopping
    '''
    # creating a tmp directory to save the models
    out_dir = os.path.abspath(
        os.path.join(os.path.curdir, "tmp-runs", str(int(time.time() * 1e7))))
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    best_acc = 0
    sub_cycle = 0
    best_path = None

    optG = torch.optim.Adam(grad_param(model, ['G', 'clf']), lr=args.lr_g)
    optD = torch.optim.Adam(grad_param(model, ['D']), lr=args.lr_d)
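    # adversarial split: optG updates the generator and classifier, optD the discriminator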

    if args.lr_scheduler == 'ReduceLROnPlateau':
        schedulerG = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optG, 'max', patience=args.patience // 2, factor=0.1, verbose=True)
        schedulerD = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optD, 'max', patience=args.patience // 2, factor=0.1, verbose=True)

    elif args.lr_scheduler == 'ExponentialLR':
        schedulerG = torch.optim.lr_scheduler.ExponentialLR(
            optG, gamma=args.ExponentialLR_gamma)
        schedulerD = torch.optim.lr_scheduler.ExponentialLR(
            optD, gamma=args.ExponentialLR_gamma)

    print("{}, Start training".format(datetime.datetime.now()), flush=True)

    # train_gen = ParallelSampler(train_data, args, args.train_episodes)
    train_gen_val = ParallelSampler_Test(train_data, args, args.val_episodes)
    val_gen = ParallelSampler_Test(val_data, args, args.val_episodes)
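    # evaluation samplers are built once and reused for the periodic train/val checks below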

    # sampled_classes, source_classes = task_sampler(train_data, args)
    for ep in range(args.train_epochs):

        sampled_classes, source_classes = task_sampler(train_data, args)

        train_gen = ParallelSampler(train_data, args, sampled_classes,
                                    source_classes, args.train_episodes)

        sampled_tasks = train_gen.get_epoch()

        grad = {'clf': [], 'G': [], 'D': []}

        if not args.notqdm:
            sampled_tasks = tqdm(sampled_tasks, total=train_gen.num_episodes,
                                 ncols=80, leave=False,
                                 desc=colored('Training on train', 'yellow'))
        d_acc = 0
        for task in sampled_tasks:
            if task is None:
                break
            d_acc += train_one(task, model, optG, optD, args, grad)

        d_acc = d_acc / args.train_episodes

        print("---------------ep:" + str(ep) + " d_acc:" + str(d_acc) + "-----------")

        if ep % 10 == 0:

            acc, std, _ = test(train_data, model, args, args.val_episodes,
                               False, train_gen_val.get_epoch())
            print("{}, {:s} {:2d}, {:s} {:s}{:>7.4f} ± {:>6.4f} ".format(
                datetime.datetime.now(),
                "ep", ep,
                colored("train", "red"),
                colored("acc:", "blue"), acc, std,
                ), flush=True)

        # Evaluate validation accuracy
        cur_acc, cur_std, _ = test(val_data, model, args, args.val_episodes,
                                   False, val_gen.get_epoch())
        print(("{}, {:s} {:2d}, {:s} {:s}{:>7.4f} ± {:>6.4f}, "
               "{:s} {:s}{:>7.4f}, {:s}{:>7.4f}").format(
               datetime.datetime.now(),
               "ep", ep,
               colored("val  ", "cyan"),
               colored("acc:", "blue"), cur_acc, cur_std,
               colored("train stats", "cyan"),
               colored("G_grad:", "blue"), np.mean(np.array(grad['G'])),
               colored("clf_grad:", "blue"), np.mean(np.array(grad['clf'])),
               ), flush=True)

        # Update the current best model if val acc is better
        if cur_acc > best_acc:
            best_acc = cur_acc
            best_path = os.path.join(out_dir, str(ep))

            # save current model
            print("{}, Save cur best model to {}".format(
                datetime.datetime.now(),
                best_path))

            torch.save(model['G'].state_dict(), best_path + '.G')
            torch.save(model['D'].state_dict(), best_path + '.D')
            torch.save(model['clf'].state_dict(), best_path + '.clf')

            sub_cycle = 0
        else:
            sub_cycle += 1

        # Break if the val acc hasn't improved in the past patience epochs
        if sub_cycle == args.patience:
            break

        if args.lr_scheduler == 'ReduceLROnPlateau':
            schedulerG.step(cur_acc)
            schedulerD.step(cur_acc)

        elif args.lr_scheduler == 'ExponentialLR':
            schedulerG.step()
            schedulerD.step()

    print("{}, End of training. Restore the best weights".format(
            datetime.datetime.now()),
            flush=True)

    # restore the best saved model
    model['G'].load_state_dict(torch.load(best_path + '.G'))
    model['D'].load_state_dict(torch.load(best_path + '.D'))
    model['clf'].load_state_dict(torch.load(best_path + '.clf'))

    if args.save:
        # save the current model
        out_dir = os.path.abspath(os.path.join(
                                      os.path.curdir,
                                      "saved-runs",
                                      str(int(time.time() * 1e7))))
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        best_path = os.path.join(out_dir, 'best')

        print("{}, Save best model to {}".format(
            datetime.datetime.now(),
            best_path), flush=True)

        torch.save(model['G'].state_dict(), best_path + '.G')
        torch.save(model['D'].state_dict(), best_path + '.D')
        torch.save(model['clf'].state_dict(), best_path + '.clf')

        with open(best_path + '_args.txt', 'w') as f:
            for attr, value in sorted(args.__dict__.items()):
                f.write("{}={}\n".format(attr, value))

    return
Example #5
import yaml
import os
from train.test import test

with open('config.yml') as f:
    config = yaml.safe_load(f)
mode = config['mode']
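# the nested config is assumed to look roughly like:
#   aspect_test_model:
#     type: lstm      # hypothetical model type
#     lstm:
#       gpu: 0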
os.environ["CUDA_VISIBLE_DEVICES"] = str(
    config['aspect_' + mode + '_model'][config['aspect_' + mode +
                                               '_model']['type']]['gpu'])
test(config)
Example #6
File: main.py Project: yhlhit/dann
def main(args):

    # Set global parameters.
    params.fig_mode = args.fig_mode
    params.epochs = args.max_epoch
    params.training_mode = args.training_mode
    params.source_domain = args.source_domain
    params.target_domain = args.target_domain
    params.backbone = args.backbone
    if params.embed_plot_epoch is None:
        params.embed_plot_epoch = args.embed_plot_epoch
    params.lr = args.lr

    if args.save_dir is not None:
        params.save_dir = args.save_dir
    else:
        print('Figures will be saved in the ./experiment folder.')

    # prepare the source data and target data

    src_train_dataloader = utils.get_train_loader(params.source_domain)
    src_test_dataloader = utils.get_test_loader(params.source_domain)
    tgt_train_dataloader = utils.get_train_loader(params.target_domain)
    tgt_test_dataloader = utils.get_test_loader(params.target_domain)

    if params.fig_mode is not None:
        print('Images from training on source domain:')

        utils.displayImages(src_train_dataloader, imgName='source')

        print('Images from test on target domain:')
        utils.displayImages(tgt_test_dataloader, imgName='target')

    # init models
    #model_index = params.source_domain + '_' + params.target_domain
    model_index = params.backbone
    feature_extractor = params.extractor_dict[model_index]
    class_classifier = params.class_dict[model_index]
    domain_classifier = params.domain_dict['Stand']

    if params.use_gpu:
        feature_extractor.cuda()
        class_classifier.cuda()
        domain_classifier.cuda()

    #data parallel
    if torch.cuda.device_count() > 1:
        feature_extractor = nn.DataParallel(feature_extractor)
        class_classifier = nn.DataParallel(class_classifier)
        domain_classifier = nn.DataParallel(domain_classifier)

    # init criterions
    class_criterion = nn.NLLLoss()
    domain_criterion = nn.NLLLoss()

    # init optimizer
    optimizer = optim.SGD([{
        'params': feature_extractor.parameters()
    }, {
        'params': class_classifier.parameters()
    }, {
        'params': domain_classifier.parameters()
    }],
                          lr=params.lr,
                          momentum=0.9)

    for epoch in range(params.epochs):
        print('Epoch: {}'.format(epoch))
        train.train(args.training_mode, feature_extractor, class_classifier,
                    domain_classifier, class_criterion, domain_criterion,
                    src_train_dataloader, tgt_train_dataloader, optimizer,
                    epoch)
        test.test(feature_extractor, class_classifier, domain_classifier,
                  src_test_dataloader, tgt_test_dataloader)

        # Plot embeddings periodically.
        if epoch % params.embed_plot_epoch == 0 and params.fig_mode is not None:
            visualizePerformance(feature_extractor,
                                 class_classifier,
                                 domain_classifier,
                                 src_test_dataloader,
                                 tgt_test_dataloader,
                                 imgName='embedding_' + str(epoch))
Example #7
File: main.py Project: yyxjcc/DANN
                                          image_size)
            domain_label = torch.ones(batch_size)
            domain_label = domain_label.long()

            if cuda:
                t_img = t_img.cuda()
                input_img = input_img.cuda()
                domain_label = domain_label.cuda()

            input_img.resize_as_(t_img).copy_(t_img)

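            # alpha presumably scales the gradient-reversal layer, as in standard DANN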
            _, domain_output = my_net(input_data=input_img, alpha=alpha)
            err_t_domain = loss_domain(domain_output, domain_label)
            err = err_t_domain + err_s_domain + err_s_label
            err.backward()
            optimizer.step()

            i += 1

            print('epoch: %d, [iter: %d / all %d], err_s_label: %f, err_s_domain: %f, err_t_domain: %f' \
                  % (epoch, i, len_dataloader, err_s_label.cpu().data.numpy(),
                     err_s_domain.cpu().data.numpy(), err_t_domain.cpu().data.numpy()))

        torch.save(
            my_net,
            '{0}/mnist_mnistm_model_epoch_{1}.pth'.format(model_root, epoch))
        test(source_dataset_name, epoch)
        test(target_dataset_name, epoch)

    print('done')
Example #8
def main(args):

    # Set global parameters.
    #params.fig_mode = args.fig_mode
    params.epochs = args.max_epoch
    params.training_mode = args.training_mode
    source_domain = args.source_domain
    print("source domain is:", source_domain)
    target_domain = args.target_domain
    print("target domain is:", target_domain)

    params.modality = args.modality
    print("modality is:", params.modality)
    params.extractor_layers = args.extractor_layers
    print("number of layers in feature extractor:", params.extractor_layers)
    #params.class_layers = args.class_layers
    #params.domain_layers  = args.domain_layers
    lr = args.lr

    #set output dims for classifier
    #TODO: change this to len of params dict?
    if source_domain == 'iemocap':
        params.output_dim = 4
    elif source_domain == 'mosei':
        params.output_dim = 6

    # prepare the source data and target data

    src_train_dataloader = dataloaders.get_train_loader(source_domain)
    src_test_dataloader = dataloaders.get_test_loader(source_domain)
    src_valid_dataloader = dataloaders.get_valid_loader(source_domain)
    tgt_train_dataloader = dataloaders.get_train_loader(target_domain)
    tgt_test_dataloader = dataloaders.get_test_loader(target_domain)
    tgt_valid_dataloader = dataloaders.get_valid_loader(target_domain)

    print(params.mod_dim)

    # init models
    #model_index = source_domain + '_' + target_domain

    feature_extractor = models.Extractor()
    class_classifier = models.Class_classifier()
    domain_classifier = models.Domain_classifier()
    # feature_extractor = params.extractor_dict[model_index]
    # class_classifier = params.class_dict[model_index]
    # domain_classifier = params.domain_dict[model_index]

    if params.use_gpu:
        feature_extractor.cuda()
        class_classifier.cuda()
        domain_classifier.cuda()

    # init criterions
    class_criterion = nn.BCEWithLogitsLoss()
    domain_criterion = nn.BCEWithLogitsLoss()
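    # BCEWithLogitsLoss applies the sigmoid internally, so both models should output raw logits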

    # init optimizer
    optimizer = optim.Adam([{
        'params': feature_extractor.parameters()
    }, {
        'params': class_classifier.parameters()
    }, {
        'params': domain_classifier.parameters()
    }],
                           lr=lr)

    for epoch in range(params.epochs):
        print('Epoch: {}'.format(epoch))
        train.train(args.training_mode, feature_extractor, class_classifier,
                    domain_classifier, class_criterion, domain_criterion,
                    src_train_dataloader, tgt_train_dataloader, optimizer,
                    epoch)
        test.test(feature_extractor, class_classifier, domain_classifier,
                  src_valid_dataloader, tgt_valid_dataloader, epoch)
        if epoch == params.epochs - 1:
            test.test(feature_extractor,
                      class_classifier,
                      domain_classifier,
                      src_test_dataloader,
                      tgt_test_dataloader,
                      epoch,
                      mode='test')
Example #9
def train(train_data, val_data, model, class_names, args):
    '''
        Train the model
        Use val_data to do early stopping
    '''
    # creating a tmp directory to save the models
    out_dir = os.path.abspath(
        os.path.join(os.path.curdir, "tmp-runs", str(int(time.time() * 1e7))))
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    best_acc = 0
    sub_cycle = 0
    best_path = None

    optG = torch.optim.Adam(grad_param(model, ['G']), lr=args.meta_lr)
    optCLF = torch.optim.Adam(grad_param(model, ['clf']), lr=args.task_lr)
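    # two learning rates: meta_lr drives the embedding G, task_lr the task-specific classifier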

    if args.lr_scheduler == 'ReduceLROnPlateau':
        schedulerG = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optG, 'max', patience=args.patience // 2, factor=0.1, verbose=True)
        schedulerCLF = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optCLF,
            'max',
            patience=args.patience // 2,
            factor=0.1,
            verbose=True)

    elif args.lr_scheduler == 'ExponentialLR':
        schedulerG = torch.optim.lr_scheduler.ExponentialLR(
            optG, gamma=args.ExponentialLR_gamma)
        schedulerCLF = torch.optim.lr_scheduler.ExponentialLR(
            optCLF, gamma=args.ExponentialLR_gamma)

    print("{}, Start training".format(datetime.datetime.now()), flush=True)

    # train_gen = ParallelSampler(train_data, args, args.train_episodes)
    # train_gen_val = ParallelSampler_Test(train_data, args, args.val_episodes)
    # val_gen = ParallelSampler_Test(val_data, args, args.val_episodes)

    # sampled_classes, source_classes = task_sampler(train_data, args)
    acc = 0
    loss = 0
    for ep in range(args.train_epochs):

        sampled_classes, source_classes = task_sampler(train_data, args)
        # class_names_dict = {}
        # class_names_dict['label'] = class_names['label'][sampled_classes]
        # class_names_dict['text'] = class_names['text'][sampled_classes]
        # class_names_dict['text_len'] = class_names['text_len'][sampled_classes]
        # class_names_dict['is_support'] = False

        train_gen = ParallelSampler(train_data, args, sampled_classes,
                                    source_classes, args.train_episodes)

        sampled_tasks = train_gen.get_epoch()
        # class_names_dict = utils.to_tensor(class_names_dict, args.cuda, exclude_keys=['is_support'])

        grad = {'clf': [], 'G': []}
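        # collects per-episode gradient statistics, averaged in the eval printout below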

        if not args.notqdm:
            sampled_tasks = tqdm(sampled_tasks,
                                 total=train_gen.num_episodes,
                                 ncols=80,
                                 leave=False,
                                 desc=colored('Training on train', 'yellow'))

        for task in sampled_tasks:
            if task is None:
                break
            q_loss, q_acc = train_one(task, class_names, model, optG, optCLF,
                                      args, grad)
            acc += q_acc
            loss += q_loss

        if ep % 100 == 0:
            print("--------[TRAIN] ep:" + str(ep) + ", loss:" +
                  str(q_loss.item()) + ", acc:" + str(q_acc.item()) +
                  "-----------")

        if (ep % 500 == 0) and (ep != 0):
            acc = acc / args.train_episodes / 500
            loss = loss / args.train_episodes / 500
            print("--------[TRAIN] ep:" + str(ep) + ", mean_loss:" +
                  str(loss.item()) + ", mean_acc:" + str(acc.item()) +
                  "-----------")

            net = copy.deepcopy(model)
            # acc, std = test(train_data, class_names, optCLF, net, args, args.test_epochs, False)
            # print("[TRAIN] {}, {:s} {:2d}, {:s} {:s}{:>7.4f} ± {:>6.4f} ".format(
            #     datetime.datetime.now(),
            #     "ep", ep,
            #     colored("train", "red"),
            #     colored("acc:", "blue"), acc, std,
            #     ), flush=True)
            acc = 0
            loss = 0

            # Evaluate validation accuracy
            cur_acc, cur_std = test(val_data, class_names, optCLF, net, args,
                                    args.test_epochs, False)
            print(("[EVAL] {}, {:s} {:2d}, {:s} {:s}{:>7.4f} ± {:>6.4f}, "
                   "{:s} {:s}{:>7.4f}, {:s}{:>7.4f}").format(
                       datetime.datetime.now(),
                       "ep",
                       ep,
                       colored("val  ", "cyan"),
                       colored("acc:", "blue"),
                       cur_acc,
                       cur_std,
                       colored("train stats", "cyan"),
                       colored("G_grad:", "blue"),
                       np.mean(np.array(grad['G'])),
                       colored("clf_grad:", "blue"),
                       np.mean(np.array(grad['clf'])),
                   ),
                  flush=True)

            # Update the current best model if val acc is better
            if cur_acc > best_acc:
                best_acc = cur_acc
                best_path = os.path.join(out_dir, str(ep))

                # save current model
                print("{}, Save cur best model to {}".format(
                    datetime.datetime.now(), best_path))

                torch.save(model['G'].state_dict(), best_path + '.G')
                torch.save(model['clf'].state_dict(), best_path + '.clf')

                sub_cycle = 0
            else:
                sub_cycle += 1

            # Break if the val acc hasn't improved in the past patience epochs
            if sub_cycle == args.patience:
                break

            if args.lr_scheduler == 'ReduceLROnPlateau':
                schedulerG.step(cur_acc)
                schedulerCLF.step(cur_acc)

            elif args.lr_scheduler == 'ExponentialLR':
                schedulerG.step()
                schedulerCLF.step()

    print("{}, End of training. Restore the best weights".format(
        datetime.datetime.now()),
          flush=True)

    # restore the best saved model
    model['G'].load_state_dict(torch.load(best_path + '.G'))
    model['clf'].load_state_dict(torch.load(best_path + '.clf'))

    if args.save:
        # save the current model
        out_dir = os.path.abspath(
            os.path.join(os.path.curdir, "saved-runs",
                         str(int(time.time() * 1e7))))
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        best_path = os.path.join(out_dir, 'best')

        print("{}, Save best model to {}".format(datetime.datetime.now(),
                                                 best_path),
              flush=True)

        torch.save(model['G'].state_dict(), best_path + '.G')
        torch.save(model['clf'].state_dict(), best_path + '.clf')

        with open(best_path + '_args.txt', 'w') as f:
            for attr, value in sorted(args.__dict__.items()):
                f.write("{}={}\n".format(attr, value))

    return optCLF