Example #1
        from networks import resnet50 as network
        print('my model')
    else:
        from networks import resnet50 as network

########################################################################################################################

# Load
print('Load data...')
data, taskcla, inputsize = dataloader.get(seed=args.seed)
#print('*****data*****',data,type(data));sys.exit(0)
print('Input size =', inputsize, '\nTask info =', taskcla)

# Inits
print('Inits...')
net = network.Net(inputsize, taskcla).cuda()
#from torchsummary import summary
#summary(net.cuda(), inputsize)#summary
utils.print_model_report(net)

appr = approach.Appr(net, nepochs=args.nepochs, lr=args.lr, args=args)
print(appr.criterion)
utils.print_optimizer_config(appr.optimizer)
print('-' * 100)

#appr.load_model()

# Loop tasks
acc = np.zeros((len(taskcla), len(taskcla)), dtype=np.float32)
lss = np.zeros((len(taskcla), len(taskcla)), dtype=np.float32)
for t, ncla in taskcla:
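The excerpt breaks off at the task loop. As a point of reference, here is a minimal, runnable sketch of the data shapes that loop assumes; the task ids and class counts below are hypothetical, not taken from the project:

import numpy as np

# Hypothetical task list: one (task id, number of classes) pair per task.
taskcla = [(0, 10), (1, 10), (2, 10)]

# acc[t, u] / lss[t, u] hold the test accuracy / loss on task u after training task t,
# so only the lower triangle gets filled as training progresses.
acc = np.zeros((len(taskcla), len(taskcla)), dtype=np.float32)
lss = np.zeros((len(taskcla), len(taskcla)), dtype=np.float32)

for t, ncla in taskcla:
    print('Training task {} with {} classes...'.format(t, ncla))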
Example #2
            continue
        else:
            taskcla_new.append((c, taskcla[i][1]))
            data_new.append(data[t])
            c += 1
    data = data_new
    taskcla = taskcla_new
print('Number of tasks: ', len(taskcla))
print('taskcla: ', taskcla)

# Inits
print('Inits...')
if 'sentiment' in args.experiment:
    net = network.Net(inputsize,
                      taskcla,
                      voc_size=voc_size,
                      weights_matrix=weights_matrix,
                      nhid=args.nhid).cuda()
else:
    net = network.Net(inputsize, taskcla, nhid=args.nhid, args=args).cuda()
# util.print_model_report(net)

appr = approach.Appr(net,
                     nepochs=args.nepochs,
                     lr=args.lr,
                     lr_patience=args.lr_patience,
                     nepochs_kt=args.nepochs_kt,
                     lr_kt=args.lr_kt,
                     lr_patience_kt=args.lr_patience_kt,
                     args=args)
print(appr.criterion)
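This snippet reads a number of attributes from an args namespace (args.seed, args.experiment, args.nhid, args.nepochs, args.lr, args.lr_patience and the *_kt knowledge-transfer variants). Below is a minimal argparse sketch of such a namespace; only the attribute names come from the snippet, the default values are assumptions:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--experiment', type=str, default='sentiment')
parser.add_argument('--nhid', type=int, default=100)
parser.add_argument('--nepochs', type=int, default=100)
parser.add_argument('--lr', type=float, default=0.05)
parser.add_argument('--lr_patience', type=int, default=5)
parser.add_argument('--nepochs_kt', type=int, default=100)
parser.add_argument('--lr_kt', type=float, default=0.05)
parser.add_argument('--lr_patience_kt', type=int, default=5)
args = parser.parse_args()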
Example #3
def main(seed=0,
         experiment='',
         approach='',
         output='',
         name='',
         nepochs=200,
         lr=0.05,
         weight_init=None,
         test_mode=None,
         log_path=None,
         **parameters):
    '''Trains an experiment given the current settings.

    Args:
        seed (int): Random seed
        experiment (str): Name of the experiment to load - choices: ['mnist2','pmnist','cifar','mixture']
        approach (str): Approach to use for training the experiment - choices: ['random','sgd','sgd-frozen','lwf','lfl','ewc','imm-mean','progressive','pathnet','imm-mode','sgd-restart','joint','hat','hat-test','dwa']
        output (str): Path to store the output under
        name (str): Additional experiment name for grid search
        nepochs (int): Number of epochs to iterate through
        lr (float): Learning rate to apply
        weight_init (str): String that defines how the weights are initialized - it can be split (with `:`) between convolution (first) and linear (second) layers. Options: ["xavier", "uniform", "normal", "ones", "zeros", "kaiming"]
        test_mode (int): Defines how many tasks to iterate through
        log_path (str): Path to store detailed logs
        **parameters: Approach-dependent parameters
    '''
    # check the output path
    if output == '':
        output = '../res/' + experiment + '_' + approach + '_' + str(seed) + (
            ("_" + name) if len(name) > 0 else "") + '.txt'
    print('=' * 100)
    print('Arguments =')
    #
    args = {
        **parameters, "seed": seed,
        "experiment": experiment,
        "approach": approach,
        "output": output,
        "nepochs": nepochs,
        "lr": lr,
        "weight_init": weight_init
    }
    for arg in args:
        print("\t{:15}: {}".format(arg, args[arg]))
    print('=' * 100)

    ########################################################################################################################

    # Seed
    np.random.seed(seed)
    torch.manual_seed(seed)

    # check if cuda available
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    else:
        print('[CUDA unavailable]')
        sys.exit()

    # Args -- Experiment
    if experiment == 'mnist2':
        from dataloaders import mnist2 as dataloader
    elif experiment == 'pmnist':
        from dataloaders import pmnist as dataloader
    elif experiment == 'cifar':
        from dataloaders import cifar as dataloader
    elif experiment == 'mixture':
        from dataloaders import mixture as dataloader

    # Args -- Approach
    if approach == 'random':
        from approaches import random as appr
    elif approach == 'sgd':
        from approaches import sgd as appr
    elif approach == 'sgd-restart':
        from approaches import sgd_restart as appr
    elif approach == 'sgd-frozen':
        from approaches import sgd_frozen as appr
    elif approach == 'lwf':
        from approaches import lwf as appr
    elif approach == 'lfl':
        from approaches import lfl as appr
    elif approach == 'ewc':
        from approaches import ewc as appr
    elif approach == 'imm-mean':
        from approaches import imm_mean as appr
    elif approach == 'imm-mode':
        from approaches import imm_mode as appr
    elif approach == 'progressive':
        from approaches import progressive as appr
    elif approach == 'pathnet':
        from approaches import pathnet as appr
    elif approach == 'hat-test':
        from approaches import hat_test as appr
    elif approach == 'hat':
        from approaches import hat as appr
    elif approach == 'joint':
        from approaches import joint as appr
    elif approach == 'dwa':
        from approaches import dwa as appr

    # Args -- Network
    if experiment in ['mnist2', 'pmnist']:
        if approach in ['hat', 'hat-test']:
            from networks import mlp_hat as network
        elif approach == 'dwa':
            from networks import mlp_dwa as network
        else:
            from networks import mlp as network
    else:
        if approach == 'lfl':
            from networks import alexnet_lfl as network
        elif approach == 'hat':
            from networks import alexnet_hat as network
        elif approach == 'progressive':
            from networks import alexnet_progressive as network
        elif approach == 'pathnet':
            from networks import alexnet_pathnet as network
        elif approach == 'hat-test':
            from networks import alexnet_hat_test as network
        elif approach == 'dwa':
            from networks import alexnet_dwa as network
        else:
            from networks import alexnet as network

    ########################################################################################################################

    # Load
    print('Load data...')
    data, taskcla, inputsize = dataloader.get(seed=seed)
    print('Input size =', inputsize, '\nTask info =', taskcla)

    # Init the network and put on gpu
    print('Inits...')
    # handle input parameters for dwa approaches
    if approach == "dwa":
        params = {}
        for key in parameters:
            if key in dwa_net_params:
                params[key] = parameters[key]
        net = network.Net(inputsize, taskcla, **params).cuda()
    else:
        net = network.Net(inputsize, taskcla).cuda()
    utils.print_model_report(net)

    # setup network weights
    if weight_init is not None:
        # retrieve init data
        inits = weight_init.split(":")
        conv_init = inits[0].split(",")
        conv_bias = conv_init[1] if len(conv_init) > 1 else "zeros"
        conv_init = conv_init[0]
        linear_init = inits[-1].split(",")
        linear_bias = linear_init[1] if len(linear_init) > 1 else "zeros"
        linear_init = linear_init[0]

        init_funcs = {
            "xavier":
            lambda x: torch.nn.init.xavier_uniform_(x, gain=1.0),
            "kaiming":
            lambda x: torch.nn.init.kaiming_normal_(
                x, nonlinearity="relu", mode='fan_in'),
            "normal":
            lambda x: torch.nn.init.normal_(x, mean=0., std=1.),
            "uniform":
            lambda x: torch.nn.init.uniform_(x, a=0., b=1.),
            "ones":
            lambda x: x.data.fill_(1.),
            "zeros":
            lambda x: x.data.fill_(0.)
        }

        print(
            "Init network weights:\n\tlinear weights: {}\n\tlinear bias: {}\n\tconv weights: {}\n\tconv bias: {}"
            .format(linear_init, linear_bias, conv_init, conv_bias))

        # setup init function
        def init_weights(m):
            if type(m) == torch.nn.Linear or type(m) == Linear_dwa:
                init_funcs[linear_init](m.weight)
                init_funcs[linear_bias](m.bias)
            if type(m) == torch.nn.Conv2d or type(m) == Conv2d_dwa:
                init_funcs[conv_init](m.weight)
                init_funcs[conv_bias](m.bias)
            # TODO: check for masks

        # apply to network
        net.apply(init_weights)

    # setup the approach
    params = parameters
    if approach == 'dwa':
        params = {}
        for key in parameters:
            if key not in dwa_net_params:
                params[key] = parameters[key]
    appr = appr.Appr(net, nepochs=nepochs, lr=lr, log_path=log_path, **params)
    print(appr.criterion)
    utils.print_optimizer_config(appr.optimizer)
    print('-' * 100)

    # Loop tasks
    acc = np.zeros((len(taskcla), len(taskcla)), dtype=np.float32)
    lss = np.zeros((len(taskcla), len(taskcla)), dtype=np.float32)
    i = 0
    for t, ncla in taskcla:
        # check if in test mode and finish after 1 task
        i += 1
        if test_mode is not None and i > test_mode:
            print("INFO: In Test-Mode - breaking after Task {}".format(
                test_mode))
            break

        print('*' * 100)
        print('Task {:2d} ({:s})'.format(t, data[t]['name']))
        print('*' * 100)

        if approach == 'joint':
            # Get data. We do not put it to GPU
            if t == 0:
                xtrain = data[t]['train']['x']
                ytrain = data[t]['train']['y']
                xvalid = data[t]['valid']['x']
                yvalid = data[t]['valid']['y']
                task_t = t * torch.ones(xtrain.size(0)).int()
                task_v = t * torch.ones(xvalid.size(0)).int()
                task = [task_t, task_v]
            else:
                xtrain = torch.cat((xtrain, data[t]['train']['x']))
                ytrain = torch.cat((ytrain, data[t]['train']['y']))
                xvalid = torch.cat((xvalid, data[t]['valid']['x']))
                yvalid = torch.cat((yvalid, data[t]['valid']['y']))
                task_t = torch.cat(
                    (task_t,
                     t * torch.ones(data[t]['train']['y'].size(0)).int()))
                task_v = torch.cat(
                    (task_v,
                     t * torch.ones(data[t]['valid']['y'].size(0)).int()))
                task = [task_t, task_v]
        else:
            # Get data
            xtrain = data[t]['train']['x'].cuda()
            ytrain = data[t]['train']['y'].cuda()
            xvalid = data[t]['valid']['x'].cuda()
            yvalid = data[t]['valid']['y'].cuda()
            task = t

        # Train
        appr.train(task, xtrain, ytrain, xvalid, yvalid)
        print('-' * 100)

        # Free some cache
        print("INFO: Free cuda cache")
        torch.cuda.empty_cache()

        # Test
        for u in range(t + 1):
            xtest = data[u]['test']['x'].cuda()
            ytest = data[u]['test']['y'].cuda()
            test_loss, test_acc, metric_str = appr.eval(u, xtest, ytest)
            print(
                '>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}%{} <<<'
                .format(u, data[u]['name'], test_loss, 100 * test_acc,
                        metric_str))
            acc[t, u] = test_acc
            lss[t, u] = test_loss

            # check for introspection method (and logs enabled)
            if hasattr(appr, 'introspect') and appr.logs is not None and (
                    t + 1 >= len(taskcla)):
                # randomly select from dataset
                idx = torch.randperm(xtest.size(0))
                xrand = xtest[idx[:10]]
                yrand = ytest[idx[:10]]

                # compute
                out = appr.introspect(u, xrand, yrand)

                # pickle output
                print('Store task {} analytics'.format(data[u]['name']))
                with gzip.open(
                        os.path.join(
                            appr.logpath,
                            os.path.basename(output) +
                            ".task{}_{}.analysis".format(u, data[u]['name'])),
                        'wb') as intro_file:
                    pickle.dump(out, intro_file, pickle.HIGHEST_PROTOCOL)

        # check if result directory exists
        if not os.path.exists(os.path.dirname(output)):
            print("create output dir")
            os.makedirs(os.path.dirname(output))

        # Save
        print('Save at {}'.format(output))
        np.savetxt(output, acc, '%.4f')

    # Done
    print('*' * 100)
    print('Accuracies =')
    for i in range(acc.shape[0]):
        print('\t', end='')
        for j in range(acc.shape[1]):
            print('{:5.1f}% '.format(100 * acc[i, j]), end='')
        print()
    print('*' * 100)
    print('Done!')

    print('[Elapsed time = {:.1f} h]'.format(
        (time.time() - tstart) / (60 * 60)))

    # optionally: store logs
    if hasattr(appr, 'logs'):
        if appr.logs is not None:
            # save task names and per-task test metrics
            from copy import deepcopy
            appr.logs['task_name'] = {}
            appr.logs['test_acc'] = {}
            appr.logs['test_loss'] = {}
            for t, ncla in taskcla:
                appr.logs['task_name'][t] = deepcopy(data[t]['name'])
                appr.logs['test_acc'][t] = deepcopy(acc[t, :])
                appr.logs['test_loss'][t] = deepcopy(lss[t, :])
            #pickle
            with gzip.open(
                    os.path.join(appr.logpath,
                                 os.path.basename(output) + "_logs.gzip"),
                    'wb') as log_file:
                pickle.dump(appr.logs, log_file, pickle.HIGHEST_PROTOCOL)

    # store the model (full and light versions)
    model_file = os.path.join(appr.logpath,
                              os.path.basename(output) + ".model")
    torch.save(net, model_file)
    model_file = os.path.join(appr.logpath,
                              os.path.basename(output) + ".weights")
    torch.save(net.state_dict(), model_file)
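A possible invocation of main, with the experiment and approach names taken from the docstring above; the hyperparameter values and paths are illustrative only:

# Hypothetical call: train the 'cifar' experiment with the HAT approach,
# Kaiming init for conv layers, Xavier init for linear layers, stop after 2 tasks.
main(seed=0,
     experiment='cifar',
     approach='hat',
     nepochs=200,
     lr=0.05,
     weight_init='kaiming:xavier',
     test_mode=2,
     log_path='../logs')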
Example #4
    from networks import cnn_pathnet as network

elif 'cnn' in args.approach:
    from networks import cnn as network
elif 'mlp' in args.approach:
    from networks import mlp as network
########################################################################################################################

# Load
print('Load data...')
data, taskcla, inputsize = dataloader.get(seed=args.seed, args=args)
print('Input size =', inputsize, '\nTask info =', taskcla)

# Inits
print('Inits...')
net = network.Net(inputsize, taskcla, nhid=args.nhid, args=args).cuda()

# utils.print_model_report(net)

appr = approach.Appr(net, args=args)
# print(appr.criterion)
# utils.print_optimizer_config(appr.optimizer)
print('-' * 100)

# Loop tasks
acc = np.zeros((len(taskcla), len(taskcla)), dtype=np.float32)
lss = np.zeros((len(taskcla), len(taskcla)), dtype=np.float32)
for t, ncla in taskcla:
    print('*' * 100)
    print('Task {:2d} ({:s})'.format(t, data[t]['name']))
    print('*' * 100)
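All of these snippets rely on a per-task data dictionary with the same layout. Here is a minimal sketch of that layout, inferred from the keys the examples access (data[t]['name'], data[t]['train']['x'], ...); the task name, tensor shapes and class count are placeholders:

import torch

# One entry per task id; real dataloaders fill these with the actual splits.
data = {
    0: {
        'name': 'task-0',
        'train': {'x': torch.zeros(8, 3, 32, 32), 'y': torch.zeros(8, dtype=torch.long)},
        'valid': {'x': torch.zeros(8, 3, 32, 32), 'y': torch.zeros(8, dtype=torch.long)},
        'test':  {'x': torch.zeros(8, 3, 32, 32), 'y': torch.zeros(8, dtype=torch.long)},
    },
}
taskcla = [(0, 10)]  # matching (task id, number of classes) list

print('Task {:2d} ({:s})'.format(0, data[0]['name']))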
Example #5
    def run(self):
        ##### Start Source Code Lifelong Learning ########################
        # Args -- Approach
        if self.opt.approach == 'random':
            from approaches import random as approach
        elif self.opt.approach == 'sgd':
            from approaches import sgd as approach
        elif self.opt.approach == 'sgd-restart':
            from approaches import sgd_restart as approach
        elif self.opt.approach == 'sgd-frozen':
            from approaches import sgd_frozen as approach
        elif self.opt.approach == 'lwf':
            from approaches import lwfNLP as approach
        elif self.opt.approach == 'lfl':
            from approaches import lfl as approach
        elif self.opt.approach == 'ewc':
            from approaches import ewcNLP as approach
        elif self.opt.approach == 'imm-mean':
            from approaches import imm_mean as approach
        elif self.opt.approach == 'imm-mode':
            from approaches import imm_mode as approach
        elif self.opt.approach == 'progressive':
            from approaches import progressive as approach
        elif self.opt.approach == 'pathnet':
            from approaches import pathnet as approach
        elif self.opt.approach == 'hat-test':
            from approaches import hat_test as approach

        elif self.opt.approach == 'ar1':
            from approaches import ar1 as approach
        elif self.opt.approach == 'si':
            from approaches import si as approach
            #from approaches import hat as approach
        elif self.opt.approach == 'joint':
            from approaches import joint as approach
        elif self.opt.approach == 'lifelong':
            from approaches import lifelongBing as approach

        # Args -- Network
        if self.opt.experiment == 'mnist2' or self.opt.experiment == 'pmnist':
            if self.opt.approach == 'hat' or self.opt.approach == 'hat-test':
                from networks import mlp_hat as network
            else:
                from networks import mlp as network
        else:
            if self.opt.approach == 'lfl':
                from networks import alexnet_lfl as network
            elif self.opt.approach == 'hat':  # Select the BERT model for training on these datasets
                from networks import bert as network
            elif self.opt.approach == 'progressive':
                from networks import alexnet_progressive as network
            elif self.opt.approach == 'pathnet':
                from networks import alexnet_pathnet as network
            elif self.opt.approach == 'lifelong' or self.opt.model_name.find("bert") == -1:
                # Only for Bing Liu's method (Lifelong Learning Memory Networks
                # for Aspect Sentiment Classification)
                from networks import NotBert as network
            elif self.opt.approach in ('hat-test', 'ar1', 'ewc', 'si', 'lwf'):
                from networks import bert as network

                # from networks import alexnet_hat_test as network
            else:
                from networks import alexnet as network

        ##### End Source Code Lifelong Learning ########################

        # Loss and Optimizer
        criterion = nn.CrossEntropyLoss()
        _params = filter(lambda p: p.requires_grad, self.model.parameters())

        # A way to obtain the variables used by the optimizer without fine-tuning the BERT model
        # modelVariables = [(name,var) for i, (name, var) in enumerate(self.model.named_parameters())if name.find("bert") == -1]
        #
        # for name, var in modelVariables:
        #  print ("Variable ==> " + name)

        optimizer = self.opt.optimizer(_params,
                                       lr=self.opt.learning_rate,
                                       weight_decay=self.opt.l2reg)

        ##### Start Source Code Lifelong Learning ########################
        # Inits

        if self.trainset.multidomain is not True:
            print('Load data...')
            train_data_loader = DataLoader(dataset=self.trainset,
                                           batch_size=self.opt.batch_size,
                                           shuffle=True)
            test_data_loader = DataLoader(dataset=self.testset,
                                          batch_size=self.opt.batch_size,
                                          shuffle=False)
            val_data_loader = DataLoader(dataset=self.valset,
                                         batch_size=self.opt.batch_size,
                                         shuffle=False)

            self._reset_params()
            best_model_path = self._train(criterion, optimizer,
                                          train_data_loader, val_data_loader)
            self.model.load_state_dict(torch.load(best_model_path))
            self.model.eval()
            test_acc, test_f1 = self._evaluate_acc_f1(test_data_loader)
            logger.info('>> test_acc: {:.4f}, test_f1: {:.4f}'.format(
                test_acc, test_f1))
        else:
            print('Inits...')
            sizesentence = 0
            ncla = 0

            for data in self.trainset:  # Compute sentence and class sizes in the multidomain context
                sizesentence += data[1]
                ncla += 1
            inputsize = (ncla, sizesentence, 0)

            acc = np.zeros((ncla, ncla), dtype=np.float32)
            lss = np.zeros((ncla, ncla), dtype=np.float32)

            accNew = np.zeros((ncla, ncla), dtype=np.float32)
            lssNew = np.zeros((ncla, ncla), dtype=np.float32)

            recallNew = np.zeros((ncla, ncla), dtype=np.float32)
            f1New = np.zeros((ncla, ncla), dtype=np.float32)

            # If a saved model with the same name as the model and approach exists, load it first.
            # All saved models are in the algorithms/ directory.
            appr = None
            if torch.cuda.is_available():
                net = network.Net(inputsize, self.trainset, self.opt).cuda()
            else:
                net = network.Net(inputsize, self.trainset, self.opt)
            net.set_Model(self.model)
            net.set_ModelOptimizer(optimizer)

            if torch.cuda.is_available():
                dev = "cuda:0"
                self.model.to(dev)
                net.to(dev)
                print("Update GPU(Cuda support ):" + dev)
                # utils.print_model_report(net)
            appr = approach.Appr(net,
                                 nepochs=self.opt.nepochs,
                                 lr=self.opt.lr,
                                 args=self.opt)

            if os.path.exists(self.opt.output_algorithm):
                appr.loadModel(self.opt.output_algorithm)
                print("Load Module values from: " + self.opt.output_algorithm)

            #print(appr.criterion)
            #utils.print_optimizer_config(appr.optimizer)
            print('-' * 100)
            ##### End Source Code Lifelong Learning ########################
            print("!!!! New optimization !!!!")
            appr._set_optimizer(optimizer)
            print("------- New optimization -------")
            ##### Start Source Code Lifelong Learning ########################
            # Inits
            task = 0
            if self.opt.approach == 'lifelong':
                appr.setAllAspect(self.trainset.all_aspects)
                appr.setAllWord(self.tokenizer.word2idx)
            startDateTime = datetime.now()
            test_data_list = []
            for task, (domainame, nclav, data, aspect_vocabulary,
                       word_vocabulary) in enumerate(self.trainset):
                print('*' * 100)
                print('Task {:2d} ({:s})'.format(task, domainame))
                print('*' * 100)
                if self.opt.approach == 'lifelong':
                    appr.setAspectInDomain(task, aspect_vocabulary)
                    appr.setWordInDomain(task, word_vocabulary)

                train_data_loader = DataLoader(dataset=self.trainset[task][2],
                                               batch_size=self.opt.batch_size,
                                               shuffle=True)

                if self.trainset[task][3] is not None and self.trainset[task][4] is not None:
                    train_data_loader.aspect_vocabulary = self.trainset[task][3]
                    train_data_loader.word_vocabulary = self.trainset[task][4]

                test_data_loader = DataLoader(dataset=self.testset[task][2],
                                              batch_size=self.opt.batch_size,
                                              shuffle=False)
                val_data_loader = DataLoader(dataset=self.valset[task][2],
                                             batch_size=self.opt.batch_size,
                                             shuffle=False)
                print("-- Parameters --")

                test_data_list.append(test_data_loader)

                print("Approach " + self.opt.approach)
                print("Algorithm " + self.opt.model_name)

                print("Size element  in train_data_loader: " +
                      str(train_data_loader.__len__()))
                print("Size element  in trainset(dataset): " +
                      str(self.trainset[task][2].__len__()))

                #print(self.model.parameters())
                appr.train(task, train_data_loader, test_data_loader,
                           val_data_loader)
                print('-' * 100)

                # Test
                # for u in range(task + 1):
                #     test_data_loader = DataLoader(dataset=self.testset[u][2], batch_size=self.opt.batch_size, shuffle=False)
                #
                #     test_loss, test_acc = appr.eval(u, test_data_loader)
                #     print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'.format(u, self.testset[u][0], test_loss,
                #                                                                                   100 * test_acc))
                #     acc[task, u] = test_acc
                #     lss[task, u] = test_loss

                # Test Lomonaco evaluation measures
                for u in range(task + 1):
                    #test_data_loader = DataLoader(dataset=self.testset[u][2], batch_size=self.opt.batch_size, shuffle=False)
                    test_data_loader = test_data_list[u]
                    test_loss, test_acc, test_recall, test_f1 = appr.eval(
                        u, test_data_loader)
                    print(
                        '>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'
                        .format(u, self.testset[u][0], test_loss,
                                100 * test_acc))
                    acc[task, u] = test_acc
                    lss[task, u] = test_loss
                    accNew[task, u] = test_acc
                    lssNew[task, u] = test_loss

                    recallNew[task, u] = test_recall
                    f1New[task, u] = test_f1
                # Save

            print('Algorithm elapsed time:', datetime.now() - startDateTime)
            print('Save at ' + self.opt.output)
            np.savetxt(self.opt.output, acc, '%.4f')

            print(
                'Save Lomonaco evaluation measures (remember: different output file --> ) '
                + self.opt.output)
            np.savetxt(self.opt.output, accNew, '%.4f')

            print(
                'Save Lomonaco evaluation measures (remember: different output file --> ) '
                + self.opt.recall_output)
            np.savetxt(self.opt.recall_output, recallNew, '%.4f')

            print(
                'Save Lomonaco evaluation measures (remember: different output file --> ) '
                + self.opt.f1_output)
            np.savetxt(self.opt.f1_output, f1New, '%.4f')

            ##### End  Source Code Lifelong Learning ########################
            if self.opt.measure == "accuracy":
                backwardTransfer, negativebackward, positivebackward = Instructor.backwardTransfer(
                    accNew, ncla)
            elif self.opt.measure == "recall":
                backwardTransfer, negativebackward, positivebackward = Instructor.backwardTransfer(
                    recallNew, ncla)
            elif self.opt.measure == "f1":
                backwardTransfer, negativebackward, positivebackward = Instructor.backwardTransfer(
                    f1New, ncla)

            globalAccuracy = Instructor.globallMeasure(accNew, ncla)
            globalF1 = Instructor.globallMeasure(f1New, ncla)
            globalRecall = Instructor.globallMeasure(recallNew, ncla)
            # forwardTransfer = Instructor.forwardTransfer(recallNew, ncla)
            forwardTransfer = Instructor.forwardTransfer(accNew, ncla)
            result = ["BWT=" + str(backwardTransfer)]
            result.append(["-BWT=" + str(negativebackward)])
            result.append(["+BWT=" + str(positivebackward)])
            result.append(["ACC=" + str(globalAccuracy)])
            result.append(["FWD=" + str(forwardTransfer)])
            result.append(["F1=" + str(globalF1)])
            result.append(["RECALL=" + str(globalRecall)])

            np.savetxt(self.opt.multi_output, result, '%s')

            if os.path.exists(self.opt.output_algorithm):
                os.remove(self.opt.output_algorithm)

            appr.saveModel(self.opt.output_algorithm)
            print("Save Module values to: " + self.opt.output_algorithm)