def main():
    print('Started the trial >>', TRIAL_ID, 'for experiment 1')
    # init and save
    setup_experiment(experiment, config)

    # convention:  init      =>  initialization
    # convention:  t_i_seq   =>  task i (sequential)
    # convention:  t_i_mtl   => task 1 ... i (multitask)
    # convention:  t_i_lmc   => task 1 ... i (Linear Mode Connectivity)

    nni_metric = 0  # stays 0 while the LMC block below is commented out
    for task in range(1, config['num_tasks']+1):
        print('---- Task {} (seq) ----'.format(task))
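        # train on the current task; the multitask loader is passed in as the
        # memory source (per the "_w_memory" naming)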
        seq_model = train_task_sequentially_w_memory(
            task, loaders['sequential'][task]['train'],
            loaders['multitask'][task]['train'], config)
        save_task_model_by_policy(seq_model, task, 'seq', config['exp_dir'])
        avg_accs = []
        for prev_task in range(1, task+1):
            metrics = eval_single_epoch(seq_model, loaders['sequential'][prev_task]['val'])
            avg_accs.append(metrics['accuracy'])
            seq_meter.update(task, prev_task, metrics['accuracy'])
            print(prev_task, metrics)
            log_comet_metric(experiment, 't_{}_seq_acc'.format(prev_task), metrics['accuracy'], task)
            log_comet_metric(experiment, 't_{}_seq_loss'.format(prev_task), round(metrics['loss'], 5), task)

        log_comet_metric(experiment, 'avg_acc', np.mean(avg_accs), task)
        print("Avg acc >> ", np.mean(avg_accs))

        # if task > 1:
        #     accs_lmc, losses_lmc = [], []

        #     print('---- Task {} (lmc) ----'.format(task))
        #     lmc_model = train_task_LMC_offline(task, loaders, config)

        #     save_task_model_by_policy(lmc_model, task, 'lmc', config['exp_dir'])

        #     for prev_task in range(1, task+1):
        #         metrics_lmc =  eval_single_epoch(lmc_model, loaders['sequential'][prev_task]['val'])
        #         accs_lmc.append(metrics_lmc['accuracy'])
        #         losses_lmc.append(metrics_lmc['loss'])
        #         lmc_meter.update(task, prev_task, metrics_lmc['accuracy'])

        #         print('LMC >> ', prev_task, metrics_lmc)
        #         log_comet_metric(experiment, 't_{}_lmc_acc'.format(prev_task), metrics_lmc['accuracy'], task)
        #         log_comet_metric(experiment, 't_{}_lmc_loss'.format(prev_task), round(metrics_lmc['loss'], 5), task)
        #     nni_metric = np.mean(accs_lmc)
        #     log_comet_metric(experiment, 'avg_acc_lmc', np.mean(accs_lmc),task)
        #     log_comet_metric(experiment, 'avg_loss_lmc', np.mean(losses_lmc),task)
        print()

    seq_meter.save(config)
    lmc_meter.save(config)

    plot_graphs(config)

    experiment.log_asset_folder(config['exp_dir'])
    experiment.end()
    nni.report_final_result(nni_metric)
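
All three examples assume module-level state that is defined elsewhere in the project (TRIAL_ID, experiment, config, loaders, the seq/mtl/lmc meters, and helpers such as setup_experiment and log_comet_metric). A minimal sketch of what that setup could look like; every concrete value and the loader helper below are assumptions, not the project's actual configuration:

import numpy as np
import nni
from comet_ml import Experiment

TRIAL_ID = nni.get_trial_id()
experiment = Experiment(project_name='continual-lmc')              # assumed project name
config = {'num_tasks': 5, 'exp_dir': './checkpoints/' + TRIAL_ID}  # assumed keys

# loaders is a nested dict of DataLoaders, e.g. loaders['sequential'][task]['train'];
# a hypothetical helper would build it from the chosen benchmark:
# loaders = build_benchmark_loaders(config)

def log_comet_metric(exp, name, value, step):
    # thin wrapper over Comet's log_metric, inferred from the call sites above
    exp.log_metric(name=name, value=value, step=step)
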
Example #2
def main():
    print('Started the trial >>', TRIAL_ID, 'for experiment 1')
    # init and save
    setup_experiment(experiment, config)

    # convention:  init      =>  initialization
    # convention:  t_i_seq   =>  task i (sequential)
    # convention:  t_i_mtl   => task 1 ... i (multitask)
    # convention:  t_i_lmc   => task 1 ... i (Linear Mode Connectivity)

    for task in range(1, config['num_tasks'] + 1):
        print('---- Task {} (seq) ----'.format(task))
        seq_model = train_task_sequentially(
            task, loaders['sequential'][task]['train'], config)
        save_task_model_by_policy(seq_model, task, 'seq', config['exp_dir'])
        for prev_task in range(1, task + 1):
            metrics = eval_single_epoch(
                seq_model, loaders['sequential'][prev_task]['val'])
            seq_meter.update(task, prev_task, metrics['accuracy'])
            print(prev_task, metrics)
            log_comet_metric(experiment, 't_{}_seq_acc'.format(prev_task),
                             metrics['accuracy'], task)
            log_comet_metric(experiment, 't_{}_seq_loss'.format(prev_task),
                             round(metrics['loss'], 5), task)
            if task == 1:
                log_comet_metric(experiment, 'avg_acc', metrics['accuracy'],
                                 task)
                log_comet_metric(experiment, 'avg_loss', metrics['loss'], task)

        if task > 1:
            accs_mtl, losses_mtl = [], []

            print('---- Task {} (mtl) ----'.format(task))
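            # joint (multitask) training over tasks 1..task; the task-1
            # validation loader is passed in alongside the config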
            mtl_model = train_task_MTL(
                task, loaders['full-multitask'][task]['train'], config,
                loaders['sequential'][1]['val'])

            save_task_model_by_policy(mtl_model, task, 'mtl',
                                      config['exp_dir'])

            for prev_task in range(1, task + 1):
                metrics_mtl = eval_single_epoch(
                    mtl_model, loaders['sequential'][prev_task]['val'])
                accs_mtl.append(metrics_mtl['accuracy'])
                losses_mtl.append(metrics_mtl['loss'])
                mtl_meter.update(task, prev_task, metrics_mtl['accuracy'])

                print('MTL >> ', prev_task, metrics_mtl)
                log_comet_metric(experiment, 't_{}_mtl_acc'.format(prev_task),
                                 metrics_mtl['accuracy'], task)
                log_comet_metric(experiment, 't_{}_mtl_loss'.format(prev_task),
                                 round(metrics_mtl['loss'], 5), task)
            log_comet_metric(experiment, 'avg_acc_mtl', np.mean(accs_mtl),
                             task)
            log_comet_metric(experiment, 'avg_loss_mtl', np.mean(losses_mtl),
                             task)
        print()

    seq_meter.save(config)
    mtl_meter.save(config)

    plot_graphs(config)

    experiment.log_asset_folder(config['exp_dir'])
    experiment.end()
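
The meters (seq_meter, mtl_meter, lmc_meter) are only ever used through update(task, prev_task, accuracy) and save(config). A minimal sketch of a meter with that interface, assuming it simply stores an accuracy matrix; the class name and file layout are hypothetical:

import os
import numpy as np

class AccuracyMeter:
    # hypothetical stand-in: accuracy matrix indexed by
    # (last task trained, task evaluated), using 1-based task ids
    def __init__(self, num_tasks, name):
        self.name = name
        self.matrix = np.zeros((num_tasks + 1, num_tasks + 1))

    def update(self, task, prev_task, accuracy):
        self.matrix[task, prev_task] = accuracy

    def save(self, config):
        np.save(os.path.join(config['exp_dir'], '{}_accs.npy'.format(self.name)),
                self.matrix)

# e.g. seq_meter = AccuracyMeter(config['num_tasks'], 'seq')
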
Example #3
def main():
    print('Started the trial >>', TRIAL_ID, 'for experiment 1')
    # init and save
    setup_experiment(experiment, config)

    # convention:  init      =>  initialization
    # convention:  t_i_seq   =>  task i (sequential)
    # convention:  t_i_mtl   => task 1 ... i (multitask)
    # convention:  t_i_lmc   => task 1 ... i (Linear Mode Connectivity)

    # per-task eigenspectra (hard-coded here for a two-task setup)
    eigen_spectrum = {1: {}, 2: {}}

    for task in range(1, config['num_tasks'] + 1):
        print('---- Task {} (seq) ----'.format(task))
        seq_model = train_task_sequentially(
            task, loaders['sequential'][task]['train'], config)

        eigenvals, eigenvecs = get_model_eigenspectrum(
            seq_model, loaders['sequential'][task]['val'])
        eigen_spectrum[task]['eigenvals'] = eigenvals
        eigen_spectrum[task]['eigenvecs'] = eigenvecs
        save_task_model_by_policy(seq_model, task, 'seq', config['exp_dir'])

        for prev_task in range(1, task + 1):
            metrics = eval_single_epoch(
                seq_model, loaders['sequential'][prev_task]['val'])
            seq_meter.update(task, prev_task, metrics['accuracy'])
            print(prev_task, metrics)
            log_comet_metric(experiment, 't_{}_seq_acc'.format(prev_task),
                             metrics['accuracy'], task)
            log_comet_metric(experiment, 't_{}_seq_loss'.format(prev_task),
                             round(metrics['loss'], 5), task)
            if task == 1:
                log_comet_metric(experiment, 'avg_acc', metrics['accuracy'],
                                 task)
                log_comet_metric(experiment, 'avg_loss', metrics['loss'], task)

        if task > 1:
            accs_mtl, losses_mtl = [], []

            print('---- Task {} (mtl) ----'.format(task))
            mtl_model = train_task_MTL(
                task, loaders['full-multitask'][task]['train'], config,
                loaders['sequential'][1]['val'])
            #grads_t1 = get_model_grads(mtl_model, loaders['sequential'][1]['val'])
            # grads_t2 = get_model_grads(mtl_model, loaders['sequential'][2]['val'])
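            # gradients of the task-1 (seq) checkpoint, evaluated on the joint
            # multitask data and on task-2 data respectively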
            grads_t1 = get_model_grads(
                load_task_model_by_policy(1, 'seq',
                                          config['exp_dir']).to(DEVICE),
                loaders['full-multitask'][2]['train'])
            grads_t3 = get_model_grads(
                load_task_model_by_policy(1, 'seq',
                                          config['exp_dir']).to(DEVICE),
                loaders['sequential'][2]['train'])

            seq_1 = flatten_params(
                load_task_model_by_policy(1, 'seq', config['exp_dir']),
                False).cpu()
            seq_2 = flatten_params(
                load_task_model_by_policy(2, 'seq', config['exp_dir']),
                False).cpu()

            # overlap of each gradient direction with task 1's top eigenvectors
            cosines_t1 = compute_direction_cosines(
                grads_t1, eigen_spectrum[1]['eigenvecs'])
            # cosines_t2 = compute_direction_cosines(grads_t2, eigen_spectrum[2]['eigenvecs'])
            cosines_t3 = compute_direction_cosines(
                grads_t3, eigen_spectrum[1]['eigenvecs'])

            # overlap of the parameter-difference directions (mtl - seq_1 and
            # seq_2 - seq_1) with the same task-1 eigenvectors
            cosine_d1 = compute_direction_cosines(
                (flatten_params(mtl_model, False).cpu() - seq_1),
                eigen_spectrum[1]['eigenvecs'])
            cosine_d2 = compute_direction_cosines(
                seq_2 - seq_1, eigen_spectrum[1]['eigenvecs'])
            print("cos 1 >> ", cosines_t1)
            # print("cos 2 >> ", cosines_t2)
            print("cos 3 >> ", cosines_t3)

            print("dir 1 >>", cosine_d1)
            print("dir 2 >>", cosine_d2)

            save_task_model_by_policy(mtl_model, task, 'mtl',
                                      config['exp_dir'])

            for prev_task in range(1, task + 1):
                metrics_mtl = eval_single_epoch(
                    mtl_model, loaders['sequential'][prev_task]['val'])
                accs_mtl.append(metrics_mtl['accuracy'])
                losses_mtl.append(metrics_mtl['loss'])
                mtl_meter.update(task, prev_task, metrics_mtl['accuracy'])

                print('MTL >> ', prev_task, metrics_mtl)
                log_comet_metric(experiment, 't_{}_mtl_acc'.format(prev_task),
                                 metrics_mtl['accuracy'], task)
                log_comet_metric(experiment, 't_{}_mtl_loss'.format(prev_task),
                                 round(metrics_mtl['loss'], 5), task)
            log_comet_metric(experiment, 'avg_acc_mtl', np.mean(accs_mtl),
                             task)
            log_comet_metric(experiment, 'avg_loss_mtl', np.mean(losses_mtl),
                             task)
        print()

    seq_meter.save(config)
    mtl_meter.save(config)

    experiment.log_asset_folder(config['exp_dir'])
    experiment.end()
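
compute_direction_cosines receives a flattened direction (gradients or a parameter difference) together with the eigenvectors stored in eigen_spectrum. Assuming it returns the cosine of the angle between that direction and each top eigenvector, a sketch could look like the following; the (num_params, num_eigenvectors) column layout of eigenvecs is an assumption, not taken from this project:

import torch

def compute_direction_cosines(direction, eigenvecs):
    # direction: 1-D tensor (flattened gradients or a parameter difference)
    # eigenvecs: 2-D tensor with one eigenvector per column (assumed layout)
    direction = torch.as_tensor(direction).flatten().float()
    eigenvecs = torch.as_tensor(eigenvecs).float()
    # <d, v_i> / (||d|| * ||v_i||) for every eigenvector v_i
    dots = eigenvecs.t() @ direction
    norms = eigenvecs.norm(dim=0) * direction.norm()
    return (dots / norms).tolist()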