#------ Train Classifier-------#
            if i == 0:
                print('\n--------------------------------------')
                print('Run #{} Task #{} --> Train Classifier'.format(
                    run, task))
                print('--------------------------------------\n')

            #---------------
            # Iteration Loop
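            # each of the args.disc_iters steps updates the classifier on
            # the incoming batch plus, after the first task, samples
            # retrieved from the replay buffer (rehearse=task > 0)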
            for it in range(args.disc_iters):
                model = retrieve_replay_update(
                    args, model, opt, data, target, buffer, task, tr_loader,
                    rehearse=task > 0)

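            # reservoir sampling keeps the buffer an approximately uniform
            # sample of the stream seen so far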
            buffer.add_reservoir(data.cpu(), target.cpu(), None, task)

        # ------------------------ eval ------------------------ #
        model = model.eval()
        eval_loaders = [('valid', val_loader), ('test', test_loader)]

        for mode, loader_ in eval_loaders:
            for task_t, te_loader in enumerate(loader_):
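                # only evaluate tasks seen so far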
                if task_t > task: break
                LOG_temp = get_temp_logger(None, ['cls_loss', 'acc', 'gen_loss'])

                # iterate over samples from task
                for i, (data, target) in enumerate(te_loader):
                    if args.unit_test and i > 10: break

                    if args.cuda:
                        data, target = data.cuda(), target.cuda()

                    # assumed model interface (sketch): one forward pass
                    # returns logits, reconstruction, latent code and
                    # generator loss; `F` is torch.nn.functional
                    logits, x_recon, hid, gen_loss = model(data)
                    loss = F.cross_entropy(logits, target)
                    pred = logits.argmax(dim=1, keepdim=True)

                    LOG_temp['acc'] += [
                        pred.eq(target.view_as(pred)).sum().item() /
                        pred.size(0)
                    ]
                    LOG_temp['cls_loss'] += [loss.item()]
                    LOG_temp['gen_loss'] += [gen_loss.item()]

                # add to buffer only during last epoch of task to get the best reconstructions
                if epoch == args.n_epochs - 1:

                    # buffer.add_reservoir(data, target, None, task)
                    split = data.size(0)

                    if args.store_latents:
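                        # store the compact latent code instead of the
                        # reconstructed input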
                        buffer.add_reservoir(hid[:split], target, None, task)
                    else:
                        buffer.add_reservoir(x_recon[:split], target, None,
                                             task)

            LOG_temp.print_(mode, task)

            # ----------------- update buffer ----------------- #

            # We need to update the buffer representations
            with torch.no_grad():

                if prev_gen is not None and args.store_latents and args.update_buffer_hid:
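                    # only when a previous generator snapshot exists and the
                    # buffer stores latent codes rather than raw inputs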

                    # sample from buffer
                    for i in range(buffer.bx.data.size(0) // 64):
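                        # hedged sketch of the refresh step: decode each
                        # stored latent with the previous generator and
                        # re-encode it with the current one. `gen` (the
                        # current generator handle) and the encode/decode
                        # methods are assumptions, not taken from this file.
                        idx = slice(i * 64, (i + 1) * 64)
                        old_hid = buffer.bx.data[idx]
                        if args.cuda:
                            old_hid = old_hid.cuda()
                        new_hid = gen.encode(prev_gen.decode(old_hid))
                        buffer.bx.data[idx] = new_hid.cpu()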
