Example #1
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()
        self.args = args
        self.nt = n_tasks

        self.n_feat = n_outputs
        self.n_classes = n_outputs

        arch = args.arch
        nl, nh = args.n_layers, args.n_hiddens
        config = mf.ModelFactory.get_model(model_type=arch,
                                           sizes=[n_inputs] + [nh] * nl +
                                           [n_outputs],
                                           dataset=args.dataset,
                                           args=args)
        self.net = Learner.Learner(config, args)

        # setup optimizer
        self.opt = torch.optim.SGD(self.parameters(), lr=args.lr)

        # setup losses
        self.loss = torch.nn.CrossEntropyLoss()

        self.gpu = args.cuda
        self.nc_per_task = int(n_outputs / n_tasks)
        self.n_outputs = n_outputs
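
Each of these constructors pulls every hyperparameter from a single `args` object. As a rough illustration of what Example #1 expects, the sketch below builds a hypothetical argparse.Namespace carrying only the fields this `__init__` reads directly (`arch`, `n_layers`, `n_hiddens`, `dataset`, `lr`, `cuda`); the concrete values, the `Net` wrapper class, and the behaviour of the `mf`/`Learner` modules are assumptions about the surrounding repository, which may require additional fields.

    import torch
    from argparse import Namespace

    # Hypothetical hyperparameters; field names mirror the attributes read above.
    args = Namespace(arch='linear', n_layers=2, n_hiddens=100,
                     dataset='mnist_rotations', lr=0.01, cuda=False)

    # e.g. 784 input features, 10 classes per task over 20 tasks (placeholder sizes).
    model = Net(n_inputs=784, n_outputs=200, n_tasks=20, args=args)
    logits = model.net(torch.randn(1, 784))  # forward pass through the Learner (assumed call signature)
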
Example #2
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()
        self.args = args
        nl, nh = args.n_layers, args.n_hiddens

        config = mf.ModelFactory.get_model(model_type=args.arch,
                                           sizes=[n_inputs] + [nh] * nl +
                                           [n_outputs],
                                           dataset=args.dataset,
                                           args=args)

        self.net = Learner.Learner(config, args)

        # define the lr params
        self.net.define_task_lr_params(alpha_init=args.alpha_init)

        self.cuda = args.cuda
        if self.cuda:
            self.net = self.net.cuda(self.args.device_id)

        # look up the BGD optimiser class by name
        optimizer_model = optimizers_lib.__dict__[args.bgd_optimizer]
        # params used to instantiate the BGD optimiser
        optimizer_params = dict(
            {  #"logger": logger,
                "mean_eta": args.mean_eta,
                "std_init": args.std_init,
                "mc_iters": args.train_mc_iters
            },
            **literal_eval(" ".join(args.optimizer_params)))
        self.optimizer = optimizer_model(self.net, **optimizer_params)

        self.epoch = 0
        # allocate buffer
        self.M = []
        self.M_new = []
        self.age = 0

        # setup losses
        self.loss = torch.nn.CrossEntropyLoss()
        self.is_cifar = ((args.dataset == 'cifar100')
                         or (args.dataset == 'tinyimagenet'))
        self.glances = args.glances
        self.pass_itr = 0
        self.real_epoch = 0

        # setup memories
        self.current_task = 0

        self.memories = args.memories
        self.batchSize = int(args.replay_batch_size)

        if self.is_cifar:
            self.nc_per_task = int(n_outputs / n_tasks)
        else:
            self.nc_per_task = n_outputs
        self.n_outputs = n_outputs

        self.observe_itr = 0
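
The dictionary above configures a Bayesian Gradient Descent (BGD) optimiser through `mean_eta`, `std_init`, and `train_mc_iters`. As a loose sketch only, and assuming the optimiser exposes the `randomize_weights`, `aggregate_grads`, and `step` methods of the reference BGD implementation, one training step over a batch `(x, y)` would look roughly like:

    for _ in range(self.args.train_mc_iters):
        self.optimizer.randomize_weights()  # draw weights from the current posterior
        loss = self.loss(self.net(x), y)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.aggregate_grads(x.size(0))
    self.optimizer.step()                   # update the posterior mean and std
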
Example #3
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()
        self.args = args
        self.margin = args.memory_strength
        self.is_cifar = ((args.dataset == 'cifar100')
                         or (args.dataset == 'tinyimagenet'))

        nl, nh = args.n_layers, args.n_hiddens
        config = mf.ModelFactory.get_model(model_type=args.arch,
                                           sizes=[n_inputs] + [nh] * nl +
                                           [n_outputs],
                                           dataset=args.dataset,
                                           args=args)
        self.net = Learner.Learner(config, args=args)

        self.netforward = self.net.forward

        self.ce = nn.CrossEntropyLoss()
        self.n_outputs = n_outputs
        self.glances = args.glances

        self.opt = optim.SGD(self.parameters(), args.lr)

        self.n_memories = args.n_memories
        self.gpu = args.cuda

        # allocate episodic memory
        self.memory_data = torch.FloatTensor(n_tasks, self.n_memories,
                                             n_inputs)
        self.memory_labs = torch.LongTensor(n_tasks, self.n_memories)
        if args.cuda:
            self.memory_data = self.memory_data.cuda(args.device_id)
            self.memory_labs = self.memory_labs.cuda(args.device_id)

        # allocate temporary synaptic memory
        self.grad_dims = []
        for param in self.parameters():
            self.grad_dims.append(param.data.numel())
        self.grads = torch.Tensor(sum(self.grad_dims), n_tasks)
        if args.cuda:
            self.grads = self.grads.cuda(args.device_id)

        # allocate counters
        self.observed_tasks = []
        self.old_task = -1
        self.mem_cnt = 0
        if self.is_cifar:
            self.nc_per_task = int(n_outputs / n_tasks)
        else:
            self.nc_per_task = n_outputs

        if args.cuda:
            self.cuda(args.device_id)
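
The per-task tensors `memory_data` and `memory_labs` allocated above are typically filled as a ring buffer while training on task `t`. The method below is only a minimal sketch of that pattern under the attribute names defined in this constructor, not the repository's actual `observe` logic:

    def store_examples(self, t, x, y):
        # Write the incoming batch into task t's slice, wrapping when the slice is full.
        bsz = y.size(0)
        end = min(self.mem_cnt + bsz, self.n_memories)
        eff = end - self.mem_cnt
        self.memory_data[t, self.mem_cnt:end].copy_(x[:eff])
        self.memory_labs[t, self.mem_cnt:end].copy_(y[:eff])
        self.mem_cnt += eff
        if self.mem_cnt == self.n_memories:
            self.mem_cnt = 0  # start overwriting the oldest slots
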
Example #4
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(BaseNet, self).__init__()

        self.args = args
        nl, nh = args.n_layers, args.n_hiddens

        config = mf.ModelFactory.get_model(model_type=args.arch,
                                           sizes=[n_inputs] + [nh] * nl +
                                           [n_outputs],
                                           dataset=args.dataset,
                                           args=args)

        self.net = Learner.Learner(config, args)

        # define the lr params
        self.net.define_task_lr_params(alpha_init=args.alpha_init)

        self.opt_wt = torch.optim.SGD(list(self.net.parameters()),
                                      lr=args.opt_wt)
        self.opt_lr = torch.optim.SGD(list(self.net.alpha_lr.parameters()),
                                      lr=args.opt_lr)

        self.epoch = 0
        # allocate buffer
        self.M = []
        self.M_new = []
        self.age = 0

        # setup losses
        self.loss = torch.nn.CrossEntropyLoss()
        self.is_cifar = ((args.dataset == 'cifar100')
                         or (args.dataset == 'tinyimagenet'))
        self.glances = args.glances
        self.pass_itr = 0
        self.real_epoch = 0

        self.current_task = 0
        self.memories = args.memories
        self.batchSize = int(args.replay_batch_size)

        self.cuda = args.cuda
        if self.cuda:
            self.net = self.net.cuda()

        self.n_outputs = n_outputs
Example #5
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()

        self.args = args
        nl, nh = args.n_layers, args.n_hiddens

        config = mf.ModelFactory.get_model(model_type=args.arch,
                                           sizes=[n_inputs] + [nh] * nl +
                                           [n_outputs],
                                           dataset=args.dataset,
                                           args=args)
        self.net = Learner.Learner(config, args)

        self.opt_wt = optim.SGD(self.parameters(), lr=args.lr)

        if self.args.learn_lr:
            self.net.define_task_lr_params(alpha_init=args.alpha_init)
            self.opt_lr = torch.optim.SGD(list(self.net.alpha_lr.parameters()),
                                          lr=args.opt_lr)

        self.loss = CrossEntropyLoss()
        self.is_cifar = ((args.dataset == 'cifar100')
                         or (args.dataset == 'tinyimagenet'))
        self.glances = args.glances

        self.current_task = 0
        self.memories = args.memories
        self.batchSize = int(args.replay_batch_size)

        # allocate buffer
        self.M = []
        self.age = 0

        # handle gpus if specified
        self.cuda = args.cuda
        if self.cuda:
            self.net = self.net.cuda(args.device_id)

        self.n_outputs = n_outputs
        if self.is_cifar:
            self.nc_per_task = int(n_outputs / n_tasks)
        else:
            self.nc_per_task = n_outputs
        self.args = args
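
Example #5 keeps two optimisers: `opt_wt` over the network weights and, when `args.learn_lr` is set, `opt_lr` over the learnable step sizes `alpha_lr`. A minimal sketch of stepping the pair from a single meta-loss is shown below; the `meta_loss` variable is assumed, and this is not necessarily the repository's exact update order:

    self.opt_wt.zero_grad()
    self.opt_lr.zero_grad()
    meta_loss.backward()
    self.opt_lr.step()  # adjust the learnable step sizes alpha_lr
    self.opt_wt.step()  # adjust the network weights
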
Example #6
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()
        self.args = args
        self.nt = n_tasks
        self.reg = args.memory_strength
        self.n_memories = args.n_memories
        self.num_exemplars = 0
        self.n_feat = n_outputs
        self.n_classes = n_outputs
        self.samples_per_task = args.samples_per_task * (1.0 - args.validation)
        if self.samples_per_task <= 0:
            raise ValueError('set args.samples_per_task explicitly')
        self.examples_seen = 0

        self.glances = args.glances
        # setup network

        nl, nh = args.n_layers, args.n_hiddens
        config = mf.ModelFactory.get_model(model_type=args.arch,
                                           sizes=[n_inputs] + [nh] * nl +
                                           [n_outputs],
                                           dataset=args.dataset,
                                           args=args)
        self.net = Learner.Learner(config, args)

        # setup optimizer
        self.opt = torch.optim.SGD(self.parameters(), lr=args.lr)

        # setup losses
        self.bce = torch.nn.CrossEntropyLoss()
        self.kl = torch.nn.KLDivLoss()  # for distillation
        self.lsm = torch.nn.LogSoftmax(dim=1)
        self.sm = torch.nn.Softmax(dim=1)

        # memory
        self.memx = None  # stores raw inputs, PxD
        self.memy = None
        self.mem_class_x = {}  # stores exemplars class by class
        self.mem_class_y = {}

        self.gpu = args.cuda
        self.nc_per_task = int(n_outputs / n_tasks)
        self.n_outputs = n_outputs
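
The constructor prepares `self.kl`, `self.lsm`, and `self.sm` for distillation against the stored exemplars in `mem_class_x`/`mem_class_y`. As a hedged illustration only (not this class's actual training code), those pieces are usually combined as follows, with `new_logits` from the current network, `old_logits` from the stored outputs, and `targets` the ground-truth labels:

    # Hypothetical loss: cross-entropy on new data plus a KL distillation term,
    # weighted by the memory_strength value stored in self.reg.
    dist_loss = self.kl(self.lsm(new_logits), self.sm(old_logits))
    total_loss = self.bce(new_logits, targets) + self.reg * dist_loss
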
Example #7
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()
        self.args = args
        nl, nh = args.n_layers, args.n_hiddens

        self.is_cifar = (args.dataset == 'cifar100'
                         or args.dataset == 'tinyimagenet')
        config = mf.ModelFactory.get_model(args.arch,
                                           sizes=[n_inputs] + [nh] * nl +
                                           [n_outputs],
                                           dataset=args.dataset,
                                           args=args)
        self.net = Learner.Learner(config, args=args)

        self.netforward = self.net.forward

        self.bce = torch.nn.CrossEntropyLoss()

        self.n_outputs = n_outputs
        if self.is_cifar:
            self.nc_per_task = int(n_outputs / n_tasks)
        else:
            self.nc_per_task = n_outputs

        self.opt = optim.SGD(self.parameters(), args.lr)
        self.batchSize = int(args.replay_batch_size)

        self.memories = args.memories
        self.steps = int(args.batches_per_example)
        self.beta = args.beta
        self.gamma = args.gamma

        # allocate buffer
        self.M = []
        self.age = 0

        # handle gpus if specified
        self.cuda = args.cuda
        if self.cuda:
            self.net = self.net.cuda(self.args.device_id)
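
The list buffer `self.M` together with the counter `self.age` is usually maintained by reservoir sampling, so that every example seen so far has an equal chance of staying in memory. The method below is a minimal sketch of that policy under this constructor's attribute names, not necessarily the repository's exact implementation; `x` and `y` are a single input and label:

    import random

    def reservoir_update(self, x, y):
        if len(self.M) < self.memories:
            self.M.append([x, y])  # buffer not full yet: always keep the example
        else:
            j = random.randint(0, self.age)  # inclusive upper bound
            if j < self.memories:
                self.M[j] = [x, y]  # replace a uniformly chosen slot
        self.age += 1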