    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()
        nl, nh = args.n_layers, args.n_hiddens

        if 'mnist' in args.data_file:
            self.net = MLP([n_inputs] + [nh] * nl + [n_outputs])
        else:
            if args.finetune == 'None':
                self.net = ResNet18(n_outputs, bias=args.bias)
            else:
                self.net = pretrained_cifar.cifar_resnet20(
                    pretrained=args.finetune, num_classes=n_outputs)

        self.distance_measure = args.distance
        self.ce = nn.CrossEntropyLoss()
        self.n_outputs = n_outputs
        self.opt = optim.SGD(self.parameters(), args.lr)
        self.n_memories = args.n_memories  # number of memories per task
        self.n_sampled_memories = args.n_sampled_memories  # number of sampled memories per task
        self.n_constraints = args.n_constraints
        self.gpu = args.cuda
        self.batch_size = args.batch_size
        self.n_iter = args.n_iter
        self.alpha = args.alpha

        # allocate ring buffer
        self.memory_data = torch.FloatTensor(self.n_memories, n_inputs)
        self.memory_labs = torch.LongTensor(self.n_memories)
        if args.cuda:
            self.memory_data = self.memory_data.cuda()
            self.memory_labs = self.memory_labs.cuda()

        # we allocate a buffer for each class, not for each task
        self.sampled_class_data = {}
        self.sampled_class_labs = {}
        self.sampled_class_features = {}

        # allocate counters
        self.observed_tasks = []
        self.old_task = -1
        self.mem_cnt = 0
        self.n_task = 0
        self.n_old_task = 0
        self.sample_size_list = []
        self.class_buffer_size = 0
        self.n_class = 0
        self.n_old_class = 0
        self.total_task = args.total_task
        self.class_per_task = args.class_per_task

        # initialized for MOF
        self.mean_features = [
            0 for i in range(self.total_task * self.class_per_task)
        ]
        self.class_nums = [
            0 for i in range(self.total_task * self.class_per_task)
        ]
        self.normalize = args.normalize
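
A minimal sketch (not part of the original class) of how the ring buffer and the `mem_cnt` counter above are typically used together: `mem_cnt` acts as a wrap-around write index into `memory_data`/`memory_labs`. The helper name `store_example` is hypothetical.

    def store_example(self, x, y):
        # Hypothetical helper: write one example into the ring buffer,
        # wrapping the write index once the buffer is full.
        self.memory_data[self.mem_cnt].copy_(x)
        self.memory_labs[self.mem_cnt] = y
        self.mem_cnt = (self.mem_cnt + 1) % self.n_memories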
Example #2
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()
        nl, nh = args.n_layers, args.n_hiddens

        if 'mnist' in args.data_file:
            self.net = MLP([n_inputs] + [nh] * nl + [n_outputs])
        else:
            if args.finetune == 'None':
                self.net = ResNet18(n_outputs, bias=args.bias)
            else:
                self.net = pretrained_cifar.cifar_resnet20(
                    pretrained=args.finetune, num_classes=n_outputs)

        # The choice of loss function here may not be ideal, but it still
        # lets us compare the various selection approaches.
        self.ce = nn.CrossEntropyLoss()
        self.n_outputs = n_outputs
        self.n_inputs = n_inputs
        self.opt = optim.SGD(self.parameters(), args.lr)
        self.n_memories = args.n_memories
        self.n_sampled_memories = args.n_sampled_memories
        self.n_constraints = args.n_constraints
        self.gpu = args.cuda
        self.batch_size = args.batch_size
        self.n_iter = args.n_iter
        self.alpha = args.alpha
        self.loss = args.loss
        self.feature_update = args.feature_update
        self.memory_update = args.memory_update

        self.initialize_criterion(args)

        # allocate counters
        self.observed_tasks = []
        self.old_task = -1
        self.mem_cnt = 0
        self.n_task = 0
        self.n_old_task = 0
        self.sample_size_list = []
        self.class_buffer_size = 0
        self.n_class = 0
        self.n_old_class = 0
        self.total_task = args.total_task
        self.class_per_task = args.class_per_task
        self.total_class = self.total_task * self.class_per_task
        self.class_nums = [0 for i in range(self.total_class)]
        self.initialize_memory_buffer()

        # initialized for MOF
        self.mean_features = [
            0 for i in range(self.total_task * self.class_per_task)
        ]
        self.normalize = args.normalize
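
This variant factors its buffer setup into `initialize_memory_buffer()`, which is defined elsewhere in the class. A plausible sketch, assuming it mirrors the per-class dictionaries and ring buffer allocated inline in Example #1 (illustrative only, not the repository's actual implementation):

    def initialize_memory_buffer(self):
        # Assumed behaviour: per-class sample/label/feature buffers plus
        # the temporary ring buffer, as allocated inline in Example #1.
        self.sampled_class_data = {}
        self.sampled_class_labs = {}
        self.sampled_class_features = {}
        self.memory_data = torch.FloatTensor(self.n_memories, self.n_inputs)
        self.memory_labs = torch.LongTensor(self.n_memories)
        if self.gpu:
            self.memory_data = self.memory_data.cuda()
            self.memory_labs = self.memory_labs.cuda()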
Example #3
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()
        nl, nh = args.n_layers, args.n_hiddens
        self.rn = args.memory_strength  # n: number of gradient vectors used to estimate new-sample similarity (line 5 in Alg. 2)

        if 'mnist' in args.data_file:
            self.net = MLP([n_inputs] + [nh] * nl + [n_outputs])
        else:
            if args.finetune == 'None':
                self.net = ResNet18(n_outputs, bias=args.bias)
            else:
                self.net = pretrained_cifar.cifar_resnet20(
                    pretrained=args.finetune, num_classes=n_outputs)

        self.ce = nn.CrossEntropyLoss()
        self.n_outputs = n_outputs

        self.opt = optim.SGD(self.parameters(), args.lr)

        self.n_memories = args.n_memories  # auxiliary storage before deciding which samples enter the buffer;
        # if this equals the batch size, the method decides which samples to add to the buffer after every batch.
        self.n_sampled_memories = args.n_sampled_memories  # buffer size, M
        self.n_constraints = args.n_constraints  # number of samples replayed from the buffer each time a new batch is received; defaults to the batch size
        self.gpu = args.cuda

        self.batch_size = args.batch_size
        self.n_iter = args.n_iter  # number of iterations (update steps) for each received batch
        self.sim_th = args.change_th
        self.memory_data = torch.FloatTensor(self.n_memories, n_inputs)
        self.memory_labs = torch.LongTensor(self.n_memories)
        self.added_index = self.n_sampled_memories
        self.sampled_memory_data = None
        self.sampled_memory_labs = None
        self.sampled_memory_cos = None  # cosine-similarity scores for buffer samples

        # old grads to measure changes
        if args.cuda:
            self.memory_data = self.memory_data.cuda()
            self.memory_labs = self.memory_labs.cuda()

        # allocate temporary synaptic memory
        self.grad_dims = []
        for param in self.parameters():
            self.grad_dims.append(param.data.numel())

        # allocate counters
        self.observed_tasks = []
        self.old_task = -1
        self.mem_cnt = 0
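
`grad_dims` is the usual GEM-style bookkeeping for flattening per-parameter gradients into a single vector. For reference, a sketch of the standard companion routine (this follows GEM's `store_grad`; the 1-D `grads` vector here is a simplification):

def store_grad(parameters, grads, grad_dims):
    # Copy the model's current gradients into one flat vector, using
    # grad_dims to locate each parameter's slice.
    grads.fill_(0.0)
    cnt = 0
    for param in parameters():
        if param.grad is not None:
            beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
            end = sum(grad_dims[:cnt + 1])
            grads[beg:end].copy_(param.grad.data.view(-1))
        cnt += 1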
Example #4
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()
        nl, nh = args.n_layers, args.n_hiddens

        if 'mnist' in args.data_file:
            self.net = MLP([n_inputs] + [nh] * nl + [n_outputs])
        else:
            if args.finetune == 'None':
                self.net = ResNet18(n_outputs, bias=args.bias)
            else:
                self.net = pretrained_cifar.cifar_resnet20(
                    pretrained=args.finetune)

        self.opt = torch.optim.SGD(self.parameters(), lr=args.lr)
        self.ce = torch.nn.CrossEntropyLoss()
        self.n_outputs = n_outputs
        self.n_iter = args.n_iter
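
Example #4 is a plain fine-tuning baseline: no episodic memory, just `n_iter` SGD steps per incoming batch. A hedged sketch of the matching training step (the `observe(x, t, y)` signature is assumed, following the GEM-style interface used by these classes):

    def observe(self, x, t, y):
        # Hypothetical training step for the baseline: repeated SGD
        # updates on the incoming batch, nothing stored or replayed.
        self.train()
        for _ in range(self.n_iter):
            self.opt.zero_grad()
            loss = self.ce(self.net(x), y)
            loss.backward()
            self.opt.step()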
Example #5
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()
        nl, nh = args.n_layers, args.n_hiddens
        self.margin = args.memory_strength
        self.is_cifar = ('cifar10' in args.data_file)

        if 'mnist' in args.data_file:
            self.net = MLP([n_inputs] + [nh] * nl + [n_outputs])
        else:
            if args.finetune == 'None':
                self.net = ResNet18(n_outputs, bias=args.bias)
            else:
                self.net = pretrained_cifar.cifar_resnet20(
                    pretrained=args.finetune, num_classes=n_outputs)

        self.ce = nn.CrossEntropyLoss()
        self.n_outputs = n_outputs

        self.opt = optim.SGD(self.parameters(), args.lr)
        self.n_iter = args.n_iter
        self.n_memories = args.n_memories
        self.gpu = args.cuda

        # allocate episodic memory
        self.memory_data = torch.FloatTensor(n_tasks, self.n_memories,
                                             n_inputs)
        self.memory_labs = torch.LongTensor(n_tasks, self.n_memories)
        if args.cuda:
            self.memory_data = self.memory_data.cuda()
            self.memory_labs = self.memory_labs.cuda()

        # allocate temporary synaptic memory
        self.grad_dims = []
        for param in self.parameters():
            self.grad_dims.append(param.data.numel())
        self.grads = torch.Tensor(sum(self.grad_dims), n_tasks)
        if args.cuda:
            self.grads = self.grads.cuda()

        # allocate counters
        self.observed_tasks = []
        self.old_task = -1
        self.mem_cnt = 0
        self.nc_per_task = args.class_per_task
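
The `margin` and the `grads` matrix above are the ingredients of GEM's constraint check. A condensed sketch of how they are used at training time (this follows the GEM reference implementation, where `project2cone2` solves the small QP that projects the gradient; shown for orientation, not verbatim from this class):

        # g = current task gradient (column t of self.grads);
        # G = gradients on the episodic memories of all previous tasks.
        indx = torch.LongTensor(self.observed_tasks[:-1])
        dotp = torch.mm(self.grads[:, t].unsqueeze(0),
                        self.grads.index_select(1, indx))
        if (dotp < 0).sum() != 0:
            # A negative dot product means the update would increase the
            # loss on some old task: project g back into the feasible cone,
            # with slack controlled by self.margin.
            project2cone2(self.grads[:, t].unsqueeze(1),
                          self.grads.index_select(1, indx), self.margin)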
Example #6
    def __init__(self, n_inputs, n_outputs, n_tasks, args):
        super(Net, self).__init__()
        nl, nh = args.n_layers, args.n_hiddens

        if 'mnist' in args.data_file:
            self.net = MLP([n_inputs] + [nh] * nl + [n_outputs])
        else:
            if args.finetune == 'None':
                self.net = ResNet18(n_outputs, bias=args.bias)
            else:
                self.net = pretrained_cifar.cifar_resnet20(
                    pretrained=args.finetune, num_classes=n_outputs)

        self.ce = nn.CrossEntropyLoss()
        self.n_outputs = n_outputs
        self.n_inputs = n_inputs
        self.opt = optim.SGD(self.parameters(), args.lr)
        self.n_memories = args.n_memories
        self.n_sampled_memories = args.n_sampled_memories
        self.n_constraints = args.n_constraints
        self.gpu = args.cuda
        self.batch_size = args.batch_size
        self.n_iter = args.n_iter
        self.mode = args.mode

        # allocate counters
        self.observed_tasks = []
        self.old_task = -1
        self.mem_cnt = 0
        self.n_task = 0
        self.n_old_task = 0
        self.sample_size_list = []
        self.class_buffer_size = 0
        self.n_class = 0
        self.n_old_class = 0
        self.total_task = args.total_task
        self.class_per_task = args.class_per_task
        self.total_class = self.total_task * self.class_per_task
        self.class_nums = [0 for i in range(self.total_class)]
        self.initialize_memory_buffer()
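
A small illustration (hypothetical, not from the class) of how the per-class counters and the buffer budget interact: with a fixed replay buffer, each class seen so far receives an equal share.

def per_class_budget(n_sampled_memories, n_class):
    # Equal slice of the replay buffer for every observed class.
    return n_sampled_memories // max(n_class, 1)

# e.g. a buffer of 100 samples shared across 5 seen classes -> 20 per class
assert per_class_budget(100, 5) == 20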