Example #1
    def __init__(self, train_path, test_path, imagenet_path, model):
        self.train_data_loader = dataset.loader(train_path)
        self.test_data_loader = dataset.test_loader(test_path)
        self.imagenet_val_loader = dataset.test_loader(imagenet_path)

        self.model = model
        self.criterion = torch.nn.CrossEntropyLoss()
        self.prunner = FilterPrunner(self.model)
        self.model.train()
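All of these constructors lean on a small `dataset` helper module that is not shown. As a rough, self-contained sketch of what `dataset.loader` and `dataset.test_loader` might look like, built on torchvision's `ImageFolder` (the transforms, image size, and defaults here are assumptions, not the original implementation):

# Hypothetical sketch of the assumed `dataset` module; not the original code.
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms

def loader(path, batch_size=32, num_workers=4):
    # Training loader: shuffled, with light augmentation (assumed).
    transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    return torch.utils.data.DataLoader(
        datasets.ImageFolder(path, transform),
        batch_size=batch_size, shuffle=True, num_workers=num_workers)

def test_loader(path, batch_size=32, num_workers=4, pin_memory=False):
    # Evaluation loader: deterministic, no augmentation (assumed).
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])
    return torch.utils.data.DataLoader(
        datasets.ImageFolder(path, transform),
        batch_size=batch_size, shuffle=False,
        num_workers=num_workers, pin_memory=pin_memory)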
Example #2
 def __init__(self, training, testing, model):
     self.model = model
     self.train_loader = dataset.loader(training)
     self.loss = torch.nn.CrossEntropyLoss()
     self.test_loader = dataset.test_loader(testing)
     self.model.train()
     self.prunner = FilterPrunner(self.model)
Example #3
    def __init__(self, train_path, test_path, model):
        self.train_data_loader = dataset.loader(train_path, batch_size=32)
        self.test_data_loader = dataset.test_loader(test_path, batch_size=32)

        self.model = model
        self.criterion = torch.nn.CrossEntropyLoss()
        self.prunner = FilterPrunner(self.model)
        self.model.train()
Example #4
    def __init__(self, train_path, test_path, model, optimizer):
        self.train_data_loader = dataset.loader(train_path)
        self.test_data_loader = dataset.test_loader(test_path)

        self.optimizer = optimizer

        self.model = model
        self.criterion = torch.nn.CrossEntropyLoss()
        self.model.train()
Example #5
    def __init__(self, train_path, test_path, model):
        print(' PrunningFineTuner_AlexNet init')
        self.train_data_loader = dataset.loader(train_path)
        self.test_data_loader = dataset.test_loader(test_path)

        self.model = model
        self.criterion = torch.nn.CrossEntropyLoss()
        print('PrunningFineTuner_AlexNet init filter prunner')
        self.prunner = FilterPrunner(self.model)
Example #6
    def __init__(self, train_path, test_path, model):
        self.train_data_loader = dataset.loader(train_path)
        self.test_data_loader = dataset.test_loader(test_path)
        #self.test_single_loader = dataset.test_loader(test_single_path) # I am adding this line...

        self.model = model
        self.criterion = torch.nn.CrossEntropyLoss()
        self.prunner = FilterPrunner(self.model)
        self.model.train()
Example #7
 def eval_all(self):
     for d in self.dataset2idx:
         self.model.set_dataset(d)
         print("Evaluating", d, self.dataset2idx[d])
         data_loader = dataset.test_loader(get_test_dataset(d),
                                           self.args.batch_size,
                                           pin_memory=self.args.cuda)
         self.eval_n(self.model, -1, data_loader, "unknown", d)
     self.model.set_dataset(self.args.dataset)
Example #8
def test(model):
	test_data_loader = test_loader('/tmp/mnist/')
	model.eval()
	correct = 0
	total = 0

	for i, (batch, label) in tqdm(enumerate(test_data_loader)):
		batch = batch.cuda()
		output = model(Variable(batch))
		pred = output.data.max(1)[1]
		correct += pred.cpu().eq(label).sum()
		total += label.size(0)
	
	print("Accuracy :", float(correct) / total)
Example #9
    def __init__(self, model, args):

        #self.train_data_loader,self.class_dict = dataset.loader(args.train_path, num_workers=args.num_workers,no_crop=args.no_crop,grayscale=args.grayscale)
        #self.test_data_loader = dataset.test_loader(args.test_path, num_workers=args.num_workers,no_crop=args.no_crop,grayscale=args.grayscale)
        self.train_data_loader, self.class_dict = dataset.loader(
            args.train_path, num_workers=args.num_workers)
        self.test_data_loader = dataset.test_loader(
            args.test_path, num_workers=args.num_workers)

        self.model = model
        self.criterion = torch.nn.CrossEntropyLoss()
        self.model.train()
        self.num_classes = args.num_classes
        self.seed = args.seed
Example #10
    def __init__(self, model, args):

        #self.train_data_loader,self.class_dict = dataset.loader(args.train_path, num_workers=args.num_workers,no_crop=args.no_crop,grayscale = args.grayscale)
        #self.test_data_loader = dataset.test_loader(args.test_path, num_workers=args.num_workers,no_crop=args.no_crop,grayscale = args.grayscale)
        self.train_data_loader, self.class_dict = dataset.loader(
            args.train_path, num_workers=args.num_workers)
        self.test_data_loader = dataset.test_loader(
            args.test_path, num_workers=args.num_workers)

        self.model = model
        self.num_classes = self.model.out_channels
        self.criterion = torch.nn.CrossEntropyLoss()
        self.prunner = FilterPrunner(self.model)

        self.start = args.start
        self.indiv_acc = args.indiv_acc
        self.model.train()
        self.args = args
Example #11
 def eval_all_layers(self):
     for layer in [0, 1, 2]:
         labels_all = []
         predictions_all = []
         tsne_all = []
         offset = 0
         for d in self.dataset2idx:
             print(d, self.dataset2idx[d], layer)
             data_loader = dataset.test_loader(get_test_dataset(d),
                                               self.args.batch_size,
                                               pin_memory=self.args.cuda)
             _, tsne, labels, predictions = self.eval_n(
                 self.model, layer, data_loader, "unknown", d)
             tsne_all.extend(tsne)
             for l in range(len(labels)):
                 labels_all.append(labels[l] + offset)
             predictions_all.extend(predictions)
             offset = offset + 2
Example #12
def test_model():
    # Hyper-parameters
    num_of_samples = 500
    batch_size = 50
    input_size = 1
    num_classes = 2

    # Initialize the model and its parameters.
    model = model_0.Model(input_size, num_classes)
    with open('model_0_parameters', 'rb') as f:
        model.load_state_dict(torch.load(f))

    with torch.no_grad():
        correct = 0
        total = 0
        for data, labels in test_loader(num_of_samples, batch_size):
            data = data.reshape(-1, input_size)
            outputs = model(data)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        print('Accuracy on {} samples: {}'.format(num_of_samples,
                                                  correct / total))
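Unlike the other examples, this `test_loader` takes a sample count and a batch size rather than a path; its definition is not shown. A hypothetical stand-in that yields synthetic `(data, labels)` batches with the same shape contract (the random data here is purely illustrative):

# Hypothetical generator matching test_loader(num_of_samples, batch_size).
import torch

def test_loader(num_of_samples, batch_size, input_size=1, num_classes=2):
    for _ in range(num_of_samples // batch_size):
        data = torch.randn(batch_size, input_size)             # fake inputs
        labels = torch.randint(0, num_classes, (batch_size,))  # fake labels
        yield data, labels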
Example #13
 def get_examplers(self):
     num_examplers = 64
     num_datasets = len(self.dataset2idx)
     examples = []
     lbl = []
     if num_datasets < 2:
         return examples, lbl
     num_datasets_examplers = num_examplers // (num_datasets - 1)  # int: used as a batch size below
     print("Number of datasets:", num_datasets, num_datasets_examplers)
     for d in self.dataset2idx:
         print("Dataset:", d, self.dataset2idx[d], self.args.dataset)
         if d != self.args.dataset:
             print("Adding this dataset to the examplers")
             data_loader = dataset.test_loader(get_test_dataset(d),
                                               num_datasets_examplers,
                                               pin_memory=self.args.cuda)
             for btch, label in data_loader:
                 #btch = data_loader.get_batch([0])
                 # Note: each iteration overwrites examples/lbl, so only the
                 # last non-current dataset's batch is returned.
                 examples = btch
                 lbl = label
                 break
     return examples, lbl
Example #14
        # (excerpt begins mid-file: tail of the model's forward method)
        x = x.view(x.size(0), -1)
        print(x.shape)
        x = self.linear(x)

        return x


config_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]
pretrain_epochs = 1
prune_epochs = 1
device = 'cuda'
train_path = './train'
test_path = './test'

train_data_loader = dataset.train_loader(train_path)
test_data_loader = dataset.test_loader(test_path)

criterion = torch.nn.CrossEntropyLoss()
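The `config_list` above follows the config schema of NNI's model-compression toolkit: 50% sparsity on every `Conv2d` layer. How this script applies it isn't shown in the excerpt; with NNI 2.x, one way to use such a config was roughly the following (the choice of `L1FilterPruner` and the `model` instance are assumptions):

# Hypothetical NNI 2.x pruning setup; the original pruner is not shown.
from nni.algorithms.compression.pytorch.pruning import L1FilterPruner

pruner = L1FilterPruner(model, config_list)  # attach masks per config_list
model = pruner.compress()                    # returns the masked model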


def train(model, device, train_loader, optimizer):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            # Assumed logging body; the original excerpt is truncated here.
            print('Train batch {}: loss {:.4f}'.format(batch_idx, loss.item()))
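Not shown is how `train` is driven. A minimal pretraining driver might look like this, assuming a `model` instance (not shown in the excerpt) has been created and moved to `device`, with placeholder SGD hyperparameters:

# Hypothetical pretraining driver; lr/momentum are placeholders.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for epoch in range(pretrain_epochs):
    train(model, device, train_data_loader, optimizer)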