Example #1
    def fit(self, dataset, test_dataset):
        curr_date = strftime("%d-%H:%M:%S", gmtime())
        blue = lambda x: '\033[94m' + x + '\033[0m'

        # initialise dataloader as single thread else socket errors
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=self.batchsize,
                                                 shuffle=True,
                                                 num_workers=int(self.workers))

        testdataloader = torch.utils.data.DataLoader(test_dataset,
                                                     batch_size=self.batchsize,
                                                     shuffle=True,
                                                     num_workers=int(
                                                         self.workers))

        print("size of train: ", len(dataset))
        print("size of test: ", len(test_dataset))

        classifier = InceptionV4(self.num_classes)
        classifier.cuda()

        # possibly load model for fine-tuning
        if self.model is not None:
            classifier.load_state_dict(torch.load(self.model))

        optimizer = optim.Adadelta(classifier.parameters())

        num_batch = len(dataset) // self.batchsize
        test_acc = []

        class_sample_count = dataset.T1_class_counts
        print("Class sample count: ", class_sample_count)
        weights = 1 / torch.from_numpy(class_sample_count).type(
            torch.FloatTensor)

        # loss is combined cross-entropy and MSE loss
        criterion_CE = CrossEntropyLoss(weight=weights.cuda())
        criterion_MSE = MSELoss()

        for epoch in range(self.num_epoch):
            for i, data in enumerate(dataloader, 0):
                MRF, T1, T2 = data[0].type(torch.FloatTensor), data[1].type(
                    torch.LongTensor), data[2].type(torch.LongTensor)
                MRF, T1, T2 = Variable(MRF), Variable(T1), Variable(T2)
                MRF, T1, T2 = MRF.cuda(), T1.cuda(), T2.cuda()

                optimizer.zero_grad()
                classifier = classifier.train()
                pred = classifier(MRF).view(self.batchsize, -1)
                loss = criterion_CE(pred, T1) * self.alpha
                # convert predictions to integer class predictions, add square distance to loss
                loss += criterion_MSE(
                    pred.data.max(1)[1].type(torch.FloatTensor),
                    T1.type(torch.FloatTensor)) * (1 - self.alpha)
                loss.backward()
                optimizer.step()

                if i % max(1, num_batch // 40) == 0:
                    print('[%d: %d/%d] train loss: %f' %
                          (epoch, i, num_batch, loss.item()))

                if i % max(1, num_batch // 5) == 0:
                    j, data = next(enumerate(testdataloader, 0))
                    MRF, T1, T2 = data[0].type(
                        torch.FloatTensor), data[1].type(
                            torch.LongTensor), data[2].type(torch.LongTensor)
                    MRF, T1, T2 = Variable(MRF), Variable(T1), Variable(T2)
                    MRF, T1, T2 = MRF.cuda(), T1.cuda(), T2.cuda()

                    # forward pass on the held-out batch
                    classifier = classifier.eval()
                    pred = classifier(MRF).view(self.batchsize, -1)
                    loss = criterion_CE(pred, T1) * self.alpha
                    loss += criterion_MSE(
                        pred.data.max(1)[1].type(torch.FloatTensor),
                        T1.type(torch.FloatTensor)) * (1 - self.alpha)

                    pred_choice = pred.data.max(1)[1]
                    correct = pred_choice.eq(T1.data).cpu().sum()
                    print(pred_choice[0:10])
                    print('[%d: %d/%d] %s loss: %f accuracy: %f' %
                          (epoch, i, num_batch, blue('test'), loss.item(),
                           correct.item() / float(self.batchsize)))

            if self.model_name is None:
                torch.save(classifier.state_dict(), "models/model" + curr_date)
            else:
                torch.save(classifier.state_dict(),
                           "models/" + self.model_name)
Example #2
    def fit(self, dataset, test_dataset):
        curr_date = strftime("%d-%H:%M:%S", gmtime())
        print("saving model to: " + "models/model" + curr_date)
        blue = lambda x: '\033[94m' + x + '\033[0m'

        # initialise dataloader as single thread else socket errors
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=self.batchsize,
                                                 shuffle=True,
                                                 num_workers=int(self.workers))

        testdataloader = torch.utils.data.DataLoader(test_dataset,
                                                     batch_size=self.batchsize,
                                                     shuffle=True,
                                                     num_workers=int(
                                                         self.workers))

        print("size of train: ", len(dataset))
        print("size of test: ", len(test_dataset))

        regressor = InceptionV4(1)
        regressor.cuda()

        # possibly load model for fine-tuning
        if self.model is not None:
            regressor.load_state_dict(torch.load(self.model))

        optimizer = optim.Adagrad(regressor.parameters(), lr=0.001)

        num_batch = len(dataset) // self.batchsize
        train_loss = []
        val_loss = []
        criterion = SmoothL1Loss()

        for epoch in range(self.num_epoch):
            for i, data in enumerate(dataloader, 0):
                MRF, T1, T2 = data[0].type(torch.FloatTensor), data[1].type(
                    torch.FloatTensor), data[2].type(torch.FloatTensor)
                MRF, T1, T2 = Variable(MRF), Variable(T1), Variable(T2)
                MRF, T1, T2 = MRF.cuda(), T1.cuda(), T2.cuda()

                optimizer.zero_grad()
                regressor = regressor.train()
                pred = regressor(MRF).view(self.batchsize, -1)
                loss = criterion(pred, T1)

                if i % max(1, num_batch // 40) == 0:
                    print('[%d: %d/%d] train loss: %f' %
                          (epoch, i, num_batch, loss.item()))
                loss.backward()
                optimizer.step()

                if i % max(1, num_batch // 20) == 0:
                    train_loss.append(loss.item())

                    j, data = next(enumerate(testdataloader, 0))
                    MRF, T1, T2 = data[0].type(
                        torch.FloatTensor), data[1].type(
                            torch.FloatTensor), data[2].type(torch.FloatTensor)
                    MRF, T1, T2 = Variable(MRF), Variable(T1), Variable(T2)
                    MRF, T1, T2 = MRF.cuda(), T1.cuda(), T2.cuda()

                    regressor = regressor.eval()
                    pred = regressor(MRF).view(self.batchsize, -1)

                    # print(pred[0:10])
                    loss = criterion(pred, T1)
                    val_loss.append(loss.item())

                    print('[%d: %d/%d] %s loss: %f' %
                          (epoch, i, num_batch, blue('test'), loss.item()))

            if self.model_name is None:
                torch.save(regressor.state_dict(), "models/model" + curr_date)
            else:
                torch.save(regressor.state_dict(), "models/" + self.model_name)

            np.save("outputs/test_loss" + curr_date, np.array(test_loss))
            np.save("outputs/val_loss" + curr_date, np.array(val_loss))
Example #3
    def fit(self, dataset, test_dataset):
        device = torch.device("cuda:0")
        curr_date = strftime("%d-%H:%M:%S", gmtime())
        blue = lambda x: '\033[94m' + x + '\033[0m'

        # initialise dataloader as single thread else socket errors
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=self.batchsize,
                                                 shuffle=True,
                                                 num_workers=int(self.workers))

        testdataloader = torch.utils.data.DataLoader(test_dataset,
                                                     batch_size=self.batchsize,
                                                     shuffle=True,
                                                     num_workers=int(
                                                         self.workers))

        print("size of train: ", len(dataset))
        print("size of test: ", len(test_dataset))

        classifier = InceptionV4(self.num_classes)
        classifier.to(device)

        # possibly load model for fine-tuning
        if self.model is not None:
            classifier.load_state_dict(torch.load(self.model))

        optimizer = optim.Adadelta(classifier.parameters())

        test_acc = []

        class_sample_count = dataset.T1_class_counts
        print("Class sample count: ", class_sample_count)
        weights = 1 / torch.from_numpy(class_sample_count).type(
            torch.FloatTensor)

        # keep track of loss
        train_loss = []
        val_loss = []

        # loss is combined cross-entropy and MSE loss
        criterion_CE = CrossEntropyLoss(weight=weights.to(device))
        criterion_MSE = MSELoss()
        start = time.time()

        for epoch in range(self.num_epoch):
            for i, data in enumerate(dataloader, 0):
                if i > self.steps_per_batch:
                    break

                MRF, T1, T2 = data[0].type(torch.FloatTensor), data[1].type(
                    torch.LongTensor), data[2].type(torch.LongTensor)
                MRF, T1, T2 = Variable(MRF), Variable(T1), Variable(T2)
                MRF, T1, T2 = MRF.to(device), T1.to(device), T2.to(device)

                # select nonzero T1 locs; the boolean mask stays on MRF's device
                nonzero_locs = torch.norm(MRF, dim=(1, 2)) > 1
                MRF = MRF[nonzero_locs]
                T1 = T1[nonzero_locs]

                # skip this batch if every voxel was filtered out
                if MRF.size()[0] == 0:
                    continue

                optimizer.zero_grad()
                classifier = classifier.train()
                pred = classifier(MRF).view(MRF.size()[0], -1)

                # convert class probabilities to choice for MSE
                pred_MSE = Variable(pred.data.max(1)[1].type(
                    torch.FloatTensor),
                                    requires_grad=True)
                pred_MSE = pred_MSE.to(device) / self.num_classes  # normalize
                T1_MSE = T1.type(
                    torch.FloatTensor).to(device) / self.num_classes

                loss = criterion_CE(pred, T1) * self.alpha
                # convert predictions to integer class predictions, add square distance to loss
                loss += criterion_MSE(pred_MSE, T1_MSE) * (1 - self.alpha)
                # print(loss)
                # print()
                loss.backward()
                optimizer.step()

                if i % max(1, self.steps_per_batch // 40) == 0:
                    # record plain floats so the history saves as a numeric array
                    ce_loss = (criterion_CE(pred, T1) * self.alpha).item()
                    mse_loss = (criterion_MSE(pred_MSE, T1_MSE) *
                                (1 - self.alpha)).item()
                    train_loss.append((epoch, i + epoch * self.steps_per_batch,
                                       ce_loss, mse_loss))

                    print('[%d: %d/%d] MSE loss: %f CE loss: %f' %
                          (epoch, i, self.steps_per_batch, mse_loss, ce_loss))

                if i % max(1, self.steps_per_batch // 5) == 0:

                    j, data = next(enumerate(testdataloader, 0))
                    MRF, T1, T2 = data[0].type(
                        torch.FloatTensor), data[1].type(
                            torch.LongTensor), data[2].type(torch.LongTensor)
                    MRF, T1, T2 = Variable(MRF), Variable(T1), Variable(T2)
                    MRF, T1, T2 = MRF.to(device), T1.to(device), T2.to(device)

                    classifier = classifier.eval()
                    pred = classifier(MRF).view(MRF.size()[0], -1)

                    # convert class probabilities to choice for MSE
                    pred_MSE = Variable(pred.data.max(1)[1].type(
                        torch.FloatTensor),
                                        requires_grad=True)
                    pred_MSE = pred_MSE.to(
                        device) / self.num_classes  # normalize
                    T1_MSE = T1.type(
                        torch.FloatTensor).to(device) / self.num_classes

                    loss = criterion_CE(pred, T1) * self.alpha
                    # convert predictions to integer class predictions, add square distance to loss
                    loss += criterion_MSE(pred_MSE, T1_MSE) * (1 - self.alpha)
                    # val_loss.append((epoch, i + epoch*self.steps_per_batch, loss.item()))

                    pred_choice = pred.data.max(1)[1]
                    correct = pred_choice.eq(T1.data).cpu().sum()
                    print(pred_choice[0:10])
                    print(
                        '[%d: %d/%d] %s loss: %f accuracy: %f' %
                        (epoch, i, self.steps_per_batch, blue('test'),
                         loss.item(), correct.item() / float(self.batchsize)))
                    print("Time elapsed: ", (time.time() - start) / 60,
                          " minutes")

            if self.model_name is None:
                torch.save(classifier.state_dict(), "models/model" + curr_date)
            else:
                torch.save(classifier.state_dict(),
                           "models/" + self.model_name)

            np.save("outputs/loss" + curr_date, np.array(train_loss))
Example #4
    def fit(self, dataset, test_dataset):
        device = torch.device("cuda:0")
        curr_date = strftime("%d-%H:%M:%S", gmtime())
        blue = lambda x: '\033[94m' + x + '\033[0m'

        # initialise dataloader as single thread else socket errors
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=self.batchsize,
                                                 shuffle=True,
                                                 num_workers=int(self.workers))

        testdataloader = torch.utils.data.DataLoader(test_dataset,
                                                     batch_size=self.batchsize,
                                                     shuffle=True,
                                                     num_workers=int(
                                                         self.workers))

        print("size of train: ", len(dataset))
        print("size of test: ", len(test_dataset))

        regressor = InceptionV4(1)
        regressor.to(device)

        # possibly load model for fine-tuning
        if self.model is not None:
            regressor.load_state_dict(torch.load(self.model))

        optimizer = optim.Adadelta(regressor.parameters(), weight_decay=0.1)

        test_acc = []

        # keep track of loss
        train_loss = []
        val_loss = []

        criterion_MSE = MSELoss()
        start = time.time()

        for epoch in range(self.num_epoch):
            for i, data in enumerate(dataloader, 0):
                if i > self.steps_per_batch:
                    break

                # print("yeet")
                MRF, T1, T2 = data[0].type(torch.FloatTensor), data[1].type(
                    torch.FloatTensor), data[2].type(torch.FloatTensor)
                T1, T2 = T1 / self.num_classes, T2 / self.num_classes
                MRF, T1, T2 = Variable(MRF), Variable(T1), Variable(T2)
                MRF, T1, T2 = MRF.to(device), T1.to(device), T2.to(device)

                # select nonzero T1 locs; the boolean mask stays on MRF's device
                nonzero_locs = torch.norm(MRF, dim=(1, 2)) > 1
                MRF = MRF[nonzero_locs]
                T1 = T1[nonzero_locs]

                # skip this batch if every voxel was filtered out
                if MRF.size()[0] == 0:
                    continue

                optimizer.zero_grad()
                regressor = regressor.train()
                pred = regressor(MRF).view(MRF.size()[0])
                loss = criterion_MSE(pred, T1)
                loss.backward()
                optimizer.step()

                if i % max(1, self.steps_per_batch // 40) == 0:
                    print('[%d: %d/%d] train loss: %f' %
                          (epoch, i, self.steps_per_batch, loss.item()))

                if i % max(1, self.steps_per_batch // 5) == 0:
                    train_loss.append(
                        (epoch, i + epoch * self.steps_per_batch, loss.item()))

                    j, data = next(enumerate(testdataloader, 0))
                    MRF, T1, T2 = data[0].type(
                        torch.FloatTensor), data[1].type(
                            torch.FloatTensor), data[2].type(torch.FloatTensor)
                    T1, T2 = T1 / self.num_classes, T2 / self.num_classes
                    MRF, T1, T2 = Variable(MRF), Variable(T1), Variable(T2)
                    MRF, T1, T2 = MRF.to(device), T1.to(device), T2.to(device)

                    regressor = regressor.eval()
                    pred = regressor(MRF).view(MRF.size()[0])
                    loss = criterion_MSE(pred, T1)
                    val_loss.append(
                        (epoch, i + epoch * self.steps_per_batch, loss.item()))

                    print(pred[0:10])
                    print('[%d: %d/%d] %s loss: %f ' %
                          (epoch, i, self.steps_per_batch, blue('test'),
                           loss.item()))
                    print("Time elapsed: ", (time.time() - start) / 60,
                          " minutes")

            if self.model_name is None:
                torch.save(regressor.state_dict(), "models/model" + curr_date)
            else:
                torch.save(regressor.state_dict(), "models/" + self.model_name)

            np.save("outputs/train_loss" + curr_date, np.array(train_loss))
            np.save("outputs/val_loss" + curr_date, np.array(val_loss))
Example #5
    def fit(self, dataset, val_dataset):
        device = torch.device("cuda:" + str(self.device))
        print("Using device: ", device)
        curr_date = strftime("%d-%H:%M:%S", gmtime())
        print("Training started: ", curr_date)
        red = lambda x: '\033[91m' + x + '\033[0m'
        green = lambda x: '\033[92m' + x + '\033[0m'
        blue = lambda x: '\033[94m' + x + '\033[0m'

        # initialise dataloader as single thread else socket errors
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=self.batchsize,
                                                 shuffle=True,
                                                 num_workers=int(self.workers))

        valdataloader = torch.utils.data.DataLoader(val_dataset,
                                                    batch_size=self.batchsize,
                                                    shuffle=True,
                                                    num_workers=int(
                                                        self.workers))

        print("size of train: ", len(dataset))
        print("size of val: ", len(val_dataset))

        regressor = InceptionV4(1)
        regressor.to(device)

        # possibly load model for fine-tuning
        if self.model is not None:
            regressor.load_state_dict(torch.load(self.model))

        optimizer = optim.Adagrad(regressor.parameters(), lr=1e-3)

        val_acc = []

        # keep track of loss
        train_loss = []
        val_loss = []

        criterion = MSELoss()
        start = time.time()

        for epoch in range(self.num_epoch):
            dataloader_tqdm = tqdm(enumerate(dataloader, 0), ascii=True)
            for i, data in dataloader_tqdm:
                if i > self.steps_per_batch:
                    break

                MRF, T1, T2 = data[0].type(torch.FloatTensor), data[1].type(
                    torch.FloatTensor), data[2].type(torch.FloatTensor)
                MRF, T1, T2 = Variable(MRF), Variable(T1), Variable(T2)
                MRF, T1, T2 = MRF.to(device), T1.to(device), T2.to(device)

                # select nonzero T1 locs; the boolean mask stays on MRF's device
                nonzero_locs = torch.norm(MRF, dim=(1, 2)) > 0.1
                MRF = MRF[nonzero_locs]
                T1 = T1[nonzero_locs]

                # skip this batch if every voxel was filtered out
                if MRF.size()[0] == 0:
                    continue

                optimizer.zero_grad()
                regressor = regressor.train()
                pred = regressor(MRF).view(MRF.size()[0])
                loss = criterion(pred, T1)
                loss.backward()
                optimizer.step()

                train_loss.append([
                    epoch, i + epoch * self.steps_per_batch,
                    np.float32(loss.item()) / (2**16)
                ])
                dataloader_tqdm.set_description(
                    '[%d: %d/%d] train loss: %s' %
                    (epoch, i, self.steps_per_batch,
                     red(str(np.float32(loss.item()) / (2**16)))))

                # print current validation loss
                if i % max(1, self.steps_per_batch // 10) == 0:
                    j, data = next(enumerate(valdataloader, 0))
                    MRF, T1, T2 = data[0].type(
                        torch.FloatTensor), data[1].type(
                            torch.FloatTensor), data[2].type(torch.FloatTensor)
                    MRF, T1, T2 = Variable(MRF), Variable(T1), Variable(T2)
                    MRF, T1, T2 = MRF.to(device), T1.to(device), T2.to(device)

                    regressor = regressor.eval()
                    pred = regressor(MRF).view(MRF.size()[0])
                    loss = criterion(pred, T1)
                    val_loss.append([
                        epoch, i + epoch * self.steps_per_batch,
                        np.float32(loss.item()) / (2**16)
                    ])

                    print()
                    print('[%d: %d/%d] val loss: %s ' %
                          (epoch, i, self.steps_per_batch,
                           blue(str(np.float32(loss.item()) / (2**16)))))
                    print("Time elapsed: ",
                          int(time.time() - start) // 60, "minutes",
                          int(time.time() - start) % 60, "seconds")

            if self.model_name is None:
                torch.save(regressor.state_dict(), "models/model" + curr_date)
                np.save("outputs/train_loss" + curr_date, np.array(train_loss))
                np.save("outputs/val_loss" + curr_date, np.array(val_loss))
            else:
                torch.save(regressor.state_dict(), "models/" + self.model_name)
                np.save("outputs/train_loss" + self.model_name,
                        np.array(train_loss))
                np.save("outputs/val_loss" + self.model_name,
                        np.array(val_loss))
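
Every variant checkpoints with state_dict() and reloads through load_state_dict() when self.model is set. The round trip in isolation (a sketch; InceptionV4 is this project's network class and is assumed importable):

import torch

net = InceptionV4(1)  # project-specific class, assumed importable
torch.save(net.state_dict(), "models/checkpoint")

# later, or in another process: rebuild the architecture, then load the weights
net = InceptionV4(1)
net.load_state_dict(torch.load("models/checkpoint"))
net.eval()  # switch to inference mode before predicting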
Example #6
    def fit(self, dataset, test_dataset):
        device = torch.device("cuda")
        curr_date = strftime("%d-%H:%M:%S", gmtime())
        blue = lambda x: '\033[94m' + x + '\033[0m'

        dataloader = torch.utils.data.DataLoader(dataset, batch_size=self.batchsize,
                                                shuffle=True, num_workers=int(self.num_workers))

        testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=self.batchsize,
                                                shuffle=True, num_workers=int(self.num_workers))

        print("size of train: ", len(dataset))
        print("size of test: ", len(test_dataset))

        classifier = InceptionV4(self.num_classes)
        classifier.to(device)

        # possibly load model for fine-tuning
        if self.model is not None:
            classifier.load_state_dict(torch.load(self.model))

        optimizer = optim.Adam(classifier.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()

        test_acc = []
        train_loss = []
        val_loss = []

        start = time.time()

        for epoch in range(self.num_epoch):
            for i, data in enumerate(dataloader, 0):
                spectrograms, labels = data[0].type(torch.FloatTensor), data[1].type(torch.LongTensor)
                spectrograms.unsqueeze_(1)
                spectrograms, labels = Variable(spectrograms), Variable(labels)
                spectrograms, labels = spectrograms.to(device), labels.to(device)

                optimizer.zero_grad()
                classifier = classifier.train()
                pred = classifier(spectrograms).view(spectrograms.size()[0],-1)

                loss = criterion(pred, labels)
                loss.backward()
                optimizer.step()
                
                if i % max(1, len(dataloader) // 40) == 0:
                    train_loss.append([i + epoch*len(dataloader), loss.item()])
                    print("Loss: ", loss.item())
    
                if i % max(1, len(dataloader) // 5) == 0:

                    j, data = next(enumerate(testdataloader, 0))
                    spectrograms, labels = data[0].type(torch.FloatTensor), data[1].type(torch.LongTensor)
                    spectrograms.unsqueeze_(1)
                    spectrograms, labels = Variable(spectrograms), Variable(labels)
                    spectrograms, labels = spectrograms.to(device), labels.to(device)

                    classifier = classifier.eval()
                    pred = classifier(spectrograms).view(spectrograms.size()[0],-1)

                    loss = criterion(pred, labels)
                    val_loss.append([i + epoch*len(dataloader), loss.item()])

                    pred_choice = pred.data.max(1)[1]
                    correct = pred_choice.eq(labels.data).cpu().sum()
                    print(pred_choice[0:10])
                    print('[%d: %d/%d] %s loss: %f accuracy: %f' %(epoch, i, len(dataloader), blue('test'), loss.item(), correct.item()/float(self.batchsize)))
                    print("Time elapsed: ", (time.time() - start)/60, " minutes")

            if self.model_name is None:
                torch.save(classifier.state_dict(),"models/model" + curr_date + ".mdl")
                np.save("outputs/train_loss" + curr_date, np.array(train_loss))
                np.save("outputs/val_loss" + curr_date, np.array(val_loss))


            else:
                torch.save(classifier.state_dict(),"models/" + self.model_name + ".mdl")
                np.save("outputs/train_loss" + self.model_name, np.array(train_loss))
                np.save("outputs/val_loss" + self.model_name, np.array(val_loss))