Example #1
def validateScan(self):
    # Run the validator against this scan and set the lock status from
    # the boolean result.
    v = Validator()
    if v.validate(self.scan):
        self.scan.lock.setStatus('unlocked')
    else:
        self.scan.lock.setStatus('locked')
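The snippet assumes a Validator whose validate method returns a plain boolean. A minimal stand-in under that assumption (the field checked is hypothetical):

class Validator:
    """Hypothetical stub: validate() returns True when a scan passes."""

    def validate(self, scan):
        # A real validator would check checksums, required fields, etc.
        return getattr(scan, 'is_complete', False)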
Example #2
import torch
from torch.utils.data import DataLoader


class SetTester:
    def __init__(self, module_path, hyper_params, use_cuda, mission=1):

        self.dataset = get_test_set(mission=mission)
        print("test number:", len(self.dataset))

        self.hyper_params = hyper_params
        self.data_loader = DataLoader(
            dataset=self.dataset,
            num_workers=self.hyper_params["threads"],
            batch_size=self.hyper_params["batch_size"],
            shuffle=False)

        self.resnet = get_network()
        self.resnet.load_state_dict(torch.load(module_path))
        if use_cuda:
            self.resnet = self.resnet.cuda()

        self.v = Validator(resnet=self.resnet,
                           hyper_params=hyper_params,
                           use_cuda=use_cuda,
                           data_loader=self.data_loader)

    def test(self, SHOW_PIC=False, TTA=False):
        return self.v.validate(SHOW_PIC=SHOW_PIC, TTA=TTA)

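A hypothetical way to drive this class; the checkpoint path is a placeholder, and the two hyper-parameter keys are the ones the constructor reads:

hyper_params = {"threads": 4, "batch_size": 16}
tester = SetTester("checkpoints/resnet.pth", hyper_params,
                   use_cuda=torch.cuda.is_available())
print("accuracy:", tester.test(TTA=True))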
Example #3
import os
import logging
import pickle


class TrainingState:
    def __init__(self, corpus, dictionary, hyperparameters):
        self.corpus, self.dictionary, self.hyperparameters = corpus, dictionary, hyperparameters

        logging.info("Initializing training state.")

        from model.model import Model
        self.model = Model(self.hyperparameters)

        self.count, self.epoch = (0, 1)

        from lexicon import Corpus
        from validate import Validator

        logging.info("Processing validation corpus ...")
        validation_corpus = Corpus(os.path.join(hyperparameters.data_dir, hyperparameters.validation_sentences))
        logging.info("Validation corpus processed.")

        logging.info("Initialising model validator...")
        self.validator = Validator(validation_corpus, self.model)
        logging.info("Model validator initialised.")

        from examples import ExampleStream, BatchStream

        logging.info("Initialising text window stream...")
        self.examples = ExampleStream(self.corpus, self.dictionary, self.hyperparameters)
        logging.info("Text window stream initialised.")

        logging.info("Initialising batch stream...")
        self.batches = BatchStream(self.examples)
        logging.info("Batch stream initialised.")

    def run_epoch(self):
        logging.info("Starting epoch #%d." % self.epoch)

        for batch in self.batches:
            self.process(batch)

        logging.info("Finished epoch #%d. Rewinding training stream." % self.epoch)
        self.epoch += 1
        self.corpus.rewind()

        # Rebuild the example and batch streams over the rewound corpus so
        # the next epoch iterates fresh batches.
        from examples import ExampleStream, BatchStream
        self.examples = ExampleStream(self.corpus, self.dictionary, self.hyperparameters)
        self.batches = BatchStream(self.examples)

    def process(self, batch):
        self.count += len(batch)
        self.model.train(batch)

        # Log roughly every 1000 examples; the modulus is rounded down to
        # a whole number of batches so it can actually hit zero.
        if self.count % (int(1000. / self.hyperparameters.batch_size) * self.hyperparameters.batch_size) == 0:
            logging.info("Finished training step %d (epoch %d)" % (self.count, self.epoch))

        # Save and validate roughly every validate_every examples, again
        # rounded down to a whole number of batches.
        if self.count % (int(self.hyperparameters.validate_every * 1. / self.hyperparameters.batch_size) * self.hyperparameters.batch_size) == 0:
            self.save()
            self.validator.validate(self.count)

    def save(self):
        filename = os.path.join(self.hyperparameters.run_dir, "trainstate.pkl")
        logging.info("Trying to save training state to %s..." % filename)
        with open(filename, 'wb') as f:
            pickle.dump(self.__getstate__(), f)

    def __getstate__(self):
        return (self.corpus.__getstate__(), self.dictionary.__getstate__(),
                self.hyperparameters, self.model.__getstate__(),
                self.count, self.epoch, self.examples.__getstate__())

    def __setstate__(self, state):
        # state[2] holds the hyperparameters; restore them before building
        # the model, which needs them. Restoring the corpus and dictionary
        # from state[0] and state[1] is left to the caller.
        self.hyperparameters = state[2]
        from model.model import Model
        self.model = Model(self.hyperparameters)
        self.model.__setstate__(state[-4])
        self.count, self.epoch = state[-3:-1]
        self.examples.__setstate__(state[-1])
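Restoring a saved state mirrors save(). A sketch, assuming ts is a TrainingState built with the same corpus, dictionary and hyperparameters that produced the checkpoint:

state_path = os.path.join(hyperparameters.run_dir, "trainstate.pkl")
with open(state_path, 'rb') as f:
    ts.__setstate__(pickle.load(f))
while ts.epoch <= 10:  # train ten epochs, checkpointing as configured
    ts.run_epoch()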
Example #4
import gc
import os

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

# Module-level flag; presumably defined at module scope in the original
# project, since setup() reads it without defining it.
SHOW_NET = False


class Trainer:
    def __init__(self):
        pass

    def setup(self,
              valid_rate=0.1,
              use_cuda=True,
              model_path="",
              use_exist_dataset=False,
              module_save_dir="",
              mission=1,
              criterion=None,
              hyper_params=None,
              FREEZE_PARAM=False,
              PRETRAINED=False):
        """setup the module"""
        self.train_dataset, self.valid_dataset = get_dataset(
            valid_rate, USE_TRANSFORM=True, mission=mission)

        self.hyper_params = hyper_params
        self.train_data_loader = DataLoader(
            dataset=self.train_dataset,
            num_workers=self.hyper_params["threads"],
            batch_size=self.hyper_params["batch_size"],
            shuffle=True)
        self.valid_data_loader = DataLoader(
            dataset=self.valid_dataset,
            num_workers=self.hyper_params["threads"],
            batch_size=self.hyper_params["batch_size"],
            shuffle=False)

        self.use_cuda = use_cuda
        self.resnet = get_network(mission)
        if PRETRAINED:
            self.resnet.load_state_dict(torch.load(model_path))
        if use_cuda:
            self.resnet = self.resnet.cuda()
        if SHOW_NET:
            from torchsummary import summary
            batch_size = self.hyper_params["batch_size"]
            input_size = self.hyper_params["input_size"][0]
            summary(self.resnet, (3, input_size, input_size), batch_size)

        if self.hyper_params["optimizer"] == "SGD":
            self.optimizer = torch.optim.SGD(
                self.resnet.parameters(),
                lr=self.hyper_params["learning_rate"],
                momentum=0.99)
        elif self.hyper_params["optimizer"] == "Adam":
            self.optimizer = torch.optim.Adam(
                self.resnet.parameters(),
                lr=self.hyper_params["learning_rate"])
        else:
            raise ValueError("unknown optimizer: %s" % self.hyper_params["optimizer"])

        self.StepLR = torch.optim.lr_scheduler.StepLR(
            self.optimizer,
            step_size=self.hyper_params["step_size"],
            gamma=self.hyper_params["lr_gamma"])
        self.criterion = criterion if criterion is not None else torch.nn.CrossEntropyLoss()
        self.module_save_dir = module_save_dir
        self.v = Validator(resnet=self.resnet,
                           hyper_params=hyper_params,
                           use_cuda=use_cuda,
                           data_loader=self.valid_data_loader)

    def train(self):
        """train the model"""
        epochs = self.hyper_params["epochs"]
        epoch_lapse = self.hyper_params["epoch_lapse"]
        batch_size = self.hyper_params["batch_size"]
        epoch_save = self.hyper_params["epoch_save"]
        width_out, height_out = self.hyper_params["input_size"]
        prefix = self.hyper_params["name_prefix"]

        val_acc = 0.0  # defined up front so the save branch below never sees it unbound

        for epoch in range(epochs):

            total_loss = 0

            for data in tqdm(self.train_data_loader, ascii=True, ncols=120):

                batch_train_x, batch_train_y = data
                batch_train_y = batch_train_y.long()
                # Add a channel dimension if a batch arrives as (N, H, W).
                if len(batch_train_x.size()) == 3:
                    batch_train_x = batch_train_x.unsqueeze(1)
                if len(batch_train_y.size()) == 3:
                    batch_train_y = batch_train_y.unsqueeze(1)

                if self.use_cuda:
                    batch_train_x = batch_train_x.cuda()
                    batch_train_y = batch_train_y.cuda()

                batch_loss = self.train_step(batch_train_x,
                                             batch_train_y,
                                             optimizer=self.optimizer,
                                             criterion=self.criterion,
                                             resnet=self.resnet,
                                             width_out=width_out,
                                             height_out=height_out,
                                             batch_size=batch_size)

                total_loss += batch_loss

            if (epoch + 1) % epoch_lapse == 0:
                val_acc = self.v.validate()
                # get_last_lr() replaces the deprecated get_lr() read
                # (available since PyTorch 1.4).
                print(
                    "Epoch %d: total loss %f, learning rate %f, validation accuracy %f"
                    % (epoch + 1, total_loss, self.StepLR.get_last_lr()[0], val_acc))

            if (epoch + 1) % epoch_save == 0:
                name_else = prefix + "-epoch-" + \
                    str(epoch + 1) + "-validacc-" + str(val_acc)
                self.save_module(name_else=name_else)
                print("MODULE SAVED.")

            self.StepLR.step()

        gc.collect()

    def train_step(self, inputs, labels, optimizer, criterion, resnet,
                   batch_size, width_out, height_out):
        optimizer.zero_grad()
        outputs = resnet(inputs)
        loss = criterion(outputs, labels)

        loss.backward()
        optimizer.step()
        # Return a plain float so the caller's running total does not keep
        # the autograd graph alive.
        return loss.item()

    def save_module(self, name_else=""):
        import datetime
        filename = ('resnet-' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') +
                    name_else + '.pth')
        torch.save(self.resnet.state_dict(),
                   os.path.join(self.module_save_dir, filename))
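Seen together, the hyper_params keys this Trainer reads form an implicit config schema. A hypothetical setup call; every value below is illustrative, and the Validator may read further keys of its own:

hyper_params = {
    "threads": 4,             # DataLoader worker processes
    "batch_size": 16,
    "optimizer": "SGD",       # or "Adam"
    "learning_rate": 1e-3,
    "step_size": 10,          # StepLR period, in epochs
    "lr_gamma": 0.5,          # StepLR decay factor
    "epochs": 50,
    "epoch_lapse": 5,         # validate every N epochs
    "epoch_save": 10,         # checkpoint every N epochs
    "input_size": (224, 224),
    "name_prefix": "run1",
}

trainer = Trainer()
trainer.setup(hyper_params=hyper_params,
              module_save_dir="checkpoints/",
              use_cuda=torch.cuda.is_available())
trainer.train()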
Example #5
import gc
import os

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

# Module-level flag; presumably defined at module scope in the original
# project, since setup() reads it without defining it.
SHOW_NET = False


class Trainer:
    def __init__(self):
        pass

    def setup(self,
              valid_rate=0.1,
              use_cuda=True,
              model_path="",
              use_exist_dataset=False,
              cell_dir="",
              mask_dir="",
              module_save_dir="",
              tmp_dir="",
              criterion=None,
              optimizer=None,
              hyper_params=None,
              FREEZE_PARAM=False,
              PRETRAINED=False):
        """setup the module"""
        self.train_dataset, self.valid_dataset = get_dataset(
            cell_dir, mask_dir, valid_rate, tmp_dir, use_exist_dataset)

        self.hyper_params = hyper_params
        self.train_data_loader = DataLoader(
            dataset=self.train_dataset,
            num_workers=self.hyper_params["threads"],
            batch_size=self.hyper_params["batch_size"],
            shuffle=True)
        self.valid_data_loader = DataLoader(
            dataset=self.valid_dataset,
            num_workers=self.hyper_params["threads"],
            batch_size=self.hyper_params["batch_size"],
            shuffle=False)

        self.use_cuda = use_cuda
        self.unet = UNet(
            n_channels=1,
            n_classes=2,
        )
        if PRETRAINED:
            self.unet.load_state_dict(torch.load(model_path))

        if use_cuda:
            self.unet = self.unet.cuda()
        if SHOW_NET:
            from torchsummary import summary
            # torchsummary expects (channels, H, W) without the batch
            # dimension; the UNet above takes single-channel input.
            summary(self.unet, (1, 500, 500))

        # freeze parameters
        if FREEZE_PARAM:
            freeze_layers = [
                self.unet.down1,
                self.unet.down2,
            ]
            for layer in freeze_layers:
                for param in layer.parameters():
                    param.requires_grad = False

            self.optimizer = torch.optim.SGD(
                filter(lambda p: p.requires_grad, self.unet.parameters()),
                lr=self.hyper_params["learning_rate"],
                momentum=0.99)
        else:
            self.optimizer = torch.optim.SGD(
                self.unet.parameters(),
                lr=self.hyper_params["learning_rate"],
                momentum=0.99)

        self.criterion = criterion if criterion is not None else torch.nn.CrossEntropyLoss()
        self.module_save_dir = module_save_dir
        self.v = Validator(unet=self.unet,
                           hyper_params=hyper_params,
                           use_cuda=use_cuda,
                           data_loader=self.valid_data_loader,
                           USE_EXIST_RES=False,
                           exist_res_dir="")

    def train(self):
        """train the model"""
        epochs = self.hyper_params["epochs"]
        epoch_lapse = self.hyper_params["epoch_lapse"]
        batch_size = self.hyper_params["batch_size"]
        epoch_save = self.hyper_params["epoch_save"]
        width_out, height_out = self.hyper_params["input_size"]

        for epoch in range(epochs):
            total_loss = 0
            for data in tqdm(self.train_data_loader, ascii=True, ncols=120):

                batch_train_x, batch_train_y = data
                batch_train_y = batch_train_y.long()
                # Binarise the mask: any positive label becomes foreground.
                batch_train_y[batch_train_y > 0] = 1
                if len(batch_train_x.size()) == 3:
                    batch_train_x = batch_train_x.unsqueeze(1)
                if len(batch_train_y.size()) == 3:
                    batch_train_y = batch_train_y.unsqueeze(1)

                if self.use_cuda:
                    batch_train_x = batch_train_x.cuda()
                    batch_train_y = batch_train_y.cuda()

                batch_loss = self.train_step(batch_train_x,
                                             batch_train_y,
                                             optimizer=self.optimizer,
                                             criterion=self.criterion,
                                             unet=self.unet,
                                             width_out=width_out,
                                             height_out=height_out,
                                             batch_size=batch_size)

                total_loss += batch_loss

            if (epoch + 1) % epoch_lapse == 0:
                val_acc = self.v.validate()
                print(
                    "Epoch %d: total loss %f, validation accuracy %f"
                    % (epoch + 1, total_loss, val_acc))

            if (epoch + 1) % epoch_save == 0:
                # The +240 presumably continues epoch numbering from an
                # earlier run that this session resumes.
                self.save_module(name_else="epoch-" + str(epoch + 1 + 240))
                print("MODULE SAVED.")
        gc.collect()

    def train_step(self, inputs, labels, optimizer, criterion, unet,
                   batch_size, width_out, height_out):
        optimizer.zero_grad()
        outputs = unet(inputs)
        # Flatten (N, 2, H, W) logits to (N*H*W, 2) and labels to (N*H*W,)
        # so CrossEntropyLoss scores one prediction per pixel.
        outputs = outputs.permute(0, 2, 3, 1)
        outputs = outputs.reshape(batch_size * width_out * height_out, 2)
        labels = labels.reshape(batch_size * width_out * height_out)
        loss = criterion(outputs, labels)

        loss.backward()
        optimizer.step()
        # Return a plain float so the caller's running total does not keep
        # the autograd graph alive.
        return loss.item()

    def save_module(self, name_else=""):
        import datetime
        filename = ('unet-' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') +
                    name_else + '.pth')
        torch.save(self.unet.state_dict(),
                   os.path.join(self.module_save_dir, filename))
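The permute/reshape in train_step is one way to hand per-pixel logits to CrossEntropyLoss; modern PyTorch also accepts (N, C, H, W) logits with (N, H, W) integer targets directly, so the flattening is optional. A quick self-contained equivalence check:

import torch

logits = torch.randn(2, 2, 4, 4)            # (N, C, H, W)
targets = torch.randint(0, 2, (2, 4, 4))    # (N, H, W)
ce = torch.nn.CrossEntropyLoss()

flat = ce(logits.permute(0, 2, 3, 1).reshape(-1, 2), targets.reshape(-1))
native = ce(logits, targets)
assert torch.allclose(flat, native)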
Example #6
import torch
from torch.utils.data import DataLoader


class Tester:
    def __init__(
        self,
        module_path,
        cell_dir,
        mask_dir,
        tmp_dir,
        exist_res_dir,
        hyper_params,
        use_cuda,
        test_rate=1.0,
        use_exist_dataset=False,
        USE_EXIST_RES=True,
    ):

        print("Test rate:", test_rate)
        if USE_EXIST_RES:
            self.dataset = get_test_dataset(
                cell_dir=cell_dir,
                GT_dir=mask_dir,
                res_dir=exist_res_dir,
                valid_rate=1 - test_rate,
                tmp_dir=tmp_dir,
                use_exist_dataset=use_exist_dataset,
                for_test=True)
        else:
            self.dataset, _ = get_test_dataset(
                cell_dir=cell_dir,
                mask_dir=mask_dir,
                valid_rate=1 - test_rate,
                tmp_dir=tmp_dir,
                use_exist_dataset=use_exist_dataset,
            )

        print("test number:", len(self.dataset))

        self.hyper_params = hyper_params
        self.data_loader = DataLoader(
            dataset=self.dataset,
            num_workers=self.hyper_params["threads"],
            batch_size=self.hyper_params["batch_size"],
            shuffle=False)

        self.unet = UNet(
            n_channels=1,
            n_classes=2,
        )
        self.unet.load_state_dict(torch.load(module_path))
        if use_cuda:
            self.unet = self.unet.cuda()

        self.v = Validator(unet=self.unet,
                           hyper_params=self.hyper_params,
                           use_cuda=use_cuda,
                           data_loader=self.data_loader,
                           USE_EXIST_RES=USE_EXIST_RES,
                           exist_res_dir=exist_res_dir)

    def test(self, SHOW_PIC=False, TTA=False):
        return self.v.validate(SHOW_PIC=SHOW_PIC, TTA=TTA)

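A hypothetical way to drive this Tester; the paths are placeholders, and the hyper-parameter keys are the two the constructor reads (the Validator may need more):

hyper_params = {"threads": 2, "batch_size": 4}
tester = Tester(module_path="checkpoints/unet-latest.pth",
                cell_dir="data/cells",
                mask_dir="data/masks",
                tmp_dir="tmp/",
                exist_res_dir="",
                hyper_params=hyper_params,
                use_cuda=torch.cuda.is_available(),
                USE_EXIST_RES=False)
print("accuracy:", tester.test(TTA=True))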