Example #1
    def fit(self, x, y, x_test=None, y_test=None, time_limit=None):
        x = np.array(x)
        y = np.array(y).flatten()
        validate_xy(x, y)
        y = self.transform_y(y)
        if x_test is None or y_test is None:
            # Divide training data into training and testing data.
            validation_set_size = int(len(y) * Constant.VALIDATION_SET_SIZE)
            validation_set_size = min(validation_set_size, 500)
            validation_set_size = max(validation_set_size, 1)
            x_train, x_test, y_train, y_test = train_test_split(
                x, y, test_size=validation_set_size, random_state=42)
        else:
            x_train = x
            y_train = y
        # Transform x_train
        if self.data_transformer is None:
            self.data_transformer = ImageDataTransformer(x,
                                                         augment=self.augment)

        # Wrap the data into DataLoaders
        train_data = self.data_transformer.transform_train(x_train, y_train)
        test_data = self.data_transformer.transform_test(x_test, y_test)

        # Save the classifier
        pickle_to_file(self, os.path.join(self.path, 'classifier'))

        if time_limit is None:
            time_limit = 24 * 60 * 60

        self.cnn.fit(self.get_n_output_node(), x_train.shape, train_data,
                     test_data, time_limit)
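A minimal usage sketch for a fit() like the one above, assuming an ImageClassifier-style subclass of ImageSupervised (autokeras 0.x API; the import path and data shapes are assumptions, not part of the example):

import numpy as np
from autokeras.image.image_supervised import ImageClassifier  # 0.x path, may vary by version

x = np.random.rand(100, 28, 28, 3)    # 100 RGB images of 28x28 pixels
y = np.random.randint(0, 3, 100)      # labels for 3 classes

clf = ImageClassifier(verbose=True)
clf.fit(x, y, time_limit=60 * 60)     # architecture search for up to one hour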
Example #2
def get_classification_data_loaders():
    x_train = np.random.rand(200, 28, 28, 3)
    y_train = np.random.rand(200, 3)
    x_test = np.random.rand(190, 28, 28, 3)
    y_test = np.random.rand(190, 3)
    data_transformer = ImageDataTransformer(x_train, augment=True)
    train_data = data_transformer.transform_train(x_train, y_train)
    test_data = data_transformer.transform_test(x_test, y_test)
    return train_data, test_data
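The returned objects iterate like PyTorch DataLoaders; a minimal sketch of consuming them (the exact batch shape depends on the transformer's tensor layout, so the shape comment below is an assumption):

train_data, test_data = get_classification_data_loaders()
for inputs, targets in train_data:
    # One batch of image tensors and their targets, e.g. inputs of
    # shape (batch, channels, height, width) after the transforms.
    print(inputs.shape, targets.shape)
    break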
Example #3
File: gan.py  Project: nklarer/autokeras
    def fit(self, x_train):
        """ Train only

        Args:
            x_train: ndarray contained the training data

        Returns:

        """
        # input size stay the same, enable  cudnn optimization
        cudnn.benchmark = True
        self.data_transformer = ImageDataTransformer(x_train,
                                                     augment=self.augment)
        train_dataloader = self.data_transformer.transform_train(x_train)
        GANModelTrainer(self.net_g, self.net_d, train_dataloader,
                        binary_classification_loss, self.verbose,
                        self.gen_training_result).train_model()
Example #4
    def fit(self, x, y, x_test=None, y_test=None, time_limit=None):
        x = np.array(x)

        if len(x.shape) != 0 and len(x[0].shape) == 3:
            if self.verbose:
                print("Preprocessing the images.")
            self.resize_height, self.resize_width = compute_image_resize_params(
                x)
            x = resize_image_data(x, self.resize_height, self.resize_width)
            if x_test is not None:
                x_test = resize_image_data(x_test, self.resize_height,
                                           self.resize_width)
            if self.verbose:
                print("Preprocessing finished.")

        y = np.array(y).flatten()
        validate_xy(x, y)
        y = self.transform_y(y)
        if x_test is None or y_test is None:
            # Divide training data into training and testing data.
            validation_set_size = int(len(y) * Constant.VALIDATION_SET_SIZE)
            validation_set_size = min(validation_set_size, 500)
            validation_set_size = max(validation_set_size, 1)
            x_train, x_test, y_train, y_test = train_test_split(
                x, y, test_size=validation_set_size, random_state=42)
        else:
            x_train = x
            y_train = y
        # Transform x_train
        if self.data_transformer is None:
            self.data_transformer = ImageDataTransformer(x,
                                                         augment=self.augment)

        # Wrap the data into DataLoaders
        train_data = self.data_transformer.transform_train(x_train, y_train)
        test_data = self.data_transformer.transform_test(x_test, y_test)

        # Save the classifier
        pickle_to_file(self, os.path.join(self.path, 'classifier'))

        if time_limit is None:
            time_limit = 24 * 60 * 60

        self.cnn.fit(self.get_n_output_node(), x_train.shape, train_data,
                     test_data, time_limit)
Example #5
    def init_transformer(self, x):
        """Initialize the data transformer if it has not been created yet.

        Args:
            x: The data used to fit the transformer.
        """
        if self.data_transformer is None:
            self.data_transformer = ImageDataTransformer(x,
                                                         augment=self.augment)
Example #6
    def fit(self, x_train, y_train, time_limit=None):
        """Trains the model on the dataset given.

        Args:
            x_train: A numpy.ndarray instance containing the training data,
                or the training data combined with the validation data.
            y_train: A numpy.ndarray instance containing the labels of the training data,
                or the labels of the training data combined with the validation labels.
            time_limit: A dictionary containing the parameters of the ModelTrainer constructor.
        """
        validate_xy(x_train, y_train)
        self.resize_shape = compute_image_resize_params(x_train)
        x_train = self.preprocess(x_train)
        self.y_encoder.fit(y_train)
        y_train = self.transform_y(y_train)
        # Divide training data into training and testing data.
        validation_set_size = int(len(y_train) * Constant.VALIDATION_SET_SIZE)
        validation_set_size = min(validation_set_size, 500)
        validation_set_size = max(validation_set_size, 1)
        x_train_new, x_test, y_train_new, y_test = train_test_split(
            x_train, y_train, test_size=validation_set_size, random_state=42)

        # initialize data_transformer
        self.data_transformer = ImageDataTransformer(x_train_new)
        # Wrap the data into DataLoaders
        train_loader = self.data_transformer.transform_train(
            x_train_new, y_train_new)
        test_loader = self.data_transformer.transform_test(x_test, y_test)

        self.generator = self._init_generator(self.y_encoder.n_classes,
                                              x_train_new.shape[1:])
        graph = self.generator.generate()

        if time_limit is None:
            time_limit = {'max_no_improvement_num': 30}
        _, _1, self.graph = train(None, graph, train_loader, test_loader,
                                  time_limit, self.metric, self.loss,
                                  self.verbose, self.path)
Example #7
    def final_fit(self,
                  x_train,
                  y_train,
                  x_test,
                  y_test,
                  trainer_args=None,
                  retrain=True):
        x_train = self.preprocess(x_train)
        x_test = self.preprocess(x_test)

        self.encoder.fit(y_train)
        y_train = self.encoder.transform(y_train)
        y_test = self.encoder.transform(y_test)

        self.data_transformer = ImageDataTransformer(x_train,
                                                     augment=self.augment)
        train_data = self.data_transformer.transform_train(x_train,
                                                           y_train,
                                                           batch_size=1)
        test_data = self.data_transformer.transform_test(x_test,
                                                         y_test,
                                                         batch_size=1)

        self.net = CnnGenerator(self.encoder.n_classes, x_train.shape[1:]) \
            .generate(model_len=self.Length, model_width=self.Width).produce_model()

        pickle_to_file(self, os.path.join(self.path, 'classifier'))

        self.model_trainer = ModelTrainer(self.net,
                                          path=self.path,
                                          loss_function=classification_loss,
                                          metric=Accuracy,
                                          train_data=train_data,
                                          test_data=test_data,
                                          verbose=True)

        self.model_trainer.train_model(self.Epochs, 3)
        print('Finished Final Fit')
Example #8
File: gan.py  Project: Saiuz/autokeras
    def fit(self, x_train):
        """ Train only

        Args:
            x_train: ndarray contained the training data

        Returns:

        """
        # input size stay the same, enable  cudnn optimization
        cudnn.benchmark = True
        self.data_transformer = ImageDataTransformer(x_train, augment=self.augment)
        train_dataloader = self.data_transformer.transform_train(x_train)
        GANModelTrainer(self.net_g,
                        self.net_d,
                        train_dataloader,
                        binary_classification_loss,
                        self.verbose,
                        self.gen_training_result,
                        device=get_device()).train_model()
Example #9
class PredefinedModel(SingleModelSupervised):
    """The base class for predefined models without architecture search.

    Attributes:
        graph: The graph form of the model.
        y_encoder: The label encoder used by transform_y and inverse_transform_y to encode and
                   decode the labels. For example, if one-hot encoding is needed, y_encoder can
                   be an OneHotEncoder.
        data_transformer: An instance of a transformer to process the data, e.g. ImageDataTransformer.
        verbose: A boolean of whether the search process will be printed to stdout.
        path: A string. The path to a directory, where the intermediate results are saved.
    """

    def __init__(self, y_encoder=OneHotEncoder(), data_transformer=None, verbose=False, path=None):
        super().__init__(verbose, path)
        self.graph = None
        self.generator = None
        self.y_encoder = y_encoder
        self.data_transformer = data_transformer

    @abstractmethod
    def _init_generator(self, n_output_node, input_shape):
        """Initialize the generator to generate the model architecture.

        Args:
            n_output_node: An integer representing the number of output nodes in the final layer.
            input_shape: A tuple describing the shape of a single training entry.
        """
        pass

    @property
    def loss(self):
        return classification_loss

    @property
    def metric(self):
        return Accuracy

    def preprocess(self, x):
        return resize_image_data(x, self.resize_shape)

    def transform_y(self, y_train):
        return self.y_encoder.transform(y_train)

    def inverse_transform_y(self, output):
        return self.y_encoder.inverse_transform(output)

    def fit(self, x, y, trainer_args=None):
        """Trains the model on the dataset given.

        Args:
            x: A numpy.ndarray instance containing the training data or the training data combined with the
               validation data.
            y: A numpy.ndarray instance containing the labels of the training data, or the labels of
               the training data combined with the validation labels.
            trainer_args: A dictionary containing the parameters of the ModelTrainer constructor.
        """
        validate_xy(x, y)
        self.resize_shape = compute_image_resize_params(x)
        x = self.preprocess(x)
        self.y_encoder.fit(y)
        y = self.transform_y(y)
        # Divide training data into training and testing data.
        validation_set_size = int(len(y) * Constant.VALIDATION_SET_SIZE)
        validation_set_size = min(validation_set_size, 500)
        validation_set_size = max(validation_set_size, 1)
        x_train, x_test, y_train, y_test = train_test_split(x, y,
                                                            test_size=validation_set_size,
                                                            random_state=42)

        # initialize data_transformer
        self.data_transformer = ImageDataTransformer(x_train)
        # Wrap the data into DataLoaders
        train_loader = self.data_transformer.transform_train(x_train, y_train)
        test_loader = self.data_transformer.transform_test(x_test, y_test)

        self.generator = self._init_generator(self.y_encoder.n_classes, x_train.shape[1:])
        graph = self.generator.generate()

        if trainer_args is None:
            trainer_args = {'max_no_improvement_num': 30}
        _, _1, self.graph = train(None, graph, train_loader, test_loader,
                                  trainer_args, self.metric, self.loss,
                                  self.verbose, self.path)
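Subclasses only need to supply _init_generator. A minimal sketch of a concrete predefined model, assuming the CnnGenerator used elsewhere on this page (the subclass name is hypothetical):

class PredefinedCnn(PredefinedModel):
    """Sketch: a predefined CNN without architecture search."""

    def _init_generator(self, n_output_node, input_shape):
        # CnnGenerator builds a default convolutional graph for the
        # given number of classes and input shape.
        return CnnGenerator(n_output_node, input_shape)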
Example #10
    def init_transformer(self, x):
        if self.data_transformer is None:
            self.data_transformer = ImageDataTransformer(x,
                                                         augment=self.augment)
Example #11
class ImageSupervised(Supervised):
    """The image classifier class.

    It is used for image classification. It searches convolutional neural network architectures
    for the best configuration for the dataset.

    Attributes:
        path: A path to the directory to save the classifier.
        y_encoder: An instance of OneHotEncoder for `y_train` (array of categorical labels).
        verbose: A boolean value indicating the verbosity mode.
        searcher: An instance of BayesianSearcher. It searches different
            neural architecture to find the best model.
        searcher_args: A dictionary containing the parameters for the searcher's __init__ function.
        augment: A boolean value indicating whether the data needs augmentation. If not defined, the
                value of Constant.DATA_AUGMENTATION (True by default) is used.
    """
    def __init__(self,
                 verbose=False,
                 path=None,
                 resume=False,
                 searcher_args=None,
                 augment=None):
        """Initialize the instance.

        The classifier is loaded from the files in 'path' if the parameter 'resume' is True.
        Otherwise a new one is created.

        Args:
            verbose: A boolean of whether the search process will be printed to stdout.
            path: A string. The path to a directory, where the intermediate results are saved.
            resume: A boolean. If True, the classifier continues from the previous work saved in path.
                Otherwise, the classifier starts a new search.
            augment: A boolean value indicating whether the data needs augmentation. If not defined, the
                value of Constant.DATA_AUGMENTATION (True by default) is used.

        """
        super().__init__(verbose)

        if searcher_args is None:
            searcher_args = {}

        if path is None:
            path = temp_folder_generator()

        self.cnn = CnnModule(self.loss, self.metric, searcher_args, path,
                             verbose)

        if augment is None:
            augment = Constant.DATA_AUGMENTATION

        self.path = path
        if has_file(os.path.join(self.path, 'classifier')) and resume:
            classifier = pickle_from_file(os.path.join(self.path,
                                                       'classifier'))
            self.__dict__ = classifier.__dict__
        else:
            self.y_encoder = None
            self.data_transformer = None
            self.verbose = verbose
            self.augment = augment

    @property
    @abstractmethod
    def metric(self):
        pass

    @property
    @abstractmethod
    def loss(self):
        pass

    def fit(self, x, y, x_test=None, y_test=None, time_limit=None):
        x = np.array(x)
        y = np.array(y).flatten()
        validate_xy(x, y)
        y = self.transform_y(y)
        if x_test is None or y_test is None:
            # Divide training data into training and testing data.
            validation_set_size = int(len(y) * Constant.VALIDATION_SET_SIZE)
            validation_set_size = min(validation_set_size, 500)
            validation_set_size = max(validation_set_size, 1)
            x_train, x_test, y_train, y_test = train_test_split(
                x, y, test_size=validation_set_size, random_state=42)
        else:
            x_train = x
            y_train = y
        # Transform x_train
        if self.data_transformer is None:
            self.data_transformer = ImageDataTransformer(x,
                                                         augment=self.augment)

        # Wrap the data into DataLoaders
        train_data = self.data_transformer.transform_train(x_train, y_train)
        test_data = self.data_transformer.transform_test(x_test, y_test)

        # Save the classifier
        pickle_to_file(self, os.path.join(self.path, 'classifier'))

        if time_limit is None:
            time_limit = 24 * 60 * 60

        self.cnn.fit(self.get_n_output_node(), x_train.shape, train_data,
                     test_data, time_limit)

    @abstractmethod
    def get_n_output_node(self):
        pass

    def transform_y(self, y_train):
        return y_train

    def predict(self, x_test):
        """Return predict results for the testing data.

        Args:
            x_test: An instance of numpy.ndarray containing the testing data.

        Returns:
            A numpy.ndarray containing the results.
        """
        if Constant.LIMIT_MEMORY:
            pass
        test_loader = self.data_transformer.transform_test(x_test)
        model = self.cnn.best_model.produce_model()
        model.eval()

        outputs = []
        with torch.no_grad():
            for index, inputs in enumerate(test_loader):
                outputs.append(model(inputs).numpy())
        output = reduce(lambda x, y: np.concatenate((x, y)), outputs)
        return self.inverse_transform_y(output)

    def inverse_transform_y(self, output):
        return output

    def evaluate(self, x_test, y_test):
        """Return the accuracy score between predict value and `y_test`."""
        y_predict = self.predict(x_test)
        return self.metric().evaluate(y_test, y_predict)

    def final_fit(self,
                  x_train,
                  y_train,
                  x_test,
                  y_test,
                  trainer_args=None,
                  retrain=False):
        """Final training after found the best architecture.

        Args:
            x_train: A numpy.ndarray of training data.
            y_train: A numpy.ndarray of training targets.
            x_test: A numpy.ndarray of testing data.
            y_test: A numpy.ndarray of testing targets.
            trainer_args: A dictionary containing the parameters of the ModelTrainer constructor.
            retrain: A boolean of whether to reinitialize the weights of the model.
        """
        if trainer_args is None:
            trainer_args = {'max_no_improvement_num': 30}

        y_train = self.transform_y(y_train)
        y_test = self.transform_y(y_test)

        train_data = self.data_transformer.transform_train(x_train, y_train)
        test_data = self.data_transformer.transform_test(x_test, y_test)

        self.cnn.final_fit(train_data, test_data, trainer_args, retrain)

    def load_searcher(self):
        return pickle_from_file(os.path.join(self.path, 'searcher'))

    def export_keras_model(self, model_file_name):
        """ Exports the best Keras model to the given filename. """
        self.cnn.best_model.produce_keras_model().save(model_file_name)

    def export_autokeras_model(self, model_file_name):
        """ Creates and Exports the AutoKeras model to the given filename. """
        portable_model = PortableImageSupervised(
            graph=self.cnn.best_model,
            y_encoder=self.y_encoder,
            data_transformer=self.data_transformer,
            metric=self.metric,
            inverse_transform_y_method=self.inverse_transform_y)
        pickle_to_file(portable_model, model_file_name)
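A subclass of ImageSupervised must implement metric, loss and get_n_output_node. A minimal classification sketch, assuming the OneHotEncoder, Accuracy and classification_loss names used elsewhere on this page (the class name is hypothetical):

class SimpleImageClassifier(ImageSupervised):

    @property
    def metric(self):
        return Accuracy

    @property
    def loss(self):
        return classification_loss

    def get_n_output_node(self):
        return self.y_encoder.n_classes

    def transform_y(self, y_train):
        # Fit a one-hot encoder on first use, then encode the labels.
        if self.y_encoder is None:
            self.y_encoder = OneHotEncoder()
            self.y_encoder.fit(y_train)
        return self.y_encoder.transform(y_train)

    def inverse_transform_y(self, output):
        return self.y_encoder.inverse_transform(output)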
Example #12
File: gan.py  Project: ychjiang/autokeras
class DCGAN(Unsupervised):
    """ Deep Convolution Generative Adversary Network
    """

    def __init__(self, nz=100, ngf=32, ndf=32, nc=3, verbose=False, gen_training_result=None,
                 augment=Constant.DATA_AUGMENTATION):
        """
       Args:
            nz: size of the latent z vector
            ngf: of gen filters in first conv layer
            ndf: of discrim filters in first conv layer
            nc: number of input chanel
            verbose: A boolean of whether the search process will be printed to stdout.
            gen_training_result: A tuple of (path, size) to denote where to output the intermediate result with size
            augment: A boolean value indicating whether the data needs augmentation.
        """
        super().__init__(verbose)
        self.nz = nz
        self.ngf = ngf
        self.ndf = ndf
        self.nc = nc
        self.verbose = verbose
        self.gen_training_result = gen_training_result
        self.augment = augment
        self.data_transformer = None
        self.net_d = Discriminator(self.nc, self.ndf)
        self.net_g = Generator(self.nc, self.nz, self.ngf)

    def fit(self, x_train):
        """ Train only

        Args:
            x_train: ndarray contained the training data

        Returns:

        """
        # input size stay the same, enable  cudnn optimization
        cudnn.benchmark = True
        self.data_transformer = ImageDataTransformer(x_train, augment=self.augment)
        train_dataloader = self.data_transformer.transform_train(x_train)
        GANModelTrainer(self.net_g,
                        self.net_d,
                        train_dataloader,
                        binary_classification_loss,
                        self.verbose,
                        self.gen_training_result).train_model()

    def generate(self, input_sample=None):
        if input_sample is None:
            input_sample = torch.randn(self.gen_training_result[1], self.nz, 1, 1, device=get_device())
        if not isinstance(input_sample, torch.Tensor) and \
                isinstance(input_sample, np.ndarray):
            input_sample = torch.from_numpy(input_sample)
        if not isinstance(input_sample, torch.Tensor) and \
                not isinstance(input_sample, np.ndarray):
            raise TypeError("Input should be a torch.tensor or a numpy.ndarray")
        self.net_g.eval()
        with torch.no_grad():
            input_sample = input_sample.to(get_device())
            generated_fake = self.net_g(input_sample)
        vutils.save_image(generated_fake.detach(),
                          '%s/evaluation.png' % self.gen_training_result[0],
                          normalize=True)
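A minimal usage sketch for this DCGAN (the image shape, output directory and sample count are assumptions; the directory is assumed to exist):

x_train = np.random.rand(200, 32, 32, 3)            # 200 RGB training images
dcgan = DCGAN(gen_training_result=('./gan_out', 64))
dcgan.fit(x_train)    # train generator and discriminator
dcgan.generate()      # saves ./gan_out/evaluation.png from 64 random z vectors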
Example #13
File: gan.py  Project: Saiuz/autokeras
class DCGAN(Unsupervised):
    """ Deep Convolution Generative Adversary Network
    """

    def __init__(self, nz=100, ngf=32, ndf=32, nc=3, verbose=False, gen_training_result=None,
                 augment=None):
        """
       Args:
            nz: size of the latent z vector
            ngf: of gen filters in first conv layer
            ndf: of discrim filters in first conv layer
            nc: number of input chanel
            verbose: A boolean of whether the search process will be printed to stdout.
            gen_training_result: A tuple of (path, size) to denote where to output the intermediate result with size
            augment: A boolean value indicating whether the data needs augmentation.
        """
        super().__init__(verbose)
        self.nz = nz
        self.ngf = ngf
        self.ndf = ndf
        self.nc = nc
        self.verbose = verbose
        self.device = get_device()
        self.gen_training_result = gen_training_result
        self.augment = augment if augment is not None else Constant.DATA_AUGMENTATION
        self.data_transformer = None
        self.net_d = Discriminator(self.nc, self.ndf)
        self.net_g = Generator(self.nc, self.nz, self.ngf)

    def fit(self, x_train):
        """ Train only

        Args:
            x_train: ndarray contained the training data

        Returns:

        """
        # input size stay the same, enable  cudnn optimization
        cudnn.benchmark = True
        self.data_transformer = ImageDataTransformer(x_train, augment=self.augment)
        train_dataloader = self.data_transformer.transform_train(x_train)
        GANModelTrainer(self.net_g,
                        self.net_d,
                        train_dataloader,
                        binary_classification_loss,
                        self.verbose,
                        self.gen_training_result,
                        device=get_device()).train_model()

    def generate(self, input_sample=None):
        if input_sample is None:
            input_sample = torch.randn(self.gen_training_result[1], self.nz, 1, 1, device=self.device)
        if not isinstance(input_sample, torch.Tensor) and \
                isinstance(input_sample, np.ndarray):
            input_sample = torch.from_numpy(input_sample)
        if not isinstance(input_sample, torch.Tensor) and \
                not isinstance(input_sample, np.ndarray):
            raise TypeError("Input should be a torch.tensor or a numpy.ndarray")
        self.net_g.eval()
        with torch.no_grad():
            input_sample = input_sample.to(self.device)
            generated_fake = self.net_g(input_sample)
        vutils.save_image(generated_fake.detach(),
                          '%s/evaluation.png' % self.gen_training_result[0],
                          normalize=True)
Example #14
    def fit(self, x_train=None, y_train=None, time_limit=60 * 60 * 6):
        end_time = time.time() + time_limit

        x_train = self.preprocess(x_train)
        x_train, x_valid, y_train, y_valid = train_test_split(x_train,
                                                              y_train,
                                                              test_size=0.25,
                                                              shuffle=True)
        x_train, y_train = np.array(x_train), np.array(y_train)
        x_valid, y_valid = np.array(x_valid), np.array(y_valid)

        self.encoder.fit(y_train)
        y_train = self.encoder.transform(y_train)
        y_valid = self.encoder.transform(y_valid)

        self.data_transformer = ImageDataTransformer(x_train,
                                                     augment=self.augment)
        train_data = self.data_transformer.transform_train(x_train,
                                                           y_train,
                                                           batch_size=1)
        test_data = self.data_transformer.transform_test(x_valid,
                                                         y_valid,
                                                         batch_size=1)

        y_valid = self.encoder.inverse_transform(y_valid)

        visited = set()
        pq = []
        trainingQ = [(self.Length, self.Width, self.Epochs)]
        accuracy = 0.0

        while trainingQ:
            for length, width, epoch in trainingQ:
                if time.time() < end_time and (length, width,
                                               epoch) not in visited:
                    visited.add((length, width, epoch))
                    try:
                        net = CnnGenerator(self.encoder.n_classes, x_train.shape[1:])\
                            .generate(model_len=length, model_width=width).produce_model()

                        model_trainer = ModelTrainer(
                            net,
                            path=self.path,
                            loss_function=classification_loss,
                            metric=Accuracy,
                            train_data=train_data,
                            test_data=test_data,
                            verbose=True)
                        model_trainer.train_model(epoch, 3)

                        outputs = []
                        with torch.no_grad():
                            for index, (inputs, _) in enumerate(test_data):
                                outputs.append(net(inputs).numpy())
                        output = reduce(lambda x, y: np.concatenate((x, y)),
                                        outputs)
                        pred_valid = self.encoder.inverse_transform(output)
                        accu = self.metric().evaluate(y_valid, pred_valid)
                        print("Accuracy: ", accu, " Parameters: ", length,
                              width, epoch)

                        # pq keeps the best `capacity` configurations; accuracy is
                        # negated so the min-heap surfaces the best candidate first.
                        pq.append((-accu, (length, width, epoch)))
                        if len(pq) > self.capacity:
                            heapq.heapify(pq)
                            pq.remove(heapq.nlargest(1, pq)[0])
                        if accu > accuracy:
                            self.Epochs = epoch
                            self.Length = length
                            self.Width = width
                            accuracy = accu
                    except Exception as e:
                        print(e)

            if not pq:
                break
            heapq.heapify(pq)
            _, (nexlen, nexwidth, nexepoch) = heapq.heappop(pq)

            # Create children
            trainingQ = []
            trainingQ.append((nexlen + 1, nexwidth, nexepoch))
            trainingQ.append((nexlen, nexwidth * 2, nexepoch))
            trainingQ.append((nexlen, nexwidth, nexepoch + 5))
            trainingQ.append((nexlen + 2, nexwidth, nexepoch + 3))
            trainingQ.append((nexlen, nexwidth * 2, nexepoch + 2))
            trainingQ.append((nexlen + 1, nexwidth, nexepoch + 3))

        print('Finished Fit')
        print("Optimal Conv3D Network Parameters:")
        print("Number of Layers (Length):", self.Length)
        print("Number of Filters (Width):", self.Width)
        print("Number of Epochs", self.Epochs)
        print()
Example #15
class VideoClassifier(Supervised):
    def __init__(self):
        super().__init__(verbose=False)
        self.labels = None
        self.net = None
        self.augment = None
        self.Length = 3
        self.Width = 4
        self.Epochs = 10
        self.encoder = OneHotEncoder()
        self.path = '../temp'
        self.capacity = 50

    def evaluate(self, x_test, y_test):
        y_predict = self.predict(x_test)
        return self.metric().evaluate(y_test, y_predict)

    def predict(self, x_test):
        classifier = pickle_from_file(os.path.join(self.path, 'classifier'))
        self.__dict__ = classifier.__dict__
        self.net.eval()
        test_data = self.preprocess(x_test)
        test_data = self.data_transformer.transform_test(test_data)
        outputs = []
        with torch.no_grad():
            for index, inputs in enumerate(test_data):
                outputs.append(self.net(inputs).numpy())
        output = reduce(lambda x, y: np.concatenate((x, y)), outputs)
        predicted = self.encoder.inverse_transform(output)
        return predicted

    def fit(self, x_train=None, y_train=None, time_limit=60 * 60 * 6):
        end_time = time.time() + time_limit

        x_train = self.preprocess(x_train)
        x_train, x_valid, y_train, y_valid = train_test_split(x_train,
                                                              y_train,
                                                              test_size=0.25,
                                                              shuffle=True)
        x_train, y_train = np.array(x_train), np.array(y_train)
        x_valid, y_valid = np.array(x_valid), np.array(y_valid)

        self.encoder.fit(y_train)
        y_train = self.encoder.transform(y_train)
        y_valid = self.encoder.transform(y_valid)

        self.data_transformer = ImageDataTransformer(x_train,
                                                     augment=self.augment)
        train_data = self.data_transformer.transform_train(x_train,
                                                           y_train,
                                                           batch_size=1)
        test_data = self.data_transformer.transform_test(x_valid,
                                                         y_valid,
                                                         batch_size=1)

        y_valid = self.encoder.inverse_transform(y_valid)

        visited = set()
        pq = []
        trainingQ = [(self.Length, self.Width, self.Epochs)]
        accuracy = 0.0

        while trainingQ:
            for length, width, epoch in trainingQ:
                if time.time() < end_time and (length, width,
                                               epoch) not in visited:
                    visited.add((length, width, epoch))
                    try:
                        net = CnnGenerator(self.encoder.n_classes, x_train.shape[1:])\
                            .generate(model_len=length, model_width=width).produce_model()

                        model_trainer = ModelTrainer(
                            net,
                            path=self.path,
                            loss_function=classification_loss,
                            metric=Accuracy,
                            train_data=train_data,
                            test_data=test_data,
                            verbose=True)
                        model_trainer.train_model(epoch, 3)

                        outputs = []
                        with torch.no_grad():
                            for index, (inputs, _) in enumerate(test_data):
                                outputs.append(net(inputs).numpy())
                        output = reduce(lambda x, y: np.concatenate((x, y)),
                                        outputs)
                        pred_valid = self.encoder.inverse_transform(output)
                        accu = self.metric().evaluate(y_valid, pred_valid)
                        print("Accuracy: ", accu, " Parameters: ", length,
                              width, epoch)

                        # pq keeps the best `capacity` configurations; accuracy is
                        # negated so the min-heap surfaces the best candidate first.
                        pq.append((-accu, (length, width, epoch)))
                        if len(pq) > self.capacity:
                            heapq.heapify(pq)
                            pq.remove(heapq.nlargest(1, pq)[0])
                        if accu > accuracy:
                            self.Epochs = epoch
                            self.Length = length
                            self.Width = width
                            accuracy = accu
                    except Exception as e:
                        print(e)

            if not pq:
                break
            heapq.heapify(pq)
            _, (nexlen, nexwidth, nexepoch) = heapq.heappop(pq)

            # Create children
            trainingQ = []
            trainingQ.append((nexlen + 1, nexwidth, nexepoch))
            trainingQ.append((nexlen, nexwidth * 2, nexepoch))
            trainingQ.append((nexlen, nexwidth, nexepoch + 5))
            trainingQ.append((nexlen + 2, nexwidth, nexepoch + 3))
            trainingQ.append((nexlen, nexwidth * 2, nexepoch + 2))
            trainingQ.append((nexlen + 1, nexwidth, nexepoch + 3))

        print('Finished Fit')
        print("Optimal Conv3D Network Parameters:")
        print("Number of Layers (Length):", self.Length)
        print("Number of Filters (Width):", self.Width)
        print("Number of Epochs", self.Epochs)
        print()

    def final_fit(self,
                  x_train,
                  y_train,
                  x_test,
                  y_test,
                  trainer_args=None,
                  retrain=True):
        x_train = self.preprocess(x_train)
        x_test = self.preprocess(x_test)

        self.encoder.fit(y_train)
        y_train = self.encoder.transform(y_train)
        y_test = self.encoder.transform(y_test)

        self.data_transformer = ImageDataTransformer(x_train,
                                                     augment=self.augment)
        train_data = self.data_transformer.transform_train(x_train,
                                                           y_train,
                                                           batch_size=1)
        test_data = self.data_transformer.transform_test(x_test,
                                                         y_test,
                                                         batch_size=1)

        self.net = CnnGenerator(self.encoder.n_classes, x_train.shape[1:]) \
            .generate(model_len=self.Length, model_width=self.Width).produce_model()

        pickle_to_file(self, os.path.join(self.path, 'classifier'))

        self.model_trainer = ModelTrainer(self.net,
                                          path=self.path,
                                          loss_function=classification_loss,
                                          metric=Accuracy,
                                          train_data=train_data,
                                          test_data=test_data,
                                          verbose=True)

        self.model_trainer.train_model(self.Epochs, 3)
        print('Finished Final Fit')

    def preprocess(self, data):
        result = []
        for video in data:
            clip = np.array([
                resize(frame, output_shape=(112, 200), preserve_range=True)
                for frame in video
            ])
            clip = clip[:, :, 44:44 + 112, :]  # crop centrally
            result.append(clip)
        result = np.array(result)
        return result

    @property
    def metric(self):
        return Accuracy
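A minimal usage sketch for VideoClassifier; the clip shape (N clips, F frames, H x W pixels, 3 channels) is an assumption consistent with preprocess() above, which resizes frames to 112x200 and center-crops to 112x112:

videos = np.random.rand(20, 16, 120, 200, 3)
labels = np.random.randint(0, 2, 20)

clf = VideoClassifier()
clf.fit(videos, labels, time_limit=60 * 10)    # short search budget
clf.final_fit(videos, labels, videos, labels)  # retrain the best configuration
print(clf.evaluate(videos, labels))            # accuracy on the given data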
Example #16
def transform_y(y_train):
    # Transform y_train with a one-hot encoder.
    y_encoder = OneHotEncoder()
    y_encoder.fit(y_train)
    y_train = y_encoder.transform(y_train)
    return y_train, y_encoder


if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(x_train.shape + (1, ))
    x_test = x_test.reshape(x_test.shape + (1, ))
    y_train, y_encoder = transform_y(y_train)
    y_test, _ = transform_y(y_test)
    cnnModule = CnnModule(loss=classification_loss,
                          metric=Accuracy,
                          searcher_args={},
                          verbose=True)
    # specify the fit args
    data_transformer = ImageDataTransformer(x_train, augment=True)
    train_data = data_transformer.transform_train(x_train, y_train)
    test_data = data_transformer.transform_test(x_test, y_test)
    fit_args = {
        "n_output_node": y_encoder.n_classes,
        "input_shape": x_train.shape,
        "train_data": train_data,
        "test_data": test_data
    }
    cnnModule.fit(n_output_node=fit_args.get("n_output_node"),
                  input_shape=fit_args.get("input_shape"),
                  train_data=fit_args.get("train_data"),
                  test_data=fit_args.get("test_data"),
                  time_limit=24 * 60 * 60)
Example #17
def get_classification_train_data_loaders():
    x_train = np.random.rand(200, 32, 32, 3)
    data_transformer = ImageDataTransformer(x_train, augment=True)
    train_data = data_transformer.transform_train(x_train)
    return train_data
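Without labels, the loader yields input batches only, which is how the GAN fit() snippets above consume transform_train(x_train); a minimal sketch:

train_data = get_classification_train_data_loaders()
for inputs in train_data:
    print(inputs.shape)  # one batch of image tensors
    break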
Example #18
def transform_y(y_train):
    # Transform y_train.
    y_encoder = OneHotEncoder()
    y_encoder.fit(y_train)
    y_train = y_encoder.transform(y_train)
    return y_train, y_encoder


if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(x_train.shape + (1,))
    x_test = x_test.reshape(x_test.shape + (1,))
    y_train, y_encoder = transform_y(y_train)
    y_test, _ = transform_y(y_test)
    cnnModule = CnnModule(loss=classification_loss, metric=Accuracy, searcher_args={}, verbose=True)
    # specify the fit args
    data_transformer = ImageDataTransformer(x_train, augment=True)
    train_data = data_transformer.transform_train(x_train, y_train)
    test_data = data_transformer.transform_test(x_test, y_test)
    fit_args = {
        "n_output_node": y_encoder.n_classes,
        "input_shape": x_train.shape,
        "train_data": train_data,
        "test_data": test_data
    }
    cnnModule.fit(n_output_node=fit_args.get("n_output_node"),
                  input_shape=fit_args.get("input_shape"),
                  train_data=fit_args.get("train_data"),
                  test_data=fit_args.get("test_data"),
                  time_limit=24 * 60 * 60)