Code Example #1
 def __init__(self, batch_size=32):
     # Seed the PyTorch RNG when a seed is configured, for reproducible runs.
     if params['RANDOM_SEED']:
         torch.manual_seed(int(params['RANDOM_SEED']))
     self.batch_size = batch_size
     # Track MSE for both training and testing, and keep per-run logs.
     self.measures = ['mse']
     self.train_loss, self.test_loss = stats('mse'), stats('mse')
     self.validation_log, self.test_log = [], []
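All of these excerpts lean on a stats(...) loss tracker whose definition is not shown on this page. The sketch below is only a guess at the interface the snippets assume: getLoss() appears in the code above, while the constructor handling and the update() helper are assumptions made for illustration.

    from collections import defaultdict

    class stats:
        """Running-average loss tracker (a minimal sketch of the assumed interface)."""

        def __init__(self, default_measure):
            self.default_measure = default_measure
            self._totals = defaultdict(float)
            self._counts = defaultdict(int)

        def update(self, measure, value, n=1):
            # Hypothetical accumulator; the real project may update differently.
            self._totals[measure] += value * n
            self._counts[measure] += n

        def getLoss(self, measure):
            # Mean of the values accumulated so far for this measure (0.0 if none yet).
            if self._counts[measure] == 0:
                return 0.0
            return self._totals[measure] / self._counts[measure]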
Code Example #2
 def __init__(self, fcn_layers, conv_layers):
     super(ClassificationNet, self).__init__()
     self.model = eval(params['NETWORK_MODEL'])(fcn_layers=fcn_layers, conv_layers=conv_layers)
     self.criterion = F.cross_entropy
     self.optimizer = optim.SGD(self.model.parameters(), lr=params['LEARNING_RATE'], momentum=params['MOMENTUM'])
     self.measures = ['nll', 'accuracy', 'top5']
     self.train_loss, self.test_loss = stats('nll'), stats('accuracy')
     self.log_model()
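eval(params['NETWORK_MODEL']) resolves the configured model name to a class before calling it. If you adapt this code, a plain name lookup avoids evaluating arbitrary configuration strings; the sketch below is an alternative pattern, not what fx-cnn itself does, and build_model is a name chosen here for illustration.

    def build_model(name, fcn_layers, conv_layers):
        # Resolve the class by name in the current module instead of eval-ing a string.
        cls = globals().get(name)
        if cls is None:
            raise ValueError("Unknown NETWORK_MODEL: {!r}".format(name))
        return cls(fcn_layers=fcn_layers, conv_layers=conv_layers)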
Code Example #3
 def __init__(self, fcn_layers, conv_layers, batch_size=32):
     super(RegressionNet, self).__init__(batch_size=batch_size)
     self.model = eval(params['NETWORK_MODEL'])(fcn_layers=fcn_layers, conv_layers=conv_layers)
     self.criterion = F.mse_loss
     self.optimizer = optim.SGD(self.model.parameters(), lr=params['LEARNING_RATE'], momentum=params['MOMENTUM'])
     self.measures = ['mse', 'mse_rnk']
     self.train_loss, self.test_loss = stats('mse'), stats('mse_rnk')
     self.log_model()
Code Example #4
File: image_regression.py  Project: tony-spl/fx-cnn
    def evaluate(self, ind, **kwargs):
        # ind.phenotype will be a string, including function definitions etc.
        # When we exec it, it will create a value XXX_output_XXX, but we exec
        # inside an empty dict for safety.

        p, d = ind.phenotype, {}

        genome, output, invalid, max_depth, nodes = ind.tree.get_tree_info(params['BNF_GRAMMAR'].non_terminals.keys(), [], [])
        Logger.log("Depth: {0}\tGenome: {1}".format(max_depth, genome))

        # Apply the evolved image-processing pipeline to the train and test images.
        processed_train = ImageProcessor.process_images(self.X_train, ind.tree)
        processed_test = ImageProcessor.process_images(self.X_test, ind.tree)

        init_size = ImageProcessor.image.shape[0]*ImageProcessor.image.shape[1]*ImageProcessor.image.shape[2]

        train_loss = stats('mse')
        test_loss = stats('mse_rnk')
        kf = KFold(n_splits=params['CROSS_VALIDATION_SPLIT'])
        net = RegressionNet([init_size, 9600, 1200, 1])
        fitness, fold = 0, 1

        for train_index, val_index in kf.split(processed_train):
            X_train, X_val = processed_train[train_index], processed_train[val_index]
            y_train, y_val = self.y_train[train_index], self.y_train[val_index]
            for epoch in range(1, params['NUM_EPOCHS'] + 1):
                net.train(epoch, X_train, y_train, train_loss)
                if epoch % 5 == 0:
                    Logger.log("Epoch {}\tTraining loss (MSE): {:.6f}".format(epoch, train_loss.getLoss('mse')))
            net.test(X_val, y_val, test_loss)
            fitness += test_loss.getLoss('mse_rnk')
            Logger.log("Cross Validation [Fold {}/{}] (MSE/MSE_RNK): {:.6f} {:.6f}".format(fold, kf.get_n_splits(), test_loss.getLoss('mse'), test_loss.getLoss('mse_rnk')))
            fold = fold + 1
        fitness /= kf.get_n_splits()
        ind.stats = test_loss

        net.test(processed_test, self.y_test, test_loss)
        Logger.log("Generalization Loss (MSE/MSE_RNK): {:.6f} {:.6f}".format(test_loss.getLoss('mse'), test_loss.getLoss('mse_rnk')))
        params['CURRENT_EVALUATION'] += 1
        return fitness
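The fitness returned above is the mean of a per-fold score over a KFold split. The helper below is a self-contained restatement of that pattern; cross_validated_fitness and train_and_score are names invented here, and the real evaluate() additionally trains the RegressionNet and logs per-fold losses.

    from sklearn.model_selection import KFold

    def cross_validated_fitness(X, y, train_and_score, n_splits=5):
        """Average a per-fold score over a KFold split, as evaluate() does above."""
        kf = KFold(n_splits=n_splits)
        fitness = 0.0
        for train_index, val_index in kf.split(X):
            # train_and_score is expected to fit on the training fold and
            # return a scalar score on the validation fold.
            fitness += train_and_score(X[train_index], y[train_index],
                                       X[val_index], y[val_index])
        return fitness / kf.get_n_splits()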
Code Example #5
    def evaluate(self, ind, **kwargs):
        # ind.phenotype will be a string, including function definitions etc.
        # When we exec it, it will create a value XXX_output_XXX, but we exec
        # inside an empty dict for safety.

        p, d = ind.phenotype, {}

        genome, output, invalid, max_depth, nodes = ind.tree.get_tree_info(params['BNF_GRAMMAR'].non_terminals.keys(), [], [])
        Logger.log("Depth: {0}\tGenome: {1}".format(max_depth, genome))

        # Apply the evolved image-processing pipeline to the train and test images.
        Logger.log("Processing Pipeline Start: {} images...".format(len(self.X_train)+len(self.X_test)))
        processed_train = ImageProcessor.process_images(self.X_train, ind, resize=self.resize)
        processed_test = ImageProcessor.process_images(self.X_test, ind, resize=self.resize)

        image = ImageProcessor.image
        init_size = image.shape[0]*image.shape[1]*image.shape[2]

        train_loss = stats('mse')
        test_loss = stats('accuracy')
        kf, freq = KFold(n_splits=params['CROSS_VALIDATION_SPLIT']), params["EPOCH_FREQ"]
        net = ClassificationNet(self.layers)
        fitness, early_stop, fold = 0, 0, 1

        Logger.log("Training Start: ")
        for train_index, val_index in kf.split(processed_train):
            X_train, X_val = processed_train[train_index], processed_train[val_index]
            y_train, y_val = self.y_train[train_index], self.y_train[val_index]
            data_train = DataIterator(X_train, y_train, params['BATCH_SIZE'])
            prev, early_stop = 0, 0
            for epoch in range(1, params['NUM_EPOCHS'] + 1):
                batch = 0
                for x, y in data_train:
                    net.train(epoch, x, y, train_loss)
                    batch += 1
                    # if batch % 10 == 0:
                    #     Logger.log("Batch {}/{}".format(batch, data_train.num_splits))
                if epoch % freq == 0:
                    Logger.log("Epoch {}\tTraining loss (NLL): {:.6f}".format(epoch, train_loss.getLoss('mse')))
                if abs(prev - train_loss.getLoss('mse')) < 1e-6:
                    early_stop += 1
                    if early_stop > 10:
                        Logger.log("Early stopping at epoch {}".format(epoch))
                        break
                else:
                    early_stop = 0
                    prev = train_loss.getLoss('mse')
                if epoch % freq == 0:
                    net.test(X_val, y_val, test_loss)
                    Logger.log("Epoch {}\tTest loss (NLL): {:.6f} {:.6f}".format(epoch, test_loss.getLoss('mse'), test_loss.getLoss('accuracy')))
            net.test(X_val, y_val, test_loss)
            fitness += test_loss.getLoss('accuracy')
            Logger.log("Cross Validation [Fold {}/{}] (MSE/Accuracy): {:.6f} {:.6f}".format(fold, kf.get_n_splits(), test_loss.getLoss('mse'), test_loss.getLoss('accuracy')))
            fold = fold + 1
        fitness /= kf.get_n_splits()

        net.test(processed_test, self.y_test, test_loss)
        #ind.net = net
        Logger.log("Generalization Loss (MSE/Accuracy): {:.6f} {:.6f}".format(test_loss.getLoss('mse'), test_loss.getLoss('accuracy')))
        params['CURRENT_EVALUATION'] += 1
        return fitness
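The inner epoch loop stops training once the training loss has changed by less than 1e-6 for more than ten consecutive epochs. The function below condenses that plateau check; the name should_stop and the return convention are choices made here for illustration.

    def should_stop(prev_loss, current_loss, streak, epsilon=1e-6, patience=10):
        """One-epoch update of the plateau-based early stop used above.

        Returns (stop, new_streak, new_prev_loss).
        """
        if abs(prev_loss - current_loss) < epsilon:
            streak += 1
            return streak > patience, streak, prev_loss
        return False, 0, current_loss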
Code Example #6
    def evaluate(self, ind, **kwargs):
        # ind.phenotype will be a string, including function definitions etc.
        # When we exec it, it will create a value XXX_output_XXX, but we exec
        # inside an empty dict for safety.

        p, d = ind.phenotype, {}

        genome, output, invalid, max_depth, nodes = ind.tree.get_tree_info(
            params['BNF_GRAMMAR'].non_terminals.keys(), [], [])
        Logger.log("Depth: {0}\tGenome: {1}".format(max_depth, genome))

        # Process the evolved individual to derive the new convolution kernel sizes.
        X_test, y_test = self.X_test, self.y_test
        image_size = X_test[0].shape
        flat_ind, kernel_size = NetworkProcessor.process_network(
            ind, image_size)
        Logger.log("Individual: {}".format(flat_ind))
        Logger.log("New kernel size: {}".format(kernel_size))

        new_conv_layers = []
        for i, k in enumerate(self.conv_layers):
            new_conv_layers.append((k[0], kernel_size[i], k[2], k[3], k[4]))

        train_loss = stats('mse')
        test_loss = stats('accuracy')
        kf = KFold(n_splits=params['CROSS_VALIDATION_SPLIT'])
        net = ClassificationNet(self.fcn_layers, new_conv_layers)
        fitness, fold = 0, 1

        Logger.log("Training Start: ")

        # Cross validation
        s_time = np.empty((kf.get_n_splits()))
        validation_acc = np.empty((kf.get_n_splits()))
        test_acc = np.empty((kf.get_n_splits()))
        for train_index, val_index in kf.split(self.X_train):
            X_train, X_val = self.X_train[train_index], self.X_train[val_index]
            y_train, y_val = self.y_train[train_index], self.y_train[val_index]
            data_train = DataIterator(X_train, y_train, params['BATCH_SIZE'])
            early_ckpt = 20
            early_stop, early_crit = [], params['EARLY_STOP_FREQ']
            epsilon = params['EARLY_STOP_EPSILON']
            s_time[fold - 1] = time.time()

            # Train model
            net.model.reinitialize_params()
            for epoch in range(1, params['NUM_EPOCHS'] + 1):
                # mini-batch training
                for x, y in data_train:
                    net.train(epoch, x, y, train_loss)

                # log training loss
                if epoch % params['TRAIN_FREQ'] == 0:
                    Logger.log("Epoch {} Training loss (NLL): {:.6f}".format(
                        epoch, train_loss.getLoss('mse')))

                # log validation/test loss
                if epoch % params['VALIDATION_FREQ'] == 0:
                    net.test(X_val, y_val, test_loss)
                    Logger.log(
                        "Epoch {} Validation loss (NLL/Accuracy): {:.6f} {:.6f}"
                        .format(epoch, test_loss.getLoss('mse'),
                                test_loss.getLoss('accuracy')))
                    net.test(X_test, y_test, test_loss)
                    Logger.log(
                        "Epoch {} Test loss (NLL/Accuracy): {:.6f} {:.6f}".
                        format(epoch, test_loss.getLoss('mse'),
                               test_loss.getLoss('accuracy')))

                # check for early stop
                if epoch == early_ckpt:
                    accuracy = net.test(X_test,
                                        y_test,
                                        test_loss,
                                        print_confusion=True)
                    early_stop.append(accuracy)
                    if len(early_stop) > 3:
                        latest_acc = early_stop[-early_crit:]
                        latest_acc = np.subtract(latest_acc,
                                                 latest_acc[1:] + [0])
                        if (abs(latest_acc[:-1]) < epsilon).all():
                            Logger.log(
                                "Early stopping at epoch {} (latest {} ckpts): {}"
                                .format(
                                    epoch, early_crit, " ".join([
                                        "{:.4f}".format(x)
                                        for x in early_stop[-early_crit:]
                                    ])))
                            break
                    early_ckpt = min(early_ckpt + 300, early_ckpt * 2)

            # Validate model
            net.test(X_val, y_val, test_loss)
            validation_acc[fold - 1] = test_loss.getLoss('accuracy')
            Logger.log(
                "Cross Validation [Fold {}/{}] Validation (NLL/Accuracy): {:.6f} {:.6f}"
                .format(fold, kf.get_n_splits(), test_loss.getLoss('mse'),
                        test_loss.getLoss('accuracy')))

            # Test model
            net.test(X_test, y_test, test_loss)
            test_acc[fold - 1] = test_loss.getLoss('accuracy')
            Logger.log(
                "Cross Validation [Fold {}/{}] Test (NLL/Accuracy): {:.6f} {:.6f}"
                .format(fold, kf.get_n_splits(), test_loss.getLoss('mse'),
                        test_loss.getLoss('accuracy')))

            # Calculate time
            s_time[fold - 1] = time.time() - s_time[fold - 1]
            Logger.log(
                "Cross Validation [Fold {}/{}] Training Time (m / m per epoch): {:.3f} {:.3f}"
                .format(fold, kf.get_n_splits(), s_time[fold - 1] / 60,
                        s_time[fold - 1] / 60 / epoch))

            fold = fold + 1

        fitness = validation_acc.mean()

        for i in range(0, kf.get_n_splits()):
            Logger.log(
                "STAT -- Model[{}/{}] #{:.3f}m Validation / Generalization accuracy (%): {:.4f} {:.4f}"
                .format(i, kf.get_n_splits(), s_time[i] / 60,
                        validation_acc[i] * 100, test_acc[i] * 100))
        Logger.log(
            "STAT -- Mean Validation / Generatlization accuracy (%): {:.4f} {:.4f}"
            .format(validation_acc.mean() * 100,
                    test_acc.mean() * 100))
        # ind.net = net
        params['CURRENT_EVALUATION'] += 1
        return fitness
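The early-stop test in this version only runs at a sparse set of checkpoint epochs: starting at epoch 20, each checkpoint doubles until the increment is capped at 300 epochs (early_ckpt = min(early_ckpt + 300, early_ckpt * 2)). The generator below reproduces that schedule; its name and signature are invented here.

    def early_stop_checkpoints(num_epochs, start=20, cap=300):
        # Yields the epochs at which the accuracy-based early-stop check fires.
        ckpt = start
        while ckpt <= num_epochs:
            yield ckpt
            ckpt = min(ckpt + cap, ckpt * 2)

    # For example: list(early_stop_checkpoints(1000)) -> [20, 40, 80, 160, 320, 620, 920]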