    def __init__(self,
                 model,
                 inputs_train,
                 targets_train,
                 inputs_val,
                 targets_val,
                 to_rgb=False,
                 root_dir=os.path.dirname(__file__)):
        # inputs and targets are pandas DataFrames
        self.model = model

        # CPU copy of the model, used for evaluation
        self.model_eval = copy.deepcopy(self.model)
        self.model_eval.to('cpu')

        self.labels = None
        self.labels_num = None
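        # weighted sampler to rebalance class frequencies (defined as _create_sampler in the full class)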
        self.sampler = self._create_sampler(targets_train.values.astype(int))

        self.m_exporter = ModelExporter('temp', root_dir=root_dir)
        self.model_name = copy.deepcopy(self.model.name)

        # Generators
        self.training_set = Fer2013Dataset(inputs=inputs_train,
                                           targets=targets_train,
                                           to_rgb=to_rgb,
                                           device='cpu')
        self.validation_set = Fer2013Dataset(inputs=inputs_val,
                                             targets=targets_val,
                                             to_rgb=to_rgb,
                                             device='cpu')

        #https://stanford.edu/~shervine/blog/pytorch-how-to-generate-data-parallel
        self.use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda:0" if self.use_cuda else "cpu")
        torch.backends.cudnn.benchmark = True  # faster cuDNN autotuning when input sizes stay constant
        if self.use_cuda:
            print('emptying CUDA cache')
            torch.cuda.empty_cache()

        self.model.to(self.device)

        print(f'use cuda: {self.use_cuda}')
Example #2
    dtype = torch.float

    model_name = f'cnn_triple_layer_D_bs_{learning_rate}_{batch_size}_{n_epochs}_{n_classes}'
    model = CnnTripleLayer(model_name, d_out=n_classes)
    model.train()

    train_classifier = TrainClassifier2(model,
                                        X_train_df,
                                        y_train_df,
                                        X_val_df,
                                        y_val_df,
                                        root_dir=current_working_dir)
    t = time.time()
    trained_model, optimizer, criterion, \
    train_loss_hist, train_acc_hist, train_f1_hist, train_b_hist,\
    val_loss_hist, val_acc_hist, val_f1_hist, val_b_hist = train_classifier.run_train(n_epochs=n_epochs,
                                                                          lr=learning_rate,
                                                                          batch_size=batch_size)
    print(f'trained in {time.time() - t} sec')

    if args.s_model:
        m_exporter = ModelExporter('fer2013_datasetD',
                                   root_dir=current_working_dir)
        m_exporter.save_nn_model(trained_model, optimizer,
                                 trained_model.get_args())
        m_exporter.save_results(f'{model_name}', train_loss_hist,
                                train_acc_hist, train_f1_hist, train_b_hist,
                                val_loss_hist, val_acc_hist, val_f1_hist,
                                val_b_hist)
Example #3
    dtype = torch.float
    device = torch.device("cpu")

    model_name = f'cnn_double_layer_D_bs_{learning_rate}_{batch_size}_{n_epochs}_{n_classes}'
    model = CnnDoubleLayer(model_name, d_out=n_classes)
    model.train()

    train_classifier = TrainClassifier2(model, X_df, y_df)
    t = time.time()
    trained_model, optimizer, criterion, loss_hist, loss_val_hist, f1_val_hist = train_classifier.run_train(
        n_epochs=n_epochs, lr=learning_rate, batch_size=batch_size)
    print(f'trained in {time.time() - t} sec')
    pre.save_results(loss_hist, loss_val_hist, f1_val_hist, f'{model_name}')

    if args.s_model:
        m_exporter = ModelExporter('fer2013_DatasetD')
        m_exporter.save_nn_model(trained_model, optimizer,
                                 trained_model.get_args())

    if args.s_patterns:
        detected_patterns1 = trained_model.get_detected_patterns1()
        for idx in range(10):
            plt.figure(1, figsize=(20, 10))
            for p in range(trained_model.n_patterns1):
                pattern = detected_patterns1[idx][p].reshape(
                    detected_patterns1.shape[2], detected_patterns1.shape[3])
                pattern_np = pattern.detach().numpy().reshape(24, 24)
                plt.subplot(2, 5, 1 + p)
                plt.imshow(pattern_np, cmap='gray', interpolation='none')
            pre.save_plt_as_image(plt, f'patterns_1_{idx}')
Example #4
class TrainClassifier4:
    def __init__(self,
                 model,
                 inputs_train,
                 targets_train,
                 inputs_val,
                 targets_val,
                 root_dir=os.path.dirname(__file__)):
        # inputs and targets are pandas DataFrames
        self.model = model

        # CPU copy of the model, used for evaluation
        self.model_eval = copy.deepcopy(self.model)
        self.model_eval.to('cpu')

        self.labels = None
        self.labels_num = None
        self.sampler = self._create_sampler(targets_train.values.astype(int))

        self.m_exporter = ModelExporter('temp', root_dir=root_dir)
        self.model_name = copy.deepcopy(self.model.name)

        # Generators
        self.training_set = Fer2013Dataset(inputs=inputs_train.values,
                                           targets=targets_train,
                                           device='cpu')
        self.validation_set = Fer2013Dataset(inputs=inputs_val.values,
                                             targets=targets_val,
                                             device='cpu')

        #https://stanford.edu/~shervine/blog/pytorch-how-to-generate-data-parallel
        self.use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda:0" if self.use_cuda else "cpu")
        torch.backends.cudnn.benchmark = True  # faster cuDNN autotuning when input sizes stay constant

        self.model.to(self.device)

        print(f'use cuda: {self.use_cuda}')

    def run_train(self, n_epochs, lr=0.001, batch_size=256):
        print(f'training model: {self.model.name}')

        training_generator = DataLoader(self.training_set,
                                        sampler=self.sampler,
                                        batch_size=batch_size,
                                        num_workers=0)
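        # note: DataLoader forbids shuffle=True together with a sampler; the weighted sampler already randomizes order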

        self.model.train()  # set model to training mode
        # Loss and optimizer
        criterion = torch.nn.CrossEntropyLoss()
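        # CrossEntropyLoss applies log-softmax internally, so the model should output raw logits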
        optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)

        # Train
        train_loss_hist = []
        val_loss_hist = []

        train_acc_hist = []
        val_acc_hist = []

        train_f1_hist = []
        val_f1_hist = []

        train_b_hist = []
        val_b_hist = []

        model_versions = {}

        f = 5  # evaluate, checkpoint, and log every f epochs
        for t in range(n_epochs):
            for i, (batch_x, batch_y) in enumerate(training_generator):

                # move to gpu if possible
                batch_x = batch_x.to(self.device)
                batch_y = batch_y.to(self.device)

                # forward step: compute predictions
                outputs = self.model(batch_x)

                # compute the loss
                loss = criterion(outputs, batch_y)

                # backward step: compute gradients and update the weights
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                del batch_x, batch_y, outputs
                if self.use_cuda:
                    torch.cuda.empty_cache()

                if i % 10 == 0:
                    print('.', end='', flush=True)

            if t % f == 0:
                model_params = copy.deepcopy(self.model.state_dict())
                model_versions[t] = model_params

                train_loss, train_acc, train_f1, val_loss,\
                val_acc, val_f1, train_b, val_b = self._evaluate(model_params, criterion)

                self.model_eval.name = f'{self.model_name}_epoch{t}'
                self.m_exporter.save_nn_model(self.model_eval,
                                              optimizer,
                                              self.model.get_args(),
                                              debug=False)

                train_loss_hist.append(train_loss)
                train_acc_hist.append(train_acc)
                train_f1_hist.append(train_f1)
                train_b_hist.append(train_b)

                val_loss_hist.append(val_loss)
                val_acc_hist.append(val_acc)
                val_f1_hist.append(val_f1)
                val_b_hist.append(val_b)

                print(f'\n{t} loss t: {train_loss:0.3f} v: {val_loss:0.3f} | '
                      f'acc t: {train_acc:0.3f} v: {val_acc:0.3f} | '
                      f'f1 t: {train_f1:0.3f} v: {val_f1:0.3f} | '
                      f'b t: {train_b:0.3f} v: {val_b:0.3f}')

        print('\n#### training finished ####')
        # metrics were recorded every f epochs, so scale history indices back to epoch numbers
        best_iteration_loss = f * val_loss_hist.index(min(val_loss_hist))
        print(f'best epoch by val_loss: {best_iteration_loss}')
        best_iteration_acc = f * val_acc_hist.index(max(val_acc_hist))
        print(f'best epoch by val_acc: {best_iteration_acc}')
        best_iteration_f1 = f * val_f1_hist.index(max(val_f1_hist))
        print(f'best epoch by val_f1: {best_iteration_f1}')
        best_iteration_b = f * val_b_hist.index(max(val_b_hist))
        print(f'best epoch by val_balanced_score: {best_iteration_b}')

        # restore the best model (selected by validation accuracy)
        self.model.load_state_dict(
            state_dict=copy.deepcopy(model_versions[best_iteration_acc]))
        self.model.eval()  # set model to eval mode
        self.model.name = f'{self.model_name}'

        del model_versions

        return self.model, optimizer, criterion,\
               train_loss_hist, train_acc_hist, train_f1_hist, train_b_hist,\
               val_loss_hist, val_acc_hist, val_f1_hist, val_b_hist

    def _create_sampler(self, target_np):
        # weight each sample by the inverse frequency of its class
        self.labels = np.unique(target_np)
        class_sample_count = np.array(
            [len(np.where(target_np == t)[0]) for t in self.labels])
        weight = 1. / class_sample_count
        samples_weight = torch.from_numpy(
            np.array([weight[t] for t in target_np])).double()
        return WeightedRandomSampler(samples_weight,
                                     len(samples_weight),
                                     replacement=True)

    def _evaluate(self, model_param, criterion):
        # evaluate on the CPU: moving the full training set to the GPU
        # exceeds the memory available in this setup
        with torch.no_grad():  # operations inside don't track history
            self.model_eval.load_state_dict(state_dict=model_param)
            self.model_eval.eval()

            #train_prob = self.model_eval(self.training_set.x_data)
            #train_pred = train_prob.argmax(1)
            #train_loss = criterion(train_prob, self.training_set.y_data)
            #train_acc = (train_pred == self.training_set.y_data.long()).float().mean()
            #train_f1 = metrics.f1_score(self.training_set.y_data.long().numpy(), train_pred.numpy(), average='macro')
            #train_m = Metrics(self.training_set.y_data, train_pred, self.labels)
            #train_b = train_m.balanced_score()

            gc.collect()

            val_prob = self.model_eval(self.validation_set.x_data)
            val_pred = val_prob.argmax(1)
            val_loss = criterion(val_prob, self.validation_set.y_data)
            val_acc = (val_pred == self.validation_set.y_data.long()).float().mean()
            val_f1 = metrics.f1_score(self.validation_set.y_data.long().numpy(),
                                      val_pred.numpy(),
                                      average='macro')
            val_m = Metrics(self.validation_set.y_data, val_pred, self.labels)
            val_b = val_m.balanced_score()

            gc.collect()

            # evaluating on the full training set uses too much CPU,
            # so for now the validation metrics are reused as stand-ins
            train_loss = val_loss
            train_acc = val_acc
            train_f1 = val_f1
            train_b = val_b

            return (train_loss.item(), train_acc, train_f1,
                    val_loss.item(), val_acc, val_f1, train_b, val_b)
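
For reference, `_create_sampler` balances classes by weighting each sample with the inverse frequency of its class, so minority classes are drawn as often as majority ones. A minimal standalone sketch of the same idea on toy labels (the values here are illustrative, not from the dataset):

import numpy as np
import torch
from torch.utils.data import WeightedRandomSampler

labels = np.array([0, 0, 0, 0, 1])        # class 1 is rare
counts = np.array([4, 1])                 # samples per class
weights = torch.from_numpy(1.0 / counts[labels]).double()
sampler = WeightedRandomSampler(weights, len(weights), replacement=True)
# each draw now selects class 0 and class 1 with equal probability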
Example #5
    model_name = f'cnn_double_layer_C_bs_{learning_rate}_{batch_size}_{n_epochs}_{n_classes}'
    model = CnnDoubleLayer(model_name, d_out=n_classes)
    model.train()

    train_classifier = TrainClassifier2(model, X_df, y_df)
    t = time.time()
    trained_model, optimizer, criterion, \
    train_loss_hist, train_acc_hist, train_f1_hist, train_b_hist,\
    val_loss_hist, val_acc_hist, val_f1_hist, val_b_hist = train_classifier.run_train(n_epochs=n_epochs,
                                                                          lr=learning_rate,
                                                                          batch_size=batch_size)
    print(f'trained in {time.time() - t} sec')

    if args.s_model:
        m_exporter = ModelExporter('fer2013_DatasetC')
        m_exporter.save_nn_model(trained_model, optimizer,
                                 trained_model.get_args())
        m_exporter.save_results(f'{model_name}', train_loss_hist,
                                train_acc_hist, train_f1_hist, train_b_hist,
                                val_loss_hist, val_acc_hist, val_f1_hist,
                                val_b_hist)

    if args.s_patterns:
        detected_patterns1 = trained_model.get_detected_patterns1()
        for idx in range(10):
            plt.figure(1, figsize=(20, 10))
            for p in range(trained_model.n_patterns1):
                pattern = detected_patterns1[idx][p].reshape(
                    detected_patterns1.shape[2], detected_patterns1.shape[3])
                pattern_np = pattern.detach().numpy().reshape(24, 24)
Example #6
    def run_train(self, n_epochs, lr=0.001, batch_size=256):
        self.lr = lr
        if not self.data_is_prepared:
            self.prepare_data()

        # Loss and optimizer
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)

        # Train
        loss_hist = []
        loss_val_hist = []
        acc_val_hist = []
        f1_val_hist = []
        model_versions = {}

        m_exporter = ModelExporter('temp')
        model_name = copy.deepcopy(self.model.name)

        for t in range(n_epochs):
            for batch in range(0, int(self.N / batch_size)):
                # assemble the mini-batch
                batch_x, batch_y = self.model.get_batch(self.x, self.y, batch, batch_size)

                # forward step: compute predictions
                outputs = self.model(batch_x)

                # compute the loss
                loss = criterion(outputs, batch_y)

                # backward step: compute gradients and update the weights
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            # evaluate, log, and checkpoint every `idx` epochs
            idx = 10
            if t % idx == 0:
                #current_lr = self._get_lr(optimizer)
                #self._set_lr(optimizer, self._update_lr(optimizer, t))
                #print(f'learning_rate: {current_lr}')

                with torch.no_grad():  # evaluation only, no gradients needed
                    outputs = self.model(self.x)
                    loss = criterion(outputs, self.y)
                    loss_hist.append(loss.item())

                    outputs_val = self.model(self.x_val)
                    loss_val = criterion(outputs_val, self.y_val)
                    loss_val_hist.append(loss_val.item())

                    accuracy_train = (outputs.argmax(1) == self.y.long()).float().mean().item()

                    accuracy_val = (outputs_val.argmax(1) == self.y_val.long()).float().mean().item()
                    acc_val_hist.append(accuracy_val)

                    f1_score = metrics.f1_score(self.y_val.long().numpy(),
                                                outputs_val.argmax(1).numpy(),
                                                average='macro')
                    f1_val_hist.append(f1_score)

                model_versions[t] = copy.deepcopy(self.model.state_dict())

                print(f'{t} train_loss: {loss.item():.4f} val_loss: {loss_val.item():.4f} '
                      f'train_acc: {accuracy_train:.4f} val_acc: {accuracy_val:.4f} val_f1: {f1_score:.4f}')

                self.model.name = f'{model_name}_epoch{t}'
                m_exporter.save_nn_model(self.model, optimizer, self.model.get_args(), debug=False)

        # metrics were recorded every `idx` epochs, so scale indices back to epoch numbers
        best_iteration = idx * loss_val_hist.index(min(loss_val_hist))
        print(f'best epoch by val_loss: {best_iteration}')
        best_iteration_f1 = idx * f1_val_hist.index(max(f1_val_hist))
        print(f'best epoch by val_f1: {best_iteration_f1}')
        best_iteration_acc = idx * acc_val_hist.index(max(acc_val_hist))
        print(f'best epoch by val_acc: {best_iteration_acc}')

        # restore the best model (selected by validation loss)
        self.model.load_state_dict(state_dict=model_versions[best_iteration])
        self.model.eval()
        self.model.name = f'{model_name}'

        with torch.no_grad():
            y_pred = self.model(self.x).argmax(1)
            accuracy_soft = (y_pred == self.y.long()).float().mean().item()
        print(f'training accuracy: {accuracy_soft}')


        return self.model, optimizer, criterion, loss_hist, loss_val_hist, f1_val_hist
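
The commented-out `_get_lr` / `_set_lr` / `_update_lr` calls in the loop above hint at a learning-rate schedule, but those helpers are not part of the snippet. A minimal sketch of what they could look like, assuming a step decay whose factor and interval are illustrative choices:

    def _get_lr(self, optimizer):
        # read the current learning rate from the first param group
        return optimizer.param_groups[0]['lr']

    def _set_lr(self, optimizer, lr):
        # write the learning rate into every param group
        for group in optimizer.param_groups:
            group['lr'] = lr

    def _update_lr(self, optimizer, epoch, decay=0.5, step=50):
        # step decay: scale the initial rate down every `step` epochs (illustrative values)
        return self.lr * (decay ** (epoch // step))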
Example #7
                                        X_train_df,
                                        y_train_df,
                                        X_val_df,
                                        y_val_df,
                                        root_dir=script_root_dir)
    t = time.time()
    trained_model, optimizer, criterion, \
    train_loss_hist, train_acc_hist, train_f1_hist, train_b_hist,\
    val_loss_hist, val_acc_hist, val_f1_hist, val_b_hist = train_classifier.run_train(n_epochs=n_epochs,
                                                                          lr=learning_rate,
                                                                          batch_size=batch_size)

    print(f'trained in {time.time() - t} sec')

    if args.s_model:
        m_exporter = ModelExporter('fer2013_reduced', root_dir=script_root_dir)
        m_exporter.save_nn_model(trained_model, optimizer,
                                 trained_model.get_args())
        m_exporter.save_results(f'{model_name}', train_loss_hist,
                                train_acc_hist, train_f1_hist, train_b_hist,
                                val_loss_hist, val_acc_hist, val_f1_hist,
                                val_b_hist)

    if args.s_patterns:
        detected_patterns = trained_model.get_detected_patterns()
        for idx in range(10):
            plt.figure(1, figsize=(20, 10))
            for p in range(trained_model.n_patterns):
                pattern = detected_patterns[idx][p].reshape(
                    detected_patterns.shape[2], detected_patterns.shape[3])
                pattern_np = pattern.detach().numpy().reshape(24, 24)
Example #8
    X_train = model.reshape_data(
        torch.tensor(X_train_df.values, device=device, dtype=dtype))
    y_train = torch.tensor(y_train_df.values, device=device, dtype=torch.long)

    X_pred = trained_model(X_test)
    print(f'test accuracy last trained model {c(X_pred, X_test)}')

    trained_model.load_state_dict(state_dict=best_model_param)
    trained_model.eval()

    X_pred = trained_model(X_test)
    print(f'test accuracy best model {c(X_pred, X_test)}')

    if args.s_model:
        m_exporter = ModelExporter('fer2013_DatasetA')
        m_exporter.save_nn_model(trained_model, opt, 0, n_features_encoded,
                                 n_epochs, trained_model.get_args())

    X_train_encoded = trained_model.encoder(X_train)
    X_test_encoded = trained_model.encoder(X_test)
    X_test_decoded = trained_model.decoder(X_test_encoded)

    X_train_encoded_df = pd.DataFrame(X_train_encoded.detach().numpy())
    X_test_encoded_df = pd.DataFrame(X_test_encoded.detach().numpy())

    cols = list(range(1, n_features_encoded + 1))

    X_train_encoded_df.columns = cols
    X_test_encoded_df.columns = cols
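
Downstream classifiers typically read these encoded features back from disk; a minimal sketch of persisting them, with the output filenames being assumptions rather than paths from the original project:

X_train_encoded_df.to_csv('X_train_encoded.csv', index=False)
X_test_encoded_df.to_csv('X_test_encoded.csv', index=False)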