Code example #1
    # save the learning history
    learning_history['epoch'].append(epoch + 1)
    learning_history['train_acc'].append(f'{avg_train_acc:.4f}')
    learning_history['train_total_loss'].append(f'{avg_train_loss:.4f}')
    learning_history['train_class_loss'].append(f'{avg_train_class_error:.4e}')
    learning_history['train_ae_loss'].append(f'{avg_train_ae_error:.4e}')
    learning_history['train_error_1_loss'].append(f'{avg_train_error_1:.4e}')
    learning_history['train_error_2_loss'].append(f'{avg_train_error_2:.4e}')
    learning_history['val_acc'].append(f'{avg_val_acc:.4f}')
    learning_history['val_total_loss'].append(f'{avg_val_loss:.4f}')
    learning_history['val_class_loss'].append(f'{avg_val_class_error:.4e}')
    learning_history['val_ae_loss'].append(f'{avg_val_ae_error:.4e}')
    learning_history['val_error_1_loss'].append(f'{avg_val_error_1:.4e}')
    learning_history['val_error_2_loss'].append(f'{avg_val_error_2:.4e}')
    learning_history['test_acc'].append(f'{avg_test_acc:.4f}')
    result_save('./result/csv/train_history_50.csv', learning_history)

    # save model, prototype and ae_out
    if epoch % save_step == 0 or epoch == num_epochs - 1 or (
            epoch > 200 and epoch % final_pruning_step == 0):
        with torch.no_grad():
            parameter_save(
                f'./result/pkl/train_model_epoch{epoch + 1}_{prototype_num}.pkl',
                net)

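            # Recover the spatial side length of the prototype's feature map,
            # assuming class_num channels and a square (f_height == f_width) map.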
            f_width = int(
                math.sqrt(len(net.prototype_feature_vectors[0]) / class_num))
            f_height = int(
                math.sqrt(len(net.prototype_feature_vectors[0]) / class_num))
            prototype_imgs = net.decoder(
                # prototype_imgs = net.cifar_decoder(
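The snippet relies on a learning_history dict of lists and a result_save helper that are not shown here. A minimal sketch of what they could look like, assuming learning_history is a plain dict of lists keyed as in the snippet above and result_save writes it out as a CSV (the repository's actual helpers may differ):

import csv

# Hypothetical sketch: learning_history as a plain dict of lists, with the
# same keys that the training loop above appends to.
learning_history = {
    'epoch': [], 'train_acc': [], 'train_total_loss': [],
    'train_class_loss': [], 'train_ae_loss': [],
    'train_error_1_loss': [], 'train_error_2_loss': [],
    'val_acc': [], 'val_total_loss': [], 'val_class_loss': [],
    'val_ae_loss': [], 'val_error_1_loss': [], 'val_error_2_loss': [],
    'test_acc': [],
}

def result_save(path, history):
    # Hypothetical implementation: write the dict of equal-length lists as a
    # single CSV, with a header row of keys and one data row per epoch.
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(history.keys())
        writer.writerows(zip(*history.values()))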
Code example #2
        if epoch == num_epochs - 1:
            learning_history['epoch'].append(epoch + 1)
            learning_history['train_acc'].append(f'{avg_train_acc:.4f}')
            learning_history['train_total_loss'].append(
                f'{avg_train_loss:.4f}')
            learning_history['train_class_loss'].append(
                f'{avg_train_class_error:.4e}')
            learning_history['train_ae_loss'].append(
                f'{avg_train_ae_error:.4e}')
            learning_history['train_error_1_loss'].append(
                f'{avg_train_error_1:.4e}')
            learning_history['train_error_2_loss'].append(
                f'{avg_train_error_2:.4e}')
            learning_history['test_acc'].append(f'{avg_test_acc:.4f}')
            result_save(
                f'./result/csv/conv_prune_finetune_train_history_{prototype}.csv',
                learning_history)

        # prototype
        if epoch == num_epochs - 1:
            with torch.no_grad():
                parameter_save(
                    f'./result/pkl/conv_prune_finetune_train_model_prune{count}_{prototype}.pkl',
                    train_net)
                f_width = int(
                    math.sqrt(
                        len(train_net.prototype_feature_vectors[1]) /
                        class_num))
                f_height = int(
                    math.sqrt(
                        len(train_net.prototype_feature_vectors[1]) /
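parameter_save is likewise not shown; given the .pkl output paths, a plausible sketch is a thin wrapper around pickle (torch.save(obj, path) would be an equivalent choice). This is hypothetical, not the repository's actual helper:

import pickle

def parameter_save(path, obj):
    # Hypothetical sketch: serialize the whole object (here, the network)
    # to the given path with pickle.
    with open(path, 'wb') as f:
        pickle.dump(obj, f)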
Code example #3
    # save the learning history
    learning_history['epoch'].append(epoch + 1)
    learning_history['train_acc'].append(f'{avg_train_acc:.4f}')
    learning_history['train_total_loss'].append(f'{avg_train_loss:.4f}')
    learning_history['train_class_loss'].append(f'{avg_train_class_error:.4e}')
    learning_history['train_ae_loss'].append(f'{avg_train_ae_error:.4e}')
    learning_history['train_error_1_loss'].append(f'{avg_train_error_1:.4e}')
    learning_history['train_error_2_loss'].append(f'{avg_train_error_2:.4e}')
    learning_history['val_acc'].append(f'{avg_val_acc:.4f}')
    learning_history['val_total_loss'].append(f'{avg_val_loss:.4f}')
    learning_history['val_class_loss'].append(f'{avg_val_class_error:.4e}')
    learning_history['val_ae_loss'].append(f'{avg_val_ae_error:.4e}')
    learning_history['val_error_1_loss'].append(f'{avg_val_error_1:.4e}')
    learning_history['val_error_2_loss'].append(f'{avg_val_error_2:.4e}')
    learning_history['test_acc'].append(f'{avg_test_acc:.4f}')
    result_save(f'./result/csv/train_history_{prototype_num}.csv',
                learning_history)

    # save model, prototype and ae_out
    if epoch % save_step == 0 or epoch == num_epochs - 1 or (
            epoch > 200 and epoch % final_pruning_step == 0):
        with torch.no_grad():
            parameter_save(
                f'./result/pkl/train_model_epoch{epoch + 1}_{prototype_num}.pkl',
                net)

            f_width = int(
                math.sqrt(len(net.prototype_feature_vectors[1]) / class_num))
            f_height = int(
                math.sqrt(len(net.prototype_feature_vectors[1]) / class_num))
            prototype_imgs = net.decoder(
                # prototype_imgs = net.cifar_decoder(