plot_matrix_score
import tensorflow as tf
# set_session is the Keras TF1 backend helper; load_hits, TransTransformer and
# create_wide_residual_network are assumed to be helpers from this repository's modules.
from keras.backend.tensorflow_backend import set_session

if __name__ == "__main__":
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    sess = tf.Session(config=config)
    set_session(sess)

    single_class_ind = 1

    (x_train, y_train), (x_val,
                         y_val), (x_test,
                                  y_test) = load_hits(n_samples_by_class=10000,
                                                      test_size=0.20,
                                                      val_size=0.10,
                                                      return_val=True)
    print(x_train.shape)
    print(x_val.shape)
    print(x_test.shape)

    transformer = TransTransformer(8, 8)
    n, k = (10, 4)

    mdl = create_wide_residual_network(input_shape=x_train.shape[1:],
                                       num_classes=transformer.n_transforms,
                                       depth=n,
                                       widen_factor=k)
    mdl.compile(optimizer='adam',
                loss='categorical_crossentropy',
                metrics=['acc'])
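The ConfigProto/Session/set_session block above is TensorFlow 1.x style. As a rough sketch (not part of the original script), the equivalent memory-growth setup on TensorFlow 2.x would look like this:

import tensorflow as tf

# Sketch only: TF 2.x equivalent of allow_growth, applied per visible GPU.
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)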
Example #2
import tensorflow as tf
from tqdm import tqdm
from scripts.detached_transformer_od_hits import plot_histogram_disc_loss_acc_thr, \
  dirichlet_normality_score, fixed_point_dirichlet_mle, calc_approx_alpha_sum
import matplotlib.pyplot as plt
# set_session is the Keras TF1 backend helper; load_hits, TransTransformer and
# create_simple_network are assumed to come from the same repository's modules.
from keras.backend.tensorflow_backend import set_session

if __name__ == "__main__":
  config = tf.ConfigProto()
  config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
  sess = tf.Session(config=config)
  set_session(sess)

  single_class_ind = 1

  (x_train, y_train), (x_val, y_val), (x_test, y_test) = load_hits(n_samples_by_class=16000,
                                                   test_size=0.25,
                                                   val_size=0.125, return_val=True)
  print(x_train.shape)
  print(x_val.shape)
  print(x_test.shape)

  transformer = TransTransformer(8, 8)
  # n, k = (10, 4)
  #
  # mdl = create_wide_residual_network(input_shape=x_train.shape[1:],
  #                                    num_classes=transformer.n_transforms,
  #                                    depth=n, widen_factor=k)

  mdl = create_simple_network(input_shape=x_train.shape[1:],
                              num_classes=2, dropout_rate=0.5)
  mdl.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['acc'])
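The Dirichlet helpers imported at the top of this example (calc_approx_alpha_sum, fixed_point_dirichlet_mle, dirichlet_normality_score) are typically chained as in the sketch below to turn softmax outputs into normality scores. The call signatures and the x_eval name are assumptions, not code from the original script:

import numpy as np

# Assumed usage: fit Dirichlet parameters on in-distribution softmax outputs,
# then score new samples (a higher score means "more normal").
observed_train = mdl.predict(x_train, batch_size=64)
observed_eval = mdl.predict(x_eval, batch_size=64)  # x_eval is hypothetical

log_p_hat_train = np.log(observed_train).mean(axis=0)
alpha_sum_approx = calc_approx_alpha_sum(observed_train)
alpha_0 = observed_train.mean(axis=0) * alpha_sum_approx

mle_alpha = fixed_point_dirichlet_mle(alpha_0, log_p_hat_train)
scores = dirichlet_normality_score(mle_alpha, observed_eval)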
Example #3

if __name__ == "__main__":
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    sess = tf.Session(config=config)
    set_session(sess)
    save_path = '../results/Transforms_hits'
    check_paths(save_path)

    single_class_ind = 1

    (x_train, y_train), (x_val,
                         y_val), (x_test,
                                  y_test) = load_hits(n_samples_by_class=10000,
                                                      test_size=0.20,
                                                      val_size=0.10,
                                                      return_val=True,
                                                      channels_to_get=[2])
    print(x_train.shape)
    print(x_val.shape)
    print(x_test.shape)

    transformer = Transformer(8, 8)
    n, k = (10, 4)

    mdl = create_wide_residual_network(input_shape=x_train.shape[1:],
                                       num_classes=transformer.n_transforms,
                                       depth=n,
                                       widen_factor=k)
    mdl.compile(optimizer='adam',
                loss='categorical_crossentropy',
                metrics=['acc'])
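The snippet stops at compile. Below is a hedged sketch of the usual next step in these scripts: training the network to predict which of the transformer.n_transforms transformations was applied to each image. It assumes the transformer exposes a transform_batch(images, transformation_indices) method, which is not shown in the snippet:

import numpy as np
from keras.utils import to_categorical

# Assumed API: transformer.transform_batch(images, transformation_indices).
# Each training image appears once per transformation; the label is the index
# of the transformation that was applied to it.
transformation_inds = np.tile(np.arange(transformer.n_transforms), len(x_train))
x_train_transformed = transformer.transform_batch(
    np.repeat(x_train, transformer.n_transforms, axis=0), transformation_inds)

mdl.fit(x=x_train_transformed,
        y=to_categorical(transformation_inds),
        batch_size=128,
        epochs=2,
        validation_split=0.1)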