print("=" * 50) print("Number of Training Examples: %d/%d" % (len(train_loader.sampler), len(indices))) print("Number of Validation Examples: %d/%d" % (len(valid_loader.sampler), len(indices))) print("Number of Test Examples: %d/%d" % (len(test_loader.sampler), len(indices))) print("=" * 50, "\n") data_loaders = {"train": train_loader, "valid": valid_loader} ### latent_dim = 2048 CAE = Model(z_dim=latent_dim) CAE = nn.DataParallel(CAE).cuda() ### Training n_epochs = 200 optimiser = optim.Adam(CAE.parameters(), lr=0.0001, weight_decay=1e-6, amsgrad=True) ### best_val_loss = np.infty
print("\n") print("Data Statistics") print("=" * 50) print("Number of Training Examples: %d/%d" % (len(train_loader.sampler), len(indices))) print("Number of Validation Examples: %d/%d" % (len(valid_loader.sampler), len(indices))) print("Number of Test Examples: %d/%d" % (len(test_loader.sampler), len(indices))) print("=" * 50, "\n") data_loaders = {"train": train_loader, "valid": valid_loader} ### CAE = Model(z_dim=2048) CAE = nn.DataParallel(CAE).cuda() CAE.load_state_dict(torch.load("./models/BroadCAE.pth")) ### NN = NeuralNet() NN = nn.DataParallel(NN).cuda() NN.load_state_dict(torch.load("./models/PretrainedNeuralNetwork.pth")) ###
SilenceCorpus=None)
encoder = train_corpus.encoder
decoder = train_corpus.decoder
num_classes = len(decoder) - 1

# set_graph Graph
batch_size = batch_parameters.batch_size
is_training = cfg.is_training
max_gradient = cfg.max_gradient
training_iters = train_corpus.len
cnn_model = Model(cfg)

graph = tf.Graph()
with graph.as_default():
    # tf Graph input
    tf.set_random_seed(cfg.tf_seed)
    with tf.name_scope("Input"):
        x = tf.placeholder(tf.float32, shape=batch_parameters.input_shape, name="input")
        y = tf.placeholder(tf.int64, shape=(None,), name="label")
        keep_prob = tf.placeholder(tf.float32, name="dropout")

    with tf.variable_scope('logit'):
        logits = cnn_model.calc_logits(x, keep_prob, num_classes)
        predictions = tf.nn.softmax(logits)
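    # Sketch of the loss and training ops that would typically follow, assuming
    # TF 1.x, a sparse softmax cross-entropy on the integer labels y, and Adam
    # with gradients clipped to the max_gradient value read from cfg;
    # cfg.learning_rate is an assumed attribute.
    with tf.name_scope("Loss"):
        cost = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
    with tf.name_scope("Optimizer"):
        optimizer = tf.train.AdamOptimizer(learning_rate=cfg.learning_rate)
        gradients, variables = zip(*optimizer.compute_gradients(cost))
        gradients, _ = tf.clip_by_global_norm(gradients, max_gradient)
        train_op = optimizer.apply_gradients(zip(gradients, variables))
    with tf.name_scope("Accuracy"):
        correct_pred = tf.equal(tf.argmax(logits, 1), y)
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))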
cfg = Config()
silence_corpus = SoundCorpus(cfg.soundcorpus_dir, mode='silence')
test_corpus = SoundCorpus(cfg.soundcorpus_dir, mode='own_test', fn='own_test.p.soundcorpus.p')
silence_classifier = SilenceDetector()
batch = [item for item in test_corpus]
decoder = silence_corpus.decoder
encoder = silence_corpus.encoder
num_classes = len(decoder) - 1
model = Model(cfg)

# set_graph Graph
# batch_size = cfg.batch_size
is_training = cfg.is_training

graph = tf.Graph()
with graph.as_default():
    # tf Graph input
    tf.set_random_seed(3)
    with tf.name_scope("Input"):
        x = tf.placeholder(tf.float32, shape=(None, 99, 13, 3), name="input")
        # x.set_shape([batch_size, size])
        # y = tf.placeholder(tf.int64, shape=(None,), name="input")
        keep_prob = tf.placeholder(tf.float32, name="dropout")
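    # Sketch of the inference graph and session loop that would follow, assuming
    # the same calc_logits() call as in the training script, a tf.train.Saver
    # checkpoint at a hypothetical cfg.checkpoint_path, a SilenceDetector with a
    # hypothetical is_silence() method, (features, fname) items in the batch, and
    # a decoder mapping class indices to labels; all of these are assumptions.
    with tf.variable_scope('logit'):
        logits = model.calc_logits(x, keep_prob, num_classes)
        predictions = tf.nn.softmax(logits)
    saver = tf.train.Saver()

with tf.Session(graph=graph) as sess:
    saver.restore(sess, cfg.checkpoint_path)          # hypothetical attribute
    for features, fname in batch:                     # assumed item structure
        if silence_classifier.is_silence(features):   # hypothetical API
            label = 'silence'
        else:
            probs = sess.run(predictions, feed_dict={x: [features], keep_prob: 1.0})
            label = decoder[int(probs.argmax())]      # decoder assumed to map index -> label
        print(fname, label)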
print("=" * 50) print("Number of Training Examples: %d/%d" % (len(train_loader.sampler), len(indices))) print("Number of Validation Examples: %d/%d" % (len(valid_loader.sampler), len(indices))) print("Number of Test Examples: %d/%d" % (len(test_loader.sampler), len(indices))) print("=" * 50, "\n") data_loaders = {"train": train_loader, "valid": valid_loader} ### latent_dim = 2048 CAE = Model(z_dim=latent_dim) CAE = nn.DataParallel(CAE).cuda() CAE.load_state_dict(torch.load("./models/BroadCAE.pth")) CAE.eval() ### n_epochs = 200 optimiser = optim.SGD(NN.parameters(), lr=1e-1, weight_decay=1e-3) best_val_loss = np.infty