Example #1
def validate_model(
    net: torch.nn.Module,
    dataloader: torch.utils.data.DataLoader,
    classes: list,
    update: int,
    device: torch.device,
    plotter: Plotter,
    loss_fn=torch.nn.BCELoss()) -> Tuple[Tensor, dict, dict]:
    """
    Validate current model on validation set and return loss and metrics
    """
    plots_path = os.path.join('results', 'intermediate', 'plots')
    metrics_path = os.path.join('results', 'intermediate', 'metrics')
    os.makedirs(plots_path, exist_ok=True)
    os.makedirs(metrics_path, exist_ok=True)

    loss = torch.tensor(0., device=device)
    with torch.no_grad():
        target_list = []
        prediction_list = []
        # calculate targets and predictions on validation set
        for data in tqdm.tqdm(dataloader, desc='scoring', position=0):
            inputs, targets, _, idx = data
            inputs = inputs.to(device, dtype=torch.float32)
            targets = targets.to(device, dtype=torch.float32)
            predictions = net(inputs)
            loss += loss_fn(predictions, targets)
            # plot results
            target_array = targets.detach().cpu().numpy()
            prediction_array = predictions.detach().cpu().numpy()
            target_list.extend([*target_array])
            prediction_list.extend([*prediction_array])
        loss /= len(dataloader)
        # pick some random excerpts and plot them
        num_plots = 4
        indices = random.sample(range(len(target_list)), k=num_plots)
        targets = np.stack(
            [t for i, t in enumerate(target_list) if i in indices])
        predictions = np.stack(
            [t for i, t in enumerate(prediction_list) if i in indices])
        plotter.plot(targets, predictions, plots_path, update)
        # compute dcase metrics
        targets = np.stack(target_list)
        predictions = np.stack(prediction_list)
        metrics = metric.compute_dcase_metrics([targets], [predictions],
                                               classes)
        metrics_pp = metric.compute_dcase_metrics(
            [targets], [postproc.post_process_predictions(predictions)],
            classes)
        metric.write_dcase_metrics_to_file(metrics, metrics_path,
                                           f"{update:07d}.txt")
        metric.write_dcase_metrics_to_file(metrics_pp, metrics_path,
                                           f"{update:07d}_pp.txt")
    return loss, metrics, metrics_pp
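
A hypothetical call site (not from the source project) showing how the returned loss and metric dictionaries might be consumed from a training loop; the names model, val_loader, class_names, plotter, device, and validate_every are assumed for illustration:

# Hypothetical usage sketch; model, val_loader, class_names, plotter, device and
# validate_every are illustrative names, not part of the original code.
if update % validate_every == 0:
    model.eval()
    val_loss, metrics, metrics_pp = validate_model(
        net=model,
        dataloader=val_loader,
        classes=class_names,
        update=update,      # used to name the intermediate plot/metric files
        device=device,
        plotter=plotter,
    )
    model.train()
    print(f"update {update}: validation loss = {val_loss.item():.4f}")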
Example #2
def observe(self):
    y_data_list = []
    addrs = []
    for addr, packet_bin in self.bins.iteritems():
        if len(packet_bin) > VALID_PACKET_COUNT_THRESHOLD:
            y_data_list.append(packet_bin.generate_y_data(self.observer))
            addrs.append(addr)
    plotter = Plotter(range(self.size), y_data_list)
    plotter.output_file = PLOT_DIR + '_'.join(self.plot_name.split()) + '.pdf'
    plotter.x_label = 'Packet Sequence Number'
    plotter.y_label = addrs
    plotter.plot()
Example #3
async def main():
    transactionCounts99 = {}
    plotter = Plotter()
    infoGetter = InfoGetter(
        "https://*****:*****@nd-806-802-183.p2pify.com"
    )
    latestBlock = infoGetter.getLatestTransactions()
    tasks = []
    async with aiohttp.ClientSession() as session:
        for selectedBlock in range(
                int(latestBlock, 16) - 100, int(latestBlock, 16)):
            task = asyncio.ensure_future(
                infoGetter.getTransactions(session, hex(selectedBlock)))
            tasks.append(task)

        responses = await asyncio.gather(*tasks)
        for response in responses:
            valuesAndKey = next(iter(response.items()))
            transactionCounts99[valuesAndKey[0]] = valuesAndKey[1]
        #we've completed the request, so now we can plot
        plotter.plot(transactionCounts99)
Example #4
    beg_yr = int(sys.argv[2])
    end_yr = int(sys.argv[3])
    nc_var = sys.argv[4]
    obs_pattern = sys.argv[5]
    obs_nc_var = sys.argv[6]

    for yr in xrange(beg_yr, end_yr):
        pattern = os.path.join(nc_dir, '*' + str(yr) + '*.nc')
        #        for
        r = RegCMReader(pattern)
    value = r.get_value(nc_var).mean()
    time_limits = value.get_limits('time')
    crd_limits = value.get_latlonlimits()

    obs_r = CRUReader(obs_pattern)
    obs_value = obs_r.get_value(obs_nc_var,
                                imposed_limits={
                                    'time': time_limits
                                },
                                latlon_limits=crd_limits).mean()
    if obs_nc_var == "TMP":
        obs_value.to_K()

    value.regrid(obs_value.latlon)
    diff = obs_value - value
    plt = Plotter(diff)
    plt.plot(levels=(-5, 5))
    plt.show()
    plt.save('image', format='png')
    plt.close()
Example #5
from plot import Plotter
from data import Data
from singleLayerNN import SingleLayerNeuralNetwork
from multiLayerNN import MultiLayerNeuralNetwork
data = Data('./dataset.csv')
plotter = Plotter(data)
slnn = SingleLayerNeuralNetwork(data, 0.01, 1000)
weightsSLNN, precisionSLNN = slnn.run()
mlnn = MultiLayerNeuralNetwork(data, 0.1, 10000)
weightsMLNN, precisionMLNN = mlnn.run()
print("\nSingle Layer Neural Net Precision:\t", precisionSLNN, "%")
print("Multi Layer Neural Net Precision: \t", precisionMLNN, "%")
plotter.plot(weightsSLNN, weightsMLNN)



Example #6
def main():
    print("Loading wordvecs...")
    if utils.exists("glove", "glove.840B.300d.txt", "gutenberg"):
        words, wordvecs = utils.load_glove("glove", "glove.840B.300d.txt", "gutenberg")
    else:
        words, wordvecs = utils.load_glove("glove", "glove.840B.300d.txt", "gutenberg",
                                           set(map(clean_word, gutenberg.words())))

    wordvecs_norm = wordvecs / np.linalg.norm(wordvecs, axis=1).reshape(-1, 1)

    print("Loading corpus...")
    # Convert corpus into normed wordvecs, replacing any words not in vocab with zero vector
    sentences = [[wordvecs_norm[words[clean_word(word)]] if clean_word(word) in words.keys() else np.zeros(WORD_DIM)
                  for word in sentence]
                 for sentence in gutenberg.sents()]

    print("Processing corpus...")
    # Pad sentences shorter than SEQUENCE_LENGTH with zero vectors and truncate sentences longer than SEQUENCE_LENGTH
    s_train = list(map(pad_or_truncate, sentences))

    np.random.shuffle(s_train)

    # Truncate to multiple of BATCH_SIZE
    s_train = s_train[:int(len(s_train) / BATCH_SIZE) * BATCH_SIZE]

    s_train_idxs = np.arange(len(s_train))

    print("Generating graph...")
    network = NlpGan(learning_rate=LEARNING_RATE, d_dim_state=D_DIM_STATE, g_dim_state=G_DIM_STATE,
                     dim_in=WORD_DIM, sequence_length=SEQUENCE_LENGTH)

    plotter = Plotter([2, 1], "Loss", "Accuracy")
    plotter.plot(0, 0, 0, 0)
    plotter.plot(0, 0, 0, 1)
    plotter.plot(0, 0, 1, 0)
    plotter.plot(0, 1, 1, 0)

    #d_vars = [var for var in tf.trainable_variables() if 'discriminator' in var.name]
    saver = tf.train.Saver()

    with tf.Session() as sess:
        #eval(sess, network, words, wordvecs_norm, saver)

        sess.run(tf.global_variables_initializer())
        #resume(sess, saver, plotter, "GAN_9_SEQUENCELENGTH_10", 59)

        d_loss, g_loss = 0.0, 0.0
        for epoch in range(0, 10000000):
            print("Epoch %d" % epoch)

            np.random.shuffle(s_train_idxs)
            for batch in range(int(len(s_train_idxs) / BATCH_SIZE)):
                # select next random batch of sentences
                s_batch_real = [s_train[x] for x in s_train_idxs[batch * BATCH_SIZE:(batch + 1) * BATCH_SIZE]] # shape (BATCH_SIZE, SEQUENCE_LENGTH, WORD_DIM)

                # reshape to (SEQUENCE_LENGTH, BATCH_SIZE, WORD_DIM) while preserving sentence order
                s_batch_real = np.array(s_batch_real).swapaxes(0, 1)

                if d_loss - g_loss > MAX_LOSS_DIFF and False:
                    output_dict = sess.run(
                        network.get_fetch_dict('d_loss', 'd_train', 'g_loss'),
                        network.get_feed_dict(inputs=s_batch_real, input_dropout=D_KEEP_PROB)
                    )
                elif g_loss - d_loss > MAX_LOSS_DIFF and False:
                    output_dict = sess.run(
                        network.get_fetch_dict('d_loss', 'g_loss', 'g_train'),
                        network.get_feed_dict(inputs=s_batch_real, input_dropout=D_KEEP_PROB)
                    )
                else:
                    output_dict = sess.run(
                        network.get_fetch_dict('d_loss', 'd_train', 'g_loss', 'g_train'),
                        network.get_feed_dict(inputs=s_batch_real, input_dropout=D_KEEP_PROB,
                                              instance_variance=INSTANCE_VARIANCE)
                    )

                d_loss, g_loss = output_dict['d_loss'], output_dict['g_loss']

                if batch % 10 == 0:
                    print("Finished training batch %d / %d" % (batch, int(len(s_train) / BATCH_SIZE)))
                    print("Discriminator Loss: %f" % output_dict['d_loss'])
                    print("Generator Loss: %f" % output_dict['g_loss'])
                    plotter.plot(epoch + (batch / int(len(s_train) / BATCH_SIZE)), d_loss, 0, 0)
                    plotter.plot(epoch + (batch / int(len(s_train) / BATCH_SIZE)), g_loss, 0, 1)

                if batch % 100 == 0:
                    eval = sess.run(
                        network.get_fetch_dict('g_outputs', 'd_accuracy'),
                        network.get_feed_dict(inputs=s_batch_real, input_dropout=1.0,
                                              instance_variance=INSTANCE_VARIANCE)
                    )
                    # reshape g_outputs to (BATCH_SIZE, SEQUENCE_LENGTH, WORD_DIM) while preserving sentence order
                    generated = eval['g_outputs'].swapaxes(0, 1)
                    for sentence in generated[:3]:
                        for wordvec in sentence:
                            norm = np.linalg.norm(wordvec)
                            word, similarity = nearest_neighbor(words, wordvecs_norm, wordvec / norm)
                            print("{}({:4.2f})".format(word, similarity))
                        print('\n---------')
                    print("Total Accuracy: %f" % eval['d_accuracy'])
                    plotter.plot(epoch + (batch / int(len(s_train) / BATCH_SIZE)), eval['d_accuracy'], 1, 0)

            saver.save(sess, './checkpoints/{}.ckpt'.format(SAVE_NAME),
                       global_step=epoch)
            plotter.save(SAVE_NAME)
Example #7
Example: 
./app.py 'data/Africa_SRF.1970*.nc' t2m 'obs/CRUTMP.CDF' TMP
"""

if __name__ == "__main__":
    if len(sys.argv) != 5:
        print usage
        sys.exit(1)
    pattern = sys.argv[1]
    nc_var = sys.argv[2]
    obs_pattern = sys.argv[3]
    obs_nc_var = sys.argv[4]

    r = RegCMReader(pattern)
    value = r.get_value(nc_var).mean()
    time_limits = value.get_limits('time')
    crd_limits = value.get_latlonlimits()

    obs_r = CRUReader(obs_pattern)
    obs_value = obs_r.get_value(obs_nc_var, imposed_limits={'time': time_limits}, latlon_limits=crd_limits).mean()
    if obs_nc_var == "TMP":
        obs_value.to_K()

    value.regrid(obs_value.latlon)
    diff = obs_value - value
    plt = Plotter(diff)
    plt.plot(levels = (-5, 5))
    plt.show()
    plt.save('image', format='png')
    plt.close()
Example #8
File: s.py  Project: pinakm9/fem
F_1 = ((v - v_n) / k)*f_1*dx + epsilon_1*v*f_1*dx + kappa*grad(T)[0]*f_1*dx +\
 A_1hat*((T - T_n) / k)*f_2*dx + a_T*grad(T)[0]*v*f_2*dx + M_s1*grad(v)[0]*f_2*dx - Q*f_2*dx


F_2 = ((v - v_n) / k)*f_1*dx + epsilon_1*v*f_1*dx + beta*v*grad(v)[0]*f_1*dx + kappa*grad(T)[0]*f_1*dx +\
 A_1hat*((T - T_n) / k)*f_2*dx + a_T*v*grad(T)[0]*f_2*dx + M_s1*grad(v)[0]*f_2*dx - Q*f_2*dx

# Create VTK files for visualization output
"""vtkfile_v = File('qtcm1/velocity.pvd')
vtkfile_T = File('qtcm1/temperature.pvd')"""
pltr = Plotter(mesh, id_='4')
# Solve the system for each time step
t = 0
v_ = lambda y: v_n([y])
T_ = lambda y: T_n([y])

for n in range(num_steps):
    #pltr.plot(v_,'qtcm1/velocity/', n, t, quantity = 'velocity_42')
    pltr.plot(T_, 'qtcm1/velocity/', n, t, quantity='temp_43')
    t += dt
    # Solve variational problem for time step
    J = derivative(F_1, u)
    solve(F_1 == 0, u, bc, J=J)
    # Save solution to file (VTK)
    """_v, _T = u.split()
	vtkfile_v << (_v, t)
	vtkfile_T << (_T, t)"""
    # Update previous solution
    u_n.assign(u)

pltr.create_video()
Example #9
from link import link
from measures import mu, sigma, p_correlation
from plot import Plotter
from copy import deepcopy
import networkx as nx

# Graph definition
n = 500
G = nx.random_geometric_graph(n, 0.05)
adj = list(G.adjacency())

# Derived structural information
means = list(map(lambda x: mu(x, n), adj))
variances = sigma(adj, means)
similarities = p_correlation(adj, means, variances)

# Hierarchical clustering
stree = link(deepcopy(similarities), max)
ctree = link(deepcopy(similarities), min)

splotter = Plotter(G, stree, "slink.html")
cplotter = Plotter(G, ctree, "clink.html")

level = 450
splotter.plot(level)
cplotter.plot(level)

if __name__ == '__main__':
    pass