def analysis_diff_ask_bid(dic):
    # Spread between best ask and best bid for instrument A1.
    diff_ask_bid = dic['A1']['askPrice1'] - dic['A1']['bidPrice1']
    count = 0
    # Start at 1 so the look-back at i - 1 never wraps around to the last element.
    for i in range(1, diff_ask_bid.shape[0]):
        if diff_ask_bid[i] > 5000:
            count += 1
            diff_ask = dic['A1']['askPrice1'][i] - dic['A1']['askPrice1'][i - 1]
            diff_bid = dic['A1']['bidPrice1'][i] - dic['A1']['bidPrice1'][i - 1]
            print("%d %d = %d %d" % (diff_ask_bid[i - 1], diff_ask_bid[i], diff_ask, diff_bid))
    print("%d %f" % (count, float(count) / diff_ask_bid.shape[0]))
    lib.plot(diff_ask_bid, pj("fig", "A1_diff_askPrice1_bidPrice1"))
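# A minimal sketch of the two helpers the function above leans on, assuming pj
# is an os.path.join alias and lib.plot saves a line plot of a 1-D series under
# the given path stem (both are assumptions; the real versions live in lib).
import os
import matplotlib.pyplot as plt

def pj(*parts):
    # Hypothetical: join path components, as os.path.join does.
    return os.path.join(*parts)

def plot_series(series, path):
    # Hypothetical stand-in for lib.plot: draw the series and save it as a PNG.
    plt.figure()
    plt.plot(series)
    plt.savefig(path + ".png")
    plt.close()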
Run these two lines if you want to play with the plotting part:
    file = 'data.txt'
    plot(file, scale=100)
'''
import numpy as np
import lib

# //.......................................//
# //............INPUT VARIABLES...........//
# //.......................................//
Kcal = 76.8  # bulk moduli of the minerals (calcite, dolomite, quartz), in GPa
Kdol = 95.0
Kqtz = 37.0
Gcal = 32.0  # shear moduli of the minerals (calcite, dolomite, quartz), in GPa
Gdol = 45.0
Gqtz = 45.0

# //.......................................//
# //..........AUXILIARY VARIABLES.........//
# //.......................................//
K = np.array([Kcal, Kdol, Kqtz])  # maximum and minimum bulk moduli
G = np.array([Gcal, Gdol, Gqtz])

KHS_M, GHS_M = lib.matrix_calculation(K, G)
lib.save_file('KHS_ternary.txt', KHS_M)
lib.save_file('GHS_ternary.txt', GHS_M)

# Arguments: file, scale (x + y + z = scale), color scale min, color scale max
lib.plot('KHS_ternary.txt', 100, 35, 95)
lib.plot('GHS_ternary.txt', 100, 32, 45)
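# The KHS/GHS names suggest Hashin-Shtrikman bounds evaluated over the ternary
# calcite-dolomite-quartz grid. A minimal two-phase sketch of that bound,
# assuming lib.matrix_calculation applies it pairwise (an assumption; the
# formulas themselves are the standard Hashin-Shtrikman expressions).
import numpy as np

def hs_bound(K1, G1, K2, G2, f1):
    # Upper bound when phase 1 is the stiffer phase, lower bound when it is
    # the softer one; f1 is the volume fraction of phase 1. Assumes the two
    # phases have distinct moduli (otherwise the bound equals either phase).
    f2 = 1.0 - f1
    K_hs = K1 + f2 / (1.0 / (K2 - K1) + f1 / (K1 + 4.0 * G1 / 3.0))
    zeta1 = (G1 / 6.0) * (9.0 * K1 + 8.0 * G1) / (K1 + 2.0 * G1)
    G_hs = G1 + f2 / (1.0 / (G2 - G1) + f1 / (G1 + zeta1))
    return K_hs, G_hs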
import sys

def main():
    # Plot size comes from the first CLI argument; default to 100.
    num = int(sys.argv[1]) if len(sys.argv) > 1 else 100
    plot(num)
    return 0
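# Standard entry-point guard so the script is importable as well as runnable
# from the command line (e.g. `python plot_cli.py 250`; the filename is
# illustrative).
if __name__ == "__main__":
    sys.exit(main())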
fake_seqs = netG(noise)
errD_real, score_fake = lib.backprop(netD, fake_seqs, disc_exp="real", backward=False)
# How close are the guesses?
valid_score = score_real - score_fake
diff_vals.append((score_real + score_fake).cpu().item())
err_vals.append(errD_real + errD_fake)
data = next(valid_seqs)
disc_error_valid.append(np.mean(err_vals))
valid_counts.append(true_count)

# log results and figures
name = "valid_disc_cost"
if checkpoint_baseline > 0:
    name += "_{}".format(checkpoint_baseline)
lib.plot(valid_counts, disc_error_valid, logdir, name,
         xlabel="Iteration", ylabel="Discriminator cost")

# Compute the wall-clock time elapsed during this round of iteration.
current_iteration_endpint = datetime.datetime.now(tz)
current_iteration_elapsed = str(current_iteration_endpint - former_iteration_endpint).split(".")[0]
# Trim leading zero hour/minute fields from the H:M:S string.
temp = current_iteration_elapsed.split(":")
if int(temp[0]) == 0 and int(temp[1]) == 0:
    current_iteration_elapsed = temp[2]
elif int(temp[0]) == 0:
    current_iteration_elapsed = temp[1] + ":" + temp[2]
former_iteration_endpint = current_iteration_endpint
print("Iteration {}/{}: train_diff={:.5f}, valid_diff={:.5f} ({} sec)".format(
    true_count, args.train_iters, train_score, valid_score, current_iteration_elapsed))

fake_seqs = fake_seqs.reshape([-1, args.max_seq_len, data_enc_dim])
lib.save_samples(logdir, fake_seqs.cpu().clone().detach().numpy(), true_count,
                 rev_charmap, annotated=args.annotate)
name = "train_disc_cost"
if checkpoint_baseline > 0:
    name += "_{}".format(checkpoint_baseline)
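# A minimal sketch of what lib.backprop is assumed to do above (an assumption
# inferred from the call site; the real helper lives in lib): score a batch
# with the discriminator against the expected label and optionally
# backpropagate the loss. The (loss, mean score) return convention matches
# the unpacking in the snippet.
import torch
import torch.nn.functional as F

def backprop_sketch(netD, seqs, disc_exp="real", backward=True):
    logits = netD(seqs)
    # disc_exp is assumed to name the label the discriminator should assign.
    target = torch.ones_like(logits) if disc_exp == "real" else torch.zeros_like(logits)
    err = F.binary_cross_entropy_with_logits(logits, target)
    if backward:
        err.backward()
    return err, logits.mean()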
train_counts += 1
train_data = next(train_feed)

# validation
cost_vals = []
valid_data = next(valid_feed)
while valid_data is not None:
    valid_seqs, valid_vals = valid_data
    cost_val = session.run(cost, {inputs: valid_seqs, true_vals: valid_vals})
    cost_vals.append(cost_val)
    valid_data = next(valid_feed)
valid_cost.append(np.mean(cost_vals))
valid_counts.append(epoch_count)
name = "valid_cost"
if checkpoint_baseline > 0:
    name += "_{}".format(checkpoint_baseline)
lib.plot(valid_counts, valid_cost, logdir, name, xlabel="Epoch", ylabel="Cost")

# log results (train_cost holds the per-iteration training costs)
print("Epoch {}: train_cost={:.8f}, valid_cost={:.8f}".format(
    epoch_count, train_cost[-1], valid_cost[-1]))
name = "train_cost"
if checkpoint_baseline > 0:
    name += "_{}".format(checkpoint_baseline)
lib.plot(range(train_counts), train_cost, logdir, name, xlabel="Iteration", ylabel="Cost")

# save a checkpoint at every checkpoint interval and on the final epoch
checkpoint_epoch = (args.checkpoint_iters and epoch_count % args.checkpoint_iters == 0) \
    or (idx == args.num_epochs - 1)
if checkpoint_epoch:
    ckpt_dir = os.path.join(logdir, "checkpoints", "checkpoint_{}".format(epoch_count))
    os.makedirs(ckpt_dir, exist_ok=True)
    saver.save(session, os.path.join(ckpt_dir, "trained_predictor.ckpt"))

# test
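# train_feed/valid_feed are assumed to be iterators that yield (seqs, vals)
# batches and a None sentinel at each epoch boundary, which is what the
# `while valid_data is not None` loop above relies on. A minimal sketch
# (hypothetical helper; the real feed is built elsewhere):
import numpy as np

def make_feed(seqs, vals, batch_size):
    while True:
        order = np.random.permutation(len(seqs))
        for start in range(0, len(order) - batch_size + 1, batch_size):
            idx = order[start:start + batch_size]
            yield seqs[idx], vals[idx]
        yield None  # epoch boundary sentinel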
cost_vals = []
data = next(valid_seqs)
while data is not None:
    noise = np.random.normal(size=[args.batch_size, args.latent_dim])
    score_diff = session.run(disc_diff, {latent_vars: noise, real_data: data})
    cost_vals.append(score_diff)
    data = next(valid_seqs)
valid_cost.append(np.mean(cost_vals))
valid_counts.append(true_count)
name = "valid_disc_cost"
if checkpoint_baseline > 0:
    name += "_{}".format(checkpoint_baseline)
lib.plot(valid_counts, valid_cost, logdir, name,
         xlabel="Iteration", ylabel="Discriminator cost")

# log results (report the validation mean, not just the last batch)
print("Iteration {}: train_disc_cost={:.5f}, valid_disc_cost={:.5f}".format(
    true_count, cost, valid_cost[-1]))
samples = session.run(gen_data, {latent_vars: fixed_latents}).reshape(
    [-1, args.max_seq_len, data_enc_dim])
lib.save_samples(logdir, samples, true_count, rev_charmap, annotated=args.annotate)
name = "train_disc_cost"
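# disc_diff is assumed to be the WGAN-style critic score gap between real and
# generated batches (an assumption; the graph is built elsewhere). A minimal
# sketch of that node:
import tensorflow as tf

def critic_gap(disc_real_scores, disc_fake_scores):
    # Mean critic score on real data minus mean score on generated data.
    return tf.reduce_mean(disc_real_scores) - tf.reduce_mean(disc_fake_scores)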
import lib
import os

input_path = "dataset/epidural"
input_folder = os.fsencode(input_path)
files = os.listdir(input_folder)
files.sort()
slices = []
for file in files:
    # Other slices worth trying: "ID_3580adc72.dcm", "ID_635f084fc.dcm",
    # "ID_559b1d8f7.dcm", "ID_894a589ad.dcm".
    if os.fsdecode(file) == "ID_0ed10ec08.dcm":
        file = os.fsdecode(file)
        filename = "{}/{}".format(input_path, file)
        image = lib.read_image(filename)
        lib.histogram(image, True)
        lib.plot("original: {}".format(file), image)

        # Tissue features, windowed by Hounsfield-unit intervals.
        hematoma = lib.substance_interval(image, 40, 90)
        white_matter = lib.substance_interval(image, 20, 30)
        blood = lib.substance_interval(image, 30, 45)
        bone = lib.substance_interval(image, 600, 4000)
        # lib.plot("blood: {}".format(file), blood)
        # lib.plot("hematoma: {}".format(file), hematoma)
        lib.histogram(blood, True)
print("Done")
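# A minimal sketch of what lib.substance_interval is assumed to do (an
# assumption inferred from the HU ranges above; the real helper lives in lib):
# keep voxels whose Hounsfield value falls inside [low, high] and zero out
# everything else.
import numpy as np

def substance_interval_sketch(image, low, high):
    mask = (image >= low) & (image <= high)
    return np.where(mask, image, 0)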