Example #1
def valid_generator(batch_size, viz=False):
    directory = 'dataset/valid/'

    valid_x_full = np.load(directory + 'valid_x.npy')
    valid_y_prob_full = np.load(directory + 'valid_y_prob.npy')
    valid_y_keys_full = np.load(directory + 'valid_y_keys.npy')

    dataset_size = valid_y_prob_full.shape[0]
    indices = batch_indices(batch_size=batch_size, dataset_size=dataset_size)
    print('Validation Dataset Size: {0}'.format(dataset_size))

    while True:

        for index in indices:
            # load from dataset
            valid_x = valid_x_full[index[0]:index[1]]
            valid_y_prob = valid_y_prob_full[index[0]:index[1]]
            valid_y_keys = valid_y_keys_full[index[0]:index[1]]

            if viz:
                visualize(valid_x[-1], valid_y_prob[-1], valid_y_keys[-1])

            # normalizing the image and the keypoints
            valid_x = valid_x / 255.0
            valid_y_prob = np.squeeze(valid_y_prob)
            valid_y_keys = valid_y_keys / 128.0

            # creating ensembles of the keypoints
            valid_y_keys = np.reshape(valid_y_keys,
                                      (valid_y_keys.shape[0], 1, 10))
            valid_y_keys = np.repeat(valid_y_keys, 10, axis=1)

            valid_y = [valid_y_prob, valid_y_keys]
            yield valid_x, valid_y
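The batch_indices helper used above is not shown in this example; a minimal sketch of what it presumably returns, assuming contiguous (start, end) pairs that cover the dataset in order:

import numpy as np

def batch_indices(batch_size, dataset_size):
    # Hypothetical helper (not part of the example above): one (start, end)
    # pair per batch; the last pair may be shorter than batch_size.
    starts = np.arange(0, dataset_size, batch_size)
    return [(int(s), int(min(s + batch_size, dataset_size))) for s in starts]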
Example #2
def visualize():
    # load global params
    load_config()
    # load bow features
    X_bof, Y_sift, X_data = joblib.load(os.path.join(feat_dir,
                                                     'bow_feats.pkl'))
    vis.visualize(X_bof, Y_sift, X_data, 'BOW feature visualization', True)
Example #3
def generate(individual, n_population=2, n_generation=10, logs=True):
    best = individual
    best_individuals = [best]

    global LOGS
    LOGS = logs

    for i in range(n_generation):
        population = generate_population(best, n_population)
        best, score, categories = select_best(population)
        best_individuals.append(best)

        visualize(best, name=str(i + 1))

        if logs:
            print(f'\nGeneration {i + 1}. Best score: {score}.')
            print(f'Color score: {categories["color"]}.')
            print(f'Points distribution score: {categories["points"]}.')
            print(f'Polylines length score: {categories["polylines"]}.')
            print(f'Polylines smoothing score: {categories["smoothness"]}.')

            print('\n')

    write(best, avg_scores, globals())
    return best, best_individuals
Example #4
def reductionKNNAlgorithm(dataset, k=1, r=1, w='eq', v='maj', show=False, reduction_alg=None):
    """
    Execute a 10-fold kNN.
    :param dataset: either "adult" or "satimage"
    :param k: the number of nearest neighbours to take into account when voting
    :param show: boolean, whether to show a 2-plot of the results
    :return: None
    """

    df = pd.DataFrame(columns=['dataset', 'k', 'r', 'w', 'v', 'reduction', 'acc', 'eff', 'storage'])
    read = read_vowel_fold if dataset == 'vowel' else read_satimage_fold
    for i in range(10):
        x_train, y_train, x_test, y_test = read(i)
        if reduction_alg:
            n0 = x_train.shape[0]
            x_train, y_train = reduction_alg(x_train, y_train, k, r, w, v)
            storage = x_train.shape[0] / n0
        else:
            storage = 1
        # Call here the KNN
        indexes, y_pred, eff, acc = kNNAlgorithm(x_train, y_train, x_test, y_test, k, r, w, v)

        print(f"{dataset} fold {i}, k={k}, r={r}, w={w}, v={v}: Accuracy {acc}, Time {eff}")
        row = pd.DataFrame([[dataset, k, r, w, v, reduction_alg, acc, eff, storage]],
                           columns=['dataset', 'k', 'r', 'w', 'v', 'reduction', 'acc', 'eff', 'storage'])
        df = pd.concat([df, row], ignore_index=True)
        if show:
            visualize(x_test, y_test, y_pred)
    return df
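A hypothetical invocation, assuming the project's fold readers and kNNAlgorithm are importable; it runs the 10-fold kNN without a reduction algorithm and summarizes the per-fold columns:

results = reductionKNNAlgorithm('satimage', k=3, r=2, w='eq', v='maj')
print(results[['acc', 'eff', 'storage']].mean())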
Example #5
    def _train_epoch(self, epoch):
        """
    Training logic for an epoch
    :param epoch: Integer, current training epoch.
    :return: A log that contains average loss and metric in this epoch.
    """
        self.model.train()
        self.train_metrics.reset()
        for batch_idx, batch in tqdm(enumerate(self.train_loader)):

            original_labels, masked_labels, masked_token_ids, masked_indices = MLM_load_batch(
                batch, self.config)
            actual_batch_size = original_labels.shape[0]

            self.optimizer.zero_grad()

            # https://discuss.pytorch.org/t/selecting-element-on-dimension-from-list-of-indexes/36319/2
            yhat = self.model(
                masked_labels)  # (actual_batch_size, seq_length, vocab_size)
            yhat = yhat[torch.arange(actual_batch_size),
                        masked_indices]  # (actual_batch_size, vocab_size)

            loss = self.criterion(yhat, masked_token_ids)
            self.writer.add_scalar("Loss/train", loss, self.train_iter_global)
            acc = metrics.accuracy(yhat, masked_token_ids)
            self.writer.add_scalar("Accuracy/train",
                                   acc * 1.0 / actual_batch_size,
                                   self.train_iter_global)
            self.train_iter_global += 1

            self.train_metrics.update("loss", loss.item())
            for met in self.metric_ftns:
                self.train_metrics.update(met.__name__,
                                          met(yhat, masked_token_ids))

            if batch_idx % self.log_step == 0:
                self.logger.info('Train Epoch: {}-{} Loss: {:.6f}'.format(
                    epoch, self._progress(batch_idx, self.train_loader),
                    loss.item()))

            loss.backward()
            self.optimizer.step()

        log = self.train_metrics.result()

        if self.config["do_validation"]:
            val_log = self._val_epoch(epoch)
            log.update(**{'val_' + k: v for k, v in val_log.items()})

        self.writer.flush()

        with torch.no_grad():
            # visualize embedding for a sample word by BERT as of this epoch and save as image
            visualize(self.model.bert,
                      epoch=epoch,
                      location=os.path.dirname(CUR_DIR) + "/image")

        if self.lr_scheduler is not None:
            self.lr_scheduler.step()
        return log
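The advanced-indexing trick referenced by the linked forum thread can be shown in isolation; a small sketch with toy shapes (no model), picking one sequence position per batch row from a (B, L, V) tensor:

import torch

B, L, V = 4, 6, 10
yhat = torch.randn(B, L, V)                      # per-token logits
masked_indices = torch.tensor([0, 3, 5, 2])      # one position per row
picked = yhat[torch.arange(B), masked_indices]   # shape (B, V)
assert picked.shape == (B, V)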
Example #6
def main(args):
    if not os.path.exists(args.save_path):
        os.mkdir(args.save_path)

    net = UNet(n_channels=3, n_classes=1)

    checkpoint = flow.load(args.pretrained_path)
    net.load_state_dict(checkpoint)

    net.to("cuda")

    x_test_dir, y_test_dir = get_datadir_path(args, split="test")

    test_dataset = Dataset(
        x_test_dir, y_test_dir, augmentation=get_test_augmentation(),
    )

    print("Begin Testing...")
    for i, (image, mask) in enumerate(tqdm(test_dataset)):
        show_image = image
        with flow.no_grad():
            image = image / 255.0
            image = image.astype(np.float32)
            image = flow.tensor(image, dtype=flow.float32)
            image = image.permute(2, 0, 1)
            image = image.to("cuda")

            pred = net(image.unsqueeze(0).to("cuda"))
            pred = pred.numpy()
            pred = pred > 0.5
        save_picture_name = os.path.join(args.save_path, "test_image_" + str(i))
        visualize(
            save_picture_name, image=show_image, GT=mask[0, :, :], Pred=pred[0, 0, :, :]
        )
Example #7
    def upload(self, fileToUpload, n_voices, keydiff_threshold, submit):
        name = fileToUpload.filename + '-' + str(n_voices) + '-' + str(
            keydiff_threshold)
        dir_prefix = 'src/website/html5up-dimension/products/' + name

        with open("input.wav", "wb") as fout:
            fout.write(fileToUpload.file.read())

        print('processing audio')
        if not os.path.exists(dir_prefix + "-output.mid"):
            process_audio.wav2midi(
                "input.wav", dir_prefix + "-output.mid", {
                    'n_peaks': int(n_voices),
                    'keydiff_threshold': int(keydiff_threshold)
                })

        print('generating pdf')
        if not os.path.exists(dir_prefix + "-output.pdf"):
            musescore_call.generate_pdf(dir_prefix + "-output.mid",
                                        dir_prefix + "-output.pdf")

        print('generating mp3')
        if not os.path.exists(dir_prefix + "-output.mp3"):
            musescore_call.generate_mp3(dir_prefix + "-output.mid",
                                        dir_prefix + "-output.mp3")

        print('generating video')
        if not os.path.exists(dir_prefix + "-output.mp4"):
            visualize.visualize(dir_prefix + "-output.mid",
                                dir_prefix + "-output.mp4")

        with open("src/website/html5up-dimension/pianotalks.html") as f:
            return f.read().replace("OUTPUT_FILENAME", name)
Example #8
def selection():
    imgsrc = 'static/images/image.png'
    first = request.form['first']
    second = request.form['second']
    third = request.form['third']
    visualize.visualize(first, second, third)[0].savefig(imgsrc)
    return render_template('draw.html', imgsrc=imgsrc)
Example #9
 def run(self):
     max_gen = 0
     while max_gen < 10:
         for i in self.current_generation:
             i.play()
         self.previous_generation = copy.copy(self.current_generation)
         self.current_generation = []
         self.previous_generation.sort(key=lambda x: x.fitness,
                                       reverse=True)
         self.previous_generation = self.previous_generation[:settings.
                                                             num_par_snakes]
         if settings.visualize:
             visualize.visualize(self.previous_generation[0])
         print("Gen:" + str(self.num_gen))
         avg = 0
         for i in self.previous_generation:
             avg += i.fitness / (settings.num_par_snakes +
                                 settings.num_snakes_gen)
         print("Average Fitness: " + str(avg))
         print("Best Fitness: " + str(self.previous_generation[0].fitness))
         most_fit_apple = len(
             self.previous_generation[0].eaten_apple_locations)
         print("Most Fit Apples Eaten: " + str(most_fit_apple))
         if most_fit_apple >= 98:
             max_gen += 1
         parents = self.selection()
         for i in range(0, len(parents) - 1, 2):
             self.crossover(parents[i], parents[i + 1])
         self.mutate()
         for i in self.previous_generation:
             self.current_generation.append(
                 s.snake(i.network.weights, i.network.bias))
         self.num_gen += 1
     print("it worked?")
Example #10
def test(seg):
    visualize.visualize(seg, "Before normalization to 40dB")
    print("Normalizing to 40dB SPL...")
    print("  SPL before:", seg.spl, "dB")
    res = seg.normalize_spl_by_average(db=40)
    print("  SPL after:", res.spl, "dB")
    visualize.visualize(res, "After normalization to 40dB")
    return res
Example #11
def visualize_epoch(job):
    epoch_id = job[0]
    machine_steering = job[1]
    visualize.visualize(epoch_id,
                        machine_steering,
                        params.out_dir,
                        verbose=True,
                        frame_count_limit=None)
Example #12
def test(seg):
    print("Removing silence...")
    seg = seg.filter_silence()
    outname_silence = "results/nosilence.wav"
    seg.export(outname_silence, format="wav")
    visualize.visualize(seg[:min(visualize.VIS_MS, len(seg))], title="After Silence Removal")
    print("After removal:", outname_silence)
    return seg
Example #13
def daily_returns(df, graph_name, returns_col):
	# Daily percentage returns
	date_col = 'Date'
	daily_returns_col = 'Daily Returns'
	df[daily_returns_col] = df[returns_col].pct_change(1)
	filename = '%s_daily_returns' % graph_name.lower()
	title = '%s Daily Returns' % graph_name
	visualize.visualize(plot_direc, df, date_col, [daily_returns_col], filename, title=title, ylabel=daily_returns_col, xlabel=date_col)
	return df
Example #14
def train_generator(batch_size, is_augment=True, viz=False):
    if is_augment:
        batch_size = int(batch_size / 2)

    # load dataset
    directory = 'dataset/train/'

    train_x_full = np.load(directory + 'train_x.npy')
    train_y_prob_full = np.load(directory + 'train_y_prob.npy')
    train_y_keys_full = np.load(directory + 'train_y_keys.npy')

    dataset_size = train_y_prob_full.shape[0]
    indices = batch_indices(batch_size=batch_size, dataset_size=dataset_size)
    print('Training Dataset Size: {0}'.format(dataset_size))

    while True:

        for index in indices:
            # load from dataset
            train_x = train_x_full[index[0]:index[1]]
            train_y_prob = train_y_prob_full[index[0]:index[1]]
            train_y_keys = train_y_keys_full[index[0]:index[1]]

            if viz:
                visualize(train_x[-1], train_y_prob[-1], train_y_keys[-1])

            # augment dataset and append to the batch
            train_x_aug, train_y_keys_aug = augment(train_x, train_y_prob,
                                                    train_y_keys)
            train_x = np.append(train_x, train_x_aug, axis=0)
            train_y_prob = np.append(train_y_prob, train_y_prob, axis=0)
            train_y_keys = np.append(train_y_keys, train_y_keys_aug, axis=0)

            if viz:
                visualize(train_x[-1], train_y_prob[-1], train_y_keys[-1])

            # normalizing the image and the keypoints
            train_x = train_x / 255.0
            train_y_prob = np.squeeze(train_y_prob)
            train_y_keys = train_y_keys / 128.0

            # creating ensembles of the keypoints
            train_y_keys = np.reshape(train_y_keys,
                                      (train_y_keys.shape[0], 1, 10))
            train_y_keys = np.repeat(train_y_keys, 10, axis=1)

            # random shuffling over the batch
            seed = random.randint(0, 1000)
            np.random.seed(seed)
            np.random.shuffle(train_x)
            np.random.seed(seed)
            np.random.shuffle(train_y_prob)
            np.random.seed(seed)
            np.random.shuffle(train_y_keys)

            train_y = [train_y_prob, train_y_keys]
            yield train_x, train_y
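The seeded triple shuffle above keeps the three arrays row-aligned; an equivalent pattern, sketched here assuming three NumPy arrays of equal length, applies one shared permutation instead of reseeding the generator three times:

import numpy as np

perm = np.random.permutation(len(train_x))
train_x = train_x[perm]
train_y_prob = train_y_prob[perm]
train_y_keys = train_y_keys[perm]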
Example #15
def process_data(method, features):
    classifier = create_classifier(method)
    visualize(classifier, training_set, data, "diabetes", features)
    verification_score = calculate_score(classifier, validation_set, features,
                                         "diabetes")
    training_score = calculate_score(classifier, training_set, features,
                                     "diabetes")
    plt.savefig('static/img/visualize.png')
    plt.clf()
    return verification_score, training_score
Example #16
def cumulative_daily_returns(df, graph_name, returns_col):
	# Cumulative daily returns
	date_col = 'Date'
	daily_returns_col = 'Cumulative Daily Returns'
	initial_price = df[returns_col].iloc[0]
	df[daily_returns_col] = df[returns_col] / initial_price
	filename = '%s_cumulative_daily_returns' %graph_name.lower()
	title = '%s Cumulative Daily Returns' %graph_name
	visualize.visualize(plot_direc, df, date_col, [daily_returns_col], filename, title=title, ylabel=daily_returns_col, xlabel=date_col)
	return df
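Since a window-1 rolling apply reduces to an elementwise division, the same growth-relative-to-start series can also be expressed with the compounding idiom; a sketch, assuming prices is a pandas Series:

growth = (1 + prices.pct_change().fillna(0)).cumprod()  # price_t / price_0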
Example #17
def simulate(params):

	# Get the selected distribution
	dist = dict(g.get_distributions())[params.distribution]		
	wire_drop_counts = range(params.wire_range[0], params.wire_range[1], params.step)

	#lg = g.get_exponential_decay_length_generator(math.e, 2, max_length, min_length)
	lengths = dist( *(params.length_range + params.distribution_parameters) )
	positions = g.get_uniform_position_generator(params.grid_width)
	angles = g.get_uniform_angle_generator()	
	
	# Setup graph animator if requested.
	after_add = None
	if params.anitmate_graph_creation:
		animation = v.GraphCreationAnimator()
		after_add = lambda *args: animation.add_graph(*args)

	wire_factory = f.WireFactory(lengths, positions, angles)
	factory = f.NocFactory(wire_factory, after_add=after_add)

	# Generate the NoC 
	trials = []
	for trial in range(0, params.trials):
		results = []

		for drop_count in wire_drop_counts:	
			noc = factory.create(drop_count)
			noc.create_graph()
				
			# Save the image of the NoC if requested
			if params.graph_save_path:
				s = "{0}/{1}_trial{2}_count{3}.png"
				path = s.format(					
					params.graph_save_path, 
					params.distribution,
					trial, 
					drop_count)
				v.save_noc_image(noc, path)

			result = a.get_stats(noc, drop_count, trial)
			results.append(result)

		trials.append(results)

	if params.anitmate_graph_creation:
		animation.create_animation("graph.mp4")

	# Save results if requested
	if params.output_file != None:
		for trial in trials:
			io.export_to_csv(trial, params.output_file, mode="a+")

	# Display the results
	if params.visualize:
		v.visualize(trials)
Example #18
def run_simulation(path):
    import main

    sim = Simulation.from_path(path)
    with unittest.mock.patch("main.pyautogui", sim):
        board = main.solve_live_game()

    visualize.visualize(board)

    sim.screenshot().show()
    assert board.is_solved()
Example #19
def main():
    config = configparser.ConfigParser()

    if len(config.read("config")) == 0:
        print("Configuration file was not found!\n")
        return

    section_name = "PARAMS"

    params = config[section_name]
    train_file = str(params["train_file"])
    test_ratio = float(params["test_ratio"])
    k = int(params["k"])
    mesh_step = float(params["mesh_step"])

    do_manhattan = config.getboolean(section_name, "use_manhattan")
    do_czebyszew = config.getboolean(section_name, "use_Czebyszew")
    do_validation = config.getboolean(section_name, "do_validation")
    do_visulization = config.getboolean(section_name, "do_visulization")

    print("train file {},\ntest ratio {},\nk parameter {}".format(
        train_file, test_ratio, k))

    train_data, test_data = load_data(train_file, test_ratio)

    num_of_classes = max(test_data, key=operator.itemgetter(-1))[-1]
    start = time.time()
    print("Length of test data: {}, train data: {}".format(
        len(test_data), len(train_data)))

    k_vals = [k]
    if do_czebyszew:
        final_metric = czebyszew.Czebyszew.calc_dist
    elif do_manhattan:
        final_metric = manhattan.Manhattan.calc_dist
    else:
        final_metric = euclidean.Euclidean.calc_dist

    result = None
    for i in k_vals:
        start = time.time()
        result = test(train_data, test_data, i, final_metric)
        end = time.time() - start
        print(';' + str(end))

    best_result = result

    end = time.time() - start
    print('end time' + str(end))

    if do_visulization:
        # this will draw classes and plot testing points
        V.visualize(best_result[0], train_data, test_data, k, num_of_classes,
                    mesh_step, do_manhattan)
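The three metrics chosen above are special cases of the Minkowski distance; a minimal sketch (the project's actual calc_dist signatures are not shown here):

def minkowski(a, b, p):
    # p=1 gives Manhattan, p=2 gives Euclidean; Chebyshev (Czebyszew)
    # is the p -> infinity limit, i.e. max(|x - y|).
    return sum(abs(x - y) ** p for x, y in zip(a, b)) ** (1.0 / p)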
Example #20
def main():
	
	try:	
		args = get_args()

		data = preprocess(args)

		if args.visualize_data:
			visualize(data)
			sys.exit(1) 

		train_set, test_set = split(data)
		
		num_examples = train_set.shape[0]
		num_features = train_set.shape[1] - 1
		if args.mini_batch:
			batch_size = 32		# or 64
			epochs = 1500
		else:
			batch_size = num_examples
			epochs = 30000

		nn = NeuralNetwork(num_features, batch_size, epochs)

		if args.train:
			nn.train(data, train_set, test_set, num_examples, args.quiet)

			if args.evaluation:
				y_pred = probability_to_class(nn.output.T)
				get_validation_metrics(y_pred[:, 0], nn.y.T[:, 0])

			# mini-batch learning is noisy, so we don't plot it 
			if not args.mini_batch:
				plot_learning(nn.train_losses, nn.test_losses)

			# save network params
			if args.save_model:
				W1, W2, W3, W4 = nn.weights1.tolist(), nn.weights2.tolist(), nn.weights3.tolist(), nn.weights4.tolist()
				B1, B2, B3, B4 = nn.bias1.tolist(), nn.bias2.tolist(), nn.bias3.tolist(), nn.bias4.tolist()
				model = dict(weights1=W1, weights2=W2, weights3=W3, weights4=W4, bias1=B1, bias2=B2, bias3=B3, bias4=B4)
				with open("model.json", "w") as f:
					json.dump(model, f, separators=(',', ':'), indent=4)

		if args.predict and (args.predict == "model.json"):
			try:
				with open(args.predict) as file:
					model = json.load(file)
			except: 		
				error_exit("please provide a valid model")
			nn.load_model(model)
			nn.predict(test_set, epochs)

	except:
		pass
Example #21
def long_short_ratio(df, graph_name, long_col, short_col):
	date_col = 'Date'
	long_short_ratio_col = 'Long Short Ratio'
	df[long_short_ratio_col] = df[long_col] / df[short_col]
	filename = '%s_long_short_ratio' %graph_name.lower()
	title = '%s Long Short Ratio' %graph_name
	visualize.visualize(plot_direc, df, date_col, [long_short_ratio_col], filename, title=title, ylabel=long_short_ratio_col, xlabel=date_col)
	return df

Example #22
def visualize_participant_production(dataset, dir_path):
    for participant in dataset:
        if participant['test_type'] == 'production' and participant[
                'status'] == 'finished':
            partition = np.zeros(64, dtype=int)
            for answer, response in zip(participant['test_sequence'],
                                        participant['test_responses']):
                partition[answer] = response
            partition = partition.reshape((8, 8))
            figure_path = dir_path + '%s/%s.pdf' % (participant['condition'],
                                                    participant['user_id'])
            visualize.visualize(partition, figure_path)
Example #23
def main():
    print("Getting paths and labels for all train and test data")
    train_image_paths, test_image_paths, train_labels, test_labels = \
        get_image_paths(DATA_PATH, CATEGORIES, NUM_TRAIN_PER_CAT)

    if FEATURE == 'tiny_image':
        train_image_feats = get_tiny_images(train_image_paths)
        test_image_feats = get_tiny_images(test_image_paths)

    elif FEATURE == 'bag_of_sift':
        if not os.path.isfile('vocab.pkl'):
            print('No existing visual word vocabulary found. Computing one from training images\n')
            vocab_size = 400
            vocab = build_vocabulary(train_image_paths, vocab_size)
            with open('vocab.pkl', 'wb') as handle:
                pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)

        if not os.path.isfile('train_image_feats.pkl'):
            train_image_feats = get_bags_of_sifts(train_image_paths)
            with open('train_image_feats.pkl', 'wb') as handle:
                pickle.dump(train_image_feats, handle, protocol=pickle.HIGHEST_PROTOCOL)
        else:
            with open('train_image_feats.pkl', 'rb') as handle:
                train_image_feats = pickle.load(handle)

        if not os.path.isfile('test_image_feats.pkl'):
            test_image_feats = get_bags_of_sifts(test_image_paths)
            with open('test_image_feats.pkl', 'wb') as handle:
                pickle.dump(test_image_feats, handle, protocol=pickle.HIGHEST_PROTOCOL)
        else:
            with open('test_image_feats.pkl', 'rb') as handle:
                test_image_feats = pickle.load(handle)
    else:
        raise NameError('Unknown feature type')


    if CLASSIFIER == 'nearest_neighbor':
        predicted_categories = nearest_neighbor_classify(train_image_feats, train_labels, test_image_feats)

    elif CLASSIFIER == 'support_vector_machine':
        predicted_categories = svm_classify(train_image_feats, train_labels, test_image_feats)

    else:
        raise NameError('Unknown classifier type')

    accuracy = float(len([x for x in zip(test_labels, predicted_categories) if x[0] == x[1]])) / float(len(test_labels))
    print("Accuracy = ", accuracy)
    test_labels_ids = [CATE2ID[x] for x in test_labels]
    predicted_categories_ids = [CATE2ID[x] for x in predicted_categories]
    train_labels_ids = [CATE2ID[x] for x in train_labels]

    build_confusion_mtx(test_labels_ids, predicted_categories_ids, ABBR_CATEGORIES)
    visualize(CATEGORIES, test_image_paths, test_labels_ids, predicted_categories_ids, train_image_paths, train_labels_ids)
Example #24
def main():
    args = parse_arg()
    configs = [make_config(k,
                           embd=args.embd,
                           tsne=args.tsne,
                           small=args.small,
                           shuffled_class=args.shuffled_class) for k in args.datasets]

    if args.make_graph:
        visualize(configs[0], k_nearest=args.k_nearest, M_sample=args.M)

    valss = [test_job(config=k, k_nearest=args.k_nearest, M_sample=args.M) for k in tqdm(configs)]
    plot_with_err(*valss)
Example #25
def correlations_vs_shifted_returns(df, feature_col, returns_col):
	x = range(1, 200)
	pearsons = []
	distances = []

	for n in x:
		temp = shift_returns(df, returns_col, n)
		temp = temp[:-n]
		pearsons.append(pearson_correlation(temp, feature_col, returns_col))
		distances.append(distance_correlation(temp, feature_col, returns_col))
	
	filename = 'correlations_vs_%s' % '_'.join(feature_col.lower().split(' '))
	title = 'Correlations vs Days Shifted for %s' % feature_col
	new_df = pd.DataFrame({'Days Shifted': x, 'Pearson Correlations': pearsons, 'Distance Correlations': distances})
	visualize.visualize(plot_direc, new_df, 'Days Shifted', ['Pearson Correlations', 'Distance Correlations'], filename, title=title, ylabel='Correlation', xlabel='Days Shifted')
Example #26
def test(seg):
    print("Removing silence...")
    result = seg.filter_silence()
    outname_silence = "results/nosilence.wav"
    result.export(outname_silence, format="wav")
    visualize.visualize(result[:min(visualize.VIS_MS, len(result))],
                        title="After Silence Removal")
    print("After removal:", outname_silence)

    # Now try again, but with massive threshold for silence removal
    # This will strip almost every sample in the file, leaving a practically empty
    # WAV file, which Pydub chokes on.
    _ = seg.filter_silence(threshold_percentage=99.9)

    return result
Example #27
def visualize_participant_comprehension(dataset, dir_path):
    for participant in dataset:
        if participant['test_type'] == 'comprehension' and participant[
                'status'] == 'finished':
            partition = np.zeros((4, 8, 8), dtype=float)
            for answer, response in zip(participant['test_sequence'],
                                        participant['test_responses']):
                row = response // 8
                col = response % 8
                partition[answer, row, col] += 1
            figure_path = dir_path + '%s/%s.pdf' % (participant['condition'],
                                                    participant['user_id'])
            for cat in range(4):
                partition[cat] = partition[cat] / partition[cat].max()
            visualize.visualize(partition, figure_path)
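One caveat with the per-category normalization above: partition[cat].max() is zero for a category that received no responses, which divides by zero. A guarded variant could look like this (sketch):

for cat in range(4):
    peak = partition[cat].max()
    if peak > 0:
        partition[cat] = partition[cat] / peak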
Example #28
def main(data_path, rootNode):
    MAX_LEVEL = 10

    folder = create_dir(data_path, rootNode)
    print(folder)

    # used_word = ["information_retrieval", "information", "model", "method"]
    # used_word = ["information_retrieval"]
    used_word = []
    for level in range(MAX_LEVEL):
        print('\n================================== Running level ', level, ' ==================================\n')
        iteration(folder, rootNode, used_word)

    print(folder)
    visualize(folder)
Example #29
def run(output_dir, input_dir):
    click.echo("output directory: {}".format(output_dir))
    click.echo("input directory: {}".format(input_dir))

    # find files that are Iguana result files
    files = list(
        filter(
            lambda path: path.is_file() and path.suffix == ".nt" and not path.
            name.startswith("cleaned_"), [
                Path(os.path.join(input_dir, path))
                for path in os.listdir(input_dir)
            ]))
    click.echo("\nFiles for conversion: \n{}".format("\n".join(
        [file.name for file in files])))
    output_files = list()
    click.echo("\nConverted files:")
    for file in files:
        for output_file in result2rdf.convert_result_file(
                file.name, input_dir, output_dir):
            click.echo("{}.csv".format(Path(output_file).name))
            output_files.append(output_file)

    click.echo("\nConcatenating all output files ... ")
    concat_output_file_path = os.path.join(output_dir, "all_results")
    with open(concat_output_file_path + ".csv", 'w') as output_file:
        csv_writer = csv.DictWriter(output_file,
                                    fieldnames=result2rdf.fieldnames,
                                    quoting=csv.QUOTE_NONNUMERIC)
        csv_writer.writeheader()
        for input_file_path in output_files:
            with open(input_file_path + ".csv", 'r') as input_file:
                csv_reader = csv.DictReader(input_file)
                for row in csv_reader:
                    csv_writer.writerow(row)

    with open(concat_output_file_path + ".json", 'w') as output_file:
        entries = list()
        for input_file_path in output_files:
            with open(input_file_path + ".json", 'r') as input_file:
                json_obj = json.load(input_file)
                entries.append(json_obj)
        output_file.write(
            json.dumps({"benchmarks": entries}, sort_keys=True, indent=4))
    click.echo("Done\n")
    click.echo("Generating plots ...")
    visualize(os.path.join(output_dir, "all_results.json"),
              os.path.join(output_dir, "all_results.csv"))
    click.echo("Done")
Example #30
def choose():
    """ Renders a form for the user to pick a visualization style, also handles
    the request and displays the visualization """
    if request.method == 'POST':
        global visChoice
        visChoice = request.form['visChoice']
        print(visChoice)
        # try:
        global ical
        global dateRange
        vis.visualize(ical, visChoice, dateRange=dateRange)
        # except:
        #     print 'didnt output vis' + str(datetime.datetime.now())
        # theFile= jsonify("vis.json")
        return redirect(url_for('visualize'))
    return render_template('choose.html')
Example #31
def plot_spectro(loa_omegas,
                 loa_omega_names,
                 version_string,
                 expl_string,
                 exclude_eu=False):
    #Plot spectroscopic data for [O/H], [Fe/H], [Eu/H]
    vis_obj = visualize(loa_omegas,
                        loa_omega_names,
                        num_yaxes=3,
                        loa_abu=[],
                        loa_spectro_abu=["[O/H]", "[Fe/H]", "[Eu/H]"])
    vis_obj.add_time_relabu("[O/H]", index_yaxis=0)
    vis_obj.add_time_relabu("[Fe/H]", index_yaxis=1)
    vis_obj.add_time_relabu("[Eu/H]", index_yaxis=2)
    vis_obj.finalize(show=False,
                     save="data/star_parameters_%s_n%d" %
                     (version_string, num_steps))
    #save data from spectroscopic plots
    for i, spectro_string in enumerate(["[O/H]", "[Fe/H]", "[Eu/H]"]):
        save_obj = save_data(loa_omegas)
        save_obj.make_filenames("data/star_parameters_%s_%d_n%d" %
                                (version_string, i, num_steps))
        save_obj.make_explanatory_file(
            "spectroscopic arrays of %s \n%s" % (spectro_string, expl_string),
            ["time"] + loa_omega_names,
            "Applying star-parameters to 'Omega' with mass-parameters determined"
        )
        save_obj.make_numpy_file(["time", spectro_string])
Example #32
def main():
    #    number of measurements
    N = 1000
    
    #    means of random sample
    m = np.zeros(N)

    #    sigmas
    s = np.ones(N)
    
    #    generate random data
    R = generate_data(m, s, 3)
    
    print(R)
    
    visualize(R)
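generate_data is project-specific and not shown; a plausible stand-in consistent with the call generate_data(m, s, 3) above (hypothetical, for illustration only):

import numpy as np

def generate_data(means, sigmas, rows):
    # one row per draw, one column per measurement
    return np.random.normal(means, sigmas, size=(rows, len(means)))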
Example #33
def update_gui_tree():
    from visualize import visualize
    if 'gui_tree' in doc:
        doc['gui_tree'].deparent()
    # Should instead find the right index
    doc['overlay'].append(
        Node("group", children=list(visualize(doc[doc['selection.root']]))))
Example #34
def process_data():
    """Run all processing (analysis, graph generation) on macrogenetic data"""
    raw_graph = graph.import_graph()
    links_analysis = inscription_order.analyse_graph()


    links_graphs = visualize.visualize()
    _write_index_html(links_graphs + links_analysis)
Example #35
def parse_command_line():
    parser = argparse.ArgumentParser(
        description="""Train, validate, and test a face detection classifier that will determine if
        two faces are the same or different.""")
    parser.add_argument("-p", "--prepare-data", help="Prepare training and validation data.",
        action="store_true")
    parser.add_argument("-t", "--train", help="""Train classifier. Use --graph to generate quality
        graphs""", action="store_true")
    parser.add_argument("-g", "--graph", help="Generate training graphs.", action="store_true")
    parser.add_argument("--weights", help="""The trained model weights to use; if not provided
        defaults to the network that was just trained""", type=str, default=None)
    parser.add_argument("--note", help="Adds extra note onto generated quality graph.", type=str)
    parser.add_argument("-s", "--is_same", help="""Determines if the two images provided are the
        same or different. Provide relative paths to both images.""", nargs=2, type=str)
    parser.add_argument("--visualize", help="""Writes out various visualizations of the facial
        images.""", action="store_true")

    args = vars(parser.parse_args())

    if os.environ.get("CAFFE_HOME") is None:
        print("You must set CAFFE_HOME to point to where Caffe is installed. Example:")
        print("export CAFFE_HOME=/usr/local/caffe")
        exit(1)

    # Ensure the random number generator always starts from the same place for consistent tests.
    random.seed(0)

    webface = WebFace()

    if args["prepare_data"] == True:
        webface.load_data()
        webface.pair_data()
    if args["visualize"] == True:
        # TODO: Adapt this to WebFace, not just LFW.
        visualize()
    if args["train"] == True:
        train(args["graph"], data=webface, weight_file=args["weights"], note=args["note"])
    if args["is_same"] != None:
        # TODO: Fill this out once we have a threshold and neural network trained.
        images = args["is_same"]
        predict(images[0], images[1])
Example #36
def main():
    # creates a list of student and course objects in preparation file
    lists = preparation.main()
    student_list = lists[0]
    course_list = lists[1]
    session_list = lists[2]
    room_list = lists[3]

    score_max = 0

    # searches 5 times for a local maximum and visualizes the best score
    for i in range(5):
        print(i)
        # executes simulated annealing
        local_max_schedule = simulated_annealing(student_list, course_list, session_list, room_list)
        schedule_room_list = local_max_schedule[0]
        schedule_student_list = local_max_schedule[1]
        score_list = score_function.main(schedule_room_list, schedule_student_list, course_list)
        score_local_max = score_list[0]

        # when new hillclimber score is better
        if score_local_max > score_max:
            max_schedule = local_max_schedule
            score_max = score_local_max

    data = max_schedule[2]
    # writes data of the schedule to csv
    with open("data_simulated_annealing.csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["Iteration", "Score", "malus_conflict", "malus_capacity", "malus_spread", "bonus_spread"])
        for row in data:
            writer.writerow(row)

    # visualizes room schedules
    schedule_room_list = max_schedule[0]
    schedule_student_list = max_schedule[1]
    for schedule_room in schedule_room_list:
        visualize.visualize(schedule_room)

    # visualize students schedules
    for schedule_student in schedule_student_list[151:152]:
        visualize.visualize(schedule_student)

    for schedule_student in schedule_student_list[155:156]:
        visualize.visualize(schedule_student)
Example #37
# start training
if load: saver.restore(sess, './model')

for step in range(50000):
    batch_x1, batch_y1 = mnist.train.next_batch(128)
    batch_x2, batch_y2 = mnist.train.next_batch(128)
    batch_y = (batch_y1 == batch_y2).astype('float')

    _, loss_v = sess.run([train_step, siamese.loss], feed_dict={
                        siamese.x1: batch_x1,
                        siamese.x2: batch_x2,
                        siamese.y_: batch_y})

    if np.isnan(loss_v):
        print('Model diverged with loss = NaN')
        quit()

    if step % 10 == 0:
        print('step %d: loss %.3f' % (step, loss_v))

    if step % 1000 == 0 and step > 0:
        saver.save(sess, './model')
        embed = siamese.o1.eval({siamese.x1: mnist.test.images})
        embed.tofile('embed.txt')

# visualize result
x_test = mnist.test.images.reshape([-1, 28, 28])
y_test = mnist.test.labels
visualize.visualize(embed, x_test, y_test)
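How the pair labels for the contrastive loss are built can be seen with toy data: matching digit labels give 1.0, mismatches give 0.0 (sketch).

import numpy as np

batch_y1 = np.array([3, 1, 7])
batch_y2 = np.array([3, 4, 7])
print((batch_y1 == batch_y2).astype('float'))   # [1. 0. 1.]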
Example #38
	auth = lg_authority.AuthRoot()
	auth__doc = "The object that serves authentication pages"


	@cherrypy.expose
	def index(self):
		output = ""

		output += getIndexContent()

		output = getPage(output, '')

		return output 

if __name__ == '__main__':

	#cherrypy.config.update({'server.socket_port':index_port})
	cherrypy.config.update(cherry_settings)
	
	index = index()
	index.upload = upload.upload()
	index.manage = manage.manage()
	index.modify = modify.modify()
	index.download = download.download()
	index.learn = learn.learn()
	index.support = support.support()
	index.visualize = visualize.visualize()
	#index.dashboard = dashboard.dashboard()
	cherrypy.quickstart(index)

Example #39
 def visualize(self, filename="graph.png", include_args=True, transitive=False):
     from visualize import visualize
     visualize(self.graph, filename, include_args, transitive)
Example #40
def main(source_data='.jp2',
         time_range=TimeRange('2011/10/01 09:45:00', '2011/10/01 10:15:59'),
         algorithm='hough', feed_directory='~/Data/eitwave/jp2/20111001_jp2/',
         use_pickle=None,diff_type='running',data_savedir=None):
    '''
    This is the main executable for the Automated EUV Wave Analysis and Reduction (AWARE)
    code. The procedure is as follows:
        - Query the HEK to find whether any flares were observed in SDO/AIA 211A during the input time range
        - If yes, then read in (or download) a sequence of solar images corresponding to the time range
        - Transform these images from Helioprojective Coordinates to Heliographic Coordinates,
          with the origin centered at the flare origin
        - Create a sequence of difference maps from these transformed maps
        - Use a threshold method to create a binary map from the difference maps.
        - Apply the Hough Transform to the binary map to search for strong lines in the image
        - Use the results of the Hough Transform to detect whether an EUV wave is present
        - Fit an appropriate function (e.g. Gaussian) to the detected wavefront as a function of longitude
        - Record fit results and return data products
    
    Parameters
    ----------    
    source_data : string
        description of the type of data being input. Allowed values are '.jp2', '.fits', or 'test';
        AWARE will look for Helioviewer JP2 files, FITS files, or load the test data respectively.

    time_range : a TimeRange object
        time range within which to search for EUV waves

    feed_directory : string
        A directory containing data files to be analysed. If set, AWARE will assume data files are
        already downloaded and will search in this directory instead. Assumes that all files in the
        directory with the appropriate extension (e.g. .jp2, .fits) are relevant to the flare detection.

    use_pickle : string
        BUGGED - currently not supported, always set to None

    diff_type : string
        The type of image differencing to use. Allowed values are 'running' or 'base'. Default is 'running'
        Will perform either running differencing or base differencing on the image sequence.

    data_savedir : string
        directory in which to save downloaded jp2 files from Helioviewer. If None, then AWARE will construct a directory
        based on the start time of the query.

    Returns
    -------

    Outputs a pickle file containing the following data products (in order):
        1) a list of maps modelling the detected wavefront, transformed back to original HPC coordinates  
    '''


    if feed_directory is not None:
        feed_directory = os.path.expanduser(feed_directory)

    #Check which type of data is being analysed, and establish the directory to store downloaded files,
    #if appropriate
    if source_data == 'test':
        maps = test_wave2d()
    elif source_data == '.jp2' and data_savedir is None:
        data_savedir = '~/aware_data/jp2/' + time_range.start().strftime('%Y%m%d_%H%M')
    elif source_data == '.fits' and data_savedir is None:
        data_savedir = '~/aware_data/fits/' + time_range.start().strftime('%Y%m%d_%H%M')
            
    if not os.path.exists(os.path.expanduser(data_savedir)):
        os.makedirs(os.path.expanduser(data_savedir))

    # Query the HEK to see whether there were any flares during the time range specified
    # Concentrate on the AIA 211A channel as it has clearest observations of global waves
    client = hek.HEKClient()
    hek_result = client.query(hek.attrs.Time(time_range.t1, time_range.t2),
                              hek.attrs.EventType('FL'),hek.attrs.OBS.ChannelID == '211')
    if hek_result is None:
        # if no flares found, no analysis possible; return
        print('No flares found in HEK database during specified time range.')
        print('No analysis possible. Returning.')
        return None

    # Otherwise, we have found at least one flare
    print('Number of flares found = ' + str(len(hek_result)))

    #assume the first result of the HEK query has the correct information
    for flare in hek_result[0:1]:

        if feed_directory is None:
            print('Acquiring data for flare')
            filelist = aware_utils.acquire_data(data_savedir, source_data,
                                                 flare)
        else:
            # Assumes that the necessary files are already present
            filelist = aware_utils.listdir_fullpath(feed_directory,
                                                     filetype = source_data)

        #filter to only grab the data files with the source_data extn in the directory
        #this looks like duplication of listdir_fullpath
        files_tmp = []
        for f in filelist:
            if f.endswith(source_data):
                files_tmp.append(f)
        files = files_tmp

        # reduce the number of files to those that happen after the flare has
        # started
        files = []
        for f in files_tmp:
            fhv = f.split(os.sep)[-1]
            if aware_utils.hv_filename2datetime(fhv) > \
            parse_time(flare['event_starttime']):
                files.append(f)
        print('Number of files :' + str(len(files)))
        if len(files) == 0:
            print('No files found.  Returning.')
            return None

        # Define the transform parameters
        params = aware_utils.params(flare)

        # read in files and accumulate them
        if use_pickle is not None:
            # load in a pickle file of the data
            pfile = open(feed_directory + use_pickle, 'rb')
            a = pickle.load(pfile)
            maps = a[0]
            new_maps = a[1]
            diffs = a[2]
            pfile.close()
        else:
            maps = aware_utils.accumulate(files[6:30], accum=1, nsuper=4,
                                   verbose=True)

            #temporary fix for exposure control and S/N changes
            long_maps = []
            for m in maps:
                if m.exposure_time > 2.0:
                    long_maps.append(m)
            maps = long_maps

            # Unravel the maps
            new_maps = aware_utils.map_unravel(maps, params, verbose=True)
            #return new_maps

            #sometimes unravelling maps leads to slight variations in the unraveled
            #image dimensions.  check dimensions of maps and resample to dimensions
            #of first image in sequence if need be.
            #new_maps[0].peek()
            new_maps = aware_utils.check_dims(new_maps)

            # calculate the differences
            if diff_type == 'base':
                diffs = aware_utils.map_basediff(new_maps)
            else:
                diffs = aware_utils.map_diff(new_maps)


        #generate persistence maps - currently bugged, so skip this step
        #persistence_maps = eitwaveutils.map_persistence(diffs)
        persistence_maps = []

        #determine the threshold to apply to the difference maps.
        #diffs > diff_thresh will be 1, otherwise 0.
        threshold_maps = aware_utils.map_threshold(new_maps, factor=0.2)
        #return threshold_maps

        # transform difference maps into binary maps
        binary_maps = aware_utils.map_binary(diffs, threshold_maps)

        if algorithm == 'hough':
            # detection based on the hough transform
            detection = aware_utils.hough_detect(binary_maps, vote_thresh=10)
        elif algorithm == 'prob_hough':
            # detection based on the probabilistic hough transform.  Takes the
            # keywords of the probabilistic hough transform - see the documentation
            # of skimage.transform.probabilistic_hough (scikit-image.org)
            detection = aware_utils.prob_hough_detect(binary_maps, threshold=10)

        # Remove areas that are too small or that don't have enough detections
        detection = aware_utils.cleanup(detection,
                                         size_thresh=50,
                                         inv_thresh=8)

        detection_maps = copy.deepcopy(binary_maps)
        for i in range(len(detection)):
            detection_maps[i].data = detection[i]
        #If there is anything left in 'detection', fit a function to the original
        #diffmaps in the region defined by 'detection'. Currently fits a
        #Gaussian in the y-direction for each x
        #use 'detection' to guess starting fit parameters.

        #get just the positive elements of the difference map. Perform fitting on
        #these positive diffmaps.
        posdiffs = copy.deepcopy(diffs)
        for i in range(0, len(diffs)):
            temp = diffs[i].data < 0
            posdiffs[i].data[temp] = 0

        #fit a function to the difference maps in the cases where there has been a
        #detection
        fitparams, wavefront = aware_utils.fit_wavefront(posdiffs, detection)

        #transform the detected model wavefront back into heliocentric coordinates so it can be overlayed
        wavefront_hc = aware_utils.map_reravel(wavefront, params, verbose=True)

        #strip out the velocity information from the wavefront fitting
        velocity = aware_utils.wavefront_velocity(fitparams)

        #strip out the position and width information from the wavefront fitting
        pos_width = aware_utils.wavefront_position_and_width(fitparams)

        #now save products we have created in a pickle file for future reference
        #Will save output in ~/aware_results
        extn = time_range.start().strftime('%Y%m%d_%H%M')
        save_path = os.path.expanduser('~/aware_results/')
        save_file = 'aware_results_' + extn + '.pickle'

        if not os.path.exists(save_path):
            os.makedirs(save_path)

        output = open(save_path + save_file, 'wb')
        print('Saving result products to: ' + save_path + save_file)

        pickle.dump(wavefront_hc, output)
        output.close()

        #visualize the model wavefront
        visualize(wavefront_hc)
        
    return maps, new_maps, diffs, threshold_maps, binary_maps, detection_maps, wavefront, velocity, pos_width, persistence_maps, wavefront_hc
Example #41
    "hpcx_min": -1228.8,
    "hpcx_max": 1228.8,
    "hpcx_bin": 2.4,
    "hpcy_min": -1228.8,
    "hpcy_max": 1228.8,
    "hpcy_bin": 2.4
}

#wave_maps = wave2d.simulate(params)
wave_maps = wave2d.simulate(params, verbose=True)

#To get simulated HG' maps (centered at wave epicenter):
#wave_maps_raw = wave2d.simulate_raw(params)
#wave_maps_raw_noise = wave2d.add_noise(params, wave_maps_raw)

visualize(wave_maps)

"""
import util

new_wave_maps = []

for wave in wave_maps:
    print("Unraveling map at "+str(wave.date))
    new_wave_maps += [util.map_hpc_to_hg_rotate(wave, epi_lon = 45., epi_lat = 30., xbin = 5, ybin = 0.2)]


from matplotlib import colors

wave_maps_raw = wave2d.simulate_raw(params)
wave_maps_transformed = wave2d.transform(params, wave_maps_raw, verbose = True)
Example #42
def user_interface():
    """
    Prompts for stock data input and offers options to perform calculations
    on data: best six, worst six, all months' average stock prices with best
    and worst visually indicated, and comparing two company's standard
    deviation of monthly average price.

    :inputs: company_name: String. Name of a stock company.
    :inputs: company_file: String. Input JSON file name and file location
    of the stock company relative to mining.py
    :inputs: option: integer between 1 and 5
    :inputs: second_company_name: String. Name of a second stock company.
    :inputs: second_company_file: String. Input JSON file name and file
    location of the second stock company relative to mining.py

    :returns: If option 1 is selected, returns the best six months of average
    stock price in a list of tuples, in the format ('YYYY/MM', 111.11).

    If option 2 is selected, returns the worst six months of average
        stock price in a list of tuples, in the format ('YYYY/MM', 111.11).

    If option 3 is selected returns first company's monthly average stock
        price appears on a separate line in the format "YYYY/MM 111.11" with
        " <<< One of the Best Months" to the right if it is one of the top
        six months, or " <<< One of the Worst Months" to the right if it is
        one of the bottom six months.

    If option 4 is selected prints one of three options:
        1. stock_one_name + " has the highest standard deviation!" or
        2. stock_one_name + " and " + stock_two_name + " have the same
                standard deviation!" or
        3. stock_two_name + " has the highest standard deviation!"

    If an option outside of 1 to 5 is entered, an invalid-option prompt is
    printed and the menu is shown again.
    """
    company_name = input("Company name: ")
    while True:
        try:
            company_file = input("Company stock data file "
                                 "(i.e., data\\GOOG.json): ")
            read_stock_data(company_name, company_file)
            break
        except:
            print("Invalid Input.")
    visit = True
    while visit:
        print("-"*50)
        print("Choose your option # (i.e., 1)")
        print("1. Display the best six months' average stock prices")
        print("2. Display the worst six months' average stock prices")
        print("3. Display all months' average stock prices with best and " +
              "worst six")
        print("4. Display comparison of two companies' "
              "monthly average price " + "standard deviation")
        print("5. Exit")
        option = (input("Choose: "))
        if option == "1":
            print(six_best_months())
        elif option == "2":
            print(six_worst_months())
        elif option == "3":
            visualize(company_name, company_file)
        elif option == "4":
            second_company_name = input("Second Company name: ")
            while True:
                try:
                    second_company_file = input("Second Company file: ")
                    print(compare_two_stocks(company_name, company_file,
                                             second_company_name,
                                             second_company_file))
                    break
                except:
                    print("Invalid Input.")
        elif option == "5":
            visit = False
        else:
            print("Invalid option selected, please choose again from the \
                following options: ")
Example #43
  p.add_argument('--usage', nargs='+', default=[], help='search for usages in all project files')
  p.add_argument('--graph', nargs='*', default=None, help='generate a project dependency graph')
  p.add_argument('--pyplot', action='store_true', help='use pyplot for the project dependency graph')
  args = p.parse_args()
  if not (args.grep or args.defn or args.usage) and args.graph is None:
    p.error('None of the options specified, nothing to do.')
  if args.pyplot and not args.graph:
    p.error('--pyplot specified without --graph')
  return args


def find_files(rootdir):
  ff = {}
  for dirname, subdirs, files in os.walk(rootdir):
    # TODO: have a directory blacklist argument
    if '.svn' in subdirs:
      subdirs.remove('.svn')
    for f in files:
      # TODO: have a filename criteria argument
      if re.search(r'\.(m|c(pp)?|h(pp)?)$', f):
        ff[f.lower()] = os.path.join(dirname, f)
  return ff


if __name__ == '__main__':
  args = parse_args()
  files = find_files(args.project_root)
  search_patterns(files, args)
  if args.graph is not None:
    visualize(files, args)
Example #44
    tmp_file = open(tmp_file_name, 'w')
    tmp_file.write(input_data)
    tmp_file.close()

    # Runs the command: ./tsp.bin tmp_file_name

    process = Popen(['./tsp.bin', tmp_file_name], stdout=PIPE)
    (stdout, stderr) = process.communicate()

    # removes the temporary file
    os.remove(tmp_file_name)

    return stdout.strip()
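On Python 3 the Popen/communicate pair above is usually written with subprocess.run; a sketch, assuming ./tsp.bin prints its solution to stdout:

import subprocess

result = subprocess.run(['./tsp.bin', tmp_file_name],
                        capture_output=True, text=True, check=True)
solution = result.stdout.strip()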


import sys

if __name__ == '__main__':
    if len(sys.argv) > 1:
        file_location = sys.argv[1].strip()
        input_data_file = open(file_location, 'r')
        input_data = ''.join(input_data_file.readlines())
        input_data_file.close()
        toPrint = solve_it(input_data)
        visualize(input_data, toPrint.split('\n')[1].split(' '))
        print "Solution:"
        print toPrint
    else:
        print 'This test requires an input file.  Please select one from the data directory. (i.e. python solver.py ./data/tsp_51_1)'

Example #45
File: run.py Project: heechul/picar
    cap = cv2.VideoCapture(vid_path)

    machine_steering = []
    timeArr = []

    print('performing inference...')
    time_start = time.time()
    for frame_id in range(frame_count):
        ret, img = cap.read()
        assert ret

        img = preprocess.preprocess(img)

        frameTime = time.time() #Get the start time of the angle calculation
        deg = model.y.eval(feed_dict={model.x: [img], model.keep_prob: 1.0})[0][0]
        timeArr.append(time.time() - frameTime) #Add angle calculation time to time array

        machine_steering.append(deg)

    cap.release()

    fps = frame_count / (time.time() - time_start)

    print('completed inference, total frames: {}, average fps: {} Hz'.format(frame_count, round(fps, 1)))

    print('performing visualization...')
    startTime = time.time()
    visualize.visualize(epoch_id, machine_steering, params.out_dir, timeArr,
                        verbose=True, frame_count_limit=None)
    print("Visualization took a total of %i seconds" % (time.time() - startTime))
Example #46
#!/usr/bin/env python
# -*- coding: utf-8 -*- 

import numpy as np
from scipy.io import loadmat
import sys
import shelve
sys.path.append('../')
from encoder import Sparse
import encoder
import visualize

if __name__ == '__main__':

    # create new sparse encoder
    s = Sparse(nodes=25)
    # load data
    data_mat = loadmat('patch')
    d = data_mat['patches']
    # train sparse encoder
    encoder.fit(s, d, epochs=600)
    filename = 'persistence.data'
    f = shelve.open(filename)
    f['autoencoder'] = s
    f.close()
    visualize.visualize(s.theta1)
Example #47
    def onReadCmd(self, event):
        """ reads text from InputBox, passes it asa command to Liggghts, updates the display panel,
        and finally clears InputBox"""

        command = self.InputTxt.GetValue()
        
        if len(command.split()) == 1:
            if command == 'clc':
                self.clearDisplayPanel()

            elif command == 'whos':
                for item in self.loadedVars:
                    self.UpdateDisplayPanel(item, type(self.loadedVars[item]))
            elif command == 'visualize':
                self.onVisualize()

            else:
                self.UpdateDisplayPanel('Unknown command')

        else:
            method, var = command.split(' ', 1)

            if method == 'plot':
                if var not in self.loadedVars:
                    self.UpdateDisplayPanel('Error: could not find {}'.format(var))
                else:
                    self.UpdateDisplayPanel('Generating plot ...')

                    try:
                        # var is guaranteed to be in loadedVars here (checked above)
                        if var == 'mesh':
                            visualize.visualize(meshFname=self.loadedVars[var])
                        elif var == 'mesh and particles':
                            try:
                                visualize.visualize(meshFname=self.loadedVars['mesh'],
                                                    dumpFname=self.loadedVars['mesh and particles'])
                            except Exception:
                                self.UpdateDisplayPanel('Plot failed with unexpected error: {}'.format(sys.exc_info()[0]))
                        else:
                            pass  # make 2D plot
                    except Exception:
                        pass

            elif method == 'run':
                if self.__selEngine__:
                    # try to call a DEM-engine method based on user-supplied cmds
                    try:
                        self._module.command('{}'.format(var))
                    except:
                        self.UpdateDisplayPanel('Unexpected error: {}'.format(sys.exc_info()[0]))
                else:
                    self.UpdateDisplayPanel('No engine selected. Make sure an available DEM engine is installed.')
            elif method == 'unix':
                try:
                    output = os.popen(var).read()
                    self.UpdateDisplayPanel(output)
                except:
                    self.UpdateDisplayPanel('Unexpected error: {}'.format(sys.exc_info()[0]))
            else:   
                self.UpdateDisplayPanel('Unknown command')

        self.InputTxt.Clear()

Example No. 48
# The original snippet begins mid-script; the imports below are assumptions
# reconstructed from the calls that follow.
import os
import pickle

import cv2
import numpy as np

import visualize
data = pickle.load(open(os.path.join(dir, pkl_fn), "rb"))
face, eye0, eye1 = pickle.load(open(os.path.join(dir, clm_pkl_fn), "rb"))

offset = [0, -2, 10, 1]
eyeball_offset_clm = face.get_rotation_transform().dot(np.array(offset))
eyeball_3d_pos_clm = (np.array(face.pts_3d[42])+np.array(face.pts_3d[45]))/2.0 + eyeball_offset_clm[:3]

true_iris_centre_3d = np.mean(data["ldmks_iris_3d"], axis=0)*100

cam_mat = np.array([[749.9999,   0.0000,   400.0000],
                    [0.0000,   749.9999,   300.0000],
                    [0.0000,     0.0000,   1.0000]])

coord_swap = np.array([[1,0,0],[0,-1,0],[0,0,-1]])

visualize.visualize(face.pts_3d)
visualize.visualize([coord_swap.dot(np.array(data["eye_centre_3d"])*100)])
visualize.visualize([eyeball_3d_pos_clm], radius=12)

# VISUALIZE IN 2D BEFORE OPTIMIZING

for pt in face.pts_2d:
    cv2.circle(img, tuple([int(p) for p in pt]), 3, 0, -1)
    cv2.circle(img, tuple([int(p) for p in pt]), 2, (255, 255, 255), -1)
cv2.polylines(img, np.array([data["ldmks_lids_2d"]], int), True, 255)
cv2.imshow("TEST", img)
cv2.waitKey(1)

#
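# cv2.fitEllipse returns ((centre_x, centre_y), (width, height), angle in
# degrees); the unpacking below names the centre, axes and rotation of the
# fitted iris ellipse.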

(e_x, e_y), (e_w, e_h), e_tht = cv2.fitEllipse(np.array(data["ldmks_iris_2d"], dtype=int))
Example No. 49
    # in the accumulator
    #indices = ((transform == transform.max()) + (transform2 == transform2.max())).nonzero()

    indices = ((transform > votethresh) + (transform2 > votethresh)).nonzero()
    distances = d[indices[0]]
    theta = theta[indices[1]]
    n = len(indices[1])

    print("Found " + str(n) + " lines.")

    # Perform the inverse transform to get a series of rectangular
    # images that show where the wavefront is.
    invTransform = sunpy.make_map(np.zeros(imgShape), input_maps[i+1]._original_header)
    #invTransform.data = np.zeros(imgShape)

    # use j here so the outer index i (used by input_maps above) is not clobbered
    for j in range(0, n):
        nextLine = htLine(distances[j], theta[j], np.zeros(shape=imgShape))
        invTransform = invTransform + nextLine

    # Dump the inverse transform back into a series of maps
    detection.append(invTransform)

visualize(detection)
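
# htLine is defined elsewhere in this project. A plausible sketch of what it
# does: mark every pixel lying on the line x*cos(theta) + y*sin(theta) = d
# in an accumulator-shaped image (the name htLine_sketch is an assumption).
def htLine_sketch(distance, angle, img):
    ys, xs = np.mgrid[0:img.shape[0], 0:img.shape[1]]
    on_line = np.abs(xs * np.cos(angle) + ys * np.sin(angle) - distance) < 1.0
    img[on_line] = 1
    return img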




Example No. 50
# set to False below if you just want to load a previously trained model
new = True
model_ckpt = 'model.ckpt'
if os.path.isfile(model_ckpt):
    input_var = None
    while input_var not in ['yes', 'no']:
        input_var = input("We found model.ckpt file. Do you want to load it [yes/no]?")
    if input_var == 'yes':
        new = False

# start training
if new:
    # visualize the test-set embedding before training
    x_test = mnist.test.images.reshape([-1, 28, 28])
    embed = siamese.o1.eval({siamese.x1: mnist.test.images})
    visualize.visualize(embed, x_test, 'pretrain.png')

    for step in range(100):
        batch_x1, batch_y1 = mnist.train.next_batch(128)
        batch_x2, batch_y2 = mnist.train.next_batch(128)
        batch_y = (batch_y1 == batch_y2).astype('float')

        _, loss_v = sess.run([train_step, siamese.loss], feed_dict={
                            siamese.x1: batch_x1, 
                            siamese.x2: batch_x2, 
                            siamese.y_: batch_y})

        if np.isnan(loss_v):
            print('Model diverged with loss = NaN')
            quit()
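
# siamese.loss is defined in the accompanying model file, which is not shown.
# A hedged sketch of a contrastive loss of the kind such siamese setups
# commonly use (the margin value is an assumption; assumes
# import tensorflow as tf):
def contrastive_loss(o1, o2, y, margin=5.0):
    d2 = tf.reduce_sum(tf.square(o1 - o2), 1)  # squared pair distance
    d = tf.sqrt(d2 + 1e-6)
    pos = y * d2                               # pull similar pairs together
    neg = (1.0 - y) * tf.square(tf.maximum(margin - d, 0.0))  # push others apart
    return tf.reduce_mean(pos + neg) / 2.0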
Example No. 51
print(res3)




x3 = np.where(a[0] != 0, 1, a[0])
y3 = np.where(b[0] != 0, 1, b[0])
N = y3.sum()
z3 = np.multiply(x3, y3)
n = z3.sum()
res4 = n / N
print(res4)

t = (res + res2 + res3 + res4)
f = t / 4
print(f)
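
# The four blocks above repeat the same computation. A hedged refactor; the
# pred/truth parameter names are assumptions:
def overlap_ratio(pred, truth):
    # fraction of nonzero truth pixels that are also nonzero in pred
    p = (pred != 0)
    t = (truth != 0)
    return (p & t).sum() / float(t.sum())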

'''visualize(detection)
visualize(detection2)
visualize(diffs)
visualize(diffs2)'''


Example No. 52
# Let Python search for modules in the parent directory
import os, sys
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(CURRENT_DIR)
sys.path.append(PARENT_DIR)

from missionplanner import plan_complete_coverage_mission
import testfields
import visualize


if __name__ == '__main__':
    testfield = testfields.test1 # choose any test available in
                                 # testfields.py
    visualization_data, mission = plan_complete_coverage_mission(
                                           testfield)

    visualize.visualize(visualization_data)
Example No. 53
    n = len(indices[1])
    print("Found " + str(n) + " lines.")

    # Perform the inverse transform to get a series of rectangular
    # images that show where the wavefront is.
    invTransform = sunpy.map.BaseMap(input_maps[i+1])
    invTransform.data = np.zeros(imgShape)
    # use j here so the outer index i (used by input_maps above) is not clobbered
    for j in range(0, n):
        nextLine = htLine(distances[j], theta[j], np.zeros(shape=imgShape))
        invTransform = invTransform + nextLine

    # Dump the inverse transform back into a series of maps
    detection.append(invTransform)


visualize(diffs)
visualize(detection)

from matplotlib import cm
from matplotlib import colors

#wmap = sunpy.make_map(input_maps[max_steps/2], wave_maps[0], type = "composite")
#wmap.set_colors(1, cm.Reds)
#wmap.set_alpha(1,0.1)
#wmap.set_norm(1, colors.Normalize(0.1,1))
#wmap.show()

#pmap = sunpy.make_map(detection[max_steps/2],input_maps[max_steps/2], type ="composite")
#pmap.set_alpha(1,0.6)
#pmap.set_colors(0, cm.Blues)
#pmap.set_colors(1, cm.Reds)
Example No. 54
def main(source_data='.fits',
         time_range=TimeRange('2011/10/01 09:45:00', '2011/10/01 10:15:59'),
         algorithm='hough', feed_directory='/home/hayesla/fits/data/',
         use_pickle=None,diff_type='running'):
    '''
source_data : { '.jp2' | '.fits' | 'test' }
look for Helioviewer JP2 files, FITS files, or load the test data,
respectively

time_range : a TimeRange object
time range that is searched for EUV waves

feed_directory
If set to a string, look in this directory for the JP2 files. Assumes that
all the JP2 files are relevant to the flare detection.

algorithm : { 'hough' | 'prob_hough' }
algorithm used to find the wave
'''
    if feed_directory is not None:
        feed_directory = os.path.expanduser(feed_directory)

    if source_data == 'test':
        maps = test_wave2d()
    elif source_data == '.fits':
        # where to store the downloaded data
        data_storage = "/home/hayesla/fits/"
        if not os.path.exists(os.path.expanduser(data_storage)):
            os.makedirs(os.path.expanduser(data_storage))

    # Query the HEK for flare information we need
    client = hek.HEKClient()
    hek_result = client.query(hek.attrs.Time(time_range.t1, time_range.t2),
                              hek.attrs.EventType('FL'))
    #hek.attrs.FRM.Name == '')
    if hek_result is None:
        # no flares, no analysis possible
        return None

    # Flares!
    print('Number of flares found = ' + str(len(hek_result)))

    for flare in hek_result[10:11]:

        if feed_directory is None:
            print('Acquiring data for flare')
            filelist = eitwaveutils.acquire_data(data_storage, source_data,
                                                 flare)
        else:
            # Assumes that the necessary files are already present
            filelist = eitwaveutils.listdir_fullpath(feed_directory,
                                                     filetype='fits')

        # filter to only grab the data files with the source_data extension
        files_tmp = []
        for f in filelist:
            if f.endswith(source_data):
                files_tmp.append(f)
        files = files_tmp

        # reduce the number of files to those that happen after the flare has
        # started
        #files = []
        #for f in files_tmp:
         #   fhv = f.split(os.sep)[-1]
         #   if eitwaveutils.hv_filename2datetime(fhv) > \
         #   parse_time(flare['event_starttime']):
         #       files.append(f)
        #print('Number of files :' + str(len(files)))
        #if len(files) == 0:
            #print('No files found. Returning.')
            #return None

        # Define the transform parameters
        # params = eitwaveutils.params(flare='test')
        params = eitwaveutils.params(flare)

        # read in files and accumulate them
        if use_pickle is not None:
            # load in a pickle file of the data
            pfile = open(feed_directory + use_pickle, 'rb')
            a = pickle.load(pfile)
            maps = a[0]
            new_maps = a[1]
            diffs = a[2]
            pfile.close()
        else:
            maps = eitwaveutils.accumulate(files[0:30], accum=1, nsuper=4,
                                           verbose=True)

            # temporary fix for exposure control and S/N changes
            long_maps = []
            for m in maps:
                if m.exposure_time > 2.0:
                    long_maps.append(m)
            maps = long_maps
            for i in range(len(maps)):
                maps[i] = np.sqrt(maps[i] + abs(maps[i].min()) + 1)

            # Unravel the maps
            new_maps = eitwaveutils.map_unravel(maps, params, verbose=True)

            #sometimes unravelling maps leads to slight variations in the unraveled
            #image dimensions. check dimensions of maps and resample to dimensions
            #of first image in sequence if need be.
            new_maps = eitwaveutils.check_dims(new_maps)

            # calculate the differences
            if diff_type == 'base':
                diffs = eitwaveutils.map_basediff(new_maps)
            else:
                diffs = eitwaveutils.map_diff(new_maps)

            # save the output
            #output = open(feed_directory + 'maps.pkl', 'wb')
            ##pickle.dump([maps, new_maps, diffs], output, protocol=0)
            #output.close()

        # Unravel the maps
        #new_maps = eitwaveutils.map_unravel(maps, params, verbose=True)

        #sometimes unravelling maps leads to slight variations in the unraveled
        #image dimensions. check dimensions of maps and resample to dimensions
        #of first image in sequence if need be.
        #new_maps = eitwaveutils.check_dims(new_maps)

        # calculate the differences
        #diffs = eitwaveutils.map_diff(new_maps)

        #generate persistence maps
        #persistence_maps = eitwaveutils.map_persistence(diffs)

        #determine the threshold to apply to the difference maps.
        #diffs > diff_thresh will be True, otherwise False.
        threshold_maps = eitwaveutils.map_threshold(new_maps, factor=0.2)

        # transform difference maps into binary maps
        binary_maps = eitwaveutils.map_binary(diffs, threshold_maps)

        if algorithm == 'hough':
            # detection based on the hough transform
            detection = eitwaveutils.hough_detect(binary_maps, vote_thresh=10)
        elif algorithm == 'prob_hough':
            # detection based on the probabilistic hough transform. Takes the
            # keywords of the probabilistic hough transform - see the documentation
            # of skimage.transform.probabilistic_hough (scikit-image.org)
            detection = eitwaveutils.prob_hough_detect(binary_maps, threshold=10)

        # Remove areas that are too small or that don't have enough detections
        detection = eitwaveutils.cleanup(detection,
                                         size_thresh=50,
                                         inv_thresh=8)

        #If there is anything left in 'detection', fit a function to the original
        #diffmaps in the region defined by 'detection'. Simplest case: fit a
        #Gaussian in the y-direction for some x or range of x.
        #eitwaveutils.fit_wavefront should probably take the arguments of fitfunc.
        #use 'detection' to guess starting fit parameters?

        #get just the positive elements of the difference map. Perform fitting on
        #these positive diffmaps.
        '''posdiffs = copy.deepcopy(diffs)
        for i in range(0, len(diffs)):
            temp = diffs[i] < 0
            posdiffs[i][temp] = 0

        #fit a function to the difference maps in the cases where there has been a
        #detection
        wavefront = eitwaveutils.fit_wavefront(posdiffs, detection)

        #strip out the velocity information from the wavefront fitting
        velocity = eitwaveutils.wavefront_velocity(wavefront[0])

        #strip out the position and width information from the wavefront fitting
        pos_width = eitwaveutils.wavefront_position_and_width(wavefront[0])'''

        visualize(detection)

    return maps, new_maps, diffs, threshold_maps, binary_maps, detection
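
# A hedged usage sketch matching the defaults documented above; the paths are
# the ones hard-coded in this example.
if __name__ == '__main__':
    results = main(source_data='.fits',
                   feed_directory='/home/hayesla/fits/data/',
                   algorithm='hough', diff_type='running')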
Example No. 55
    # HPC grid; probably only the bin sizes would need changing
    "hpcx_min": -1228.8,
    "hpcx_max": 1228.8,
    "hpcx_bin": 2.4,
    "hpcy_min": -1228.8,
    "hpcy_max": 1228.8,
    "hpcy_bin": 2.4
}

wave_maps = wave2d.simulate(params, verbose=True)

# To get simulated HG' maps (centered at wave epicenter):
wave_maps_raw = wave2d.simulate_raw(params)
wave_maps_raw_noise = wave2d.add_noise(params, wave_maps_raw)

visualize(wave_maps)

import util

new_wave_maps = []

for wave in wave_maps:
    print("Unraveling map at " + str(wave.date))
    new_wave_maps += [util.map_hpc_to_hg_rotate(wave,
                                                epi_lon=params.get('epi_lon'),
                                                epi_lat=params.get('epi_lat'),
                                                xbin=5, ybin=0.2)]

input_maps = new_wave_maps

#wave_maps = wave2d.simulate(params, verbose = True)
#visualize(wave_maps)
#
# Use Albert's wavemaps to test the hough transform as a means