Example #1
# -*- coding: utf-8 -*-
"""Example for text wrapping animation
"""
from __future__ import unicode_literals
import os
import sys
import time

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from halo import Halo

spinner = Halo(
    text='This is a text that is too long. In fact, it exceeds the eighty-column '
         'standard terminal width, which forces the text frame renderer to add an '
         'ellipsis at the end of the text. This should definitely make it more than 180!',
    spinner='dots',
    animation='marquee')

try:
    spinner.start()
    time.sleep(15)
    spinner.succeed('End!')
except (KeyboardInterrupt, SystemExit):
    spinner.stop()
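Besides 'marquee', the animation option also accepts 'bounce' (exercised by the animation setter test in Example #22); a minimal sketch:

spinner = Halo(text='Another overly long line of text', spinner='dots', animation='bounce')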
Example #2
import time

from halo import Halo

SPINNER_MESSAGE = 'something'

spinner = Halo(text='trying to live', spinner='dots')
spinner.start()
# succeed() and fail() stop the spinner and persist a status symbol with the text
spinner.succeed(text='doing things')
time.sleep(1)
spinner.fail(text=SPINNER_MESSAGE)
time.sleep(5)
spinner.stop()  # already stopped by fail(); harmless no-op

Example #3
# -*- coding: utf-8 -*-
"""Example for context manager
"""
import os
import sys
import time

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from halo import Halo

with Halo(text='Loading', spinner='dots'):
    # Run time-consuming work here
    time.sleep(4)

with Halo(text='Loading 2', spinner='dots'):
    # Run time-consuming work here
    time.sleep(4)
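For reference, Halo's context manager simply starts the spinner on entry and stops it on exit, so the with-block above is equivalent to:

spinner = Halo(text='Loading', spinner='dots')
spinner.start()
try:
    time.sleep(4)  # run time-consuming work here
finally:
    spinner.stop()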
Example #4
    parser.add_argument('--train_steps',
                        default=1000,
                        type=int,
                        help='number of training steps')
    parser.add_argument('--train_file',
                        default="./mfcc/mfcc_training.csv",
                        type=str,
                        help='mfcc training file (csv)')
    parser.add_argument('--test_file',
                        default="./mfcc/mfcc_training.csv",
                        type=str,
                        help='mfcc test file (csv)')

    args = parser.parse_args(sys.argv[1:])

    print('- loading classifier')
    classifier = emos_classifier.Classifier()

    if args.predict:
        print('- predicting file:', args.predict)
        predictions = classifier.predict(args.predict, args.batch_size)
        print('predictions:', predictions)

    if args.train:
        print('- training data..')
        print('  batch size:', args.batch_size)
        print('  train steps:', args.train_steps)
        with Halo():
            classifier.train(batch_size=args.batch_size,
                             steps=args.train_steps)
Example #5
def halo_wrapper(**kwargs):
    return null_context() if env['user_config']['bare_output'] else Halo(**kwargs)
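null_context and env are defined elsewhere in this project; a minimal stand-in for null_context, assuming Python 3.7+ where contextlib.nullcontext is available:

from contextlib import nullcontext

def null_context():
    # No-op context manager used when bare (spinner-free) output is configured.
    return nullcontext()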
Example #6
def find_best_parameters_for_cutoff(space_list, data, oracle_scores,
                                    space_allocations, n_workers, save_folder,
                                    save_file):

    spinner = Halo(text='Finding optimal parameters for cutoff count sketch',
                   spinner='dots')
    spinner.start()

    # figure out best cutoff threshold for
    # each error metric
    best_cutoff_thresh_for_space_weighted = []
    best_cutoff_thresh_for_space_absolute = []
    best_cutoff_thresh_for_space_relative = []

    for i, test_space in enumerate(space_allocations):
        test_space_cs = []
        test_params_cutoff_thresh = []

        # test all combinations
        for test_cutoff_frac in CUTOFF_FRAC_TO_TEST:
            # combination of parameters to test
            cutoff_thresh = int(
                (test_cutoff_frac * test_space) / CUTOFF_SPACE_COST_FACTOR)
            test_params_cutoff_thresh.append(cutoff_thresh)
            test_space_post_cutoff = int(test_space - cutoff_thresh *
                                         CUTOFF_SPACE_COST_FACTOR)
            test_space_cs.append(int(test_space_post_cutoff))

        logger.info("Learning best parameters for space setting...")
        start_t = time.time()

        test_cutoff_predictions = []
        with get_context("spawn").Pool() as pool:
            test_cutoff_predictions = pool.starmap(
                run_cutoff_count_sketch,
                zip(repeat(data), repeat(oracle_scores), repeat(oracle_scores),
                    test_space_cs, test_params_cutoff_thresh,
                    repeat(test_space)))
            pool.close()
            pool.join()

        losses_weighted = [
            np.sum(np.abs(data - predictions) * data)
            for predictions in test_cutoff_predictions
        ]
        losses_absolute = [
            np.sum(np.abs(data - predictions))
            for predictions in test_cutoff_predictions
        ]
        losses_relative = [
            np.sum(np.abs(data - predictions) / data)
            for predictions in test_cutoff_predictions
        ]

        best_loss_idx_weighted = np.argmin(losses_weighted)
        best_loss_idx_absolute = np.argmin(losses_absolute)
        best_loss_idx_relative = np.argmin(losses_relative)

        cutoff_thresh_weighted = test_params_cutoff_thresh[
            best_loss_idx_weighted]
        cutoff_thresh_absolute = test_params_cutoff_thresh[
            best_loss_idx_absolute]
        cutoff_thresh_relative = test_params_cutoff_thresh[
            best_loss_idx_relative]

        logger.info('Found optimal params for %.1f MB (took %.1f sec)' %
                    (4 * test_space / 1e6, time.time() - start_t))

        best_cutoff_thresh_for_space_weighted.append(cutoff_thresh_weighted)
        best_cutoff_thresh_for_space_absolute.append(cutoff_thresh_absolute)
        best_cutoff_thresh_for_space_relative.append(cutoff_thresh_relative)

    spinner.stop()
    np.savez(os.path.join(save_folder, save_file),
             space_list=space_list,
             best_cutoff_thresh_for_space_weighted=best_cutoff_thresh_for_space_weighted,
             best_cutoff_thresh_for_space_absolute=best_cutoff_thresh_for_space_absolute,
             best_cutoff_thresh_for_space_relative=best_cutoff_thresh_for_space_relative)
Example #7
# Python 2 snippet: uses raw_input and urllib2
import csv
import urllib2

from bs4 import BeautifulSoup
from halo import Halo

csvfile = csv.writer(open('reddit.csv', 'w'))
csvfile.writerow(["topic", "url", "votes"])
pages = int(raw_input("enter number of pages to scrape: "))
url = 'https://www.reddit.com/'
i = 1
while pages > 0:
    request = urllib2.Request(url)
    request.add_header(
        'User-Agent',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0'
    )
    myurlopener = urllib2.build_opener()
    myurl = myurlopener.open(request)
    spinner = Halo(text="Processing Page", spinner="dots")
    spinner.start()
    myurldata = myurl.read()
    soup = BeautifulSoup(myurldata, 'lxml')
    for choice in soup.find_all(
            'div', class_='thing'
    ):  # find the div containing data and iterate over it
        topicName = choice.find('p', class_='title').a.text.encode(
            'utf-8')  # get the topic name
        topicUrl = choice.find('p', class_='title').a.get('href').encode(
            'utf-8')  # get the url of the topic
        votes = choice.find('div', class_='score unvoted').text.encode(
            'utf-8')  # get the number of votes on topic
        topic = choice.find('a', class_="subreddit hover may-blank").text[
            2:]  # get the subreddit topic name
        if choice.find('p', class_='title').a.get('href').startswith(
Example #8
 def test_id_not_created_before_start(self):
     """Test Spinner ID not created before start.
     """
     spinner = Halo(stream=self._stream)
     self.assertEqual(spinner.spinner_id, None)
Example #9
 def test_spinner_interval_default(self):
     """Test proper assignment of the default interval value.
     """
     spinner = Halo()
     self.assertEqual(spinner._interval, default_spinner['interval'])
Example #10
import requests
import click
import pandas as pd

from halo import Halo
from pandas.io.json import json_normalize

spinner = Halo(text='Loading', spinner='dots', text_color='magenta')
url = "https://ah-django-staging.herokuapp.com/api"


@click.group()
def main():
    """
        Simple CLI for consuming Authors Haven App 😍
    """


@main.command()
@click.option("--csv")
@click.argument("slug")
def get(slug, csv):
    """
        Return the article matching the given slug from the Authors Haven API.
    """
    url_format = url + "/articles/{}/"
    click.echo(slug)

    spinner.start()
    response = requests.get(url_format.format(slug))
    spinner.stop()
Example #11
 def test_context_manager_exceptions(self):
     """Test Halo context manager allows exceptions to bubble up
     """
     with self.assertRaises(SpecificException):
         with Halo(text='foo', spinner='dots', stream=self._stream):
             raise SpecificException
Example #12
# -*- coding: utf-8 -*-
"""Example for custom spinner
"""
from __future__ import unicode_literals, absolute_import, print_function
import os
import sys
import time

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from halo import Halo

spinner = Halo(text='Custom Spins',
               spinner={
                   'interval': 100,
                   'frames': ['-', '+', '*', '+', '-']
               })

try:
    spinner.start()
    time.sleep(2)
    spinner.succeed('It works!')
except (KeyboardInterrupt, SystemExit):
    spinner.stop()
Example #13
def install(package):
    """Install a package"""
    click.echo(
        f'Finding packages having exact match{settings.LOADING}'
    )
    exact_match = True
    with Halo():
        matching_dirs = [i.parent for i in settings.REPO_DIR.rglob('Dockerfile') if package == i.parent.name]

    if not matching_dirs:
        exact_match = False
        click.echo(crayons.red('Exact match not found!'))
        click.echo(f'Finding packages with similar names{settings.LOADING}')
        with Halo():
            matching_dirs = [
                i.parent for i in settings.REPO_DIR.rglob('Dockerfile') if are_similar(i.parent.name, package)
            ]
    if matching_dirs:
        dir_index = 1
        # show a list of packages to choose from
        if len(matching_dirs) > 1 or not exact_match:
            count = 0
            click.echo()
            for matching_dir in matching_dirs:
                count += 1
                click.echo(
                    '[{0}] {1} ↜ {2}'.format(
                        crayons.white(count, bold=True),
                        crayons.green(matching_dir.name),
                        crayons.cyan(get_repo_name(matching_dir))
                    )
                )
            click.echo()
            dir_index = click.prompt(
                crayons.white(
                    "Which one do you want me to install?", bold=True
                ),
                type=int, default=1
            )
        # resolve the required details for building the image
        required_dir = matching_dirs[dir_index - 1]
        repo_name = get_repo_name(required_dir)
        image_tag = settings.DOCKER_IMAGE_NAMESPACE + f'{repo_name}.{required_dir.name}'

        click.echo(
            '{0} {1}{2}'.format(
                crayons.white('Installing', bold=True),
                crayons.green(required_dir.name),
                crayons.white(settings.LOADING, bold=True)
            )
        )
        # build docker image
        click.echo(f'Using Dockerfile from {required_dir}')
        click.echo(f'tagging image as {image_tag}')
        with Halo():
            image = docker_client.images.build(
                path=str(required_dir),
                tag=image_tag,
                quiet=False
            )
        click.echo(crayons.cyan(f'Successfully built image. Run `dockapt run {required_dir.name}` to use it'))
    else:
        click.echo(
            crayons.magenta("I couldn't find ", bold=True) + crayons.green(package, bold=True) + crayons.magenta(
                " in my repositories", bold=True)
        )
Example #14
Keywords = ['Remote Sensing', 'Automatic Generation']
ProgressCode = 'onGoing'
ClassificationCode = 'unclassified'

defaultValues = [
    defaultLocale, resourceScope, roleCode, Organisation, Abstract, Keywords,
    ProgressCode, ClassificationCode
]

# CSW information for inserting the XML
csw_url = 'http://136.199.176.14:8080/geonetwork/srv/en/csw-publication'
user = '******'
passwd = 'admin'

# Start spinner to indicate progress
spinner = Halo()
spinner.start()

# Initiate crawler and create metadata
crawler = metacrawler.Crawler(rootDir,
                              defaultValues,
                              csw_url,
                              user,
                              passwd,
                              upload=False)
crawler.createMetadata()

# Stop spinner
spinner.succeed()
print("\nExecution Time: %s seconds. \n%d xml files created" %
      ((time.time() - start_time), crawler.number))
Example #15
def experiment_compare_loss(algo_type, space_list, data, oracle_scores,
                            test_oracle_scores, space_allocations,
                            best_cutoff_thresh_count_sketch_weighted,
                            best_cutoff_thresh_count_sketch_absolute,
                            best_cutoff_thresh_count_sketch_relative,
                            num_trials, n_workers, save_folder, save_file):

    true_counts = data.copy()

    # learned algo with cutoff
    logger.info("Running learned count sketch")

    # results across all trials
    algo_predictions_all = []
    cutoff_count_sketch_predictions_weighted_all = []
    cutoff_count_sketch_predictions_absolute_all = []
    cutoff_count_sketch_predictions_relative_all = []
    count_sketch_prediction_all = []
    count_min_prediction_all = []

    for trial in range(num_trials):

        # vanilla count sketch
        logger.info("Running vanilla count sketch on all parameters...")
        spinner = Halo(text='Evaluating vanilla count sketch algorithm',
                       spinner='dots')
        spinner.start()
        with get_context("spawn").Pool() as pool:
            count_sketch_prediction = pool.starmap(
                run_count_sketch,
                zip(repeat(data), space_allocations, space_allocations))
            pool.close()
            pool.join()

        spinner.stop()

        count_sketch_prediction_all.append(count_sketch_prediction)

        # vanilla count min
        logger.info("Running vanilla count min on all parameters...")
        spinner = Halo(text='Evaluating vanilla count min algorithm',
                       spinner='dots')
        spinner.start()
        with get_context("spawn").Pool() as pool:
            count_min_prediction = pool.starmap(
                run_count_min,
                zip(repeat(data), space_allocations, space_allocations))
            pool.close()
            pool.join()

        spinner.stop()

        count_min_prediction_all.append(count_min_prediction)

        ########################################################

        algo_predictions = []
        cutoff_count_sketch_predictions_weighted = []
        cutoff_count_sketch_predictions_absolute = []
        cutoff_count_sketch_predictions_relative = []

        spinner = Halo(text='Evaluating learned predictions algorithm',
                       spinner='dots')
        spinner.start()

        if algo_type == ALGO_TYPE_CUTOFF_MEDIAN:
            # learned algorithm with cutoff
            with get_context("spawn").Pool() as pool:
                algo_predictions = pool.starmap(
                    run_cutoff_median_sketch,
                    zip(repeat(data), repeat(oracle_scores),
                        copy.deepcopy(space_allocations),
                        copy.deepcopy(space_allocations), repeat(False)))
                pool.close()
                pool.join()

        algo_predictions_all.append(algo_predictions)

        ########################################################

        # vanilla sketch + cutoff
        # NOTE: need to evaluate this with each cutoff threshold
        space_allocations_cutoff_weighted = []
        space_allocations_cutoff_absolute = []
        space_allocations_cutoff_relative = []
        for i, space in enumerate(space_allocations):
            space_cutoff_weighted = space - best_cutoff_thresh_count_sketch_weighted[
                i] * CUTOFF_SPACE_COST_FACTOR  # ID | count (4 bytes each)
            space_cutoff_absolute = space - best_cutoff_thresh_count_sketch_absolute[
                i] * CUTOFF_SPACE_COST_FACTOR  # ID | count (4 bytes each)
            space_cutoff_relative = space - best_cutoff_thresh_count_sketch_relative[
                i] * CUTOFF_SPACE_COST_FACTOR  # ID | count (4 bytes each)

            space_allocations_cutoff_weighted.append(space_cutoff_weighted)
            space_allocations_cutoff_absolute.append(space_cutoff_absolute)
            space_allocations_cutoff_relative.append(space_cutoff_relative)

        # 1.  evaluate cutoff count sketch on weighted error
        logger.info(
            "Running cutoff count sketch on all parameters for weighted error..."
        )
        spinner.stop()
        spinner = Halo(
            text='Evaluating cutoff count sketch algorithm for weighted error',
            spinner='dots')
        spinner.start()
        with get_context("spawn").Pool() as pool:
            cutoff_count_sketch_predictions_weighted = pool.starmap(
                run_cutoff_count_sketch,
                zip(repeat(data), repeat(oracle_scores),
                    repeat(test_oracle_scores),
                    space_allocations_cutoff_weighted,
                    best_cutoff_thresh_count_sketch_weighted,
                    space_allocations))
            pool.close()
            pool.join()

        # 2.  evaluate cutoff count sketch on absolute error
        logger.info(
            "Running cutoff count sketch on all parameters for absolute error..."
        )
        spinner.stop()
        spinner = Halo(
            text='Evaluating cutoff count sketch algorithm for absolute error',
            spinner='dots')
        spinner.start()
        with get_context("spawn").Pool() as pool:
            cutoff_count_sketch_predictions_absolute = pool.starmap(
                run_cutoff_count_sketch,
                zip(repeat(data), repeat(oracle_scores),
                    repeat(test_oracle_scores),
                    space_allocations_cutoff_absolute,
                    best_cutoff_thresh_count_sketch_absolute,
                    space_allocations))
            pool.close()
            pool.join()

        # 3.  evaluate cutoff count sketch on relative error
        logger.info(
            "Running cutoff count sketch on all parameters for relative error..."
        )
        spinner.stop()
        spinner = Halo(
            text='Evaluating cutoff count sketch algorithm for relative error',
            spinner='dots')
        spinner.start()
        with get_context("spawn").Pool() as pool:
            cutoff_count_sketch_predictions_relative = pool.starmap(
                run_cutoff_count_sketch,
                zip(repeat(data), repeat(oracle_scores),
                    repeat(test_oracle_scores),
                    space_allocations_cutoff_relative,
                    best_cutoff_thresh_count_sketch_relative,
                    space_allocations))
            pool.close()
            pool.join()
        spinner.stop()

        cutoff_count_sketch_predictions_weighted_all.append(
            cutoff_count_sketch_predictions_weighted)
        cutoff_count_sketch_predictions_absolute_all.append(
            cutoff_count_sketch_predictions_absolute)
        cutoff_count_sketch_predictions_relative_all.append(
            cutoff_count_sketch_predictions_relative)

    #################################################################
    # save all results to the folder
    #################################################################
    np.savez(os.path.join(save_folder, save_file),
             space_list=space_list,
             true_values=true_counts,
             oracle_predictions=oracle_scores,
             algo_predictions=algo_predictions_all,
             cutoff_count_sketch_predictions_weighted=cutoff_count_sketch_predictions_weighted_all,
             cutoff_count_sketch_predictions_absolute=cutoff_count_sketch_predictions_absolute_all,
             cutoff_count_sketch_predictions_relative=cutoff_count_sketch_predictions_relative_all,
             count_sketch_predictions=count_sketch_prediction_all,
             count_min_predictions=count_min_prediction_all)
Example #16
 def test_spinner_interval_argument(self):
     """Test proper assignment of the interval value from the constructor argument.
     """
     spinner = Halo(interval=123)
     self.assertEqual(spinner._interval, 123)
Example #17
def experiment_compare_loss_vs_oracle_error_on_synthetic_data(
        algo_type, space, data, n_trials, n_workers, save_folder, save_file):

    # learned algo with cutoff
    logger.info(
        "Running learned count sketch on synthetic data with different prediction errors"
    )

    algo_mean_absolute_error_per_oracle_error = []
    algo_std_absolute_error_per_oracle_error = []
    cutoff_absolute_error_per_oracle_error = []

    algo_mean_relative_error_per_oracle_error = []
    algo_std_relative_error_per_oracle_error = []
    cutoff_relative_error_per_oracle_error = []

    algo_mean_weighted_error_per_oracle_error = []
    algo_std_weighted_error_per_oracle_error = []
    cutoff_weighted_error_per_oracle_error = []

    # all the data / scores we test
    eval_data = []
    eval_scores = []

    # random oracle
    np.random.shuffle(data)
    scores = np.asarray(range(0, len(data)))[::-1]
    eval_data.append(data.copy())
    eval_scores.append(scores.copy())

    spinner = Halo(text='Computing oracle scores...', spinner='dots')
    spinner.start()

    for space_factor in SYNTHETIC_DATA_ORACLE_SPACE_FACTOR_TO_TEST:
        # oracle error = countmin error on allocated space
        oracle_scores = count_min(data.copy(), int(len(data) * space_factor),
                                  1)

        sort = np.argsort(oracle_scores)[::-1]
        eval_data.append(data.copy()[sort])
        eval_scores.append(scores.copy())

    # perfect oracle
    sort = np.argsort(data)[::-1]
    eval_data.append(data.copy()[sort])
    eval_scores.append(scores.copy())

    spinner.stop()
    spinner = Halo(text='Evaluating algo on parameters...', spinner='dots')
    spinner.start()

    algo_predictions_per_trial_per_error = []

    for trial in range(n_trials):
        algo_predictions_trial = []
        # learned algorithm with cutoff
        with get_context("spawn").Pool() as pool:
            algo_predictions_trial = pool.starmap(
                run_cutoff_median_sketch,
                zip(eval_data.copy(), eval_scores.copy(), repeat(space),
                    repeat(space), repeat(False)))
            pool.close()
            pool.join()

        algo_predictions_per_trial_per_error.append(
            np.array(algo_predictions_trial))

    spinner.stop()
    spinner = Halo(text='Computing errors...', spinner='dots')
    spinner.start()

    i = 0
    for d in eval_data:

        algo_absolute_error_per_trial = []
        algo_relative_error_per_trial = []
        algo_weighted_error_per_trial = []

        for trial in range(n_trials):
            abs_error = np.abs(algo_predictions_per_trial_per_error[trial][i] -
                               np.array(d))
            rel_error = abs_error / np.array(d)
            weighted_error = abs_error * np.array(d)

            algo_absolute_error_per_trial.append(np.sum(abs_error))
            algo_relative_error_per_trial.append(np.sum(rel_error))
            algo_weighted_error_per_trial.append(np.sum(weighted_error))

        algo_mean_absolute_error_per_oracle_error.append(
            np.mean(algo_absolute_error_per_trial))
        algo_mean_relative_error_per_oracle_error.append(
            np.mean(algo_relative_error_per_trial))
        algo_mean_weighted_error_per_oracle_error.append(
            np.mean(algo_weighted_error_per_trial))

        algo_std_absolute_error_per_oracle_error.append(
            np.std(algo_absolute_error_per_trial))
        algo_std_relative_error_per_oracle_error.append(
            np.std(algo_relative_error_per_trial))
        algo_std_weighted_error_per_oracle_error.append(
            np.std(algo_weighted_error_per_trial))

        i += 1

    spinner.stop()
    spinner = Halo(text='Evaluating cutoff count sketch on parameters...',
                   spinner='dots')
    spinner.start()

    best_cutoff_absolute_error_per_oracle_error = []
    best_cutoff_relative_error_per_oracle_error = []
    best_cutoff_weighted_error_per_oracle_error = []

    i = 0
    for d in eval_data:

        cutoff_fracs = np.array(SYNTHETIC_DATA_CUTOFF_FRAC_TO_TEST) * (
            space / CUTOFF_SPACE_COST_FACTOR)
        cutoff_fracs = cutoff_fracs.astype(int)
        space_for_sketch = np.ones(len(
            cutoff_fracs)) * space - cutoff_fracs * CUTOFF_SPACE_COST_FACTOR

        cutoff_absolute_error_per_cutoff = []
        cutoff_relative_error_per_cutoff = []
        cutoff_weighted_error_per_cutoff = []

        # learned algorithm with cutoff
        with get_context("spawn").Pool() as pool:
            cutoff_predictions_per_cutoff_frac = pool.starmap(
                run_cutoff_count_sketch,
                zip(repeat(d), repeat(eval_scores[i]), repeat(eval_scores[i]),
                    space_for_sketch, cutoff_fracs, repeat(space)))
            pool.close()
            pool.join()

        for predictions in cutoff_predictions_per_cutoff_frac:
            abs_error = np.abs(np.array(predictions) - np.array(d))
            rel_error = abs_error / np.array(d)
            weighted_error = abs_error * np.array(d)

            cutoff_absolute_error_per_cutoff.append(np.sum(abs_error))
            cutoff_relative_error_per_cutoff.append(np.sum(rel_error))
            cutoff_weighted_error_per_cutoff.append(np.sum(weighted_error))

        best_cutoff_absolute_error_per_oracle_error.append(
            np.min(cutoff_absolute_error_per_cutoff))
        best_cutoff_relative_error_per_oracle_error.append(
            np.min(cutoff_relative_error_per_cutoff))
        best_cutoff_weighted_error_per_oracle_error.append(
            np.min(cutoff_weighted_error_per_cutoff))

        i += 1

    spinner.stop()

    # vanilla count sketch
    spinner = Halo(text='Running vanilla count sketch...', spinner='dots')
    spinner.start()
    count_sketch_prediction = run_count_sketch(data, space, space)
    spinner.stop()

    count_sketch_abs_error = np.abs(
        np.array(count_sketch_prediction) - np.array(data))
    count_sketch_rel_error = count_sketch_abs_error / np.array(data)
    count_sketch_weighted_error = count_sketch_abs_error * np.array(data)

    # vanilla count min
    spinner = Halo(text='Running vanilla count min...', spinner='dots')
    spinner.start()
    count_min_prediction = run_count_min(data, space, space)
    spinner.stop()

    count_min_abs_error = np.abs(
        np.array(count_min_prediction) - np.array(data))
    count_min_rel_error = count_min_abs_error / np.array(data)
    count_min_weighted_error = count_min_abs_error * np.array(data)

    #################################################################
    # save all results to the folder
    #################################################################
    np.savez(
        os.path.join(save_folder, save_file),
        space=space,
        num_items=len(data),
        count_sketch_abs_error=np.sum(count_sketch_abs_error),
        count_sketch_rel_error=np.sum(count_sketch_rel_error),
        count_sketch_weighted_error=np.sum(count_sketch_weighted_error),
        count_min_abs_error=np.sum(count_min_abs_error),
        count_min_rel_error=np.sum(count_min_rel_error),
        count_min_weighted_error=np.sum(count_min_weighted_error),
        cutoff_abs_error=best_cutoff_absolute_error_per_oracle_error,
        cutoff_rel_error=best_cutoff_relative_error_per_oracle_error,
        cutoff_weighted_error=best_cutoff_weighted_error_per_oracle_error,
        algo_abs_error=algo_mean_absolute_error_per_oracle_error,
        algo_rel_error=algo_mean_relative_error_per_oracle_error,
        algo_weighted_error=algo_mean_weighted_error_per_oracle_error,
        algo_abs_error_std=algo_std_absolute_error_per_oracle_error,
        algo_rel_error_std=algo_std_relative_error_per_oracle_error,
        algo_weighted_error_std=algo_std_weighted_error_per_oracle_error,
        num_trials=n_trials,
    )
Example #18
 def test_spinner_interval_dict(self):
     """Test proper assignment of the interval value from a dictionary.
     """
     spinner = Halo(spinner={'interval': 321, 'frames': ['+', '-']})
     self.assertEqual(spinner._interval, 321)
Example #19
    def __init__(self, file, dir_path=None):
        """
            Create an Analyzer object and sets the dataframes to input file data.

            Parameters
            ----------
            file: str
                Path to input data file. Either a JSON or CSV file.
            dir: str
                Optional directory path to multiple input csv files.
        """
        cols = [
            'repository_url', 'repository_created_at', 'repository_name',
            'repository_description', 'repository_owner',
            'repository_open_issues', 'repository_watchers',
            'repository_language', 'actor_attributes_login',
            'actor_attributes_name', 'actor_attributes_location', 'created_at',
            'payload_action', 'payload_number', 'payload_issue', 'actor',
            'url', 'type'
        ]
        df_types = {
            'repository_url': str,
            'repository_created_at': str,
            'repository_description': str,
            'repository_owner': str,
            'repository_open_issues': str,
            'repository_watchers': str,
            'repository_language': str,
            'actor_attributes_login': str,
            'actor_attributes_name': str,
            'actor_attributes_location': str,
            'created_at': str,
            'payload_action': str,
            'payload_number': str,
            'payload_issue': str,
            'actor': str,
            'url': str,
            'type': str
        }
        if dir_path:
            all_files = glob.glob(dir_path + '/*.csv')
            li = []
            pbar = tqdm(all_files)
            for file in pbar:
                pbar.set_description("Reading %s" % file)
                df = pd.read_csv(file, usecols=cols, dtype=df_types, header=0)
                li.append(df)
            self.data = pd.concat(li, axis=0, ignore_index=True)
        else:
            f_spinner = Halo(text='Loading', spinner='dots')
            self.filename = file
            if file.endswith('.csv'):
                f_spinner.start()
                self.data = pd.read_csv(self.filename,
                                        usecols=cols,
                                        dtype=df_types)
                f_spinner.succeed(f'{file} Successfully Read!')
            elif file.endswith('.json'):
                f_spinner.start()
                self.data = pd.read_json(self.filename, lines=True)
                f_spinner.succeed(f'{file} Successfully Read!')
            else:
                print('File must be a JSON or CSV file.')
                sys.exit(1)
        spinner = Halo(text='Processing Data', spinner='dots')
        spinner.start()
        self.countries = pd.read_csv('data/countries.csv')
        self.data['created_at'] = pd.to_datetime(self.data['created_at'],
                                                 format='%Y-%m-%d %H:%M:%S')
        self.data = self.data.join(
            self.countries.set_index('actor_attributes_location'),
            on='actor_attributes_location')
        self.data['country'].replace('No Results', '', inplace=True)
        spinner.succeed('Data Successfully Processed!')
Example #20
    def test_default_placement(self):
        """Test default placement of spinner.
        """

        spinner = Halo()
        self.assertEqual(spinner.placement, 'left')
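placement accepts 'right' as well as the default 'left'; a minimal usage sketch:

from halo import Halo

# Render the spinner to the right of the text instead of the default left side.
spinner = Halo(text='Loading', placement='right')
spinner.start()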
Example #21
def announce_grade(homework_prefix, token, org, only_id, feedback_source_repo):
    '''announce student grades to each hw repo'''

    ensure_gh_token(token)
    # TODO: use logging lib to log messages
    spinner = Halo(stream=sys.stderr)

    student_feedback_title = f"Grade for {homework_prefix}"

    gstudents = Gstudents()
    feedback_vars = gstudents.left_join(homework_prefix)

    # Clone feedback repo & set needed variables
    cur = Path('.')

    for d in cur.glob("feedback-tmp-*"):
        shutil.rmtree(d)
    spinner.info("deleted old feedback-tmp folders")

    root_folder = Path(
        tempfile.mkdtemp(prefix="feedback-tmp-{}-".format(
            datetime.now().strftime("%b%d%H%M%S")),
                         dir="."))
    spinner.succeed(normal.txt('Create tmp folder ').kw(root_folder).to_str())

    feedback_repo_path = root_folder / 'feedbacks'

    spinner.start(f"cloning feeback source repo : {feedback_source_repo}")
    _, t = measure_time(sp.run)([
        'git',
        'clone',
        f'https://github.com/{org}/{feedback_source_repo}.git',
        feedback_repo_path.name,
    ],
                                cwd=root_folder,
                                stdout=sp.DEVNULL,
                                stderr=sp.DEVNULL)
    spinner.succeed(
        f"cloning feedback source repo : {feedback_source_repo} ... {t:4.2f} sec"
    )
    client = httpx.AsyncClient(headers=httpx.Headers(
        {
            "User-Agent": "GitHubClassroomUtils/1.0",
            "Authorization": "token " + token,
            # needed for the check-suites request
            "Accept": "application/vnd.github.antiope-preview+json"
        }))

    hw_path = feedback_repo_path / homework_prefix / 'reports'

    # generate feedbacks
    fbs, t = measure_time(gen_feedbacks)(homework_prefix, hw_path,
                                         feedback_vars)
    spinner.succeed(f"Generate content for feedbacks ... {t:5.3f} sec")

    # handle only_id
    if only_id:
        try:
            # detect possible buggy condition
            info = gstudents.get_student(only_id)
        except RuntimeError as e:
            print(' *=' * 30)
            print('Warning!')
            print(e)
            return
        only_repo_name = get_hw_repo_name(homework_prefix,
                                          info['github_handle'])
        fbs = list(filter(lambda fb: fb['repo_name'] == only_repo_name, fbs))

    async def push_to_remote(feedback_title, feedbacks):
        # push to remote
        async def push_feedback(fb):
            request_body = {'title': feedback_title, 'body': fb['value']}
            try:
                issue_num = await find_existing_issue(client, org,
                                                      fb['repo_name'],
                                                      feedback_title)
            except BaseException as e:
                print(f'error on {fb["repo_name"]}: {e}')
                return
            if issue_num:
                request_body['state'] = 'open'  # reopen issue
                url = f"https://api.github.com/repos/{org}/{fb['repo_name']}/issues/{issue_num}"
                await edit_issue_async(client, url, issue_num, request_body)
            else:
                url = f"https://api.github.com/repos/{org}/{fb['repo_name']}/issues"
                await create_issue_async(client, url, request_body)
            print(f'success {fb["repo_name"]}')

        async with trio.open_nursery() as nursery:
            for fb in feedbacks:
                nursery.start_soon(push_feedback, fb)

    _, t = measure_time(trio.run)(push_to_remote, student_feedback_title, fbs)
    spinner.succeed(f"Push feedbacks to remote ... {t:5.2f} sec")
    spinner.succeed('finished announcing grades')
    return
Example #22
 def test_animation_setter(self):
     spinner = Halo("Asdf")
     spinner.animation = "bounce"
     self.assertEquals("bounce", spinner.animation)
     spinner.animation = "marquee"
     self.assertEquals("marquee", spinner.animation)
Example #23
    i = 0
    spinner.start()
    for i in range(1, int(rows) + 1):
        spinner.text = '{0}% - preparing sql statements..'.format(percentage_calculator(i, rows))
        name = rand_string_generator(size=3) + str(i)
        address = rand_string_generator(size=6) + str(i) + name
        insert_query = "INSERT INTO random_table (rand_column_1, rand_column_2) VALUES ('{}', '{}')".format(name,
                                                                                                            address)
        db_cursor.execute(insert_query)
    db.commit()
    spinner.succeed('sql statements prepared and executed')
    spinner.succeed("{} rows inserted into database".format(i))


if __name__ == '__main__':
    spinner = Halo(text='preparing sql statements..', spinner='dots')
    arg_parser = argparse.ArgumentParser(description="Script to insert random data into a MySQL database.")
    arg_parser.add_argument("host", help="database hostname/ip")
    arg_parser.add_argument("username", help="database username")
    arg_parser.add_argument("password", help="database password")
    arg_parser.add_argument("database", help="database name to generate random data")
    arg_parser.add_argument("--rows", help="count of rows to be inserted to database.")
    if len(sys.argv) < 5:  # script name plus four required positional arguments
        arg_parser.print_help()
        sys.exit(1)

    arguments = arg_parser.parse_args()
    db_host = arguments.host
    db_username = arguments.username
    db_password = arguments.password
    db_name = arguments.database
Example #24
import json
import os
import subprocess
from pathlib import Path
from subprocess import check_call, check_output
from tempfile import TemporaryDirectory
from typing import Any, Dict, Iterator

from halo import Halo

# format_args and NoImageError are helpers from the surrounding project (not shown)


def terraform(tf_vars: Dict[str, Any],
              tf_dir: Path) -> Iterator[Dict[Any, Any]]:
    if "AWS_ACCESS_KEY_ID" not in os.environ:
        raise RuntimeError("Missing AWS creds")
    with TemporaryDirectory() as tmpdir:
        with Halo("[infrastructure] checking current state") as spinner:
            plan = Path(tmpdir) / "tfplan"
            tf_args = format_args(tf_vars)
            cmd = ["terraform", "plan", f"-out={plan}", "-no-color"] + tf_args
            try:
                plan_output = check_output(cmd,
                                           stderr=subprocess.STDOUT,
                                           cwd=tf_dir)
            except subprocess.CalledProcessError as err:
                if "terraform init" in err.output.decode("utf8"):
                    # we know what to do here
                    spinner.text = "[infrastructure] initializing plugins"
                    check_output(["terraform", "init"], cwd=tf_dir)
                    spinner.text = "[infrastructure] checking current state"
                    plan_output = check_output(cmd, cwd=tf_dir)
                elif "Your query returned no results" in err.output.decode(
                        "utf8"):
                    raise NoImageError() from err
                else:
                    with open("terraform.log", "w") as log_file:
                        log_file.write(err.output.decode("utf8"))
                    raise
            changes = [
                line for line in plan_output.decode("utf8").split("\n")
                if line.lstrip().startswith("#")
            ]

            if changes:
                spinner.succeed("[infrastructure] found changes to apply:")
                for change in changes:
                    if ("unchanged attributes hidden" in change
                            or "unchanged element hidden" in change):
                        continue
                    change = change.lstrip(" #")
                    print(f"  • {change}")
            else:
                spinner.info("[infrastructure] no changes to apply")

        if changes:
            with Halo(
                    "[infrastructure] applying changes (output in [terraform.log])"
            ) as spinner:
                with open("terraform.log", "w") as log_file:
                    cmd = [
                        "terraform",
                        "apply",
                        "-refresh=false",
                        "-auto-approve",
                        str(plan),
                    ]
                    check_call(cmd, stdout=log_file, cwd=tf_dir)
                spinner.succeed("[infrastructure] created")

        data = json.loads(
            check_output(["terraform", "output", "-json"], cwd=tf_dir))
    yield {k: v["value"] for k, v in data.items()}
Example #25
class Colour:
    Green, Red, White, Yellow = '\033[92m', '\033[91m', '\033[0m', '\033[93m'


print(Colour.Yellow + """
╦═╗╔═╗╔╦╗╔═╗╦  ╦╔═╗╔╦╗╔╦╗╦╔╦╗
╠╦╝║╣ ║║║║ ║╚╗╔╝║╣  ║║ ║║║ ║
╩╚═╚═╝╩ ╩╚═╝ ╚╝ ╚═╝═╩╝═╩╝╩ ╩
""")
print(Colour.White + 'Press Ctrl + C to Exit\n')

reddit = praw.Reddit(user_agent=user_agent,
                     client_id=client_id,
                     client_secret=client_secret,
                     username=reddit_user,
                     password=reddit_pass)

try:
    spinner = Halo(text='Running', spinner='dots')
    spinner.start()
    for comment in reddit.subreddit(target_sub).stream.comments():
        if target_keyword in comment.body:
            splits = re.split(r'(\W)', comment.permalink)
            del splits[-3:]
            msg = 'https://removeddit.com' + ''.join(splits)
            comment.reply(msg)
except Exception as e:
    spinner.stop()
    print(Colour.Red + str(e))
except KeyboardInterrupt:
    spinner.stop()
    sys.exit()
Example #26
def patiently_parse_log_folder(folder: str):
    with Halo(text='Parsing...', spinner='dots'):
        parse_and_save_to_db(folder)
    LogEntry.raw('CREATE INDEX t_sort ON logentry (t);').execute()
Example #27
            title, filename,
            time.strftime('%Y/%m/%d %H:%M'),
            time.strftime('%Y/%m/%d %H:%M'), cleaned_input[1]
        ]

        # find the editing location of the source file
        tag = main_index.find('div', {'class': 'wrapper'})
        tag.insert(0, body)

        # save the new edited source file
        with open(os.path.join(message.file_dir, '../index.html'),
                  'w') as file:
            file.write(str(main_index))

        spinner = Halo(text='Creating post...',
                       spinner='pong',
                       text_color='magenta')
        spinner.start()
        time.sleep(4)
        spinner.stop_and_persist(
            text="New post successfully created and appended to the index",
            symbol='✔ ')  # ✔

    if answers['item'] == "edit post":
        # filter files
        places = [y[0] for x, y in post_data.items()]

        post = {
            'type': 'list',
            'name': 'item',
            'message': 'Which post?',
Example #28
    argparser.add_argument("--find_optimal_params",
                           action='store_true',
                           default=False)
    argparser.add_argument("--run_perfect_oracle_version",
                           action='store_true',
                           default=False)
    argparser.add_argument("--model_size",
                           type=float,
                           default=0.0,
                           help="model size in MB")
    args = argparser.parse_args()

    assert (args.learned_algo_type == ALGO_TYPE_CUTOFF_MEDIAN)

    # set the random seed for numpy values
    np.random.seed(args.seed)

    spinner = Halo(text='Loading datasets...', spinner='dots')
    spinner.start()

    # load the test dataset (needed for both validation and testing)
    # specifically, we use the test_oracle_scores to find the score cutoff
    # value in the validation data (just the way the experiment code is setup)
    test_data, test_oracle_scores = load_dataset(
        args.test_dataset,
        args.model_test,
        'valid_output',  # old models were trained with valid/test swapped 
        args.run_perfect_oracle_version,
        args.aol_data,
        args.synth_zipfian,
        args.synth_pareto)

    if args.find_optimal_params:
Example #29
                                      t.blue(link), t.yellow(f"({link_hash})"))
                                screen_id = await generate_screenshot(
                                    link, service)
                                print(t.green('Generated screenshot for '),
                                      t.yellow(screen_id))

                            asyncio.ensure_future(
                                reply(api, chat_id, link, link_hash,
                                      photo_server_uri))
                            await cleanup_hashes(IMAGE_PATH)


if __name__ == '__main__':
    loop = asyncio.get_event_loop()

    with Halo(text='', spinner='dots') as halo:
        halo.color = 'red'
        halo.text = 'Setting up headless Chrome...'

        chrome_service = service.Service(CHROME_PATH)
        chrome_service.start()
        try:
            test_wd = webdriver.Remote(
                chrome_service.service_url,
                desired_capabilities=CHROME_OPT.to_capabilities())
            test_wd.quit()  # close the probe session; Chrome startup works
        except WebDriverException as e:
            halo.fail(f"Chrome init failed: {e}")
            sys.exit(-1)

        halo.text = 'Scanning existing screenshots...'
Example #30
import sys
import time

import libtmux
from halo import Halo


if __name__ == "__main__":
    spinner = Halo(text='Testing Tmux Integration', spinner='dots')
    spinner.start()
    time.sleep(3)
    try:
        server = libtmux.Server()
        sessions = server.list_sessions()
        spinner.succeed('Tmux Connection Established. Detected {} Sessions'.format(len(sessions)))
        sys.exit(0)
    except Exception as e:
        spinner.fail('Tmux Connection Failed: {}'.format(e))
        sys.exit(1)