import os
import pathlib
import subprocess
import sys

if len(sys.argv) < 2:
    print("No path to original / hi-res icon provided")
    raise SystemExit

if len(sys.argv) > 2:
    print("Too many arguments")
    raise SystemExit

originalPicture = sys.argv[1]
if not (os.path.isfile(originalPicture)):
    print(f"There is no such file: {sys.argv[1]}")
    raise SystemExit

fname = pathlib.Path(originalPicture).stem
ext = pathlib.Path(originalPicture).suffix
destDir = pathlib.Path(originalPicture).parent

iconsetDir = os.path.join(destDir, f"{fname}.iconset")
if not (os.path.exists(iconsetDir)):
    pathlib.Path(iconsetDir).mkdir(parents=False, exist_ok=True)


class IconParameters():
    width = 0
    scale = 1

    def __init__(self, width, scale):
        self.width = width
        self.scale = scale
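    # --- Hypothetical continuation (the original snippet is cut off here). ---
    # A typical macOS iconset script derives each icon's filename from its
    # width and scale, renders every size with `sips`, then bundles the
    # .iconset folder into an .icns file with `iconutil`. Sketch only:

    def get_icon_name(self):
        if self.scale == 1:
            return f"icon_{self.width}x{self.width}.png"
        return f"icon_{self.width}x{self.width}@{self.scale}x.png"


# Standard iconset sizes at 1x and 2x.
icon_sizes = [IconParameters(w, s) for w in (16, 32, 128, 256, 512) for s in (1, 2)]

for ip in icon_sizes:
    subprocess.run(
        ["sips", "-z", str(ip.width * ip.scale), str(ip.width * ip.scale),
         originalPicture, "--out", os.path.join(iconsetDir, ip.get_icon_name())],
        check=True,
    )

subprocess.run(["iconutil", "-c", "icns", iconsetDir], check=True)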
Example #2
        q75 = np.percentile(subvalues, 75)
        subvalues_filtered = list(filter(lambda x : (x >= q25) and (x <= q75), subvalues))
        averages.append(np.mean(subvalues_filtered))
    q25 = np.percentile(averages, 25)
    q75 = np.percentile(averages, 75)
    averages_filtered = list(filter(lambda x : (x >= q25) and (x <= q75), averages))
    return np.mean(averages_filtered)
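# Note: the beginning of calc_stats() is cut off above. Assuming the outer
# loop mirrors the visible inner logic (trim each chunk of raw values to its
# interquartile range before averaging), the full function plausibly looks
# like the sketch below; the chunking scheme is a guess.
#
#   def calc_stats(values, chunk_size=10):
#       averages = []
#       for i in range(0, len(values), chunk_size):
#           subvalues = values[i:i + chunk_size]
#           q25 = np.percentile(subvalues, 25)
#           q75 = np.percentile(subvalues, 75)
#           subvalues_filtered = list(filter(lambda x: q25 <= x <= q75, subvalues))
#           averages.append(np.mean(subvalues_filtered))
#       q25 = np.percentile(averages, 25)
#       q75 = np.percentile(averages, 75)
#       averages_filtered = list(filter(lambda x: q25 <= x <= q75, averages))
#       return np.mean(averages_filtered)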


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('results', help='File with results -- you can use "combine.py" script to combine multiple files into one')
    parser.add_argument('--out', '-o', required=True, help='Output file')
    args = parser.parse_args()

    data = pickle.loads(pathlib.Path(args.results).read_bytes())
    all_results = []
    results = {}
    invalid = 0
    failed = 0
    for point, value in data:
        if value == 'invalid':
            invalid += 1
        elif not isinstance(value, list):
            failed += 1
        else:
            all_results.append(value)
            point = utils.freeze(point)
            if point in results:
                raise KeyError('Duplicated entry: {}'.format(point))
            results[point] = calc_stats(value)
Example #3
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Django CLI entry point."""

import os
import pathlib
import sys

# Append directory where `channels_graphqlws` package resides.
sys.path.append(str(pathlib.Path(__file__).resolve().parent.parent))

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that
        # the issue is really that Django is missing to avoid masking
        # other exceptions on Python 2.
        try:
            import django  # pylint: disable=unused-import
        except ImportError as ex:
            raise ImportError(
                "Could not import Django. Are you sure it is installed and "
                "available on your PYTHONPATH environment variable? Did you "
Example #4
def Main():
  progress.Run(TestVocab(pathlib.Path(pathflag.path())))
Example #5
#####################################################################################################################


if __name__ == "__main__":
    # Register the policy, it will check that the name is not already taken
    register_policy('CustomMlpPolicy', CustomMlpPolicy)

    # Parse the command-line arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--environment', type=str, help='Name of the environment.')
    parser.add_argument('-s', '--subdir', type=str, help='Subdir to combine and analyze.')
    parser.add_argument('-n', '--name', type=str, help='Name of the specific model.')
    parser.add_argument('-c', '--config', type=str, help='Name of config file in config/name')
    parser.add_argument('-t', '--tensorboard', action='store_true', help='If you want to run a TB node.')
    args = parser.parse_args()
    path = pathlib.Path().absolute()
    specified_path = join(path, 'rl', 'trained_models', args.environment, args.subdir)

    # Check whether a config file exists in the specified model directory; otherwise fall back to the config folder.
    try:
        config_path = join(specified_path, 'config.yml')
        with open(config_path) as f:
            config = yaml.safe_load(f)
        print('\nLoaded config file from: {}\n'.format(config_path))

    except (OSError, yaml.YAMLError):
        print('Specified config is not in path, getting original config: {}.yml...'.format(args.environment))
        # load config and variables needed
        config = get_parameters(args.environment)

    # Check if there is already a trained model in this directory
Example #6
def main():
    # downloading the data
    print("Downloading data...\n")
    download.download_data(out_path, TCELL_CSV_FILENAME, TCELL_DOWNLOAD_URL)
    downloaded_filename = TCELL_CSV_FILENAME

    # parsing the downloaded data
    print("Organizing data, checking for duplicates (it might take a while...)\n")
    parser.make_samples(out_path, downloaded_filename,
                        PARSED_SAMPLES_FOLDER_NAME, BATCH_FILE_SIZE, BATCH_REQUEST_SIZE)
    print("Finished parsing data\n")
    # pre-processing the data
    print("Clustering data for train-test independence\n")
    parsed_samples_paths = get_parsed_samples_paths(
        out_path, PARSED_SAMPLES_FOLDER_NAME)
    run_processing.main(['-i', *parsed_samples_paths])
    print("Done clustering\n")
    parser.Clean_id_lines_from_samples(
        out_path, processed_folder_name, CLEAN_PROCESSED_SAMPLES)
    print("Loading the data to memory and partitioning to train and test groups\n")

    # Create dataset of sequences
    char_to_idx, idx_to_char = lstm_model.char_maps()
    vocab_len = len(char_to_idx)

    train_samples, train_labels, test_samples, test_labels = make_labelled_samples(
        out_path, CLEAN_PROCESSED_SAMPLES, char_to_idx, idx_to_char, config["train_test_ratio"])

    # ====================== MODEL AND TRAINING ======================

    # Create DataLoader returning batches of samples.
    dl_train, dl_test, _, ds_test = get_dataloaders(
        train_samples, train_labels, test_samples, test_labels)
    # get random subset text from test dataset
    subset_text = get_subset_text(ds_test, idx_to_char)
    # initialize a model and try to train it
    in_local_maximum = True
    while in_local_maximum:
        try:
            in_local_maximum = False
            model_text = ''
            model = None
            print(
                "\nInitializing a random model with a random enough capitalization before training\n")
            while not is_random(model_text):
                # init model
                model = lstm_model.LSTMTagger(hidden_dim=config["hidden_dim"], input_dim=vocab_len, tagset_size=TAGSET_SIZE,
                                              n_layers=config["n_layers"], bidirectional=(config["bidirectional"] == 1), drop_prob=config["dropout"], device=device)
                model.to(device)
                model_text = get_capitalized_model_text(
                    model, subset_text.lower(), (char_to_idx, idx_to_char))

            # see how model works before training at all
            print("Model capitalization before training:\n")
            print(model_text)
            # train the model
            fit_res = train_model(model, subset_text, (char_to_idx,
                                                       idx_to_char), dl_train, dl_test)
            print("\nFinished training\n")
        except LocalMaximumError:
            print("Stuck in local maximum of all non-epitopes! Retrying...")
            checkpoint_file = config['checkpoint_file']
            checkpoint_filename = f'{checkpoint_file}.pt'
            if os.path.isfile(checkpoint_filename):
                pathlib.Path(checkpoint_filename).unlink()
            in_local_maximum = True
    # plot the training results
    training_plot_name = config['training_plot_name']
    print(f"Saving training plot to: {training_plot_name}\n")
    fig, _ = plot_fit(fit_res)
    fig.savefig(training_plot_name)
Example #7
        def get_output(self, text, **kwargs):

            pat = _pathlib.Path().absolute()
            k = kwargs.keys()
            accepted_args = ['string', 'inputName', 'outputName', 'scanf']
            bad_args = [char for char in k if char not in accepted_args]

            if len(bad_args) != 0:
                raise Exception('Invalid Parameters Passed')

            else:
                for key, value in kwargs.items():

                    if key == 'string' and value == False:
                        if _os.path.exists(text):
                            filename = "foo.exe"
                            if "inputName" in k:
                                raise Exception(
                                    "Error: Both inputName and path are given!"
                                )
                            if "outputName" in k:
                                filename = kwargs.get('outputName').split(
                                    '.')[0] + '.exe'

                            if 'scanf' in kwargs.keys():
                                with open(text, 'r') as f:
                                    code = f.read()

                                if 'scanf("' in code:
                                    old = code.split('scanf("')[1].split(
                                        '"')[0]
                                    new = old.replace("\n", "\\n")
                                    code = code.replace(old, new)
                                elif "scanf('" in code:
                                    old = code.split("scanf('")[1].split(
                                        "'")[0]
                                    new = old.replace("\n", "\\n")
                                    code = code.replace(old, new)

                                inp = kwargs.get('scanf')
                                if code.count('scanf') != len(inp):
                                    raise Exception(
                                        "Number of scanf inputs does not match the scanf calls in the code"
                                    )
                                else:
                                    i = 1  #initial step
                                    while i < len(inp):
                                        inp.insert(i, '\n')
                                        i = i + 3 + 1
                                    p = _subprocess.Popen(
                                        "gcc " + text + " -o" +
                                        filename.split('.')[0],
                                        shell=True)
                                    p.wait()
                                    p1 = _subprocess.Popen(
                                        [filename.split('.')[0]],
                                        stdout=_subprocess.PIPE,
                                        stdin=_subprocess.PIPE,
                                        encoding='utf8')
                                    out = p1.communicate(''.join(
                                        char for char in inp))[0]
                                    if 'outputName' not in kwargs.keys():
                                        _os.remove(filename)  # filename already ends in '.exe'
                                    return out

                            _subprocess.call(
                                ["gcc", text, "-o" + filename.split('.')[0]],
                                shell=True)
                            return str(
                                _subprocess.check_output(
                                    [_os.path.join(pat,
                                                   filename)])).strip()[2:-1]
                        else:
                            raise Exception("Invalid Path given")

                    elif key == 'string' and value == True:
                        iname = 'temp.c'
                        oname = 'foo.exe'

                        if "inputName" in k:
                            iname = kwargs.get('inputName').split(
                                '.')[0] + '.c'
                        if "outputName" in k:
                            oname = kwargs.get('outputName').split(
                                '.')[0] + '.exe'

                        if 'scanf("' in text:
                            old = text.split('scanf("')[1].split('"')[0]
                            new = old.replace("\n", "\\n")
                            text = text.replace(old, new)
                        elif "scanf('" in text:
                            old = text.split("scanf('")[1].split("'")[0]
                            new = old.replace("\n", "\\n")
                            text = text.replace(old, new)

                        with open(_os.path.join(pat, iname), 'w') as f:
                            f.write(text)

                        if 'scanf' in kwargs.keys():
                            with open(iname, 'r') as f:
                                code = f.read()
                            inp = kwargs.get('scanf')
                            if code.count('scanf') != len(inp):
                                raise Exception(
                                    "Number of scanf inputs does not match the scanf calls in the code"
                                )
                            else:
                                i = 1  #initial step
                                while i < len(inp):
                                    inp.insert(i, '\n')
                                    i = i + 3 + 1
                                p = _subprocess.Popen("gcc " + iname + " -o" +
                                                      oname.split('.')[0],
                                                      shell=True)
                                p.wait()
                                p1 = _subprocess.Popen([oname.split('.')[0]],
                                                       stdout=_subprocess.PIPE,
                                                       stdin=_subprocess.PIPE,
                                                       encoding='utf8')
                                out = p1.communicate(''.join(
                                    char for char in inp))[0]
                                if 'outputName' not in kwargs.keys():
                                    _os.remove(oname)
                                if 'inputName' not in kwargs.keys():
                                    _os.remove(iname)
                                return out

                        _subprocess.call([
                            "gcc",
                            _os.path.join(pat, iname),
                            "-o" + _os.path.join(pat,
                                                 oname.split('.')[0])
                        ],
                                         shell=True)
                        res = str(
                            _subprocess.check_output(
                                [_os.path.join(pat, oname)])).strip()[2:-1]
                        if "inputName" not in k:
                            _os.remove(_os.path.join(pat, iname))
                        if "outputName" not in k:
                            _os.remove(_os.path.join(pat, oname))
                        return res

                if _os.path.exists(text):
                    filename = "foo.exe"
                    if "inputName" in k:
                        raise Exception(
                            "Both inputName and path are cannot be given")
                    if "outputName" in k:
                        filename = kwargs.get('outputName').split(
                            '.')[0] + '.exe'

                    if 'scanf' in k:
                        with open(text, 'r') as f:
                            code = f.read()
                        inp = kwargs.get('scanf')
                        if code.count('scanf') != len(inp):
                            raise Exception(
                                "Number of scanf inputs does not match the scanf calls in the code"
                            )
                        else:
                            i = 1  #initial step
                            while i < len(inp):
                                inp.insert(i, '\n')
                                i = i + 3 + 1
                            p = _subprocess.Popen("gcc " + text + " -o" +
                                                  filename.split('.')[0],
                                                  shell=True)
                            p.wait()
                            p1 = _subprocess.Popen([filename],
                                                   stdout=_subprocess.PIPE,
                                                   stdin=_subprocess.PIPE,
                                                   encoding='utf8')
                            out = str(
                                p1.communicate(''.join(char
                                                       for char in inp))[0])
                            if 'outputName' not in kwargs.keys():
                                _os.remove(filename)
                            return out

                    _subprocess.call(
                        ["gcc", text, "-o" + filename.split('.')[0]],
                        shell=True)
                    res = str(
                        _subprocess.check_output(
                            [_os.path.join(pat, filename)])).strip()[2:-1]
                    _os.remove(filename)
                    return res
                else:
                    raise Exception("Invalid Path given")
Example #8
def get_airflow_home():
    return expand_env_var(os.environ.get('AIRFLOW_HOME', '~/airflow'))


def get_airflow_config(airflow_home):
    if 'AIRFLOW_CONFIG' not in os.environ:
        return os.path.join(airflow_home, 'airflow.cfg')
    return expand_env_var(os.environ['AIRFLOW_CONFIG'])
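# NOTE: expand_env_var() is used above but not included in this snippet. A
# minimal sketch of such a helper (an assumption, not necessarily Airflow's
# exact code): repeatedly expand "~" and $ENV_VARS until the value is stable.
def expand_env_var(env_var):
    if not env_var:
        return env_var
    while True:
        interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
        if interpolated == env_var:
            return interpolated
        env_var = interpolated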


# Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using
# "~/airflow" and "$AIRFLOW_HOME/airflow.cfg" respectively as defaults.

AIRFLOW_HOME = get_airflow_home()
AIRFLOW_CONFIG = get_airflow_config(AIRFLOW_HOME)
pathlib.Path(AIRFLOW_HOME).mkdir(parents=True, exist_ok=True)

# Set up dags folder for unit tests
# this directory won't exist if users install via pip
_TEST_DAGS_FOLDER = os.path.join(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'tests',
    'dags')
if os.path.exists(_TEST_DAGS_FOLDER):
    TEST_DAGS_FOLDER = _TEST_DAGS_FOLDER
else:
    TEST_DAGS_FOLDER = os.path.join(AIRFLOW_HOME, 'dags')

# Set up plugins folder for unit tests
_TEST_PLUGINS_FOLDER = os.path.join(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'tests',
    'plugins')
Example #9
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from pages import (
    overview,
    pricePerformance,
    portfolioManagement,
    feesMins,
    distributions,
    newsReviews,
)
from utils import BarGraphFigure
import pandas as pd
import pathlib

# get relative data folder
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("data").resolve()

df_output = pd.read_csv(DATA_PATH.joinpath("output.csv"))

app = dash.Dash(
    __name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}]
)
server = app.server

app.config.suppress_callback_exceptions = True

# Describe the layout/ UI of the app
app.layout = html.Div(
    [dcc.Location(id="url", refresh=False), html.Div(id="page-content")]
)
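# The snippet ends here; a multi-page Dash app of this shape is usually
# completed by a callback that swaps the page content based on the URL.
# Sketch only -- it assumes each page module exposes a create_layout(app)
# helper, which is not shown in the original.
@app.callback(Output("page-content", "children"), [Input("url", "pathname")])
def display_page(pathname):
    if pathname == "/price-performance":
        return pricePerformance.create_layout(app)
    if pathname == "/portfolio-management":
        return portfolioManagement.create_layout(app)
    return overview.create_layout(app)


if __name__ == "__main__":
    app.run_server(debug=True)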
Example #10
def test_train(
    tmpdir,
    monkeypatch,
    batch_size,
    checkpoint_path,
    dense_size,
    extensions,
    fill_strategy,
    gpus,
    hidden_size,
    illegal_chars,
    n_layers,
    use_mlflow,
    max_epochs,
    output_path,
    early_stopping,
    use_long,
    train_test_split,
    vocab_size,
    window_size,
):
    train = getattr(mltype.cli, "train")

    path_dir = pathlib.Path(str(tmpdir))
    path_file = path_dir / "1.txt"
    path_file.write_text("HELLOOOO THEREEEE")

    fake_run_train = Mock()
    monkeypatch.setattr("mltype.ml.run_train", fake_run_train)

    runner = CliRunner()
    options = [
        ("b", "batch-size", batch_size),
        ("c", "checkpoint-path", checkpoint_path),
        ("d", "dense-size", dense_size),
        ("e", "extensions", extensions),
        ("f", "fill-strategy", fill_strategy),
        ("g", "gpus", gpus),
        ("h", "hidden-size", hidden_size),
        ("i", "illegal-chars", illegal_chars),
        ("l", "n-layers", n_layers),
        ("m", "use-mlflow", use_mlflow),
        ("n", "max-epochs", max_epochs),
        ("o", "output-path", output_path),
        ("s", "early-stopping", early_stopping),
        ("t", "train-test-split", train_test_split),
        ("v", "vocab-size", vocab_size),
        ("w", "window-size", window_size),
    ]

    command = command_composer(
        (str(path_dir), str(path_file), "naame"), options, use_long=use_long
    )

    print(command)  # to know why it failed

    result = runner.invoke(train, command)
    print(result.output)

    assert result.exit_code == 0

    fake_run_train.assert_called_once()

    call = fake_run_train.call_args

    assert isinstance(call[0][0], list)
    assert call[0][1] == "naame"

    assert call[1] == dict(
        batch_size=batch_size,
        checkpoint_path=checkpoint_path,
        dense_size=dense_size,
        fill_strategy=fill_strategy,
        gpus=gpus,
        hidden_size=hidden_size,
        illegal_chars=illegal_chars,
        n_layers=n_layers,
        use_mlflow=use_mlflow,
        max_epochs=max_epochs,
        output_path=output_path,
        early_stopping=early_stopping,
        train_test_split=train_test_split,
        vocab_size=vocab_size,
        window_size=window_size,
    )
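# command_composer() is a helper assumed by these tests but not shown here.
# A plausible sketch: build the CLI argument list from positional arguments
# and (short_flag, long_flag, value) triples, skipping None values and
# emitting booleans as bare flags. The real helper may differ.
def command_composer(positional, options, use_long=False):
    command = list(positional)
    for short, long_, value in options:
        if value is None:
            continue
        flag = f"--{long_}" if use_long else f"-{short}"
        if isinstance(value, bool):
            if value:
                command.append(flag)
        else:
            command.extend([flag, str(value)])
    return command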
Example #11
def test_file(
    tmpdir,
    monkeypatch,
    end_line,
    force_perfect,
    include_whitespace,
    instant_death,
    n_lines,
    output_file,
    random_state,
    start_line,
    use_long,
    target_wpm,
):
    file_path = pathlib.Path(str(tmpdir)) / "texts.txt"
    file_path.write_text("\n".join(30 * ["sds"]))

    file_ = getattr(mltype.cli, "file")

    fake_main_basic = Mock()
    monkeypatch.setattr("mltype.interactive.main_basic", fake_main_basic)

    runner = CliRunner()
    options = [
        ("e", "end-line", end_line),
        ("f", "force-perfect", force_perfect),
        ("i", "instant-death", instant_death),
        ("l", "n-lines", n_lines),
        ("o", "output-file", output_file),
        ("r", "random-state", random_state),
        ("s", "start-line", start_line),
        ("t", "target-wpm", target_wpm),
        ("w", "include-whitespace", include_whitespace),
    ]

    command = command_composer((str(file_path),), options, use_long=use_long)
    print(command)  # to know why it failed

    result = runner.invoke(file_, command)

    mode_exact = start_line is not None and end_line is not None
    mode_random = n_lines is not None

    if not (mode_exact ^ mode_random):
        assert result.exit_code != 0
        return

    if mode_exact:
        if random_state is not None:
            assert result.exit_code != 0
            return

        if start_line >= end_line:
            assert result.exit_code != 0
            return

    print(result.output)
    assert result.exit_code == 0
    fake_main_basic.assert_called_once()

    call = fake_main_basic.call_args

    assert isinstance(call[0][0], str)

    assert call[1] == {
        "force_perfect": force_perfect,
        "instant_death": instant_death,
        "output_file": output_file,
        "target_wpm": target_wpm,
    }
Example #12
import logging
import pathlib

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from PIL import ImageColor, ImageDraw, ImageFont

from datasetinsights.data.datasets.cityscapes import CITYSCAPES_COLOR_MAPPING

logger = logging.getLogger(__name__)
COLORS = list(ImageColor.colormap.values())
CUR_DIR = pathlib.Path(__file__).parent.absolute()


def decode_segmap(labels, dataset="cityscapes"):
    """Decode segmentation class labels into a color image.

    Args:
        labels (np.array): an array of size (H, W) with integer grayscale
        values denoting the class label at each spatial location.
        dataset (str): dataset name. Defaults to "cityscapes".

    Returns:
        A np.array of the resulting decoded color image in (H, W, C).

    .. note:: (H, W, C) stands for the (height, width, channel) of the 2D image.
    """
    if dataset == "cityscapes":
Example #13
import pathlib              # standard library

import tensorflow as tf     # tensorflow

import models               # our libraries
import datasets             #

# ---------------------------------------------------------------------------- #

# Parameters
f                  = 20
batch_size         = 50
learning_rate      = 0.05
activation_func    = tf.nn.relu
max_train_epoch    = 100000
max_train_accur    = 0.97
load_parameters    = True
parameters_path    = pathlib.Path("model.npy")

# ---------------------------------------------------------------------------- #

# Dataset instantiation
dataset   = datasets.load_mnist() # handwritten digit database
train_set = dataset.cut(0, 50000, 50000).shuffle().cut(0, 50000, batch_size) # 1000 batches of size 50
test_set  = dataset.cut(50000, 60000, 10000)                                 # 1 batch of size 10 000

# Model instantiator
builder_opt  = tf.train.AdagradOptimizer(learning_rate)
builder_dims = [784, 100, 10] # 3 layer neural network:
                              # input layer  : 784 neurons (1 image = 28*28 pixels)
                              # hidden layer : 100 neurons
                              # output layer :  10 neurons (digits 0-9)
def builder(inputs=None):
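    # Continuation sketch -- the original body is cut off here and most likely
    # goes through the local `models` library. As a plain-TensorFlow 1.x
    # stand-in, the dense stack described by builder_dims could be built like:
    x = inputs
    for units in builder_dims[1:-1]:
        x = tf.layers.dense(x, units, activation=activation_func)
    return tf.layers.dense(x, builder_dims[-1])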
Example #14
def init():
    # Initializes these globals:
    #
    # kconf:
    #   Kconfig instance for the configuration
    #
    # out_dir:
    #   Output directory
    #
    # index_desc:
    #   Set to the corresponding command-line arguments (or None if
    #   missing)
    #
    # modules:
    #   A list of (<title>, <suffix>, <path>, <desc. path>) tuples. See the
    #   --modules flag. Empty if --modules wasn't passed.
    #
    #   <path> is an absolute pathlib.Path instance, which is handy for robust
    #   path comparisons.
    #
    # separate_all_index:
    #   True if --separate-all-index was passed
    #
    # strip_module_paths:
    #   True unless --keep-module-paths was passed

    global kconf
    global out_dir
    global index_desc
    global modules
    global separate_all_index
    global strip_module_paths

    args = parse_args()

    kconf = kconfiglib.Kconfig(args.kconfig, suppress_traceback=True)
    out_dir = args.out_dir
    index_desc = args.index_desc
    separate_all_index = args.separate_all_index
    strip_module_paths = args.strip_module_paths

    modules = []
    for module_spec in args.modules:
        # Split on ',', but keep any ',,' as a literal ','. Temporarily
        # represent a literal comma with null.
        spec_parts = [
            part.replace("\0", ",")
            for part in module_spec.replace(",,", "\0").split(",")
        ]

        if len(spec_parts) == 3:
            title, suffix, path_s = spec_parts
            desc_path = None
        elif len(spec_parts) == 4:
            title, suffix, path_s, desc_path = spec_parts
        else:
            sys.exit(
                "error: --modules argument '{}' should have the format "
                "<title>,<suffix>,<path> or the format "
                "<title>,<suffix>,<path>,<index description filename>. "
                "A doubled ',,' in any part is treated as a literal comma.".
                format(module_spec))

        abspath = pathlib.Path(path_s).resolve()
        if not abspath.exists():
            sys.exit(
                "error: path '{}' in --modules argument does not exist".format(
                    abspath))

        modules.append((title, suffix, abspath, desc_path))
Example #15
import pathlib
import typing

import jsons
import jsons.decorators
import yaml
from loguru import logger

from smlpy import errors, units

msg_start = "1b1b1b1b"
msg_end = "1b1b1b1b"
msg_version_1 = "01010101"

DATA_MIN_LEN = len(msg_start) + len(msg_version_1) + len(msg_end) + 8  # crc length etc.

obis_path = pathlib.Path(__file__).parent / "obis_t_kennzahlen.yaml"

with obis_path.open() as f:
    obis_t_kennzahlen = yaml.safe_load(f)["kennzahlen"]

# from the type-length definition, first tuple is byte length, second is signed
_integer_hex_marker = {
    "62": (1, False),
    "63": (2, False),
    "65": (4, False),
    "69": (8, False),
    "52": (1, True),
    "53": (2, True),
    "55": (4, True),
    "56": (
        5,
Example #16
	all_data_filtered_loss = all_data[['Parameters', 'model_3_loss', 'val_model_3_loss', 'model_2_loss', 'val_model_2_loss', 'model_loss', 'val_model_loss']].copy()
	all_data_filtered_loss.rename(inplace=True, index=str, columns={'model_3_loss': 'Contextual loss', 'val_model_3_loss':'Val contextual loss', 'model_2_loss': 'Encoder loss', 'val_model_2_loss':'Val encoder loss', 'model_loss': 'Adversarial loss', 'val_model_loss':'Val adversarial loss'})
	all_data_filtered_loss.set_index('Parameters', inplace=True)
	all_data_loss_graph_df = all_data_filtered_loss.stack().reset_index()
	all_data_loss_graph_df.rename(inplace=True, index=str, columns={'level_0':'Parameters', 'level_1':'Metric', 0:'Loss'})
	#all_data_loss_graph_df['Latent dim - Dropout rate'] = all_data_loss_graph_df['Latent dim - Dropout rate'].astype('int8')
	sns.set(font_scale=1.5, style="whitegrid")
	f = sns.catplot(x="Parameters", y="Loss", hue="Metric", kind="point", data=all_data_loss_graph_df)
	f.set_xticklabels(rotation=20,  horizontalalignment='right')
	#f.set_xlabels('')
	plt.gcf().subplots_adjust(bottom=0.2)
	plt.savefig(os.path.join(datapath, 'summary', 'experiments_results_loss.png'))
	results = pd.concat([all_data_graph_df.groupby(['Parameters', 'Metric']).mean(),all_data_graph_df.groupby(['Parameters', 'Metric']).std()], axis=1)
	results.to_csv(os.path.join(datapath, 'summary', 'experiment_results_loss.csv'))

#############
## EXECUTE ##
#############

if __name__ == '__main__':

	parser = ap.ArgumentParser(description='Make the summary graphs for model evaluation', formatter_class=ap.RawTextHelpFormatter)
	parser.add_argument('--datadir', metavar='Type_String', type=str, nargs="?", default='', help='The directory where csv files are located. No default.')

	args = parser.parse_args()
	datadir = args.datadir

	datapath = os.path.join(os.getcwd(), datadir)
	pathlib.Path(os.path.join(datapath, 'summary')).mkdir(parents=True, exist_ok=True)
	makegraphs(datapath)
Example #17
from setuptools import setup
import pathlib

# The directory containing this file
HERE = pathlib.Path(__file__).parent

# The text of the README file
README = (HERE / "README.md").read_text()

setup(
    name='string_grouper',
    version='0.6.1',
    packages=['string_grouper', 'string_grouper_utils'],
    license='MIT License',
    description='String grouper contains functions to do string matching using TF-IDF and the cosine similarity. '
                'Based on https://bergvca.github.io/2017/10/14/super-fast-string-matching.html',
    author='Chris van den Berg',
    long_description=README,
    long_description_content_type="text/markdown",
    author_email='*****@*****.**',
    url='https://github.com/Bergvca/string_grouper',
    zip_safe=False,
    python_requires='>3.7',
    install_requires=['pandas>=0.25.3'
                      , 'scipy'
                      , 'scikit-learn'
                      , 'numpy'
                      , 'sparse_dot_topn_for_blocks>=0.3.1'
                      , 'topn>=0.0.7'
                      ]
)
Example #18
    enrollment_data_path = "./data/CASIA_thousand_norm_256_64_e_nn_open_set_stacked/enrollment"
    test_data_path = "./data/CASIA_thousand_norm_256_64_e_nn_open_set_stacked/test"
    batch_size = 196

    model, input_size = get_model(model_name, checkpoint_path)

    device = torch.device('cuda')
    model.to(device)
    model.eval()


    enrollment_dataloader = get_dataloader(enrollment_data_path, input_size, batch_size=batch_size)
    test_dataloader = get_dataloader(test_data_path, input_size, batch_size=batch_size)

    print("Enrolling identities...")
    enrolled = enroll_identities(model.feature_extract_avg_pool, enrollment_dataloader, device)

    print("Running recognition evaluation...")
    rank_1_accuracy, rank_5_accuracy, rank_n_accuracy = evaluate(enrolled, model.feature_extract_avg_pool, test_dataloader, device)

    results = {
        "rank_1_acc": rank_1_accuracy,
        "rank_5_acc": rank_5_accuracy,
        "rank_n_accuracies": list(rank_n_accuracy)
    }

    pathlib.Path("./results").mkdir(parents=True, exist_ok=True)

    with open(f'./results/{model_name}_results.json', 'w') as f:
        json.dump(results, f)
Example #19
import logging
import os
import pathlib
import stat

import requests
import sys
import zipfile
from typing import Dict, Any
from urllib.parse import urlparse

from electrumsv_sdk.builtin_components.merchant_api.mapi_db_config import check_postgres_db
from electrumsv_sdk.config import Config
from electrumsv_sdk.utils import get_directory_name


VERSION = "0.0.2"  # electrumsv/electrumsv-mAPI version
MERCHANT_API_VERSION = "1.3.0"
MODULE_DIR = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
PFX_PATH = pathlib.Path(MODULE_DIR) / "cli_inputs/localhost.pfx"
SDK_POSTGRES_PORT: int = int(os.environ.get('SDK_POSTGRES_PORT', "5432"))
SDK_PORTABLE_MODE: int = int(os.environ.get('SDK_PORTABLE_MODE', "0"))
SDK_SKIP_POSTGRES_INIT: int = int(os.environ.get('SDK_SKIP_POSTGRES_INIT', "0"))

COMPONENT_NAME = get_directory_name(__file__)
logger = logging.getLogger(COMPONENT_NAME)



# The uri is copied from the Github repository release assets list.
PREBUILT_ENTRIES = {
    "Darwin": {
        "uri": f"https://github.com/electrumsv/electrumsv-mAPI/releases/download/{VERSION}/"
               "MacOSXMerchantAPI.zip",
Example #20
def module_path():
    return pathlib.PurePath(pathlib.Path(__file__).parents[0])
Example #21
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from kfp import components
from kfp.v2 import dsl
import kfp.v2.compiler as compiler
import pathlib

test_data_dir = pathlib.Path(__file__).parent / 'component_yaml'

ingestion_op = components.load_component_from_file(
    str(test_data_dir / 'ingestion_component.yaml'))

training_op = components.load_component_from_file(
    str(test_data_dir / 'fancy_trainer_component.yaml'))


@dsl.pipeline(
    name='two-step-pipeline-with-ontology',
    description='A linear two-step pipeline with artifact ontology types.')
def my_pipeline(input_location: str = 'gs://test-bucket/pipeline_root',
                optimizer: str = 'sgd',
                n_epochs: int = 200):
  ingestor = ingestion_op(input_location=input_location)
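  # Continuation sketch -- the snippet is cut off here. A second step would
  # typically consume the ingestion output and the pipeline would then be
  # compiled into a job spec; the argument names below are assumptions.
  trainer = training_op(
      examples=ingestor.outputs['examples'],
      optimizer=optimizer,
      n_epochs=n_epochs)


if __name__ == '__main__':
  compiler.Compiler().compile(
      pipeline_func=my_pipeline,
      package_path='two_step_pipeline_with_ontology.json')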
Example #22
def main():
    # Create the log and model directory if they're not present.
    model_dir = os.path.join(
        args.log_dir,
        'models_' + time.strftime('%d_%b_%Y_%H_%M_%S', time.localtime()))
    pathlib.Path(model_dir).mkdir(parents=True, exist_ok=True)

    log_writer = SummaryWriter(log_dir=model_dir)

    train_data_path = os.path.join(args.data_dir, args.train_data_dict)
    with open(train_data_path, 'rb') as f:
        train_data_dict = pickle.load(f, encoding='latin1')
    train_dt = train_data_dict['dt']
    print('Loaded training data from %s, train_dt = %.2f' %
          (train_data_path, train_dt))

    if args.eval_every is not None:
        eval_data_path = os.path.join(args.data_dir, args.eval_data_dict)
        with open(eval_data_path, 'rb') as f:
            eval_data_dict = pickle.load(f, encoding='latin1')
        eval_dt = eval_data_dict['dt']
        print('Loaded evaluation data from %s, eval_dt = %.2f' %
              (eval_data_path, eval_dt))

    if args.preloaded_data == 'eth':
        robot_node = stg_node.STGNode('0', 'Pedestrian')
    elif args.preloaded_data == 'nba':
        robot_node = stg_node.STGNode('Al Horford', 'HomeC')

    model_registrar = ModelRegistrar(model_dir, args.device)
    hyperparams['state_dim'] = train_data_dict['input_dict'][robot_node].shape[
        2]
    hyperparams['pred_dim'] = len(train_data_dict['pred_indices'])
    hyperparams['pred_indices'] = train_data_dict['pred_indices']
    hyperparams['dynamic_edges'] = args.dynamic_edges
    hyperparams['edge_state_combine_method'] = args.edge_state_combine_method
    hyperparams[
        'edge_influence_combine_method'] = args.edge_influence_combine_method
    hyperparams['nodes_standardization'] = train_data_dict[
        'nodes_standardization']
    hyperparams['labels_standardization'] = train_data_dict[
        'labels_standardization']
    hyperparams['edge_radius'] = args.edge_radius

    if args.eval_every is not None:
        eval_hyperparams = copy.deepcopy(hyperparams)
        eval_hyperparams['nodes_standardization'] = eval_data_dict[
            "nodes_standardization"]
        eval_hyperparams['labels_standardization'] = eval_data_dict[
            "labels_standardization"]

    kwargs_dict = {
        'dynamic_edges':
        hyperparams['dynamic_edges'],
        'edge_state_combine_method':
        hyperparams['edge_state_combine_method'],
        'edge_influence_combine_method':
        hyperparams['edge_influence_combine_method']
    }

    stg = SpatioTemporalGraphCVAEModel(robot_node, model_registrar,
                                       hyperparams, kwargs_dict, log_writer,
                                       args.device)
    print('Created training STG model.')

    if args.eval_every is not None:
        # It is important that eval_stg uses the same model_registrar as
        # the stg being trained, otherwise you're just repeatedly evaluating
        # randomly-initialized weights!
        eval_stg = SpatioTemporalGraphCVAEModel(robot_node, model_registrar,
                                                eval_hyperparams, kwargs_dict,
                                                log_writer, args.eval_device)
        print('Created evaluation STG model.')

    # Create the aggregate scene_graph for all the data, allowing
    # for batching, just like the old one. Then, for speed tests
    # we'll show how much faster this method is than keeping the
    # full version. Can show graphs of forward inference time vs problem size
    # with two lines (using aggregate graph, using online-computed graph).
    agg_scene_graph = create_batch_scene_graph(
        train_data_dict['input_dict'],
        float(hyperparams['edge_radius']),
        use_old_method=(args.dynamic_edges == 'no'))
    print('Created aggregate training scene graph.')

    if args.dynamic_edges == 'yes':
        agg_scene_graph.compute_edge_scaling(args.edge_addition_filter,
                                             args.edge_removal_filter)
        train_data_dict['input_dict'][
            'edge_scaling_mask'] = agg_scene_graph.edge_scaling_mask
        print('Computed edge scaling for the training scene graph.')

    stg.set_scene_graph(agg_scene_graph)
    stg.set_annealing_params()

    if args.eval_every is not None:
        eval_agg_scene_graph = create_batch_scene_graph(
            eval_data_dict['input_dict'],
            float(hyperparams['edge_radius']),
            use_old_method=(args.dynamic_edges == 'no'))
        print('Created aggregate evaluation scene graph.')

        if args.dynamic_edges == 'yes':
            eval_agg_scene_graph.compute_edge_scaling(
                args.edge_addition_filter, args.edge_removal_filter)
            eval_data_dict['input_dict'][
                'edge_scaling_mask'] = eval_agg_scene_graph.edge_scaling_mask
            print('Computed edge scaling for the evaluation scene graph.')

        eval_stg.set_scene_graph(eval_agg_scene_graph)
        eval_stg.set_annealing_params()

    # model_registrar.print_model_names()
    optimizer = optim.Adam(model_registrar.parameters(),
                           lr=hyperparams['learning_rate'])
    lr_scheduler = optim.lr_scheduler.ExponentialLR(
        optimizer, gamma=hyperparams['learning_decay_rate'])

    print_training_header(newline_start=True)
    for curr_iter in range(args.num_iters):
        # Necessary because we sometimes move the weights back and forth between GPU and CPU.
        model_registrar.to(args.device)

        # Setting the current iterator value for internal logging.
        stg.set_curr_iter(curr_iter)

        # Stepping forward the learning rate scheduler and annealers.
        lr_scheduler.step()
        log_writer.add_scalar('dynstg/learning_rate',
                              lr_scheduler.get_lr()[0], curr_iter)
        stg.step_annealers()

        # Zeroing gradients for the upcoming iteration.
        optimizer.zero_grad()

        # Obtaining the batch's training loss.
        train_inputs, train_labels = sample_inputs_and_labels(
            train_data_dict, batch_size=hyperparams['batch_size'])

        # Compute the training loss.
        train_loss = stg.train_loss(train_inputs, train_labels,
                                    hyperparams['prediction_horizon'])

        # Print training information. Also, no newline here. It's added in at a later line.
        print('{:9} | {:10} | '.format(curr_iter, '%.2f' % train_loss.item()),
              end='',
              flush=True)

        # Calculating gradients.
        train_loss.backward()

        # Clipping gradients.
        if hyperparams['grad_clip'] is not None:
            nn.utils.clip_grad_value_(model_registrar.parameters(),
                                      hyperparams['grad_clip'])

        # # Logging gradient norms.
        # len_prefix = len('model_dict.')
        # for name, param in model_registrar.named_parameters():
        #     if param.grad is None:
        #         # print(name, 'grad is None')
        #         continue

        #     log_writer.add_scalar('gradient_norms/' + name[len_prefix:],
        #                           param.grad.norm(),
        #                           curr_iter)

        # Performing a gradient step.
        optimizer.step()

        # Freeing up memory.
        del train_loss

        if args.eval_every is not None and (curr_iter +
                                            1) % args.eval_every == 0:
            with torch.no_grad():
                # First plotting training predictions.
                pred_fig = plot_utils.plot_predictions_during_training(
                    stg,
                    train_inputs,
                    hyperparams['prediction_horizon'],
                    num_samples=100,
                    dt=train_dt,
                    max_speed=max_speed)
                log_writer.add_figure('dynstg/train_prediction', pred_fig,
                                      curr_iter)

                # Then computing evaluation values and predictions.
                model_registrar.to(args.eval_device)
                eval_stg.set_curr_iter(curr_iter)
                eval_inputs, eval_labels = sample_inputs_and_labels(
                    eval_data_dict,
                    device=args.eval_device,
                    batch_size=args.eval_batch_size)

                (eval_loss_q_is, eval_loss_p,
                 eval_loss_exact) = eval_stg.eval_loss(
                     eval_inputs,
                     eval_labels,
                     hyperparams['prediction_horizon'],
                     eval_dt=eval_dt,
                     max_speed=max_speed)
                print('{:15} | {:10} | {:14}'.format(
                    '%.2f' % eval_loss_q_is.item(),
                    '%.2f' % eval_loss_p.item(),
                    '%.2f' % eval_loss_exact.item()),
                      end='',
                      flush=True)

                # Freeing up memory.
                del eval_loss_q_is
                del eval_loss_p
                del eval_loss_exact

        else:
            print('{:15} | {:10} | {:14}'.format('', '', ''),
                  end='',
                  flush=True)

        # Here's the newline that ends the current training information printing.
        print('')

        if args.save_every is not None and (curr_iter +
                                            1) % args.save_every == 0:
            model_registrar.save_models(curr_iter)
            print_training_header()
Example #23
def link(genast: ast.Module, params: str):
    opt = Optimizer()

    @opt.register
    @macro_exp("[]")
    def builtin_empty_list():
        return subst()

    @opt.register
    @macro_stmt("""
_rbnf_immediate_lst = a
_rbnf_immediate_lst.append(b)
        """,
                ret="_rbnf_immediate_lst")
    def builtin_push_list(a, b):
        return subst(a=a, b=b)

    @opt.register
    @macro_exp("a is b")
    def builtin_eq(a, b):
        return subst(a=a, b=b)

    @opt.register
    @macro_exp("a is not b")
    def builtin_not_eq(a, b):
        return subst(a=a, b=b)

    @opt.register
    @macro_stmt("""
try:
    _rbnf_cur_token = tokens.array[tokens.offset]
    if _rbnf_cur_token.idint is idint:
        tokens.offset += 1
    else:
        _rbnf_cur_token  = None
except IndexError:
    _rbnf_cur_token = None
""", "_rbnf_cur_token")
    def builtin_match_tk(tokens, idint):
        return subst(tokens=tokens, idint=idint)

    @opt.register
    @macro_stmt("""
try:
    tokens.array[tokens.offset + i]
    _rbnf_peek_tmp = True
except IndexError:
    _rbnf_peek_tmp = False
    """,
                ret="_rbnf_peek_tmp")
    def builtin_peekable(tokens, i):
        return subst(tokens=tokens, i=i)

    @opt.register
    @macro_stmt("""
_rbnf_old_offset = tokens.offset
_rbnf_cur_token = tokens.array[_rbnf_old_offset]
tokens.offset = _rbnf_old_offset + 1
    """,
                ret="_rbnf_cur_token")
    def builtin_mv_forward(tokens):
        return subst(tokens=tokens)

    @opt.register
    @macro_exp("tokens.array[tokens.offset + i]")
    def builtin_peek(tokens, i):
        return subst(tokens=tokens, i=i)

    @opt.register
    @macro_exp("x")
    def builtin_to_result(x):
        return subst(x=x)

    @opt.register
    @macro_exp("x")
    def builtin_to_any(x):
        return subst(x=x)

    @opt.register
    @macro_exp("x is None")
    def builtin_is_null(x):
        return subst(x=x)

    @opt.register
    @macro_exp("x is not None")
    def builtin_is_not_null(x):
        return subst(x=x)

    genast: ast.Module = opt.visit(genast)
    parser_template = (pathlib.Path(__file__).parent /
                       "fffparser_template.py").open().read()
    imp: ast.Module = ast.parse(f"""
{parser_template}
builtin_cons = Cons
builtin_nil = _nil
builtin_mk_ast = AST
def mk_parser({params}):
    pass
""")
    fn: ast.FunctionDef = imp.body[-1]
    fn.body.extend(genast.body)
    fn.body.append(ast.Return(ast.Name("rbnf_named_parse_START", ast.Load())))
    ast.fix_missing_locations(fn)
    return imp
Example #24
        sys.exit(1)

    try:
        from __metadata__ import metadata as metadata
    except ImportError:
        traceback.print_exc()
        sys.exit(1)

    try:
        {previous_segment_metadata_import_statement}
    except ImportError:
        traceback.print_exc()
        sys.exit(1)

    try:
        segment_directory = pathlib.Path(os.path.realpath(__file__)).parent
        builds_directory = segment_directory.parent.parent / 'builds'
        builds_directory = ide.Path(builds_directory)
    except:
        traceback.print_exc()
        sys.exit(1)

    try:
        with abjad.Timer() as timer:
            lilypond_file = maker.run(
                metadata=metadata,
                previous_metadata=previous_metadata,
            )
        segment_maker_runtime = int(timer.elapsed_time)
        count = segment_maker_runtime
        counter = abjad.String('second').pluralize(count)
Example #25
"""Meme engine tester module."""

import pathlib
import unittest

from MemeEngine import MemeEngine

PROJECT_ROOT = pathlib.Path(__file__).parent.parent


class TestMemeEngine(unittest.TestCase):
    """Collection of meme engine testers."""
    def test_meme_engine__make_meme__pass(self):
        """Test meme engine, make_meme function."""
        output_directory = './tmp'
        font_path = f'{PROJECT_ROOT}/_data/fonts/LilitaOne-Regular.ttf'
        meme = MemeEngine(output_directory, font_path)

        img = f'{PROJECT_ROOT}/_data/photos/dog/xander_1.jpg'
        text = 'when in doubt, run'
        author = 'someone from the internet'
        result = meme.make_meme(img, text, author)
        self.assertEqual(result.split('.')[-1], 'jpg')


if __name__ == '__main__':
    unittest.main()
Example #26
def datapkg_to_sqlite(sqlite_url, out_path, clobber=False, fkeys=False):
    """
    Load a PUDL datapackage into a sqlite database.

    Args:
        sqlite_url (str): An SQLite database connection URL.
        out_path (path-like): Path to the base directory of the datapackage
            to be loaded into SQLite. Must contain the datapackage.json file.
        clobber (bool): If True, replace an existing PUDL DB if it exists. If
            False (the default), fail if an existing PUDL DB is found.
        fkeys (bool): If True, tell SQLite to check foreign key constraints
            for the records that are being loaded. Left off by default.

    Returns:
        None

    """
    # Using SQL Alchemy event hooks to enable the foreign key checking pragma
    # within SQLite for all subsequent database connections. See these pages for
    # additional documentation on how this stuff works:
    # https://docs.sqlalchemy.org/en/13/core/event.html
    # https://docs.sqlalchemy.org/en/13/dialects/sqlite.html#foreign-key-support
    if fkeys:
        @sa.event.listens_for(sa.engine.Engine, "connect")
        def _set_sqlite_pragma(dbapi_connection, connection_record):
            from sqlite3 import Connection as SQLite3Connection
            if isinstance(dbapi_connection, SQLite3Connection):
                logger.warning("Enforcing foreign key constraints in SQLite3")
                cursor = dbapi_connection.cursor()
                cursor.execute("PRAGMA foreign_keys=ON;")
                cursor.close()

    # prepping the sqlite engine
    pudl_engine = sa.create_engine(sqlite_url)
    logger.info("Dropping the current PUDL DB, if it exists.")
    try:
        # So that we can wipe it out
        pudl.helpers.drop_tables(pudl_engine, clobber=clobber)
    except sa.exc.OperationalError:
        pass
    # And start anew
    pudl_engine = sa.create_engine(sqlite_url)

    # grab the merged datapackage metadata file:
    pkg = datapackage.DataPackage(
        descriptor=str(pathlib.Path(out_path, 'datapackage.json')))
    # we want to grab the dictionary of columns that need autoincrement id cols
    try:
        autoincrement = pkg.descriptor['autoincrement']
    # in case there are no autoincrement columns in the metadata
    except KeyError:
        autoincrement = {}

    logger.info("Loading merged datapackage into SQLite.")
    logger.info("This could take a while. It might be a good time")
    logger.info("to get a drink of water. Hydrate or die!")
    try:
        # Save the data package in SQL
        pkg.save(storage='sql', engine=pudl_engine, merge_groups=True,
                 autoincrement=autoincrement)
    except exceptions.TableSchemaException as exception:
        logger.error('SQLite conversion failed. See following errors:')
        logger.error(exception.errors)
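# Example invocation (illustrative only; the URL and paths are hypothetical):
#
#   datapkg_to_sqlite(
#       sqlite_url="sqlite:////tmp/pudl.sqlite",
#       out_path="/tmp/pudl_datapkg",
#       clobber=True,
#       fkeys=False,
#   )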
Example #27
plt.rcParams["xtick.major.size"] = 15
plt.rcParams["ytick.major.size"] = 15
plt.rcParams["xtick.minor.size"] = 12
plt.rcParams["ytick.minor.size"] = 12
plt.rcParams["xtick.major.pad"] = 5
plt.rcParams["ytick.major.pad"] = 5
plt.rcParams["xtick.minor.pad"] = 3
plt.rcParams["ytick.minor.pad"] = 3
plt.rcParams["xtick.major.width"] = 7
plt.rcParams["ytick.major.width"] = 7
plt.rcParams["xtick.minor.width"] = 5
plt.rcParams["ytick.minor.width"] = 5


folder = "calc_datasheets/{}{}".format(sample, label2)
pathlib.Path(folder).mkdir(parents=True, exist_ok=True)
print(folder)


datasheet_blank = "{}.csv".format(blank)
df_blank = pd.read_csv(datasheet_blank, low_memory=False)
datalist_blank = list(df_blank[:0])

blank_avg_list = []
#blank_thres_list = []
for i in range(1, len(datalist_blank)-1):
    isotope_i = df_blank[datalist_blank[i]]
    #isotope_i_nonzero = []
    #for j in isotope_i:
        #if positive(j, 0):
        #isotope_i_nonzero.append(j)
Example #28
# -*- coding: utf-8 -*-
"""LogViewer Bottle version
"""
import sys
import time
import pathlib
from bottle import route, run, debug, template, request, static_file  # , validate, error
sys.path.insert(0, str(pathlib.Path(__file__).parent.resolve()))
from readlog import init_db, rereadlog, get_data

timestr = str(int(time.time() * 10))


@route('/')
def logviewer():
    """load a log file and display a first page
    """
    logfile = request.GET.get('logfile', '')
    entries = request.GET.get('entries', '')
    order = request.GET.get('order', '')
    if not logfile:
        init_db(timestr)
    else:
        rereadlog(logfile, entries, order, timestr)
    ## return str(get_data())
    return template('viewlogs', get_data(timestr))


@route('/top')
def first_page():
    """Go to first displayable page
Example #29
                def check_ffmpeg(
                ):  # FFMPEG -------------------------------------------------------------------------
                    global ffmpeg
                    import shutil

                    def write_path_to_ffmpeg(
                    ):  # Writes path to ffmpeg to the config.ini file
                        try:
                            config.set('ffmpeg_path', 'path', ffmpeg)
                            with open(config_file, 'w') as configfile:
                                config.write(configfile)
                        except Exception:
                            pass

                    if shutil.which('ffmpeg') is not None:
                        ffmpeg = str(pathlib.Path(
                            shutil.which('ffmpeg'))).lower()
                        messagebox.showinfo(
                            title='Prompt!',
                            message='ffmpeg.exe found on system PATH, '
                            'automatically setting path to location.\n\n'
                            'Note: This can be changed in the config.ini file'
                            ' or in the Options menu')
                        if pathlib.Path("Apps/ffmpeg/ffmpeg.exe").is_file():
                            rem_ffmpeg = messagebox.askyesno(
                                title='Delete Included ffmpeg?',
                                message=
                                'Would you like to delete the included FFMPEG?'
                            )
                            if rem_ffmpeg:
                                try:
                                    shutil.rmtree(
                                        str(pathlib.Path("Apps/ffmpeg")))
                                except Exception:
                                    pass
                        write_path_to_ffmpeg()
                    elif pathlib.Path("Apps/ffmpeg/ffmpeg.exe").is_file():
                        messagebox.showinfo(
                            title='Info',
                            message='Program will use the included '
                            '"ffmpeg.exe" located in the "Apps" folder')
                        ffmpeg = str(pathlib.Path("Apps/ffmpeg/ffmpeg.exe"))
                        write_path_to_ffmpeg()
                    else:
                        error_prompt = messagebox.askyesno(
                            title='Error!',
                            message='Cannot find ffmpeg, '
                            'please navigate to "ffmpeg.exe"')
                        if not error_prompt:
                            messagebox.showerror(
                                title='Error!',
                                message=
                                'Program requires ffmpeg.exe to work correctly'
                            )
                            main.destroy()
                        if error_prompt:
                            set_ffmpeg_path()
                            if not pathlib.Path(ffmpeg).is_file():
                                messagebox.showerror(
                                    title='Error!',
                                    message=
                                    'Program requires ffmpeg.exe to work correctly'
                                )
                                main.destroy()
Example #30
import os
import pathlib

# Dictionaries to store the data after it's read in from a text file.
cryptoOpenPriceData = {}
cryptoClosePriceData = {}
cryptoVolumeData = {}
cryptoHighData = {}
cryptoLowData = {}
stepsize = {}

# Set up the relative file path
dirname = os.path.dirname(os.path.realpath(__file__))

#path to save the different text files in
cryptoPaths = os.path.join(dirname + '/', 'CryptoData')

# Make the directories in the path variable if they do not exist
pathlib.Path(cryptoPaths).mkdir(parents=True, exist_ok=True)

logPath = os.path.join(dirname + '/', 'CryptoDataDebug.txt')


file = open(logPath, "w")

#one day in ms
ONE_DAY = 86400000
ONE_THIRD_DAY = 28800000
COUNT = 3

def getDataDatabase(startMinuteBack, endMinuteBack):
    """
    :param startMinuteBack: first minute of the interval you want
    :param endMinuteBack: end minute of the interval desired