def setUp(self):
    log_file = "pytorch_conv2D_winograd.log"
    is_debug = True
    set_up_logging(log_file=log_file, is_debug=is_debug)
    self.logger = get_logger(name=__name__)
    self.logger.setLevel(logging.DEBUG)
    self.logger.info("Set up test")
Example #2
    def __init__(self, phase="none"):

        if phase == "none":

            # Only using Configuration variables and vocabs

            self.vocab_words = load_vocab(self.filename_words)
            self.vocab_tags = load_vocab(self.filename_tags)
            self.vocab_chars = load_vocab(self.filename_chars)
            self.vocab_pos = load_vocab(self.filename_pos)

        if phase == "train":

            self.dir_model = self.dir_model + "/"
            #self.dir_model = self.dir_model + str(datetime.datetime.now().strftime("%d-%m-%Y_%H-%M")) + "/"

            # For the training phase:
            # directory for training outputs
            if not os.path.exists(self.dir_model):
                os.makedirs(self.dir_model)

            # Directory inside dir_model to save tf model
            self.model_dir = self.dir_model + "model"

            # create instance of logger
            self.logger = get_logger(self.dir_model + self.log_file)
            self.load_self()

        if phase == "restore":
            self.logger = None
            self.model_dir = self.dir_model + "model"
            self.load_self()
Example #3
def publish(base_url,
            site_prefix,
            bucket,
            federalist_config,
            aws_region,
            aws_access_key_id,
            aws_secret_access_key,
            dry_run=False):
    '''
    Publish the built site to S3.
    '''
    logger = get_logger('publish')

    logger.info('Publishing to S3')

    start_time = datetime.now()

    s3_client = boto3.client(service_name='s3',
                             aws_access_key_id=aws_access_key_id,
                             aws_secret_access_key=aws_secret_access_key,
                             region_name=aws_region)

    s3publisher.publish_to_s3(directory=str(SITE_BUILD_DIR_PATH),
                              base_url=base_url,
                              site_prefix=site_prefix,
                              bucket=bucket,
                              federalist_config=federalist_config,
                              s3_client=s3_client,
                              dry_run=dry_run)

    delta_string = delta_to_mins_secs(datetime.now() - start_time)
    logger.info(f'Total time to publish: {delta_string}')
def build_hugo(branch,
               owner,
               repository,
               site_prefix,
               base_url='',
               user_env_vars=[]):
    '''
    Builds the cloned site with Hugo
    '''
    logger = get_logger('build-hugo')

    HUGO_BIN_PATH = WORKING_DIR_PATH / HUGO_BIN

    run(logger,
        f'echo hugo version: $({HUGO_BIN_PATH} version)',
        env={},
        check=True)

    logger.info('Building site with hugo')

    hugo_args = f'--source {CLONE_DIR_PATH} --destination {SITE_BUILD_DIR_PATH}'
    if base_url:
        hugo_args += f' --baseURL {base_url}'

    env = build_env(branch, owner, repository, site_prefix, base_url,
                    user_env_vars)
    return run(logger,
               f'{HUGO_BIN_PATH} {hugo_args}',
               cwd=CLONE_DIR_PATH,
               env=env,
               node=True)
def setup_ruby():
    '''
    Sets up RVM and installs ruby
    Uses the ruby version specified in .ruby-version if present
    '''
    logger = get_logger('setup-ruby')

    def runp(cmd):
        return run(logger, cmd, cwd=CLONE_DIR_PATH, env={}, ruby=True)

    returncode = 0

    RUBY_VERSION_PATH = CLONE_DIR_PATH / RUBY_VERSION
    if RUBY_VERSION_PATH.is_file():
        ruby_version = ''
        with RUBY_VERSION_PATH.open() as ruby_vers_file:
            ruby_version = ruby_vers_file.readline().strip()
            # escape-quote the value in case there's anything weird
            # in the .ruby-version file
            ruby_version = shlex.quote(ruby_version)
        if ruby_version:
            logger.info('Using ruby version in .ruby-version')
            returncode = runp(f'rvm install {ruby_version}')

    if returncode:
        return returncode

    return runp('echo Ruby version: $(ruby -v)')
Example #6
def is_supported_ruby_version(version):
    '''
    Checks if the version defined in .ruby-version is supported
    '''
    is_supported = 0

    if version:
        logger = get_logger('setup-ruby')

        RUBY_VERSION_MIN = os.getenv('RUBY_VERSION_MIN')

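        # NB: the Ruby one-liner exits 1 when the version is supported;
        # run() returns the process exit code, so a truthy value here
        # means "supported".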
        is_supported = run(
            logger,
            f'ruby -e "exit Gem::Version.new(\'{shlex.split(version)[0]}\') >= Gem::Version.new(\'{RUBY_VERSION_MIN}\') ? 1 : 0"',  # noqa: E501
            cwd=CLONE_DIR_PATH,
            env={},
            ruby=True
        )

        upgrade_msg = 'Please upgrade to an actively supported version, see https://www.ruby-lang.org/en/downloads/branches/ for details.'  # noqa: E501

        if not is_supported:
            logger.error(
                'ERROR: Unsupported ruby version specified in .ruby-version.')
            logger.error(upgrade_msg)

        if version == RUBY_VERSION_MIN:
            logger.warning(
                f'WARNING: Ruby {RUBY_VERSION_MIN} will soon reach end-of-life, at which point Federalist will no longer support it.')  # noqa: E501
            logger.warning(upgrade_msg)

    return is_supported
    def __init__(self, name=None, encoding='bin'):
        Field.__init__(self, name)
        self.name = 'Bitmap'

        self.encoding = encoding
        self.log = log_utils.get_logger('spec_bmp_field')
        self.long_value_1 = 0
        self.long_value_2 = 0
Example #8
def init(client_id, client_secret, test_md):
    global client
    client = praw.Reddit(
        client_id=client_id,
        client_secret=client_secret,
        user_agent='AWS:Discord Image Extractor:0.1 (by u/xmangoslushie)')
    global logger
    logger = log_utils.get_logger(test_md)
    global test_mode
    test_mode = test_md
Example #9
def init(pixiv_client, username, password, test_md):
    pixiv_client.login(username, password)

    global client
    client = pixiv_client
    global refresh_token
    refresh_token = pixiv_client.refresh_token
    global logger
    logger = log_utils.get_logger(test_md)
    global test_mode
    test_mode = test_md
def update_repo(clone_dir):
    '''
    Updates the repo with the full git history
    '''
    logger = get_logger('update')

    logger.info('Fetching full git history')

    command = 'git pull --unshallow'

    return run(logger, command, cwd=clone_dir)
Example #11
def time_execution(f):
    logger = get_logger("timing")

    @wraps(f)
    def wrap(*args, **kw):
        ts = time()
        result = f(*args, **kw)
        te = time()
        logger.info('func:%r took: %2.4f sec', f.__name__, te - ts)
        return result

    return wrap
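
A usage sketch for the decorator above, assuming its missing imports (wraps, time) are added; slow_add is a hypothetical example function:

# imports required by time_execution itself:
from functools import wraps
from time import time

@time_execution
def slow_add(a, b):
    return a + b  # hypothetical example function

slow_add(1, 2)  # logs e.g.: func:'slow_add' took: 0.0001 sec
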
def download_hugo():
    '''
    Downloads the specified version of Hugo
    '''
    logger = get_logger('download-hugo')

    HUGO_VERSION_PATH = CLONE_DIR_PATH / HUGO_VERSION
    if HUGO_VERSION_PATH.is_file():
        logger.info('.hugo-version found')
        hugo_version = ''
        with HUGO_VERSION_PATH.open() as hugo_vers_file:
            try:
                hugo_version = hugo_vers_file.readline().strip()
                hugo_version = shlex.quote(hugo_version)
                regex = r'^(extended_)?[\d]+(\.[\d]+)*$'
                hugo_version = re.search(regex, hugo_version).group(0)
            except Exception:
                raise RuntimeError('Invalid .hugo-version')

        if hugo_version:
            logger.info(f'Using hugo version in .hugo-version: {hugo_version}')
    else:
        raise RuntimeError(".hugo-version not found")
    logger.info(f'Downloading hugo version {hugo_version}')
    failed_attempts = 0
    while (failed_attempts < 5):
        try:
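            # note: any "extended_" prefix is kept in the tarball name but
            # stripped from the release tag below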
            dl_url = ('https://github.com/gohugoio/hugo/releases/download/v' +
                      hugo_version.split('_')[-1] +
                      f'/hugo_{hugo_version}_Linux-64bit.tar.gz')
            response = requests.get(dl_url, verify=CERTS_PATH)

            hugo_tar_path = WORKING_DIR_PATH / 'hugo.tar.gz'
            with hugo_tar_path.open('wb') as hugo_tar:
                for chunk in response.iter_content(chunk_size=128):
                    hugo_tar.write(chunk)

            HUGO_BIN_PATH = WORKING_DIR_PATH / HUGO_BIN
            run(logger,
                f'tar -xzf {hugo_tar_path} -C {WORKING_DIR_PATH}',
                env={},
                check=True)
            run(logger, f'chmod +x {HUGO_BIN_PATH}', env={}, check=True)
            return 0
        except Exception:
            failed_attempts += 1
            logger.info(
                f'Failed attempt #{failed_attempts} to download hugo version: {hugo_version}'
            )
            if failed_attempts == 5:
                raise RuntimeError(
                    f'Unable to download hugo version: {hugo_version}')
            time.sleep(2)  # try again in 2 seconds
Example #13
def setup_node():
    '''
    Sets up node and installs dependencies.

    Uses the node version specified in the cloned repo's .nvmrc
    file if it is present.
    '''
    logger = get_logger('setup-node')

    def runp(cmd):
        return run(logger, cmd, cwd=CLONE_DIR_PATH, env={}, check=True, node=True)

    try:
        NVMRC_PATH = CLONE_DIR_PATH / NVMRC
        if NVMRC_PATH.is_file():
            # nvm will output the node and npm versions used
            logger.info('Checking node version specified in .nvmrc')
            runp("""
                RAW_VERSION=$(nvm version-remote $(cat .nvmrc))
                MAJOR_VERSION=$(echo $RAW_VERSION | cut -d. -f 1 | cut -dv -f 2)
                if [[ "$MAJOR_VERSION" =~ ^(12|14|16)$ ]]; then
                    echo "Switching to node version $RAW_VERSION specified in .nvmrc"

                    if [[ "$MAJOR_VERSION" -eq 12 ]]; then
                        echo "WARNING: Node $RAW_VERSION will reach end-of-life on 4-30-2022, at which point Federalist will no longer support it."
                        echo "Please upgrade to LTS major version 14 or 16, see https://nodejs.org/en/about/releases/ for details."
                    fi

                    nvm install $RAW_VERSION
                    nvm alias default $RAW_VERSION
                else
                    echo "Unsupported node major version '$MAJOR_VERSION' specified in .nvmrc."
                    echo "Please upgrade to LTS major version 14 or 16, see https://nodejs.org/en/about/releases/ for details."
                    exit 1
                fi
            """)  # noqa: E501
        else:
            # output node and npm versions if the defaults are used
            logger.info('Using default node version')
            runp('nvm alias default $(nvm version)')
            runp('echo Node version: $(node --version)')
            runp('echo NPM version: $(npm --version)')

        PACKAGE_JSON_PATH = CLONE_DIR_PATH / PACKAGE_JSON
        if PACKAGE_JSON_PATH.is_file():
            logger.info('Installing dependencies in package.json')
            runp('npm set audit false')
            runp('npm ci')

    except (CalledProcessError, OSError, ValueError):
        return 1

    return 0
    def __init__(self,
                 num_iterations: int = 10,
                 radius: float = 0.1,
                 lin_penalty: float = 1.0,
                 ang_penalty: float = 10 * 1.0 / 360.):
        self.num_iterations = num_iterations
        self.radius = radius
        self.lin_penalty = lin_penalty
        self.ang_penalty = ang_penalty
        self.logger = get_logger(self.__class__.__name__)
        self.num_samples = 50
        self.cost_func = partial(compute_chi,
                                 radius=self.radius,
                                 lin_penalty=self.lin_penalty,
                                 ang_penalty=self.ang_penalty)
Example #15
def run_build_script(branch, owner, repository, site_prefix,
                     base_url='', user_env_vars=[]):
    '''
    Runs the first npm build script that is defined ("pages", then "federalist")
    '''

    scripts = ["pages", "federalist"]
    for script_name in scripts:
        if has_build_script(script_name):
            logger = get_logger(f'run-{script_name}-script')
            logger.info(f'Running {script_name} build script in package.json')
            env = build_env(branch, owner, repository, site_prefix, base_url, user_env_vars)
            return run(logger, f'npm run {script_name}', cwd=CLONE_DIR_PATH, env=env, node=True)

    return 0
class BinTreeParser(object):

    _logger = log_utils.get_logger("bt_parser")

    @classmethod
    def parse(cls, tokens):
        # parentheses:
        #   find leftmost ")"
        #   find rightmost "("
        #   parse everything in between and replace it with the result
        while True:
            for close_index, t in enumerate(tokens):
                if isinstance(t, CloseParenthesis):
                    break
            else:  # no closing parenthesis
                break
            for open_index in range(close_index - 1, -1, -1):
                t = tokens[open_index]
                if isinstance(t, OpenParenthesis):
                    break
            else:  # open parenthesis not found, unbalanced parentheses
                # TODO better error message including the token's span
                raise ValueError("unbalanced parentheses")
            tree = cls.parse(tokens[open_index + 1:close_index])
            tokens = tokens[:open_index] + [tree] + tokens[close_index + 1:]

        for group in Operators.ALL_OPERATORS:
            while True:
                for index, t in enumerate(tokens):
                    if not isinstance(t, Operator):
                        continue
                    if t in group:
                        break
                else:  # did not find an operator in wanted group
                    break
                tree = BinTree(t, tokens[index - 1], tokens[index + 1])
                tokens = tokens[:index - 1] + [tree] + tokens[index + 2:]
        assert len(tokens) == 1, ("%s did not consume all tokens" %
                                  (cls.__name__, ))
        return tokens[0]

    @classmethod
    def solve(cls, tree):
        if isinstance(tree, Number):  # a number
            return tree.value
        left = cls.solve(tree.left)
        right = cls.solve(tree.right)
        return tree.value(left, right)
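
The token and tree classes used by BinTreeParser are not shown on this page. Below is a minimal sketch of stand-ins sufficient to exercise parse and solve; all names and shapes here are assumptions inferred from the parser code:

import operator

class Number:
    def __init__(self, value):
        self.value = value

class Operator:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, left, right):
        return self.fn(left, right)

class OpenParenthesis:
    pass

class CloseParenthesis:
    pass

class BinTree:
    def __init__(self, value, left, right):
        self.value = value
        self.left = left
        self.right = right

MUL = Operator(operator.mul)
ADD = Operator(operator.add)

class Operators:
    # operator groups ordered by precedence: '*' binds before '+'
    ALL_OPERATORS = [[MUL], [ADD]]

# (1 + 2) * 3 == 9
tokens = [OpenParenthesis(), Number(1), ADD, Number(2),
          CloseParenthesis(), MUL, Number(3)]
tree = BinTreeParser.parse(tokens)
assert BinTreeParser.solve(tree) == 9
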
def build_jekyll(branch,
                 owner,
                 repository,
                 site_prefix,
                 base_url='',
                 config='',
                 user_env_vars=[]):
    '''
    Builds the cloned site with Jekyll
    '''
    logger = get_logger('build-jekyll')

    JEKYLL_CONF_YML_PATH = CLONE_DIR_PATH / JEKYLL_CONFIG_YML

    # Add baseurl, branch, and the custom config to _config.yml.
    # Use the 'a' option to create or append to an existing config file.
    with JEKYLL_CONF_YML_PATH.open('a') as jekyll_conf_file:
        jekyll_conf_file.writelines([
            '\n',
            f'baseurl: {base_url}\n',
            f'branch: {branch}\n',
            config,
            '\n',
        ])

    jekyll_cmd = 'jekyll'

    GEMFILE_PATH = CLONE_DIR_PATH / GEMFILE
    if GEMFILE_PATH.is_file():
        jekyll_cmd = f'bundle exec {jekyll_cmd}'

    run(logger,
        f'echo Building using Jekyll version: $({jekyll_cmd} -v)',
        cwd=CLONE_DIR_PATH,
        env={},
        check=True,
        ruby=True)

    env = build_env(branch, owner, repository, site_prefix, base_url,
                    user_env_vars)
    env['JEKYLL_ENV'] = 'production'

    return run(logger,
               f'{jekyll_cmd} build --destination {SITE_BUILD_DIR_PATH}',
               cwd=CLONE_DIR_PATH,
               env=env,
               node=True,
               ruby=True)
def fetch_repo(owner, repository, branch, github_token=''):  # nosec
    '''
    Clones the GitHub repository specified by owner and repository
    into CLONE_DIR_PATH.
    '''
    logger = get_logger('clone')

    owner = shlex.quote(owner)
    repository = shlex.quote(repository)
    branch = shlex.quote(branch)

    command = (f'git clone -b {branch} --single-branch --depth 1 '
               f'{fetch_url(owner, repository, github_token)} '
               f'{CLONE_DIR_PATH}')

    return run(logger, command)
def build_static():
    '''Moves all files from CLONE_DIR into SITE_BUILD_DIR'''
    logger = get_logger('build-static')

    git_dir = path.join(CLONE_DIR_PATH, '.git')
    logger.info(f'Cleaning {git_dir}')
    shutil.rmtree(git_dir, ignore_errors=True)

    logger.info(f'Moving files to {SITE_BUILD_DIR}')

    # Make the site build directory first
    SITE_BUILD_DIR_PATH.mkdir(exist_ok=True)

    files = os.listdir(CLONE_DIR_PATH)

    for file in files:
        # don't move the SITE_BUILD_DIR dir into itself
        if file != SITE_BUILD_DIR:
            shutil.move(str(CLONE_DIR_PATH / file), str(SITE_BUILD_DIR_PATH))
def build_jekyll(branch,
                 owner,
                 repository,
                 site_prefix,
                 base_url='',
                 config='',
                 user_env_vars=[]):
    '''
    Builds the cloned site with Jekyll
    '''
    logger = get_logger('build-jekyll')

    result = update_jekyll_config(dict(baseurl=base_url, branch=branch),
                                  config)

    if result != 0:
        return result

    jekyll_cmd = 'jekyll'

    GEMFILE_PATH = CLONE_DIR_PATH / GEMFILE
    if GEMFILE_PATH.is_file():
        jekyll_cmd = f'bundle exec {jekyll_cmd}'

    run(logger,
        f'echo Building using Jekyll version: $({jekyll_cmd} -v)',
        cwd=CLONE_DIR_PATH,
        env={},
        check=True,
        ruby=True)

    env = build_env(branch, owner, repository, site_prefix, base_url,
                    user_env_vars)
    env['JEKYLL_ENV'] = 'production'

    return run(logger,
               f'{jekyll_cmd} build --destination {SITE_BUILD_DIR_PATH}',
               cwd=CLONE_DIR_PATH,
               env=env,
               node=True,
               ruby=True)
def setup_node():
    '''
    Sets up node and installs production dependencies.

    Uses the node version specified in the cloned repo's .nvmrc
    file if it is present.
    '''
    logger = get_logger('setup-node')

    def runp(cmd):
        return run(logger,
                   cmd,
                   cwd=CLONE_DIR_PATH,
                   env={},
                   check=True,
                   node=True)

    try:
        NVMRC_PATH = CLONE_DIR_PATH / NVMRC
        if NVMRC_PATH.is_file():
            # nvm will output the node and npm versions used
            logger.info('Using node version specified in .nvmrc')
            runp('nvm install')
            runp('nvm use')
        else:
            # output node and npm versions if the defaults are used
            logger.info('Using default node version')
            runp('echo Node version: $(node --version)')
            runp('echo NPM version: $(npm --version)')

        PACKAGE_JSON_PATH = CLONE_DIR_PATH / PACKAGE_JSON
        if PACKAGE_JSON_PATH.is_file():
            logger.info('Installing production dependencies in package.json')
            runp('npm set audit false')
            runp('npm ci --production')

    except (CalledProcessError, OSError, ValueError):
        return 1

    return 0
def setup_bundler():
    logger = get_logger('setup-bundler')

    def runp(cmd):
        return run(logger, cmd, cwd=CLONE_DIR_PATH, env={}, ruby=True)

    GEMFILE_PATH = CLONE_DIR_PATH / GEMFILE

    if not GEMFILE_PATH.is_file():
        logger.info('No Gemfile found, installing Jekyll.')
        return runp('gem install jekyll --no-document')

    logger.info('Gemfile found, setting up bundler')

    version = '<2'

    BUNDLER_VERSION_PATH = CLONE_DIR_PATH / BUNDLER_VERSION

    if BUNDLER_VERSION_PATH.is_file():
        with BUNDLER_VERSION_PATH.open() as bundler_vers_file:
            try:
                bundler_vers = bundler_vers_file.readline().strip()
                # escape-quote the value in case there's anything weird
                # in the .bundler-version file
                bundler_vers = shlex.quote(bundler_vers)
                regex = r'^[\d]+(\.[\d]+)*$'
                bundler_vers = re.search(regex, bundler_vers).group(0)
                if bundler_vers:
                    logger.info('Using bundler version in .bundler-version')
                    version = bundler_vers
            except Exception:
                raise RuntimeError('Invalid .bundler-version')

    returncode = runp(f'gem install bundler --version "{version}"')

    if returncode:
        return returncode

    logger.info('Installing dependencies in Gemfile')
    return runp('bundle install')
def update_jekyll_config(federalist_config={}, custom_config=''):
    logger = get_logger('build-jekyll')

    JEKYLL_CONF_YML_PATH = CLONE_DIR_PATH / JEKYLL_CONFIG_YML

    config_yml = {}
    with JEKYLL_CONF_YML_PATH.open('r') as jekyll_conf_file:
        # an empty _config.yml parses to None, so fall back to {}
        config_yml = yaml.safe_load(jekyll_conf_file) or {}

    if custom_config:
        try:
            custom_config = json.loads(custom_config)
        except json.JSONDecodeError:
            logger.error('Could not load/parse custom JSON config.')
            return 1
    else:
        # an empty string cannot be dict-unpacked below
        custom_config = {}

    config_yml = {**config_yml, **custom_config, **federalist_config}

    with JEKYLL_CONF_YML_PATH.open('w') as jekyll_conf_file:
        yaml.dump(config_yml, jekyll_conf_file, default_flow_style=False)

    return 0
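
A hypothetical invocation (values made up); the Federalist-controlled keys passed in the first argument override both the repo's _config.yml and the user's JSON string:

status = update_jekyll_config(
    dict(baseurl='/preview/org/repo/main', branch='main'),
    '{"title": "My Site"}',
)
# returns 0 on success, 1 if the custom config string is not valid JSON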
Example #24
def fetch_commit_sha(clone_dir):
    '''
    Fetches the SHA of the most recent commit
    '''
    try:
        logger = get_logger('clone')
        logger.info('Fetching commit details ...')
        command = shlex.split('git log -1')  # get last commit only
        process = subprocess.run(  # nosec
            command,
            shell=False,
            check=True,
            stdout=subprocess.PIPE,
            universal_newlines=True,
            cwd=clone_dir)
        commit_log = process.stdout
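        # `git log -1` output begins with "commit <sha>", so the second
        # whitespace-separated token is the commit hash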
        commit_sha = commit_log.split()[1]
        logger.info(f'commit {commit_sha}')
        return commit_sha
    except Exception:
        raise StepException(
            'There was a problem fetching the commit hash for this build')
def run_federalist_script(branch,
                          owner,
                          repository,
                          site_prefix,
                          base_url='',
                          user_env_vars=[]):
    '''
    Runs the npm "federalist" script if it is defined
    '''

    if has_federalist_script():
        logger = get_logger('run-federalist-script')
        logger.info('Running federalist build script in package.json')
        env = build_env(branch, owner, repository, site_prefix, base_url,
                        user_env_vars)
        return run(logger,
                   'npm run federalist',
                   cwd=CLONE_DIR_PATH,
                   env=env,
                   node=True)

    return 0
import os
import re
import torch
import argparse
import PairDataset
import TripletDataset
import torchvision
import autoencoder
from log_utils import get_logger
from torch.utils.data import DataLoader

log = get_logger()


def parse_args():
    parser = argparse.ArgumentParser(
        description='PyTorch implementation of arbitrary style transfer '
                    'via CNN features WCT transform',
        epilog='Supported image file formats are: jpg, jpeg, png')

    parser.add_argument(
        '--content',
        help='Path of the content image (or a directory containing images) '
             'to be transformed')
    parser.add_argument(
        '--style',
        help='Path of the style image (or a directory containing images) to use'
    )
    parser.add_argument(
        '--synthesis',
    def __init__(self, name=None, length=None, encoding=None):
        Field.__init__(self, name)
        self.length = length
        self.encoding = encoding
        self.log = log_utils.get_logger('spec_fixed_field')
Example #28
'''
Classes and methods for publishing a directory to S3
'''

import glob
import requests

from os import path, makedirs
from datetime import datetime

from log_utils import get_logger
from .models import (remove_prefix, SiteObject, SiteFile, SiteRedirect)

LOGGER = get_logger('S3_PUBLISHER')

MAX_S3_KEYS_PER_REQUEST = 1000


def list_remote_objects(bucket, site_prefix, s3_client):
    '''
    Generates a list of remote S3 objects that have keys starting with
    site_prefix in the given bucket.
    '''
    results_truncated = True
    continuation_token = None

    remote_objects = []

    while results_truncated:
        prefix = site_prefix
        # Add a / to the end of the prefix to prevent
Example #29
import os
import argparse
from os.path import join as pjoin

# from brown_corpus import BrownCorpus
# from char_corpus import CharCorpus, CONTEXT
# from char_stream import CharStream, CONTEXT
from utt_char_stream import UttCharStream
from model_utils import get_model_class_and_params
from optimizer import OptimizerHyperparams
from log_utils import get_logger
from run_utils import dump_config, add_run_data
from gpu_utils import gnumpy_setup
import gnumpy as gnp

logger = get_logger()
gnumpy_setup()
# gnp.track_memory_usage = True

# PARAM
SAVE_PARAMS_EVERY = 5000
MODEL_TYPE = "rnn"
# MODEL_TYPE = 'dnn'


def main():
    # TODO Be able to pass in different models into training script as well?

    model_class, model_hps = get_model_class_and_params(MODEL_TYPE)
    opt_hps = OptimizerHyperparams()
Example #30
import json
import shutil

from contextlib import ExitStack
from os import path
from pathlib import Path

import requests
from invoke import task, call

from log_utils import get_logger
from .common import (CLONE_DIR_PATH, SITE_BUILD_DIR,
                     WORKING_DIR, SITE_BUILD_DIR_PATH,
                     clean)


LOGGER = get_logger('BUILD')

NVM_SH_PATH = Path('/usr/local/nvm/nvm.sh')
RVM_PATH = Path('/usr/local/rvm/scripts/rvm')

HUGO_BIN = 'hugo'
HUGO_BIN_PATH = Path(path.join(WORKING_DIR, HUGO_BIN))

PACKAGE_JSON_PATH = Path(path.join(CLONE_DIR_PATH, 'package.json'))
NVMRC_PATH = Path(path.join(CLONE_DIR_PATH, '.nvmrc'))
RUBY_VERSION_PATH = Path(path.join(CLONE_DIR_PATH, '.ruby-version'))
GEMFILE_PATH = Path(path.join(CLONE_DIR_PATH, 'Gemfile'))
JEKYLL_CONF_YML_PATH = Path(path.join(CLONE_DIR_PATH, '_config.yml'))


def has_federalist_script():

'''Main task entrypoint'''

import os
import shlex

from datetime import datetime

from invoke import task, UnexpectedExit
from stopit import TimeoutException, SignalTimeout as Timeout

from log_utils import get_logger
from log_utils.remote_logs import (post_output_log, post_build_complete,
                                   post_build_error, post_build_timeout)
from .common import load_dotenv, delta_to_mins_secs

LOGGER = get_logger('MAIN')

TIMEOUT_SECONDS = 45 * 60  # 45 minutes


def format_output(stdout_str, stderr_str):
    '''
    Convenience method for combining strings of stdout and stderr.

    >>> format_output('stdout', 'stderr')
    '>> STDOUT:\\nstdout\\n---------------------------\\n>> STDERR:\\nstderr'

    >>> format_output('abc', 'def')
    '>> STDOUT:\\nabc\\n---------------------------\\n>> STDERR:\\ndef'
    '''
    output = f'>> STDOUT:\n{stdout_str}'
Example #32
    def __init__(self, input_string, logger=None):
        self._input_string = input_string.strip()
        self._logger = (logger if logger is not None
                        else log_utils.get_logger("lexer"))
Example #33
    def __init__(self, input_string, logger=None):
        self._input_string = input_string.strip()
        self._logger = (logger if logger is not None else
                        log_utils.get_logger("lexer"))
Example #34
WORKING_DIR_PATH = Path('/work')

# Make the working directory if it doesn't exist
WORKING_DIR_PATH.mkdir(exist_ok=True)

CLONE_DIR = 'site_repo'
CLONE_DIR_PATH = WORKING_DIR_PATH / CLONE_DIR

SITE_BUILD_DIR = '_site'

BASE_DIR = Path(path.dirname(path.dirname(__file__)))
DOTENV_PATH = BASE_DIR / '.env'

SITE_BUILD_DIR_PATH = CLONE_DIR_PATH / SITE_BUILD_DIR

LOGGER = get_logger('COMMON')


def load_dotenv():  # pragma: no cover
    '''Loads environment from a .env file'''
    if path.exists(DOTENV_PATH):
        LOGGER.info('Loading environment from .env file')
        _load_dotenv(DOTENV_PATH)


def delta_to_mins_secs(delta):
    '''
    Converts a timedelta to a string of minutes and seconds.

    >>> td = timedelta(seconds=55)
    >>> delta_to_mins_secs(td)