Example 1
def parse_pob(author, content, minify=False):
    """
    Trigger parsing of the pastebin link, pass the build to the output generator and return the resulting embed
    :param author: user who sent the message
    :param content: message content containing the pastebin link
    :param minify: optional: whether to generate a minified (condensed) embed
    :return: the generated embed, or None if no valid paste key or XML was found
    """
    paste_key = pastebin.fetch_paste_key(content)
    if paste_key:
        xml = None
        log.info("Parsing pastebin with key={}".format(paste_key))

        try:
            xml = pastebin.get_as_xml(paste_key)
        except HTTPError as err:
            log.error("Invalid pastebin-url msg={}".format(err))
        if xml:
            parser = Parser()
            build = parser.parse_build(xml)
            # print(build)

            embed = pob_output.generate_response(author,
                                                 build,
                                                 minified=minify)

            log.debug("embed={}; length={}".format(embed, embed.__sizeof__()))
            return embed
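For context, here is a rough sketch of how parse_pob could be wired into a discord.py message handler; the "!pob" prefix, the "-m" minify flag and the send call are illustrative assumptions, not the bot's actual command handling.

@bot.event
async def on_message(message):
    # Hypothetical wiring; the prefix and flag parsing are illustrative only.
    if message.author.bot or not message.content.startswith("!pob"):
        return
    embed = parse_pob(message.author, message.content,
                      minify="-m" in message.content)
    if embed:
        # discord.py >= 1.0; older versions use bot.send_message(channel, embed=embed)
        await message.channel.send(embed=embed)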
Example 2
    def init(cls):
        if cls.config is not None:
            return cls.config

        runtime = os.environ.get("RUNTIME")

        if runtime == "development":
            log.info("Lewis & Wood Adjustments - Starting up - Development")
            cls.config = toml.load(
                os.path.join(os.path.dirname(__file__),
                             "config-development.toml"))
        elif runtime == "testing":
            log.info("Lewis & Wood Adjustments - Starting up - Testing")
            cls.config = toml.load(
                os.path.join(os.path.dirname(__file__), "config-testing.toml"))
        elif runtime == "production":
            log.info("Lewis & Wood Adjustments - Starting up - Production")
            cls.config = toml.load(
                os.path.join(os.path.dirname(__file__),
                             "config-production.toml"))
        else:
            sys.exit(
                "RUNTIME environment variable is not set or invalid. Set it to development | testing | production"
            )

        return cls.config
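A short usage sketch, assuming init is a classmethod on a Config class and that config-development.toml contains a [database] table; the class name and the TOML keys are illustrative assumptions.

import os

# Assumed shape of config-development.toml (illustrative only):
#   [database]
#   host = "localhost"
#   port = 5432

os.environ["RUNTIME"] = "development"
config = Config.init()                # loads the TOML file once
db_host = config["database"]["host"]  # toml.load returns a plain dict
assert Config.init() is config        # later calls reuse the cached dict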
Example 3
    def parse_build(xml_root):
        xml_build = xml_root.find('Build')
        xml_items = xml_root.find('Items')
        xml_skills = xml_root.find('Skills')
        xml_tree = xml_root.find('Tree')
        selected_tree = Parser.get_tree_link(xml_tree)

        # parse items
        item_slots = Parser.parse_item_slots(xml_items)
        skills = Parser.parse_skills(xml_skills)
        active_skill = xml_build.attrib['mainSocketGroup']

        build = Build(xml_build.attrib['level'],
                      xml_build.attrib['targetVersion'],
                      Parser.get_attrib_if_exists(xml_build, 'bandit'),
                      xml_build.attrib['className'],
                      xml_build.attrib['ascendClassName'], selected_tree,
                      skills, active_skill, item_slots)
        for player_stat in xml_build:
            if 'stat' in player_stat.attrib and 'value' in player_stat.attrib:
                build.append_stat(player_stat.attrib['stat'],
                                  player_stat.attrib['value'], player_stat.tag)
            else:
                log.info(
                    "Encountered unsupported player stat: k={}, v={}".format(
                        player_stat.tag, player_stat.attrib))

        # parse config
        for config_input in xml_root.find('Config'):
            if config_input.tag == "Input":
                extracted = [val for (key, val) in config_input.attrib.items()]
                # need at least two attribute values: the config key and its value
                if len(extracted) < 2:
                    continue
                build.append_conf(extracted[0], extracted[1])

        return build
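The element and attribute names read above imply an XML layout roughly like the one below; this is a minimal sketch reconstructed from the parser code, not an actual Path of Building export, and the root element name is an assumption.

import xml.etree.ElementTree as ET

sample = """
<PathOfBuilding>
  <Build level="90" targetVersion="3_0" bandit="None"
         className="Witch" ascendClassName="Necromancer" mainSocketGroup="1">
    <PlayerStat stat="Life" value="5000"/>
  </Build>
  <Items/>
  <Skills/>
  <Tree/>
  <Config>
    <Input name="enemyIsBoss" boolean="true"/>
  </Config>
</PathOfBuilding>
"""

root = ET.fromstring(sample)
# Parser.parse_build(root) would walk this tree; whether the empty
# Items/Skills/Tree elements are accepted depends on the other Parser helpers.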
Example 4
def highlights(comments, min_size=5, dist_cutoff=0.5):
    """
    This takes a set of comments,
    clusters them, and then returns representatives from clusters above
    some threshold size.

    Args:
        | comments      -- list of Commentables
        | min_size      -- int, minimum cluster size to consider
        | dist_cutoff   -- float, the density at which to snip the hierarchy for clusters

    Future improvements:
        - Persist hierarchies instead of rebuilding from scratch (using Hierarchy.load & Hierarchy.save)
        - Tweak min_size and dist_cutoff for the domain.
    """
    v = joblib.load(geiger_path)
    vecs = v.vectorize([strip_tags(c.body) for c in comments], train=False)
    vecs = vecs.toarray()

    log.info('Clustering {0} comments...'.format(vecs.shape[0]))

    # Build the hierarchy.
    h = Hierarchy(metric='cosine',
                  lower_limit_scale=0.9,
                  upper_limit_scale=1.2)
    ids = h.fit(vecs)

    log.info('Processing resulting clusters...')

    # Build a map of hierarchy ids to comments.
    id_to_comment = {ids[i]: c for i, c in enumerate(comments)}

    # Generate the clusters.
    clusters = h.clusters(distance_threshold=dist_cutoff, with_labels=False)

    # Filter to clusters of at least some minimum size.
    clusters = [c for c in clusters if len(c) >= min_size]

    # Get the clusters as comments.
    clusters = [[id_to_comment[cid] for cid in clus] for clus in clusters]

    # From each cluster, pick the comment with the highest score.
    highlights = [max(clus, key=lambda c: c.score) for clus in clusters]

    # Suppress replies, show only top-level.
    for highlight in highlights:
        highlight.replies = []

    log.info('Done.')

    return highlights
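A rough usage sketch, assuming comment objects that expose the body, score and replies attributes used above; the Comment dataclass here is a stand-in, not the project's actual Commentable type.

from dataclasses import dataclass, field

@dataclass
class Comment:
    body: str
    score: int
    replies: list = field(default_factory=list)

raw_bodies = ["<p>Great point about transit.</p>",
              "<p>I disagree with the premise.</p>",
              "<p>Transit funding is the real issue.</p>"]
comments = [Comment(body=b, score=i) for i, b in enumerate(raw_bodies)]

# Cluster and keep the top-scoring comment of each sufficiently large cluster.
top = highlights(comments, min_size=2, dist_cutoff=0.5)
for c in top:
    print(c.score, strip_tags(c.body))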
Example 5
        def decorated(*args, **kwargs):
            global freezer
            sig = _signature(dep_keys)
            fpath = _build_path(dtype, path, sig)

            freezer.append(fpath)

            if _preserved(fpath) and not _refresh(stage):
                log.info('Defreezing {0} (sig:{1})...'.format(path, sig))
                return _defreeze(fpath)

            else:
                log.info('Running {0}...'.format(path))
                data = f(*args, **kwargs)

                log.info('Freezing {0} (sig:{1})...'.format(path, sig))
                # TODO: this needs to set the refresh flag for the stage.
                _freeze(data, fpath)
                return data
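The closure variables used above (f, dtype, path, stage, dep_keys) suggest decorated is the inner function of a freeze/defreeze caching decorator; the factory below is a speculative sketch with guessed parameter names, not the module's real API.

from functools import wraps

def freeze_stage(path, dtype='pickle', stage=None, dep_keys=None):
    # Hypothetical outer decorator; names are inferred from the closure
    # variables referenced inside `decorated`, not copied from the source.
    def decorator(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            ...  # body as in the snippet above
        return decorated
    return decorator

# Possible usage, caching an expensive stage on disk keyed by its dependencies:
# @freeze_stage('features/tfidf', stage='features', dep_keys=['min_df', 'max_df'])
# def build_features(docs): ...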
Example 6
def dredd(refresh):
    """
    Evaluate Dredd's performance on a task.
    """
    if refresh is not None:
        stage = refresh
        names = [s[0] for s in cryo.stages]
        i = names.index(stage)
        cryo.stages[i] = (stage, True)

    data = Sampler().sample()
    log.info('Data set includes {0} examples...'.format(data.shape[0]))

    log.info('Building features...')
    X, y = features.featurize(data)
    log.info('Using {0} features...'.format(X.shape[1]))

    X_train, y_train, X_test, y_test = eval.cross_validation_split(
        X, y, test_size=config.test_size)
    log.info('Training on {0} examples...'.format(X_train.shape[0]))
    log.info('Testing on {0} examples...'.format(X_test.shape[0]))

    log.info('Training model...')
    m = models.Model(**config.model['params'])
    m.train(X_train, y_train)

    log.info('Testing model...')
    scores = m.evaluate(X_test, y_test, **config.model['eval'])
    print(eval.report(config, X_train, X_test, scores))
Example 7
def update_sage() -> Result:
    with session_scope() as session:
        sage_stats = get_sage_stats(session)

        if sage_stats.paused == 1:
            log.info("Adjustments are paused, skipping")
            return Result()

        adj = get_sage_adjustment(session)

        if adj is None:
            # log.info("No adjustment(s) to process")
            return Result()

        log.info(f"Processing adjustment: {adj.stock_code}")

        if get_cost_price is True:
            try:
                cost = get_sage_cost_price(adj.stock_code)
                if cost is None:
                    log.info(
                        f"Cost price for {adj.stock_code} not found - does the product exist?"
                    )
                else:
                    log.info(f"Cost price for {adj.stock_code} is {cost}")
            except SageException as e:
                return Result(sage_failed=True,
                              stock_code=adj.stock_code,
                              error=e.message)
        else:
            cost = None

        try:
            result: Optional[str] = update_sage_stock(
                adj_type=1 if adj.adjustment_type.name
                == AdjustmentType.adj_in.name else 2,
                quantity=adj.amount,
                stock_code=adj.stock_code,
                adjustment_date=adj.adjustment_date,
                reference=get_reference(adj.reference_text),
                batch=adj.batch,
                cost=cost)
        except Exception as e:
            result = str(e)

        if result is None:
            adj.sage_updated = True
            adj.sage_updated_at = datetime.now()
            session.add(adj)
            sage_stats.total_updated += 1
            session.add(sage_stats)
            log.info(f"Updated Sage record for product {adj.stock_code}")
            return Result()

        adj.num_retries += 1
        sage_stats.total_failures += 1
        session.add(adj)
        session.add(sage_stats)
        log.warning(
            f"Update Sage record for product {adj.stock_code} FAILED, {result}"
        )
        return Result(sage_failed=True,
                      stock_code=adj.stock_code,
                      error=result)
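update_sage constructs Result with at most sage_failed, stock_code and error; below is a minimal sketch of what that container might look like as a dataclass (the real definition may differ).

from dataclasses import dataclass
from typing import Optional

@dataclass
class Result:
    # Fields inferred from the call sites above; defaults are assumptions.
    sage_failed: bool = False
    stock_code: Optional[str] = None
    error: Optional[str] = None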
Example 8
    def sample(self):
        log.info('Loading {0} users'.format(config.sampling['n_users']))
        return pd.read_sql(self.query_by_user(), db)
Example 9
async def on_ready():
    log.info('Logged in: uname={}, id={}'.format(bot.user.name, bot.user.id))
    if config.presence_message:
        await bot.change_presence(game=discord.Game(
            name=config.presence_message))
Example 10
def update_sage_stock(*, adj_type: int, quantity: Decimal, stock_code: str,
                      adjustment_date: datetime, reference: str, batch: str,
                      cost: Optional[float]) -> Optional[str]:
    now = datetime.today()
    payload = {
        "stockCode": stock_code,
        "quantity": float(quantity),
        "type": adj_type,
        "date": (now if adjustment_date is None
                 else adjustment_date).strftime("%d/%m/%Y"),
        "reference": reference,
        "details": batch,
    }

    if cost is not None:
        payload['costPrice'] = cost

    endpoint = f"{hyper_uri}{adjustments_uri}"
    log.info(f"Calling HyperSage endpoint: {endpoint}")

    try:
        r = requests.post(
            endpoint,
            headers={"AuthToken": hyper_api_key},
            timeout=hyper_timeout,
            json=payload,
            verify=False,
        )
    except ConnectTimeout:
        log.warn("Connection timeout connecting to HyperSage")
        return "Timed out connecting to HyperSage"
    except TimeoutError:
        log.warn("Timed out communicating with HyperSage")
        return "Timed out communicating with HyperSage"
    except Exception as e:
        log.warn(f"Error communicating with HyperSage: {e}")
        return f"Error communicating with HyperSage: {e}"

    if r.status_code != 200:
        if adj_type == AdjustmentType.adj_out:
            err = f"Cannot add an adjustment out to Sage, error status is {r.status_code}"
            log.warn(err)
            raise SageException(
                err + ". The product quantity on Sage may be incorrect")
        err = f"Cannot add an adjustment in to Sage, error status is {r.status_code}"
        log.warn(err)
        raise SageException(err)

    try:
        r.encoding = 'utf-8-sig'
        i = r.json()
    except Exception as e:
        log.warn(f"HyperSage did not return a valid json response: {e}")
        raise SageException(
            f"HyperSage did not return a valid json response: {e}")

    try:
        response = HypersageResponse(**i)
    except ValidationError as e:
        log.warn(f"Validation of Sage response failed: {e}")
        raise SageException(f"Validation of Sage response failed: {e}")

    if response.success is False:
        return f"error {response.code} from HyperSage, message: {response.message}"

    typ = "In" if adj_type == 1 else "Out"
    log.info(
        f"Added adjustment: Stock Code: {stock_code}, Type: {typ}, Quantity: {float(quantity)}"
    )
    return None
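The response handling above implies a HypersageResponse model with success, code and message fields; below is a minimal pydantic sketch consistent with that usage, with field types as guesses.

from typing import Optional
from pydantic import BaseModel

class HypersageResponse(BaseModel):
    # Field names come from the usage above; types and defaults are assumptions.
    success: bool
    code: Optional[int] = None
    message: Optional[str] = None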
Example 11
import config
from bot.discord_bot import bot
from util.logging import log

if __name__ == '__main__':
    token = config.token  # create a config.py file and set the bot token string there
    if token:
        # initialize_logging()
        log.info("Starting pob discord bot...")
        bot.run(token)
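The entry point reads config.token, and the on_ready handler earlier reads config.presence_message; a minimal illustrative config.py with placeholder values follows.

# config.py - illustrative sketch; values are placeholders and the real
# module may define more settings than the two referenced in these snippets.
token = "YOUR_DISCORD_BOT_TOKEN"       # bot token from the Discord developer portal
presence_message = "Path of Building"  # optional text shown as the bot's presence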