Example #1
def format_event_data_group_update(data):
    """Return a structure describing a group update event."""
    event_data = ns()
    event_data.member_lists = list()

    event_data.header = "Group update"
    if data.change_by:
        event_data.header += " by " + format_message(data.change_by.name)

    if data.group_name:
        event_data.name = data.group_name

    if data.new_members and len(data.new_members) > 0:
        member_list = format_member_list("New members:", data.new_members)
        event_data.member_lists.append(member_list)

    if data.deleted_members and len(data.deleted_members) > 0:
        member_list = format_member_list(
            "Deleted members:", data.deleted_members
        )
        event_data.member_lists.append(member_list)

    if data.members and len(data.members) > 0:
        member_list = format_member_list("Members:", data.members)
        event_data.member_lists.append(member_list)

    return event_data
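A minimal usage sketch, assuming the helper above is in scope; the data layout and the format_message / format_member_list stand-ins below are simplified placeholders, not the project's real helpers (the real format_member_list appears in Example #9).

from types import SimpleNamespace as ns

def format_message(text):
    # hypothetical stand-in for the project's text-escaping helper
    return str(text)

def format_member_list(header, member_list):
    # simplified stand-in; see Example #9 for the real helper
    return ns(header=header, members=[m.name for m in member_list])

data = ns(
    change_by=ns(name="Alice"),
    group_name="Project chat",
    new_members=[ns(name="Bob")],
    deleted_members=[],
    members=[ns(name="Alice"), ns(name="Bob")],
)

event = format_event_data_group_update(data)
print(event.header)             # -> Group update by Alice
print(len(event.member_lists))  # -> 2 (new members + current members)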
Example #2
def try_clustering(model, pairwise_similarity, names=None, verbose=True):
    pairwise_distance = pairwise_similarity.max() - pairwise_similarity
    output_ids = model.fit_predict(pairwise_distance)

    unk_labels, cluster_sizes = np.unique(output_ids, return_counts=True)
    num_clusters = np.count_nonzero(unk_labels >= 0)
    non_outliers = np.flatnonzero(output_ids >= 0)
    num_outliers = len(output_ids) - len(non_outliers)

    if len(non_outliers):
        score = silhouette_score(
            pairwise_distance[np.ix_(non_outliers, non_outliers)],
            output_ids[non_outliers],
            metric='precomputed')
    else:
        score = -1

    result = ns(score=score,
                num_clusters=num_clusters,
                labels=output_ids,
                num_outliers=num_outliers,
                unk_labels=unk_labels,
                cluster_sizes=cluster_sizes)

    if verbose:
        show_clusters(result, names)

    return result
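A standalone sketch of the same pattern under stated assumptions: scikit-learn's DBSCAN as the model, a toy 4x4 similarity matrix, and illustrative eps/min_samples values; the outlier and silhouette handling mirrors the function above.

import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.metrics import silhouette_score

# toy pairwise similarity for 4 items (two tight pairs)
pairwise_similarity = np.array([
    [1.0, 0.9, 0.1, 0.2],
    [0.9, 1.0, 0.2, 0.1],
    [0.1, 0.2, 1.0, 0.8],
    [0.2, 0.1, 0.8, 1.0],
])
pairwise_distance = pairwise_similarity.max() - pairwise_similarity

model = DBSCAN(eps=0.3, min_samples=2, metric='precomputed')
labels = model.fit_predict(pairwise_distance)   # -1 marks outliers

non_outliers = np.flatnonzero(labels >= 0)
if len(non_outliers) and len(np.unique(labels[non_outliers])) > 1:
    score = silhouette_score(
        pairwise_distance[np.ix_(non_outliers, non_outliers)],
        labels[non_outliers],
        metric='precomputed')
else:
    score = -1
print(labels, score)   # e.g. [0 0 1 1] plus the silhouette of that clustering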
Example #3
    def search(self, pat, line, num):
        found = search(pat, line)
        if found:
            grp = found.groupdict()
            grp['line'] = line
            grp['num'] = num
            return ns(**grp)
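The method wraps the named groups of a regex match, plus the matched line and its number, into a namespace. A minimal standalone sketch of the same idea, assuming `search` refers to re.search with a pattern that uses named groups:

import re
from types import SimpleNamespace as ns

def search_line(pat, line, num):
    found = re.search(pat, line)
    if found:
        grp = found.groupdict()
        grp['line'] = line
        grp['num'] = num
        return ns(**grp)

hit = search_line(r'(?P<key>\w+)=(?P<value>\w+)', 'retries=3', num=7)
print(hit.key, hit.value, hit.num)   # -> retries 3 7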
Example #4
def parse(message: str):
    details = False
    if message.endswith('?'):
        details = True
        message = message[:-1]

    components = is_this_for_me(message)

    if not components:
        return None

    if components == ['apua']:
        return ns(action='apua')

    modifier = 0

    try:
        modifier = int(components[-1])
        components = components[:-1]
    except IndexError:
        return
    except ValueError:
        pass

    try:
        if components[-1] in ('+', '-'):
            if components[-1] == '-':
                modifier = -modifier
            components = components[:-1]
    except IndexError:
        return

    move = ' '.join(components)

    if not move:
        return

    return ns(
        action='move',
        move=move,
        modifier=modifier,
        details=details,
    )
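A standalone sketch of the trailing-modifier convention this parser handles: a move name optionally followed by '+' or '-' and an integer. is_this_for_me is project-specific and not reproduced here, so only the token handling is shown; the input is assumed to be a non-empty token list, as parse() guarantees above.

from types import SimpleNamespace as ns

def parse_move_tokens(components):
    modifier = 0
    try:
        modifier = int(components[-1])
        components = components[:-1]
    except ValueError:
        pass
    if components and components[-1] in ('+', '-'):
        if components[-1] == '-':
            modifier = -modifier
        components = components[:-1]
    return ns(move=' '.join(components), modifier=modifier)

print(parse_move_tokens(['fireball', '+', '2']))   # move='fireball', modifier=2
print(parse_move_tokens(['dodge', '-', '1']))      # move='dodge', modifier=-1
print(parse_move_tokens(['rest']))                 # move='rest', modifier=0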
Example #5
    def test_multiply(self):
        point = Point2D(2, 3)
        tests = [
            (Point2D(1, 1), (2, 3)),
            ((1, 1), (2, 3)),
            ((0.5, 4.5), (1, 13.5)),
            (1, (2, 3)),
            (1.0, (2, 3)),
            (ns(x=1, y=1), (2, 3)),
        ]
        for other, expected in tests:
            with self.subTest(other):
                result = point * other

                self.assertEqual(result, expected)
Example #6
    def test_subtract(self):
        point = Point2D(2, 3)
        tests = [
            (Point2D(1, 1), (1, 2)),
            ((1, 1), (1, 2)),
            ((0.5, 4.5), (1.5, -1.5)),
            (1, (1, 2)),
            (1.0, (1, 2)),
            (ns(x=1, y=1), (1, 2)),
        ]
        for other, expected in tests:
            with self.subTest(other):
                result = point - other

                self.assertEqual(result, expected)
Example #7
    def test_divide(self):
        point = Point2D(2, 3)
        tests = [
            (Point2D(1, 1), (2, 3)),
            ((1, 1), (2, 3)),
            ((0.5, 6.0), (4, 0.5)),
            (1, (2, 3)),
            (1.0, (2, 3)),
            (ns(x=1, y=1), (2, 3)),
        ]
        for other, expected in tests:
            with self.subTest(other):
                result = point / other

                self.assertEqual(result, expected)
Example #8
    def test_floordiv(self):
        point = Point2D(2, 3)
        tests = [
            (Point2D(1, 1), (2, 3)),
            ((1, 1), (2, 3)),
            ((0.4, -6.0), (4.0, -1.0)),
            (1, (2, 3)),
            (1.0, (2, 3)),
            (ns(x=1, y=1), (2, 3)),
        ]
        for other, expected in tests:
            with self.subTest(other):
                result = point // other

                self.assertEqual(result, expected)
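The four tests above exercise operators whose right-hand operand may be another Point2D, a 2-tuple, a plain number, or any object with x/y attributes, with the result comparing equal to a plain tuple. A minimal Point2D sketch that would satisfy them (an illustration under those assumptions, not the project's actual implementation):

import operator

class Point2D:
    def __init__(self, x, y):
        self.x, self.y = x, y

    @staticmethod
    def _coords(other):
        # normalise the operand to an (x, y) pair
        if hasattr(other, 'x') and hasattr(other, 'y'):
            return other.x, other.y
        if isinstance(other, (tuple, list)):
            return other
        return other, other   # a scalar broadcasts to both axes

    def _apply(self, other, op):
        ox, oy = self._coords(other)
        return Point2D(op(self.x, ox), op(self.y, oy))

    def __mul__(self, other):
        return self._apply(other, operator.mul)

    def __sub__(self, other):
        return self._apply(other, operator.sub)

    def __truediv__(self, other):
        return self._apply(other, operator.truediv)

    def __floordiv__(self, other):
        return self._apply(other, operator.floordiv)

    def __eq__(self, other):
        return (self.x, self.y) == tuple(self._coords(other))

print(Point2D(2, 3) * (0.5, 4.5) == (1, 13.5))      # True
print(Point2D(2, 3) // (0.4, -6.0) == (4.0, -1.0))  # True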
Example #9
def format_member_list(header: str, member_list):
    """Return a list of printable group members belonging to a category (e.g. new members)."""
    people = ns()
    people.header = header
    people.members = list()
    for member in member_list:
        designation = ""
        if not member.match_from_phone:
            if member.phone:
                designation = f"{format_message(member.name)} ({format_message(member.phone)})"
            else:
                designation = f"{format_message(member.name)}"
        else:
            if member.name is None or member.name == member.phone:
                designation = f"{format_message(member.phone)}"
            else:
                designation = f"{format_message(member.phone)} ~ {format_message(member.name)}"

        if member.admin:
            designation += " (admin)"

        people.members.append(designation)

    return people
Example #10
    def __init__(self, source_data):
        prop = 'function' if callable(source_data) else 'constant'
        super().__init__(ns(view=self), prop)
        self.data = source_data
Example #11
from animations import directions as dirs
from types import SimpleNamespace as ns
from gui import gui, terrain_colors
# from random import choice
from hex_tile import get_hex_neighbors, pixel_to_hex, hex_to_pixel, tile_hash

# dice_pools = []
state = ns(
    **{
        "run": True,
        "hex_map": {},
        "valid": {},
        "selected": None,
        "offset": (0, 0),
        "active_player": 0,
        "board_size": 36,
        "terrain_tiles": {
            "plains": 12,
            "forest": 9,
            "towns": 6,
            "hills": 6,
            "mounts": 3
        },
        "selected_terrain": "plains"
    })
ms_st = ns(**{"but_1": 0, "but_2": 0, "last": (0, 0)})

# # Dice functions
# def generate_dice(count = 6,size = 6):
#     if count < 6:
#         count = 6
#     return {hash(die):die[0] for die in[(roll_die((size,)),x)
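The `state` namespace above acts as a mutable bag of game state and configuration. A trivial standalone illustration of reading and updating it by attribute (the gui/hex_tile imports above are not needed for this part):

from types import SimpleNamespace as ns

state = ns(selected=None, active_player=0,
           terrain_tiles={"plains": 12, "forest": 9},
           selected_terrain="plains")

# place a tile: take one from the selected terrain pool and pass the turn
state.terrain_tiles[state.selected_terrain] -= 1
state.active_player = (state.active_player + 1) % 2
print(state.terrain_tiles, state.active_player)   # {'plains': 11, 'forest': 9} 1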
Example #12
import colors as c
from types import SimpleNamespace as ns
from animation import Animation as anim
from hex_tile import hex_to_pixel
from gui import draw_box, draw_hex, draw_text, screen

directions = ns(**{"TOP": 0, "LEFT": 1, "BOTTOM": 3, "RIGHT": 4})


def white_hex_blink(selected, loop=2, ms=200):
    def step(percent, off=(0, 0)):
        r, g, b = c.WHITE
        pos, ci = selected
        coords = hex_to_pixel(pos)
        highlight = (r, g, b, 255 * percent)
        bc = c.YELLOW
        draw_hex(coords, color=highlight, bc=bc, off=off)

    return anim(ms, step, loop)


def slide_in(pos, size, drtn, ms=1000):
    ss = screen.get_size()
    x1, y1 = [(0 - l) if drtn < 2 else (sl + l) for l, sl in zip(size, ss)]
    Δx, Δy = [(a - b) for a, b in zip(pos, (x1, y1))]
    x2, y2 = pos
    vert = drtn % 2 == 0

    def step(percent, off=(0, 0)):
        x = (x1 + Δx * percent) if not vert else x2
        y = (y1 + Δy * percent) if vert else y2
Example #13
def train(
    traindir,
    *,
    datadir='data/conll2003',
    epochs=100,
    bert_model='bert-base-multilingual-cased',
):
    save_json(locals(), f'{traindir}/params.json')

    summary_writer = SummaryWriter(f'{traindir}/summaries')

    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    train_data = list(read_conll2003(f'{datadir}/eng.train'))
    testa_data = list(read_conll2003(f'{datadir}/eng.testa'))
    testb_data = list(read_conll2003(f'{datadir}/eng.testb'))

    feats = FeatsBert(bert_model=bert_model)
    train, testa, testb = feats.encode(train_data, testa_data, testb_data)
    feats.save(traindir)  # save vocabularies

    labels_vocab = feats.vocab['labels']

    train_batches = Batcher(train, batch_size=10, shuffle=True,
                            max_seqlen=60).to(DEVICE)  # BERT needs to limit seq length (OOM)
    testa_batches = Batcher(testa, batch_size=32).to(DEVICE)
    testb_batches = Batcher(testb, batch_size=32).to(DEVICE)

    model = BertTagger(bert_model,
                       labels_vocab_size=len(labels_vocab)).to(DEVICE)

    # for p in model.bert.parameters():
    #     p.requires_grad = False

    optimizer = torch.optim.SGD(
        [
            {'params': model.bert.parameters(), 'lr': 0.00015},
            {'params': model.output.parameters(), 'lr': 0.15},
        ],
        lr=1.5,
        momentum=0.0,
        weight_decay=0.000001)

    schedule = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lambda epoch: 1. / (1. + 0.05 * epoch))

    eval_metric = MetricSet({
        'acc': TokenAndRecordAccuracyBert(),
        'entity': F1ScoreBert(labels_vocab=labels_vocab),
        'loss': CrossEntropyLoss(),
    })
    train_metric = MetricSet({
        'acc': TokenAndRecordAccuracyBert(),
    })

    def cross_entropy(x, labels):
        x = x.transpose(1, 2)
        return F.cross_entropy(x, labels, ignore_index=0, reduction='mean')

    global_step = 0
    best = ns(f1=0.0, epoch=-1)
    for epoch in trange(epochs, desc='epoch'):
        schedule.step()

        summary_writer.add_scalar('lr',
                                  optimizer.param_groups[0]['lr'],
                                  global_step=global_step)

        train_metric.reset()
        for count, (words, labels) in enumerate(tqdm(train_batches,
                                                     desc='step'),
                                                start=1):
            optimizer.zero_grad()

            x = model(words)
            loss = cross_entropy(x, labels)
            loss.backward()
            optimizer.step()

            # accumulate metrics
            train_metric.append(x, labels, loss=loss.item())

            global_step += 1
            if count % 50 == 0:
                summ = train_metric.summary
                tqdm.write(
                    f'epoch: {epoch:5}, step: {count:6}, loss: {summ["acc.loss"]:10.4f} '
                    f'tacc: {summ["acc.tacc"]:6.4f}, racc: {summ["acc.racc"]:6.4f}'
                )
                summary_writer['train'].add_scalar_metric(
                    summ, global_step=global_step)

        with torch.no_grad():
            model.train(False)
            eval_metric.reset().update(
                (model(x), y) for x, y in tqdm(testa_batches, desc='dev'))
            summ = eval_metric.summary

            f1 = summ['entity.f1']
            if f1 > best.f1:
                best.f1 = f1
                best.epoch = epoch
                torch.save(model, f'{traindir}/model.pickle')

            tqdm.write(
                f'Dev: loss: {summ["loss.loss"]:6.4f}, tacc: {summ["acc.tacc"]:6.4f}, racc: {summ["acc.racc"]:6.4f}, '
                f'entity.f1: {summ["entity.f1"]:6.4f}, best.f1: {best.f1:6.4f} at epoch {best.epoch}'
            )
            summary_writer['dev'].add_scalar_metric(summ,
                                                    global_step=global_step)
            model.train(True)

    model = torch.load(f'{traindir}/model.pickle')

    with torch.no_grad():
        model.train(False)

        metric = MetricSet({
            'acc': TokenAndRecordAccuracyBert(),
            'entity': F1ScoreBert(labels_vocab=labels_vocab),
            'viterbi': F1ScoreBert(labels_vocab=labels_vocab,
                                   entity_decoder='viterbi'),  # this is sloow
            'loss': CrossEntropyLoss(),
        })

        tqdm.write('Evaluating the best model on testa')
        metric.reset().update(
            (model(x), y) for x, y in tqdm(testa_batches, desc="dev"))
        tqdm.write(repr(metric.summary))
        summary_writer['final-dev'].add_scalar_metric(metric.summary)

        tqdm.write('Evaluating the best model on testb')
        metric.reset().update(
            (model(x), y) for x, y in tqdm(testb_batches, desc="test"))
        tqdm.write(repr(metric.summary))
        summary_writer['final-test'].add_scalar_metric(metric.summary)

    summary_writer.close()
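The `best = ns(f1=0.0, epoch=-1)` record above is a small mutable namespace used to track the best dev score and the epoch it occurred in. A standalone sketch of that bookkeeping pattern (the F1 values are made up for illustration):

from types import SimpleNamespace as ns

best = ns(f1=0.0, epoch=-1)
for epoch, dev_f1 in enumerate([0.71, 0.78, 0.76, 0.81]):
    if dev_f1 > best.f1:
        best.f1, best.epoch = dev_f1, epoch
        # torch.save(model, f'{traindir}/model.pickle') would go here
print(best)   # namespace(f1=0.81, epoch=3)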
Example #14
class Activation():
    def __init__(self, f, df):
        self.f = f
        self.df = df 

    def __call__(self, *args, **kwargs): 
        return self.f(*args, **kwargs)

# helper to look up the derivative of an activation function
def d(activation):
    return activation.df

# activation functions
_sigmoid = lambda z: 1 / (1 + np.exp(-z))
activations = ns(
    sigmoid = Activation(_sigmoid, lambda z: _sigmoid(z) * (1 - _sigmoid(z))),
    relu    = Activation(lambda z: z*(z>0), lambda z: (z > 0) + 1e-12)
)

class Autoencoder(): 
    def __init__(self, layers):
        logging.info("Setting up autoencoder")
        self.layers = layers
        self.params = []

    def train(self, x, tau=0.01, epochs=100, seed=112358):
        np.random.seed(seed)
        p, n = x.shape 

        # wire up layers 
        L = len(self.layers)
        # inputs, outputs, weights, biases, and activation functions
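A short usage sketch for the `activations` namespace above, assuming the definitions in this example are in scope: each entry is callable as the forward function, and d() retrieves its stored derivative.

import numpy as np

z = np.array([-1.0, 0.0, 2.0])
print(activations.sigmoid(z))     # forward pass, applied elementwise
print(d(activations.sigmoid)(z))  # derivative; equals 0.25 at z = 0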
Example #15
    def examples(cls):
        return [
            ns(use_exception_handler=False),
            ns(use_exception_handler=True),
        ]
Example #16
import ui, console, objc_util, keychain
from types import SimpleNamespace as ns

font = 'Apple SD Gothic Neo'
UIColor = objc_util.ObjCClass('UIColor')
objc_black = UIColor.darkGrayColor().CGColor()

light_theme = ns(front='black',
                 back='white',
                 secondary='darkgrey',
                 shadow=objc_black)
dark_theme = ns(front='white',
                back='black',
                secondary='grey',
                shadow=objc_black)
blue_theme = ns(front='#1976D2',
                back='white',
                secondary='#03A9F4',
                shadow=objc_black)
green_theme = ns(front='#009688',
                 back='white',
                 secondary='#80CBC4',
                 shadow=objc_black)
red_theme = ns(front='#E53935',
               back='white',
               secondary='#FFA726',
               shadow=objc_black)
cyan_dark_theme = ns(front='#4DD0E1',
                     back='black',
                     secondary='#00897B',
                     shadow=objc_black)
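Each theme above is a small attribute bag. A standalone sketch of picking a theme by name and reading its fields (the Pythonista ui/objc_util details are omitted here):

from types import SimpleNamespace as ns

themes = {
    'light': ns(front='black', back='white', secondary='darkgrey'),
    'dark': ns(front='white', back='black', secondary='grey'),
}

def describe_theme(name):
    theme = themes[name]
    return f'text {theme.front} on background {theme.back}'

print(describe_theme('dark'))   # -> text white on background black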