Example #1
    def solve(self):
        for t in monit.loop(self.epochs):
            if not self.is_online_update:
                for I in self.info_sets.values():
                    I.clear()
            for i in range(self.n_players):
                self.cfr(self.create_new_history(), cast(Player, i),
                         [1 for _ in range(self.n_players)])
            if not self.is_online_update:
                self.update()
            with monit.section("Track"):
                for I in self.info_sets.values():
                    for a in I.actions():
                        tracker.add({
                            f'strategy.{I.key}.{a}': I.strategy[a],
                            f'average_strategy.{I.key}.{a}': I.average_strategy[a],
                            f'regret.{I.key}.{a}': I.regret[a],
                            f'current_regret.{I.key}.{a}': I.current_regret[a]
                        })

            if t % self.track_frequency == 0:
                tracker.save()
                logger.log()

            if (t + 1) % self.save_frequency == 0:
                experiment.save_checkpoint()

        logger.inspect(self.info_sets)
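
Example #1 combines several labml facilities: monit.loop for the epoch loop, tracker.add and tracker.save for metrics, and experiment.save_checkpoint for checkpoints. As a minimal, self-contained sketch of that tracking pattern (the experiment name and the fake loss below are placeholders, not taken from the example):

from labml import experiment, monit, tracker


def tracking_sketch():
    # Placeholder experiment name for illustration
    experiment.create(name='tracking_sketch')
    with experiment.start():
        for step in monit.loop(100):
            # Stand-in for a real training loss
            loss = 1.0 / (step + 1)
            # Queue metrics for this step
            tracker.add({'loss': loss})
            if step % 10 == 0:
                # Flush queued metrics to the log
                tracker.save()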
Example #2
def _test():
    from labml.logger import inspect

    projects = Projects()
    runs = projects.get_runs()
    for r in runs:
        inspect(r.to_dict())
Example #3
def split_train_valid(
        files: List[EncodedFile],
        is_shuffle=True) -> Tuple[List[EncodedFile], List[EncodedFile]]:
    """
    Split training and validation sets
    """
    if is_shuffle:
        np.random.shuffle(files)

    total_size = sum([len(f.codes) for f in files])
    valid = []
    valid_size = 0
    while len(files) > 0:
        if valid_size > total_size * 0.15:
            break
        valid.append(files[0])
        valid_size += len(files[0].codes)
        files.pop(0)

    train_size = sum(len(f.codes) for f in files)
    if train_size < total_size * 0.60:
        raise RuntimeError("Validation set too large")

    logger.inspect(train_size=train_size,
                   valid_size=valid_size,
                   vocab=tokenizer.VOCAB_SIZE)
    return files, valid
Example #4
def print_env_vars():
    inspect(world_size=os.environ['WORLD_SIZE'],
            run_uuid=os.environ['RUN_UUID'],
            local_rank=os.environ['LOCAL_RANK'],
            rank=os.environ['RANK'],
            master_addr=os.environ['MASTER_ADDR'],
            master_port=os.environ['MASTER_PORT'])
Example #5
def _test():
    """
    ### Test the model with fake data
    """
    chunk_len = 4
    d_model = 8
    d_ff = 32
    n_heads = 2
    d_k = 4

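    # Assumes a CUDA-capable GPU; use torch.device('cpu') if CUDA is unavailable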
    device = torch.device('cuda:0')

    m = RetroModel(5,
                   d_model,
                   6, {2, 5},
                   chunk_len,
                   n_heads,
                   d_k,
                   d_ff,
                   encoder=NearestNeighborEncoder(chunk_len, 2, {1}, d_model,
                                                  n_heads, d_k, d_ff))

    m.to(device)
    x = [1, 2, 4, 4, 0, 1, 2, 3, 4, 3]
    ret = [
        [[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1]],
        [[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1]],
    ]
    res = m(
        torch.tensor([x] * 10).to(device),
        torch.tensor([ret] * 10).to(device))

    inspect(res)
Example #6
def predict():
    data = ["This year business is good", "Fortnite, Football And Soccer, And Their Surprising Similarities"]
    client = get_deploy_client('torchserve')
    for d in data:
        data_json = json.dumps({'data': [d], 'uuid': 'str'})
        res = client.predict('news_classification_test', data_json)
        inspect(text=d, category=res)
Example #7
def _test():
    """
    A simple test to verify the tensor sizes
    """
    import torch
    from labml.logger import inspect

    conv2d = Conv2d(10, 20, 5)
    inspect(conv2d.weight)
    inspect(conv2d(torch.zeros(10, 10, 100, 100)))
Example #8
def evaluate(predictor: Predictor, text: str):
    line_no = 1
    logs = [(f"{line_no: 4d}: ", Text.meta), (text[0], Text.subtle)]

    correct = 0
    i = 0
    key_strokes = 0

    while i + 1 < len(text):
        prefix = text[:i + 1]
        stripped, prompt = predictor.rstrip(prefix)
        rest = prefix[len(stripped):]
        prediction_complete = NextWordPredictionComplete(rest, 5)
        prompt = torch.tensor(prompt, dtype=torch.long).unsqueeze(-1)

        predictions = predictor.get_next_word(prompt, None, rest, [1.],
                                              prediction_complete, 5)
        predictions.sort(key=lambda x: -x[0])
        if predictions:
            next_token = predictions[0].text[len(rest):]
        else:
            next_token = ''

        if next_token and next_token == text[i + 1:i + 1 + len(next_token)]:
            correct += len(next_token)
            right = True
        else:
            next_token = text[i + 1]
            right = False

        for j, c in enumerate(next_token):
            if c == '\n':
                logger.log(logs)
                line_no += 1
                logs = [(f"{line_no: 4d}: ", Text.meta)]
            elif c == '\r':
                continue
            else:
                if right:
                    if j == 0:
                        logs.append((c, [Text.meta, Style.underline]))
                    else:
                        logs.append((c, [Text.success, Style.underline]))
                else:
                    logs.append((c, [Text.warning]))

        i += len(next_token)
        key_strokes += 1

    logger.log(logs)

    logger.inspect(accuracy=correct / (len(text) - 1),
                   key_strokes=key_strokes,
                   length=len(text))
Example #9
def _test_rotary():
    """
    Testing RoPE with a simple example
    """
    x = torch.tensor([[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]],
                     dtype=torch.float)
    x = x[:, None, None, :]
    inspect(x)

    rotary_pe = RotaryPositionalEmbeddings(3)
    inspect(rotary_pe(x))
Example #10
def main():
    source_files = _GetPythonFiles().files

    logger.inspect(source_files)

    with open(str(Path(os.getcwd()) / 'data' / 'all.py'), 'w') as f:
        for i, source in monit.enum("Parse", source_files):
            serialized = _read_file(source.path)
            serialized = [str(t) for t in serialized]
            f.write(f"{str(source.path)}\n")
            f.write(" ".join(serialized) + "\n")
Example #11
    def eval(self):
        keys_saved = 0

        for line, content in enumerate(self.__content):
            # Keep reference to rest of the line
            rest_of_line = content

            # Build the line for logging with colors
            # The line number
            logs = [(f"{line: 4d}: ", Text.meta)]

            # Type the line character by character
            while rest_of_line != '':
                suggestion = self.__predictor.get_suggestion()

                # If suggestion matches
                if suggestion != '' and rest_of_line.startswith(suggestion):
                    # Log
                    logs.append((suggestion[0], [Style.underline,
                                                 Text.danger]))
                    logs.append((suggestion[1:], Style.underline))

                    keys_saved += len(suggestion) - 1

                    # Skip the prediction text
                    rest_of_line = rest_of_line[len(suggestion):]

                    # Add text to the predictor
                    self.__predictor.add(suggestion)

                # If the suggestion doesn't match
                else:
                    # Add the next character
                    self.__predictor.add(rest_of_line[0])
                    logs.append((rest_of_line[0], Text.subtle))
                    rest_of_line = rest_of_line[1:]

            # Add a new line
            self.__predictor.add("\n")

            # Log the line
            logger.log(logs)

        # Log time taken for the file
        logger.inspect(add=self.__predictor.time_add,
                       check=self.__predictor.time_check,
                       predict=self.__predictor.time_predict)

        total_keys = sum([len(c) for c in self.__content])
        logger.inspect(keys_saved=keys_saved,
                       percentage_saved=100 * keys_saved / total_keys,
                       total_keys=total_keys,
                       total_lines=len(self.__content))
Example #12
def main():
    source_files = _GetPythonFiles().files

    np.random.shuffle(source_files)

    logger.inspect(source_files)

    train_valid_split = int(len(source_files) * 0.9)
    _load_code(lab.get_data_path() / 'train.py',
               source_files[:train_valid_split])
    _load_code(lab.get_data_path() / 'valid.py',
               source_files[train_valid_split:])
Example #13
def _test():
    """
    Simple test
    """
    from labml.logger import inspect

    x = torch.zeros([2, 6, 2, 4])
    inspect(x.shape)
    norm = InstanceNorm(6)

    x = norm(x)
    inspect(x.shape)
Example #14
def _launcher():
    import os
    world_size = int(os.environ['WORLD_SIZE'])
    run_uuid = os.environ['RUN_UUID']
    local_rank = int(os.environ['LOCAL_RANK'])
    rank = int(os.environ['RANK'])
    inspect(world_size=os.environ['WORLD_SIZE'],
            run_uuid=os.environ['RUN_UUID'],
            local_rank=os.environ['LOCAL_RANK'],
            rank=os.environ['RANK'],
            master_addr=os.environ['MASTER_ADDR'],
            master_port=os.environ['MASTER_PORT'])
    main(local_rank, rank, world_size, run_uuid, 'env://')
Example #15
def main():
    try:
        batch()
    except KeyboardInterrupt:
        pass

    source_files = get_python_files()

    np.random.shuffle(source_files)

    logger.inspect(source_files)

    train_valid_split = int(len(source_files) * 0.9)
    concat_and_save(lab.get_data_path() / 'train.py',
                    source_files[:train_valid_split])
    concat_and_save(lab.get_data_path() / 'valid.py',
                    source_files[train_valid_split:])
Example #16
def main():
    # Create experiment
    experiment.create(name='cifar10', comment='small model')
    # Create configurations
    conf = Configs()
    # Load configurations
    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    })
    # Set model for saving/loading
    experiment.add_pytorch_models({'model': conf.model})
    # Print number of parameters in the model
    logger.inspect(params=(sum(p.numel() for p in conf.model.parameters() if p.requires_grad)))
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
Example #17
def to_numpy(df: pd.DataFrame):
    dates, packets = to_daily_packets(df)
    empty_mins = np.sum(packets[:, :, 4] == 0)
    filled_mins = np.sum(packets[:, :, 4] > 0)
    empty_mins_high_activity = np.sum(packets[:, :, 4] == 0)
    fill_empty_minutes_in_packets(packets)
    zero_price = np.sum(packets[:, :, 0:4] == 0)
    zero_volume = np.sum(packets[:, :, 4] == 0)
    packets[:, :, 4] = np.maximum(packets[:, :, 4], 1)

    logger.inspect(empty_mins=empty_mins,
                   filled_mins=filled_mins,
                   empty_mins_high_activity=empty_mins_high_activity,
                   zero_price=zero_price,
                   zero_volume=zero_volume)

    return dates, packets
Example #18
def _test():
    """
    ### Code to test BERT embeddings
    """
    from labml.logger import inspect

    # Initialize
    device = torch.device('cuda:0')
    bert = BERTChunkEmbeddings(device)

    # Sample
    text = ["Replace me by any text you'd like.", "Second sentence"]

    # Check BERT tokenizer
    encoded_input = bert.tokenizer(text,
                                   return_tensors='pt',
                                   add_special_tokens=False,
                                   padding=True)

    inspect(encoded_input, _expand=True)

    # Check BERT model outputs
    output = bert.model(
        input_ids=encoded_input['input_ids'].to(device),
        attention_mask=encoded_input['attention_mask'].to(device),
        token_type_ids=encoded_input['token_type_ids'].to(device))

    inspect(
        {
            'last_hidden_state': output['last_hidden_state'],
            'pooler_output': output['pooler_output']
        },
        _expand=True)

    # Check recreating text from token ids
    inspect(bert.tokenizer.convert_ids_to_tokens(
        encoded_input['input_ids'][0]),
            _n=-1)
    inspect(bert.tokenizer.convert_ids_to_tokens(
        encoded_input['input_ids'][1]),
            _n=-1)

    # Get chunk embeddings
    inspect(bert(text))
Example #19
    def run(self):
        retries = 1
        while not self.is_stopped:
            response = self.caller.send({'jobs': self.results})
            if response is None:
                logger.log(f'Retrying in 10 seconds ({retries})...',
                           Text.highlight)
                time.sleep(10)
                retries += 1
                continue
            retries = 1
            self.results = []
            jobs = response.get('jobs', [])
            logger.log(f'Jobs: {len(jobs)}')
            for j in jobs:
                inspect(j)
                res = self.do_job(j)
                self.results.append(res)
                inspect(res)
Example #20
def evaluate(predictor: Predictor, text: str):
    line_no = 1
    logs = [(f"{line_no: 4d}: ", Text.meta), (text[0], Text.subtle)]

    correct = 0
    i = 0
    right = False
    key_strokes = 0

    while i + 1 < len(text):
        next_token = predictor.get_token(text[:i + 1])
        if next_token == text[i + 1:i + 1 + len(next_token)]:
            correct += len(next_token)
            right = True
        else:
            next_token = text[i + 1]
            right = False

        for j, c in enumerate(next_token):
            if c == '\n':
                logger.log(logs)
                line_no += 1
                logs = [(f"{line_no: 4d}: ", Text.meta)]
            elif c == '\r':
                continue
            else:
                if right:
                    if j == 0:
                        logs.append((c, [Text.meta, Style.underline]))
                    else:
                        logs.append((c, [Text.success, Style.underline]))
                else:
                    logs.append((c, [Text.warning]))

        i += len(next_token)
        key_strokes += 1

    logger.log(logs)

    logger.inspect(accuracy=correct / (len(text) - 1),
                   key_strokes=key_strokes,
                   length=len(text))
Example #21
def main():
    from labml_nn.transformers.knn.build_index import load_experiment
    # Load the experiment. Replace the run uuid with your run uuid from
    # [training the model](train_model.html).
    conf = load_experiment('4984b85c20bf11eb877a69c1a03717cd')
    # Set model to evaluation mode
    conf.model.eval()

    # Load index
    index, keys_store, vals_store = load_index(conf)
    # List of weights given to $k$-NN prediction. We will evaluate the validation loss for
    # each of the weights
    knn_weights = [i / 20 for i in range(10)]
    # Evaluate validation loss
    losses, n_samples = validation_loss(knn_weights, None, conf, index,
                                        keys_store, vals_store)
    # Output the losses for each of `knn_weights`.
    inspect({
        c: np.sum(losses[i]) / np.sum(n_samples)
        for i, c in enumerate(knn_weights)
    })
Example #22
def _test():
    """
    #### Code to test the FTA module
    """
    from labml.logger import inspect

    # Initialize
    a = FTA(-10, 10, 2., 0.5)
    # Print $\mathbf{c}$
    inspect(a.c)
    # Print number of bins $\frac{u - l}{\delta}$
    inspect(a.expansion_factor)

    # Input $z$
    z = torch.tensor([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9., 10., 11.])
    # Print $z$
    inspect(z)
    # Print $\phi_\eta(z)$
    inspect(a(z))
Example #23
def test_nvidia_device(idx: int):
    from py3nvml import py3nvml as nvml

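    # NVML must already be initialized via nvml.nvmlInit() before this call;
    # Example #27 below initializes NVML and then calls this function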
    handle = nvml.nvmlDeviceGetHandleByIndex(idx)

    pciInfo = nvml.nvmlDeviceGetPciInfo(handle)

    brands = {
        nvml.NVML_BRAND_UNKNOWN: "Unknown",
        nvml.NVML_BRAND_QUADRO: "Quadro",
        nvml.NVML_BRAND_TESLA: "Tesla",
        nvml.NVML_BRAND_NVS: "NVS",
        nvml.NVML_BRAND_GRID: "Grid",
        nvml.NVML_BRAND_GEFORCE: "GeForce"
    }

    inspect(
        idx=idx,
        # id=pciInfo.busId,
        # uuid=nvml.nvmlDeviceGetUUID(handle),
        name=nvml.nvmlDeviceGetName(handle),
        # brand=brands[nvml.nvmlDeviceGetBrand(handle)],
        # multi_gpu=nvml.nvmlDeviceGetMultiGpuBoard(handle),
        # pcie_link=nvml.nvmlDeviceGetCurrPcieLinkWidth(handle),
        fan=nvml.nvmlDeviceGetFanSpeed(handle),
        # power=nvml.nvmlDeviceGetPowerState(handle),
        mem_total=nvml.nvmlDeviceGetMemoryInfo(handle).total,
        mem_used=nvml.nvmlDeviceGetMemoryInfo(handle).used,
        util_gpu=nvml.nvmlDeviceGetUtilizationRates(handle).gpu,
        # util_mem=nvml.nvmlDeviceGetUtilizationRates(handle).memory,
        temp=nvml.nvmlDeviceGetTemperature(handle, nvml.NVML_TEMPERATURE_GPU),
        power=nvml.nvmlDeviceGetPowerUsage(handle),
        power_limit=nvml.nvmlDeviceGetPowerManagementLimit(handle),

        # display=nvml.nvmlDeviceGetDisplayMode(handle),
        display_active=nvml.nvmlDeviceGetDisplayActive(handle),
    )

    logger.log()

    procs = nvml.nvmlDeviceGetGraphicsRunningProcesses(handle)
    for p in procs:
        inspect(name=nvml.nvmlSystemGetProcessName(p.pid),
                pid=p.pid,
                mem=p.usedGpuMemory)

    procs = nvml.nvmlDeviceGetComputeRunningProcesses(handle)
    for p in procs:
        inspect(name=nvml.nvmlSystemGetProcessName(p.pid),
                pid=p.pid,
                mem=p.usedGpuMemory)

    logger.log()
Example #24
def _test():
    """
    Simple test
    """
    from labml.logger import inspect

    x = torch.zeros([2, 3, 2, 4])
    inspect(x.shape)
    ln = LayerNorm(x.shape[2:])

    x = ln(x)
    inspect(x.shape)
    inspect(ln.gain.shape)
Example #25
def test_sensors():
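    # psutil's sensors_* APIs are platform-dependent; psutil does not define
    # them on unsupported platforms, hence the AttributeError guards below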
    try:
        inspect(psutil.sensors_temperatures())
    except AttributeError:
        pass
    try:
        inspect(psutil.sensors_fans())
    except AttributeError:
        pass
    try:
        inspect(psutil.sensors_battery()._asdict())
    except AttributeError:
        pass
Example #26
def _test():
    """
    Simple test
    """
    from labml.logger import inspect

    x = torch.zeros([2, 3, 2, 4])
    inspect(x.shape)
    bn = BatchNorm(3)

    x = bn(x)
    inspect(x.shape)
    inspect(bn.exp_var.shape)
Example #27
def test_nvidia():
    # pip install py3nvml
    import py3nvml
    from py3nvml import py3nvml as nvml

    inspect(py3nvml.get_free_gpus())

    nvml.nvmlInit()
    inspect(version=nvml.nvmlSystemGetDriverVersion())
    inspect(count=nvml.nvmlDeviceGetCount())

    for i in range(nvml.nvmlDeviceGetCount()):
        test_nvidia_device(i)

    nvml.nvmlShutdown()
Example #28
def _test_shift_right():
    x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    inspect(x)
    inspect(shift_right(x))

    x = torch.arange(1, 6)[None, :, None, None].repeat(5, 1, 1, 1)
    inspect(x[:, :, 0, 0])
    inspect(shift_right(x)[:, :, 0, 0])

    x = torch.arange(1, 6)[None, :, None, None].repeat(3, 1, 1, 1)
    inspect(x[:, :, 0, 0])
    inspect(shift_right(x)[:, :, 0, 0])
Example #29
File: __init__.py Project: wx-b/nn
def _test_local_mask():
    """
    Test local mask
    """
    from labml.logger import inspect
    inspect(AFTLocal.create_local_mask(10, 4))
Example #30
def test_psutil():
    # sudo apt-get install gcc python3-dev
    # xcode on mac
    # pip install psutil
    import psutil

    # https://psutil.readthedocs.io/en/latest/#
    inspect(mac=psutil.MACOS, linux=psutil.LINUX, windows=psutil.WINDOWS)
    inspect(psutil.net_io_counters()._asdict())
    inspect(psutil.net_if_addrs())
    inspect(psutil.net_if_stats())
    inspect(psutil.virtual_memory()._asdict())
    inspect(psutil.cpu_count())
    inspect(psutil.cpu_times()._asdict())
    inspect(psutil.cpu_stats()._asdict())
    inspect(psutil.cpu_freq()._asdict())
    inspect(psutil.cpu_percent(percpu=True))
    inspect(psutil.disk_usage(lab.get_path())._asdict())
    inspect(psutil.Process().as_dict())
    inspect([p for p in psutil.process_iter()])
    # inspect(psutil.Process().terminate())
    # inspect('test')
    p = psutil.Process()
    with p.oneshot():
        inspect(p.memory_info()._asdict())
        inspect(p.memory_percent())
        inspect(p.cpu_percent(1))
        inspect(p.cpu_times()._asdict())
        inspect(p.num_threads())
        inspect(p.threads())
        try:
            inspect(p.cpu_num())
        except AttributeError:
            pass
    try:
        inspect(psutil.sensors_temperatures())
    except AttributeError:
        pass
    try:
        inspect(psutil.sensors_fans())
    except AttributeError:
        pass
    try:
        inspect(psutil.sensors_battery()._asdict())
    except AttributeError:
        pass
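
The examples above cover the main inspect() call patterns: positional values (tensors, lists, dicts, process objects), keyword arguments, and the _expand and _n options used in Example #18. A condensed sketch of those patterns, assuming only that labml is installed (the values are placeholders):

from labml.logger import inspect

inspect('a plain value')                        # a single positional value
inspect([1, 2, 3])                              # a list
inspect({'loss': 0.1, 'accuracy': 0.9})         # a dict
inspect(epochs=10, lr=2.5e-4)                   # keyword arguments
inspect({'outer': {'inner': 1}}, _expand=True)  # expand nested structures
inspect(list(range(1000)), _n=-1)               # _n=-1 as in Example #18,
                                                # presumably to show all items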