Example #1
def test_liberate(writer_stock):
    # writer_stock already holds the write lock, so a second writer is refused
    with pytest.raises(PermissionError):
        StockRoom(enable_write=True)
    # stock liberate forcefully releases the stale write lock
    runner = CliRunner()
    res = runner.invoke(cli.liberate)
    assert res.exit_code == 0
    # the lock can be acquired again afterwards
    stock = StockRoom(enable_write=True)
    stock.close()
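
The test above assumes a writer_stock fixture that keeps a write-enabled StockRoom open, so the write lock is already taken when the test starts. A minimal sketch of such a fixture, assuming a conftest with a repo_with_col setup fixture (Example #12 below shows the same idea against the older write=True keyword):

import pytest
from stockroom import StockRoom

@pytest.fixture
def writer_stock(repo_with_col):
    # hold the write lock for the duration of the test
    stock_obj = StockRoom(enable_write=True)
    yield stock_obj
    stock_obj.close()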
Example #2
def test_init_repo():
    runner = CliRunner()
    with runner.isolated_filesystem():
        with pytest.raises(RuntimeError):
            StockRoom()
        res = runner.invoke(cli.init, ['--username', 'test', '--email', '*****@*****.**'])
        assert res.exit_code == 0
        StockRoom()
Example #3
    def test_opening_two_instances(self, writer_stock):
        with pytest.raises(PermissionError):
            StockRoom(write=True)
        arr = np.arange(20).reshape(4, 5)
        oldarr = arr * randint(1, 100)
        col1 = writer_stock.data['ndcol']
        col1[1] = oldarr
        writer_stock.commit('added data')

        stock2 = StockRoom()
        col2 = stock2.data['ndcol']

        assert np.allclose(col2[1], oldarr)
        stock2._repo._env._close_environments()
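
Outside pytest, the write-then-read-back flow exercised above looks roughly like this; a sketch assuming a stockroom repository with an ndarray column named 'ndcol' already exists in the working directory:

import numpy as np
from stockroom import StockRoom

writer = StockRoom(write=True)
writer.data['ndcol'][1] = np.arange(20).reshape(4, 5)
writer.commit('added data')
writer.close()

reader = StockRoom()   # a fresh read-only instance sees the committed sample
assert np.allclose(reader.data['ndcol'][1], np.arange(20).reshape(4, 5))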
Example #4
def test_init_repo():
    runner = CliRunner()
    with runner.isolated_filesystem():
        with pytest.raises(RuntimeError):
            stock = StockRoom()
        res = runner.invoke(cli.init,
                            ['--name', 'test', '--email', '*****@*****.**'])
        assert 'Error: stock init should execute only in a git repository' in res.output

        cwd = Path.cwd()
        cwd.joinpath('.git').mkdir(exist_ok=True)
        res = runner.invoke(cli.init,
                            ['--name', 'test', '--email', '*****@*****.**'])
        assert res.exit_code == 0
        stock = StockRoom()
Example #5
def reader_stock(writer_stock):
    arr = np.arange(20).reshape(4, 5)
    col = writer_stock.data["ndcol"]
    col[1] = arr
    writer_stock.commit("added first data point")
    writer_stock.close()
    stock_obj = StockRoom()
    yield stock_obj
    stock_obj._repo._env._close_environments()
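
A hypothetical test consuming the reader_stock fixture above (the test name and assertion are not part of the original snippet; they mirror the data point the fixture writes):

import numpy as np

def test_read_back(reader_stock):
    col = reader_stock.data["ndcol"]
    assert np.allclose(col[1], np.arange(20).reshape(4, 5))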
Example #6
def test_commit(repo_with_aset):
    runner = CliRunner()
    stock = StockRoom()
    stock.tag['key'] = 'value'
    res = runner.invoke(cli.commit, [])
    assert 'Error: Require commit message\n' in res.stdout
    res = runner.invoke(cli.commit, ['-m', 'test commit'])
    assert res.exit_code == 0
    assert 'Commit message:\ntest commit\nCommit Successful. Digest' in res.stdout
    stock._repo.hangar_repository._env._close_environments()
Example #7
def test_commit(repo_with_col):
    runner = CliRunner()
    stock = StockRoom(enable_write=True)
    stock.experiment["key"] = "value"
    stock.close()
    res = runner.invoke(cli.commit, [])
    assert "Error: Require commit message\n" in res.stdout
    res = runner.invoke(cli.commit, ["-m", "test commit"])
    assert res.exit_code == 0
    assert "Commit message:\ntest commit" in res.stdout
    assert "Commit Successful. Digest" in res.stdout
    stock._repo._env._close_environments()
Example #8
def test_commit(repo_with_col):
    runner = CliRunner()
    stock = StockRoom(write=True)
    stock.experiment['key'] = 'value'
    stock.close()
    res = runner.invoke(cli.commit, [])
    assert 'Error: Require commit message\n' in res.stdout
    res = runner.invoke(cli.commit, ['-m', 'test commit'])
    assert res.exit_code == 0
    assert 'Commit message:\ntest commit' in res.stdout
    assert 'Commit Successful. Digest' in res.stdout
    stock._repo._env._close_environments()
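
The same commit can also be made through the Python API instead of the CLI runner; a sketch based on the commit calls shown in Examples #3 and #5:

from stockroom import StockRoom

stock = StockRoom(write=True)
stock.experiment['key'] = 'value'
stock.commit('test commit')
stock.close()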
Example #9
def test_import_cifar(repo, torchvision_datasets, dataset, splits, columns):
    runner = CliRunner()
    res = runner.invoke(cli.import_data, [f"torchvision.{dataset}"])
    assert res.exit_code == 0

    keys = [
        f"{dataset}-{split}-{column}" for split in splits for column in columns
    ]
    stock = StockRoom()
    assert stock.data.keys() == tuple(keys)

    assert stock.data[f"{dataset}-train-image"][0].shape == (3, 32, 32)
    assert stock.data[f"{dataset}-test-label"][0].shape == tuple()
    assert stock.data[f"{dataset}-train-image"][0].dtype == np.float32
Example #10
def test_import_mnist(repo, torchvision_datasets, dataset, splits, columns):
    runner = CliRunner()
    res = runner.invoke(cli.import_data, [f"torchvision.{dataset}"])
    assert res.exit_code == 0

    keys = [
        f"{dataset}-{split}-{column}" for split in splits for column in columns
    ]
    keys = sorted(keys)
    stock = StockRoom()
    data_keys = sorted(list(stock.data.keys()))
    assert data_keys == keys

    assert stock.data[f"{dataset}-train-image"][0].shape == (28, 28)
    assert stock.data[f"{dataset}-test-label"][0].shape == tuple()
    assert stock.data[f"{dataset}-train-image"][0].dtype == np.float32
Example #11
def test_import_voc(repo, torchvision_datasets, dataset, splits, columns):
    runner = CliRunner()
    res = runner.invoke(cli.import_data, [f"torchvision.{dataset}"])
    assert res.exit_code == 0

    keys = [
        f"{dataset}-{split}-{column}" for split in splits for column in columns
    ]
    stock = StockRoom()
    assert sorted(stock.data.keys()) == sorted(keys)

    assert stock.data[f"{dataset}-train-image"][0].shape == (3, 500, 500)
    assert stock.data[f"{dataset}-train-image"][0].dtype == np.uint8
    assert stock.data[f"{dataset}-val-image"][0].shape == (3, 500, 500)
    assert stock.data[f"{dataset}-trainval-image"][0].shape == (3, 500, 500)
    if dataset == "voc_segmentation":
        assert stock.data[f"{dataset}-train-segment"][0].shape == (500, 500)
    elif dataset == "voc_detection":
        assert stock.data[f"{dataset}-train-names"][0][0] == "testname"
        assert stock.data[f"{dataset}-train-boxes"][0][0].shape == (2, 2)
Example #12
def writer_stock(repo_with_col):
    stock_obj = StockRoom(write=True)
    yield stock_obj
    stock_obj._repo._env._close_environments()
Example #13
def stock(repo_with_aset):
    stock_obj = StockRoom()
    yield stock_obj
    stock_obj._repo.hangar_repository._env._close_environments()
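
A hypothetical read-only check built on the stock fixture above, reading back a tag such as the one written in Example #6 (this assumes the tag accessor supports reads as well as writes):

def test_read_tag(stock):
    assert stock.tag['key'] == 'value'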
Example #14
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)   # 3-channel 32x32 CIFAR-10 input
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


stock = StockRoom()
imgcol = stock.data['cifar10-train-image']
lblcol = stock.data['cifar10-train-label']
# imshow(imgcol[11])

lr = 0.001
momentum = 0.9
check_every = 500
net = Net()
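# make_torch_dataset pairs samples from the two stockroom columns by key, so
# each batch yielded by the DataLoader below is an (image_batch, label_batch) tuple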
dset = make_torch_dataset([imgcol, lblcol])
dloader = DataLoader(dset, batch_size=64)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)

for epoch in range(2):
    running_loss = 0.0