ripgrep


yaml-to-sqlite/setup.py

24      packages=find_packages(),
25      install_requires=["click", "PyYAML", "sqlite-utils>=3.9.1"],
26      setup_requires=["pytest-runner"],
27      extras_require={"test": ["pytest"]},
28      entry_points="""
29          [console_scripts]

twitter-to-sqlite/tests/test_save_tweets.py

2   import pathlib
3   
4   import pytest
5   import sqlite_utils
6   from twitter_to_sqlite import utils
7   
8   
9   @pytest.fixture
10  def tweets():
11      return json.load(open(pathlib.Path(__file__).parent / "tweets.json"))
12  
13  
14  @pytest.fixture
15  def db(tweets):
16      db = sqlite_utils.Database(memory=True)

twitter-to-sqlite/tests/test_import.py

2   import pathlib
3   
4   import pytest
5   import sqlite_utils
6   from click.testing import CliRunner
10  
11  
12  @pytest.fixture
13  def zip_contents_path():
14      return pathlib.Path(__file__).parent / "zip_contents"
15  
16  
17  @pytest.fixture
18  def import_test_zip(tmpdir, zip_contents_path):
19      archive = str(tmpdir / "archive.zip")

twitter-to-sqlite/setup.py

37          "python-dateutil",
38      ],
39      extras_require={"test": ["pytest"]},
40      tests_require=["twitter-to-sqlite[test]"],
41  )

tableau-to-sqlite/setup.py

33      """,
34      install_requires=["click", "TableauScraper==0.1.3"],
35      extras_require={"test": ["pytest", "vcrpy"]},
36      tests_require=["tableau-to-sqlite[test]"],
37      python_requires=">=3.6",

tableau-to-sqlite/README.md

72  To run the tests:
73  
74      pytest

til/sqlite/related-content.md

17  select title, rank from til_fts where til_fts match '
18    i OR wanted OR to OR run OR some OR django OR tests OR using OR
19    pytestdjango OR and OR with OR configured OR pick OR up OR the
20    OR databaseurl OR environment OR variable OR via OR djdatabaseurl
21    OR against OR a OR postgresql OR server OR running OR in OR
25  ```
26  
27  Here are the results from [that query](https://til.simonwillison.net/tils?sql=select+title%2C+rank+from+til_fts+where+til_fts+match+%27%0D%0A++i+OR+wanted+OR+to+OR+run+OR+some+OR+django+OR+tests+OR+using+OR%0D%0A++pytestdjango+OR+and+OR+with+OR+configured+OR+pick+OR+up+OR+the%0D%0A++OR+databaseurl+OR+environment+OR+variable+OR+via+OR+djdatabaseurl%0D%0A++OR+against+OR+a+OR+postgresql+OR+server+OR+running+OR+in+OR%0D%0A++github+OR+actions+OR+it+OR+took+OR+while+OR+figure+OR+out+OR%0D%0A++right+OR+pattern+OR+trick+OR+was+OR+define+OR+postgres+OR+service%27%0D%0Aorder+by+rank+limit+5). Unsurprisingly the entry itself shows up first, but the other items look relevant enough to me:
28  
29  title | rank
60    5
61  ```
62  And [an example of it running](https://til.simonwillison.net/tils?sql=select%0D%0A++til.topic%2C+til.slug%2C+til.title%2C+til.created%0D%0Afrom%0D%0A++til%0D%0A++join+til_fts+on+til.rowid+%3D+til_fts.rowid%0D%0Awhere%0D%0A++til_fts+match+%3Awords%0D%0A++and+not+(%0D%0A++++til.slug+%3D+%3Aslug%0D%0A++++and+til.topic+%3D+%3Atopic%0D%0A++)%0D%0Aorder+by%0D%0A++til_fts.rank%0D%0Alimit%0D%0A++5&words=i+OR+wanted+OR+to+OR+run+OR+some+OR+django+OR+tests+OR+using+OR+++pytestdjango+OR+and+OR+with+OR+configured+OR+pick+OR+up+OR+the+++OR+databaseurl+OR+environment+OR+variable+OR+via+OR+djdatabaseurl+++OR+against+OR+a+OR+postgresql+OR+server+OR+running+OR+in+OR+++github+OR+actions+OR+it+OR+took+OR+while+OR+figure+OR+out+OR+++right+OR+pattern+OR+trick+OR+was+OR+define+OR+postgres+OR+service&slug=postgresq-service-container&topic=github-actions), which returns the following:
63  
64  topic | slug | title | created

til/sphinx/literalinclude-with-markers.md

35  
36  # -- start test_homepage --
37  @pytest.mark.asyncio
38  async def test_homepage():
39      ds = Datasette(memory=True)
44  
45  # -- start test_actor_is_null --
46  @pytest.mark.asyncio
47  async def test_actor_is_null():
48      ds = Datasette(memory=True)
52  
53  # -- start test_signed_cookie_actor --
54  @pytest.mark.asyncio
55  async def test_signed_cookie_actor():
56      ds = Datasette(memory=True)

til/sphinx/blacken-docs.md

23  
24  ```python
25  @pytest.fixture
26  def datasette(tmp_path_factory):
27      # This fixture will be executed repeatedly for every test
29  This is because of the missing function body. It turns out adding `...` (which looks prettier than `pass`) fixes this issue:
30  ```python
31  @pytest.fixture
32  def datasette(tmp_path_factory):
33      # This fixture will be executed repeatedly for every test

til/readthedocs/pip-install-docs.md

10      # ...
11      extras_require={
12          "test": ["pytest", "pytest-asyncio", "black", "cogapp", "ruff"],
13          "docs": [
14              "sphinx==7.2.6",

til/pytest/treat-warnings-as-errors.md

1   # Treating warnings as errors in pytest
2   
3   I was seeing this warning in a Django project when I thought I was correctly using timezone-aware dates everywhere:
5   > RuntimeWarning: DateTimeField Shift.shift_start received a naive datetime (2022-04-01 00:00:00) while time zone support is active
6   
7   Running `pytest -Werror` turns those warnings into errors that fail the tests.
8   
9   Which means you can investigate them in the Python debugger by running:
10  
11      pytest -Werror --pdb -x
12  
13  The `--pdb` starts the debugger at the warning (now error) and the `-x` stops the tests after the first failure.
14  
15  ## In pytest.ini
16  
17  You can also set this in `pytest.ini` - useful if you want ALL warnings to be failures in both development and CI.
18  
19  Add the following to the `pytest.ini` file:
20  
21  ```ini
22  [pytest]
23  # ...
24  filterwarnings =
31  
32  ```ini
33  [pytest]
34  # ...
35  filterwarnings =
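
The filter values themselves are elided in this excerpt. A minimal sketch of a `pytest.ini` that treats every warning as an error (which is what this TIL describes) might look like:

```ini
[pytest]
# Turn every warning raised during the test run into a test failure
filterwarnings =
    error
```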

til/pytest/syrupy.md

3   I'm a big fan of snapshot testing - writing tests where you compare the output of some function to a previously saved version, and can re-generate that version from scratch any time something changes.
4   
5   I usually do this by hand - I run `pytest -x --pdb` to stop at the first failing test and drop into a debugger, then copy out the representation of the generated value and copy it into the test. I wrote about how I use this pattern a few years ago in [How to cheat at unit tests with pytest and Black](https://simonwillison.net/2020/Feb/11/cheating-at-unit-tests-pytest-black/).
6   
7   Today I learned how to do the same thing with the [Syrupy](https://github.com/tophat/syrupy) plugin for [pytest](https://docs.pytest.org/). I think I'll be using this for many of my future projects.
8   
9   ## Some initial tests
19      assert snapshot == {"foo": [1, 2, 3], "bar": {"baz": "qux"}}
20  ```
21  Then I installed both `pytest` and `syrupy`:
22  
23  ```bash
24  pip install pytest syrupy
25  ```
26  Now in my parent folder I can run this:
27  ```bash
28  pytest
29  ```
30  And the tests fail:
67  The snapshots don't exist yet. But I can create them automatically by running this:
68  ```bash
69  pytest --snapshot-update
70  ```
71  Which outputs passing tests along with:
94  # ---
95  ```
96  Running `pytest` again passes, because the snapshots exist and continue to match the test output.
97  
98  The serialized snapshot format is designed to be checked into Git. It's pleasantly readable - I can review that and see what it's testing, and I could even update it by hand - though I'll much more likely use the `--snapshot-update` flag and then eyeball the differences.
123     assert Foo(1, "hello") == snapshot
124 ```
125 Running `pytest` again failed. `pytest --snapshot-update` passed and updated my snapshot file, adding this to it:
126 ```
127 # name: test_three
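
The test bodies are mostly elided here; a minimal sketch of a Syrupy-style test (the `snapshot` fixture comes from the plugin, the data is made up):

```python
# test_demo.py - compare any serializable value against a stored snapshot
def test_against_snapshot(snapshot):
    result = {"foo": [1, 2, 3], "bar": {"baz": "qux"}}
    assert result == snapshot
```

Running `pytest --snapshot-update` writes the snapshot file; a plain `pytest` run then compares against it.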

til/pytest/subprocess-server.md

1   # Start a server in a subprocess during a pytest session
2   
3   I wanted to start an actual server process, run it for the duration of my pytest session and shut it down at the end.
4   
5   Here's the recipe I came up with. This fixture lives in `conftest.py`:
6   
7   ```python
8   import pytest
9   import sqlite_utils
10  import subprocess
11  
12  @pytest.fixture(scope="session")
13  def ds_server(tmp_path_factory):
14      db_directory = tmp_path_factory.mktemp("dbs")
31      assert not ds_proc.poll(), ds_proc.stdout.read().decode("utf-8")
32      yield ds_proc
33      # Shut it down at the end of the pytest session
34      ds_proc.terminate()
35  ```
47  While [adding tests to Datasette Lite](https://github.com/simonw/datasette-lite/issues/35) I found myself needing to run a localhost server that served static files directly.
48  
49  I completely forgot about this TIL, and instead took inspiration [from pytest-simplehttpserver](https://github.com/ppmdo/pytest-simplehttpserver/blob/a82ad31912121c074ff1a76c4628a1c42c32b41b/src/pytest_simplehttpserver/pytest_plugin.py#L17-L28) - coming up with this pattern:
50  
51  ```python
52  from subprocess import Popen, PIPE
53  import pathlib
54  import pytest
55  import time
56  from http.client import HTTPConnection
59  
60  
61  @pytest.fixture(scope="module")
62  def static_server():
63      process = Popen(
85  Again, including `static_server` as a fixture is enough to ensure requests to `http://localhost:8123/` will be served by that temporary server.
86  
87  I like how this version polls for a successful HEAD request (a trick inspired by `pytest-simplehttpserver`) rather than just sleeping.
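
The `static_server` fixture is truncated above. A hedged sketch of the polling pattern it describes (the port and the server command are assumptions, not taken from the excerpt):

```python
from http.client import HTTPConnection
from subprocess import Popen
import time

import pytest


@pytest.fixture(scope="module")
def static_server():
    # Serve the current directory on a hypothetical port
    process = Popen(["python3", "-m", "http.server", "8123"])
    # Poll with HEAD requests until the server responds, rather than sleeping
    for _ in range(10):
        try:
            conn = HTTPConnection("localhost", 8123)
            conn.request("HEAD", "/")
            if conn.getresponse() is not None:
                break
        except ConnectionRefusedError:
            time.sleep(0.5)
    else:
        process.terminate()
        raise RuntimeError("Server never came up")
    yield process
    # Shut the server down once the module's tests have finished
    process.terminate()
    process.wait()
```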

til/pytest/session-scoped-tmp.md

1   # Session-scoped temporary directories in pytest
2   
3   I habitually use the `tmpdir` fixture in pytest to get a temporary directory that will be cleaned up after each test, but that doesn't work with `scope="session"` - which can be used to ensure an expensive fixture is run only once per test session and the generated content is used for multiple tests.
4   
5   To get a temporary directory that works with `scope="session"`, use the `tmp_path_factory` built-in pytest fixture like this:
6   
7   ```python
8   import pytest
9   
10  
11  @pytest.fixture(scope="session")
12  def template_dir(tmp_path_factory):
13      template_dir = tmp_path_factory.mktemp("page-templates")
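
The fixture body is cut off here; a sketch of how it might continue (the generated file is just an illustration):

```python
import pytest


@pytest.fixture(scope="session")
def template_dir(tmp_path_factory):
    template_dir = tmp_path_factory.mktemp("page-templates")
    # Expensive one-off setup goes here; every test in the session
    # then shares the same directory
    (template_dir / "index.html").write_text("<h1>Hello</h1>", encoding="utf-8")
    return template_dir
```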

til/pytest/registering-plugins-in-tests.md

8   from datasette import hookimpl
9   from datasette.plugins import pm
10  import pytest
11  
12  
27  ```
28  
29  Here's [an example](https://github.com/simonw/datasette-insert/blob/7f4c2b3954190d547619d043bbe714481b10ac1e/tests/test_insert_api.py) of a test that uses a pytest fixture to register (and de-register) a plugin:
30  
31  ```python
33  from datasette.app import Datasette
34  from datasette.plugins import pm
35  import pytest
36  
37  
38  @pytest.fixture
39  def unsafe():
40      class UnsafeInsertAll:
51  
52  
53  @pytest.mark.asyncio
54  async def test_insert_alter(ds, unsafe):
55      async with httpx.AsyncClient(app=ds.app()) as client:
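
Most of the fixture is elided above. A hedged sketch of the register/un-register pattern, using a made-up plugin class and the `extra_template_vars` hook:

```python
from datasette import hookimpl
from datasette.plugins import pm
import pytest


@pytest.fixture
def extra_plugin():
    # Hypothetical plugin registered only for tests that request this fixture
    class ExtraTemplateVars:
        __name__ = "ExtraTemplateVars"

        @hookimpl
        def extra_template_vars(self):
            return {"from_test_plugin": True}

    pm.register(ExtraTemplateVars(), name="extra-template-vars")
    try:
        yield
    finally:
        # Always de-register, even if the test fails
        pm.unregister(name="extra-template-vars")
```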

til/pytest/pytest-subprocess.md

1   # Mocking subprocess with pytest-subprocess
2   
3   For [apple-notes-to-sqlite](https://github.com/dogsheep/apple-notes-to-sqlite) I needed to write some tests that simulated executing the `osascript` command using the Python `subprocess` module.
5   I wanted my tests to run on Linux CI machines, where that command would not exist.
6   
7   After failing to use `unittest.mock.patch` to solve this, I went looking for alternatives. I found [pytest-subprocess](https://pypi.org/project/pytest-subprocess/).
8   
9   Here's the relevant section of [the test I wrote](https://github.com/dogsheep/apple-notes-to-sqlite/blob/0.1/tests/test_apple_notes_to_sqlite.py):
25          # ...
26  ```
27  `fp` is the fixture provided by the package (you need to `pip install pytest-subprocess` for this to work).
28  
29  `COUNT_SCRIPT` here is the first of my `osascript` constants. It looks like this (in `cli.py`):
51  I eventually figured out that using `fp.any()` was easier than specifying the exact script. This is a wildcard value which matches any string. It returns the full `FAKE_OUTPUT` variable as the simulated standard out.
52  
53  What's useful about `pytest-subprocess` is that it works for both `subprocess.check_output()` and more complex `subprocess.Popen()` calls - both of which I was using in this script.

til/pytest/pytest-recording-vcr.md

1   # Using VCR and pytest with pytest-recording
2   
3   [pytest-recording](https://github.com/kiwicom/pytest-recording) is a neat pytest plugin that makes it easy to use the [VCR library](https://vcrpy.readthedocs.io/), which helps write tests against HTTP resources by automatically capturing responses and baking them into a YAML file to be replayed during the tests.
4   
5   It even works with [boto3](https://aws.amazon.com/sdk-for-python/)!
6   
7   To use it, first install it with `pip install pytest-recording` and then add the `@pytest.mark.vcr` decorator to a test that makes HTTP calls:
8   
9   ```python
10  @pytest.mark.vcr
11  def test_create():
12      runner = CliRunner()
13      with runner.isolated_filesystem():
14          result = runner.invoke(cli, ["create", "pytest-bucket-simonw-1", "-c"])
15          assert result.exit_code == 0
16  ```
18  The first time you run the tests, use the `--record-mode=once` option:
19  
20      pytest -k test_create --record-mode=once
21  
22  This defaults to creating a YAML file in `tests/cassettes/test_s3_credentials/test_create.yaml`.
23  
24  Subsequent runs of `pytest -k test_create` will reuse those recorded HTTP requests and will not make any network requests - I confirmed this by turning off my laptop's WiFi.

til/pytest/pytest-mock-calls.md

3   I needed to write a test that checked for a really complex sequence of mock calls for [s3-credentials#3](https://github.com/simonw/s3-credentials/issues/3).
4   
5   I ended up using the following trick, using [pytest-mock](https://pypi.org/project/pytest-mock/):
6   
7   ```python
10      runner = CliRunner()
11      with runner.isolated_filesystem():
12          result = runner.invoke(cli, ["create", "pytest-bucket-simonw-1", "-c"])
13          assert [str(c) for c in boto3.mock_calls] == [
14              "call('s3')",
15              "call('iam')",
16              "call().head_bucket(Bucket='pytest-bucket-simonw-1')",
17              "call().get_user(UserName='s3.read-write.pytest-bucket-simonw-1')",
18              'call().put_user_policy(PolicyDocument=\'{"Version": "2012-10-17", "Statement": [{"Sid": "ListObjectsInBucket", "Effect": "Allow", "Action": ["s3:ListBucket"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1"]}, {"Sid": "AllObjectActions", "Effect": "Allow", "Action": "s3:*Object", "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1/*"]}]}\', PolicyName=\'s3.read-write.pytest-bucket-simonw-1\', UserName=\'s3.read-write.pytest-bucket-simonw-1\')',
19              "call().create_access_key(UserName='s3.read-write.pytest-bucket-simonw-1')",
20              "call().create_access_key().__getitem__('AccessKey')",
21              "call().create_access_key().__getitem__().__str__()",
22          ]
23  ```
24  I used the trick I describe in [How to cheat at unit tests with pytest and Black](https://simonwillison.net/2020/Feb/11/cheating-at-unit-tests-pytest-black/) where I run that comparison against an empty `[]` list, then use `pytest --pdb` to drop into a debugger and copy and paste the output of `[str(c) for c in boto3.mock_calls]` into my test code.
25  
26  Initially I used a comparison directly against `boto3.mock_calls` - but this threw a surprising error. The calls sequence I baked into my tests looked like this:
34              call("s3"),
35              call("iam"),
36              call().head_bucket(Bucket="pytest-bucket-simonw-1"),
37              call().get_user(UserName="s3.read-write.pytest-bucket-simonw-1"),
38              call().put_user_policy(
39                  PolicyDocument='{"Version": "2012-10-17", "Statement": [{"Sid": "ListObjectsInBucket", "Effect": "Allow", "Action": ["s3:ListBucket"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1"]}, {"Sid": "AllObjectActions", "Effect": "Allow", "Action": "s3:*Object", "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1/*"]}]}',
40                  PolicyName="s3.read-write.pytest-bucket-simonw-1",
41                  UserName="s3.read-write.pytest-bucket-simonw-1",
42              ),
43              call().create_access_key(UserName="s3.read-write.pytest-bucket-simonw-1"),
44              call().create_access_key().__getitem__("AccessKey"),
45              call().create_access_key().__getitem__().__str__(),
46          ]
47  ```
48  But when I ran `pytest` that last one failed:
49  ```
50  E             -  'call().create_access_key().__getitem__()',

til/pytest/pytest-code-coverage.md

1   # Code coverage using pytest and codecov.io
2   
3   I got my [asgi-csrf](https://github.com/simonw/asgi-csrf) Python package up to 100% code coverage. Here's [the pull request](https://github.com/simonw/asgi-csrf/issues/13).
4   
5   I started by installing and using the [pytest-cov](https://pypi.org/project/pytest-cov/) pytest plugin.
6   
7   ```
8   pip install pytest-cov
9   pytest --cov=asgi_csrf
10  ```
11  This shows the current code coverage percentage for the `asgi_csrf` module in the terminal output:
25  To generate an HTML report showing which lines are not covered by tests:
26  ```
27  pytest --cov=asgi_csrf --cov-report=html
28  open htmlcov/index.html
29  ```
35  
36  ```
37  pytest --cov-fail-under=100 --cov asgi_csrf 
38  ======= test session starts =======
39  platform darwin -- Python 3.7.3, pytest-6.0.1, py-1.9.0, pluggy-0.13.1
40  rootdir: /Users/simon/Dropbox/Development/asgi-csrf
41  plugins: cov-2.10.1, asyncio-0.14.0
55      - name: Run tests
56        run: |
57          pytest --cov-fail-under=100 --cov asgi_csrf
58  ```
59  ## Pushing results to codecov.io

til/pytest/pytest-argparse.md

1   # Writing pytest tests against tools written with argparse
2   
3   I usually build command-line tools using [Click](https://click.palletsprojects.com/) (and my [click-app](https://github.com/simonw/click-app) cookiecutter template), which includes a really nice [set of tools](https://click.palletsprojects.com/en/8.0.x/testing/) for writing tests.
5   Today I decided to try building a tool called [stream-delay](https://github.com/simonw/stream-delay) using [argparse](https://docs.python.org/3/library/argparse.html) from the Python standard library, since it didn't need any other dependencies.
6   
7   The one challenge I had was how to write the tests. I used [pytest](https://pytest.org/) as a test-only dependency.
8   
9   Here's the pattern I came up with, using the [capsys pytest fixture](https://docs.pytest.org/en/6.2.x/capture.html) to capture standard output from my tool.
10  
11  ```python
12  from stream_delay import main
13  import pytest
14  
15  @pytest.mark.parametrize("option", ("-h", "--help"))
16  def test_help(capsys, option):
17      try:
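
The test is cut off at the `try:`. A self-contained sketch of the `capsys` pattern, with a stand-in `main()` instead of the real `stream_delay` entry point:

```python
import argparse

import pytest


def main(argv=None):
    # Stand-in for the real stream-delay entry point
    parser = argparse.ArgumentParser(prog="stream-delay")
    parser.add_argument("--delay", type=float, default=0.1)
    parser.parse_args(argv)


@pytest.mark.parametrize("option", ("-h", "--help"))
def test_help(capsys, option):
    # argparse prints help to stdout and then raises SystemExit
    with pytest.raises(SystemExit):
        main([option])
    output = capsys.readouterr().out
    assert "usage:" in output
```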

til/pytest/playwright-pytest.md

1   # Using pytest and Playwright to test a JavaScript web application
2   
3   I [decided to add](https://github.com/simonw/datasette-lite/issues/35) automated tests to my [Datasette Lite](https://simonwillison.net/2022/May/4/datasette-lite/) project. Datasette Lite bundles my Datasette Python web application as a client-side application running inside WebAssembly using Pyodide.
4   
5   I wrote the tests using [playwright-pytest](https://github.com/microsoft/playwright-pytest), which lets you write tests in Python using Microsoft's [Playwright](https://playwright.dev/) browser automation library.
6   
7   ## Installing playwright-pytest
8   
9   Two steps:
10  
11      pip install playwright-pytest
12  
13  Then a second step to install the browsers used by Playwright itself:
15      playwright install
16  
17  I had those browsers installed already, but I still needed to run that command since the updated version of `playwright-pytest` needed more recent versions.
18  
19  (I had limited internet while doing this, and discovered that you can trick Playwright into using an older browser version by renaming a folder in `~/Library/Caches/ms-playwright` to the one that shows up in the error message that says that the browsers cannot be found.)
40  Then run the test by running this in the same directory as that file:
41  
42      pytest
43  
44  `playwright-pytest` provides the `page` fixture - annotating it with `: Page` is optional but if you do that then VS Code knows what it is and can provide autocomplete in the editor.
45  
46  `page.goto()` causes the browser to navigate to that URL.
67  ```
68  
69  ## pytest options
70  
71  The `playwright-pytest` package adds a bunch of new options to `pytest`. The most useful is `--headed`:
72  
73      pytest --headed
74  
75  This runs the tests in "headed" mode - which means a visible browser window pops up during the tests so you can see what is happening.
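
As a standalone illustration of the `page` fixture and `page.goto()` described above (the URL and assertion are placeholders, not taken from the excerpt):

```python
from playwright.sync_api import Page


def test_example_homepage(page: Page):
    # The page fixture is provided by playwright-pytest
    page.goto("https://example.com/")
    assert "Example Domain" in page.title()
```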
87  I wanted to run the tests against the most recent version of my code, which consists of an `index.html` file and a `webworker.js` file. Because these use web workers they need to be run from an actual localhost web server, so I needed to start one at the beginning of the tests and shut it down at the end.
88  
89  I wrote about my solution for this in another TIL: [Start a server in a subprocess during a pytest session](https://til.simonwillison.net/pytest/subprocess-server).
90  
91  ## My test suite so far
97  from subprocess import Popen, PIPE
98  import pathlib
99  import pytest
100 import time
101 from http.client import HTTPConnection
104 
105 
106 @pytest.fixture(scope="module")
107 def static_server():
108     process = Popen(
129 
130 
131 @pytest.fixture(scope="module")
132 def dslite(static_server, browser: Browser) -> Page:
133     page = browser.new_page()
208     - name: Run test
209       run: |
210         pytest
211 ```
212 [dev-requirements.txt](https://raw.githubusercontent.com/simonw/datasette-lite/main/dev-requirements.txt) contains this:
213 ```
214 pytest-playwright==0.3.0
215 playwright==1.24.0
216 ```

til/pytest/only-run-integration.md

1   # Opt-in integration tests with pytest --integration
2   
3   For both [s3-credentials](https://github.com/simonw/s3-credentials) and [datasette-publish-fly](https://github.com/simonw/datasette-publish-fly) I have a need for real-world integration tests that actually interact with the underlying APIs (AWS or Fly) to create and destroy resources on those platforms.
4   
5   Most of the time I want my tests to run without doing these. I want the option to run `pytest --integration` to opt-in to running those extra integration tests.
6   
7   Here's the pattern I'm using. First, in `tests/conftest.py`:
8   
9   ```python
10  import pytest
11  
12  
13  def pytest_addoption(parser):
14      parser.addoption(
15          "--integration",
20  
21  
22  def pytest_configure(config):
23      config.addinivalue_line(
24          "markers",
27  
28  
29  def pytest_collection_modifyitems(config, items):
30      if config.getoption("--integration"):
31          # Also run integration tests
32          return
33      skip_integration = pytest.mark.skip(reason="use --integration option to run")
34      for item in items:
35          if "integration" in item.keywords:
36              item.add_marker(skip_integration)
37  ```
38  This implements a `@pytest.mark.integration` marker which I can use to mark any test that should be considered part of the integration test suite. These will be skipped by default... but will not be skipped if the `--integration` option is passed to `pytest`.
39  
40  Then in the tests I can either do this:
41  
42  ```python
43  @pytest.mark.integration
44  def test_integration_s3():
45      # ...
47  Or if I have a module that only contains integration tests - `tests/test_integration.py` - I can use the following line to apply that decorator to every test in the module:
48  ```python
49  import pytest
50  
51  pytestmark = pytest.mark.integration
52  
53  def test_integration_s3():

til/pytest/mocking-boto.md

7   I use [moto](https://github.com/spulec/moto) to simulate AWS in that test suite, but moto does not yet have a mechanism for simulating Textract errors like this one.
8   
9   I ended up turning to Python mocks, here provided by the [pytest-mock](https://pypi.org/project/pytest-mock/) fixture. Here's the test I came up with:
10  
11  ```python

til/pytest/mock-httpx.md

1   # How to mock httpx using pytest-mock
2   
3   I wrote this test to exercise some [httpx](https://pypi.org/project/httpx/) code today, using [pytest-mock](https://pypi.org/project/pytest-mock/).
4   
5   The key was to use `mocker.patch.object(cli, "httpx")` which patches the `httpx` module that was imported by the `cli` module.
6   
7   Here the `mocker` function argument is a fixture that is provided by `pytest-mock`.
8   
9   ```python
45  Here's a mock for a GraphQL POST request that returns JSON:
46  ```python
47  @pytest.fixture
48  def mock_graphql_region(mocker):
49      m = mocker.patch("datasette_publish_fly.httpx")
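
A self-contained sketch of the `mocker.patch.object(cli, "httpx")` technique; here `cli` is a stand-in namespace rather than a real module:

```python
import types

import httpx

# Stand-in for a module that did `import httpx` at the top level
cli = types.SimpleNamespace(httpx=httpx)


def fetch_status():
    return cli.httpx.get("https://example.com/status.json").json()


def test_fetch_status(mocker):
    # mocker is the pytest-mock fixture; patch httpx as seen by the cli module
    mocked_httpx = mocker.patch.object(cli, "httpx")
    mocked_httpx.get.return_value.json.return_value = {"ok": True}

    assert fetch_status() == {"ok": True}
    mocked_httpx.get.assert_called_once()
```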

til/pytest/coverage-with-context.md

1   # pytest coverage with context
2   
3   [This tweet](https://twitter.com/mariatta/status/1499863816489734146) from \@Mariatta tipped me off to the ability to measure "contexts" when [running coverage](https://coverage.readthedocs.io/en/6.3.2/contexts.html#context-reporting) - as a way to tell which tests exercise which specific lines of code.
4   
5   My [sqlite-utils](https://github.com/simonw/sqlite-utils) project uses `pytest` for the test suite. I decided to figure out how to get this working with [pytest-cov](https://pypi.org/project/pytest-cov/).
6   
7   After some experimentation, this is the recipe that worked for me:
8   
9   ```
10  # In the virtual environment, make sure pytest-cov is installed:
11  % pip install pytest-cov
12  # First, run pytest to calculate coverage of the `sqlite_utils` package, with context
13  % pytest --cov=sqlite_utils --cov-context=test
14  # The .coverage file is actually a SQLite database:
15  % ls -lah .coverage

til/pytest/async-fixtures.md

1   # Async fixtures with pytest-asyncio
2   
3   I wanted to use a fixture with `pytest-asyncio` that was itself an `async def` function, so that it could execute `await` statements.
4   
5   Since I'm using a `pytest.ini` file containing `asyncio_mode = strict` I had to use the `@pytest_asyncio.fixture` decorator to get this to work. Without it I got this error:
6   
7   ```
12  ```
13  
14  Swapping `@pytest.fixture` for `@pytest_asyncio.fixture` fixed this problem:
15  
16  ```python
17  import pytest_asyncio
18  
19  @pytest_asyncio.fixture
20  async def ds_with_route():
21      ds = Datasette()
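
The fixture body is truncated; a hedged sketch of the decorator swap (the `invoke_startup()` call and the test are illustrative, not from the excerpt):

```python
import pytest
import pytest_asyncio
from datasette.app import Datasette


@pytest_asyncio.fixture
async def ds_with_route():
    ds = Datasette()
    # An async fixture can await things during setup
    await ds.invoke_startup()
    return ds


@pytest.mark.asyncio
async def test_instance(ds_with_route):
    response = await ds_with_route.client.get("/-/versions.json")
    assert response.status_code == 200
```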

til/pytest/assert-dictionary-subset.md

1   # Asserting a dictionary is a subset of another dictionary
2   
3   My [lazy approach to writing unit tests](https://simonwillison.net/2020/Feb/11/cheating-at-unit-tests-pytest-black/) means that sometimes I want to run an assertion against most (but not all) of a dictionary.
4   
5   Take for example an API endpoint that returns something like this:

til/python/too-many-open-files-psutil.md

11  I ran `pip install psutil` in my virtual environment.
12  
13  Then I ran `pytest --pdb` to drop into a Python debugger when a test failed.
14  
15  In the debugger I ran this:

til/python/setup-py-from-url.md

7   ```python
8       extras_require={
9           "test": ["pytest", "black", "hypothesis", "cogapp"],
10          "docs": ["furo", "sphinx-autobuild", "codespell", "sphinx-copybutton"],
11          "mypy": [
36  ```python
37      extras_require={
38          "test": ["pytest", "black", "hypothesis", "cogapp"],
39          "docs": [
40              "furo",

til/pytest/show-files-opened-by-tests.md

1   # Show files opened by pytest tests
2   
3   My test suite for [Datasette](https://github.com/simonw/datasette) has grown so large that running the whole thing sometimes causes me to run out of file handles.
4   
5   I've not solved this yet, but I did figure out a pattern to get `pytest` to show me which new files were opened by which tests.
6   
7   Add the following to `conftest.py`:
11  
12  
13  @pytest.fixture(autouse=True)
14  def check_for_new_file_handles(request):
15      proc = psutil.Process()
24  This uses [psutil](https://pypi.org/project/psutil/) (`pip install psutil`) to build a set of the open files before and after the test runs. It then uses a list comprehension to figure out which file handles are new.
25  
26  Using `@pytest.fixture(autouse=True)` means it will automatically be used for every test.
27  
28  It's a `yield` fixture, which means the part of the code before the `yield` statement runs before the test, then the part afterwards runs after the test function has finished.
29  
30  Accepting the `request` argument means it gets access to a `pytest` request object, which includes `request.node`, an object representing the test that is being executed.
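
Pulling that description together, a sketch of such a fixture (the exact reporting format is a guess):

```python
import psutil
import pytest


@pytest.fixture(autouse=True)
def check_for_new_file_handles(request):
    proc = psutil.Process()
    before_files = set(proc.open_files())
    yield
    # Anything still open now that was not open before the test started
    new_files = [f for f in proc.open_files() if f not in before_files]
    if new_files:
        print("{} opened files: {}".format(request.node, new_files))
```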
31  
32  You need to run `pytest -s` to see the output (without the `-s` the output is hidden).
33  
34  Example output:

til/python/quick-testing-pyenv.md

62  Now I can run the tests like this:
63  ```bash
64  /tmp/py38env/bin/pytest
65  ```

til/python/pyproject.md

203 ## Test dependencies
204 
205 I like being able to run `pip install -e '.[test]'` to install test dependencies - things like `pytest`, which are needed to run the project tests but shouldn't be bundled with the project itself when it is installed.
206 
207 Those can be added in a section like this:
209 ```toml
210 [project.optional-dependencies]
211 test = ["pytest"]
212 ```
213 I added that to my `/tmp/demo-package/pyproject.toml` file, then ran this in a separate virtual environment:
215 pip install -e '/tmp/demo-package[test]'
216 ```
217 The result was an installation of `pytest`, visible when I ran `pip freeze`.
218 
219 ## Package data

til/python/pip-tools.md

14  psycopg2-binary
15  dj-database-url
16  pytest-django
17  django-extensions
18  django-htmx
21  httpx
22  sentry-sdk
23  pytest-httpx
24  ics==0.7
25  ```

til/python/pdb-interact.md

3   Today [Carlton told me](https://twitter.com/carltongibson/status/1587155176590385159) about the [interact command](https://docs.python.org/3.10/library/pdb.html#pdbcommand-interact) in the Python debugger.
4   
5   Here's how to use it with `pytest` (but it works anywhere else where you find yourself in a `pdb` session).
6   
7   Use `pytest --pdb` to cause `pytest` to open a debugger at the first failed assertion (I added `assert False` to my test suite to demonstrate this).
8   
9   Then type `interact` to drop into a full Python interactive prompt that keeps all of the local and global variables from the debugger:
10  
11  ```
12  % pytest -k test_drop --pdb                                               
13  ======== test session starts ========
14  platform darwin -- Python 3.10.3, pytest-7.1.3, pluggy-1.0.0
15  ...
16  >       assert False
26  >>> locals().keys()
27  dict_keys(['__name__', '__doc__', '__package__', '__loader__', '__spec__', '__file__', '__cached__', '__builtins__',
28    '@py_builtins', '@pytest_ar', 'Datasette', 'sqlite3', 'pytest', 'time', 'ds_write', 'write_token', 'test_write_row',
29    'test_write_rows', 'test_write_row_errors', 'test_delete_row', 'test_drop_table', 'scenario', 'token', 'should_work',
30    'path', 'response', '@py_assert0', '@py_format2'])

til/python/os-remove-windows.md

4   
5   ```python
6   @pytest.mark.parametrize(
7       "use_path,file_exists", [(True, True), (True, False), (False, True), (False, False)]
8   )
30  FAILED tests/test_recreate.py::test_recreate[True-True] - 
31    PermissionError: [WinError 32] The process cannot access the file because it is being used by another process:
32    'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\pytest-of-runneradmin\\pytest-0\\test_recreate_True_True_0\\data.db'
33  ```
34  Eventually I spotted the problem: my call on this line was opening a SQLite connection to the `data.db` file:

til/python/introspect-function-parameters.md

28      assert "1+2" == utils.call_with_supported_arguments(foo, a=1, b=2, c=3)
29  
30      with pytest.raises(TypeError):
31          utils.call_with_supported_arguments(foo, a=1)
32  ```

til/python/cog-to-update-help-in-readme.md

45  Any time I generate content like this in a repo I like to include a test that will fail if I forget to update the content.
46  
47  `cog` clearly isn't designed to be used as an independent library, but I came up with the following `pytest` test pattern, which works well, in my `tests/test_csvs_to_sqlite.py` module:
48  
49  ```python

til/python/callable.md

33  `check_callable(obj)` returns a `CallableStatus` named tuple, with an `is_callable` boolean saying if it can be called and an `is_async_callable` boolean specifying if you need to use `await` with it.
34  
35  I wrote these `pytest` tests to exercise the `check_callable()` function:
36  
37  ```python
38  import pytest
39  
40  
61  
62  
63  @pytest.mark.parametrize(
64      "obj,expected_is_callable,expected_is_async_callable",
65      (

til/pypi/pypi-releases-from-github.md

83      - name: Run tests
84        run: |
85          pytest
86    deploy:
87      runs-on: ubuntu-latest
107       uses: pypa/gh-action-pypi-publish@release/v1
108 ```
109 The `test` job is pretty standard - it sets up a matrix to run the tests against multiple Python versions, then runs `pytest`.
110 
111 It's set to trigger by this block:

til/pluggy/multiple-hooks-same-file.md

26  Which allows you to write more than one plugin implementation function in the same Python module file.
27  
28  Note that the `specname` feature requires [Pluggy 1.0.0](https://github.com/pytest-dev/pluggy/blob/main/CHANGELOG.rst#pluggy-100-2021-08-25) or higher.
29  
30  These can be combined with `tryfirst=` and `trylast=`. This example adds one link at the start of the Datasette application menu and one at the end, using the [menu_links hook](https://docs.datasette.io/en/stable/plugin_hooks.html#menu-links-datasette-actor-request).

til/llms/openai-embeddings-related-content.md

256 - [Docker Compose for Django development](https://til.simonwillison.net/docker/docker-compose-for-django-development) and [Running a Django and PostgreSQL development environment in GitHub Codespaces](https://til.simonwillison.net/github/django-postgresql-codespaces) - 0.896930635052645
257 - [Installing Python on macOS with the official Python installer](https://til.simonwillison.net/macos/python-installer-macos) and [macOS Catalina sort-of includes Python 3](https://til.simonwillison.net/python/macos-catalina-sort-of-ships-with-python3) - 0.892173321940446
258 - [Testing Electron apps with Playwright and GitHub Actions](https://til.simonwillison.net/electron/testing-electron-playwright) and [Using pytest and Playwright to test a JavaScript web application](https://til.simonwillison.net/pytest/playwright-pytest) - 0.892025528713046
259 - [Pisco sour](https://til.simonwillison.net/cocktails/pisco-sour) and [Whisky sour](https://til.simonwillison.net/cocktails/whisky-sour) - 0.891786930904611
260 - [Using pysqlite3 on macOS](https://til.simonwillison.net/sqlite/pysqlite3-on-macos) and [Loading SQLite extensions in Python on macOS](https://til.simonwillison.net/sqlite/sqlite-extensions-python-macos) - 0.890980471839453

til/gpt3/writing-test-with-copilot.md

36          return _error(["Duplicate column name: {}".format(", ".join(dupes))])
37  ```
38  I wanted to write tests for each of the error cases. I'd already constructed the start of a parameterized `pytest` test for these.
39  
40  I got Copilot/GPT-3 to write most of the tests for me.

til/github-actions/service-containers-docker.md

49            my-tag
50  ```
51  My `github-actions-runtests.sh` file uses [pytest-django](https://pytest-django.readthedocs.io/) and looks like this:
52  ```bash
53  #!/bin/bash
54  cd /app
55  pytest --ds=config.test_settings
56  ```

til/github-actions/running-tests-against-multiple-verisons-of-dependencies.md

31      - name: Run tests
32        run: |
33          pytest
34  ```
35  The trick here is to set up a matrix for `datasette-version` (to accompany my existing `python-version` one) defining these two installation specifiers:
47  The end result of this is that tests run against the highest Datasette release in the `0.x` series, and also against the highest release in the `1.x` series, including alphas if no `1.x` stable release is out yet.
48  
49  ## Adding extra version information to the pytest report
50  
51  When using this pattern, it can be useful to include the Datasette version in the output of the `pytest` command.
52  
53  Here's an easy way to do that: add the following to `tests/conftest.py`:
57  
58  
59  def pytest_report_header():
60      return "Datasette: {}".format(datasette.__version__)
61  ```
62  Running `pytest` will now output the following:
63  ```
64  ============================ test session starts ============================
65  platform darwin -- Python 3.9.17, pytest-7.4.2, pluggy-1.3.0
66  Datasette: 1.0a6
67  rootdir: /Users/...

til/github-actions/postgresq-service-container.md

1   # Running tests against PostgreSQL in a service container
2   
3   I wanted to run some Django tests - using `pytest-django` and with Django configured to pick up the `DATABASE_URL` environment variable via [dj-database-url](https://github.com/jacobian/dj-database-url) - against a PostgreSQL server running in GitHub Actions.
4   
5   It took a while to figure out the right pattern. The trick was to define a `postgres:` service and then set the `DATABASE_URL` environment variable to the following:
49        run: |
50          cd myproject
51          pytest
52  ```
53  
71        env:
72          MYSQL_TEST_DB_CONNECTION: mysql://root@127.0.0.1:${{ job.services.mysql.ports['3306'] }}/test_db_to_sqlite
73        run: pytest -vv
74  ```

til/github-actions/different-postgresql-versions.md

42          export POSTGRESQL_PATH="/usr/lib/postgresql/$POSTGRESQL_VERSION/bin/postgres"
43          export INITDB_PATH="/usr/lib/postgresql/$POSTGRESQL_VERSION/bin/initdb"
44          pytest
45  ```
46  I modified my tests to call the `postgres` and `initdb` binaries specified by the `POSTGRESQL_PATH` and `INITDB_PATH` environment variables.

til/github-actions/deploy-live-demo-when-tests-pass.md

38      - name: Run tests
39        run: |
40          pytest
41    deploy_demo:
42      runs-on: ubuntu-latest

til/github-actions/debug-tmate.md

40      steps:
41      - name: Run tests
42        run: pytest
43      - name: tmate session if tests fail
44        if: failure() && github.event_name == 'workflow_dispatch'

til/github-actions/cache-setup-py.md

50      - name: Run tests
51        run: |
52          pytest
53  ```
54  I updated my various cookiecutter templates to use this new pattern in [this issue](https://github.com/simonw/click-app/issues/6).

til/git/git-bisect.md

19  Then you provide a script that will return an error if the bug is present.
20  
21  Usually you would use `pytest` or similar for this, but for the bug I was investigating here I wrote this custom script and saved it as `check_templates_considered.py`:
22  
23  ```python

til/docker/pytest-docker.md

1   # Run pytest against a specific Python version using Docker
2   
3   For [datasette issue #1802](https://github.com/simonw/datasette/issues/1802) I needed to run my `pytest` test suite using a specific Python version (3.7).
4   
5   I decided to do this using Docker, using the official [python:3.7-buster](https://hub.docker.com/_/python/tags?page=1&name=3.7-buster) image.
9   docker run --rm -it -v `pwd`:/code \
10    python:3.7-buster \
11    bash -c "cd /code && pip install -e '.[test]' && pytest"
12  ```
13  
20  It then runs the following using `bash -c`:
21  
22      cd /code && pip install -e '.[test]' && pytest
23  
24  This installs my project's dependencies and test dependencies and then runs `pytest`.
25  
26  The truncated output looks like this:
28  % docker run -it -v `pwd`:/code \
29    python:3.7-buster \
30    bash -c "cd /code && pip install -e '.[test]' && pytest"
31  Obtaining file:///code
32    Preparing metadata (setup.py) ... done
34    Downloading asgiref-3.5.2-py3-none-any.whl (22 kB)
35  ...
36  Installing collected packages: rfc3986, mypy-extensions, iniconfig, zipp, typing-extensions, typed-ast, tomli, soupsieve, sniffio, six, PyYAML, pyparsing, pycparser, py, platformdirs, pathspec, mergedeep, MarkupSafe, itsdangerous, idna, hupper, h11, execnet, cogapp, certifi, attrs, aiofiles, python-multipart, packaging, Jinja2, janus, importlib-metadata, cffi, beautifulsoup4, asgiref, anyio, pluggy, pint, httpcore, cryptography, click, asgi-csrf, uvicorn, trustme, pytest, httpx, click-default-group-wheel, black, pytest-timeout, pytest-forked, pytest-asyncio, datasette, blacken-docs, pytest-xdist
37    Running setup.py develop for datasette
38  ...
39  ========================================================= test session starts ==========================================================
40  platform linux -- Python 3.7.13, pytest-7.1.3, pluggy-1.0.0
41  SQLite: 3.27.2
42  rootdir: /code, configfile: pytest.ini
43  plugins: asyncio-0.19.0, anyio-3.6.1, timeout-2.1.0, xdist-2.5.0, forked-1.4.0
44  asyncio: mode=strict

til/docker/emulate-s390x-with-qemu.md

84            source venv/bin/activate &&
85            pip install -e '.[test]' &&
86            pytest
87            "
88  ```

til/django/testing-django-admin-with-pytest.md

1   # Writing tests for the Django admin with pytest-django
2   
3   I'm using [pytest-django](https://pytest-django.readthedocs.io/) on a project and I wanted to write a test for a Django admin create form submission. Here's the pattern I came up with:
4   
5   ```python
6   from .models import Location
7   import pytest
8   
9   
28      assert location.public_id == "lc"
29  ```
30  The trick here is to use the `client` and `admin_user` pytest-django fixtures ([documented here](https://pytest-django.readthedocs.io/en/latest/helpers.html#fixtures)) to get a configured test client and admin user object, then use `client.force_login(admin_user)` to obtain a session where that user is signed-in to the admin. Then write tests as normal.
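
The test itself is mostly elided; a hedged sketch of the pattern (the admin URL and form fields are hypothetical):

```python
from .models import Location  # hypothetical model, as in the excerpt
import pytest


@pytest.mark.django_db
def test_admin_can_create_location(client, admin_user):
    # client and admin_user are pytest-django fixtures
    client.force_login(admin_user)
    response = client.post(
        "/admin/locations/location/add/",  # hypothetical admin URL
        {"name": "London"},
        follow=True,
    )
    assert response.status_code == 200
    assert Location.objects.filter(name="London").exists()
```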
31  
32  ## Using the admin_client fixture
33  
34  Even better: use the `admin_client` fixture provided by `pytest-django`, which is already signed into the admin:
36  
45  
46  ```python
47  import pytest
48  
49  
50  @pytest.fixture()
51  def admin_client(client, admin_user):
52      client.force_login(admin_user)

til/django/just-with-django.md

33  #    just test -k auth --pdb
34  #
35  # To pass the "-k auth --pdb" options to pytest
36  
37  @test *options:
38    pipenv run pytest {{options}}
39  
40  # This starts the Django development server with an extra environment variable
57  
58  @test *options:
59    pipenv run pytest {{options}}
60  
61  @server:

til/django/building-a-blog-in-django.md

232 
233 ```python
234 import pytest
235 from datetime import datetime
236 from django.contrib.auth.models import User
240 
241 
242 @pytest.fixture
243 def client():
244     from django.test import Client
247 
248 
249 @pytest.fixture
250 def five_entries():
251     author = User.objects.create_user(username="author")
269 
270 
271 @pytest.mark.django_db
272 def test_index_page(client, five_entries):
273     response = client.get("/blog/")
298 
299 
300 @pytest.mark.django_db
301 def test_entry_page(client, five_entries):
302     # Test a draft and a not-draft one
322 
323 
324 @pytest.mark.django_db
325 @pytest.mark.parametrize(
326     "path", ("/blog/", "/blog/archive/", "/blog/2023/", "/blog/tag/all/")
327 )
335 
336 
337 @pytest.mark.django_db
338 def test_atom_feed(client, five_entries):
339     response = client.get("/blog/feed/")

til/datasette/pytest-httpx-datasette.md

1   # Using pytest-httpx to run intercepted requests through an in-memory Datasette instance
2   
3   I've been working on a tool called [dclient](https://github.com/simonw/dclient) which is a CLI client tool for talking to Datasette instances.
5   I wanted to write some tests for that tool which would simulate an entire Datasette instance for it to talk to, without starting a `localhost` server running Datasette itself.
6   
7   I figured out a pattern for doing that using `pytest-httpx` to intercept outbound HTTP requests and send them through a Datasette ASGI application instead.
8   
9   Here's a simplified example of this pattern, with inline comments explaining how it works.
11  Dependencies are:
12  ```bash
13  pip install pytest pytest-httpx httpx datasette
14  ```
15  I saved this as `test_demo.py` and ran it with `pytest test_demo.py`:
16  ```python
17  import asyncio
18  from datasette.app import Datasette
19  import httpx
20  import pytest
21  
22  
23  @pytest.fixture
24  def non_mocked_hosts():
25      # This ensures that httpx-mock will not affect things once a request
37  
38  
39  # The httpx_mock fixtures comes from pytest-httpx
40  def test_get_version(httpx_mock):
41      ds = Datasette()
45      loop = asyncio.get_event_loop()
46  
47      # This function will be called every time pytest-httpx intercepts an HTTP request
48      def custom_response(request: httpx.Request):
49          # Need to run this in async loop, because get_versions uses

til/datasette/playwright-tests-datasette-plugin.md

9   ## Playwright as a test dependency
10  
11  I ended up needing two new test dependencies to get Playwright running: `pytest-playwright` and `nest-asyncio` (for reasons explained later).
12  
13  I added those to my `setup.py` file like this:
14  ```python
15      extras_require={
16          "test": ["pytest", "pytest-asyncio", "sqlite-utils", "nest-asyncio"],
17          "playwright": ["pytest-playwright"]
18      },
19  ```
23  ```toml
24  [project.optional-dependencies]
25  test = ["pytest", "pytest-asyncio", "sqlite-utils", "nest-asyncio"]
26  playwright = ["pytest-playwright"]
27  ```
28  With either of these patterns in place, the new dependencies can be installed like this:
33  ## Running a localhost server for the tests
34  
35  I decided to use a [pytest fixture](https://docs.pytest.org/en/6.2.x/fixture.html) to start a `localhost` server running for the duration of the test. The simplest version of that (`wait_until_responds` from Alex's `datasette-comments`) looks like this:
36  ```python
37  import pytest
38  import sqlite3
39  from subprocess import Popen, PIPE
42  import httpx
43  
44  @pytest.fixture(scope="session")
45  def ds_server(tmp_path_factory):
46      tmpdir = tmp_path_factory.mktemp("tmp")
84  The `ds_server` fixture creates a SQLite database in a temporary directory, runs Datasette against it using `subprocess.Popen()` and then waits for the server to respond to a request. Then it yields the URL to that server - that yielded value will become available to any test that uses that fixture.
85  
86  Note that `ds_server` is marked as `@pytest.fixture(scope="session")`. This means that the fixture will be executed just once per test session and re-used by each test. Without the `scope="session"` the server will be started and then terminated once per test, which is a lot slower.
87  
88  See [Session-scoped temporary directories in pytest](https://til.simonwillison.net/pytest/session-scoped-tmp) for an explanation of the `tmp_path_factory` fixture.
89  
90  Here's what a basic test then looks like (in `tests/test_playwright.py`):
94  except ImportError:
95      sync_api = None
96  import pytest
97  
98  @pytest.mark.skipif(sync_api is None, reason="playwright not installed")
99  def test_homepage(ds_server):
100     with sync_api.sync_playwright() as playwright:
107 Within that test, the full [Python Playwright API](https://playwright.dev/python/docs/writing-tests) is available for interacting with the server and running assertions. Since it's running in a real headless Chromium instance all of the JavaScript will be executed as well.
108 
109 I'm using an `except ImportError` pattern here such that my tests won't fail if Playwright has not been installed. The `@pytest.mark.skipif` decorator causes the test to be marked as skipped if the module was not imported.
110 
111 ## Running the tests
112 
113 With this module in place, running the tests is like any other `pytest` invocation:
114 ```bash
115 pytest
116 ```
117 Or run them specifically like this:
118 ```bash
119 pytest tests/test_playwright.py
120 # or
121 pytest -k test_homepage
122 ```
123 
131 except ImportError:
132     sync_api = None
133 import pytest
134 import nest_asyncio
135 
136 nest_asyncio.apply()
137 
138 pytestmark = pytest.mark.skipif(sync_api is None, reason="playwright not installed")
139 
140 
154 There are two new tricks in here:
155 
156 1. I'm using the `pytestmark = pytest.mark.skipif()` pattern to apply that `skipif` decorator to every test in this file, without needing to repeat it.
157 2. I'm using the `page` fixture [provided by pytest-playwright](https://playwright.dev/python/docs/test-runners#fixtures). This gives me a new `page` object for each test, without me needing to call the `with sync_api.sync_playwright() as playwright` boilerplate every time.
158 
159 One catch with the `page` fixture: when I first started using it I got this error:
200     - name: Run tests
201       run: |
202         pytest
203 ```
204 This workflow configures caching for Playwright browsers, to ensure that `playwright install` only downloads the browser binaries the first time the workflow is executed.

til/cookiecutter/pytest-for-cookiecutter.md

1   # Testing cookiecutter templates with pytest
2   
3   I added some unit tests to my [datasette-plugin](https://github.com/simonw/datasette-plugin) cookiecutter template today, since the latest features involved adding a `hooks/post_gen_project.py` script.
5   Here's [the full test script](https://github.com/simonw/datasette-plugin/blob/503e6fef8e1000ab70103a61571d47ce966064ba/tests/test_cookiecutter_template.py) I wrote. It lives in `tests/test_cookiecutter_template.py` in the root of the repository.
6   
7   To run the tests I have to use `pytest tests` because running just `pytest` gets confused when it tries to run the templated tests that form part of the cookiecutter template.
8   
9   The pattern I'm using looks like this:
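
The pattern itself is not shown in this excerpt. A hedged sketch of how such a test might render the template (the context key and generated directory name are made up):

```python
import pathlib

from cookiecutter.main import cookiecutter


def test_template_renders(tmpdir):
    # Render the template in this repository into a temporary directory
    cookiecutter(
        template=str(pathlib.Path(__file__).parent.parent),
        output_dir=str(tmpdir),
        no_input=True,
        extra_context={"plugin_name": "demo plugin"},  # hypothetical context key
    )
    # The generated directory name depends on the template's configuration
    assert (tmpdir / "datasette-demo-plugin").exists()
```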

til/asgi/lifespan-test-httpx.md

18  from asgi_lifespan import LifespanManager
19  
20  @pytest.mark.asyncio
21  async def test_datasette_debug_asgi():
22      ds = Datasette([], memory=True)

til/README.md

198 * [Restricting SSH connections to devices within a Tailscale network](https://github.com/simonw/til/blob/main/tailscale/lock-down-sshd.md) - 2020-04-23
199 
200 ## pytest
201 
202 * [Session-scoped temporary directories in pytest](https://github.com/simonw/til/blob/main/pytest/session-scoped-tmp.md) - 2020-04-26
203 * [How to mock httpx using pytest-mock](https://github.com/simonw/til/blob/main/pytest/mock-httpx.md) - 2020-04-29
204 * [Asserting a dictionary is a subset of another dictionary](https://github.com/simonw/til/blob/main/pytest/assert-dictionary-subset.md) - 2020-05-28
205 * [Registering temporary pluggy plugins inside tests](https://github.com/simonw/til/blob/main/pytest/registering-plugins-in-tests.md) - 2020-07-21
206 * [Code coverage using pytest and codecov.io](https://github.com/simonw/til/blob/main/pytest/pytest-code-coverage.md) - 2020-08-15
207 * [Start a server in a subprocess during a pytest session](https://github.com/simonw/til/blob/main/pytest/subprocess-server.md) - 2020-08-31
208 * [Using VCR and pytest with pytest-recording](https://github.com/simonw/til/blob/main/pytest/pytest-recording-vcr.md) - 2021-11-02
209 * [Quick and dirty mock testing with mock_calls](https://github.com/simonw/til/blob/main/pytest/pytest-mock-calls.md) - 2021-11-02
210 * [Writing pytest tests against tools written with argparse](https://github.com/simonw/til/blob/main/pytest/pytest-argparse.md) - 2022-01-08
211 * [Testing a Click app with streaming input](https://github.com/simonw/til/blob/main/pytest/test-click-app-with-streaming-input.md) - 2022-01-09
212 * [Opt-in integration tests with pytest --integration](https://github.com/simonw/til/blob/main/pytest/only-run-integration.md) - 2022-01-26
213 * [pytest coverage with context](https://github.com/simonw/til/blob/main/pytest/coverage-with-context.md) - 2022-03-04
214 * [Async fixtures with pytest-asyncio](https://github.com/simonw/til/blob/main/pytest/async-fixtures.md) - 2022-03-19
215 * [Treating warnings as errors in pytest](https://github.com/simonw/til/blob/main/pytest/treat-warnings-as-errors.md) - 2022-04-01
216 * [Using pytest and Playwright to test a JavaScript web application](https://github.com/simonw/til/blob/main/pytest/playwright-pytest.md) - 2022-07-24
217 * [Mocking a Textract LimitExceededException with boto](https://github.com/simonw/til/blob/main/pytest/mocking-boto.md) - 2022-08-07
218 * [Show files opened by pytest tests](https://github.com/simonw/til/blob/main/pytest/show-files-opened-by-tests.md) - 2022-12-11
219 * [Mocking subprocess with pytest-subprocess](https://github.com/simonw/til/blob/main/pytest/pytest-subprocess.md) - 2023-03-08
220 * [Snapshot testing with Syrupy](https://github.com/simonw/til/blob/main/pytest/syrupy.md) - 2023-09-26
221 
222 ## github
289 * [PostgreSQL full-text search in the Django Admin](https://github.com/simonw/til/blob/main/django/postgresql-full-text-search-admin.md) - 2020-07-25
290 * [Adding extra read-only information to a Django admin change page](https://github.com/simonw/til/blob/main/django/extra-read-only-admin-information.md) - 2021-02-25
291 * [Writing tests for the Django admin with pytest-django](https://github.com/simonw/til/blob/main/django/testing-django-admin-with-pytest.md) - 2021-03-02
292 * [Show the timezone for datetimes in the Django admin](https://github.com/simonw/til/blob/main/django/show-timezone-in-django-admin.md) - 2021-03-02
293 * [Pretty-printing all read-only JSON in the Django admin](https://github.com/simonw/til/blob/main/django/pretty-print-json-admin.md) - 2021-03-07
314 * [Testing things in Fedora using Docker](https://github.com/simonw/til/blob/main/docker/test-fedora-in-docker.md) - 2022-07-27
315 * [Emulating a big-endian s390x with QEMU](https://github.com/simonw/til/blob/main/docker/emulate-s390x-with-qemu.md) - 2022-07-29
316 * [Run pytest against a specific Python version using Docker](https://github.com/simonw/til/blob/main/docker/pytest-docker.md) - 2022-09-05
317 * [Using pipenv and Docker](https://github.com/simonw/til/blob/main/docker/pipenv-and-docker.md) - 2022-11-28
318 
427 * [Exploring Baseline with Datasette Lite](https://github.com/simonw/til/blob/main/datasette/baseline.md) - 2023-05-12
428 * [Syntax highlighted code examples in Datasette](https://github.com/simonw/til/blob/main/datasette/syntax-highlighted-code-examples.md) - 2023-07-01
429 * [Using pytest-httpx to run intercepted requests through an in-memory Datasette instance](https://github.com/simonw/til/blob/main/datasette/pytest-httpx-datasette.md) - 2023-07-24
430 * [Remember to commit when using datasette.execute_write_fn()](https://github.com/simonw/til/blob/main/datasette/remember-to-commit.md) - 2023-08-31
431 * [Running Datasette on Hugging Face Spaces](https://github.com/simonw/til/blob/main/datasette/hugging-face-spaces.md) - 2023-09-08
438 ## cookiecutter
439 
440 * [Testing cookiecutter templates with pytest](https://github.com/simonw/til/blob/main/cookiecutter/pytest-for-cookiecutter.md) - 2021-01-27
441 * [Conditionally creating directories in cookiecutter](https://github.com/simonw/til/blob/main/cookiecutter/conditionally-creating-directories.md) - 2021-01-27
442 

swarm-to-sqlite/tests/test_save_checkin.py

1   from swarm_to_sqlite import utils
2   import pytest
3   import json
4   import sqlite_utils
12  
13  
14  @pytest.fixture(scope="session")
15  def converted():
16      db = sqlite_utils.Database(":memory:")

swarm-to-sqlite/setup.py

33      """,
34      install_requires=["sqlite-utils>=3.3", "click", "requests"],
35      extras_require={"test": ["pytest"]},
36      tests_require=["swarm-to-sqlite[test]"],
37  )

sqlite-utils-shell/README.md

47  To run the tests:
48  
49      pytest

sqlite-utils-shell/pyproject.toml

23  
24  [project.optional-dependencies]
25  test = ["pytest"]

sqlite-utils-shell/tests/test_shell.py

1   import pytest
2   from sqlite_utils.plugins import get_plugins
3   from sqlite_utils_shell import run_sql_shell
10  
11  
12  @pytest.mark.parametrize(
13      "inputs,expected_outputs",
14      (

sqlite-utils-plugin/sqlite-utils-{{cookiecutter.hyphenated}}/pyproject.toml

23  
24  [project.optional-dependencies]
25  test = ["pytest"]

sqlite-utils-plugin/sqlite-utils-{{cookiecutter.hyphenated}}/README.md

32  To run the tests:
33  ```bash
34  pytest
35  ```

sqlite-utils-plugin/requirements.txt

1   cookiecutter
2   pytest

sqlite-utils-plugin/README.md

66  ]
67  ```
68  You can run the tests for your plugin with `pytest` - follow the development environment instructions in the plugin's generated README for details.

sqlite-utils-litecli/pyproject.toml

24  
25  [project.optional-dependencies]
26  test = ["pytest"]

sqlite-utils-litecli/README.md

41  To run the tests:
42  ```bash
43  pytest
44  ```

sqlite-utils-move-tables/tests/test_move_tables.py

2   from sqlite_utils.cli import cli
3   from click.testing import CliRunner
4   import pytest
5   
6   
7   @pytest.fixture
8   def databases(tmpdir):
9       origin = str(tmpdir / "origin.db")
16  
17  
18  @pytest.mark.parametrize(
19      "extra_args,expected_destination_tables,expected_origin_tables,expected_error",
20      (
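
The excerpt above combines two common pieces: a fixture that creates throwaway database files under pytest's `tmpdir`, and a `parametrize` block that drives a Click CLI with different arguments. A sketch of that shape, swapping in the core `sqlite-utils tables` command rather than the plugin's own command (the tables and rows are invented):

```python
import pytest
import sqlite_utils
from click.testing import CliRunner
from sqlite_utils.cli import cli


@pytest.fixture
def databases(tmpdir):
    # Two throwaway database files, recreated for every test.
    origin = str(tmpdir / "origin.db")
    destination = str(tmpdir / "destination.db")
    sqlite_utils.Database(origin)["dogs"].insert({"id": 1, "name": "Cleo"}, pk="id")
    sqlite_utils.Database(destination)["cats"].insert({"id": 1, "name": "Peanut"}, pk="id")
    return origin, destination


@pytest.mark.parametrize("extra_args", ([], ["--counts"]))
def test_tables_cli(databases, extra_args):
    origin, _ = databases
    result = CliRunner().invoke(cli, ["tables", origin] + extra_args)
    assert result.exit_code == 0
    assert "dogs" in result.output
```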

sqlite-utils-move-tables/pyproject.toml

23  
24  [project.optional-dependencies]
25  test = ["pytest"]

sqlite-utils-move-tables/README.md

55  To run the tests:
56  ```bash
57  pytest
58  ```

sqlite-utils-fast-fks/tests/test_fast_fks_cli.py

3   from sqlite_utils.cli import cli
4   from click.testing import CliRunner
5   import pytest
6   
7   
8   @pytest.fixture
9   def db_and_db_path(tmpdir):
10      db_path = str(tmpdir / "data.db")
23  
24  
25  @pytest.mark.parametrize(
26      "args,expected",
27      (
57  
58  
59  @pytest.mark.parametrize(
60      "args,expected_error",
61      (

sqlite-utils-jq/pyproject.toml

15  
16  [project.optional-dependencies]
17  test = ["pytest"]
18  
19  [project.urls]

sqlite-utils-jq/README.md

46  To run the tests:
47  
48      pytest

sqlite-utils-fast-fks/pyproject.toml

20  
21  [project.optional-dependencies]
22  test = ["pytest"]
23  
24  [project.entry-points.sqlite_utils]

sqlite-utils-fast-fks/README.md

101 To run the tests:
102 
103     pytest

sqlite-utils-dateutil/tests/test_dateutil.py

1   import pytest
2   import sqlite_utils
3   from sqlite_utils.plugins import get_plugins
10  
11  
12  @pytest.mark.parametrize(
13      "sql,expected",
14      [

sqlite-utils/tests/test_wal.py

1   import pytest
2   from sqlite_utils import Database
3   
4   
5   @pytest.fixture
6   def db_path_tmpdir(tmpdir):
7       path = tmpdir / "test.db"

sqlite-utils/tests/test_utils.py

2   import csv
3   import io
4   import pytest
5   
6   
7   @pytest.mark.parametrize(
8       "input,expected,should_be_is",
9       [
25  
26  
27  @pytest.mark.parametrize(
28      "size,expected",
29      (
61      fp = io.BytesIO(long_csv.encode("utf-8"))
62      # Using rows_from_file should error
63      with pytest.raises(csv.Error):
64          rows, _ = utils.rows_from_file(fp, utils.Format.CSV)
65          list(rows)
74  
75  
76  @pytest.mark.parametrize(
77      "input,expected",
78      (

sqlite-utils/tests/test_upsert.py

1   from sqlite_utils.db import PrimaryKeyRequired
2   import pytest
3   
4   
41  def test_upsert_error_if_no_pk(fresh_db):
42      table = fresh_db["table"]
43      with pytest.raises(PrimaryKeyRequired):
44          table.upsert_all([{"id": 1, "name": "Cleo"}])
45      with pytest.raises(PrimaryKeyRequired):
46          table.upsert({"id": 1, "name": "Cleo"})
47  
56  
57  
58  @pytest.mark.parametrize("hash_id", (None, "custom_id"))
59  def test_upsert_with_hash_id_columns(fresh_db, hash_id):
60      table = fresh_db["table"]

sqlite-utils/tests/test_update.py

2   import json
3   
4   import pytest
5   
6   from sqlite_utils.db import NotFoundError
30  
31  
32  @pytest.mark.parametrize(
33      "pk,update_pk",
34      (
45      table = fresh_db["table"]
46      table.insert({"id1": 5, "id2": 3, "v": 1}, pk=pk).last_pk
47      with pytest.raises(NotFoundError):
48          table.update(update_pk, {"v": 2})
49  
74      table = fresh_db["table"]
75      rowid = table.insert({"foo": "bar"}).last_pk
76      with pytest.raises(AssertionError):
77          table.update(rowid, {"new_col[abc]": 1.2}, alter=True)
78  
85      table.update(2)
86      assert table.last_pk == 2
87      with pytest.raises(NotFoundError):
88          table.update(3)
89  
90  
91  @pytest.mark.parametrize(
92      "data_structure",
93      (

sqlite-utils/tests/test_transform.py

1   from sqlite_utils.db import ForeignKey
2   from sqlite_utils.utils import OperationalError
3   import pytest
4   
5   
6   @pytest.mark.parametrize(
7       "params,expected_sql",
8       [
99      ],
100 )
101 @pytest.mark.parametrize("use_pragma_foreign_keys", [False, True])
102 def test_transform_sql_table_with_primary_key(
103     fresh_db, params, expected_sql, use_pragma_foreign_keys
127 
128 
129 @pytest.mark.parametrize(
130     "params,expected_sql",
131     [
172     ],
173 )
174 @pytest.mark.parametrize("use_pragma_foreign_keys", [False, True])
175 def test_transform_sql_table_with_no_primary_key(
176     fresh_db, params, expected_sql, use_pragma_foreign_keys
245 
246 
247 @pytest.mark.parametrize("not_null", [{"age"}, {"age": True}])
248 def test_transform_add_not_null_with_rename(fresh_db, not_null):
249     dogs = fresh_db["dogs"]
286 
287 
288 @pytest.fixture
289 def authors_db(fresh_db):
290     books = fresh_db["books"]
313 
314 
315 @pytest.mark.parametrize("use_pragma_foreign_keys", [False, True])
316 def test_transform_foreign_keys_survive_renamed_column(
317     authors_db, use_pragma_foreign_keys
345 
346 
347 @pytest.mark.parametrize("use_pragma_foreign_keys", [False, True])
348 def test_transform_drop_foreign_keys(fresh_db, use_pragma_foreign_keys):
349     if use_pragma_foreign_keys:
386     )
387     # Renaming the id column on authors should break everything
388     with pytest.raises(OperationalError) as e:
389         fresh_db["authors"].transform(rename={"id": "id2"})
390     assert e.value.args[0] == 'foreign key mismatch - "books" referencing "authors"'
430 
431 
432 @pytest.mark.parametrize(
433     "add_foreign_keys",
434     (
470 
471 
472 @pytest.mark.parametrize(
473     "foreign_keys",
474     (
501 
502 
503 @pytest.mark.parametrize("table_type", ("id_pk", "rowid", "compound_pk"))
504 def test_transform_preserves_rowids(fresh_db, table_type):
505     pk = None
533 
534 
535 @pytest.mark.parametrize("strict", (False, True))
536 def test_transform_strict(fresh_db, strict):
537     dogs = fresh_db.table("dogs", strict=strict)

sqlite-utils/tests/test_sniff.py

2   from click.testing import CliRunner
3   import pathlib
4   import pytest
5   
6   sniff_dir = pathlib.Path(__file__).parent / "sniff"
7   
8   
9   @pytest.mark.parametrize("filepath", sniff_dir.glob("example*"))
10  def test_sniff(tmpdir, filepath):
11      db_path = str(tmpdir / "test.db")
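
Parametrizing a test over files discovered on disk, as in the excerpt above, turns each fixture file into its own reported test case. A sketch of the pattern, assuming a `fixtures/` directory of CSV files next to the test module (the directory and filenames are invented):

```python
import pathlib

import pytest

fixtures_dir = pathlib.Path(__file__).parent / "fixtures"


@pytest.mark.parametrize(
    "filepath", fixtures_dir.glob("*.csv"), ids=lambda p: p.name
)
def test_fixture_file_is_not_empty(filepath):
    # Each matching file becomes a separate test, identified by filename.
    assert filepath.read_text().strip()
```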

sqlite-utils/tests/test_rows_from_file.py

1   from sqlite_utils.utils import rows_from_file, Format, RowError
2   from io import BytesIO, StringIO
3   import pytest
4   
5   
6   @pytest.mark.parametrize(
7       "input,expected_format",
8       (
19  
20  
21  @pytest.mark.parametrize(
22      "ignore_extras,extras_key,expected",
23      (
48  
49  def test_rows_from_file_error_on_string_io():
50      with pytest.raises(TypeError) as ex:
51          rows_from_file(StringIO("id,name\r\n1,Cleo"))
52      assert ex.value.args == (
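
Capturing the exception with `as ex` and then asserting on `ex.value`, as in this excerpt, lets a test check the error's arguments as well as its type. A generic sketch of the pattern (the helper function is a stand-in, not part of sqlite-utils):

```python
import pytest


def require_positive(value):
    # Hypothetical helper, used only to demonstrate the pattern.
    if value <= 0:
        raise ValueError("value must be positive, got {}".format(value))
    return value


def test_error_message_is_checked():
    with pytest.raises(ValueError) as ex:
        require_positive(-1)
    # ex.value is the raised exception instance.
    assert ex.value.args == ("value must be positive, got -1",)
```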

sqlite-utils/tests/test_rows.py

1   import pytest
2   
3   
8   
9   
10  @pytest.mark.parametrize(
11      "where,where_args,expected_ids",
12      [
32  
33  
34  @pytest.mark.parametrize(
35      "where,order_by,expected_ids",
36      [
54  
55  
56  @pytest.mark.parametrize(
57      "offset,limit,expected",
58      [

sqlite-utils/tests/test_register_function.py

1   # flake8: noqa
2   import pytest
3   import sys
4   from unittest.mock import MagicMock, call

sqlite-utils/tests/test_recreate.py

2   import sqlite3
3   import pathlib
4   import pytest
5   
6   
15  def test_recreate_not_allowed_for_connection():
16      conn = sqlite3.connect(":memory:")
17      with pytest.raises(AssertionError):
18          Database(conn, recreate=True)
19  
20  
21  @pytest.mark.parametrize(
22      "use_path,create_file_first",
23      [(True, True), (True, False), (False, True), (False, False)],

sqlite-utils/tests/test_recipes.py

2   from sqlite_utils.utils import sqlite3
3   import json
4   import pytest
5   
6   
7   @pytest.fixture
8   def dates_db(fresh_db):
9       fresh_db["example"].insert_all(
39  
40  
41  @pytest.mark.parametrize(
42      "recipe,kwargs,expected",
43      (
63  
64  
65  @pytest.mark.parametrize("fn", ("parsedate", "parsedatetime"))
66  @pytest.mark.parametrize("errors", (None, recipes.SET_NULL, recipes.IGNORE))
67  def test_dateparse_errors(fresh_db, fn, errors):
68      fresh_db["example"].insert_all(
74      if errors is None:
75          # Should raise an error
76          with pytest.raises(sqlite3.OperationalError):
77              fresh_db["example"].convert("dt", lambda value: getattr(recipes, fn)(value))
78      else:
85  
86  
87  @pytest.mark.parametrize("delimiter", [None, ";", "-"])
88  def test_jsonsplit(fresh_db, delimiter):
89      fresh_db["example"].insert_all(
109 
110 
111 @pytest.mark.parametrize(
112     "type,expected",
113     (

sqlite-utils/tests/test_m2m.py

1   from sqlite_utils.db import ForeignKey, NoObviousTable
2   import pytest
3   
4   
140 def test_m2m_requires_either_records_or_lookup(fresh_db):
141     people = fresh_db.table("people", pk="id").insert({"name": "Wahyu"})
142     with pytest.raises(AssertionError):
143         people.m2m("tags")
144     with pytest.raises(AssertionError):
145         people.m2m("tags", {"tag": "hello"}, lookup={"foo": "bar"})
146 
211         foreign_keys=["people_id", "tags_id"],
212     )
213     with pytest.raises(NoObviousTable):
214         people.insert({"name": "Wahyu"}).m2m("tags", lookup={"tag": "Coworker"})

sqlite-utils/tests/test_lookup.py

1   from sqlite_utils.db import Index
2   import pytest
3   
4   
65      species.insert_all([{"id": 1, "name": "Palm"}, {"id": 2, "name": "Palm"}])
66      # This will fail because the name column is not unique
67      with pytest.raises(Exception, match="UNIQUE constraint failed"):
68          species.lookup({"name": "Palm"})
69  
154 
155 
156 @pytest.mark.parametrize("strict", (False, True))
157 def test_lookup_new_table_strict(fresh_db, strict):
158     fresh_db["species"].lookup({"name": "Palm"}, strict=strict)

sqlite-utils/tests/test_introspect.py

1   from sqlite_utils.db import Index, View, Database, XIndex, XIndexColumn
2   import pytest
3   
4   
37  
38  
39  @pytest.mark.parametrize("reverse_order", (True, False))
40  def test_detect_fts_similar_tables(fresh_db, reverse_order):
41      # https://github.com/simonw/sqlite-utils/issues/434
149 
150 
151 @pytest.mark.parametrize(
152     "column,expected_table_guess",
153     (
165 
166 
167 @pytest.mark.parametrize(
168     "pk,expected", ((None, ["rowid"]), ("id", ["id"]), (["id", "id2"], ["id", "id2"]))
169 )
230 
231 
232 @pytest.mark.parametrize(
233     "sql,expected_name,expected_using",
234     [
282 
283 
284 @pytest.mark.skipif(
285     not Database(memory=True).supports_strict,
286     reason="Needs SQLite version that supports strict",
287 )
288 @pytest.mark.parametrize(
289     "create_table,expected_strict",
290     (
302 
303 
304 @pytest.mark.parametrize(
305     "value",
306     (

sqlite-utils/tests/test_insert_files.py

3   import os
4   import pathlib
5   import pytest
6   import sys
7   
8   
9   @pytest.mark.parametrize("silent", (False, True))
10  def test_insert_files(silent):
11      runner = CliRunner()
108 
109 
110 @pytest.mark.parametrize(
111     "use_text,encoding,input,expected",
112     (
142 
143 
144 @pytest.mark.skipif(
145     sys.platform.startswith("win"),
146     reason="Windows has a different way of handling default encodings",

sqlite-utils/tests/test_gis.py

1   import json
2   import pytest
3   
4   from click.testing import CliRunner
13  
14  
15  pytestmark = [
16      pytest.mark.skipif(
17          not find_spatialite(), reason="Could not find SpatiaLite extension"
18      ),
19      pytest.mark.skipif(
20          not hasattr(sqlite3.Connection, "enable_load_extension"),
21          reason="sqlite3.Connection missing enable_load_extension",
22      ),
23      pytest.mark.skipif(
24          sqlean is not None, reason="sqlean.py is not compatible with SpatiaLite"
25      ),
98  
99  # cli tests
100 @pytest.mark.parametrize("use_spatialite_shortcut", [True, False])
101 def test_query_load_extension(use_spatialite_shortcut):
102     # Without --load-extension:
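
A module-level `pytestmark` list, as in this excerpt, applies the same skip conditions to every test in the file. A stripped-down sketch of the mechanism (the second condition is a placeholder, not the real SpatiaLite detection):

```python
import sqlite3
import sys

import pytest

# Every test in this module is skipped unless all of these conditions hold.
pytestmark = [
    pytest.mark.skipif(
        not hasattr(sqlite3.Connection, "enable_load_extension"),
        reason="sqlite3.Connection missing enable_load_extension",
    ),
    pytest.mark.skipif(
        sys.platform.startswith("win"),
        reason="example condition: skip this module on Windows",
    ),
]


def test_extension_dependent_behaviour_would_run_here():
    assert True
```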

sqlite-utils/tests/test_get.py

1   import pytest
2   from sqlite_utils.db import NotFoundError
3   
18  
19  
20  @pytest.mark.parametrize(
21      "argument,expected_msg",
22      [(100, None), (None, None), ((1, 2), "Need 1 primary key value"), ("2", None)],
26          {"id": 1, "name": "Cleo", "age": 4, "is_good": True}, pk="id"
27      )
28      with pytest.raises(NotFoundError) as excinfo:
29          fresh_db["dogs"].get(argument)
30      if expected_msg is not None:

sqlite-utils/tests/test_fts.py

1   import pytest
2   from sqlite_utils import Database
3   from sqlite_utils.utils import sqlite3
95  
96  
97  @pytest.mark.parametrize("fts_version", ("FTS4", "FTS5"))
98  def test_search_where(fresh_db, fts_version):
99      table = fresh_db["t"]
115 def test_search_where_args_disallows_query(fresh_db):
116     table = fresh_db["t"]
117     with pytest.raises(ValueError) as ex:
118         list(
119             table.search(
181 
182 
183 @pytest.mark.parametrize("fts_version", ("4", "5"))
184 def test_fts_tokenize(fresh_db, fts_version):
185     table_name = "searchable_{}".format(fts_version)
253 
254 
255 @pytest.mark.parametrize("create_triggers", [True, False])
256 def test_disable_fts(fresh_db, create_triggers):
257     table = fresh_db["searchable"]
310 
311 
312 @pytest.mark.parametrize("invalid_table", ["does_not_exist", "not_searchable"])
313 def test_rebuild_fts_invalid(fresh_db, invalid_table):
314     fresh_db["not_searchable"].insert({"foo": "bar"})
315     # Raise OperationalError on invalid table
316     with pytest.raises(sqlite3.OperationalError):
317         fresh_db[invalid_table].rebuild_fts()
318 
319 
320 @pytest.mark.parametrize("fts_version", ["FTS4", "FTS5"])
321 def test_rebuild_removes_junk_docsize_rows(tmpdir, fts_version):
322     # Recreating https://github.com/simonw/sqlite-utils/issues/149
335 
336 
337 @pytest.mark.parametrize(
338     "kwargs",
339     [
397     db = Database(memory=True)
398     db.create_view("hello", "select 1 + 1")
399     with pytest.raises(NotImplementedError) as e:
400         db["hello"].enable_fts()
401         assert e.value.args[0] == "enable_fts() is supported on tables but not on views"
401     assert e.value.args[0] == "enable_fts() is supported on tables but not on views"
402 
403 
404 @pytest.mark.parametrize(
405     "kwargs,fts,expected",
406     [
616 
617 
618 @pytest.mark.parametrize(
619     "input,expected",
620     (
647     table.enable_fts(["text", "country"])
648     query = "cat's"
649     with pytest.raises(sqlite3.OperationalError):
650         list(table.search(query))
651     # No exception with quote=True

sqlite-utils/tests/test_extracts.py

1   from sqlite_utils.db import Index
2   import pytest
3   
4   
5   @pytest.mark.parametrize(
6       "kwargs,expected_table",
7       [
11      ],
12  )
13  @pytest.mark.parametrize("use_table_factory", [True, False])
14  def test_extracts(fresh_db, kwargs, expected_table, use_table_factory):
15      table_kwargs = {}

sqlite-utils/tests/test_extract.py

1   from sqlite_utils.db import InvalidColumns
2   import itertools
3   import pytest
4   
5   
6   @pytest.mark.parametrize("table", [None, "Species"])
7   @pytest.mark.parametrize("fk_column", [None, "species"])
8   def test_extract_single_column(fresh_db, table, fk_column):
9       expected_table = table or "species"
108         pk="id",
109     )
110     with pytest.raises(InvalidColumns):
111         fresh_db["tree"].extract(["bad_column"])
112 
173     fresh_db["species"].insert({"id": 1})
174     fresh_db["tree"].insert({"name": "Tree 1", "common_name": "Palm"})
175     with pytest.raises(InvalidColumns):
176         fresh_db["tree"].extract("common_name", table="species")
177 
178     # Try again with incompatible existing column type
179     fresh_db["species2"].insert({"id": 1, "common_name": 3.5})
180     with pytest.raises(InvalidColumns):
181         fresh_db["tree"].extract("common_name", table="species2")
182 
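
Stacking two `parametrize` decorators, as at the top of this excerpt, runs the test once for every combination of the two parameter sets. A minimal sketch of the cross-product behaviour, reusing the argument names from the excerpt:

```python
import pytest


@pytest.mark.parametrize("table", [None, "Species"])
@pytest.mark.parametrize("fk_column", [None, "species"])
def test_runs_for_every_combination(table, fk_column):
    # 2 values x 2 values = 4 generated test cases.
    expected_table = table or "species"
    assert expected_table in ("species", "Species")
```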

sqlite-utils/tests/test_enable_counts.py

2   from sqlite_utils import cli
3   from click.testing import CliRunner
4   import pytest
5   
6   
84  
85  
86  @pytest.fixture
87  def counts_db_path(tmpdir):
88      path = str(tmpdir / "test.db")
95  
96  
97  @pytest.mark.parametrize(
98      "extra_args,expected_triggers",
99      [

sqlite-utils/tests/test_duplicate.py

1   from sqlite_utils.db import NoTable
2   import datetime
3   import pytest
4   
5   
40  
41  def test_duplicate_fails_if_table_does_not_exist(fresh_db):
42      with pytest.raises(NoTable):
43          fresh_db["not_a_table"].duplicate("duplicated")

sqlite-utils/tests/test_docs.py

2   from sqlite_utils import cli, recipes
3   from pathlib import Path
4   import pytest
5   import re
6   
10  
11  
12  @pytest.fixture(scope="session")
13  def documented_commands():
14      rst = ""
22  
23  
24  @pytest.fixture(scope="session")
25  def documented_recipes():
26      rst = (docs_path / "cli.rst").read_text()
28  
29  
30  @pytest.mark.parametrize("command", cli.cli.commands.keys())
31  def test_commands_are_documented(documented_commands, command):
32      assert command in documented_commands
33  
34  
35  @pytest.mark.parametrize("command", cli.cli.commands.values())
36  def test_commands_have_help(command):
37      assert command.help, "{} is missing its help".format(command)
49  
50  
51  @pytest.mark.parametrize(
52      "recipe",
53      [
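
Parametrizing over `cli.cli.commands`, as in this excerpt, turns "every command must be documented and have help text" into one generated test per command. A sketch of the help-text half using a toy Click group (the group and its commands are invented for illustration):

```python
import click
import pytest


@click.group()
def cli():
    "Toy CLI used only to demonstrate the pattern."


@cli.command()
def serve():
    "Start the server."


@cli.command()
def inspect():
    "Inspect a database file."


@pytest.mark.parametrize("command", cli.commands.values(), ids=lambda c: c.name)
def test_commands_have_help(command):
    # Click exposes registered subcommands via Group.commands.
    assert command.help, "{} is missing its help".format(command.name)
```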

sqlite-utils/tests/test_default_value.py

1   import pytest
2   
3   
26  
27  
28  @pytest.mark.parametrize("column_def,initial_value,expected_value", EXAMPLES)
29  def test_quote_default_value(fresh_db, column_def, initial_value, expected_value):
30      fresh_db.execute("create table foo (col {})".format(column_def))

sqlite-utils/tests/test_create_view.py

1   import pytest
2   from sqlite_utils.utils import OperationalError
3   
11  def test_create_view_error(fresh_db):
12      fresh_db.create_view("bar", "select 1 + 1")
13      with pytest.raises(OperationalError):
14          fresh_db.create_view("bar", "select 1 + 2")
15  
16  
17  def test_create_view_only_arrow_one_param(fresh_db):
18      with pytest.raises(AssertionError):
19          fresh_db.create_view("bar", "select 1 + 2", ignore=True, replace=True)
20  

sqlite-utils/tests/test_create.py

16  import json
17  import pathlib
18  import pytest
19  import uuid
20  
75  
76  
77  @pytest.mark.parametrize("pk", ("id", ["id"]))
78  def test_create_table_with_single_primary_key(fresh_db, pk):
79      fresh_db["foo"].insert({"id": 1}, pk=pk)
84  
85  def test_create_table_with_invalid_column_characters(fresh_db):
86      with pytest.raises(AssertionError):
87          fresh_db.create_table("players", {"name[foo]": str})
88  
104 
105 def test_create_table_with_bad_not_null(fresh_db):
106     with pytest.raises(AssertionError):
107         fresh_db.create_table(
108             "players", {"name": str, "score": int}, not_null={"mouse"}
126 
127 
128 @pytest.mark.parametrize(
129     "example,expected_columns",
130     (
171 
172 
173 @pytest.mark.parametrize(
174     "method_name", ("insert", "upsert", "insert_all", "upsert_all")
175 )
194 
195 
196 @pytest.mark.parametrize("use_table_factory", [True, False])
197 def test_create_table_column_order(fresh_db, use_table_factory):
198     row = collections.OrderedDict(
219 
220 
221 @pytest.mark.parametrize(
222     "foreign_key_specification,expected_exception",
223     (
244     ),
245 )
246 @pytest.mark.parametrize("use_table_factory", [True, False])
247 def test_create_table_works_for_m2m_with_only_foreign_keys(
248     fresh_db, foreign_key_specification, expected_exception, use_table_factory
264 
265     if expected_exception:
266         with pytest.raises(expected_exception):
267             do_it()
268         return
312 
313 def test_create_error_if_invalid_foreign_keys(fresh_db):
314     with pytest.raises(AlterError):
315         fresh_db["one"].insert(
316             {"id": 1, "ref_id": 3},
321 
322 def test_create_error_if_invalid_self_referential_foreign_keys(fresh_db):
323     with pytest.raises(AlterError) as ex:
324         fresh_db["one"].insert(
325             {"id": 1, "ref_id": 3},
330 
331 
332 @pytest.mark.parametrize(
333     "col_name,col_type,not_null_default,expected_schema",
334     (
433         {"id": 1, "title": "Hedgehogs of the world", "author_id": 1}
434     )
435     with pytest.raises(AlterError):
436         fresh_db["books"].add_foreign_key("author2_id", "books", "id")
437 
439 def test_add_foreign_key_error_if_other_table_does_not_exist(fresh_db):
440     fresh_db["books"].insert({"title": "Hedgehogs of the world", "author_id": 1})
441     with pytest.raises(AlterError):
442         fresh_db["books"].add_foreign_key("author_id", "authors", "id")
443 
447     fresh_db["authors"].insert({"id": 1, "name": "Sally"}, pk="id")
448     fresh_db["books"].add_foreign_key("author_id", "authors", "id")
449     with pytest.raises(AlterError) as ex:
450         fresh_db["books"].add_foreign_key("author_id", "authors", "id")
451     assert "Foreign key already exists for author_id => authors.id" == ex.value.args[0]
544 
545 
546 @pytest.mark.parametrize(
547     "extra_data,expected_new_columns",
548     [
558     ],
559 )
560 @pytest.mark.parametrize("use_table_factory", [True, False])
561 def test_insert_row_alter_table(
562     fresh_db, extra_data, expected_new_columns, use_table_factory
592 
593 
594 @pytest.mark.parametrize("use_table_factory", [True, False])
595 def test_insert_replace_rows_alter_table(fresh_db, use_table_factory):
596     first_row = {"id": 1, "title": "Hedgehogs of the world", "author_id": 1}
688 
689 
690 @pytest.mark.parametrize(
691     "num_columns,should_error", ((900, False), (999, False), (1000, True))
692 )
694     record = dict([("c{}".format(i), i) for i in range(num_columns)])
695     if should_error:
696         with pytest.raises(AssertionError):
697             fresh_db["big"].insert(record)
698     else:
724 
725 
726 @pytest.mark.parametrize(
727     "columns,index_name,expected_index",
728     (
797     dogs.create_index(["name"])
798     assert len(dogs.indexes) == 1
799     with pytest.raises(Exception, match="index idx_dogs_name already exists"):
800         dogs.create_index(["name"])
801     dogs.create_index(["name"], if_not_exists=True)
820     table.create_index(["id"])
821     # Without find_unique_name should error
822     with pytest.raises(OperationalError, match="index idx_t_id already exists"):
823         table.create_index(["id"])
824     # With find_unique_name=True it should work
841 
842 
843 @pytest.mark.parametrize(
844     "data_structure",
845     (
901 def test_insert_thousands_raises_exception_with_extra_columns_after_first_100(fresh_db):
902     # https://github.com/simonw/sqlite-utils/issues/139
903     with pytest.raises(Exception, match="table test has no column named extra"):
904         fresh_db["test"].insert_all(
905             [{"i": i, "word": "word_{}".format(i)} for i in range(100)]
922     fresh_db["test"].insert({"id": 1, "bar": 2}, pk="id")
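
The `match=` argument to `pytest.raises`, used several times in this excerpt, checks the exception message against a regular expression in the same statement. A small self-contained sketch mirroring the duplicate-index scenario shown above (the table name is invented):

```python
import pytest
import sqlite_utils


def test_match_checks_the_error_message():
    db = sqlite_utils.Database(memory=True)
    dogs = db["dogs"]
    dogs.insert({"name": "Cleo"})
    dogs.create_index(["name"])
    # match= runs a regular expression search against str(exception)
    with pytest.raises(Exception, match="index idx_dogs_name already exists"):
        dogs.create_index(["name"])
    # if_not_exists=True turns the duplicate call into a no-op instead of an error
    dogs.create_index(["name"], if_not_exists=True)
```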