Permalink
Checking mergeability…
Don’t worry, you can still create the pull request.
Comparing changes
Open a pull request
- 3 commits
- 20 files changed
- 0 commit comments
- 3 contributors
Unified
Split
Showing 20 changed files with 1,155 additions and 7 deletions.
- +3 −5 CONTRIBUTING.md
- +4 −0 build/ci/templates/compile-and-validate.yml
- +2 −0 news/1 Enhancements/2522.md
- +1 −0 news/3 Code Health/4033.md
- 0 pythonFiles/testing_tools/adapter/__init__.py
- +84 −0 pythonFiles/testing_tools/adapter/__main__.py
- +11 −0 pythonFiles/testing_tools/adapter/errors.py
- +9 −0 pythonFiles/testing_tools/adapter/info.py
- +150 −0 pythonFiles/testing_tools/adapter/pytest.py
- +26 −0 pythonFiles/testing_tools/adapter/report.py
- +13 −0 pythonFiles/testing_tools/run_adapter.py
- +3 −1 pythonFiles/tests/{run_all.py → __main__.py}
- 0 pythonFiles/tests/testing_tools/__init__.py
- 0 pythonFiles/tests/testing_tools/adapter/__init__.py
- +140 −0 pythonFiles/tests/testing_tools/adapter/test___main__.py
- +341 −0 pythonFiles/tests/testing_tools/adapter/test_pytest.py
- +343 −0 pythonFiles/tests/testing_tools/adapter/test_report.py
- +25 −0 pythonFiles/tests/util.py
- 0 pythonFiles/testing_tools/adapters/__init__.py
- +0 −1 src/client/providers/importSortProvider.ts
| @@ -45,12 +45,10 @@ You may see warnings that ```The engine "vscode" appears to be invalid.```, you | |||
|
|
|||
| Run the `Compile` and `Hygiene` build Tasks from the [Command Palette](https://code.visualstudio.com/docs/editor/tasks) (shortcut `CTRL+SHIFT+B` or `⇧⌘B`) | |||
|
|
|||
| You can also compile from the command-line: | |||
|
|
|||
| You can also compile from the command-line. For a full compile you can use `npx gulp prePublishNonBundle`. For incremental builds you can use the following commands depending on your needs: | |||
| ```shell | |||
| npx gulp prePublishNonBundle # full compile | |||
| npm run compile # incremental | |||
| npm run compile-webviews-watch # incremental for data science (React Code) | |||
| npm run compile | |||
| npm run compile-webviews-watch # For data science (React Code) | |||
| ``` | |||
|
|
|||
| Sometimes you will need to run `npm run clean` and even `rm -r out`. | |||
| @@ -156,6 +156,10 @@ jobs: | |||
| arguments: '-m pip install --upgrade -r ./build/test-requirements.txt' | |||
|
|
|||
|
|
|||
| - bash: 'python -m pythonFiles.tests' | |||
| displayName: 'run pythonFiles unit tests' | |||
|
|
|||
|
|
|||
| - task: CmdLine@1 | |||
| displayName: 'pip install python packages' | |||
| inputs: | |||
| @@ -0,0 +1,2 @@ | |||
| Disabled opening the output pane when sorting imports via isort fails. | |||
| (thanks [chrised](https://github.com/chrised/)) | |||
| @@ -0,0 +1 @@ | |||
| Add a Python script to run PyTest correctly for discovery. | |||
No changes.
| @@ -0,0 +1,84 @@ | |||
| from __future__ import absolute_import | |||
|
|
|||
| import argparse | |||
| import sys | |||
|
|
|||
| from . import pytest, report | |||
| from .errors import UnsupportedToolError, UnsupportedCommandError | |||
|
|
|||
|
|
|||
# Set this to True to pretty-print the output.
DEBUG=False
#DEBUG=True

# Maps each supported tool name to its operations.  Keys starting with
# "_" are internal helpers (e.g. CLI registration), not commands.
TOOLS = {
    'pytest': {
        '_add_subparser': pytest.add_cli_subparser,
        'discover': pytest.discover,
    },
}
# Maps each command name to the function that serializes its result.
REPORTERS = {
    'discover': report.report_discovered,
}
|
|
|||
|
|
|||
def parse_args(
        argv=None,
        prog=None,
        ):
    """
    Return the subcommand & tool to run, along with its args.

    This defines the standard CLI for the different testing frameworks.

    Returns a (tool, cmd, subargs, toolargs) tuple, where "subargs" is a
    dict of the parsed subcommand options and "toolargs" is the list of
    leftover args to pass through to the tool.
    """
    # Resolve the defaults from the *live* sys.argv at call time.  (The
    # originals were bound once at import time, so later changes to
    # sys.argv were silently ignored.)
    if argv is None:
        argv = sys.argv[1:]
    if prog is None:
        prog = sys.argv[0]

    parser = argparse.ArgumentParser(
            description='Run Python testing operations.',
            prog=prog,
            )
    cmdsubs = parser.add_subparsers(dest='cmd')

    # Add "run" and "debug" subcommands when ready.
    for cmdname in ['discover']:
        sub = cmdsubs.add_parser(cmdname)
        subsubs = sub.add_subparsers(dest='tool')
        # Register every tool that knows how to extend this command's CLI.
        for toolname in sorted(TOOLS):
            try:
                add_subparser = TOOLS[toolname]['_add_subparser']
            except KeyError:
                continue
            add_subparser(cmdname, toolname, subsubs)

    # Parse the args!
    args, toolargs = parser.parse_known_args(argv)
    ns = vars(args)

    cmd = ns.pop('cmd')
    if not cmd:
        parser.error('missing command')
    tool = ns.pop('tool')
    if not tool:
        parser.error('missing tool')

    return tool, cmd, ns, toolargs
|
|
|||
|
|
|||
def main(toolname, cmdname, subargs, toolargs,
         _tools=TOOLS, _reporters=REPORTERS):
    """Run the requested command with the requested tool and report the result.

    Raises UnsupportedToolError / UnsupportedCommandError for unknown names.
    """
    if toolname not in _tools:
        raise UnsupportedToolError(toolname)
    tool = _tools[toolname]

    # Both the tool operation and its reporter must exist.
    if cmdname not in tool or cmdname not in _reporters:
        raise UnsupportedCommandError(cmdname)
    run = tool[cmdname]
    report_result = _reporters[cmdname]

    result = run(toolargs, **subargs)
    report_result(result, debug=DEBUG)


if __name__ == '__main__':
    tool, cmd, subargs, toolargs = parse_args()
    main(tool, cmd, subargs, toolargs)
| @@ -0,0 +1,11 @@ | |||
|
|
|||
class UnsupportedToolError(ValueError):
    """Raised when a requested testing tool is not supported."""

    def __init__(self, tool):
        msg = 'unsupported tool {!r}'.format(tool)
        super().__init__(msg)
        # Keep the offending tool name for programmatic inspection.
        self.tool = tool
|
|
|||
|
|
|||
class UnsupportedCommandError(ValueError):
    """Raised when a requested command is not supported."""

    def __init__(self, cmd):
        msg = 'unsupported cmd {!r}'.format(cmd)
        super().__init__(msg)
        # Keep the offending command name for programmatic inspection.
        self.cmd = cmd
| @@ -0,0 +1,9 @@ | |||
| from collections import namedtuple | |||
|
|
|||
|
|
|||
class TestPath(namedtuple('TestPath', ['root', 'relfile', 'func', 'sub'])):
    """Where to find a single test."""
|
|
|||
|
|
|||
class TestInfo(namedtuple('TestInfo', ['id', 'name', 'path', 'lineno', 'markers'])):
    """Info for a single test."""
| @@ -0,0 +1,150 @@ | |||
| import os.path | |||
|
|
|||
| import pytest | |||
|
|
|||
| from .errors import UnsupportedCommandError | |||
| from .info import TestInfo, TestPath | |||
|
|
|||
|
|
|||
def add_cli_subparser(cmd, name, parent):
    """Add a new subparser to the given parent and add args to it."""
    # The subparser is registered before validating "cmd" (matching the
    # original side-effect order).
    subparser = parent.add_parser(name)
    if cmd != 'discover':
        raise UnsupportedCommandError(cmd)
    # "discover" currently has no tool-specific CLI options to add.
    return subparser
|
|
|||
|
|
|||
def discover(pytestargs=None,
             _pytest_main=pytest.main, _plugin=None):
    """Return the results of test discovery."""
    if _plugin is None:
        _plugin = TestCollector()

    # Run pytest in collect-only mode with our collector plugin attached.
    exitcode = _pytest_main(_adjust_pytest_args(pytestargs), [_plugin])
    if exitcode != 0:
        raise Exception('pytest discovery failed (exit code {})'.format(exitcode))
    if _plugin.discovered is None:
        # The collection hooks never fired.
        raise Exception('pytest discovery did not start')
    return _plugin.discovered
|
|
|||
|
|
|||
| def _adjust_pytest_args(pytestargs): | |||
| pytestargs = list(pytestargs) if pytestargs else [] | |||
| # Duplicate entries should be okay. | |||
| pytestargs.insert(0, '--collect-only') | |||
| pytestargs.insert(0, '-pno:terminal') | |||
| # TODO: pull in code from: | |||
| # src/client/unittests/pytest/services/discoveryService.ts | |||
| # src/client/unittests/pytest/services/argsService.ts | |||
| return pytestargs | |||
|
|
|||
|
|
|||
class TestCollector(object):
    """This is a pytest plugin that collects the discovered tests."""

    # None until a collection hook has fired; then a list of TestInfo.
    discovered = None

    # Relevant plugin hooks:
    #  https://docs.pytest.org/en/latest/reference.html#collection-hooks

    def _record(self, items):
        # Rebuild the list from scratch each time a hook fires.
        self.discovered = []
        for item in items:
            self.discovered.append(_parse_item(item))

    def pytest_collection_modifyitems(self, session, config, items):
        self._record(items)

    # This hook is not specified in the docs, so we also provide
    # the "modifyitems" hook just in case.
    def pytest_collection_finish(self, session):
        try:
            items = session.items
        except AttributeError:
            # TODO: Is there an alternative?
            return
        self._record(items)
|
|
|||
|
|
|||
def _parse_item(item):
    """Return a TestInfo for the given collected pytest item.

    The relevant parts of the pytest node hierarchy:

      (pytest.Collector)
        pytest.Session
        pytest.Package
        pytest.Module
        pytest.Class
        (pytest.File)
      (pytest.Item)
        pytest.Function

    Raises NotImplementedError when the item's location/nodeid do not
    have the expected shape, so unexpected layouts get noticed rather
    than silently mis-reported.
    """
    # Figure out the file.
    filename, lineno, fullname = item.location
    if not str(item.fspath).endswith(os.path.sep + filename):
        raise NotImplementedError
    testroot = str(item.fspath)[:-len(filename)].rstrip(os.path.sep)
    if os.path.sep in filename:
        relfile = filename
    else:
        # Make the path explicitly relative.
        relfile = os.path.join('.', filename)

    # Figure out the func (and subs).
    funcname = item.function.__name__
    parts = item.nodeid.split('::')
    if parts.pop(0) != filename:
        # TODO: What to do?
        raise NotImplementedError
    suites = []
    while len(parts) > 1:
        suites.append(parts.pop(0))
    parameterized = ''
    if '[' in parts[0]:
        # e.g. "test_spam[1-2]" -> func "test_spam", sub "[1-2]".
        _func, sep, parameterized = parts[0].partition('[')
        parameterized = sep + parameterized
        if _func != funcname:
            # TODO: What to do?
            raise NotImplementedError
    if suites:
        testfunc = '.'.join(suites) + '.' + funcname
    else:
        testfunc = funcname
    if fullname != testfunc + parameterized:
        # TODO: What to do?
        raise NotImplementedError

    # Sort out markers.
    #  See: https://docs.pytest.org/en/latest/reference.html#marks
    markers = set()
    for marker in item.own_markers:
        # NOTE: pytest spells the marker "parametrize" (the original
        # checked "parameterize", which never matched).
        if marker.name == 'parametrize':
            # We've already covered these (via the "sub" field).
            continue
        elif marker.name == 'skip':
            markers.add('skip')
        elif marker.name == 'skipif':
            markers.add('skip-if')
        elif marker.name == 'xfail':
            markers.add('expected-failure')
        # TODO: Support other markers?

    return TestInfo(
        id=item.nodeid,
        name=item.name,
        path=TestPath(
            root=testroot,
            relfile=relfile,
            func=testfunc,
            sub=[parameterized] if parameterized else None,
            ),
        lineno=lineno,
        markers=sorted(markers) if markers else None,
        )
| @@ -0,0 +1,26 @@ | |||
| import json | |||
|
|
|||
|
|
|||
def report_discovered(tests, debug=False,
                      _send=print):
    """Serialize the discovered tests and write to stdout."""
    data = []
    for test in tests:
        data.append({
            'id': test.id,
            'name': test.name,
            'testroot': test.path.root,
            'relfile': test.path.relfile,
            'lineno': test.lineno,
            'testfunc': test.path.func,
            'subtest': test.path.sub or None,
            'markers': test.markers or None,
            })
    if debug:
        # human-formatted
        serialized = json.dumps(data,
                                sort_keys=True,
                                indent=4,
                                separators=(',', ': '),
                                )
    else:
        serialized = json.dumps(data)
    _send(serialized)
| @@ -0,0 +1,13 @@ | |||
| # Replace the "." entry. | |||
| import os.path | |||
| import sys | |||
| sys.path[0] = os.path.dirname( | |||
| os.path.dirname( | |||
| os.path.abspath(__file__))) | |||
|
|
|||
| from testing_tools.adapter.__main__ import parse_args, main | |||
|
|
|||
|
|
|||
| if __name__ == '__main__': | |||
| tool, cmd, subargs, toolargs = parse_args() | |||
| main(tool, cmd, subargs, toolargs) | |||
| @@ -7,12 +7,14 @@ | |||
| TEST_ROOT = os.path.dirname(__file__) | |||
| SRC_ROOT = os.path.dirname(TEST_ROOT) | |||
| DATASCIENCE_ROOT = os.path.join(SRC_ROOT, 'datascience') | |||
| TESTING_TOOLS_ROOT = os.path.join(SRC_ROOT, 'testing_tools') | |||
|
|
|||
|
|
|||
| if __name__ == '__main__': | |||
| sys.path.insert(1, DATASCIENCE_ROOT) | |||
| sys.path.insert(1, TESTING_TOOLS_ROOT) | |||
| ec = pytest.main([ | |||
| '--rootdir', SRC_ROOT, | |||
| TEST_ROOT, | |||
| ]) | |||
| ] + sys.argv[1:]) | |||
| sys.exit(ec) | |||
No changes.
No changes.
Oops, something went wrong.