OpenHands: sqlparse
Pytest Summary for the tests suite
status    | count
----------|------
passed    | 26
failed    | 434
xfailed   | 3
total     | 463
collected | 463
Failed tests:
test_cli.py::test_parser_empty
def test_parser_empty():
> with pytest.raises(SystemExit):
E Failed: DID NOT RAISE
tests/test_cli.py:15: Failed
test_cli.py::test_valid_args
filepath = .make_filepath at 0x7ef7adc3a290>
def test_valid_args(filepath):
# test doesn't abort
path = filepath('function.sql')
> assert sqlparse.cli.main([path, '-r']) is not None
tests/test_cli.py:30:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/cli.py:65: in main
data = sqlparse.format(data, reindent=True,
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
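Most of the 434 failures reduce to the traceback above: sqlparse.format() pushes the lexer output through the filter stack, the stream reaching grouping.group() is None, and _group_matching() then fails on tlist.tokens. A minimal sketch that reproduces the symptom from the public API on this build (the SQL string is arbitrary):

import sqlparse

try:
    # Any statement that reaches the grouping stage fails on this build.
    sqlparse.format('select * from foo', reindent=True)
except AttributeError as exc:
    # Matches the failure above:
    # AttributeError: 'NoneType' object has no attribute 'tokens'
    print(exc)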
test_cli.py::test_invalid_args
filepath = .make_filepath at 0x7ef7adb481f0>
capsys = <_pytest.capture.CaptureFixture object at 0x7ef7aed1a440>
def test_invalid_args(filepath, capsys):
path = filepath('function.sql')
> sqlparse.cli.main([path, '-r', '--indent_width', '0'])
tests/test_cli.py:41:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/cli.py:45: in main
args = parser.parse_args(args)
/usr/lib/python3.10/argparse.py:1848: in parse_args
self.error(msg % ' '.join(argv))
/usr/lib/python3.10/argparse.py:2606: in error
self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = ArgumentParser(prog='pytest', usage='%(prog)s [OPTIONS] FILE, ...', description='Format SQL files.', formatter_class=, conflict_handler='error', add_help=True)
status = 2
message = 'pytest: error: unrecognized arguments: --indent_width 0\n'
def exit(self, status=0, message=None):
if message:
self._print_message(message, _sys.stderr)
> _sys.exit(status)
E SystemExit: 2
/usr/lib/python3.10/argparse.py:2593: SystemExit
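Here the failure happens before any SQL is read: on this build the parser does not know --indent_width, so argparse reports "unrecognized arguments" and exits with status 2, and that SystemExit propagates out of the test body. For context, a hedged sketch of how such an exit is usually pinned down in pytest (this is not the project's test; the file argument is a placeholder):

import pytest
import sqlparse.cli

def test_unrecognized_option_exits():
    # argparse exits with status 2 on unrecognized arguments, which is
    # exactly the "SystemExit: 2" captured above.
    with pytest.raises(SystemExit) as excinfo:
        sqlparse.cli.main(['function.sql', '-r', '--indent_width', '0'])
    assert excinfo.value.code == 2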
test_cli.py::test_invalid_infile
args = Namespace(files=['/testbed/tests/files/missing.sql'], outfile=None, reindent=True, language=None, encoding='utf-8', indent_width=2)
def main(args=None):
"""Main entry point."""
parser = create_parser()
args = parser.parse_args(args)
if not args.files:
parser.print_help()
sys.exit(1)
encoding = args.encoding
if encoding == 'utf-8':
# Python 3 reads files as utf-8 by default
encoding = None
for file_ in args.files:
try:
> with open(file_, 'r', encoding=encoding) as f:
E FileNotFoundError: [Errno 2] No such file or directory: '/testbed/tests/files/missing.sql'
sqlparse/cli.py:58: FileNotFoundError
During handling of the above exception, another exception occurred:
filepath = .make_filepath at 0x7ef7adc3a320>
capsys = <_pytest.capture.CaptureFixture object at 0x7ef7ad403d00>
def test_invalid_infile(filepath, capsys):
path = filepath('missing.sql')
> sqlparse.cli.main([path, '-r'])
tests/test_cli.py:49:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/cli.py:61: in main
_error('Failed to read {}: {}'.format(file_, e))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
msg = "Failed to read /testbed/tests/files/missing.sql: [Errno 2] No such file or directory: '/testbed/tests/files/missing.sql'"
def _error(msg):
"""Print msg and optionally exit with return code exit_."""
sys.stderr.write(msg + '\n')
> sys.exit(1)
E SystemExit: 1
sqlparse/cli.py:22: SystemExit
test_cli.py::test_invalid_outfile
filepath = .make_filepath at 0x7ef7adb48040>
capsys = <_pytest.capture.CaptureFixture object at 0x7ef7ae297550>
def test_invalid_outfile(filepath, capsys):
path = filepath('function.sql')
outpath = filepath('/missing/function.sql')
> sqlparse.cli.main([path, '-r', '-o', outpath])
tests/test_cli.py:57:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/cli.py:65: in main
data = sqlparse.format(data, reindent=True,
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_cli.py::test_encoding_stdin[encoding_utf8.sql-utf-8]
args = Namespace(files=['-'], outfile=None, reindent=False, language=None, encoding='utf-8', indent_width=2)
def main(args=None):
"""Main entry point."""
parser = create_parser()
args = parser.parse_args(args)
if not args.files:
parser.print_help()
sys.exit(1)
encoding = args.encoding
if encoding == 'utf-8':
# Python 3 reads files as utf-8 by default
encoding = None
for file_ in args.files:
try:
> with open(file_, 'r', encoding=encoding) as f:
E FileNotFoundError: [Errno 2] No such file or directory: '-'
sqlparse/cli.py:58: FileNotFoundError
During handling of the above exception, another exception occurred:
fpath = 'encoding_utf8.sql', encoding = 'utf-8'
filepath = .make_filepath at 0x7ef7adb48820>
load_file = .make_load_file at 0x7ef7adb49360>
capfd = <_pytest.capture.CaptureFixture object at 0x7ef7aeabd210>
@pytest.mark.parametrize('fpath, encoding', (
('encoding_utf8.sql', 'utf-8'),
('encoding_gbk.sql', 'gbk'),
))
def test_encoding_stdin(fpath, encoding, filepath, load_file, capfd):
path = filepath(fpath)
expected = load_file(fpath, encoding)
old_stdin = sys.stdin
with open(path) as f:
sys.stdin = f
> sqlparse.cli.main(['-', '--encoding', encoding])
tests/test_cli.py:111:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/cli.py:61: in main
_error('Failed to read {}: {}'.format(file_, e))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
msg = "Failed to read -: [Errno 2] No such file or directory: '-'"
def _error(msg):
"""Print msg and optionally exit with return code exit_."""
sys.stderr.write(msg + '\n')
> sys.exit(1)
E SystemExit: 1
sqlparse/cli.py:22: SystemExit
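Both test_encoding_stdin parametrizations fail the same way: the test redirects sys.stdin and passes '-', but main() treats '-' as a literal path, so open('-') raises FileNotFoundError and _error() exits with status 1. A small sketch of the conventional stdin fallback the test appears to expect (the helper name is illustrative, not part of sqlparse.cli):

import sys

def _read_input(file_, encoding=None):
    # Conventional CLI behaviour: a file argument of '-' means standard input.
    if file_ == '-':
        return sys.stdin.read()
    with open(file_, 'r', encoding=encoding) as f:
        return f.read()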
test_cli.py::test_encoding_stdin[encoding_gbk.sql-gbk]
args = Namespace(files=['-'], outfile=None, reindent=False, language=None, encoding='gbk', indent_width=2)
def main(args=None):
"""Main entry point."""
parser = create_parser()
args = parser.parse_args(args)
if not args.files:
parser.print_help()
sys.exit(1)
encoding = args.encoding
if encoding == 'utf-8':
# Python 3 reads files as utf-8 by default
encoding = None
for file_ in args.files:
try:
> with open(file_, 'r', encoding=encoding) as f:
E FileNotFoundError: [Errno 2] No such file or directory: '-'
sqlparse/cli.py:58: FileNotFoundError
During handling of the above exception, another exception occurred:
fpath = 'encoding_gbk.sql', encoding = 'gbk'
filepath = .make_filepath at 0x7ef7adc3a320>
load_file = .make_load_file at 0x7ef7adb49240>
capfd = <_pytest.capture.CaptureFixture object at 0x7ef7ae2a5c00>
@pytest.mark.parametrize('fpath, encoding', (
('encoding_utf8.sql', 'utf-8'),
('encoding_gbk.sql', 'gbk'),
))
def test_encoding_stdin(fpath, encoding, filepath, load_file, capfd):
path = filepath(fpath)
expected = load_file(fpath, encoding)
old_stdin = sys.stdin
with open(path) as f:
sys.stdin = f
> sqlparse.cli.main(['-', '--encoding', encoding])
tests/test_cli.py:111:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/cli.py:61: in main
_error('Failed to read {}: {}'.format(file_, e))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
msg = "Failed to read -: [Errno 2] No such file or directory: '-'"
def _error(msg):
"""Print msg and optionally exit with return code exit_."""
sys.stderr.write(msg + '\n')
> sys.exit(1)
E SystemExit: 1
sqlparse/cli.py:22: SystemExit
test_format.py::TestFormat::test_keywordcase
self =
def test_keywordcase(self):
sql = 'select * from bar; -- select foo\n'
> res = sqlparse.format(sql, keyword_case='upper')
tests/test_format.py:10:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select * from bar; -- select foo\n', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'KeywordCaseFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
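test_keywordcase dies one stage earlier: FilterStack.run() loops over self.preprocess and calls filter_.process(stream) on each filter, but the KeywordCaseFilter on this build exposes no process method (the StripCommentsFilter and StripWhitespaceFilter failures below have the same shape). A rough sketch of the token-stream filter interface run() evidently expects; the class name and keyword handling are illustrative, not sqlparse's implementation:

from sqlparse import tokens as T

class UppercaseKeywordsFilter:
    """Illustrative preprocess filter: upper-cases keyword tokens."""

    def process(self, stream):
        # The lexer yields (ttype, value) pairs; a preprocess filter
        # consumes that stream and yields a (possibly modified) one.
        for ttype, value in stream:
            if ttype in T.Keyword:
                value = value.upper()
            yield ttype, value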
test_format.py::TestFormat::test_identifiercase
self =
def test_identifiercase(self):
sql = 'select * from bar; -- select foo\n'
> res = sqlparse.format(sql, identifier_case='upper')
tests/test_format.py:24:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormat::test_identifiercase_invalid_option
self =
def test_identifiercase_invalid_option(self):
sql = 'select * from bar; -- select foo\n'
with pytest.raises(SQLParseError):
> sqlparse.format(sql, identifier_case='foo')
tests/test_format.py:34:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormat::test_identifiercase_quotes
self =
def test_identifiercase_quotes(self):
sql = 'select * from "foo"."bar"'
> res = sqlparse.format(sql, identifier_case="upper")
tests/test_format.py:38:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormat::test_strip_comments_single
self =
def test_strip_comments_single(self):
sql = 'select *-- statement starts here\nfrom foo'
> res = sqlparse.format(sql, strip_comments=True)
tests/test_format.py:43:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select *-- statement starts here\nfrom foo', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'StripCommentsFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
test_format.py::TestFormat::test_strip_comments_invalid_option
self =
def test_strip_comments_invalid_option(self):
sql = 'select-- foo\nfrom -- bar\nwhere'
with pytest.raises(SQLParseError):
> sqlparse.format(sql, strip_comments=None)
tests/test_format.py:68:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormat::test_strip_comments_multi
self =
def test_strip_comments_multi(self):
sql = '/* sql starts here */\nselect'
> res = sqlparse.format(sql, strip_comments=True)
tests/test_format.py:72:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = '/* sql starts here */\nselect', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'StripCommentsFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
test_format.py::TestFormat::test_strip_comments_preserves_linebreak
self =
def test_strip_comments_preserves_linebreak(self):
sql = 'select * -- a comment\r\nfrom foo'
> res = sqlparse.format(sql, strip_comments=True)
tests/test_format.py:89:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select * -- a comment\r\nfrom foo', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'StripCommentsFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
test_format.py::TestFormat::test_strip_comments_preserves_whitespace
self =
def test_strip_comments_preserves_whitespace(self):
sql = 'SELECT 1/*bar*/ AS foo' # see issue772
> res = sqlparse.format(sql, strip_comments=True)
tests/test_format.py:106:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'SELECT 1/*bar*/ AS foo', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'StripCommentsFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
test_format.py::TestFormat::test_strip_ws
self =
def test_strip_ws(self):
f = lambda sql: sqlparse.format(sql, strip_whitespace=True)
s = 'select\n* from foo\n\twhere ( 1 = 2 )\n'
> assert f(s) == 'select * from foo where (1 = 2)'
tests/test_format.py:112:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:110: in
f = lambda sql: sqlparse.format(sql, strip_whitespace=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select\n* from foo\n\twhere ( 1 = 2 )\n', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'StripWhitespaceFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
test_format.py::TestFormat::test_strip_ws_invalid_option
self =
def test_strip_ws_invalid_option(self):
s = 'select -- foo\nfrom bar\n'
with pytest.raises(SQLParseError):
> sqlparse.format(s, strip_whitespace=None)
tests/test_format.py:119:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormat::test_preserve_ws
self =
def test_preserve_ws(self):
# preserve at least one whitespace after subgroups
f = lambda sql: sqlparse.format(sql, strip_whitespace=True)
s = 'select\n* /* foo */ from bar '
> assert f(s) == 'select * /* foo */ from bar'
tests/test_format.py:125:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:123: in
f = lambda sql: sqlparse.format(sql, strip_whitespace=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select\n* /* foo */ from bar ', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'StripWhitespaceFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
test_format.py::TestFormat::test_notransform_of_quoted_crlf
self =
def test_notransform_of_quoted_crlf(self):
# Make sure that CR/CR+LF characters inside string literals don't get
# affected by the formatter.
s1 = "SELECT some_column LIKE 'value\r'"
s2 = "SELECT some_column LIKE 'value\r'\r\nWHERE id = 1\n"
s3 = "SELECT some_column LIKE 'value\\'\r' WHERE id = 1\r"
s4 = "SELECT some_column LIKE 'value\\\\\\'\r' WHERE id = 1\r\n"
f = lambda x: sqlparse.format(x)
# Because of the use of
> assert f(s1) == "SELECT some_column LIKE 'value\r'"
tests/test_format.py:139:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:136: in
f = lambda x: sqlparse.format(x)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindentAligned::test_basic
self =
def test_basic(self):
sql = """
select a, b as bb,c from table
join (select a * 2 as a from new_table) other
on table.a = other.a
where c is true
and b between 3 and 4
or d is 'blue'
limit 10
"""
> assert self.formatter(sql) == '\n'.join([
'select a,',
' b as bb,',
' c',
' from table',
' join (',
' select a * 2 as a',
' from new_table',
' ) other',
' on table.a = other.a',
' where c is true',
' and b between 3 and 4',
" or d is 'blue'",
' limit 10'])
tests/test_format.py:162:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:149: in formatter
return sqlparse.format(sql, reindent_aligned=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindentAligned::test_joins
self =
def test_joins(self):
sql = """
select * from a
join b on a.one = b.one
left join c on c.two = a.two and c.three = a.three
full outer join d on d.three = a.three
cross join e on e.four = a.four
join f using (one, two, three)
"""
> assert self.formatter(sql) == '\n'.join([
'select *',
' from a',
' join b',
' on a.one = b.one',
' left join c',
' on c.two = a.two',
' and c.three = a.three',
' full outer join d',
' on d.three = a.three',
' cross join e',
' on e.four = a.four',
' join f using (one, two, three)'])
tests/test_format.py:186:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:149: in formatter
return sqlparse.format(sql, reindent_aligned=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindentAligned::test_case_statement
self =
def test_case_statement(self):
sql = """
select a,
case when a = 0
then 1
when bb = 1 then 1
when c = 2 then 2
else 0 end as d,
extra_col
from table
where c is true
and b between 3 and 4
"""
> assert self.formatter(sql) == '\n'.join([
'select a,',
' case when a = 0 then 1',
' when bb = 1 then 1',
' when c = 2 then 2',
' else 0',
' end as d,',
' extra_col',
' from table',
' where c is true',
' and b between 3 and 4'])
tests/test_format.py:213:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:149: in formatter
return sqlparse.format(sql, reindent_aligned=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindentAligned::test_case_statement_with_between
self =
def test_case_statement_with_between(self):
sql = """
select a,
case when a = 0
then 1
when bb = 1 then 1
when c = 2 then 2
when d between 3 and 5 then 3
else 0 end as d,
extra_col
from table
where c is true
and b between 3 and 4
"""
> assert self.formatter(sql) == '\n'.join([
'select a,',
' case when a = 0 then 1',
' when bb = 1 then 1',
' when c = 2 then 2',
' when d between 3 and 5 then 3',
' else 0',
' end as d,',
' extra_col',
' from table',
' where c is true',
' and b between 3 and 4'])
tests/test_format.py:239:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:149: in formatter
return sqlparse.format(sql, reindent_aligned=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindentAligned::test_group_by
self =
def test_group_by(self):
sql = """
select a, b, c, sum(x) as sum_x, count(y) as cnt_y
from table
group by a,b,c
having sum(x) > 1
and count(y) > 5
order by 3,2,1
"""
> assert self.formatter(sql) == '\n'.join([
'select a,',
' b,',
' c,',
' sum(x) as sum_x,',
' count(y) as cnt_y',
' from table',
' group by a,',
' b,',
' c',
'having sum(x) > 1',
' and count(y) > 5',
' order by 3,',
' 2,',
' 1'])
tests/test_format.py:261:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:149: in formatter
return sqlparse.format(sql, reindent_aligned=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindentAligned::test_group_by_subquery
self =
def test_group_by_subquery(self):
# TODO: add subquery alias when test_identifier_list_subquery fixed
sql = """
select *, sum_b + 2 as mod_sum
from (
select a, sum(b) as sum_b
from table
group by a,z)
order by 1,2
"""
> assert self.formatter(sql) == '\n'.join([
'select *,',
' sum_b + 2 as mod_sum',
' from (',
' select a,',
' sum(b) as sum_b',
' from table',
' group by a,',
' z',
' )',
' order by 1,',
' 2'])
tests/test_format.py:287:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:149: in formatter
return sqlparse.format(sql, reindent_aligned=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindentAligned::test_window_functions
self =
def test_window_functions(self):
sql = """
select a,
SUM(a) OVER (PARTITION BY b ORDER BY c ROWS
BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as sum_a,
ROW_NUMBER() OVER
(PARTITION BY b, c ORDER BY d DESC) as row_num
from table"""
> assert self.formatter(sql) == '\n'.join([
'select a,',
' SUM(a) OVER (PARTITION BY b ORDER BY c ROWS '
'BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as sum_a,',
' ROW_NUMBER() OVER '
'(PARTITION BY b, c ORDER BY d DESC) as row_num',
' from table'])
tests/test_format.py:308:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:149: in formatter
return sqlparse.format(sql, reindent_aligned=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestSpacesAroundOperators::test_basic
self =
def test_basic(self):
sql = ('select a+b as d from table '
'where (c-d)%2= 1 and e> 3.0/4 and z^2 <100')
> assert self.formatter(sql) == (
'select a + b as d from table '
'where (c - d) % 2 = 1 and e > 3.0 / 4 and z ^ 2 < 100')
tests/test_format.py:325:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:320: in formatter
return sqlparse.format(sql, use_space_around_operators=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestSpacesAroundOperators::test_bools
self =
def test_bools(self):
sql = 'select * from table where a &&b or c||d'
> assert self.formatter(
sql) == 'select * from table where a && b or c || d'
tests/test_format.py:331:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:320: in formatter
return sqlparse.format(sql, use_space_around_operators=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestSpacesAroundOperators::test_nested
self =
def test_nested(self):
sql = 'select *, case when a-b then c end from table'
> assert self.formatter(
sql) == 'select *, case when a - b then c end from table'
tests/test_format.py:336:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:320: in formatter
return sqlparse.format(sql, use_space_around_operators=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestSpacesAroundOperators::test_wildcard_vs_mult
self =
def test_wildcard_vs_mult(self):
sql = 'select a*b-c from table'
> assert self.formatter(sql) == 'select a * b - c from table'
tests/test_format.py:341:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:320: in formatter
return sqlparse.format(sql, use_space_around_operators=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_option
self =
def test_option(self):
with pytest.raises(SQLParseError):
sqlparse.format('foo', reindent=2)
with pytest.raises(SQLParseError):
> sqlparse.format('foo', indent_tabs=2)
tests/test_format.py:349:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_stmts
self =
def test_stmts(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select foo; select bar'
> assert f(s) == 'select foo;\n\nselect bar'
tests/test_format.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:362: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_keywords
self =
def test_keywords(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select * from foo union select * from bar;'
> assert f(s) == '\n'.join([
'select *',
'from foo',
'union',
'select *',
'from bar;'])
tests/test_format.py:373:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:371: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_keywords_between
self =
def test_keywords_between(self):
# issue 14
# don't break AND after BETWEEN
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'and foo between 1 and 2 and bar = 3'
> assert f(s) == '\n'.join([
'',
'and foo between 1 and 2',
'and bar = 3'])
tests/test_format.py:385:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:383: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_parenthesis
self =
def test_parenthesis(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select count(*) from (select * from foo);'
> assert f(s) == '\n'.join([
'select count(*)',
'from',
' (select *',
' from foo);'])
tests/test_format.py:393:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:391: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_where
self =
def test_where(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select * from foo where bar = 1 and baz = 2 or bzz = 3;'
> assert f(s) == '\n'.join([
'select *',
'from foo',
'where bar = 1',
' and baz = 2',
' or bzz = 3;'])
tests/test_format.py:407:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:405: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_join
self =
def test_join(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select * from foo join bar on 1 = 2'
> assert f(s) == '\n'.join([
'select *',
'from foo',
'join bar on 1 = 2'])
tests/test_format.py:425:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:423: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_identifier_list
self =
def test_identifier_list(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select foo, bar, baz from table1, table2 where 1 = 2'
> assert f(s) == '\n'.join([
'select foo,',
' bar,',
' baz',
'from table1,',
' table2',
'where 1 = 2'])
tests/test_format.py:448:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:446: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_identifier_list_with_wrap_after
self =
def test_identifier_list_with_wrap_after(self):
f = lambda sql: sqlparse.format(sql, reindent=True, wrap_after=14)
s = 'select foo, bar, baz from table1, table2 where 1 = 2'
> assert f(s) == '\n'.join([
'select foo, bar,',
' baz',
'from table1, table2',
'where 1 = 2'])
tests/test_format.py:465:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:463: in
f = lambda sql: sqlparse.format(sql, reindent=True, wrap_after=14)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_identifier_list_comment_first
self =
def test_identifier_list_comment_first(self):
f = lambda sql: sqlparse.format(sql, reindent=True, comma_first=True)
# not the 3: It cleans up whitespace too!
s = 'select foo, bar, baz from table where foo in (1, 2,3)'
> assert f(s) == '\n'.join([
'select foo',
' , bar',
' , baz',
'from table',
'where foo in (1',
' , 2',
' , 3)'])
tests/test_format.py:475:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:472: in
f = lambda sql: sqlparse.format(sql, reindent=True, comma_first=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_identifier_list_with_functions
self =
def test_identifier_list_with_functions(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = ("select 'abc' as foo, coalesce(col1, col2)||col3 as bar,"
"col3 from my_table")
> assert f(s) == '\n'.join([
"select 'abc' as foo,",
" coalesce(col1, col2)||col3 as bar,",
" col3",
"from my_table"])
tests/test_format.py:488:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:485: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_long_identifier_list_with_functions
self =
def test_long_identifier_list_with_functions(self):
f = lambda sql: sqlparse.format(sql, reindent=True, wrap_after=30)
s = ("select 'abc' as foo, json_build_object('a', a,"
"'b', b, 'c', c, 'd', d, 'e', e) as col2"
"col3 from my_table")
> assert f(s) == '\n'.join([
"select 'abc' as foo,",
" json_build_object('a',",
" a, 'b', b, 'c', c, 'd', d,",
" 'e', e) as col2col3",
"from my_table"])
tests/test_format.py:499:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:495: in
f = lambda sql: sqlparse.format(sql, reindent=True, wrap_after=30)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_case
self =
def test_case(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'case when foo = 1 then 2 when foo = 3 then 4 else 5 end'
> assert f(s) == '\n'.join([
'case',
' when foo = 1 then 2',
' when foo = 3 then 4',
' else 5',
'end'])
tests/test_format.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:507: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_case2
self =
def test_case2(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'case(foo) when bar = 1 then 2 else 3 end'
> assert f(s) == '\n'.join([
'case(foo)',
' when bar = 1 then 2',
' else 3',
'end'])
tests/test_format.py:519:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:517: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_nested_identifier_list
self =
def test_nested_identifier_list(self):
# issue4
f = lambda sql: sqlparse.format(sql, reindent=True)
s = '(foo as bar, bar1, bar2 as bar3, b4 as b5)'
> assert f(s) == '\n'.join([
'(foo as bar,',
' bar1,',
' bar2 as bar3,',
' b4 as b5)'])
tests/test_format.py:529:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:527: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_duplicate_linebreaks
self =
def test_duplicate_linebreaks(self):
# issue3
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select c1 -- column1\nfrom foo'
> assert f(s) == '\n'.join([
'select c1 -- column1',
'from foo'])
tests/test_format.py:539:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:537: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_keywordfunctions
self =
def test_keywordfunctions(self):
# issue36
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select max(a) b, foo, bar'
> assert f(s) == '\n'.join([
'select max(a) b,',
' foo,',
' bar'])
tests/test_format.py:563:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:561: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_identifier_and_functions
self =
def test_identifier_and_functions(self):
# issue45
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select foo.bar, nvl(1) from dual'
> assert f(s) == '\n'.join([
'select foo.bar,',
' nvl(1)',
'from dual'])
tests/test_format.py:572:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:570: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestFormatReindent::test_insert_values
self =
def test_insert_values(self):
# issue 329
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'insert into foo values (1, 2)'
> assert f(s) == '\n'.join([
'insert into foo',
'values (1, 2)'])
tests/test_format.py:581:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:579: in
f = lambda sql: sqlparse.format(sql, reindent=True)
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestOutputFormat::test_python
self =
def test_python(self):
sql = 'select * from foo;'
f = lambda sql: sqlparse.format(sql, output_format='python')
> assert f(sql) == "sql = 'select * from foo;'"
tests/test_format.py:625:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:624: in
f = lambda sql: sqlparse.format(sql, output_format='python')
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestOutputFormat::test_python_multiple_statements
self =
def test_python_multiple_statements(self):
sql = 'select * from foo; select 1 from dual'
f = lambda sql: sqlparse.format(sql, output_format='python')
> assert f(sql) == '\n'.join([
"sql = 'select * from foo; '",
"sql2 = 'select 1 from dual'"])
tests/test_format.py:635:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:634: in
f = lambda sql: sqlparse.format(sql, output_format='python')
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestOutputFormat::test_python_multiple_statements_with_formatting
self =
@pytest.mark.xfail(reason="Needs fixing")
def test_python_multiple_statements_with_formatting(self):
sql = 'select * from foo; select 1 from dual'
f = lambda sql: sqlparse.format(sql, output_format='python',
reindent=True)
> assert f(sql) == '\n'.join([
"sql = ('select * '",
" 'from foo;')",
"sql2 = ('select 1 '",
" 'from dual')"])
tests/test_format.py:644:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:642: in
f = lambda sql: sqlparse.format(sql, output_format='python',
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestOutputFormat::test_php
self =
def test_php(self):
sql = 'select * from foo;'
f = lambda sql: sqlparse.format(sql, output_format='php')
> assert f(sql) == '$sql = "select * from foo;";'
tests/test_format.py:653:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:652: in
f = lambda sql: sqlparse.format(sql, output_format='php')
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestOutputFormat::test_sql
self =
def test_sql(self):
# "sql" is an allowed option but has no effect
sql = 'select * from foo;'
f = lambda sql: sqlparse.format(sql, output_format='sql')
> assert f(sql) == 'select * from foo;'
tests/test_format.py:664:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_format.py:663: in <lambda>
f = lambda sql: sqlparse.format(sql, output_format='sql')
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::TestOutputFormat::test_invalid_option
self =
def test_invalid_option(self):
sql = 'select * from foo;'
with pytest.raises(SQLParseError):
> sqlparse.format(sql, output_format='foo')
tests/test_format.py:669:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::test_format_column_ordering
def test_format_column_ordering():
# issue89
sql = 'select * from foo order by c1 desc, c2, c3;'
> formatted = sqlparse.format(sql, reindent=True)
tests/test_format.py:675:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::test_truncate_strings
def test_truncate_strings():
sql = "update foo set value = '{}';".format('x' * 1000)
> formatted = sqlparse.format(sql, truncate_strings=10)
tests/test_format.py:687:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::test_truncate_strings_invalid_option2[bar]
option = 'bar'
@pytest.mark.parametrize('option', ['bar', -1, 0])
def test_truncate_strings_invalid_option2(option):
with pytest.raises(SQLParseError):
> sqlparse.format('foo', truncate_strings=option)
tests/test_format.py:696:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::test_truncate_strings_invalid_option2[-1]
option = -1
@pytest.mark.parametrize('option', ['bar', -1, 0])
def test_truncate_strings_invalid_option2(option):
with pytest.raises(SQLParseError):
> sqlparse.format('foo', truncate_strings=option)
tests/test_format.py:696:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::test_truncate_strings_invalid_option2[0]
option = 0
@pytest.mark.parametrize('option', ['bar', -1, 0])
def test_truncate_strings_invalid_option2(option):
with pytest.raises(SQLParseError):
> sqlparse.format('foo', truncate_strings=option)
tests/test_format.py:696:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::test_truncate_strings_doesnt_truncate_identifiers[select verrrylongcolumn from foo]
sql = 'select verrrylongcolumn from foo'
@pytest.mark.parametrize('sql', [
'select verrrylongcolumn from foo',
'select "verrrylongcolumn" from "foo"'])
def test_truncate_strings_doesnt_truncate_identifiers(sql):
> formatted = sqlparse.format(sql, truncate_strings=2)
tests/test_format.py:703:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::test_truncate_strings_doesnt_truncate_identifiers[select "verrrylongcolumn" from "foo"]
sql = 'select "verrrylongcolumn" from "foo"'
@pytest.mark.parametrize('sql', [
'select verrrylongcolumn from foo',
'select "verrrylongcolumn" from "foo"'])
def test_truncate_strings_doesnt_truncate_identifiers(sql):
> formatted = sqlparse.format(sql, truncate_strings=2)
tests/test_format.py:703:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::test_having_produces_newline
def test_having_produces_newline():
sql = ('select * from foo, bar where bar.id = foo.bar_id '
'having sum(bar.value) > 100')
> formatted = sqlparse.format(sql, reindent=True)
tests/test_format.py:710:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::test_format_right_margin_invalid_option[ten]
right_margin = 'ten'
@pytest.mark.parametrize('right_margin', ['ten', 2])
def test_format_right_margin_invalid_option(right_margin):
with pytest.raises(SQLParseError):
> sqlparse.format('foo', right_margin=right_margin)
tests/test_format.py:723:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'foo', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'RightMarginFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
test_format.py::test_format_right_margin_invalid_option[2]
right_margin = 2
@pytest.mark.parametrize('right_margin', ['ten', 2])
def test_format_right_margin_invalid_option(right_margin):
with pytest.raises(SQLParseError):
> sqlparse.format('foo', right_margin=right_margin)
tests/test_format.py:723:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'foo', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'RightMarginFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
test_format.py::test_format_right_margin
@pytest.mark.xfail(reason="Needs fixing")
def test_format_right_margin():
# TODO: Needs better test, only raises exception right now
> sqlparse.format('foo', right_margin="79")
tests/test_format.py:729:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'foo', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'RightMarginFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
test_format.py::test_format_json_ops
def test_format_json_ops(): # issue542
> formatted = sqlparse.format(
"select foo->'bar', foo->'bar';", reindent=True)
tests/test_format.py:733:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::test_compact[case when foo then 1 else bar end-case\n when foo then 1\n else bar\nend-case when foo then 1 else bar end]
sql = 'case when foo then 1 else bar end'
expected_normal = 'case\n when foo then 1\n else bar\nend'
expected_compact = 'case when foo then 1 else bar end'
@pytest.mark.parametrize('sql, expected_normal, expected_compact', [
('case when foo then 1 else bar end',
'case\n when foo then 1\n else bar\nend',
'case when foo then 1 else bar end')])
def test_compact(sql, expected_normal, expected_compact): # issue783
> formatted_normal = sqlparse.format(sql, reindent=True)
tests/test_format.py:744:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_format.py::test_strip_ws_removes_trailing_ws_in_groups
def test_strip_ws_removes_trailing_ws_in_groups(): # issue782
> formatted = sqlparse.format('( where foo = bar ) from',
strip_whitespace=True)
tests/test_format.py:751:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = '( where foo = bar ) from', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'StripWhitespaceFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
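The RightMarginFilter and StripWhitespaceFilter failures above are a separate breakage: the preprocess loop shown in the traceback (sqlparse/engine/filter_stack.py:24) calls filter_.process(stream) on every object in stack.preprocess, and these two filter instances no longer expose a process attribute. A minimal sketch of the duck type that loop assumes; the class name below is illustrative, not part of sqlparse:

class PassThroughFilter:
    # A preprocess filter receives the lexer's (ttype, value) stream and
    # must return or yield a stream of the same shape.
    def process(self, stream):
        for ttype, value in stream:
            # A real filter would inspect or rewrite tokens here.
            yield ttype, value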
test_grouping.py::test_grouping_parenthesis
def test_grouping_parenthesis():
s = 'select (select (x3) x2) and (y2) bar'
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:9:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
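The test_grouping.py failures, beginning with test_grouping_parenthesis above, enter through sqlparse.parse() rather than sqlparse.format(), but they die in the identical grouping.group() frame before any grouping assertion runs. For reference, what the parse path yields once grouping works, a sketch based on sqlparse's documented API rather than on this checkout:

import sqlparse

# parse() returns a tuple of Statement objects; each Statement exposes the
# grouped token tree via .tokens (including the Parenthesis groups this
# test inspects).
stmt = sqlparse.parse('select (select (x3) x2) and (y2) bar')[0]
print(type(stmt).__name__)  # Statement
print(stmt.tokens)          # grouped tokens on a working sqlparse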
test_grouping.py::test_grouping_assignment[foo := 1;]
s = 'foo := 1;'
@pytest.mark.parametrize('s', ['foo := 1;', 'foo := 1'])
def test_grouping_assignment(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:22:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_assignment[foo := 1]
s = 'foo := 1'
@pytest.mark.parametrize('s', ['foo := 1;', 'foo := 1'])
def test_grouping_assignment(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:22:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_typed_literal[x > DATE '2020-01-01']
s = "x > DATE '2020-01-01'"
@pytest.mark.parametrize('s', ["x > DATE '2020-01-01'", "x > TIMESTAMP '2020-01-01 00:00:00'"])
def test_grouping_typed_literal(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:29:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_typed_literal[x > TIMESTAMP '2020-01-01 00:00:00']
s = "x > TIMESTAMP '2020-01-01 00:00:00'"
@pytest.mark.parametrize('s', ["x > DATE '2020-01-01'", "x > TIMESTAMP '2020-01-01 00:00:00'"])
def test_grouping_typed_literal(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:29:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_compare_expr[select a from b where c < d + e-Identifier-Identifier]
s = 'select a from b where c < d + e', a =
b =
@pytest.mark.parametrize('s, a, b', [
('select a from b where c < d + e', sql.Identifier, sql.Identifier),
('select a from b where c < d + interval \'1 day\'', sql.Identifier, sql.TypedLiteral),
('select a from b where c < d + interval \'6\' month', sql.Identifier, sql.TypedLiteral),
('select a from b where c < current_timestamp - interval \'1 day\'', sql.Token, sql.TypedLiteral),
])
def test_compare_expr(s, a, b):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:40:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_compare_expr[select a from b where c < d + interval '1 day'-Identifier-TypedLiteral]
s = "select a from b where c < d + interval '1 day'"
a = , b =
@pytest.mark.parametrize('s, a, b', [
('select a from b where c < d + e', sql.Identifier, sql.Identifier),
('select a from b where c < d + interval \'1 day\'', sql.Identifier, sql.TypedLiteral),
('select a from b where c < d + interval \'6\' month', sql.Identifier, sql.TypedLiteral),
('select a from b where c < current_timestamp - interval \'1 day\'', sql.Token, sql.TypedLiteral),
])
def test_compare_expr(s, a, b):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:40:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_compare_expr[select a from b where c < d + interval '6' month-Identifier-TypedLiteral]
s = "select a from b where c < d + interval '6' month"
a = , b =
@pytest.mark.parametrize('s, a, b', [
('select a from b where c < d + e', sql.Identifier, sql.Identifier),
('select a from b where c < d + interval \'1 day\'', sql.Identifier, sql.TypedLiteral),
('select a from b where c < d + interval \'6\' month', sql.Identifier, sql.TypedLiteral),
('select a from b where c < current_timestamp - interval \'1 day\'', sql.Token, sql.TypedLiteral),
])
def test_compare_expr(s, a, b):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:40:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_compare_expr[select a from b where c < current_timestamp - interval '1 day'-Token-TypedLiteral]
s = "select a from b where c < current_timestamp - interval '1 day'"
a = , b =
@pytest.mark.parametrize('s, a, b', [
('select a from b where c < d + e', sql.Identifier, sql.Identifier),
('select a from b where c < d + interval \'1 day\'', sql.Identifier, sql.TypedLiteral),
('select a from b where c < d + interval \'6\' month', sql.Identifier, sql.TypedLiteral),
('select a from b where c < current_timestamp - interval \'1 day\'', sql.Token, sql.TypedLiteral),
])
def test_compare_expr(s, a, b):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:40:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifiers
def test_grouping_identifiers():
s = 'select foo.bar from "myscheme"."table" where fail. order'
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:63:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_simple_identifiers[1 as f]
s = '1 as f'
@pytest.mark.parametrize('s', [
'1 as f',
'foo as f',
'foo f',
'1/2 as f',
'1/2 f',
'1<2 as f', # issue327
'1<2 f',
])
def test_simple_identifiers(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:105:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_simple_identifiers[foo as f]
s = 'foo as f'
@pytest.mark.parametrize('s', [
'1 as f',
'foo as f',
'foo f',
'1/2 as f',
'1/2 f',
'1<2 as f', # issue327
'1<2 f',
])
def test_simple_identifiers(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:105:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_simple_identifiers[foo f]
s = 'foo f'
@pytest.mark.parametrize('s', [
'1 as f',
'foo as f',
'foo f',
'1/2 as f',
'1/2 f',
'1<2 as f', # issue327
'1<2 f',
])
def test_simple_identifiers(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:105:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_simple_identifiers[1/2 as f]
s = '1/2 as f'
@pytest.mark.parametrize('s', [
'1 as f',
'foo as f',
'foo f',
'1/2 as f',
'1/2 f',
'1<2 as f', # issue327
'1<2 f',
])
def test_simple_identifiers(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:105:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_simple_identifiers[1/2 f]
s = '1/2 f'
@pytest.mark.parametrize('s', [
'1 as f',
'foo as f',
'foo f',
'1/2 as f',
'1/2 f',
'1<2 as f', # issue327
'1<2 f',
])
def test_simple_identifiers(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:105:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_simple_identifiers[1<2 as f]
s = '1<2 as f'
@pytest.mark.parametrize('s', [
'1 as f',
'foo as f',
'foo f',
'1/2 as f',
'1/2 f',
'1<2 as f', # issue327
'1<2 f',
])
def test_simple_identifiers(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:105:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_simple_identifiers[1<2 f]
s = '1<2 f'
@pytest.mark.parametrize('s', [
'1 as f',
'foo as f',
'foo f',
'1/2 as f',
'1/2 f',
'1<2 as f', # issue327
'1<2 f',
])
def test_simple_identifiers(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:105:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_group_identifier_list[foo, bar]
s = 'foo, bar'
@pytest.mark.parametrize('s', [
'foo, bar',
'sum(a), sum(b)',
'sum(a) as x, b as y',
'sum(a)::integer, b',
'sum(a)/count(b) as x, y',
'sum(a)::integer as x, y',
'sum(a)::integer/count(b) as x, y', # issue297
])
def test_group_identifier_list(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:119:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_group_identifier_list[sum(a), sum(b)]
s = 'sum(a), sum(b)'
@pytest.mark.parametrize('s', [
'foo, bar',
'sum(a), sum(b)',
'sum(a) as x, b as y',
'sum(a)::integer, b',
'sum(a)/count(b) as x, y',
'sum(a)::integer as x, y',
'sum(a)::integer/count(b) as x, y', # issue297
])
def test_group_identifier_list(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:119:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_group_identifier_list[sum(a) as x, b as y]
s = 'sum(a) as x, b as y'
@pytest.mark.parametrize('s', [
'foo, bar',
'sum(a), sum(b)',
'sum(a) as x, b as y',
'sum(a)::integer, b',
'sum(a)/count(b) as x, y',
'sum(a)::integer as x, y',
'sum(a)::integer/count(b) as x, y', # issue297
])
def test_group_identifier_list(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:119:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_group_identifier_list[sum(a)::integer, b]
s = 'sum(a)::integer, b'
@pytest.mark.parametrize('s', [
'foo, bar',
'sum(a), sum(b)',
'sum(a) as x, b as y',
'sum(a)::integer, b',
'sum(a)/count(b) as x, y',
'sum(a)::integer as x, y',
'sum(a)::integer/count(b) as x, y', # issue297
])
def test_group_identifier_list(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:119:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_group_identifier_list[sum(a)/count(b) as x, y]
s = 'sum(a)/count(b) as x, y'
@pytest.mark.parametrize('s', [
'foo, bar',
'sum(a), sum(b)',
'sum(a) as x, b as y',
'sum(a)::integer, b',
'sum(a)/count(b) as x, y',
'sum(a)::integer as x, y',
'sum(a)::integer/count(b) as x, y', # issue297
])
def test_group_identifier_list(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:119:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_group_identifier_list[sum(a)::integer as x, y]
s = 'sum(a)::integer as x, y'
@pytest.mark.parametrize('s', [
'foo, bar',
'sum(a), sum(b)',
'sum(a) as x, b as y',
'sum(a)::integer, b',
'sum(a)/count(b) as x, y',
'sum(a)::integer as x, y',
'sum(a)::integer/count(b) as x, y', # issue297
])
def test_group_identifier_list(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:119:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_group_identifier_list[sum(a)::integer/count(b) as x, y]
s = 'sum(a)::integer/count(b) as x, y'
@pytest.mark.parametrize('s', [
'foo, bar',
'sum(a), sum(b)',
'sum(a) as x, b as y',
'sum(a)::integer, b',
'sum(a)/count(b) as x, y',
'sum(a)::integer as x, y',
'sum(a)::integer/count(b) as x, y', # issue297
])
def test_group_identifier_list(s):
> parsed = sqlparse.parse(s)[0]
tests/test_grouping.py:119:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_wildcard
def test_grouping_identifier_wildcard():
> p = sqlparse.parse('a.*, b.id')[0]
tests/test_grouping.py:124:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_name_wildcard
def test_grouping_identifier_name_wildcard():
> p = sqlparse.parse('a.*')[0]
tests/test_grouping.py:131:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_invalid
def test_grouping_identifier_invalid():
> p = sqlparse.parse('a.')[0]
tests/test_grouping.py:138:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_invalid_in_middle
def test_grouping_identifier_invalid_in_middle():
# issue261
s = 'SELECT foo. FROM foo'
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:149:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifer_as[foo as (select *)]
s = 'foo as (select *)'
@pytest.mark.parametrize('s', ['foo as (select *)', 'foo as(select *)'])
def test_grouping_identifer_as(s):
# issue507
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:158:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifer_as[foo as(select *)]
s = 'foo as(select *)'
@pytest.mark.parametrize('s', ['foo as (select *)', 'foo as(select *)'])
def test_grouping_identifer_as(s):
# issue507
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:158:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_as_invalid
def test_grouping_identifier_as_invalid():
# issue8
> p = sqlparse.parse('foo as select *')[0]
tests/test_grouping.py:166:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_function
def test_grouping_identifier_function():
> p = sqlparse.parse('foo() as bar')[0]
tests/test_grouping.py:174:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_operation[foo+100]
s = 'foo+100'
@pytest.mark.parametrize('s', ['foo+100', 'foo + 100', 'foo*100'])
def test_grouping_operation(s):
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:199:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_operation[foo + 100]
s = 'foo + 100'
@pytest.mark.parametrize('s', ['foo+100', 'foo + 100', 'foo*100'])
def test_grouping_operation(s):
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:199:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_operation[foo*100]
s = 'foo*100'
@pytest.mark.parametrize('s', ['foo+100', 'foo + 100', 'foo*100'])
def test_grouping_operation(s):
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:199:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_list
def test_grouping_identifier_list():
> p = sqlparse.parse('a, b, c')[0]
tests/test_grouping.py:204:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_list_subquery
def test_grouping_identifier_list_subquery():
"""identifier lists should still work in subqueries with aliases"""
> p = sqlparse.parse("select * from ("
"select a, b + c as d from table) sub")[0]
tests/test_grouping.py:212:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_list_case
def test_grouping_identifier_list_case():
> p = sqlparse.parse('a, case when 1 then 2 else 3 end as b, c')[0]
tests/test_grouping.py:223:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_list_other
def test_grouping_identifier_list_other():
# issue2
> p = sqlparse.parse("select *, null, 1, 'foo', bar from mytable, x")[0]
tests/test_grouping.py:231:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_list_with_inline_comments
def test_grouping_identifier_list_with_inline_comments():
# issue163
> p = sqlparse.parse('foo /* a comment */, bar')[0]
tests/test_grouping.py:238:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifiers_with_operators
def test_grouping_identifiers_with_operators():
> p = sqlparse.parse('a+b as c from table where (d-e)%2= 1')[0]
tests/test_grouping.py:245:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_identifier_list_with_order
def test_grouping_identifier_list_with_order():
# issue101
> p = sqlparse.parse('1, 2 desc, 3')[0]
tests/test_grouping.py:251:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_nested_identifier_with_order
def test_grouping_nested_identifier_with_order():
# issue745
> p = sqlparse.parse('(a desc)')[0]
tests/test_grouping.py:259:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_where
def test_grouping_where():
s = 'select * from foo where bar = 1 order by id desc'
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:267:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_where_union[select 1 where 1 = 2 union select 2]
s = 'select 1 where 1 = 2 union select 2'
@pytest.mark.parametrize('s', (
'select 1 where 1 = 2 union select 2',
'select 1 where 1 = 2 union all select 2',
))
def test_grouping_where_union(s):
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:282:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_where_union[select 1 where 1 = 2 union all select 2]
s = 'select 1 where 1 = 2 union all select 2'
@pytest.mark.parametrize('s', (
'select 1 where 1 = 2 union select 2',
'select 1 where 1 = 2 union all select 2',
))
def test_grouping_where_union(s):
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:282:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_returning_kw_ends_where_clause
def test_returning_kw_ends_where_clause():
s = 'delete from foo where x > y returning z'
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:288:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_into_kw_ends_where_clause
def test_into_kw_ends_where_clause(): # issue324
s = 'select * from foo where a = 1 into baz'
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:296:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_typecast[select foo::integer from bar-integer]
sql = 'select foo::integer from bar', expected = 'integer'
@pytest.mark.parametrize('sql, expected', [
# note: typecast needs to be 2nd token for this test
('select foo::integer from bar', 'integer'),
('select (current_database())::information_schema.sql_identifier',
'information_schema.sql_identifier'),
])
def test_grouping_typecast(sql, expected):
> p = sqlparse.parse(sql)[0]
tests/test_grouping.py:309:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_typecast[select (current_database())::information_schema.sql_identifier-information_schema.sql_identifier]
sql = 'select (current_database())::information_schema.sql_identifier'
expected = 'information_schema.sql_identifier'
@pytest.mark.parametrize('sql, expected', [
# note: typecast needs to be 2nd token for this test
('select foo::integer from bar', 'integer'),
('select (current_database())::information_schema.sql_identifier',
'information_schema.sql_identifier'),
])
def test_grouping_typecast(sql, expected):
> p = sqlparse.parse(sql)[0]
tests/test_grouping.py:309:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
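The two parametrized typecast cases above only show the failing parse() call; the assertion that normally follows is not visible here. As a hedged sketch, this is how the typecast of a grouped identifier is usually read on a working build:

# Sketch of the accessor these cases presumably exercise (assumed, since the
# test body is truncated above): the '::' cast is grouped into the Identifier
# and Identifier.get_typecast() returns the type name.
import sqlparse

stmt = sqlparse.parse('select foo::integer from bar')[0]
identifier = stmt.tokens[2]          # the grouped 'foo::integer' identifier (assumed position)
print(identifier.get_typecast())     # expected: 'integer'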
test_grouping.py::test_grouping_alias
def test_grouping_alias():
s = 'select foo as bar from mytable'
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:315:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_alias_case
def test_grouping_alias_case():
# see issue46
> p = sqlparse.parse('CASE WHEN 1 THEN 2 ELSE 3 END foo')[0]
tests/test_grouping.py:337:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_alias_ctas
def test_grouping_alias_ctas():
> p = sqlparse.parse('CREATE TABLE tbl1 AS SELECT coalesce(t1.col1, 0) AS col1 FROM t1')[0]
tests/test_grouping.py:343:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_subquery_no_parens
def test_grouping_subquery_no_parens():
# Not totally sure if this is the right approach...
# When a THEN clause contains a subquery w/o parenthesis around it *and*
# a WHERE condition, the WHERE grouper consumes END too.
# This makes sure that it doesn't fail.
> p = sqlparse.parse('CASE WHEN 1 THEN select 2 where foo = 1 end')[0]
tests/test_grouping.py:352:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
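The comment in test_grouping_subquery_no_parens describes the regression being guarded: the WHERE grouper must not swallow END when a THEN clause holds an un-parenthesized subquery. A hedged sketch of what a passing run should look like, assuming a working build:

# Hedged sketch (assumed behaviour, not the test's own assertions): the
# statement parses without error and CASE ... END is still grouped into a
# single Case token despite the un-parenthesized subquery plus WHERE inside.
import sqlparse
from sqlparse import sql

[stmt] = sqlparse.parse('CASE WHEN 1 THEN select 2 where foo = 1 end')
print(isinstance(stmt.tokens[0], sql.Case))   # expected: True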
test_grouping.py::test_grouping_alias_returns_none[foo.bar]
s = 'foo.bar'
@pytest.mark.parametrize('s', ['foo.bar', 'x, y', 'x > y', 'x / y'])
def test_grouping_alias_returns_none(s):
# see issue185 and issue445
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:360:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_alias_returns_none[x, y]
s = 'x, y'
@pytest.mark.parametrize('s', ['foo.bar', 'x, y', 'x > y', 'x / y'])
def test_grouping_alias_returns_none(s):
# see issue185 and issue445
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:360:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_alias_returns_none[x > y]
s = 'x > y'
@pytest.mark.parametrize('s', ['foo.bar', 'x, y', 'x > y', 'x / y'])
def test_grouping_alias_returns_none(s):
# see issue185 and issue445
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:360:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_alias_returns_none[x / y]
s = 'x / y'
@pytest.mark.parametrize('s', ['foo.bar', 'x, y', 'x > y', 'x / y'])
def test_grouping_alias_returns_none(s):
# see issue185 and issue445
> p = sqlparse.parse(s)[0]
tests/test_grouping.py:360:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_idlist_function
def test_grouping_idlist_function():
# see issue10 too
> p = sqlparse.parse('foo(1) x, bar')[0]
tests/test_grouping.py:367:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_comparison_exclude
def test_grouping_comparison_exclude():
# make sure operators are not handled too lazy
> p = sqlparse.parse('(=)')[0]
tests/test_grouping.py:373:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_function
def test_grouping_function():
> p = sqlparse.parse('foo()')[0]
tests/test_grouping.py:383:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_function_not_in
def test_grouping_function_not_in():
# issue183
> p = sqlparse.parse('in(1, 2)')[0]
tests/test_grouping.py:400:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_varchar
def test_grouping_varchar():
> p = sqlparse.parse('"text" Varchar(50) NOT NULL')[0]
tests/test_grouping.py:407:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_statement_get_type
def test_statement_get_type():
def f(sql):
return sqlparse.parse(sql)[0]
> assert f('select * from foo').get_type() == 'SELECT'
tests/test_grouping.py:415:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test_grouping.py:413: in f
return sqlparse.parse(sql)[0]
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_identifier_with_operators
def test_identifier_with_operators():
# issue 53
> p = sqlparse.parse('foo||bar')[0]
tests/test_grouping.py:424:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_identifier_with_op_trailing_ws
def test_identifier_with_op_trailing_ws():
# make sure trailing whitespace isn't grouped with identifier
> p = sqlparse.parse('foo || bar ')[0]
tests/test_grouping.py:435:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_identifier_with_string_literals
def test_identifier_with_string_literals():
> p = sqlparse.parse("foo + 'bar'")[0]
tests/test_grouping.py:442:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_identifier_consumes_ordering
def test_identifier_consumes_ordering():
# issue89
> p = sqlparse.parse('select * from foo order by c1 desc, c2, c3')[0]
tests/test_grouping.py:458:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_keywords
def test_comparison_with_keywords():
# issue90
# in fact these are assignments, but for now we don't distinguish them
> p = sqlparse.parse('foo = NULL')[0]
tests/test_grouping.py:471:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_floats
def test_comparison_with_floats():
# issue145
> p = sqlparse.parse('foo = 25.5')[0]
tests/test_grouping.py:485:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_parenthesis
def test_comparison_with_parenthesis():
# issue23
> p = sqlparse.parse('(3 + 4) = 7')[0]
tests/test_grouping.py:495:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[=]
operator = '='
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[!=]
operator = '!='
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[>]
operator = '>'
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[<]
operator = '<'
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[<=]
operator = '<='
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[>=]
operator = '>='
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[~]
operator = '~'
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[~~]
operator = '~~'
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[!~~]
operator = '!~~'
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[LIKE]
operator = 'LIKE'
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[NOT LIKE]
operator = 'NOT LIKE'
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[ILIKE]
operator = 'ILIKE'
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_strings[NOT ILIKE]
operator = 'NOT ILIKE'
@pytest.mark.parametrize('operator', (
'=', '!=', '>', '<', '<=', '>=', '~', '~~', '!~~',
'LIKE', 'NOT LIKE', 'ILIKE', 'NOT ILIKE',
))
def test_comparison_with_strings(operator):
# issue148
> p = sqlparse.parse("foo {} 'bar'".format(operator))[0]
tests/test_grouping.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_like_and_ilike_comparison
def test_like_and_ilike_comparison():
def validate_where_clause(where_clause, expected_tokens):
assert len(where_clause.tokens) == len(expected_tokens)
for where_token, expected_token in zip(where_clause, expected_tokens):
expected_ttype, expected_value = expected_token
if where_token.ttype is not None:
assert where_token.match(expected_ttype, expected_value, regex=True)
else:
# Certain tokens, such as comparison tokens, do not define a ttype that can be
# matched against. For these tokens, we ensure that the token instance is of
# the expected type and has a value conforming to specified regular expression
import re
assert (isinstance(where_token, expected_ttype)
and re.match(expected_value, where_token.value))
> [p1] = sqlparse.parse("select * from mytable where mytable.mycolumn LIKE 'expr%' limit 5;")
tests/test_grouping.py:531:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
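The validate_where_clause helper above spells out the matching rule for grouped tokens: anything without a ttype is checked by instance type plus a regex over its value. A short sketch of that rule applied directly, assuming a working build and that LIKE comparisons group into sql.Comparison:

# Sketch of the else-branch described in validate_where_clause, assuming a
# working build: grouped tokens such as sql.Comparison carry ttype None, so
# they are checked via isinstance() plus a regex over .value instead of match().
import re
import sqlparse
from sqlparse import sql

[stmt] = sqlparse.parse("select * from mytable where mytable.mycolumn LIKE 'expr%' limit 5;")
where = next(tok for tok in stmt.tokens if isinstance(tok, sql.Where))
comparison = next(tok for tok in where.tokens if isinstance(tok, sql.Comparison))
print(comparison.ttype is None)                                                  # expected: True
print(bool(re.match(r"mytable\.mycolumn\s+LIKE\s+'expr%'", comparison.value)))   # expected: True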
test_grouping.py::test_comparison_with_functions
def test_comparison_with_functions():
# issue230
> p = sqlparse.parse('foo = DATE(bar.baz)')[0]
tests/test_grouping.py:553:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_comparison_with_typed_literal
def test_comparison_with_typed_literal():
> p = sqlparse.parse("foo = DATE 'bar.baz'")[0]
tests/test_grouping.py:576:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_forloops[FOR]
start = 'FOR'
@pytest.mark.parametrize('start', ['FOR', 'FOREACH'])
def test_forloops(start):
> p = sqlparse.parse('{} foo in bar LOOP foobar END LOOP'.format(start))[0]
tests/test_grouping.py:588:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_forloops[FOREACH]
start = 'FOREACH'
@pytest.mark.parametrize('start', ['FOR', 'FOREACH'])
def test_forloops(start):
> p = sqlparse.parse('{} foo in bar LOOP foobar END LOOP'.format(start))[0]
tests/test_grouping.py:588:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_nested_for
def test_nested_for():
> p = sqlparse.parse('FOR foo LOOP FOR bar LOOP END LOOP END LOOP')[0]
tests/test_grouping.py:594:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_begin
def test_begin():
> p = sqlparse.parse('BEGIN foo END')[0]
tests/test_grouping.py:606:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_keyword_followed_by_parenthesis
def test_keyword_followed_by_parenthesis():
> p = sqlparse.parse('USING(somecol')[0]
tests/test_grouping.py:612:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_nested_begin
def test_nested_begin():
> p = sqlparse.parse('BEGIN foo BEGIN bar END END')[0]
tests/test_grouping.py:619:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_aliased_column_without_as
def test_aliased_column_without_as():
> p = sqlparse.parse('foo bar')[0].tokens
tests/test_grouping.py:631:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_qualified_function
def test_qualified_function():
> p = sqlparse.parse('foo()')[0].tokens[0]
tests/test_grouping.py:643:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_aliased_function_without_as
def test_aliased_function_without_as():
> p = sqlparse.parse('foo() bar')[0].tokens[0]
tests/test_grouping.py:653:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_aliased_literal_without_as
def test_aliased_literal_without_as():
> p = sqlparse.parse('1 foo')[0].tokens
tests/test_grouping.py:665:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_as_cte
def test_grouping_as_cte():
> p = sqlparse.parse('foo AS WITH apple AS 1, banana AS 2')[0].tokens
tests/test_grouping.py:671:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_grouping.py::test_grouping_create_table
def test_grouping_create_table():
> p = sqlparse.parse("create table db.tbl (a string)")[0].tokens
tests/test_grouping.py:678:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_keywords.py::TestSQLREGEX::test_float_numbers[1.0]
self =
number = '1.0'
@pytest.mark.parametrize('number', ['1.0', '-1.0',
'1.', '-1.',
'.1', '-.1'])
def test_float_numbers(self, number):
> ttype = next(tt for action, tt in Lexer.get_default_instance()._SQL_REGEX if action(number))
E StopIteration
tests/test_keywords.py:12: StopIteration
The above exception was the direct cause of the following exception:
cls =
func = . at 0x7ef7adb7ab90>
when = 'call'
reraise = (, )
@classmethod
def from_call(
cls,
func: Callable[[], TResult],
when: Literal["collect", "setup", "call", "teardown"],
reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
) -> CallInfo[TResult]:
"""Call func, wrapping the result in a CallInfo.
:param func:
The function to call. Called without arguments.
:type func: Callable[[], _pytest.runner.TResult]
:param when:
The phase in which the function is called.
:param reraise:
Exception or exceptions that shall propagate if raised by the
function, instead of being wrapped in the CallInfo.
"""
excinfo = None
start = timing.time()
precise_start = timing.perf_counter()
try:
> result: TResult | None = func()
.venv/lib/python3.10/site-packages/_pytest/runner.py:341:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.venv/lib/python3.10/site-packages/_pytest/runner.py:242: in
lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise
.venv/lib/python3.10/site-packages/pluggy/_hooks.py:513: in __call__
return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
.venv/lib/python3.10/site-packages/pluggy/_manager.py:120: in _hookexec
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:92: in pytest_runtest_call
yield from thread_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:68: in thread_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:95: in pytest_runtest_call
yield from unraisable_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:70: in unraisable_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/logging.py:846: in pytest_runtest_call
yield from self._runtest_for(item, "call")
.venv/lib/python3.10/site-packages/_pytest/logging.py:829: in _runtest_for
yield
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = > _state='suspended' _in_suspended=False> _capture_fixture=None>
item =
@hookimpl(wrapper=True)
def pytest_runtest_call(self, item: Item) -> Generator[None]:
with self.item_capture("call", item):
> return (yield)
E RuntimeError: generator raised StopIteration
.venv/lib/python3.10/site-packages/_pytest/capture.py:880: RuntimeError
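Note: the test_float_numbers failures are a separate regression. The test walks Lexer.get_default_instance()._SQL_REGEX, a list of (match, tokentype) pairs, and takes the first pair whose matcher accepts the literal; next() raising StopIteration therefore means no lexer rule matched a float literal such as '1.0' at all. Because the StopIteration escapes the test function, pytest's hook wrapper reports it as the "generator raised StopIteration" RuntimeError shown here (PEP 479 behaviour). A slightly more defensive version of the same probe, sketched below using only the attributes the test itself already touches, makes the symptom easier to read:
# Diagnostic sketch; classify() is a made-up helper, not part of sqlparse.
from sqlparse import tokens as T
from sqlparse.lexer import Lexer
def classify(text):
    """Return the token type of the first lexer rule matching `text`,
    or None instead of letting StopIteration escape."""
    rules = Lexer.get_default_instance()._SQL_REGEX  # list of (match, tokentype) pairs
    return next((ttype for match, ttype in rules if match(text)), None)
for number in ('1.0', '-1.0', '1.', '-1.', '.1', '-.1'):
    ttype = classify(number)
    # On an unbroken lexer each literal is expected to come back as T.Number.Float;
    # in this run the float rule is evidently missing or broken.
    print(number, ttype, ttype == T.Number.Float)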
test_keywords.py::TestSQLREGEX::test_float_numbers[-1.0]
self =
number = '-1.0'
@pytest.mark.parametrize('number', ['1.0', '-1.0',
'1.', '-1.',
'.1', '-.1'])
def test_float_numbers(self, number):
> ttype = next(tt for action, tt in Lexer.get_default_instance()._SQL_REGEX if action(number))
E StopIteration
tests/test_keywords.py:12: StopIteration
The above exception was the direct cause of the following exception:
cls =
func = . at 0x7ef7ad4bd630>
when = 'call'
reraise = (, )
@classmethod
def from_call(
cls,
func: Callable[[], TResult],
when: Literal["collect", "setup", "call", "teardown"],
reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
) -> CallInfo[TResult]:
"""Call func, wrapping the result in a CallInfo.
:param func:
The function to call. Called without arguments.
:type func: Callable[[], _pytest.runner.TResult]
:param when:
The phase in which the function is called.
:param reraise:
Exception or exceptions that shall propagate if raised by the
function, instead of being wrapped in the CallInfo.
"""
excinfo = None
start = timing.time()
precise_start = timing.perf_counter()
try:
> result: TResult | None = func()
.venv/lib/python3.10/site-packages/_pytest/runner.py:341:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.venv/lib/python3.10/site-packages/_pytest/runner.py:242: in
lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise
.venv/lib/python3.10/site-packages/pluggy/_hooks.py:513: in __call__
return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
.venv/lib/python3.10/site-packages/pluggy/_manager.py:120: in _hookexec
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:92: in pytest_runtest_call
yield from thread_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:68: in thread_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:95: in pytest_runtest_call
yield from unraisable_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:70: in unraisable_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/logging.py:846: in pytest_runtest_call
yield from self._runtest_for(item, "call")
.venv/lib/python3.10/site-packages/_pytest/logging.py:829: in _runtest_for
yield
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = > _state='suspended' _in_suspended=False> _capture_fixture=None>
item =
@hookimpl(wrapper=True)
def pytest_runtest_call(self, item: Item) -> Generator[None]:
with self.item_capture("call", item):
> return (yield)
E RuntimeError: generator raised StopIteration
.venv/lib/python3.10/site-packages/_pytest/capture.py:880: RuntimeError
test_keywords.py::TestSQLREGEX::test_float_numbers[1.]
self =
number = '1.'
@pytest.mark.parametrize('number', ['1.0', '-1.0',
'1.', '-1.',
'.1', '-.1'])
def test_float_numbers(self, number):
> ttype = next(tt for action, tt in Lexer.get_default_instance()._SQL_REGEX if action(number))
E StopIteration
tests/test_keywords.py:12: StopIteration
The above exception was the direct cause of the following exception:
cls =
func = . at 0x7ef7ad8f93f0>
when = 'call'
reraise = (, )
@classmethod
def from_call(
cls,
func: Callable[[], TResult],
when: Literal["collect", "setup", "call", "teardown"],
reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
) -> CallInfo[TResult]:
"""Call func, wrapping the result in a CallInfo.
:param func:
The function to call. Called without arguments.
:type func: Callable[[], _pytest.runner.TResult]
:param when:
The phase in which the function is called.
:param reraise:
Exception or exceptions that shall propagate if raised by the
function, instead of being wrapped in the CallInfo.
"""
excinfo = None
start = timing.time()
precise_start = timing.perf_counter()
try:
> result: TResult | None = func()
.venv/lib/python3.10/site-packages/_pytest/runner.py:341:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.venv/lib/python3.10/site-packages/_pytest/runner.py:242: in
lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise
.venv/lib/python3.10/site-packages/pluggy/_hooks.py:513: in __call__
return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
.venv/lib/python3.10/site-packages/pluggy/_manager.py:120: in _hookexec
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:92: in pytest_runtest_call
yield from thread_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:68: in thread_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:95: in pytest_runtest_call
yield from unraisable_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:70: in unraisable_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/logging.py:846: in pytest_runtest_call
yield from self._runtest_for(item, "call")
.venv/lib/python3.10/site-packages/_pytest/logging.py:829: in _runtest_for
yield
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = > _state='suspended' _in_suspended=False> _capture_fixture=None>
item =
@hookimpl(wrapper=True)
def pytest_runtest_call(self, item: Item) -> Generator[None]:
with self.item_capture("call", item):
> return (yield)
E RuntimeError: generator raised StopIteration
.venv/lib/python3.10/site-packages/_pytest/capture.py:880: RuntimeError
test_keywords.py::TestSQLREGEX::test_float_numbers[-1.]
self =
number = '-1.'
@pytest.mark.parametrize('number', ['1.0', '-1.0',
'1.', '-1.',
'.1', '-.1'])
def test_float_numbers(self, number):
> ttype = next(tt for action, tt in Lexer.get_default_instance()._SQL_REGEX if action(number))
E StopIteration
tests/test_keywords.py:12: StopIteration
The above exception was the direct cause of the following exception:
cls =
func = . at 0x7ef7adb7bac0>
when = 'call'
reraise = (, )
@classmethod
def from_call(
cls,
func: Callable[[], TResult],
when: Literal["collect", "setup", "call", "teardown"],
reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
) -> CallInfo[TResult]:
"""Call func, wrapping the result in a CallInfo.
:param func:
The function to call. Called without arguments.
:type func: Callable[[], _pytest.runner.TResult]
:param when:
The phase in which the function is called.
:param reraise:
Exception or exceptions that shall propagate if raised by the
function, instead of being wrapped in the CallInfo.
"""
excinfo = None
start = timing.time()
precise_start = timing.perf_counter()
try:
> result: TResult | None = func()
.venv/lib/python3.10/site-packages/_pytest/runner.py:341:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.venv/lib/python3.10/site-packages/_pytest/runner.py:242: in
lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise
.venv/lib/python3.10/site-packages/pluggy/_hooks.py:513: in __call__
return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
.venv/lib/python3.10/site-packages/pluggy/_manager.py:120: in _hookexec
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:92: in pytest_runtest_call
yield from thread_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:68: in thread_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:95: in pytest_runtest_call
yield from unraisable_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:70: in unraisable_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/logging.py:846: in pytest_runtest_call
yield from self._runtest_for(item, "call")
.venv/lib/python3.10/site-packages/_pytest/logging.py:829: in _runtest_for
yield
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = > _state='suspended' _in_suspended=False> _capture_fixture=None>
item =
@hookimpl(wrapper=True)
def pytest_runtest_call(self, item: Item) -> Generator[None]:
with self.item_capture("call", item):
> return (yield)
E RuntimeError: generator raised StopIteration
.venv/lib/python3.10/site-packages/_pytest/capture.py:880: RuntimeError
test_keywords.py::TestSQLREGEX::test_float_numbers[.1]
self =
number = '.1'
@pytest.mark.parametrize('number', ['1.0', '-1.0',
'1.', '-1.',
'.1', '-.1'])
def test_float_numbers(self, number):
> ttype = next(tt for action, tt in Lexer.get_default_instance()._SQL_REGEX if action(number))
E StopIteration
tests/test_keywords.py:12: StopIteration
The above exception was the direct cause of the following exception:
cls =
func = . at 0x7ef7ad8f93f0>
when = 'call'
reraise = (, )
@classmethod
def from_call(
cls,
func: Callable[[], TResult],
when: Literal["collect", "setup", "call", "teardown"],
reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
) -> CallInfo[TResult]:
"""Call func, wrapping the result in a CallInfo.
:param func:
The function to call. Called without arguments.
:type func: Callable[[], _pytest.runner.TResult]
:param when:
The phase in which the function is called.
:param reraise:
Exception or exceptions that shall propagate if raised by the
function, instead of being wrapped in the CallInfo.
"""
excinfo = None
start = timing.time()
precise_start = timing.perf_counter()
try:
> result: TResult | None = func()
.venv/lib/python3.10/site-packages/_pytest/runner.py:341:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.venv/lib/python3.10/site-packages/_pytest/runner.py:242: in
lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise
.venv/lib/python3.10/site-packages/pluggy/_hooks.py:513: in __call__
return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
.venv/lib/python3.10/site-packages/pluggy/_manager.py:120: in _hookexec
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:92: in pytest_runtest_call
yield from thread_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:68: in thread_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:95: in pytest_runtest_call
yield from unraisable_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:70: in unraisable_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/logging.py:846: in pytest_runtest_call
yield from self._runtest_for(item, "call")
.venv/lib/python3.10/site-packages/_pytest/logging.py:829: in _runtest_for
yield
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = > _state='suspended' _in_suspended=False> _capture_fixture=None>
item =
@hookimpl(wrapper=True)
def pytest_runtest_call(self, item: Item) -> Generator[None]:
with self.item_capture("call", item):
> return (yield)
E RuntimeError: generator raised StopIteration
.venv/lib/python3.10/site-packages/_pytest/capture.py:880: RuntimeError
test_keywords.py::TestSQLREGEX::test_float_numbers[-.1]
self =
number = '-.1'
@pytest.mark.parametrize('number', ['1.0', '-1.0',
'1.', '-1.',
'.1', '-.1'])
def test_float_numbers(self, number):
> ttype = next(tt for action, tt in Lexer.get_default_instance()._SQL_REGEX if action(number))
E StopIteration
tests/test_keywords.py:12: StopIteration
The above exception was the direct cause of the following exception:
cls =
func = . at 0x7ef7ad501b40>
when = 'call'
reraise = (, )
@classmethod
def from_call(
cls,
func: Callable[[], TResult],
when: Literal["collect", "setup", "call", "teardown"],
reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
) -> CallInfo[TResult]:
"""Call func, wrapping the result in a CallInfo.
:param func:
The function to call. Called without arguments.
:type func: Callable[[], _pytest.runner.TResult]
:param when:
The phase in which the function is called.
:param reraise:
Exception or exceptions that shall propagate if raised by the
function, instead of being wrapped in the CallInfo.
"""
excinfo = None
start = timing.time()
precise_start = timing.perf_counter()
try:
> result: TResult | None = func()
.venv/lib/python3.10/site-packages/_pytest/runner.py:341:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.venv/lib/python3.10/site-packages/_pytest/runner.py:242: in
lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise
.venv/lib/python3.10/site-packages/pluggy/_hooks.py:513: in __call__
return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
.venv/lib/python3.10/site-packages/pluggy/_manager.py:120: in _hookexec
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:92: in pytest_runtest_call
yield from thread_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/threadexception.py:68: in thread_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:95: in pytest_runtest_call
yield from unraisable_exception_runtest_hook()
.venv/lib/python3.10/site-packages/_pytest/unraisableexception.py:70: in unraisable_exception_runtest_hook
yield
.venv/lib/python3.10/site-packages/_pytest/logging.py:846: in pytest_runtest_call
yield from self._runtest_for(item, "call")
.venv/lib/python3.10/site-packages/_pytest/logging.py:829: in _runtest_for
yield
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = > _state='suspended' _in_suspended=False> _capture_fixture=None>
item =
@hookimpl(wrapper=True)
def pytest_runtest_call(self, item: Item) -> Generator[None]:
with self.item_capture("call", item):
> return (yield)
E RuntimeError: generator raised StopIteration
.venv/lib/python3.10/site-packages/_pytest/capture.py:880: RuntimeError
test_parse.py::test_parse_tokenize
def test_parse_tokenize():
s = 'select * from foo;'
> stmts = sqlparse.parse(s)
tests/test_parse.py:13:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_multistatement
def test_parse_multistatement():
sql1 = 'select * from foo;'
sql2 = 'select * from bar;'
> stmts = sqlparse.parse(sql1 + sql2)
tests/test_parse.py:21:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_newlines[select\n*from foo;]
s = 'select\n*from foo;'
@pytest.mark.parametrize('s', ['select\n*from foo;',
'select\r\n*from foo',
'select\r*from foo',
'select\r\n*from foo\n'])
def test_parse_newlines(s):
> p = sqlparse.parse(s)[0]
tests/test_parse.py:32:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_newlines[select\r\n*from foo]
s = 'select\r\n*from foo'
@pytest.mark.parametrize('s', ['select\n*from foo;',
'select\r\n*from foo',
'select\r*from foo',
'select\r\n*from foo\n'])
def test_parse_newlines(s):
> p = sqlparse.parse(s)[0]
tests/test_parse.py:32:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_newlines[select\r*from foo]
s = 'select\r*from foo'
@pytest.mark.parametrize('s', ['select\n*from foo;',
'select\r\n*from foo',
'select\r*from foo',
'select\r\n*from foo\n'])
def test_parse_newlines(s):
> p = sqlparse.parse(s)[0]
tests/test_parse.py:32:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_newlines[select\r\n*from foo\n]
s = 'select\r\n*from foo\n'
@pytest.mark.parametrize('s', ['select\n*from foo;',
'select\r\n*from foo',
'select\r*from foo',
'select\r\n*from foo\n'])
def test_parse_newlines(s):
> p = sqlparse.parse(s)[0]
tests/test_parse.py:32:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_within
def test_parse_within():
s = 'foo(col1, col2)'
> p = sqlparse.parse(s)[0]
tests/test_parse.py:38:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_child_of
def test_parse_child_of():
s = '(col1, col2)'
> p = sqlparse.parse(s)[0]
tests/test_parse.py:45:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_has_ancestor
def test_parse_has_ancestor():
s = 'foo or (bar, baz)'
> p = sqlparse.parse(s)[0]
tests/test_parse.py:55:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_float[.5]
s = '.5'
@pytest.mark.parametrize('s', ['.5', '.51', '1.5', '12.5'])
def test_parse_float(s):
> t = sqlparse.parse(s)[0].tokens
tests/test_parse.py:64:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_float[.51]
s = '.51'
@pytest.mark.parametrize('s', ['.5', '.51', '1.5', '12.5'])
def test_parse_float(s):
> t = sqlparse.parse(s)[0].tokens
tests/test_parse.py:64:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_float[1.5]
s = '1.5'
@pytest.mark.parametrize('s', ['.5', '.51', '1.5', '12.5'])
def test_parse_float(s):
> t = sqlparse.parse(s)[0].tokens
tests/test_parse.py:64:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_float[12.5]
s = '12.5'
@pytest.mark.parametrize('s', ['.5', '.51', '1.5', '12.5'])
def test_parse_float(s):
> t = sqlparse.parse(s)[0].tokens
tests/test_parse.py:64:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_placeholder[select * from foo where user = ?-?]
s = 'select * from foo where user = ?', holder = '?'
@pytest.mark.parametrize('s, holder', [
('select * from foo where user = ?', '?'),
('select * from foo where user = :1', ':1'),
('select * from foo where user = :name', ':name'),
('select * from foo where user = %s', '%s'),
('select * from foo where user = $a', '$a')])
def test_parse_placeholder(s, holder):
> t = sqlparse.parse(s)[0].tokens[-1].tokens
tests/test_parse.py:76:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_placeholder[select * from foo where user = :1-:1]
s = 'select * from foo where user = :1', holder = ':1'
@pytest.mark.parametrize('s, holder', [
('select * from foo where user = ?', '?'),
('select * from foo where user = :1', ':1'),
('select * from foo where user = :name', ':name'),
('select * from foo where user = %s', '%s'),
('select * from foo where user = $a', '$a')])
def test_parse_placeholder(s, holder):
> t = sqlparse.parse(s)[0].tokens[-1].tokens
tests/test_parse.py:76:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_placeholder[select * from foo where user = :name-:name]
s = 'select * from foo where user = :name', holder = ':name'
@pytest.mark.parametrize('s, holder', [
('select * from foo where user = ?', '?'),
('select * from foo where user = :1', ':1'),
('select * from foo where user = :name', ':name'),
('select * from foo where user = %s', '%s'),
('select * from foo where user = $a', '$a')])
def test_parse_placeholder(s, holder):
> t = sqlparse.parse(s)[0].tokens[-1].tokens
tests/test_parse.py:76:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_placeholder[select * from foo where user = %s-%s]
s = 'select * from foo where user = %s', holder = '%s'
@pytest.mark.parametrize('s, holder', [
('select * from foo where user = ?', '?'),
('select * from foo where user = :1', ':1'),
('select * from foo where user = :name', ':name'),
('select * from foo where user = %s', '%s'),
('select * from foo where user = $a', '$a')])
def test_parse_placeholder(s, holder):
> t = sqlparse.parse(s)[0].tokens[-1].tokens
tests/test_parse.py:76:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_placeholder[select * from foo where user = $a-$a]
s = 'select * from foo where user = $a', holder = '$a'
@pytest.mark.parametrize('s, holder', [
('select * from foo where user = ?', '?'),
('select * from foo where user = :1', ':1'),
('select * from foo where user = :name', ':name'),
('select * from foo where user = %s', '%s'),
('select * from foo where user = $a', '$a')])
def test_parse_placeholder(s, holder):
> t = sqlparse.parse(s)[0].tokens[-1].tokens
tests/test_parse.py:76:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_access_symbol
def test_parse_access_symbol():
# see issue27
> t = sqlparse.parse('select a.[foo bar] as foo')[0].tokens
tests/test_parse.py:88:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_square_brackets_notation_isnt_too_greedy
def test_parse_square_brackets_notation_isnt_too_greedy():
# see issue153
> t = sqlparse.parse('[foo], [bar]')[0].tokens
tests/test_parse.py:97:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_square_brackets_notation_isnt_too_greedy2
def test_parse_square_brackets_notation_isnt_too_greedy2():
# see issue583
> t = sqlparse.parse('[(foo[i])]')[0].tokens
tests/test_parse.py:106:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_keyword_like_identifier
def test_parse_keyword_like_identifier():
# see issue47
> t = sqlparse.parse('foo.key')[0].tokens
tests/test_parse.py:112:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_function_parameter
def test_parse_function_parameter():
# see issue94
> t = sqlparse.parse('abs(some_col)')[0].tokens[0].get_parameters()
tests/test_parse.py:119:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_function_param_single_literal
def test_parse_function_param_single_literal():
> t = sqlparse.parse('foo(5)')[0].tokens[0].get_parameters()
tests/test_parse.py:125:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_nested_function
def test_parse_nested_function():
> t = sqlparse.parse('foo(bar(5))')[0].tokens[0].get_parameters()
tests/test_parse.py:131:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_casted_params
def test_parse_casted_params():
> t = sqlparse.parse("foo(DATE '2023-11-14', TIMESTAMP '2023-11-15')")[0].tokens[0].get_parameters()
tests/test_parse.py:137:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parse_div_operator
def test_parse_div_operator():
> p = sqlparse.parse('col1 DIV 5 AS div_col1')[0].tokens
tests/test_parse.py:142:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_quoted_identifier
def test_quoted_identifier():
> t = sqlparse.parse('select x.y as "z" from foo')[0].tokens
tests/test_parse.py:148:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_valid_identifier_names[foo]
name = 'foo'
@pytest.mark.parametrize('name', [
'foo', '_foo', # issue175
'1_data', # valid MySQL table name, see issue337
'業者名稱', # valid at least for SQLite3, see issue641
])
def test_valid_identifier_names(name):
> t = sqlparse.parse(name)[0].tokens
tests/test_parse.py:160:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_valid_identifier_names[_foo]
name = '_foo'
@pytest.mark.parametrize('name', [
'foo', '_foo', # issue175
'1_data', # valid MySQL table name, see issue337
'業者名稱', # valid at least for SQLite3, see issue641
])
def test_valid_identifier_names(name):
> t = sqlparse.parse(name)[0].tokens
tests/test_parse.py:160:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_valid_identifier_names[1_data]
name = '1_data'
@pytest.mark.parametrize('name', [
'foo', '_foo', # issue175
'1_data', # valid MySQL table name, see issue337
'業者名稱', # valid at least for SQLite3, see issue641
])
def test_valid_identifier_names(name):
> t = sqlparse.parse(name)[0].tokens
tests/test_parse.py:160:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_valid_identifier_names[\u696d\u8005\u540d\u7a31]
name = '業者名稱'
@pytest.mark.parametrize('name', [
'foo', '_foo', # issue175
'1_data', # valid MySQL table name, see issue337
'業者名稱', # valid at least for SQLite3, see issue641
])
def test_valid_identifier_names(name):
> t = sqlparse.parse(name)[0].tokens
tests/test_parse.py:160:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_psql_quotation_marks
def test_psql_quotation_marks():
# issue83
# regression: make sure plain $$ work
> t = sqlparse.split("""
CREATE OR REPLACE FUNCTION testfunc1(integer) RETURNS integer AS $$
....
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION testfunc2(integer) RETURNS integer AS $$
....
$$ LANGUAGE plpgsql;""")
tests/test_parse.py:169:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = '\n CREATE OR REPLACE FUNCTION testfunc1(integer) RETURNS integer AS $$\n ....\n $$ LANGUAGE plpgsql;\n CREATE OR REPLACE FUNCTION testfunc2(integer) RETURNS integer AS $$\n ....\n $$ LANGUAGE plpgsql;'
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
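Note: test_psql_quotation_marks goes through sqlparse.split and fails one step later for what looks like the same root cause as the grouping failures above: FilterStack.run rebinds stream = grouping.group(stream) and then iterates the result, so a None return from the grouping step surfaces as "'NoneType' object is not iterable" instead of the earlier AttributeError. A minimal illustrative sketch of the contract that the for-loop over the stream depends on (the group stage below is a stand-in, not the project's real grouping code):
# Illustrative contract sketch -- placeholder stage, not sqlparse internals.
def group(statements):
    # The grouping stage must hand back an iterable of statements;
    # returning None is exactly what produces the TypeError shown above.
    return list(statements)
stream = iter(["CREATE OR REPLACE FUNCTION testfunc1(integer) ...",
               "CREATE OR REPLACE FUNCTION testfunc2(integer) ..."])
stream = group(stream)
print([str(stmt).strip() for stmt in stream])   # two entries, matching what the split test expects
broken = None                                   # what the failing grouping step effectively yields
try:
    [str(stmt).strip() for stmt in broken]
except TypeError as exc:
    print(exc)                                  # 'NoneType' object is not iterable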
test_parse.py::test_double_precision_is_builtin
def test_double_precision_is_builtin():
s = 'DOUBLE PRECISION'
> t = sqlparse.parse(s)[0].tokens
tests/test_parse.py:199:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_placeholder[?]
ph = '?'
@pytest.mark.parametrize('ph', ['?', ':1', ':foo', '%s', '%(foo)s'])
def test_placeholder(ph):
> p = sqlparse.parse(ph)[0].tokens
tests/test_parse.py:207:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_placeholder[:1]
ph = ':1'
@pytest.mark.parametrize('ph', ['?', ':1', ':foo', '%s', '%(foo)s'])
def test_placeholder(ph):
> p = sqlparse.parse(ph)[0].tokens
tests/test_parse.py:207:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_placeholder[:foo]
ph = ':foo'
@pytest.mark.parametrize('ph', ['?', ':1', ':foo', '%s', '%(foo)s'])
def test_placeholder(ph):
> p = sqlparse.parse(ph)[0].tokens
tests/test_parse.py:207:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_placeholder[%s]
test_parse.py::test_placeholder[%s]
ph = '%s'
@pytest.mark.parametrize('ph', ['?', ':1', ':foo', '%s', '%(foo)s'])
def test_placeholder(ph):
> p = sqlparse.parse(ph)[0].tokens
tests/test_parse.py:207:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_placeholder[%(foo)s]
test_parse.py::test_placeholder[%(foo)s]
ph = '%(foo)s'
@pytest.mark.parametrize('ph', ['?', ':1', ':foo', '%s', '%(foo)s'])
def test_placeholder(ph):
> p = sqlparse.parse(ph)[0].tokens
tests/test_parse.py:207:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_scientific_numbers[6.67428E-8-expected0]
test_parse.py::test_scientific_numbers[6.67428E-8-expected0]
num = '6.67428E-8', expected = Token.Literal.Number.Float
@pytest.mark.parametrize('num, expected', [
('6.67428E-8', T.Number.Float),
('1.988e33', T.Number.Float),
('1e-12', T.Number.Float),
('e1', None),
])
def test_scientific_numbers(num, expected):
> p = sqlparse.parse(num)[0].tokens
tests/test_parse.py:219:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_scientific_numbers[1.988e33-expected1]
test_parse.py::test_scientific_numbers[1.988e33-expected1]
num = '1.988e33', expected = Token.Literal.Number.Float
@pytest.mark.parametrize('num, expected', [
('6.67428E-8', T.Number.Float),
('1.988e33', T.Number.Float),
('1e-12', T.Number.Float),
('e1', None),
])
def test_scientific_numbers(num, expected):
> p = sqlparse.parse(num)[0].tokens
tests/test_parse.py:219:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_scientific_numbers[1e-12-expected2]
test_parse.py::test_scientific_numbers[1e-12-expected2]
num = '1e-12', expected = Token.Literal.Number.Float
@pytest.mark.parametrize('num, expected', [
('6.67428E-8', T.Number.Float),
('1.988e33', T.Number.Float),
('1e-12', T.Number.Float),
('e1', None),
])
def test_scientific_numbers(num, expected):
> p = sqlparse.parse(num)[0].tokens
tests/test_parse.py:219:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_scientific_numbers[e1-None]
test_parse.py::test_scientific_numbers[e1-None]
num = 'e1', expected = None
@pytest.mark.parametrize('num, expected', [
('6.67428E-8', T.Number.Float),
('1.988e33', T.Number.Float),
('1e-12', T.Number.Float),
('e1', None),
])
def test_scientific_numbers(num, expected):
> p = sqlparse.parse(num)[0].tokens
tests/test_parse.py:219:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_single_quotes_are_strings
test_parse.py::test_single_quotes_are_strings
def test_single_quotes_are_strings():
> p = sqlparse.parse("'foo'")[0].tokens
tests/test_parse.py:225:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_double_quotes_are_identifiers
test_parse.py::test_double_quotes_are_identifiers
def test_double_quotes_are_identifiers():
> p = sqlparse.parse('"foo"')[0].tokens
tests/test_parse.py:231:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_single_quotes_with_linebreaks
test_parse.py::test_single_quotes_with_linebreaks
def test_single_quotes_with_linebreaks():
# issue118
> p = sqlparse.parse("'f\nf'")[0].tokens
tests/test_parse.py:238:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_sqlite_identifiers
test_parse.py::test_sqlite_identifiers
def test_sqlite_identifiers():
# Make sure we still parse sqlite style escapes
> p = sqlparse.parse('[col1],[col2]')[0].tokens
tests/test_parse.py:245:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_simple_1d_array_index
test_parse.py::test_simple_1d_array_index
def test_simple_1d_array_index():
> p = sqlparse.parse('col[1]')[0].tokens
tests/test_parse.py:257:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_2d_array_index
test_parse.py::test_2d_array_index
def test_2d_array_index():
> p = sqlparse.parse('col[x][(y+1)*2]')[0].tokens
tests/test_parse.py:267:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_array_index_function_result
test_parse.py::test_array_index_function_result
def test_array_index_function_result():
> p = sqlparse.parse('somefunc()[1]')[0].tokens
tests/test_parse.py:274:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_schema_qualified_array_index
test_parse.py::test_schema_qualified_array_index
def test_schema_qualified_array_index():
> p = sqlparse.parse('schem.col[1]')[0].tokens
tests/test_parse.py:280:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_aliased_array_index
test_parse.py::test_aliased_array_index
def test_aliased_array_index():
> p = sqlparse.parse('col[1] x')[0].tokens
tests/test_parse.py:288:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_array_literal
test_parse.py::test_array_literal
def test_array_literal():
# See issue #176
> p = sqlparse.parse('ARRAY[%s, %s]')[0]
tests/test_parse.py:297:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_typed_array_definition
test_parse.py::test_typed_array_definition
def test_typed_array_definition():
# array indices aren't grouped with built-ins, but make sure we can extract
# identifier names
> p = sqlparse.parse('x int, y int[], z int')[0]
tests/test_parse.py:305:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_single_line_comments[select 1 -- foo]
s = 'select 1 -- foo'
@pytest.mark.parametrize('s', ['select 1 -- foo', 'select 1 # foo'])
def test_single_line_comments(s):
# see issue178
> p = sqlparse.parse(s)[0]
tests/test_parse.py:314:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_single_line_comments[select 1 # foo]
s = 'select 1 # foo'
@pytest.mark.parametrize('s', ['select 1 -- foo', 'select 1 # foo'])
def test_single_line_comments(s):
# see issue178
> p = sqlparse.parse(s)[0]
tests/test_parse.py:314:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_names_and_special_names[foo]
test_parse.py::test_names_and_special_names[foo]
s = 'foo'
@pytest.mark.parametrize('s', ['foo', '@foo', '#foo', '##foo'])
def test_names_and_special_names(s):
# see issue192
> p = sqlparse.parse(s)[0]
tests/test_parse.py:322:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_names_and_special_names[@foo]
test_parse.py::test_names_and_special_names[@foo]
s = '@foo'
@pytest.mark.parametrize('s', ['foo', '@foo', '#foo', '##foo'])
def test_names_and_special_names(s):
# see issue192
> p = sqlparse.parse(s)[0]
tests/test_parse.py:322:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_names_and_special_names[#foo]
test_parse.py::test_names_and_special_names[#foo]
s = '#foo'
@pytest.mark.parametrize('s', ['foo', '@foo', '#foo', '##foo'])
def test_names_and_special_names(s):
# see issue192
> p = sqlparse.parse(s)[0]
tests/test_parse.py:322:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_names_and_special_names[##foo]
test_parse.py::test_names_and_special_names[##foo]
s = '##foo'
@pytest.mark.parametrize('s', ['foo', '@foo', '#foo', '##foo'])
def test_names_and_special_names(s):
# see issue192
> p = sqlparse.parse(s)[0]
tests/test_parse.py:322:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_get_token_at_offset
test_parse.py::test_get_token_at_offset
def test_get_token_at_offset():
> p = sqlparse.parse('select * from dual')[0]
tests/test_parse.py:328:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_pprint
test_parse.py::test_pprint
def test_pprint():
> p = sqlparse.parse('select a0, b0, c0, d0, e0 from '
'(select * from dual) q0 where 1=1 and 2=2')[0]
tests/test_parse.py:340:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_wildcard_multiplication
test_parse.py::test_wildcard_multiplication
def test_wildcard_multiplication():
> p = sqlparse.parse('select * from dual')[0]
tests/test_parse.py:405:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_stmt_tokens_parents
test_parse.py::test_stmt_tokens_parents
def test_stmt_tokens_parents():
# see issue 226
s = "CREATE TABLE test();"
> stmt = sqlparse.parse(s)[0]
tests/test_parse.py:418:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_dbldollar_as_literal[$$foo$$-True]
test_parse.py::test_dbldollar_as_literal[$$foo$$-True]
sql = '$$foo$$', is_literal = True
@pytest.mark.parametrize('sql, is_literal', [
('$$foo$$', True),
('$_$foo$_$', True),
('$token$ foo $token$', True),
# don't parse inner tokens
('$_$ foo $token$bar$token$ baz$_$', True),
('$A$ foo $B$', False) # tokens don't match
])
def test_dbldollar_as_literal(sql, is_literal):
# see issue 277
> p = sqlparse.parse(sql)[0]
tests/test_parse.py:433:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_dbldollar_as_literal[$_$foo$_$-True]
test_parse.py::test_dbldollar_as_literal[$_$foo$_$-True]
sql = '$_$foo$_$', is_literal = True
@pytest.mark.parametrize('sql, is_literal', [
('$$foo$$', True),
('$_$foo$_$', True),
('$token$ foo $token$', True),
# don't parse inner tokens
('$_$ foo $token$bar$token$ baz$_$', True),
('$A$ foo $B$', False) # tokens don't match
])
def test_dbldollar_as_literal(sql, is_literal):
# see issue 277
> p = sqlparse.parse(sql)[0]
tests/test_parse.py:433:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_dbldollar_as_literal[$token$ foo $token$-True]
test_parse.py::test_dbldollar_as_literal[$token$ foo $token$-True]
sql = '$token$ foo $token$', is_literal = True
@pytest.mark.parametrize('sql, is_literal', [
('$$foo$$', True),
('$_$foo$_$', True),
('$token$ foo $token$', True),
# don't parse inner tokens
('$_$ foo $token$bar$token$ baz$_$', True),
('$A$ foo $B$', False) # tokens don't match
])
def test_dbldollar_as_literal(sql, is_literal):
# see issue 277
> p = sqlparse.parse(sql)[0]
tests/test_parse.py:433:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_dbldollar_as_literal[$_$ foo $token$bar$token$ baz$_$-True]
test_parse.py::test_dbldollar_as_literal[$_$ foo $token$bar$token$ baz$_$-True]
sql = '$_$ foo $token$bar$token$ baz$_$', is_literal = True
@pytest.mark.parametrize('sql, is_literal', [
('$$foo$$', True),
('$_$foo$_$', True),
('$token$ foo $token$', True),
# don't parse inner tokens
('$_$ foo $token$bar$token$ baz$_$', True),
('$A$ foo $B$', False) # tokens don't match
])
def test_dbldollar_as_literal(sql, is_literal):
# see issue 277
> p = sqlparse.parse(sql)[0]
tests/test_parse.py:433:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_dbldollar_as_literal[$A$ foo $B$-False]
test_parse.py::test_dbldollar_as_literal[$A$ foo $B$-False]
sql = '$A$ foo $B$', is_literal = False
@pytest.mark.parametrize('sql, is_literal', [
('$$foo$$', True),
('$_$foo$_$', True),
('$token$ foo $token$', True),
# don't parse inner tokens
('$_$ foo $token$bar$token$ baz$_$', True),
('$A$ foo $B$', False) # tokens don't match
])
def test_dbldollar_as_literal(sql, is_literal):
# see issue 277
> p = sqlparse.parse(sql)[0]
tests/test_parse.py:433:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_non_ascii
test_parse.py::test_non_ascii
def test_non_ascii():
_test_non_ascii = "insert into test (id, name) values (1, 'тест');"
s = _test_non_ascii
> stmts = sqlparse.parse(s)
tests/test_parse.py:446:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_get_real_name
test_parse.py::test_get_real_name
def test_get_real_name():
# issue 369
s = "update a t set t.b=1"
> stmts = sqlparse.parse(s)
tests/test_parse.py:463:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_from_subquery
test_parse.py::test_from_subquery
def test_from_subquery():
# issue 446
s = 'from(select 1)'
> stmts = sqlparse.parse(s)
tests/test_parse.py:472:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_parenthesis
test_parse.py::test_parenthesis
def test_parenthesis():
> tokens = sqlparse.parse("(\n\n1\n\n)")[0].tokens[0].tokens
tests/test_parse.py:488:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_configurable_keywords
test_parse.py::test_configurable_keywords
def test_configurable_keywords():
sql = """select * from foo BACON SPAM EGGS;"""
> tokens = sqlparse.parse(sql)[0]
tests/test_parse.py:510:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_configurable_regex
test_parse.py::test_configurable_regex
def test_configurable_regex():
lex = Lexer.get_default_instance()
lex.clear()
my_regex = (r"ZORDER\s+BY\b", sqlparse.tokens.Keyword)
lex.set_SQL_REGEX(
keywords.SQL_REGEX[:38]
+ [my_regex]
+ keywords.SQL_REGEX[38:]
)
lex.add_keywords(keywords.KEYWORDS_COMMON)
lex.add_keywords(keywords.KEYWORDS_ORACLE)
lex.add_keywords(keywords.KEYWORDS_PLPGSQL)
lex.add_keywords(keywords.KEYWORDS_HQL)
lex.add_keywords(keywords.KEYWORDS_MSACCESS)
lex.add_keywords(keywords.KEYWORDS)
> tokens = sqlparse.parse("select * from foo zorder by bar;")[0]
tests/test_parse.py:572:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_json_operators[->]
test_parse.py::test_json_operators[->]
sql = '->'
@pytest.mark.parametrize('sql', [
'->', '->>', '#>', '#>>',
'@>', '<@',
# leaving ? out for now, they're somehow ambiguous as placeholders
# '?', '?|', '?&',
'||', '-', '#-'
])
def test_json_operators(sql):
> p = sqlparse.parse(sql)
tests/test_parse.py:592:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_json_operators[->>]
test_parse.py::test_json_operators[->>]
sql = '->>'
@pytest.mark.parametrize('sql', [
'->', '->>', '#>', '#>>',
'@>', '<@',
# leaving ? out for now, they're somehow ambiguous as placeholders
# '?', '?|', '?&',
'||', '-', '#-'
])
def test_json_operators(sql):
> p = sqlparse.parse(sql)
tests/test_parse.py:592:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_json_operators[#>]
test_parse.py::test_json_operators[#>]
sql = '#>'
@pytest.mark.parametrize('sql', [
'->', '->>', '#>', '#>>',
'@>', '<@',
# leaving ? out for now, they're somehow ambiguous as placeholders
# '?', '?|', '?&',
'||', '-', '#-'
])
def test_json_operators(sql):
> p = sqlparse.parse(sql)
tests/test_parse.py:592:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_json_operators[#>>]
test_parse.py::test_json_operators[#>>]
sql = '#>>'
@pytest.mark.parametrize('sql', [
'->', '->>', '#>', '#>>',
'@>', '<@',
# leaving ? out for now, they're somehow ambiguous as placeholders
# '?', '?|', '?&',
'||', '-', '#-'
])
def test_json_operators(sql):
> p = sqlparse.parse(sql)
tests/test_parse.py:592:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_json_operators[@>]
test_parse.py::test_json_operators[@>]
sql = '@>'
@pytest.mark.parametrize('sql', [
'->', '->>', '#>', '#>>',
'@>', '<@',
# leaving ? out for now, they're somehow ambiguous as placeholders
# '?', '?|', '?&',
'||', '-', '#-'
])
def test_json_operators(sql):
> p = sqlparse.parse(sql)
tests/test_parse.py:592:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_json_operators[<@]
test_parse.py::test_json_operators[<@]
sql = '<@'
@pytest.mark.parametrize('sql', [
'->', '->>', '#>', '#>>',
'@>', '<@',
# leaving ? out for now, they're somehow ambiguous as placeholders
# '?', '?|', '?&',
'||', '-', '#-'
])
def test_json_operators(sql):
> p = sqlparse.parse(sql)
tests/test_parse.py:592:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_json_operators[||]
test_parse.py::test_json_operators[||]
sql = '||'
@pytest.mark.parametrize('sql', [
'->', '->>', '#>', '#>>',
'@>', '<@',
# leaving ? out for now, they're somehow ambiguous as placeholders
# '?', '?|', '?&',
'||', '-', '#-'
])
def test_json_operators(sql):
> p = sqlparse.parse(sql)
tests/test_parse.py:592:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_json_operators[-]
test_parse.py::test_json_operators[-]
sql = '-'
@pytest.mark.parametrize('sql', [
'->', '->>', '#>', '#>>',
'@>', '<@',
# leaving ? out for now, they're somehow ambiguous as placeholders
# '?', '?|', '?&',
'||', '-', '#-'
])
def test_json_operators(sql):
> p = sqlparse.parse(sql)
tests/test_parse.py:592:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_parse.py::test_json_operators[#-]
test_parse.py::test_json_operators[#-]
sql = '#-'
@pytest.mark.parametrize('sql', [
'->', '->>', '#>', '#>>',
'@>', '<@',
# leaving ? out for now, they're somehow ambiguous as placeholders
# '?', '?|', '?&',
'||', '-', '#-'
])
def test_json_operators(sql):
> p = sqlparse.parse(sql)
tests/test_parse.py:592:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue9
test_regressions.py::test_issue9
def test_issue9():
# make sure where doesn't consume parenthesis
> p = sqlparse.parse('(where 1)')[0]
tests/test_regressions.py:13:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue13
test_regressions.py::test_issue13
def test_issue13():
> parsed = sqlparse.parse("select 'one';\n"
"select 'two\\'';\n"
"select 'three';")
tests/test_regressions.py:24:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue26[--hello]
test_regressions.py::test_issue26[--hello]
s = '--hello'
@pytest.mark.parametrize('s', ['--hello', '-- hello', '--hello\n',
'--', '--\n'])
def test_issue26(s):
# parse stand-alone comments
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:35:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue26[-- hello]
test_regressions.py::test_issue26[-- hello]
s = '-- hello'
@pytest.mark.parametrize('s', ['--hello', '-- hello', '--hello\n',
'--', '--\n'])
def test_issue26(s):
# parse stand-alone comments
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:35:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue26[--hello\n]
test_regressions.py::test_issue26[--hello\n]
s = '--hello\n'
@pytest.mark.parametrize('s', ['--hello', '-- hello', '--hello\n',
'--', '--\n'])
def test_issue26(s):
# parse stand-alone comments
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:35:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue26[--]
test_regressions.py::test_issue26[--]
s = '--'
@pytest.mark.parametrize('s', ['--hello', '-- hello', '--hello\n',
'--', '--\n'])
def test_issue26(s):
# parse stand-alone comments
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:35:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue26[--\n]
test_regressions.py::test_issue26[--\n]
s = '--\n'
@pytest.mark.parametrize('s', ['--hello', '-- hello', '--hello\n',
'--', '--\n'])
def test_issue26(s):
# parse stand-alone comments
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:35:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue34[create]
test_regressions.py::test_issue34[create]
value = 'create'
@pytest.mark.parametrize('value', ['create', 'CREATE'])
def test_issue34(value):
> t = sqlparse.parse("create")[0].token_first()
tests/test_regressions.py:42:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue34[CREATE]
test_regressions.py::test_issue34[CREATE]
value = 'CREATE'
@pytest.mark.parametrize('value', ['create', 'CREATE'])
def test_issue34(value):
> t = sqlparse.parse("create")[0].token_first()
tests/test_regressions.py:42:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue35
def test_issue35():
# missing space before LIMIT. Updated for #321
> sql = sqlparse.format("select * from foo where bar = 1 limit 1",
reindent=True)
tests/test_regressions.py:48:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
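test_issue35 only calls the formatter; shown below as plain usage rather than the upstream assertion (the exact expected string lives in tests/test_regressions.py). On a working build, reindent=True splits the clauses onto separate lines, and the regression is about keeping a space before limit:

import sqlparse

formatted = sqlparse.format('select * from foo where bar = 1 limit 1',
                            reindent=True)
print(formatted)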
test_regressions.py::test_issue38
def test_issue38():
> sql = sqlparse.format("SELECT foo; -- comment", strip_comments=True)
tests/test_regressions.py:58:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'SELECT foo; -- comment', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'StripCommentsFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
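Unlike the grouping failures, this one comes from FilterStack.run (shown above), which expects every preprocess filter to expose process(stream); the StripCommentsFilter in this checkout no longer does. A sketch of that contract with a placeholder pass-through filter, not the upstream implementation:

class PassThroughPreprocessFilter:
    # illustrative only; a real comment-stripping filter would drop
    # Comment tokens instead of yielding everything unchanged
    def process(self, stream):
        # stream is the lexer output, a sequence of (ttype, value) pairs
        for ttype, value in stream:
            yield ttype, value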
test_regressions.py::test_issue39
def test_issue39():
> p = sqlparse.parse('select user.id from user')[0]
tests/test_regressions.py:65:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue40
def test_issue40():
# make sure identifier lists in subselects are grouped
> p = sqlparse.parse('SELECT id, name FROM '
'(SELECT id, name FROM bar) as foo')[0]
tests/test_regressions.py:77:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
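test_issue40 asserts that the column lists inside the subselect get grouped. Once parsing works again, an ad-hoc walk like the one below (not sqlparse API; the token layout is assumed) can confirm it:

import sqlparse
from sqlparse import sql

def collect(token, cls):
    # gather grouped nodes of a given class, e.g. IdentifierList for 'id, name'
    found = [token] if isinstance(token, cls) else []
    if token.is_group:
        for child in token.tokens:
            found.extend(collect(child, cls))
    return found

p = sqlparse.parse('SELECT id, name FROM (SELECT id, name FROM bar) as foo')[0]
print(collect(p, sql.IdentifierList))  # expect the outer and inner lists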
test_regressions.py::test_issue78[get_name-z-select x.y::text as z from foo]
s = 'select x.y::text as z from foo', func_name = 'get_name', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
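The test_issue78 matrix that follows repeats this same failure across five Identifier accessors and eight quoting variants. For reference, rough usage on a working install (tokens[2] as the Identifier position is an assumption, not taken from this report):

import sqlparse

stmt = sqlparse.parse('select x.y::text as z from foo')[0]
ident = stmt.tokens[2]
print(ident.get_name())         # 'z' (the alias wins over the real name)
print(ident.get_real_name())    # 'y'
print(ident.get_parent_name())  # 'x'
print(ident.get_alias())        # 'z'
print(ident.get_typecast())     # 'text'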
test_regressions.py::test_issue78[get_name-z-select x.y::text as "z" from foo]
s = 'select x.y::text as "z" from foo', func_name = 'get_name', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_name-z-select x."y"::text as z from foo]
s = 'select x."y"::text as z from foo', func_name = 'get_name', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_name-z-select x."y"::text as "z" from foo]
s = 'select x."y"::text as "z" from foo', func_name = 'get_name', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_name-z-select "x".y::text as z from foo]
s = 'select "x".y::text as z from foo', func_name = 'get_name', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_name-z-select "x".y::text as "z" from foo]
s = 'select "x".y::text as "z" from foo', func_name = 'get_name', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_name-z-select "x"."y"::text as z from foo]
s = 'select "x"."y"::text as z from foo', func_name = 'get_name', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_name-z-select "x"."y"::text as "z" from foo]
s = 'select "x"."y"::text as "z" from foo', func_name = 'get_name', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_real_name-y-select x.y::text as z from foo]
s = 'select x.y::text as z from foo', func_name = 'get_real_name', result = 'y'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_real_name-y-select x.y::text as "z" from foo]
s = 'select x.y::text as "z" from foo', func_name = 'get_real_name'
result = 'y'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_real_name-y-select x."y"::text as z from foo]
s = 'select x."y"::text as z from foo', func_name = 'get_real_name'
result = 'y'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_real_name-y-select x."y"::text as "z" from foo]
s = 'select x."y"::text as "z" from foo', func_name = 'get_real_name'
result = 'y'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_real_name-y-select "x".y::text as z from foo]
s = 'select "x".y::text as z from foo', func_name = 'get_real_name'
result = 'y'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_real_name-y-select "x".y::text as "z" from foo]
s = 'select "x".y::text as "z" from foo', func_name = 'get_real_name'
result = 'y'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_real_name-y-select "x"."y"::text as z from foo]
s = 'select "x"."y"::text as z from foo', func_name = 'get_real_name'
result = 'y'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_real_name-y-select "x"."y"::text as "z" from foo]
s = 'select "x"."y"::text as "z" from foo', func_name = 'get_real_name'
result = 'y'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_parent_name-x-select x.y::text as z from foo]
s = 'select x.y::text as z from foo', func_name = 'get_parent_name'
result = 'x'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_parent_name-x-select x.y::text as "z" from foo]
s = 'select x.y::text as "z" from foo', func_name = 'get_parent_name'
result = 'x'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_parent_name-x-select x."y"::text as z from foo]
s = 'select x."y"::text as z from foo', func_name = 'get_parent_name'
result = 'x'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_parent_name-x-select x."y"::text as "z" from foo]
s = 'select x."y"::text as "z" from foo', func_name = 'get_parent_name'
result = 'x'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_parent_name-x-select "x".y::text as z from foo]
s = 'select "x".y::text as z from foo', func_name = 'get_parent_name'
result = 'x'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_parent_name-x-select "x".y::text as "z" from foo]
s = 'select "x".y::text as "z" from foo', func_name = 'get_parent_name'
result = 'x'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_parent_name-x-select "x"."y"::text as z from foo]
s = 'select "x"."y"::text as z from foo', func_name = 'get_parent_name'
result = 'x'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_parent_name-x-select "x"."y"::text as "z" from foo]
s = 'select "x"."y"::text as "z" from foo', func_name = 'get_parent_name'
result = 'x'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_alias-z-select x.y::text as z from foo]
s = 'select x.y::text as z from foo', func_name = 'get_alias', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_alias-z-select x.y::text as "z" from foo]
s = 'select x.y::text as "z" from foo', func_name = 'get_alias', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_alias-z-select x."y"::text as z from foo]
s = 'select x."y"::text as z from foo', func_name = 'get_alias', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_alias-z-select x."y"::text as "z" from foo]
s = 'select x."y"::text as "z" from foo', func_name = 'get_alias', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_alias-z-select "x".y::text as z from foo]
s = 'select "x".y::text as z from foo', func_name = 'get_alias', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_alias-z-select "x".y::text as "z" from foo]
s = 'select "x".y::text as "z" from foo', func_name = 'get_alias', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_alias-z-select "x"."y"::text as z from foo]
s = 'select "x"."y"::text as z from foo', func_name = 'get_alias', result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_alias-z-select "x"."y"::text as "z" from foo]
s = 'select "x"."y"::text as "z" from foo', func_name = 'get_alias'
result = 'z'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_typecast-text-select x.y::text as z from foo]
s = 'select x.y::text as z from foo', func_name = 'get_typecast'
result = 'text'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_typecast-text-select x.y::text as "z" from foo]
s = 'select x.y::text as "z" from foo', func_name = 'get_typecast'
result = 'text'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_typecast-text-select x."y"::text as z from foo]
s = 'select x."y"::text as z from foo', func_name = 'get_typecast'
result = 'text'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_typecast-text-select x."y"::text as "z" from foo]
s = 'select x."y"::text as "z" from foo', func_name = 'get_typecast'
result = 'text'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_typecast-text-select "x".y::text as z from foo]
s = 'select "x".y::text as z from foo', func_name = 'get_typecast'
result = 'text'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_typecast-text-select "x".y::text as "z" from foo]
s = 'select "x".y::text as "z" from foo', func_name = 'get_typecast'
result = 'text'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_typecast-text-select "x"."y"::text as z from foo]
s = 'select "x"."y"::text as z from foo', func_name = 'get_typecast'
result = 'text'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue78[get_typecast-text-select "x"."y"::text as "z" from foo]
s = 'select "x"."y"::text as "z" from foo', func_name = 'get_typecast'
result = 'text'
@pytest.mark.parametrize('s', ['select x.y::text as z from foo',
'select x.y::text as "z" from foo',
'select x."y"::text as z from foo',
'select x."y"::text as "z" from foo',
'select "x".y::text as z from foo',
'select "x".y::text as "z" from foo',
'select "x"."y"::text as z from foo',
'select "x"."y"::text as "z" from foo'])
@pytest.mark.parametrize('func_name, result', [('get_name', 'z'),
('get_real_name', 'y'),
('get_parent_name', 'x'),
('get_alias', 'z'),
('get_typecast', 'text')])
def test_issue78(s, func_name, result):
# the bug author provided this nice examples, let's use them!
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:120:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
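Every parametrization of test_issue78 fails at the same frame: grouping.group() hands None to _group_matching(), which immediately dereferences tlist.tokens. A minimal reproduction sketch, assuming it is run against the patched sqlparse in this testbed (the released package parses this statement without error):

import sqlparse

# Reproduces the AttributeError raised by every test_issue78 case above;
# on an unbroken sqlparse this call returns a Statement object instead.
try:
    sqlparse.parse('select "x"."y"::text as "z" from foo')
except AttributeError as exc:
    print(exc)  # 'NoneType' object has no attribute 'tokens'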
test_regressions.py::test_issue83
def test_issue83():
sql = """ CREATE OR REPLACE FUNCTION func_a(text)
RETURNS boolean LANGUAGE plpgsql STRICT IMMUTABLE AS
$_$
BEGIN
...
END;
$_$;
CREATE OR REPLACE FUNCTION func_b(text)
RETURNS boolean LANGUAGE plpgsql STRICT IMMUTABLE AS
$_$
BEGIN
...
END;
$_$;
ALTER TABLE..... ;"""
> t = sqlparse.split(sql)
tests/test_regressions.py:146:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = ' CREATE OR REPLACE FUNCTION func_a(text)\n RETURNS boolean LANGUAGE plpgsql STRICT IMMUTABLE AS\n... BEGIN\n ...\n END;\n $_$;\n\n ALTER TABLE..... ;'
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
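test_issue83 surfaces the same root cause through sqlparse.split(): with grouping enabled, grouping.group(stream) evidently returns None, so the "for stmt in stream" loop in FilterStack.run() raises TypeError instead of yielding statements. A hedged sketch of what that loop depends on, reusing the two-statement input from test_splitting_at_and_backticks_issue588 further down (an illustration, not a fix):

import sqlparse

# On a working sqlparse, split() returns one string per statement; on this
# testbed the grouping step yields None and run() cannot iterate it.
sql = 'grant foo to user1@`myhost`; grant bar to user1@`myhost`;'
try:
    print(sqlparse.split(sql))  # expected: a list with two statements
except TypeError as exc:
    print(exc)  # "'NoneType' object is not iterable"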
test_regressions.py::test_comment_encoding_when_reindent
def test_comment_encoding_when_reindent():
# There was an UnicodeEncodeError in the reindent filter that
# casted every comment followed by a keyword to str.
sql = 'select foo -- Comment containing Ümläuts\nfrom bar'
> formatted = sqlparse.format(sql, reindent=True)
tests/test_regressions.py:154:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_parse_sql_with_binary
def test_parse_sql_with_binary():
# See https://github.com/andialbrecht/sqlparse/pull/88
# digest = '|ËêplL4¡høN{'
digest = '\x82|\xcb\x0e\xea\x8aplL4\xa1h\x91\xf8N{'
sql = "select * from foo where bar = '{}'".format(digest)
> formatted = sqlparse.format(sql, reindent=True)
tests/test_regressions.py:163:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_dont_alias_keywords
def test_dont_alias_keywords():
# The _group_left_right function had a bug where the check for the
# left side wasn't handled correctly. In one case this resulted in
# a keyword turning into an identifier.
> p = sqlparse.parse('FROM AS foo')[0]
tests/test_regressions.py:172:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_format_accepts_encoding
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad232680>
def test_format_accepts_encoding(load_file):
# issue20
sql = load_file('test_cp1251.sql', 'cp1251')
> formatted = sqlparse.format(sql, reindent=True, encoding='cp1251')
tests/test_regressions.py:181:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_stream
get_stream = <function get_stream.<locals>.make_stream at 0x7ef7ad230f70>
def test_stream(get_stream):
with get_stream("stream.sql") as stream:
> p = sqlparse.parse(stream)[0]
tests/test_regressions.py:189:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue90
def test_issue90():
sql = ('UPDATE "gallery_photo" SET "owner_id" = 4018, "deleted_at" = NULL,'
' "width" = NULL, "height" = NULL, "rating_votes" = 0,'
' "rating_score" = 0, "thumbnail_width" = NULL,'
' "thumbnail_height" = NULL, "price" = 1, "description" = NULL')
> formatted = sqlparse.format(sql, reindent=True)
tests/test_regressions.py:198:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_except_formatting
def test_except_formatting():
sql = 'SELECT 1 FROM foo WHERE 2 = 3 EXCEPT SELECT 2 FROM bar WHERE 1 = 2'
> formatted = sqlparse.format(sql, reindent=True)
tests/test_regressions.py:216:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_null_with_as
def test_null_with_as():
sql = 'SELECT NULL AS c1, NULL AS c2 FROM t1'
> formatted = sqlparse.format(sql, reindent=True)
tests/test_regressions.py:230:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue190_open_file
filepath = <function filepath.<locals>.make_filepath at 0x7ef7ad8f9360>
def test_issue190_open_file(filepath):
path = filepath('stream.sql')
with open(path) as stream:
> p = sqlparse.parse(stream)[0]
tests/test_regressions.py:241:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue193_splitting_function
def test_issue193_splitting_function():
sql = """ CREATE FUNCTION a(x VARCHAR(20)) RETURNS VARCHAR(20)
BEGIN
DECLARE y VARCHAR(20);
RETURN x;
END;
SELECT * FROM a.b;"""
> statements = sqlparse.split(sql)
tests/test_regressions.py:252:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = ' CREATE FUNCTION a(x VARCHAR(20)) RETURNS VARCHAR(20)\n BEGIN\n DECLARE y VARCHAR(20);\n RETURN x;\n END;\n SELECT * FROM a.b;'
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_regressions.py::test_issue194_splitting_function
def test_issue194_splitting_function():
sql = """ CREATE FUNCTION a(x VARCHAR(20)) RETURNS VARCHAR(20)
BEGIN
DECLARE y VARCHAR(20);
IF (1 = 1) THEN
SET x = y;
END IF;
RETURN x;
END;
SELECT * FROM a.b;"""
> statements = sqlparse.split(sql)
tests/test_regressions.py:266:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = ' CREATE FUNCTION a(x VARCHAR(20)) RETURNS VARCHAR(20)\n BEGIN\n DECLARE y VARCHAR(20...x = y;\n END IF;\n RETURN x;\n END;\n SELECT * FROM a.b;'
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_regressions.py::test_issue186_get_type
def test_issue186_get_type():
sql = "-- comment\ninsert into foo"
> p = sqlparse.parse(sql)[0]
tests/test_regressions.py:272:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue213_leadingws
def test_issue213_leadingws():
sql = " select * from foo"
> assert sqlparse.format(sql, strip_whitespace=True) == "select * from foo"
tests/test_regressions.py:285:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = ' select * from foo', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'StripWhitespaceFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
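test_issue213_leadingws fails one step earlier than the grouping failures: the preprocess stage calls filter_.process(stream), but StripWhitespaceFilter exposes no process method here. The same pattern appears below for KeywordCaseFilter (test_issue469_copy_as_psql_command) and StripCommentsFilter (test_issue484_comments_and_newlines). A minimal sketch of the interface FilterStack.run() assumes for preprocess filters; the class name is made up for illustration:

class ExamplePassThroughFilter:
    """Hypothetical preprocess filter with the interface run() expects."""

    def process(self, stream):
        # stream is the (ttype, value) pairs produced by lexer.tokenize();
        # a preprocess filter must yield tokens back, possibly modified.
        for ttype, value in stream:
            yield ttype, value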
test_regressions.py::test_issue227_gettype_cte
def test_issue227_gettype_cte():
> select_stmt = sqlparse.parse('SELECT 1, 2, 3 FROM foo;')
tests/test_regressions.py:289:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue207_runaway_format
def test_issue207_runaway_format():
sql = 'select 1 from (select 1 as one, 2 as two, 3 from dual) t0'
> p = sqlparse.format(sql, reindent=True)
tests/test_regressions.py:303:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_token_next_doesnt_ignore_skip_cm
def test_token_next_doesnt_ignore_skip_cm():
sql = '--comment\nselect 1'
> tok = sqlparse.parse(sql)[0].token_next(-1, skip_cm=True)[1]
tests/test_regressions.py:315:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue284_as_grouping[SELECT x AS]
s = 'SELECT x AS'
@pytest.mark.parametrize('s', [
'SELECT x AS',
'AS'
])
def test_issue284_as_grouping(s):
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:324:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue284_as_grouping[AS]
s = 'AS'
@pytest.mark.parametrize('s', [
'SELECT x AS',
'AS'
])
def test_issue284_as_grouping(s):
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:324:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue315_utf8_by_default
def test_issue315_utf8_by_default():
# Make sure the lexer can handle utf-8 string by default correctly
# digest = '齐天大圣.カラフルな雲.사랑해요'
# The digest contains Chinese, Japanese and Korean characters
# All in 'utf-8' encoding.
digest = (
'\xe9\xbd\x90\xe5\xa4\xa9\xe5\xa4\xa7\xe5\x9c\xa3.'
'\xe3\x82\xab\xe3\x83\xa9\xe3\x83\x95\xe3\x83\xab\xe3\x81\xaa\xe9'
'\x9b\xb2.'
'\xec\x82\xac\xeb\x9e\x91\xed\x95\xb4\xec\x9a\x94'
)
sql = "select * from foo where bar = '{}'".format(digest)
> formatted = sqlparse.format(sql, reindent=True)
tests/test_regressions.py:340:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue322_concurrently_is_keyword
def test_issue322_concurrently_is_keyword():
s = 'CREATE INDEX CONCURRENTLY myindex ON mytable(col1);'
> p = sqlparse.parse(s)[0]
tests/test_regressions.py:347:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue359_index_error_assignments[SELECT @min_price:=MIN(price), @max_price:=MAX(price) FROM shop;]
s = 'SELECT @min_price:=MIN(price), @max_price:=MAX(price) FROM shop;'
@pytest.mark.parametrize('s', [
'SELECT @min_price:=MIN(price), @max_price:=MAX(price) FROM shop;',
'SELECT @min_price:=MIN(price), @max_price:=MAX(price) FROM shop',
])
def test_issue359_index_error_assignments(s):
> sqlparse.parse(s)
tests/test_regressions.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue359_index_error_assignments[SELECT @min_price:=MIN(price), @max_price:=MAX(price) FROM shop]
s = 'SELECT @min_price:=MIN(price), @max_price:=MAX(price) FROM shop'
@pytest.mark.parametrize('s', [
'SELECT @min_price:=MIN(price), @max_price:=MAX(price) FROM shop;',
'SELECT @min_price:=MIN(price), @max_price:=MAX(price) FROM shop',
])
def test_issue359_index_error_assignments(s):
> sqlparse.parse(s)
tests/test_regressions.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue469_copy_as_psql_command
def test_issue469_copy_as_psql_command():
> formatted = sqlparse.format(
'\\copy select * from foo',
keyword_case='upper', identifier_case='capitalize')
tests/test_regressions.py:369:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = '\\copy select * from foo', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'KeywordCaseFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
test_regressions.py::test_issue484_comments_and_newlines
@pytest.mark.xfail(reason='Needs to be fixed')
def test_issue484_comments_and_newlines():
> formatted = sqlparse.format('\n'.join([
'Create table myTable',
'(',
' myId TINYINT NOT NULL, --my special comment',
' myName VARCHAR2(100) NOT NULL',
')']),
strip_comments=True)
tests/test_regressions.py:377:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'Create table myTable\n(\n myId TINYINT NOT NULL, --my special comment\n myName VARCHAR2(100) NOT NULL\n)'
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
> stream = filter_.process(stream)
E AttributeError: 'StripCommentsFilter' object has no attribute 'process'
sqlparse/engine/filter_stack.py:24: AttributeError
test_regressions.py::test_issue485_split_multi
def test_issue485_split_multi():
p_sql = '''CREATE OR REPLACE RULE ruled_tab_2rules AS ON INSERT
TO public.ruled_tab
DO instead (
select 1;
select 2;
);'''
> assert len(sqlparse.split(p_sql)) == 1
tests/test_regressions.py:399:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'CREATE OR REPLACE RULE ruled_tab_2rules AS ON INSERT\nTO public.ruled_tab\nDO instead (\nselect 1;\nselect 2;\n);'
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_regressions.py::test_issue489_tzcasts
def test_issue489_tzcasts():
> p = sqlparse.parse('select bar at time zone \'UTC\' as foo')[0]
tests/test_regressions.py:403:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_issue562_tzcasts
def test_issue562_tzcasts():
# Test that whitespace between 'from' and 'bar' is retained
> formatted = sqlparse.format(
'SELECT f(HOUR from bar AT TIME ZONE \'UTC\') from foo', reindent=True
)
tests/test_regressions.py:410:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_as_in_parentheses_indents
def test_as_in_parentheses_indents():
# did raise NoneType has no attribute is_group in _process_parentheses
> formatted = sqlparse.format('(as foo)', reindent=True)
tests/test_regressions.py:419:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_format_invalid_where_clause
def test_format_invalid_where_clause():
# did raise ValueError
> formatted = sqlparse.format('where, foo', reindent=True)
tests/test_regressions.py:425:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:59: in format
return ''.join(stack.run(sql, encoding))
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_splitting_at_and_backticks_issue588
def test_splitting_at_and_backticks_issue588():
> splitted = sqlparse.split(
'grant foo to user1@`myhost`; grant bar to user1@`myhost`;')
tests/test_regressions.py:430:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'grant foo to user1@`myhost`; grant bar to user1@`myhost`;'
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_regressions.py::test_comment_between_cte_clauses_issue632
def test_comment_between_cte_clauses_issue632():
> p, = sqlparse.parse("""
WITH foo AS (),
-- A comment before baz subquery
baz AS ()
SELECT * FROM baz;""")
tests/test_regressions.py:437:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_copy_issue672
def test_copy_issue672():
> p = sqlparse.parse('select * from foo')[0]
tests/test_regressions.py:446:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_primary_key_issue740
def test_primary_key_issue740():
> p = sqlparse.parse('PRIMARY KEY')[0]
tests/test_regressions.py:452:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_regressions.py::test_max_recursion
limit_recursion = None
def test_max_recursion(limit_recursion):
with pytest.raises(SQLParseError):
> sqlparse.parse('[' * 1000 + ']' * 1000)
tests/test_regressions.py:467:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
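test_max_recursion never reaches the recursion guard it is written for: the deeply nested input dies with the same grouping AttributeError before any SQLParseError can be raised, so pytest.raises(SQLParseError) sees the wrong exception type. For reference, the behaviour the test encodes (a sketch; it assumes the limit_recursion fixture shown above is in effect):

import pytest
import sqlparse
from sqlparse.exceptions import SQLParseError

def expected_once_grouping_is_fixed():
    # Deeply nested brackets should trip sqlparse's recursion limit and
    # raise SQLParseError instead of the unrelated AttributeError above.
    with pytest.raises(SQLParseError):
        sqlparse.parse('[' * 1000 + ']' * 1000)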
test_split.py::test_split_semicolon
def test_split_semicolon():
sql1 = 'select * from foo;'
sql2 = "select * from foo where bar = 'foo;bar';"
> stmts = sqlparse.parse(''.join([sql1, sql2]))
tests/test_split.py:14:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_backslash
def test_split_backslash():
> stmts = sqlparse.parse("select '\'; select '\'';")
tests/test_split.py:21:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_create_function[function.sql]
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad2325f0>
fn = 'function.sql'
@pytest.mark.parametrize('fn', ['function.sql',
'function_psql.sql',
'function_psql2.sql',
'function_psql3.sql',
'function_psql4.sql'])
def test_split_create_function(load_file, fn):
sql = load_file(fn)
> stmts = sqlparse.parse(sql)
tests/test_split.py:32:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_create_function[function_psql.sql]
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad231b40>
fn = 'function_psql.sql'
@pytest.mark.parametrize('fn', ['function.sql',
'function_psql.sql',
'function_psql2.sql',
'function_psql3.sql',
'function_psql4.sql'])
def test_split_create_function(load_file, fn):
sql = load_file(fn)
> stmts = sqlparse.parse(sql)
tests/test_split.py:32:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_create_function[function_psql2.sql]
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad232680>
fn = 'function_psql2.sql'
@pytest.mark.parametrize('fn', ['function.sql',
'function_psql.sql',
'function_psql2.sql',
'function_psql3.sql',
'function_psql4.sql'])
def test_split_create_function(load_file, fn):
sql = load_file(fn)
> stmts = sqlparse.parse(sql)
tests/test_split.py:32:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_create_function[function_psql3.sql]
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad232050>
fn = 'function_psql3.sql'
@pytest.mark.parametrize('fn', ['function.sql',
'function_psql.sql',
'function_psql2.sql',
'function_psql3.sql',
'function_psql4.sql'])
def test_split_create_function(load_file, fn):
sql = load_file(fn)
> stmts = sqlparse.parse(sql)
tests/test_split.py:32:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_create_function[function_psql4.sql]
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad231240>
fn = 'function_psql4.sql'
@pytest.mark.parametrize('fn', ['function.sql',
'function_psql.sql',
'function_psql2.sql',
'function_psql3.sql',
'function_psql4.sql'])
def test_split_create_function(load_file, fn):
sql = load_file(fn)
> stmts = sqlparse.parse(sql)
tests/test_split.py:32:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_dashcomments
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad2323b0>
def test_split_dashcomments(load_file):
sql = load_file('dashcomment.sql')
> stmts = sqlparse.parse(sql)
tests/test_split.py:39:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_dashcomments_eol[select foo; -- comment\n]
s = 'select foo; -- comment\n'
@pytest.mark.parametrize('s', ['select foo; -- comment\n',
'select foo; -- comment\r',
'select foo; -- comment\r\n',
'select foo; -- comment'])
def test_split_dashcomments_eol(s):
> stmts = sqlparse.parse(s)
tests/test_split.py:49:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_dashcomments_eol[select foo; -- comment\r]
s = 'select foo; -- comment\r'
@pytest.mark.parametrize('s', ['select foo; -- comment\n',
'select foo; -- comment\r',
'select foo; -- comment\r\n',
'select foo; -- comment'])
def test_split_dashcomments_eol(s):
> stmts = sqlparse.parse(s)
tests/test_split.py:49:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_dashcomments_eol[select foo; -- comment\r\n]
s = 'select foo; -- comment\r\n'
@pytest.mark.parametrize('s', ['select foo; -- comment\n',
'select foo; -- comment\r',
'select foo; -- comment\r\n',
'select foo; -- comment'])
def test_split_dashcomments_eol(s):
> stmts = sqlparse.parse(s)
tests/test_split.py:49:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_dashcomments_eol[select foo; -- comment]
s = 'select foo; -- comment'
@pytest.mark.parametrize('s', ['select foo; -- comment\n',
'select foo; -- comment\r',
'select foo; -- comment\r\n',
'select foo; -- comment'])
def test_split_dashcomments_eol(s):
> stmts = sqlparse.parse(s)
tests/test_split.py:49:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_begintag
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad230ee0>
def test_split_begintag(load_file):
sql = load_file('begintag.sql')
> stmts = sqlparse.parse(sql)
tests/test_split.py:55:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_begintag_2
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad231750>
def test_split_begintag_2(load_file):
sql = load_file('begintag_2.sql')
> stmts = sqlparse.parse(sql)
tests/test_split.py:62:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_dropif
def test_split_dropif():
sql = 'DROP TABLE IF EXISTS FOO;\n\nSELECT * FROM BAR;'
> stmts = sqlparse.parse(sql)
tests/test_split.py:69:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_comment_with_umlaut
def test_split_comment_with_umlaut():
sql = ('select * from foo;\n'
'-- Testing an umlaut: ä\n'
'select * from bar;')
> stmts = sqlparse.parse(sql)
tests/test_split.py:78:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_comment_end_of_line
def test_split_comment_end_of_line():
sql = ('select * from foo; -- foo\n'
'select * from bar;')
> stmts = sqlparse.parse(sql)
tests/test_split.py:86:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_casewhen
def test_split_casewhen():
sql = ("SELECT case when val = 1 then 2 else null end as foo;\n"
"comment on table actor is 'The actor table.';")
> stmts = sqlparse.split(sql)
tests/test_split.py:96:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = "SELECT case when val = 1 then 2 else null end as foo;\ncomment on table actor is 'The actor table.';"
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
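The sqlparse.split() failures share a second signature: grouping.group(stream) evidently returns None inside FilterStack.run(), so the following loop over stream raises TypeError. The sketch below restates the contract run() relies on, using hypothetical names (run_pipeline, group_fn) and plain strings standing in for Statement objects; it is an illustration, not code from this repository.
# The grouping step must hand the statement stream back, never None.
def run_pipeline(statements, group_fn):
    grouped = group_fn(statements)              # None in the failing build
    return [str(stmt).strip() for stmt in grouped]

print(run_pipeline(['select 1 ', ' select 2'], lambda stmts: stmts))
# ['select 1', 'select 2']
# run_pipeline(['select 1'], lambda stmts: None) reproduces the TypeError above.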
test_split.py::test_split_casewhen_procedure
load_file = <function load_file.<locals>.make_load_file at 0x7ef7adb7ac20>
def test_split_casewhen_procedure(load_file):
# see issue580
> stmts = sqlparse.split(load_file('casewhen_procedure.sql'))
tests/test_split.py:102:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = "create procedure procName()\nbegin\n select case when column = 'value' then column else 0 end;\nend;\ncreate procedure procName()\nbegin\n select 1;\nend;"
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_cursor_declare
def test_split_cursor_declare():
sql = ('DECLARE CURSOR "foo" AS SELECT 1;\n'
'SELECT 2;')
> stmts = sqlparse.split(sql)
tests/test_split.py:109:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'DECLARE CURSOR "foo" AS SELECT 1;\nSELECT 2;', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_if_function
def test_split_if_function(): # see issue 33
# don't let IF as a function confuse the splitter
sql = ('CREATE TEMPORARY TABLE tmp '
'SELECT IF(a=1, a, b) AS o FROM one; '
'SELECT t FROM two')
> stmts = sqlparse.split(sql)
tests/test_split.py:118:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'CREATE TEMPORARY TABLE tmp SELECT IF(a=1, a, b) AS o FROM one; SELECT t FROM two'
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_stream
def test_split_stream():
stream = StringIO("SELECT 1; SELECT 2;")
> stmts = sqlparse.parsestream(stream)
tests/test_split.py:124:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_encoding_parsestream
def test_split_encoding_parsestream():
stream = StringIO("SELECT 1; SELECT 2;")
> stmts = list(sqlparse.parsestream(stream))
tests/test_split.py:131:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_unicode_parsestream
def test_split_unicode_parsestream():
stream = StringIO('SELECT ö')
> stmts = list(sqlparse.parsestream(stream))
tests/test_split.py:137:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_split.py::test_split_simple
def test_split_simple():
> stmts = sqlparse.split('select * from foo; select * from bar;')
tests/test_split.py:142:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select * from foo; select * from bar;', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_ignores_empty_newlines
def test_split_ignores_empty_newlines():
> stmts = sqlparse.split('select foo;\nselect bar;\n')
tests/test_split.py:149:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select foo;\nselect bar;\n', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_quotes_with_new_line
def test_split_quotes_with_new_line():
> stmts = sqlparse.split('select "foo\nbar"')
tests/test_split.py:156:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select "foo\nbar"', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_mysql_handler_for
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad2313f0>
def test_split_mysql_handler_for(load_file):
# see issue581
> stmts = sqlparse.split(load_file('mysql_handler.sql'))
tests/test_split.py:167:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'create procedure proc1()\nbegin\n declare handler for foo begin end;\n select 1;\nend;\n\ncreate procedure proc2()\nbegin\n select 1;\nend;'
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_strip_semicolon[select * from foo;-expected0]
sql = 'select * from foo;', expected = ['select * from foo']
@pytest.mark.parametrize('sql, expected', [
('select * from foo;', ['select * from foo']),
('select * from foo', ['select * from foo']),
('select * from foo; select * from bar;', [
'select * from foo',
'select * from bar',
]),
(' select * from foo;\n\nselect * from bar;\n\n\n\n', [
'select * from foo',
'select * from bar',
]),
('select * from foo\n\n; bar', ['select * from foo', 'bar']),
])
def test_split_strip_semicolon(sql, expected):
> stmts = sqlparse.split(sql, strip_semicolon=True)
tests/test_split.py:185:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select * from foo;', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_strip_semicolon[select * from foo-expected1]
sql = 'select * from foo', expected = ['select * from foo']
@pytest.mark.parametrize('sql, expected', [
('select * from foo;', ['select * from foo']),
('select * from foo', ['select * from foo']),
('select * from foo; select * from bar;', [
'select * from foo',
'select * from bar',
]),
(' select * from foo;\n\nselect * from bar;\n\n\n\n', [
'select * from foo',
'select * from bar',
]),
('select * from foo\n\n; bar', ['select * from foo', 'bar']),
])
def test_split_strip_semicolon(sql, expected):
> stmts = sqlparse.split(sql, strip_semicolon=True)
tests/test_split.py:185:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select * from foo', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_strip_semicolon[select * from foo; select * from bar;-expected2]
sql = 'select * from foo; select * from bar;'
expected = ['select * from foo', 'select * from bar']
@pytest.mark.parametrize('sql, expected', [
('select * from foo;', ['select * from foo']),
('select * from foo', ['select * from foo']),
('select * from foo; select * from bar;', [
'select * from foo',
'select * from bar',
]),
(' select * from foo;\n\nselect * from bar;\n\n\n\n', [
'select * from foo',
'select * from bar',
]),
('select * from foo\n\n; bar', ['select * from foo', 'bar']),
])
def test_split_strip_semicolon(sql, expected):
> stmts = sqlparse.split(sql, strip_semicolon=True)
tests/test_split.py:185:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select * from foo; select * from bar;', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_strip_semicolon[ select * from foo;\n\nselect * from bar;\n\n\n\n-expected3]
sql = ' select * from foo;\n\nselect * from bar;\n\n\n\n'
expected = ['select * from foo', 'select * from bar']
@pytest.mark.parametrize('sql, expected', [
('select * from foo;', ['select * from foo']),
('select * from foo', ['select * from foo']),
('select * from foo; select * from bar;', [
'select * from foo',
'select * from bar',
]),
(' select * from foo;\n\nselect * from bar;\n\n\n\n', [
'select * from foo',
'select * from bar',
]),
('select * from foo\n\n; bar', ['select * from foo', 'bar']),
])
def test_split_strip_semicolon(sql, expected):
> stmts = sqlparse.split(sql, strip_semicolon=True)
tests/test_split.py:185:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = ' select * from foo;\n\nselect * from bar;\n\n\n\n', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_strip_semicolon[select * from foo\n\n; bar-expected4]
sql = 'select * from foo\n\n; bar', expected = ['select * from foo', 'bar']
@pytest.mark.parametrize('sql, expected', [
('select * from foo;', ['select * from foo']),
('select * from foo', ['select * from foo']),
('select * from foo; select * from bar;', [
'select * from foo',
'select * from bar',
]),
(' select * from foo;\n\nselect * from bar;\n\n\n\n', [
'select * from foo',
'select * from bar',
]),
('select * from foo\n\n; bar', ['select * from foo', 'bar']),
])
def test_split_strip_semicolon(sql, expected):
> stmts = sqlparse.split(sql, strip_semicolon=True)
tests/test_split.py:185:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'select * from foo\n\n; bar', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_strip_semicolon_procedure
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad2323b0>
def test_split_strip_semicolon_procedure(load_file):
> stmts = sqlparse.split(load_file('mysql_handler.sql'),
strip_semicolon=True)
tests/test_split.py:192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'create procedure proc1()\nbegin\n declare handler for foo begin end;\n select 1;\nend;\n\ncreate procedure proc2()\nbegin\n select 1;\nend;'
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_go[USE foo;\nGO\nSELECT 1;\nGO-4]
sql = 'USE foo;\nGO\nSELECT 1;\nGO', num = 4
@pytest.mark.parametrize('sql, num', [
('USE foo;\nGO\nSELECT 1;\nGO', 4),
('SELECT * FROM foo;\nGO', 2),
('USE foo;\nGO 2\nSELECT 1;', 3)
])
def test_split_go(sql, num): # issue762
> stmts = sqlparse.split(sql)
tests/test_split.py:204:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'USE foo;\nGO\nSELECT 1;\nGO', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_go[SELECT * FROM foo;\nGO-2]
sql = 'SELECT * FROM foo;\nGO', num = 2
@pytest.mark.parametrize('sql, num', [
('USE foo;\nGO\nSELECT 1;\nGO', 4),
('SELECT * FROM foo;\nGO', 2),
('USE foo;\nGO 2\nSELECT 1;', 3)
])
def test_split_go(sql, num): # issue762
> stmts = sqlparse.split(sql)
tests/test_split.py:204:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'SELECT * FROM foo;\nGO', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_go[USE foo;\nGO 2\nSELECT 1;-3]
sql = 'USE foo;\nGO 2\nSELECT 1;', num = 3
@pytest.mark.parametrize('sql, num', [
('USE foo;\nGO\nSELECT 1;\nGO', 4),
('SELECT * FROM foo;\nGO', 2),
('USE foo;\nGO 2\nSELECT 1;', 3)
])
def test_split_go(sql, num): # issue762
> stmts = sqlparse.split(sql)
tests/test_split.py:204:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'USE foo;\nGO 2\nSELECT 1;', encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_split.py::test_split_multiple_case_in_begin
load_file = <function load_file.<locals>.make_load_file at 0x7ef7ad2317e0>
def test_split_multiple_case_in_begin(load_file): # issue784
> stmts = sqlparse.split(load_file('multiple_case_in_begin.sql'))
tests/test_split.py:209:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:72: in split
return [str(stmt).strip() for stmt in stack.run(sql, encoding)]
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
sql = 'CREATE TRIGGER mytrig\nAFTER UPDATE OF vvv ON mytable\nBEGIN\n UPDATE aa\n SET mycola = (CASE WHEN (A=1) THEN 2 END);\n UPDATE bb\n SET mycolb = (CASE WHEN (B=1) THEN 5 END);\nEND;'
encoding = None
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
for filter_ in self.preprocess:
stream = filter_.process(stream)
stream = StatementSplitter().process(stream)
# Group and ungroup tokens
if self._grouping:
stream = grouping.group(stream)
# Process statements
ret = []
> for stmt in stream:
E TypeError: 'NoneType' object is not iterable
sqlparse/engine/filter_stack.py:34: TypeError
test_tokenize.py::test_tokenize_backticks
def test_tokenize_backticks():
s = '`foo`.`bar`'
tokens = list(lexer.tokenize(s))
> assert len(tokens) == 3
E AssertionError: assert 7 == 3
E + where 7 = len([(Token.Error, '`'), (Token.Name, 'foo'), (Token.Error, '`'), (Token.Punctuation, '.'), (Token.Error, '`'), (Token.Name, 'bar'), ...])
tests/test_tokenize.py:25: AssertionError
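test_tokenize_backticks expects `foo`.`bar` to lex into three tokens (Name, Punctuation, Name); the Token.Error entries above show that backtick-quoted identifiers are no longer matched as a single Name token. Upstream sqlparse appears to handle this with a regex rule along the following lines in its lexer table; the snippet is a standalone illustration, not this repository's code.
import re

# One `...` span (with `` as an escaped backtick) should become a single Name token.
BACKTICK_NAME = re.compile(r'`(``|[^`])*`')
print(BACKTICK_NAME.match('`foo`.`bar`').group(0))   # `foo`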
test_tokenize.py::test_tokenize_negative_numbers
def test_tokenize_negative_numbers():
s = "values(-1)"
tokens = list(lexer.tokenize(s))
> assert len(tokens) == 4
E AssertionError: assert 5 == 4
E + where 5 = len([(Token.Keyword, 'values'), (Token.Punctuation, '('), (Token.Operator, '-'), (Token.Literal.Number, '1'), (Token.Punctuation, ')')])
tests/test_tokenize.py:63: AssertionError
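test_tokenize_negative_numbers expects values(-1) to yield four tokens, i.e. '-1' lexed as a single number literal, whereas the build under test emits a separate Operator '-'. A deliberately simplified, standalone sketch of the expected split (the real lexer rules are more involved than this):
import re

# Toy tokenizer: an optional leading '-' is kept as part of the integer literal.
TOKEN = re.compile(r'\w+|-?\d+|[(),]')
print(TOKEN.findall('values(-1)'))   # ['values', '(', '-1', ')']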
test_tokenize.py::test_token_repr
def test_token_repr():
token = sql.Token(T.Keyword, 'foo')
tst = " assert repr(token)[:len(tst)] == tst
E assert "
test_tokenize.py::test_tokenlist_repr
def test_tokenlist_repr():
> p = sqlparse.parse('foo, bar, baz')[0]
tests/test_tokenize.py:91:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_single_quotes
def test_single_quotes():
> p = sqlparse.parse("'test'")[0]
tests/test_tokenize.py:97:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_tokenlist_first
def test_tokenlist_first():
> p = sqlparse.parse(' select foo')[0]
tests/test_tokenize.py:103:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_tokenlist_token_matching
def test_tokenlist_token_matching():
t1 = sql.Token(T.Keyword, 'foo')
t2 = sql.Token(T.Punctuation, ',')
x = sql.TokenList([t1, t2])
> assert x.token_matching([lambda t: t.ttype is T.Keyword], 0) == t1
tests/test_tokenize.py:114:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self =
token = [<function test_tokenlist_token_matching.<locals>.<lambda> at 0x7ef7ad231b40>]
idx = 0
def token_matching(self, token, idx):
"""Returns the matching token for a token at given index."""
> if not token.is_group:
E AttributeError: 'list' object has no attribute 'is_group'
sqlparse/sql.py:245: AttributeError
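test_tokenlist_token_matching passes a list of predicate callables as the first argument, but the implementation under test treats that argument as a single token and immediately asks for token.is_group. A standalone sketch of the behaviour the test expects (a hypothetical free function, not sqlparse's actual method):
def token_matching(tokens, funcs, idx):
    # Return the first token at or after idx for which any predicate is true.
    for token in tokens[idx:]:
        if any(func(token) for func in funcs):
            return token
    return None

# With the test's data, token_matching(x.tokens, [lambda t: t.ttype is T.Keyword], 0)
# would return t1.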
test_tokenize.py::test_stream_error
def test_stream_error():
stream = StringIO("FOOBAR{")
tokens = list(lexer.tokenize(stream))
assert len(tokens) == 2
> assert tokens[1][0] == T.Error
E assert Token.Punctuation == Token.Error
E
E (pytest_assertion plugin: representation of details failed: /usr/lib/python3.10/pprint.py:554: RecursionError: maximum recursion depth exceeded while getting the repr of an object.
E Probably an object has a faulty __repr__.)
tests/test_tokenize.py:139: AssertionError
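test_stream_error expects input with no matching lexer rule, here the trailing '{', to come back as Token.Error; the build under test classifies it as Punctuation. The secondary RecursionError raised while pretty-printing the assertion also hints at a token __repr__ that recurses, which may be worth checking separately. Restating the failing expectation standalone:
from io import StringIO

from sqlparse import lexer
from sqlparse import tokens as T

tokens = list(lexer.tokenize(StringIO("FOOBAR{")))
print(tokens[1][0] == T.Error)   # True upstream, False on the build under test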
test_tokenize.py::test_parse_join[JOIN]
expr = 'JOIN'
@pytest.mark.parametrize('expr', [
'JOIN',
'LEFT JOIN',
'LEFT OUTER JOIN',
'FULL OUTER JOIN',
'NATURAL JOIN',
'CROSS JOIN',
'STRAIGHT JOIN',
'INNER JOIN',
'LEFT INNER JOIN'])
def test_parse_join(expr):
> p = sqlparse.parse('{} foo'.format(expr))[0]
tests/test_tokenize.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_join[LEFT JOIN]
expr = 'LEFT JOIN'
@pytest.mark.parametrize('expr', [
'JOIN',
'LEFT JOIN',
'LEFT OUTER JOIN',
'FULL OUTER JOIN',
'NATURAL JOIN',
'CROSS JOIN',
'STRAIGHT JOIN',
'INNER JOIN',
'LEFT INNER JOIN'])
def test_parse_join(expr):
> p = sqlparse.parse('{} foo'.format(expr))[0]
tests/test_tokenize.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_join[LEFT OUTER JOIN]
expr = 'LEFT OUTER JOIN'
@pytest.mark.parametrize('expr', [
'JOIN',
'LEFT JOIN',
'LEFT OUTER JOIN',
'FULL OUTER JOIN',
'NATURAL JOIN',
'CROSS JOIN',
'STRAIGHT JOIN',
'INNER JOIN',
'LEFT INNER JOIN'])
def test_parse_join(expr):
> p = sqlparse.parse('{} foo'.format(expr))[0]
tests/test_tokenize.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_join[FULL OUTER JOIN]
expr = 'FULL OUTER JOIN'
@pytest.mark.parametrize('expr', [
'JOIN',
'LEFT JOIN',
'LEFT OUTER JOIN',
'FULL OUTER JOIN',
'NATURAL JOIN',
'CROSS JOIN',
'STRAIGHT JOIN',
'INNER JOIN',
'LEFT INNER JOIN'])
def test_parse_join(expr):
> p = sqlparse.parse('{} foo'.format(expr))[0]
tests/test_tokenize.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_join[NATURAL JOIN]
expr = 'NATURAL JOIN'
@pytest.mark.parametrize('expr', [
'JOIN',
'LEFT JOIN',
'LEFT OUTER JOIN',
'FULL OUTER JOIN',
'NATURAL JOIN',
'CROSS JOIN',
'STRAIGHT JOIN',
'INNER JOIN',
'LEFT INNER JOIN'])
def test_parse_join(expr):
> p = sqlparse.parse('{} foo'.format(expr))[0]
tests/test_tokenize.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_join[CROSS JOIN]
expr = 'CROSS JOIN'
@pytest.mark.parametrize('expr', [
'JOIN',
'LEFT JOIN',
'LEFT OUTER JOIN',
'FULL OUTER JOIN',
'NATURAL JOIN',
'CROSS JOIN',
'STRAIGHT JOIN',
'INNER JOIN',
'LEFT INNER JOIN'])
def test_parse_join(expr):
> p = sqlparse.parse('{} foo'.format(expr))[0]
tests/test_tokenize.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_join[STRAIGHT JOIN]
expr = 'STRAIGHT JOIN'
@pytest.mark.parametrize('expr', [
'JOIN',
'LEFT JOIN',
'LEFT OUTER JOIN',
'FULL OUTER JOIN',
'NATURAL JOIN',
'CROSS JOIN',
'STRAIGHT JOIN',
'INNER JOIN',
'LEFT INNER JOIN'])
def test_parse_join(expr):
> p = sqlparse.parse('{} foo'.format(expr))[0]
tests/test_tokenize.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_join[INNER JOIN]
expr = 'INNER JOIN'
@pytest.mark.parametrize('expr', [
'JOIN',
'LEFT JOIN',
'LEFT OUTER JOIN',
'FULL OUTER JOIN',
'NATURAL JOIN',
'CROSS JOIN',
'STRAIGHT JOIN',
'INNER JOIN',
'LEFT INNER JOIN'])
def test_parse_join(expr):
> p = sqlparse.parse('{} foo'.format(expr))[0]
tests/test_tokenize.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_join[LEFT INNER JOIN]
expr = 'LEFT INNER JOIN'
@pytest.mark.parametrize('expr', [
'JOIN',
'LEFT JOIN',
'LEFT OUTER JOIN',
'FULL OUTER JOIN',
'NATURAL JOIN',
'CROSS JOIN',
'STRAIGHT JOIN',
'INNER JOIN',
'LEFT INNER JOIN'])
def test_parse_join(expr):
> p = sqlparse.parse('{} foo'.format(expr))[0]
tests/test_tokenize.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_union
def test_parse_union(): # issue294
> p = sqlparse.parse('UNION ALL')[0]
tests/test_tokenize.py:159:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_endifloop[END IF]
s = 'END IF'
@pytest.mark.parametrize('s', ['END IF', 'END IF', 'END\t\nIF',
'END LOOP', 'END LOOP', 'END\t\nLOOP'])
def test_parse_endifloop(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:167:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_endifloop[END IF]
s = 'END IF'
@pytest.mark.parametrize('s', ['END IF', 'END IF', 'END\t\nIF',
'END LOOP', 'END LOOP', 'END\t\nLOOP'])
def test_parse_endifloop(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:167:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_endifloop[END\t\nIF]
s = 'END\t\nIF'
@pytest.mark.parametrize('s', ['END IF', 'END IF', 'END\t\nIF',
'END LOOP', 'END LOOP', 'END\t\nLOOP'])
def test_parse_endifloop(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:167:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_endifloop[END LOOP]
s = 'END LOOP'
@pytest.mark.parametrize('s', ['END IF', 'END IF', 'END\t\nIF',
'END LOOP', 'END LOOP', 'END\t\nLOOP'])
def test_parse_endifloop(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:167:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_endifloop[END LOOP]
s = 'END LOOP'
@pytest.mark.parametrize('s', ['END IF', 'END IF', 'END\t\nIF',
'END LOOP', 'END LOOP', 'END\t\nLOOP'])
def test_parse_endifloop(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:167:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_endifloop[END\t\nLOOP]
s = 'END\t\nLOOP'
@pytest.mark.parametrize('s', ['END IF', 'END IF', 'END\t\nIF',
'END LOOP', 'END LOOP', 'END\t\nLOOP'])
def test_parse_endifloop(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:167:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_order[ASC]
s = 'ASC'
@pytest.mark.parametrize('s', [
'ASC', 'DESC',
'NULLS FIRST', 'NULLS LAST',
'ASC NULLS FIRST', 'ASC NULLS LAST',
'DESC NULLS FIRST', 'DESC NULLS LAST',
])
def test_parse_order(s): # issue487
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:179:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_order[DESC]
s = 'DESC'
@pytest.mark.parametrize('s', [
'ASC', 'DESC',
'NULLS FIRST', 'NULLS LAST',
'ASC NULLS FIRST', 'ASC NULLS LAST',
'DESC NULLS FIRST', 'DESC NULLS LAST',
])
def test_parse_order(s): # issue487
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:179:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_order[NULLS FIRST]
s = 'NULLS FIRST'
@pytest.mark.parametrize('s', [
'ASC', 'DESC',
'NULLS FIRST', 'NULLS LAST',
'ASC NULLS FIRST', 'ASC NULLS LAST',
'DESC NULLS FIRST', 'DESC NULLS LAST',
])
def test_parse_order(s): # issue487
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:179:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_order[NULLS LAST]
s = 'NULLS LAST'
@pytest.mark.parametrize('s', [
'ASC', 'DESC',
'NULLS FIRST', 'NULLS LAST',
'ASC NULLS FIRST', 'ASC NULLS LAST',
'DESC NULLS FIRST', 'DESC NULLS LAST',
])
def test_parse_order(s): # issue487
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:179:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_order[ASC NULLS FIRST]
s = 'ASC NULLS FIRST'
@pytest.mark.parametrize('s', [
'ASC', 'DESC',
'NULLS FIRST', 'NULLS LAST',
'ASC NULLS FIRST', 'ASC NULLS LAST',
'DESC NULLS FIRST', 'DESC NULLS LAST',
])
def test_parse_order(s): # issue487
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:179:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_order[ASC NULLS LAST]
s = 'ASC NULLS LAST'
@pytest.mark.parametrize('s', [
'ASC', 'DESC',
'NULLS FIRST', 'NULLS LAST',
'ASC NULLS FIRST', 'ASC NULLS LAST',
'DESC NULLS FIRST', 'DESC NULLS LAST',
])
def test_parse_order(s): # issue487
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:179:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_order[DESC NULLS FIRST]
s = 'DESC NULLS FIRST'
@pytest.mark.parametrize('s', [
'ASC', 'DESC',
'NULLS FIRST', 'NULLS LAST',
'ASC NULLS FIRST', 'ASC NULLS LAST',
'DESC NULLS FIRST', 'DESC NULLS LAST',
])
def test_parse_order(s): # issue487
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:179:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_order[DESC NULLS LAST]
s = 'DESC NULLS LAST'
@pytest.mark.parametrize('s', [
'ASC', 'DESC',
'NULLS FIRST', 'NULLS LAST',
'ASC NULLS FIRST', 'ASC NULLS LAST',
'DESC NULLS FIRST', 'DESC NULLS LAST',
])
def test_parse_order(s): # issue487
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:179:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_identifiers[foo]
s = 'foo'
@pytest.mark.parametrize('s', [
'foo',
'Foo',
'FOO',
'v$name', # issue291
])
def test_parse_identifiers(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:191:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_identifiers[Foo]
s = 'Foo'
@pytest.mark.parametrize('s', [
'foo',
'Foo',
'FOO',
'v$name', # issue291
])
def test_parse_identifiers(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:191:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_identifiers[FOO]
s = 'FOO'
@pytest.mark.parametrize('s', [
'foo',
'Foo',
'FOO',
'v$name', # issue291
])
def test_parse_identifiers(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:191:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_identifiers[v$name]
s = 'v$name'
@pytest.mark.parametrize('s', [
'foo',
'Foo',
'FOO',
'v$name', # issue291
])
def test_parse_identifiers(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:191:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_group_by
def test_parse_group_by():
> p = sqlparse.parse('GROUP BY')[0]
tests/test_tokenize.py:199:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_order_by
def test_parse_order_by():
> p = sqlparse.parse('ORDER BY')[0]
tests/test_tokenize.py:205:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_window_as
def test_parse_window_as():
> p = sqlparse.parse('WINDOW w AS')[0]
tests/test_tokenize.py:211:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_like_and_ilike_parsed_as_comparisons[LIKE]
s = 'LIKE'
@pytest.mark.parametrize('s', (
"LIKE", "ILIKE", "NOT LIKE", "NOT ILIKE",
"NOT LIKE", "NOT ILIKE",
))
def test_like_and_ilike_parsed_as_comparisons(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:221:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_like_and_ilike_parsed_as_comparisons[ILIKE]
s = 'ILIKE'
@pytest.mark.parametrize('s', (
"LIKE", "ILIKE", "NOT LIKE", "NOT ILIKE",
"NOT LIKE", "NOT ILIKE",
))
def test_like_and_ilike_parsed_as_comparisons(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:221:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_like_and_ilike_parsed_as_comparisons[NOT LIKE]
s = 'NOT LIKE'
@pytest.mark.parametrize('s', (
"LIKE", "ILIKE", "NOT LIKE", "NOT ILIKE",
"NOT LIKE", "NOT ILIKE",
))
def test_like_and_ilike_parsed_as_comparisons(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:221:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_like_and_ilike_parsed_as_comparisons[NOT ILIKE]
s = 'NOT ILIKE'
@pytest.mark.parametrize('s', (
"LIKE", "ILIKE", "NOT LIKE", "NOT ILIKE",
"NOT LIKE", "NOT ILIKE",
))
def test_like_and_ilike_parsed_as_comparisons(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:221:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_like_and_ilike_parsed_as_comparisons[NOT LIKE]
s = 'NOT LIKE'
@pytest.mark.parametrize('s', (
"LIKE", "ILIKE", "NOT LIKE", "NOT ILIKE",
"NOT LIKE", "NOT ILIKE",
))
def test_like_and_ilike_parsed_as_comparisons(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:221:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_like_and_ilike_parsed_as_comparisons[NOT ILIKE]
s = 'NOT ILIKE'
@pytest.mark.parametrize('s', (
"LIKE", "ILIKE", "NOT LIKE", "NOT ILIKE",
"NOT LIKE", "NOT ILIKE",
))
def test_like_and_ilike_parsed_as_comparisons(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:221:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_near_like_and_ilike_parsed_appropriately[LIKEaaa]
s = 'LIKEaaa'
@pytest.mark.parametrize('s', (
"LIKEaaa", "bILIKE", "aaILIKEbb", "NOTLIKE", "NOTILIKE",
))
def test_near_like_and_ilike_parsed_appropriately(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:230:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_near_like_and_ilike_parsed_appropriately[bILIKE]
s = 'bILIKE'
@pytest.mark.parametrize('s', (
"LIKEaaa", "bILIKE", "aaILIKEbb", "NOTLIKE", "NOTILIKE",
))
def test_near_like_and_ilike_parsed_appropriately(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:230:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_near_like_and_ilike_parsed_appropriately[aaILIKEbb]
s = 'aaILIKEbb'
@pytest.mark.parametrize('s', (
"LIKEaaa", "bILIKE", "aaILIKEbb", "NOTLIKE", "NOTILIKE",
))
def test_near_like_and_ilike_parsed_appropriately(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:230:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_near_like_and_ilike_parsed_appropriately[NOTLIKE]
s = 'NOTLIKE'
@pytest.mark.parametrize('s', (
"LIKEaaa", "bILIKE", "aaILIKEbb", "NOTLIKE", "NOTILIKE",
))
def test_near_like_and_ilike_parsed_appropriately(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:230:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_near_like_and_ilike_parsed_appropriately[NOTILIKE]
s = 'NOTILIKE'
@pytest.mark.parametrize('s', (
"LIKEaaa", "bILIKE", "aaILIKEbb", "NOTLIKE", "NOTILIKE",
))
def test_near_like_and_ilike_parsed_appropriately(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:230:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_parse_tzcast[AT TIME ZONE 'UTC']
s = "AT TIME ZONE 'UTC'"
@pytest.mark.parametrize('s', (
'AT TIME ZONE \'UTC\'',
))
def test_parse_tzcast(s):
> p = sqlparse.parse(s)[0]
tests/test_tokenize.py:239:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
test_tokenize.py::test_cli_commands
def test_cli_commands():
> p = sqlparse.parse('\\copy')[0]
tests/test_tokenize.py:245:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
sqlparse/__init__.py:30: in parse
return tuple(parsestream(sql, encoding))
sqlparse/__init__.py:42: in parsestream
return stack.run(stream, encoding)
sqlparse/engine/filter_stack.py:30: in run
stream = grouping.group(stream)
sqlparse/engine/grouping.py:61: in group
_group_matching(stream, cls)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tlist = None, cls =
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
idx = 0
> while idx < len(tlist.tokens):
E AttributeError: 'NoneType' object has no attribute 'tokens'
sqlparse/engine/grouping.py:16: AttributeError
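Every tokenize/parse failure above has the same signature: _group_matching() is handed tlist=None, meaning the stream that FilterStack.run() passes to grouping.group() is already None before any grouping starts, which most plausibly points at a pipeline stage ahead of grouping (the statement splitter) still returning nothing. A minimal reproduction sketch, assuming the patched sqlparse from the diff below is installed:

# minimal reproduction sketch -- every input in the failures above hits the same path
import sqlparse

sqlparse.parse('SELECT 1')
# AttributeError: 'NoneType' object has no attribute 'tokens'
#   raised from _group_matching() in sqlparse/engine/grouping.py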
Patch diff
diff --git a/sqlparse/cli.py b/sqlparse/cli.py
index 36590f9..ae3025c 100755
--- a/sqlparse/cli.py
+++ b/sqlparse/cli.py
@@ -18,4 +18,60 @@ from sqlparse.exceptions import SQLParseError
def _error(msg):
"""Print msg and optionally exit with return code exit_."""
- pass
\ No newline at end of file
+ sys.stderr.write(msg + '\n')
+ sys.exit(1)
+
+def create_parser():
+ """Create and return command line parser."""
+ parser = argparse.ArgumentParser(
+ description='Format SQL files.',
+ usage='%(prog)s [OPTIONS] FILE, ...',
+ add_help=True)
+ parser.add_argument('files', nargs='*', help='Files to be processed')
+ parser.add_argument('-o', '--outfile', help='Write output to FILE')
+ parser.add_argument('-r', '--reindent', action='store_true',
+ help='Reindent statements')
+ parser.add_argument('-l', '--language', choices=['English'],
+ help='Programming language (default: English)')
+ parser.add_argument('--encoding', default='utf-8',
+ help='Specify the input encoding (default: utf-8)')
+ parser.add_argument('--indent-width', type=int, default=2,
+ help='Number of spaces for indentation (default: 2)')
+ return parser
+
+def main(args=None):
+ """Main entry point."""
+ parser = create_parser()
+ args = parser.parse_args(args)
+
+ if not args.files:
+ parser.print_help()
+ sys.exit(1)
+
+ encoding = args.encoding
+ if encoding == 'utf-8':
+ # Python 3 reads files as utf-8 by default
+ encoding = None
+
+ for file_ in args.files:
+ try:
+ with open(file_, 'r', encoding=encoding) as f:
+ data = f.read()
+ except OSError as e:
+ _error('Failed to read {}: {}'.format(file_, e))
+ continue
+
+ if args.reindent:
+ data = sqlparse.format(data, reindent=True,
+ indent_width=args.indent_width)
+
+ if args.outfile:
+ try:
+ with open(args.outfile, 'w', encoding=encoding) as f:
+ f.write(data)
+ except OSError as e:
+ _error('Failed to write to {}: {}'.format(args.outfile, e))
+ else:
+ sys.stdout.write(data)
+
+ return 0
\ No newline at end of file
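As a quick sanity check of the rebuilt argument parser, a sketch that only exercises argument parsing (the file name query.sql is illustrative and never opened):

# sketch: driving create_parser() directly; no files are read
from sqlparse.cli import create_parser

args = create_parser().parse_args(['query.sql', '-r', '--indent-width', '4'])
print(args.files, args.reindent, args.indent_width)   # ['query.sql'] True 4

Note that the option is registered as --indent-width; argparse will not accept the underscore spelling --indent_width for it.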
diff --git a/sqlparse/engine/filter_stack.py b/sqlparse/engine/filter_stack.py
index 0b2f20c..451e0ca 100644
--- a/sqlparse/engine/filter_stack.py
+++ b/sqlparse/engine/filter_stack.py
@@ -12,4 +12,34 @@ class FilterStack:
self.postprocess = []
self._grouping = False
if strip_semicolon:
- self.stmtprocess.append(StripTrailingSemicolonFilter())
\ No newline at end of file
+ self.stmtprocess.append(StripTrailingSemicolonFilter())
+
+ def enable_grouping(self):
+ self._grouping = True
+
+ def run(self, sql, encoding=None):
+ stream = lexer.tokenize(sql, encoding)
+ # Process token stream
+ for filter_ in self.preprocess:
+ stream = filter_.process(stream)
+
+ stream = StatementSplitter().process(stream)
+
+ # Group and ungroup tokens
+ if self._grouping:
+ stream = grouping.group(stream)
+
+ # Process statements
+ ret = []
+ for stmt in stream:
+ if stmt.is_whitespace:
+ continue
+ for filter_ in self.stmtprocess:
+ filter_.process(stmt)
+ ret.append(stmt)
+
+ # Process again after grouping
+ for filter_ in self.postprocess:
+ ret = filter_.process(ret)
+
+ return ret
\ No newline at end of file
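One detail worth flagging: this run() hands the whole splitter output to grouping.group() in a single call, whereas upstream sqlparse groups each Statement separately inside the loop. A runnable toy of that per-statement shape (the helpers below are placeholders standing in for lexer.tokenize, StatementSplitter().process and grouping.group, not the real objects):

# toy sketch of per-statement grouping, roughly the upstream shape
def run(sql, tokenize, split, group):
    stream = tokenize(sql)
    for stmt in split(stream):
        yield group(stmt)          # grouping applied to each statement inside the loop

print(list(run('select 1; select 2',
               tokenize=str.strip,
               split=lambda s: [p for p in s.split(';') if p.strip()],
               group=str.upper)))
# ['SELECT 1', ' SELECT 2']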
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 7d36088..9992d0a 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -1,19 +1,110 @@
from sqlparse import sql
from sqlparse import tokens as T
from sqlparse.utils import recurse, imt
+from sqlparse.sql import (
+ Parenthesis, SquareBrackets, Case, If, For, Begin,
+ TypedLiteral, Identifier, IdentifierList, Operation,
+ Values, Command, Comparison, Assignment, Where, Having, Over
+)
T_NUMERICAL = (T.Number, T.Number.Integer, T.Number.Float)
T_STRING = (T.String, T.String.Single, T.String.Symbol)
T_NAME = (T.Name, T.Name.Placeholder)
def _group_matching(tlist, cls):
"""Groups Tokens that have beginning and end."""
- pass
+ idx = 0
+ while idx < len(tlist.tokens):
+ token = tlist.tokens[idx]
+ if token.is_whitespace:
+ idx += 1
+ continue
+
+ if token.match(*cls.M_OPEN):
+ end_idx = tlist.token_matching(token, idx)
+ if end_idx is None:
+ idx += 1
+ continue
+
+ group = tlist.group_tokens(cls, tlist.tokens[idx:end_idx + 1])
+ idx = tlist.token_index(group) + 1
+ else:
+ idx += 1
@recurse(sql.Identifier)
def group_order(tlist):
"""Group together Identifier and Asc/Desc token"""
- pass
+ idx = 0
+ while idx < len(tlist.tokens) - 1:
+ token = tlist.tokens[idx]
+ next_token = tlist.tokens[idx + 1]
+
+ if token.is_whitespace or token.is_group:
+ idx += 1
+ continue
+
+ if next_token.is_whitespace:
+ idx += 1
+ continue
+
+ if token.ttype in (T.Name, T.String.Symbol, T.Number) and \
+ next_token.match(T.Keyword, ('ASC', 'DESC'), True):
+ grp = tlist.group_tokens(sql.Identifier, tlist.tokens[idx:idx + 2])
+ idx = tlist.token_index(grp) + 1
+ else:
+ idx += 1
+
+def group(stream):
+ """Group together tokens that form SQL statements."""
+ for cls in (Parenthesis, SquareBrackets, Case, If, For, Begin,
+ TypedLiteral, Identifier, IdentifierList, Operation,
+ Values, Command):
+ _group_matching(stream, cls)
+
+ _group(stream, Comparison, (T.Operator.Comparison,))
+ _group(stream, Assignment, (T.Assignment,))
+ _group(stream, Where, (T.Keyword, 'WHERE'))
+ _group(stream, Having, (T.Keyword, 'HAVING'))
+ _group(stream, Over, (T.Keyword, 'OVER'))
+
+ group_order(stream)
+ return stream
def _group(tlist, cls, match, valid_prev=lambda t: True, valid_next=lambda t: True, post=None, extend=True, recurse=True):
"""Groups together tokens that are joined by a middle token. i.e. x < y"""
- pass
\ No newline at end of file
+ idx = 1
+ while idx < len(tlist.tokens) - 1:
+ token = tlist.tokens[idx]
+ if token.is_whitespace:
+ idx += 1
+ continue
+
+ before = tlist.tokens[idx - 1]
+ after = tlist.tokens[idx + 1]
+
+ if token.match(*match) and valid_prev(before) and valid_next(after):
+ if extend:
+ # Look ahead to handle x > y > z
+ end = idx + 1
+ while end < len(tlist.tokens) - 1:
+ next_token = tlist.tokens[end + 1]
+ if next_token.is_whitespace:
+ end += 1
+ continue
+ if next_token.match(*match) and valid_next(tlist.tokens[end + 2]):
+ end += 2
+ else:
+ break
+ tokens = tlist.tokens[idx - 1:end + 1]
+ else:
+ tokens = tlist.tokens[idx - 1:idx + 2]
+
+ group = tlist.group_tokens(cls, tokens)
+ if post:
+ post(group)
+
+ if recurse:
+ _group(group, cls, match, valid_prev, valid_next, post, extend)
+
+ idx = tlist.token_index(group) + 1
+ else:
+ idx += 1
\ No newline at end of file
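The intent of _group_matching() is to pair each cls.M_OPEN token with its matching cls.M_CLOSE token (via the depth counter in token_matching further down) and fold the span into a single cls group. The pairing logic on its own is the classic stack walk; a self-contained toy over plain characters, purely to illustrate the spans being built (not the sqlparse API):

# toy illustration of open/close pairing with a stack
def match_spans(tokens, open_tok='(', close_tok=')'):
    spans, stack = [], []
    for i, tok in enumerate(tokens):
        if tok == open_tok:
            stack.append(i)
        elif tok == close_tok and stack:
            spans.append((stack.pop(), i))
    return spans

print(match_spans(list('a(b(c)d)e')))   # [(3, 5), (1, 7)]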
diff --git a/sqlparse/filters/reindent.py b/sqlparse/filters/reindent.py
index 9a5150e..8e1bd8c 100644
--- a/sqlparse/filters/reindent.py
+++ b/sqlparse/filters/reindent.py
@@ -19,4 +19,44 @@ class ReindentFilter:
def _flatten_up_to_token(self, token):
"""Yields all tokens up to token but excluding current."""
- pass
\ No newline at end of file
+ if token is None:
+ return
+ parent = token.parent
+ if parent is None:
+ return
+
+ for t in parent.tokens:
+ if t == token:
+ break
+ yield t
+
+ def _get_offset(self, token):
+ raw = str(token)
+ line = raw.splitlines()[0]
+ initial_whitespace = len(line) - len(line.lstrip())
+ return initial_whitespace
+
+ def _get_offset_at_depth(self, token, depth):
+ offset = 0
+ for t in self._flatten_up_to_token(token):
+ if t.is_whitespace:
+ continue
+ offset += self._get_offset(t)
+ return offset + (depth * self.width)
+
+ def process(self, stream):
+ """Process the stream."""
+ for token in stream:
+ if token.is_whitespace:
+ token.value = self.n
+ yield token
+ continue
+
+ if token.is_group:
+ depth = len(list(self._flatten_up_to_token(token)))
+ offset = self._get_offset_at_depth(token, depth)
+ token.value = self.char * offset + str(token)
+ yield token
+ continue
+
+ yield token
\ No newline at end of file
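The offset helpers reduce to measuring the leading whitespace of a token's first line; in isolation that is just:

# stdlib-only illustration of the measurement in _get_offset()
line = '    SELECT *\nFROM t'.splitlines()[0]
print(len(line) - len(line.lstrip()))   # 4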
diff --git a/sqlparse/formatter.py b/sqlparse/formatter.py
index f839b0c..8213340 100644
--- a/sqlparse/formatter.py
+++ b/sqlparse/formatter.py
@@ -4,7 +4,26 @@ from sqlparse.exceptions import SQLParseError
def validate_options(options):
"""Validates options."""
- pass
+ if options is None:
+ options = {}
+
+ # Validate reindent option
+ if 'reindent' in options and not isinstance(options.get('reindent'), bool):
+ raise SQLParseError('Invalid value for reindent')
+
+ # Validate indent_width option
+ if 'indent_width' in options:
+ indent_width = options.get('indent_width')
+ if not isinstance(indent_width, int) or indent_width < 0:
+ raise SQLParseError('indent_width must be a positive integer')
+
+ # Validate keyword_case option
+ if 'keyword_case' in options:
+ keyword_case = options.get('keyword_case')
+ if keyword_case not in ('upper', 'lower', 'capitalize', None):
+ raise SQLParseError('Invalid value for keyword_case')
+
+ return options
def build_filter_stack(stack, options):
"""Setup and return a filter stack.
@@ -13,4 +32,42 @@ def build_filter_stack(stack, options):
stack: :class:`~sqlparse.filters.FilterStack` instance
options: Dictionary with options validated by validate_options.
"""
- pass
\ No newline at end of file
+ # Process options
+ strip_comments = options.get('strip_comments', False)
+ strip_whitespace = options.get('strip_whitespace', False)
+ reindent = options.get('reindent', False)
+ indent_width = options.get('indent_width', 2)
+ keyword_case = options.get('keyword_case', None)
+ wrap_after = options.get('wrap_after', 0)
+ comma_first = options.get('comma_first', False)
+ right_margin = options.get('right_margin', None)
+ indent_after_first = options.get('indent_after_first', False)
+ indent_columns = options.get('indent_columns', False)
+ compact = options.get('compact', False)
+
+ # Enable grouping
+ stack.enable_grouping()
+
+ # Add filters
+ if strip_comments:
+ stack.preprocess.append(filters.StripCommentsFilter())
+
+ if strip_whitespace:
+ stack.preprocess.append(filters.StripWhitespaceFilter())
+
+ if reindent:
+ stack.preprocess.append(filters.ReindentFilter(
+ width=indent_width,
+ wrap_after=wrap_after,
+ comma_first=comma_first,
+ indent_after_first=indent_after_first,
+ indent_columns=indent_columns,
+ compact=compact))
+
+ if right_margin and not reindent:
+ stack.preprocess.append(filters.RightMarginFilter(right_margin))
+
+ if keyword_case:
+ stack.preprocess.append(filters.KeywordCaseFilter(keyword_case))
+
+ return stack
\ No newline at end of file
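A short sketch of how the rewritten validate_options() behaves, assuming the patched formatter module imports cleanly. Note the guard is indent_width < 0, so a width of 0 still passes validation even though the message promises a positive integer:

# sketch: option validation as implemented above
from sqlparse.formatter import validate_options
from sqlparse.exceptions import SQLParseError

print(validate_options({'reindent': True, 'indent_width': 4}))   # returned unchanged
try:
    validate_options({'indent_width': -1})
except SQLParseError as exc:
    print(exc)   # indent_width must be a positive integer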
diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py
index 28233c7..6e58633 100644
--- a/sqlparse/lexer.py
+++ b/sqlparse/lexer.py
@@ -15,28 +15,39 @@ class Lexer:
def get_default_instance(cls):
"""Returns the lexer instance used internally
by the sqlparse core functions."""
- pass
+ with cls._lock:
+ if cls._default_instance is None:
+ cls._default_instance = cls()
+ cls._default_instance.default_initialization()
+ return cls._default_instance
def default_initialization(self):
"""Initialize the lexer with default dictionaries.
Useful if you need to revert custom syntax settings."""
- pass
+ self.clear()
+ self.add_keywords(keywords.KEYWORDS)
+ self.add_keywords(keywords.KEYWORDS_COMMON)
+ self.add_keywords(keywords.KEYWORDS_ORACLE)
+ self.add_keywords(keywords.KEYWORDS_PLPGSQL)
+ self.add_keywords(keywords.KEYWORDS_HQL)
+ self.add_keywords(keywords.KEYWORDS_MSACCESS)
def clear(self):
"""Clear all syntax configurations.
Useful if you want to load a reduced set of syntax configurations.
After this call, regexps and keyword dictionaries need to be loaded
to make the lexer functional again."""
- pass
+ self._keywords = []
+ self._SQL_REGEX = []
def set_SQL_REGEX(self, SQL_REGEX):
"""Set the list of regex that will parse the SQL."""
- pass
+ self._SQL_REGEX = SQL_REGEX
def add_keywords(self, keywords):
"""Add keyword dictionaries. Keywords are looked up in the same order
that dictionaries were added."""
- pass
+ self._keywords.append(keywords)
def is_keyword(self, value):
"""Checks for a keyword.
@@ -44,7 +55,11 @@ class Lexer:
If the given value is in one of the KEYWORDS_* dictionary
it's considered a keyword. Otherwise, tokens.Name is returned.
"""
- pass
+ val = value.upper()
+ for kwdict in self._keywords:
+ if val in kwdict:
+ return kwdict[val]
+ return tokens.Name
def get_tokens(self, text, encoding=None):
"""
@@ -59,7 +74,90 @@ class Lexer:
``stack`` is the initial stack (default: ``['root']``)
"""
- pass
+ if isinstance(text, TextIOBase):
+ text = text.read()
+
+ if encoding is not None:
+ if isinstance(text, str):
+ text = text.encode(encoding)
+ text = text.decode(encoding)
+
+ iterable = enumerate(text)
+ for pos, char in iterable:
+ # Handle whitespace
+ if char.isspace():
+ end = pos + 1
+ while end < len(text) and text[end].isspace():
+ end += 1
+ consume(iterable, end - pos - 1)
+ yield tokens.Whitespace, text[pos:end]
+ continue
+
+ # Handle comments
+ if char == '-' and text[pos + 1] == '-':
+ end = text.find('\n', pos)
+ if end == -1:
+ end = len(text)
+ consume(iterable, end - pos - 1)
+ yield tokens.Comment.Single, text[pos:end]
+ continue
+
+ # Handle string literals
+ if char in ('"', "'"):
+ end = pos + 1
+ escaped = False
+ while end < len(text):
+ if text[end] == char and not escaped:
+ break
+ if text[end] == '\\':
+ escaped = not escaped
+ else:
+ escaped = False
+ end += 1
+ if end < len(text):
+ end += 1
+ consume(iterable, end - pos - 1)
+ yield tokens.String, text[pos:end]
+ continue
+
+ # Handle numbers
+ if char.isdigit():
+ end = pos + 1
+ while end < len(text) and (text[end].isdigit() or text[end] == '.'):
+ end += 1
+ consume(iterable, end - pos - 1)
+ yield tokens.Number, text[pos:end]
+ continue
+
+ # Handle identifiers and keywords
+ if char.isalpha() or char == '_' or char == '$':
+ end = pos + 1
+ while end < len(text) and (text[end].isalnum() or text[end] in '_$'):
+ end += 1
+ word = text[pos:end]
+ consume(iterable, end - pos - 1)
+ if word.upper() in ('ASC', 'DESC'):
+ yield tokens.Keyword.Order, word
+ else:
+ yield self.is_keyword(word), word
+ continue
+
+ # Handle operators and punctuation
+ if char in '+-*/%<>=!|&~^':
+ end = pos + 1
+ while end < len(text) and text[end] in '+-*/%<>=!|&~^':
+ end += 1
+ consume(iterable, end - pos - 1)
+ yield tokens.Operator, text[pos:end]
+ continue
+
+ # Handle punctuation
+ if char in '()[]{},;.':
+ yield tokens.Punctuation, char
+ continue
+
+ # Handle unknown characters
+ yield tokens.Error, char
def tokenize(sql, encoding=None):
"""Tokenize sql.
@@ -67,4 +165,5 @@ def tokenize(sql, encoding=None):
Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream
of ``(token type, value)`` items.
"""
- pass
\ No newline at end of file
+ lexer = Lexer.get_default_instance()
+ return lexer.get_tokens(sql, encoding)
\ No newline at end of file
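For orientation, a sketch of calling the rewritten character-scanning tokenizer directly (assuming the patched module imports; the token types depend on the keyword dictionaries, so the output shown is indicative rather than guaranteed):

# sketch: tokenizing a trivial input with the patched lexer
from sqlparse import lexer

for ttype, value in lexer.tokenize('SELECT 1'):
    print(ttype, repr(value))
# indicative output:
#   Token.Keyword.DML 'SELECT'
#   Token.Text.Whitespace ' '
#   Token.Literal.Number '1'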
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 03b5e9f..74519d3 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -9,11 +9,11 @@ class NameAliasMixin:
def get_real_name(self):
"""Returns the real name (object name) of this identifier."""
- pass
+ return self.get_name()
def get_alias(self):
"""Returns the alias for this identifier or ``None``."""
- pass
+ return None
class Token:
"""Base class for all other classes in this module.
@@ -38,6 +38,15 @@ class Token:
def __str__(self):
return self.value
+ def _get_repr_name(self):
+ return self.__class__.__name__
+
+ def _get_repr_value(self):
+ raw = str(self)
+ if len(raw) > 7:
+ raw = raw[:7] + '...'
+ return raw
+
def __repr__(self):
cls = self._get_repr_name()
value = self._get_repr_value()
@@ -46,7 +55,7 @@ class Token:
def flatten(self):
"""Resolve subgroups."""
- pass
+ yield self
def match(self, ttype, values, regex=False):
"""Checks whether the token matches the given arguments.
@@ -60,7 +69,22 @@ class Token:
If *regex* is ``True`` (default is ``False``) the given values are
treated as regular expressions.
"""
- pass
+ if ttype is not None and not imt(self, t=ttype):
+ return False
+
+ if values is None:
+ return True
+
+ if isinstance(values, str):
+ values = (values,)
+
+ if regex:
+ pattern = '|'.join('(?:{0})'.format(v) for v in values)
+ return bool(re.search(pattern, self.normalized if self.is_keyword else self.value, re.IGNORECASE if self.is_keyword else 0))
+
+ if self.is_keyword:
+ return self.normalized in [v.upper() for v in values]
+ return self.value in values
def within(self, group_cls):
"""Returns ``True`` if this token is within *group_cls*.
@@ -68,15 +92,25 @@ class Token:
Use this method for example to check if an identifier is within
a function: ``t.within(sql.Function)``.
"""
- pass
+ parent = self.parent
+ while parent:
+ if isinstance(parent, group_cls):
+ return True
+ parent = parent.parent
+ return False
def is_child_of(self, other):
"""Returns ``True`` if this token is a direct child of *other*."""
- pass
+ return self.parent == other
def has_ancestor(self, other):
"""Returns ``True`` if *other* is in this tokens ancestry."""
- pass
+ parent = self.parent
+ while parent:
+ if parent == other:
+ return True
+ parent = parent.parent
+ return False
class TokenList(Token):
"""A group of tokens.
@@ -103,22 +137,57 @@ class TokenList(Token):
def _pprint_tree(self, max_depth=None, depth=0, f=None, _pre=''):
"""Pretty-print the object tree."""
- pass
+ if max_depth and depth > max_depth:
+ return
+
+ indent = ' ' * (depth * 2)
+ for token in self.tokens:
+ cls = token._get_repr_name()
+ value = token._get_repr_value()
+ if token.is_group:
+ token._pprint_tree(max_depth, depth + 1, f, _pre)
+ else:
+ f.write('{}{}{} {}\n'.format(_pre, indent, cls, value))
def get_token_at_offset(self, offset):
"""Returns the token that is on position offset."""
- pass
+ idx = 0
+ for token in self.flatten():
+ end = idx + len(token.value)
+ if idx <= offset < end:
+ return token
+ idx = end
+ return None
def flatten(self):
"""Generator yielding ungrouped tokens.
This method is recursively called for all child tokens.
"""
- pass
+ for token in self.tokens:
+ if token.is_group:
+ for t in token.flatten():
+ yield t
+ else:
+ yield token
def _token_matching(self, funcs, start=0, end=None, reverse=False):
"""next token that match functions"""
- pass
+ if not isinstance(funcs, (list, tuple)):
+ funcs = (funcs,)
+
+ if end is None:
+ end = len(self.tokens)
+
+ tokens = self.tokens[start:end]
+ if reverse:
+ tokens = reversed(tokens)
+
+ for token in tokens:
+ for func in funcs:
+ if func(token):
+ return token
+ return None
def token_first(self, skip_ws=True, skip_cm=False):
"""Returns the first child token.
@@ -129,7 +198,12 @@ class TokenList(Token):
if *skip_cm* is ``True`` (default: ``False``), comments are
ignored too.
"""
- pass
+ funcs = []
+ if skip_ws:
+ funcs.append(lambda t: not t.is_whitespace)
+ if skip_cm:
+ funcs.append(lambda t: not isinstance(t, Comment))
+ return self._token_matching(funcs)
def token_prev(self, idx, skip_ws=True, skip_cm=False):
"""Returns the previous token relative to *idx*.
@@ -138,7 +212,12 @@ class TokenList(Token):
If *skip_cm* is ``True`` comments are ignored.
``None`` is returned if there's no previous token.
"""
- pass
+ funcs = []
+ if skip_ws:
+ funcs.append(lambda t: not t.is_whitespace)
+ if skip_cm:
+ funcs.append(lambda t: not isinstance(t, Comment))
+ return self._token_matching(funcs, 0, idx, reverse=True)
def token_next(self, idx, skip_ws=True, skip_cm=False, _reverse=False):
"""Returns the next token relative to *idx*.
@@ -147,27 +226,86 @@ class TokenList(Token):
If *skip_cm* is ``True`` comments are ignored.
``None`` is returned if there's no next token.
"""
- pass
+ funcs = []
+ if skip_ws:
+ funcs.append(lambda t: not t.is_whitespace)
+ if skip_cm:
+ funcs.append(lambda t: not isinstance(t, Comment))
+ return self._token_matching(funcs, idx + 1)
def token_index(self, token, start=0):
"""Return list index of token."""
- pass
+ for idx, t in enumerate(self.tokens[start:], start=start):
+ if token is t:
+ return idx
+ return None
+
+ def token_matching(self, token, idx):
+ """Returns the matching token for a token at given index."""
+ if not token.is_group:
+ return None
+
+ if not hasattr(token, 'M_OPEN') or not hasattr(token, 'M_CLOSE'):
+ return None
+
+ open_token = token.match(*token.M_OPEN)
+ close_token = token.match(*token.M_CLOSE)
+
+ if not open_token or not close_token:
+ return None
+
+ depth = 1
+ for t_idx, t in enumerate(self.tokens[idx + 1:], start=idx + 1):
+ if t.match(*token.M_OPEN):
+ depth += 1
+ elif t.match(*token.M_CLOSE):
+ depth -= 1
+ if depth == 0:
+ return t_idx
+ return None
def group_tokens(self, grp_cls, start, end, include_end=True, extend=False):
"""Replace tokens by an instance of *grp_cls*."""
- pass
+ if not isinstance(start, int):
+ start = self.token_index(start)
+ if not isinstance(end, int):
+ end = self.token_index(end)
+
+ if extend:
+ while end < len(self.tokens) - 1:
+ if isinstance(self.tokens[end + 1], (Comment, None)):
+ end += 1
+ else:
+ break
+
+ if include_end:
+ end += 1
+
+ grp = grp_cls(self.tokens[start:end])
+ self.tokens[start:end] = [grp]
+ return grp
def insert_before(self, where, token):
"""Inserts *token* before *where*."""
- pass
+ if not isinstance(where, int):
+ where = self.token_index(where)
+ token.parent = self
+ self.tokens.insert(where, token)
def insert_after(self, where, token, skip_ws=True):
"""Inserts *token* after *where*."""
- pass
+ if not isinstance(where, int):
+ where = self.token_index(where)
+ if skip_ws:
+ next_token = self.token_next(where)
+ if next_token is not None:
+ where = self.token_index(next_token) - 1
+ token.parent = self
+ self.tokens.insert(where + 1, token)
def has_alias(self):
"""Returns ``True`` if an alias is present."""
- pass
+ return self.get_alias() is not None
def get_alias(self):
"""Returns the alias for this identifier or ``None``."""
@@ -180,22 +318,46 @@ class TokenList(Token):
be considered as the name under which the object corresponding to
this identifier is known within the current statement.
"""
- pass
+ alias = self.get_alias()
+ if alias:
+ return alias
+ return self.get_real_name()
def get_real_name(self):
"""Returns the real name (object name) of this identifier."""
- pass
+ # Return the first token's value as real name
+ token = self.token_next(0)
+ if token is None:
+ return None
+ return token.value
def get_parent_name(self):
"""Return name of the parent object if any.
A parent object is identified by the first occurring dot.
"""
- pass
+ dot = self.token_next_by(m=(T.Punctuation, '.'))
+ if dot is None:
+ return None
+ prev_ = self.token_prev(self.token_index(dot))
+ if prev_ is None:
+ return None
+ return prev_.value
def _get_first_name(self, idx=None, reverse=False, keywords=False, real_name=False):
"""Returns the name of the first token with a name"""
- pass
+ tokens = self.tokens[idx:] if idx else self.tokens
+ if reverse:
+ tokens = reversed(tokens)
+
+ for token in tokens:
+ if token.ttype in T.Name or (keywords and token.is_keyword):
+ return token.get_real_name() if real_name else token.get_name()
+ if token.is_group:
+ name = token._get_first_name(reverse=reverse, keywords=keywords, real_name=real_name)
+ if name is not None:
+ return name
+ return None
class Statement(TokenList):
"""Represents a SQL statement."""
@@ -210,7 +372,13 @@ class Statement(TokenList):
Whitespaces and comments at the beginning of the statement
are ignored.
"""
- pass
+ first_token = self.token_first()
+ if first_token is None:
+ return 'UNKNOWN'
+
+ if first_token.ttype in (T.Keyword.DML, T.Keyword.DDL):
+ return first_token.normalized
+ return 'UNKNOWN'
class Identifier(NameAliasMixin, TokenList):
"""Represents an identifier.
@@ -220,19 +388,31 @@ class Identifier(NameAliasMixin, TokenList):
def is_wildcard(self):
"""Return ``True`` if this identifier contains a wildcard."""
- pass
+ token = self.token_next_by(t=T.Wildcard)
+ return token is not None
def get_typecast(self):
"""Returns the typecast or ``None`` of this object as a string."""
- pass
+ marker = self.token_next_by(m=(T.Punctuation, '::'))
+ if marker is None:
+ return None
+ next_ = self.token_next(self.token_index(marker))
+ if next_ is None:
+ return None
+ return next_.value
def get_ordering(self):
"""Returns the ordering or ``None`` as uppercase string."""
- pass
+ ordering = self.token_next_by(t=T.Keyword.Order)
+ if ordering is None:
+ return None
+ return ordering.normalized
def get_array_indices(self):
"""Returns an iterator of index token lists"""
- pass
+ for token in self.tokens:
+ if isinstance(token, SquareBrackets):
+ yield token
class IdentifierList(TokenList):
"""A list of :class:`~sqlparse.sql.Identifier`'s."""
@@ -242,7 +422,9 @@ class IdentifierList(TokenList):
Whitespaces and punctuations are not included in this generator.
"""
- pass
+ for token in self.tokens:
+ if isinstance(token, (Identifier, Function)):
+ yield token
class TypedLiteral(TokenList):
"""A typed literal, such as "date '2001-09-28'" or "interval '2 hours'"."""
@@ -303,18 +485,60 @@ class Case(TokenList):
If an ELSE exists condition is None.
"""
- pass
+ ELSE = T.Keyword, 'ELSE'
+ WHEN = T.Keyword, 'WHEN'
+ THEN = T.Keyword, 'THEN'
+
+ results = []
+ condition = None
+ value = None
+ for token in self.tokens:
+ if token.match(*WHEN):
+ condition = []
+ elif token.match(*THEN):
+ value = []
+ elif token.match(*ELSE):
+ condition = None
+ value = []
+ elif condition is not None and value is None:
+ condition.append(token)
+ elif value is not None:
+ value.append(token)
+ if token.is_group and token.tokens[-1].match(T.Keyword, 'END'):
+ results.append((condition, value))
+ condition = None
+ value = None
+
+ if value is not None:
+ results.append((condition, value))
+
+ return results
class Function(NameAliasMixin, TokenList):
"""A function or procedure call."""
def get_parameters(self):
"""Return a list of parameters."""
- pass
+ parenthesis = self.token_next_by(i=Parenthesis)
+ if parenthesis is None:
+ return []
+ return [token for token in parenthesis.tokens[1:-1]
+ if not token.is_whitespace]
def get_window(self):
"""Return the window if it exists."""
- pass
+ over = self.token_next_by(m=(T.Keyword, 'OVER'))
+ if over is None:
+ return None
+
+ over_idx = self.token_index(over)
+ window = self.token_next(over_idx)
+ if window is None:
+ return None
+
+ if isinstance(window, Parenthesis):
+ return window
+ return None
class Begin(TokenList):
"""A BEGIN/END block."""
diff --git a/sqlparse/utils.py b/sqlparse/utils.py
index cf2e1ab..7758206 100644
--- a/sqlparse/utils.py
+++ b/sqlparse/utils.py
@@ -10,11 +10,37 @@ def split_unquoted_newlines(stmt):
Unlike str.splitlines(), this will ignore CR/LF/CR+LF if the requisite
character is inside of a string."""
- pass
+ matches = SPLIT_REGEX.finditer(stmt)
+ matches = list(matches)
+ if not matches:
+ return [stmt]
+
+ # If there are no matches, return the string as is
+ if not matches:
+ return [stmt]
+
+ pieces = []
+ last_end = 0
+ for match in matches:
+ start, end = match.span()
+ if start > last_end:
+ pieces.append(stmt[last_end:start])
+ pieces.append(stmt[start:end])
+ last_end = end
+
+ if last_end < len(stmt):
+ pieces.append(stmt[last_end:])
+
+ return pieces
def remove_quotes(val):
"""Helper that removes surrounding quotes from strings."""
- pass
+ if not val:
+ return val
+
+ if val[0] in ('"', "'", '`') and val[-1] == val[0]:
+ return val[1:-1]
+ return val
def recurse(*cls):
"""Function decorator to help with recursion
@@ -22,7 +48,16 @@ def recurse(*cls):
:param cls: Classes to not recurse over
:return: function
"""
- pass
+ def wrap(f):
+ def wrapped(tlist):
+ for token in tlist.tokens:
+ if not isinstance(token, cls):
+ for t in token.flatten():
+ if isinstance(t, cls):
+ f(t)
+ f(tlist)
+ return wrapped
+ return wrap
def imt(token, i=None, m=None, t=None):
"""Helper function to simplify comparisons Instance, Match and TokenType
@@ -32,8 +67,48 @@ def imt(token, i=None, m=None, t=None):
:param t: TokenType or Tuple/List of TokenTypes
:return: bool
"""
- pass
+ if i is not None and isinstance(i, (list, tuple)):
+ for cls in i:
+ if isinstance(token, cls):
+ return True
+ return False
+
+ if i is not None:
+ return isinstance(token, i)
+
+ if m is not None and isinstance(m, (list, tuple)) and not isinstance(m[0], tuple):
+ m = [m]
+
+ if m is not None:
+ for m_ttype, m_value in m:
+ if token.match(m_ttype, m_value):
+ return True
+ return False
+
+ if t is not None and isinstance(t, (list, tuple)):
+ for ttype in t:
+ if token.ttype is ttype:
+ return True
+ return False
+
+ if t is not None:
+ return token.ttype is t
+
+ return True
def consume(iterator, n):
"""Advance the iterator n-steps ahead. If n is none, consume entirely."""
- pass
\ No newline at end of file
+ deque(itertools.islice(iterator, n), maxlen=0) if n is not None else deque(iterator, maxlen=0)
+
+def offset(token):
+ """Returns the indentation offset of a token."""
+ line = token.value.splitlines()[0]
+ initial_whitespace = len(line) - len(line.lstrip())
+ return initial_whitespace
+
+def indent(stream, n=2, char=' '):
+ """Returns a stream of tokens with each token indented by n characters."""
+ for token in stream:
+ token.value = '\n'.join(char * n + line if line else ''
+ for line in token.value.splitlines())
+ yield token
\ No newline at end of file
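Finally, a couple of the utils helpers exercised in isolation (this assumes utils.py keeps upstream's itertools and collections.deque imports, which the hunk above does not touch):

# sketch: remove_quotes() and consume() as implemented above
from sqlparse.utils import remove_quotes, consume

print(remove_quotes('"foo"'))    # foo
print(remove_quotes('bar'))      # bar (returned unchanged)

it = iter(range(5))
consume(it, 2)                   # skip 0 and 1
print(next(it))                  # 2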