Reference (Gold): jedi

Pytest Summary for the jedi test suite

status      count
passed       3814
failed         28
skipped        22
error           5
xfailed         5
total        3874
collected    3874
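
All of the failures excerpted below come from a single parametrized test, test_file_path_completions in test/test_api/test_completion.py. To re-run just that subset, a sketch along the following lines should work (an assumption: pytest is installed and the command is run from the jedi repository root):

    # Re-run only the failing parametrized test; equivalent to the shell command
    #   pytest test/test_api/test_completion.py -k test_file_path_completions -v
    import pytest

    pytest.main([
        "test/test_api/test_completion.py",
        "-k", "test_file_path_completions",
        "-v",
    ])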

Failed tests:

test_completion.py::test_file_path_completions[None-"test-None-expected1]
Script = functools.partial(<Script>, environment=<environment>)
file = None, code = '"test', column = None, expected = ['/']

    @pytest.mark.parametrize(
        'file, code, column, expected', [
            # General tests / relative paths
            (None, '"comp', None, []),  # No files like comp
            (None, '"test', None, [s]),
            (None, '"test', 4, ['t' + s]),
            ('example.py', '"test%scomp' % s, None, ['letion' + s]),
            ('example.py', 'r"comp"', None, []),
            ('example.py', 'r"tes"', None, []),
            ('example.py', '1 + r"tes"', None, []),
            ('example.py', 'r"tes"', 5, ['t' + s]),
            ('example.py', 'r" tes"', 6, []),
            ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 11, ['letion' + s]),
            ('test%sexample.py' % se, '"%s"' % join('test', 'completion', 'basi'), 21, ['c.py']),
            ('example.py', 'rb"' + join('..', current_dirname, 'tes'), None, ['t' + s]),

            # Absolute paths
            (None, f'"{root_dir.joinpath("test", "test_ca")}', None, ['che.py"']),
            (None, f'"{root_dir.joinpath("test", "test_ca")}"', len(str(root_dir)) + 14, ['che.py']),

            # Longer quotes
            ('example.py', 'r"""test', None, [s]),
            ('example.py', 'r"""\ntest', None, []),
            ('example.py', 'u"""tes\n', (1, 7), ['t' + s]),
            ('example.py', '"""test%stest_cache.p"""' % s, 20, ['y']),
            ('example.py', '"""test%stest_cache.p"""' % s, 19, ['py"""']),

            # Adding
            ('example.py', '"test" + "%stest_cac' % se, None, ['he.py"']),
            ('example.py', '"test" + "%s" + "test_cac' % se, None, ['he.py"']),
            ('example.py', 'x = 1 + "test', None, []),
            ('example.py', 'x = f("te" + "st)', 16, [s]),
            ('example.py', 'x = f("te" + "st', 16, [s]),
            ('example.py', 'x = f("te" + "st"', 16, [s]),
            ('example.py', 'x = f("te" + "st")', 16, [s]),
            ('example.py', 'x = f("t" + "est")', 16, [s]),
            ('example.py', 'x = f(b"t" + "est")', 17, []),
            ('example.py', '"test" + "', None, [s]),

            # __file__
            (f1, os_path + 'dirname(__file__) + "%stest' % s, None, [s]),
            (f2, os_path + 'dirname(__file__) + "%stest_ca' % se, None, ['che.py"']),
            (f2, os_path + 'dirname(abspath(__file__)) + sep + "test_ca', None, ['che.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion") + sep + "basi', None, ['c.py"']),
            (f2, os_path + 'join("test", "completion") + sep + "basi', None, ['c.py"']),

            # inside join
            (f2, os_path + 'join(dirname(__file__), "completion", "basi', None, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 43, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 43, ['c.py']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 35, ['']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 33, ['on"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 33, ['on"']),

            # join with one argument. join will not get inferred and the result is
            # that directories don't end in a slash. This is unfortunate, but doesn't
            # really matter.
            (f2, os_path + 'join("tes', 9, ['t"']),
            (f2, os_path + 'join(\'tes)', 9, ["t'"]),
            (f2, os_path + 'join(r"tes"', 10, ['t']),
            (f2, os_path + 'join("""tes""")', 11, ['t']),

            # Almost like join but not really
            (f2, os_path + 'join["tes', 9, ['t' + s]),
            (f2, os_path + 'join["tes"', 9, ['t' + s]),
            (f2, os_path + 'join["tes"]', 9, ['t' + s]),
            (f2, os_path + 'join[dirname(__file__), "completi', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"]', 33, []),

            # With full paths
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi', 49, ['on"']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi"', 49, ['on']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi")', 49, ['on']),

            # With alias
            (f2, 'import os.path as p\np.join(p.dirname(__file__), "completi', None, ['on"']),
            (f2, 'from os.path import dirname, join as j\nj(dirname(__file__), "completi',
             None, ['on"']),

            # Trying to break it
            (f2, os_path + 'join(["tes', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"]', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"])', 10, ['t' + s]),
            (f2, os_path + 'join("test", "test_cac" + x,', 22, ['he.py']),

            # GH #1528
            (f2, "'a' 'b'", 4, Ellipsis),
        ]
    )
    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['/', '_output.txt"'] == ['/']
E             
E             Left contains one more item: '_output.txt"'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError
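
Every failure in this group shows the same symptom: one extra completion ending in _output.txt (plus the appropriate closing quote) alongside the expected directory completion. That points to a stray file whose name starts with "test" (for example a leftover test_output.txt from an earlier run) sitting in the directory being completed; this is an inference from the assertion diffs, not something the log states. A minimal sketch that reproduces the first case directly, assuming jedi is importable and the stray file is present in the current working directory:

    # Repro sketch for the first failing case. The Script fixture above is a
    # functools.partial around jedi's Script class, so plain jedi.Script is
    # equivalent for this purpose.
    import jedi

    comps = jedi.Script('"test').complete()  # completes at the end of the code
    print([c.complete for c in comps])
    # expected by the test: ['/']
    # observed in this run: ['/', '_output.txt"']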

test_completion.py::test_file_path_completions[None-"test-4-expected2]
Script = functools.partial(<Script>, environment=<environment>)
file = None, code = '"test', column = 4, expected = ['t/']

    @pytest.mark.parametrize(
        'file, code, column, expected', [
            # General tests / relative paths
            (None, '"comp', None, []),  # No files like comp
            (None, '"test', None, [s]),
            (None, '"test', 4, ['t' + s]),
            ('example.py', '"test%scomp' % s, None, ['letion' + s]),
            ('example.py', 'r"comp"', None, []),
            ('example.py', 'r"tes"', None, []),
            ('example.py', '1 + r"tes"', None, []),
            ('example.py', 'r"tes"', 5, ['t' + s]),
            ('example.py', 'r" tes"', 6, []),
            ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 11, ['letion' + s]),
            ('test%sexample.py' % se, '"%s"' % join('test', 'completion', 'basi'), 21, ['c.py']),
            ('example.py', 'rb"' + join('..', current_dirname, 'tes'), None, ['t' + s]),

            # Absolute paths
            (None, f'"{root_dir.joinpath("test", "test_ca")}', None, ['che.py"']),
            (None, f'"{root_dir.joinpath("test", "test_ca")}"', len(str(root_dir)) + 14, ['che.py']),

            # Longer quotes
            ('example.py', 'r"""test', None, [s]),
            ('example.py', 'r"""\ntest', None, []),
            ('example.py', 'u"""tes\n', (1, 7), ['t' + s]),
            ('example.py', '"""test%stest_cache.p"""' % s, 20, ['y']),
            ('example.py', '"""test%stest_cache.p"""' % s, 19, ['py"""']),

            # Adding
            ('example.py', '"test" + "%stest_cac' % se, None, ['he.py"']),
            ('example.py', '"test" + "%s" + "test_cac' % se, None, ['he.py"']),
            ('example.py', 'x = 1 + "test', None, []),
            ('example.py', 'x = f("te" + "st)', 16, [s]),
            ('example.py', 'x = f("te" + "st', 16, [s]),
            ('example.py', 'x = f("te" + "st"', 16, [s]),
            ('example.py', 'x = f("te" + "st")', 16, [s]),
            ('example.py', 'x = f("t" + "est")', 16, [s]),
            ('example.py', 'x = f(b"t" + "est")', 17, []),
            ('example.py', '"test" + "', None, [s]),

            # __file__
            (f1, os_path + 'dirname(__file__) + "%stest' % s, None, [s]),
            (f2, os_path + 'dirname(__file__) + "%stest_ca' % se, None, ['che.py"']),
            (f2, os_path + 'dirname(abspath(__file__)) + sep + "test_ca', None, ['che.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion") + sep + "basi', None, ['c.py"']),
            (f2, os_path + 'join("test", "completion") + sep + "basi', None, ['c.py"']),

            # inside join
            (f2, os_path + 'join(dirname(__file__), "completion", "basi', None, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 43, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 43, ['c.py']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 35, ['']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 33, ['on"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 33, ['on"']),

            # join with one argument. join will not get inferred and the result is
            # that directories don't end in a slash. This is unfortunate, but doesn't
            # really matter.
            (f2, os_path + 'join("tes', 9, ['t"']),
            (f2, os_path + 'join(\'tes)', 9, ["t'"]),
            (f2, os_path + 'join(r"tes"', 10, ['t']),
            (f2, os_path + 'join("""tes""")', 11, ['t']),

            # Almost like join but not really
            (f2, os_path + 'join["tes', 9, ['t' + s]),
            (f2, os_path + 'join["tes"', 9, ['t' + s]),
            (f2, os_path + 'join["tes"]', 9, ['t' + s]),
            (f2, os_path + 'join[dirname(__file__), "completi', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"]', 33, []),

            # With full paths
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi', 49, ['on"']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi"', 49, ['on']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi")', 49, ['on']),

            # With alias
            (f2, 'import os.path as p\np.join(p.dirname(__file__), "completi', None, ['on"']),
            (f2, 'from os.path import dirname, join as j\nj(dirname(__file__), "completi',
             None, ['on"']),

            # Trying to break it
            (f2, os_path + 'join(["tes', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"]', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"])', 10, ['t' + s]),
            (f2, os_path + 'join("test", "test_cac" + x,', 22, ['he.py']),

            # GH #1528
            (f2, "'a' 'b'", 4, Ellipsis),
        ]
    )
    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['t/', 't_output.txt"'] == ['t/']
E             
E             Left contains one more item: 't_output.txt"'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-r"tes"-5-expected7]
Script = functools.partial(<Script>, environment=<environment>)
file = 'example.py', code = 'r"tes"', column = 5, expected = ['t/']

    @pytest.mark.parametrize(
        'file, code, column, expected', [
            # General tests / relative paths
            (None, '"comp', None, []),  # No files like comp
            (None, '"test', None, [s]),
            (None, '"test', 4, ['t' + s]),
            ('example.py', '"test%scomp' % s, None, ['letion' + s]),
            ('example.py', 'r"comp"', None, []),
            ('example.py', 'r"tes"', None, []),
            ('example.py', '1 + r"tes"', None, []),
            ('example.py', 'r"tes"', 5, ['t' + s]),
            ('example.py', 'r" tes"', 6, []),
            ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 11, ['letion' + s]),
            ('test%sexample.py' % se, '"%s"' % join('test', 'completion', 'basi'), 21, ['c.py']),
            ('example.py', 'rb"' + join('..', current_dirname, 'tes'), None, ['t' + s]),

            # Absolute paths
            (None, f'"{root_dir.joinpath("test", "test_ca")}', None, ['che.py"']),
            (None, f'"{root_dir.joinpath("test", "test_ca")}"', len(str(root_dir)) + 14, ['che.py']),

            # Longer quotes
            ('example.py', 'r"""test', None, [s]),
            ('example.py', 'r"""\ntest', None, []),
            ('example.py', 'u"""tes\n', (1, 7), ['t' + s]),
            ('example.py', '"""test%stest_cache.p"""' % s, 20, ['y']),
            ('example.py', '"""test%stest_cache.p"""' % s, 19, ['py"""']),

            # Adding
            ('example.py', '"test" + "%stest_cac' % se, None, ['he.py"']),
            ('example.py', '"test" + "%s" + "test_cac' % se, None, ['he.py"']),
            ('example.py', 'x = 1 + "test', None, []),
            ('example.py', 'x = f("te" + "st)', 16, [s]),
            ('example.py', 'x = f("te" + "st', 16, [s]),
            ('example.py', 'x = f("te" + "st"', 16, [s]),
            ('example.py', 'x = f("te" + "st")', 16, [s]),
            ('example.py', 'x = f("t" + "est")', 16, [s]),
            ('example.py', 'x = f(b"t" + "est")', 17, []),
            ('example.py', '"test" + "', None, [s]),

            # __file__
            (f1, os_path + 'dirname(__file__) + "%stest' % s, None, [s]),
            (f2, os_path + 'dirname(__file__) + "%stest_ca' % se, None, ['che.py"']),
            (f2, os_path + 'dirname(abspath(__file__)) + sep + "test_ca', None, ['che.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion") + sep + "basi', None, ['c.py"']),
            (f2, os_path + 'join("test", "completion") + sep + "basi', None, ['c.py"']),

            # inside join
            (f2, os_path + 'join(dirname(__file__), "completion", "basi', None, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 43, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 43, ['c.py']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 35, ['']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 33, ['on"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 33, ['on"']),

            # join with one argument. join will not get inferred and the result is
            # that directories don't end in a slash. This is unfortunate, but doesn't
            # really matter.
            (f2, os_path + 'join("tes', 9, ['t"']),
            (f2, os_path + 'join(\'tes)', 9, ["t'"]),
            (f2, os_path + 'join(r"tes"', 10, ['t']),
            (f2, os_path + 'join("""tes""")', 11, ['t']),

            # Almost like join but not really
            (f2, os_path + 'join["tes', 9, ['t' + s]),
            (f2, os_path + 'join["tes"', 9, ['t' + s]),
            (f2, os_path + 'join["tes"]', 9, ['t' + s]),
            (f2, os_path + 'join[dirname(__file__), "completi', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"]', 33, []),

            # With full paths
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi', 49, ['on"']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi"', 49, ['on']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi")', 49, ['on']),

            # With alias
            (f2, 'import os.path as p\np.join(p.dirname(__file__), "completi', None, ['on"']),
            (f2, 'from os.path import dirname, join as j\nj(dirname(__file__), "completi',
             None, ['on"']),

            # Trying to break it
            (f2, os_path + 'join(["tes', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"]', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"])', 10, ['t' + s]),
            (f2, os_path + 'join("test", "test_cac" + x,', 22, ['he.py']),

            # GH #1528
            (f2, "'a' 'b'", 4, Ellipsis),
        ]
    )
    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           AssertionError: assert ['t/', 't_output.txt'] == ['t/']
E             
E             Left contains one more item: 't_output.txt'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[test/example.py-r"tes"-5-expected9]
Script = functools.partial(<Script>, environment=<environment>)
file = 'test/example.py', code = 'r"tes"', column = 5, expected = ['t/']

    @pytest.mark.parametrize(
        'file, code, column, expected', [
            # General tests / relative paths
            (None, '"comp', None, []),  # No files like comp
            (None, '"test', None, [s]),
            (None, '"test', 4, ['t' + s]),
            ('example.py', '"test%scomp' % s, None, ['letion' + s]),
            ('example.py', 'r"comp"', None, []),
            ('example.py', 'r"tes"', None, []),
            ('example.py', '1 + r"tes"', None, []),
            ('example.py', 'r"tes"', 5, ['t' + s]),
            ('example.py', 'r" tes"', 6, []),
            ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 11, ['letion' + s]),
            ('test%sexample.py' % se, '"%s"' % join('test', 'completion', 'basi'), 21, ['c.py']),
            ('example.py', 'rb"' + join('..', current_dirname, 'tes'), None, ['t' + s]),

            # Absolute paths
            (None, f'"{root_dir.joinpath("test", "test_ca")}', None, ['che.py"']),
            (None, f'"{root_dir.joinpath("test", "test_ca")}"', len(str(root_dir)) + 14, ['che.py']),

            # Longer quotes
            ('example.py', 'r"""test', None, [s]),
            ('example.py', 'r"""\ntest', None, []),
            ('example.py', 'u"""tes\n', (1, 7), ['t' + s]),
            ('example.py', '"""test%stest_cache.p"""' % s, 20, ['y']),
            ('example.py', '"""test%stest_cache.p"""' % s, 19, ['py"""']),

            # Adding
            ('example.py', '"test" + "%stest_cac' % se, None, ['he.py"']),
            ('example.py', '"test" + "%s" + "test_cac' % se, None, ['he.py"']),
            ('example.py', 'x = 1 + "test', None, []),
            ('example.py', 'x = f("te" + "st)', 16, [s]),
            ('example.py', 'x = f("te" + "st', 16, [s]),
            ('example.py', 'x = f("te" + "st"', 16, [s]),
            ('example.py', 'x = f("te" + "st")', 16, [s]),
            ('example.py', 'x = f("t" + "est")', 16, [s]),
            ('example.py', 'x = f(b"t" + "est")', 17, []),
            ('example.py', '"test" + "', None, [s]),

            # __file__
            (f1, os_path + 'dirname(__file__) + "%stest' % s, None, [s]),
            (f2, os_path + 'dirname(__file__) + "%stest_ca' % se, None, ['che.py"']),
            (f2, os_path + 'dirname(abspath(__file__)) + sep + "test_ca', None, ['che.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion") + sep + "basi', None, ['c.py"']),
            (f2, os_path + 'join("test", "completion") + sep + "basi', None, ['c.py"']),

            # inside join
            (f2, os_path + 'join(dirname(__file__), "completion", "basi', None, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 43, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 43, ['c.py']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 35, ['']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 33, ['on"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 33, ['on"']),

            # join with one argument. join will not get inferred and the result is
            # that directories don't end in a slash. This is unfortunate, but doesn't
            # really matter.
            (f2, os_path + 'join("tes', 9, ['t"']),
            (f2, os_path + 'join(\'tes)', 9, ["t'"]),
            (f2, os_path + 'join(r"tes"', 10, ['t']),
            (f2, os_path + 'join("""tes""")', 11, ['t']),

            # Almost like join but not really
            (f2, os_path + 'join["tes', 9, ['t' + s]),
            (f2, os_path + 'join["tes"', 9, ['t' + s]),
            (f2, os_path + 'join["tes"]', 9, ['t' + s]),
            (f2, os_path + 'join[dirname(__file__), "completi', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"]', 33, []),

            # With full paths
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi', 49, ['on"']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi"', 49, ['on']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi")', 49, ['on']),

            # With alias
            (f2, 'import os.path as p\np.join(p.dirname(__file__), "completi', None, ['on"']),
            (f2, 'from os.path import dirname, join as j\nj(dirname(__file__), "completi',
             None, ['on"']),

            # Trying to break it
            (f2, os_path + 'join(["tes', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"]', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"])', 10, ['t' + s]),
            (f2, os_path + 'join("test", "test_cac" + x,', 22, ['he.py']),

            # GH #1528
            (f2, "'a' 'b'", 4, Ellipsis),
        ]
    )
    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           AssertionError: assert ['t/', 't_output.txt'] == ['t/']
E             
E             Left contains one more item: 't_output.txt'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

comp"-5-expected10]

comp"-5-expected10]
Script = functools.partial(<Script>, environment=<environment>)
file = 'test/example.py', code = 'r"test/comp"', column = 5, expected = ['t/']

    @pytest.mark.parametrize(
        'file, code, column, expected', [
            # General tests / relative paths
            (None, '"comp', None, []),  # No files like comp
            (None, '"test', None, [s]),
            (None, '"test', 4, ['t' + s]),
            ('example.py', '"test%scomp' % s, None, ['letion' + s]),
            ('example.py', 'r"comp"', None, []),
            ('example.py', 'r"tes"', None, []),
            ('example.py', '1 + r"tes"', None, []),
            ('example.py', 'r"tes"', 5, ['t' + s]),
            ('example.py', 'r" tes"', 6, []),
            ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 11, ['letion' + s]),
            ('test%sexample.py' % se, '"%s"' % join('test', 'completion', 'basi'), 21, ['c.py']),
            ('example.py', 'rb"' + join('..', current_dirname, 'tes'), None, ['t' + s]),

            # Absolute paths
            (None, f'"{root_dir.joinpath("test", "test_ca")}', None, ['che.py"']),
            (None, f'"{root_dir.joinpath("test", "test_ca")}"', len(str(root_dir)) + 14, ['che.py']),

            # Longer quotes
            ('example.py', 'r"""test', None, [s]),
            ('example.py', 'r"""\ntest', None, []),
            ('example.py', 'u"""tes\n', (1, 7), ['t' + s]),
            ('example.py', '"""test%stest_cache.p"""' % s, 20, ['y']),
            ('example.py', '"""test%stest_cache.p"""' % s, 19, ['py"""']),

            # Adding
            ('example.py', '"test" + "%stest_cac' % se, None, ['he.py"']),
            ('example.py', '"test" + "%s" + "test_cac' % se, None, ['he.py"']),
            ('example.py', 'x = 1 + "test', None, []),
            ('example.py', 'x = f("te" + "st)', 16, [s]),
            ('example.py', 'x = f("te" + "st', 16, [s]),
            ('example.py', 'x = f("te" + "st"', 16, [s]),
            ('example.py', 'x = f("te" + "st")', 16, [s]),
            ('example.py', 'x = f("t" + "est")', 16, [s]),
            ('example.py', 'x = f(b"t" + "est")', 17, []),
            ('example.py', '"test" + "', None, [s]),

            # __file__
            (f1, os_path + 'dirname(__file__) + "%stest' % s, None, [s]),
            (f2, os_path + 'dirname(__file__) + "%stest_ca' % se, None, ['che.py"']),
            (f2, os_path + 'dirname(abspath(__file__)) + sep + "test_ca', None, ['che.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion") + sep + "basi', None, ['c.py"']),
            (f2, os_path + 'join("test", "completion") + sep + "basi', None, ['c.py"']),

            # inside join
            (f2, os_path + 'join(dirname(__file__), "completion", "basi', None, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 43, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 43, ['c.py']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 35, ['']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 33, ['on"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 33, ['on"']),

            # join with one argument. join will not get inferred and the result is
            # that directories don't end in a slash. This is unfortunate, but doesn't
            # really matter.
            (f2, os_path + 'join("tes', 9, ['t"']),
            (f2, os_path + 'join(\'tes)', 9, ["t'"]),
            (f2, os_path + 'join(r"tes"', 10, ['t']),
            (f2, os_path + 'join("""tes""")', 11, ['t']),

            # Almost like join but not really
            (f2, os_path + 'join["tes', 9, ['t' + s]),
            (f2, os_path + 'join["tes"', 9, ['t' + s]),
            (f2, os_path + 'join["tes"]', 9, ['t' + s]),
            (f2, os_path + 'join[dirname(__file__), "completi', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"]', 33, []),

            # With full paths
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi', 49, ['on"']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi"', 49, ['on']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi")', 49, ['on']),

            # With alias
            (f2, 'import os.path as p\np.join(p.dirname(__file__), "completi', None, ['on"']),
            (f2, 'from os.path import dirname, join as j\nj(dirname(__file__), "completi',
             None, ['on"']),

            # Trying to break it
            (f2, os_path + 'join(["tes', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"]', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"])', 10, ['t' + s]),
            (f2, os_path + 'join("test", "test_cac" + x,', 22, ['he.py']),

            # GH #1528
            (f2, "'a' 'b'", 4, Ellipsis),
        ]
    )
    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['t/', 't_output.txt"'] == ['t/']
E             
E             Left contains one more item: 't_output.txt"'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-rb"../testbed/tes-None-expected13]
Script = functools.partial(<Script>, environment=<environment>)
file = 'example.py', code = 'rb"../testbed/tes', column = None
expected = ['t/']

    @pytest.mark.parametrize(
        'file, code, column, expected', [
            # General tests / relative paths
            (None, '"comp', None, []),  # No files like comp
            (None, '"test', None, [s]),
            (None, '"test', 4, ['t' + s]),
            ('example.py', '"test%scomp' % s, None, ['letion' + s]),
            ('example.py', 'r"comp"', None, []),
            ('example.py', 'r"tes"', None, []),
            ('example.py', '1 + r"tes"', None, []),
            ('example.py', 'r"tes"', 5, ['t' + s]),
            ('example.py', 'r" tes"', 6, []),
            ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 11, ['letion' + s]),
            ('test%sexample.py' % se, '"%s"' % join('test', 'completion', 'basi'), 21, ['c.py']),
            ('example.py', 'rb"' + join('..', current_dirname, 'tes'), None, ['t' + s]),

            # Absolute paths
            (None, f'"{root_dir.joinpath("test", "test_ca")}', None, ['che.py"']),
            (None, f'"{root_dir.joinpath("test", "test_ca")}"', len(str(root_dir)) + 14, ['che.py']),

            # Longer quotes
            ('example.py', 'r"""test', None, [s]),
            ('example.py', 'r"""\ntest', None, []),
            ('example.py', 'u"""tes\n', (1, 7), ['t' + s]),
            ('example.py', '"""test%stest_cache.p"""' % s, 20, ['y']),
            ('example.py', '"""test%stest_cache.p"""' % s, 19, ['py"""']),

            # Adding
            ('example.py', '"test" + "%stest_cac' % se, None, ['he.py"']),
            ('example.py', '"test" + "%s" + "test_cac' % se, None, ['he.py"']),
            ('example.py', 'x = 1 + "test', None, []),
            ('example.py', 'x = f("te" + "st)', 16, [s]),
            ('example.py', 'x = f("te" + "st', 16, [s]),
            ('example.py', 'x = f("te" + "st"', 16, [s]),
            ('example.py', 'x = f("te" + "st")', 16, [s]),
            ('example.py', 'x = f("t" + "est")', 16, [s]),
            ('example.py', 'x = f(b"t" + "est")', 17, []),
            ('example.py', '"test" + "', None, [s]),

            # __file__
            (f1, os_path + 'dirname(__file__) + "%stest' % s, None, [s]),
            (f2, os_path + 'dirname(__file__) + "%stest_ca' % se, None, ['che.py"']),
            (f2, os_path + 'dirname(abspath(__file__)) + sep + "test_ca', None, ['che.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion") + sep + "basi', None, ['c.py"']),
            (f2, os_path + 'join("test", "completion") + sep + "basi', None, ['c.py"']),

            # inside join
            (f2, os_path + 'join(dirname(__file__), "completion", "basi', None, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 43, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 43, ['c.py']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 35, ['']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 33, ['on"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 33, ['on"']),

            # join with one argument. join will not get inferred and the result is
            # that directories don't end in a slash. This is unfortunate, but doesn't
            # really matter.
            (f2, os_path + 'join("tes', 9, ['t"']),
            (f2, os_path + 'join(\'tes)', 9, ["t'"]),
            (f2, os_path + 'join(r"tes"', 10, ['t']),
            (f2, os_path + 'join("""tes""")', 11, ['t']),

            # Almost like join but not really
            (f2, os_path + 'join["tes', 9, ['t' + s]),
            (f2, os_path + 'join["tes"', 9, ['t' + s]),
            (f2, os_path + 'join["tes"]', 9, ['t' + s]),
            (f2, os_path + 'join[dirname(__file__), "completi', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"]', 33, []),

            # With full paths
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi', 49, ['on"']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi"', 49, ['on']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi")', 49, ['on']),

            # With alias
            (f2, 'import os.path as p\np.join(p.dirname(__file__), "completi', None, ['on"']),
            (f2, 'from os.path import dirname, join as j\nj(dirname(__file__), "completi',
             None, ['on"']),

            # Trying to break it
            (f2, os_path + 'join(["tes', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"]', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"])', 10, ['t' + s]),
            (f2, os_path + 'join("test", "test_cac" + x,', 22, ['he.py']),

            # GH #1528
            (f2, "'a' 'b'", 4, Ellipsis),
        ]
    )
    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['t/', 't_output.txt"'] == ['t/']
E             
E             Left contains one more item: 't_output.txt"'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-r"""test-None-expected16]
Script = functools.partial(<Script>, environment=<environment>)
file = 'example.py', code = 'r"""test', column = None, expected = ['/']

    @pytest.mark.parametrize(
        'file, code, column, expected', [
            # General tests / relative paths
            (None, '"comp', None, []),  # No files like comp
            (None, '"test', None, [s]),
            (None, '"test', 4, ['t' + s]),
            ('example.py', '"test%scomp' % s, None, ['letion' + s]),
            ('example.py', 'r"comp"', None, []),
            ('example.py', 'r"tes"', None, []),
            ('example.py', '1 + r"tes"', None, []),
            ('example.py', 'r"tes"', 5, ['t' + s]),
            ('example.py', 'r" tes"', 6, []),
            ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 11, ['letion' + s]),
            ('test%sexample.py' % se, '"%s"' % join('test', 'completion', 'basi'), 21, ['c.py']),
            ('example.py', 'rb"' + join('..', current_dirname, 'tes'), None, ['t' + s]),

            # Absolute paths
            (None, f'"{root_dir.joinpath("test", "test_ca")}', None, ['che.py"']),
            (None, f'"{root_dir.joinpath("test", "test_ca")}"', len(str(root_dir)) + 14, ['che.py']),

            # Longer quotes
            ('example.py', 'r"""test', None, [s]),
            ('example.py', 'r"""\ntest', None, []),
            ('example.py', 'u"""tes\n', (1, 7), ['t' + s]),
            ('example.py', '"""test%stest_cache.p"""' % s, 20, ['y']),
            ('example.py', '"""test%stest_cache.p"""' % s, 19, ['py"""']),

            # Adding
            ('example.py', '"test" + "%stest_cac' % se, None, ['he.py"']),
            ('example.py', '"test" + "%s" + "test_cac' % se, None, ['he.py"']),
            ('example.py', 'x = 1 + "test', None, []),
            ('example.py', 'x = f("te" + "st)', 16, [s]),
            ('example.py', 'x = f("te" + "st', 16, [s]),
            ('example.py', 'x = f("te" + "st"', 16, [s]),
            ('example.py', 'x = f("te" + "st")', 16, [s]),
            ('example.py', 'x = f("t" + "est")', 16, [s]),
            ('example.py', 'x = f(b"t" + "est")', 17, []),
            ('example.py', '"test" + "', None, [s]),

            # __file__
            (f1, os_path + 'dirname(__file__) + "%stest' % s, None, [s]),
            (f2, os_path + 'dirname(__file__) + "%stest_ca' % se, None, ['che.py"']),
            (f2, os_path + 'dirname(abspath(__file__)) + sep + "test_ca', None, ['che.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion") + sep + "basi', None, ['c.py"']),
            (f2, os_path + 'join("test", "completion") + sep + "basi', None, ['c.py"']),

            # inside join
            (f2, os_path + 'join(dirname(__file__), "completion", "basi', None, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 43, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 43, ['c.py']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 35, ['']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 33, ['on"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 33, ['on"']),

            # join with one argument. join will not get inferred and the result is
            # that directories don't end in a slash. This is unfortunate, but doesn't
            # really matter.
            (f2, os_path + 'join("tes', 9, ['t"']),
            (f2, os_path + 'join(\'tes)', 9, ["t'"]),
            (f2, os_path + 'join(r"tes"', 10, ['t']),
            (f2, os_path + 'join("""tes""")', 11, ['t']),

            # Almost like join but not really
            (f2, os_path + 'join["tes', 9, ['t' + s]),
            (f2, os_path + 'join["tes"', 9, ['t' + s]),
            (f2, os_path + 'join["tes"]', 9, ['t' + s]),
            (f2, os_path + 'join[dirname(__file__), "completi', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"]', 33, []),

            # With full paths
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi', 49, ['on"']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi"', 49, ['on']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi")', 49, ['on']),

            # With alias
            (f2, 'import os.path as p\np.join(p.dirname(__file__), "completi', None, ['on"']),
            (f2, 'from os.path import dirname, join as j\nj(dirname(__file__), "completi',
             None, ['on"']),

            # Trying to break it
            (f2, os_path + 'join(["tes', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"]', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"])', 10, ['t' + s]),
            (f2, os_path + 'join("test", "test_cac" + x,', 22, ['he.py']),

            # GH #1528
            (f2, "'a' 'b'", 4, Ellipsis),
        ]
    )
    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['/', '_output.txt"""'] == ['/']
E             
E             Left contains one more item: '_output.txt"""'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-u"""tes\n-column18-expected18]
Script = functools.partial(<Script>, environment=<environment>)
file = 'example.py', code = 'u"""tes\n', column = 7, expected = ['t/']

    @pytest.mark.parametrize(
        'file, code, column, expected', [
            # General tests / relative paths
            (None, '"comp', None, []),  # No files like comp
            (None, '"test', None, [s]),
            (None, '"test', 4, ['t' + s]),
            ('example.py', '"test%scomp' % s, None, ['letion' + s]),
            ('example.py', 'r"comp"', None, []),
            ('example.py', 'r"tes"', None, []),
            ('example.py', '1 + r"tes"', None, []),
            ('example.py', 'r"tes"', 5, ['t' + s]),
            ('example.py', 'r" tes"', 6, []),
            ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 11, ['letion' + s]),
            ('test%sexample.py' % se, '"%s"' % join('test', 'completion', 'basi'), 21, ['c.py']),
            ('example.py', 'rb"' + join('..', current_dirname, 'tes'), None, ['t' + s]),

            # Absolute paths
            (None, f'"{root_dir.joinpath("test", "test_ca")}', None, ['che.py"']),
            (None, f'"{root_dir.joinpath("test", "test_ca")}"', len(str(root_dir)) + 14, ['che.py']),

            # Longer quotes
            ('example.py', 'r"""test', None, [s]),
            ('example.py', 'r"""\ntest', None, []),
            ('example.py', 'u"""tes\n', (1, 7), ['t' + s]),
            ('example.py', '"""test%stest_cache.p"""' % s, 20, ['y']),
            ('example.py', '"""test%stest_cache.p"""' % s, 19, ['py"""']),

            # Adding
            ('example.py', '"test" + "%stest_cac' % se, None, ['he.py"']),
            ('example.py', '"test" + "%s" + "test_cac' % se, None, ['he.py"']),
            ('example.py', 'x = 1 + "test', None, []),
            ('example.py', 'x = f("te" + "st)', 16, [s]),
            ('example.py', 'x = f("te" + "st', 16, [s]),
            ('example.py', 'x = f("te" + "st"', 16, [s]),
            ('example.py', 'x = f("te" + "st")', 16, [s]),
            ('example.py', 'x = f("t" + "est")', 16, [s]),
            ('example.py', 'x = f(b"t" + "est")', 17, []),
            ('example.py', '"test" + "', None, [s]),

            # __file__
            (f1, os_path + 'dirname(__file__) + "%stest' % s, None, [s]),
            (f2, os_path + 'dirname(__file__) + "%stest_ca' % se, None, ['che.py"']),
            (f2, os_path + 'dirname(abspath(__file__)) + sep + "test_ca', None, ['che.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion") + sep + "basi', None, ['c.py"']),
            (f2, os_path + 'join("test", "completion") + sep + "basi', None, ['c.py"']),

            # inside join
            (f2, os_path + 'join(dirname(__file__), "completion", "basi', None, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 43, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 43, ['c.py']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 35, ['']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 33, ['on"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 33, ['on"']),

            # join with one argument. join will not get inferred and the result is
            # that directories don't end in a slash. This is unfortunate, but doesn't
            # really matter.
            (f2, os_path + 'join("tes', 9, ['t"']),
            (f2, os_path + 'join(\'tes)', 9, ["t'"]),
            (f2, os_path + 'join(r"tes"', 10, ['t']),
            (f2, os_path + 'join("""tes""")', 11, ['t']),

            # Almost like join but not really
            (f2, os_path + 'join["tes', 9, ['t' + s]),
            (f2, os_path + 'join["tes"', 9, ['t' + s]),
            (f2, os_path + 'join["tes"]', 9, ['t' + s]),
            (f2, os_path + 'join[dirname(__file__), "completi', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"]', 33, []),

            # With full paths
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi', 49, ['on"']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi"', 49, ['on']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi")', 49, ['on']),

            # With alias
            (f2, 'import os.path as p\np.join(p.dirname(__file__), "completi', None, ['on"']),
            (f2, 'from os.path import dirname, join as j\nj(dirname(__file__), "completi',
             None, ['on"']),

            # Trying to break it
            (f2, os_path + 'join(["tes', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"]', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"])', 10, ['t' + s]),
            (f2, os_path + 'join("test", "test_cac" + x,', 22, ['he.py']),

            # GH #1528
            (f2, "'a' 'b'", 4, Ellipsis),
        ]
    )
    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['t/', 't_output.txt"""'] == ['t/']
E             
E             Left contains one more item: 't_output.txt"""'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-x = f("te" + "st)-16-expected24]
Script = functools.partial(<Script>, environment=<environment>)
file = 'example.py', code = 'x = f("te" + "st)', column = 16, expected = ['/']

    @pytest.mark.parametrize(
        'file, code, column, expected', [
            # General tests / relative paths
            (None, '"comp', None, []),  # No files like comp
            (None, '"test', None, [s]),
            (None, '"test', 4, ['t' + s]),
            ('example.py', '"test%scomp' % s, None, ['letion' + s]),
            ('example.py', 'r"comp"', None, []),
            ('example.py', 'r"tes"', None, []),
            ('example.py', '1 + r"tes"', None, []),
            ('example.py', 'r"tes"', 5, ['t' + s]),
            ('example.py', 'r" tes"', 6, []),
            ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 11, ['letion' + s]),
            ('test%sexample.py' % se, '"%s"' % join('test', 'completion', 'basi'), 21, ['c.py']),
            ('example.py', 'rb"' + join('..', current_dirname, 'tes'), None, ['t' + s]),

            # Absolute paths
            (None, f'"{root_dir.joinpath("test", "test_ca")}', None, ['che.py"']),
            (None, f'"{root_dir.joinpath("test", "test_ca")}"', len(str(root_dir)) + 14, ['che.py']),

            # Longer quotes
            ('example.py', 'r"""test', None, [s]),
            ('example.py', 'r"""\ntest', None, []),
            ('example.py', 'u"""tes\n', (1, 7), ['t' + s]),
            ('example.py', '"""test%stest_cache.p"""' % s, 20, ['y']),
            ('example.py', '"""test%stest_cache.p"""' % s, 19, ['py"""']),

            # Adding
            ('example.py', '"test" + "%stest_cac' % se, None, ['he.py"']),
            ('example.py', '"test" + "%s" + "test_cac' % se, None, ['he.py"']),
            ('example.py', 'x = 1 + "test', None, []),
            ('example.py', 'x = f("te" + "st)', 16, [s]),
            ('example.py', 'x = f("te" + "st', 16, [s]),
            ('example.py', 'x = f("te" + "st"', 16, [s]),
            ('example.py', 'x = f("te" + "st")', 16, [s]),
            ('example.py', 'x = f("t" + "est")', 16, [s]),
            ('example.py', 'x = f(b"t" + "est")', 17, []),
            ('example.py', '"test" + "', None, [s]),

            # __file__
            (f1, os_path + 'dirname(__file__) + "%stest' % s, None, [s]),
            (f2, os_path + 'dirname(__file__) + "%stest_ca' % se, None, ['che.py"']),
            (f2, os_path + 'dirname(abspath(__file__)) + sep + "test_ca', None, ['che.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion") + sep + "basi', None, ['c.py"']),
            (f2, os_path + 'join("test", "completion") + sep + "basi', None, ['c.py"']),

            # inside join
            (f2, os_path + 'join(dirname(__file__), "completion", "basi', None, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 43, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 43, ['c.py']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 35, ['']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 33, ['on"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 33, ['on"']),

            # join with one argument. join will not get inferred and the result is
            # that directories don't end in a slash. This is unfortunate, but doesn't
            # really matter.
            (f2, os_path + 'join("tes', 9, ['t"']),
            (f2, os_path + 'join(\'tes)', 9, ["t'"]),
            (f2, os_path + 'join(r"tes"', 10, ['t']),
            (f2, os_path + 'join("""tes""")', 11, ['t']),

            # Almost like join but not really
            (f2, os_path + 'join["tes', 9, ['t' + s]),
            (f2, os_path + 'join["tes"', 9, ['t' + s]),
            (f2, os_path + 'join["tes"]', 9, ['t' + s]),
            (f2, os_path + 'join[dirname(__file__), "completi', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"]', 33, []),

            # With full paths
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi', 49, ['on"']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi"', 49, ['on']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi")', 49, ['on']),

            # With alias
            (f2, 'import os.path as p\np.join(p.dirname(__file__), "completi', None, ['on"']),
            (f2, 'from os.path import dirname, join as j\nj(dirname(__file__), "completi',
             None, ['on"']),

            # Trying to break it
            (f2, os_path + 'join(["tes', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"]', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"])', 10, ['t' + s]),
            (f2, os_path + 'join("test", "test_cac" + x,', 22, ['he.py']),

            # GH #1528
            (f2, "'a' 'b'", 4, Ellipsis),
        ]
    )
    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['/', '_output.txt"'] == ['/']
E             
E             Left contains one more item: '_output.txt"'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-x = f("te" + "st-16-expected25]
Script = functools.partial(<Script>, environment=<environment>)
file = 'example.py', code = 'x = f("te" + "st', column = 16, expected = ['/']

    @pytest.mark.parametrize(
        'file, code, column, expected', [
            # General tests / relative paths
            (None, '"comp', None, []),  # No files like comp
            (None, '"test', None, [s]),
            (None, '"test', 4, ['t' + s]),
            ('example.py', '"test%scomp' % s, None, ['letion' + s]),
            ('example.py', 'r"comp"', None, []),
            ('example.py', 'r"tes"', None, []),
            ('example.py', '1 + r"tes"', None, []),
            ('example.py', 'r"tes"', 5, ['t' + s]),
            ('example.py', 'r" tes"', 6, []),
            ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 5, ['t' + s]),
            ('test%sexample.py' % se, 'r"test%scomp"' % s, 11, ['letion' + s]),
            ('test%sexample.py' % se, '"%s"' % join('test', 'completion', 'basi'), 21, ['c.py']),
            ('example.py', 'rb"' + join('..', current_dirname, 'tes'), None, ['t' + s]),

            # Absolute paths
            (None, f'"{root_dir.joinpath("test", "test_ca")}', None, ['che.py"']),
            (None, f'"{root_dir.joinpath("test", "test_ca")}"', len(str(root_dir)) + 14, ['che.py']),

            # Longer quotes
            ('example.py', 'r"""test', None, [s]),
            ('example.py', 'r"""\ntest', None, []),
            ('example.py', 'u"""tes\n', (1, 7), ['t' + s]),
            ('example.py', '"""test%stest_cache.p"""' % s, 20, ['y']),
            ('example.py', '"""test%stest_cache.p"""' % s, 19, ['py"""']),

            # Adding
            ('example.py', '"test" + "%stest_cac' % se, None, ['he.py"']),
            ('example.py', '"test" + "%s" + "test_cac' % se, None, ['he.py"']),
            ('example.py', 'x = 1 + "test', None, []),
            ('example.py', 'x = f("te" + "st)', 16, [s]),
            ('example.py', 'x = f("te" + "st', 16, [s]),
            ('example.py', 'x = f("te" + "st"', 16, [s]),
            ('example.py', 'x = f("te" + "st")', 16, [s]),
            ('example.py', 'x = f("t" + "est")', 16, [s]),
            ('example.py', 'x = f(b"t" + "est")', 17, []),
            ('example.py', '"test" + "', None, [s]),

            # __file__
            (f1, os_path + 'dirname(__file__) + "%stest' % s, None, [s]),
            (f2, os_path + 'dirname(__file__) + "%stest_ca' % se, None, ['che.py"']),
            (f2, os_path + 'dirname(abspath(__file__)) + sep + "test_ca', None, ['che.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion") + sep + "basi', None, ['c.py"']),
            (f2, os_path + 'join("test", "completion") + sep + "basi', None, ['c.py"']),

            # inside join
            (f2, os_path + 'join(dirname(__file__), "completion", "basi', None, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 43, ['c.py"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 43, ['c.py']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 35, ['']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 33, ['on"']),
            (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 33, ['on"']),

            # join with one argument. join will not get inferred and the result is
            # that directories don't end in a slash. This is unfortunate, but doesn't
            # really matter.
            (f2, os_path + 'join("tes', 9, ['t"']),
            (f2, os_path + 'join(\'tes)', 9, ["t'"]),
            (f2, os_path + 'join(r"tes"', 10, ['t']),
            (f2, os_path + 'join("""tes""")', 11, ['t']),

            # Almost like join but not really
            (f2, os_path + 'join["tes', 9, ['t' + s]),
            (f2, os_path + 'join["tes"', 9, ['t' + s]),
            (f2, os_path + 'join["tes"]', 9, ['t' + s]),
            (f2, os_path + 'join[dirname(__file__), "completi', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"', 33, []),
            (f2, os_path + 'join[dirname(__file__), "completi"]', 33, []),

            # With full paths
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi', 49, ['on"']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi"', 49, ['on']),
            (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi")', 49, ['on']),

            # With alias
            (f2, 'import os.path as p as p\np.join(p.dirname(__file__), "completi', None, ['on"']),
            (f2, 'from os.path import dirname, join as j\nj(dirname(__file__), "completi',
             None, ['on"']),

            # Trying to break it
            (f2, os_path + 'join(["tes', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"]', 10, ['t' + s]),
            (f2, os_path + 'join(["tes"])', 10, ['t' + s]),
            (f2, os_path + 'join("test", "test_cac" + x,', 22, ['he.py']),

            # GH #1528
            (f2, "'a' 'b'", 4, Ellipsis),
        ]
    )
    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['/', '_output.txt"'] == ['/']
E             
E             Left contains one more item: '_output.txt"'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError
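
All of these failures point at the same root cause: an extra filesystem entry shows up in jedi's file path completions. Each case can be replayed directly against the Script API; a minimal sketch (assuming jedi is importable, and noting that the completions returned depend entirely on the files present in the working directory):

    import jedi

    # Replays the simplest parametrized case above: completing the open
    # string '"test' with no explicit position (defaults to end of code).
    comps = jedi.Script('"test').complete()
    print([c.complete for c in comps])
    # The test expects ['/'] (only the test/ directory). The failing runs
    # additionally report '_output.txt"', which implies a sibling entry such
    # as test_output.txt, a name inferred from the diff, not shown here.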

test_completion.py::test_file_path_completions[example.py-x = f("te" + "st"-16-expected26]
Script = functools.partial(<...>, environment=<...>)
file = 'example.py', code = 'x = f("te" + "st"', column = 16, expected = ['/']

    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           AssertionError: assert ['/', '_output.txt'] == ['/']
E             
E             Left contains one more item: '_output.txt'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-x = f("te" + "st")-16-expected27]
Script = functools.partial(<...>, environment=<...>)
file = 'example.py', code = 'x = f("te" + "st")', column = 16, expected = ['/']

    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           AssertionError: assert ['/', '_output.txt'] == ['/']
E             
E             Left contains one more item: '_output.txt'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-x = f("t" + "est")-16-expected28]
Script = functools.partial(<...>, environment=<...>)
file = 'example.py', code = 'x = f("t" + "est")', column = 16, expected = ['/']

    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           AssertionError: assert ['/', '_output.txt'] == ['/']
E             
E             Left contains one more item: '_output.txt'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError
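
The string-concatenation cases fail identically: jedi folds '"t" + "est"' into one path prefix, and the explicit column places the cursor inside the second literal. A sketch of that exact call (line made explicit since the code is a single line):

    import jedi

    code = 'x = f("t" + "est")'
    # Column 16 is just after "est", before its closing quote; jedi joins
    # the two literals and completes the combined prefix "test".
    comps = jedi.Script(code, path='example.py').complete(line=1, column=16)
    print([c.complete for c in comps])  # the test expects ['/']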

test_completion.py::test_file_path_completions[example.py-"test" + "-None-expected30]
Script = functools.partial(<...>, environment=<...>)
file = 'example.py', code = '"test" + "', column = None, expected = ['/']

    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['/', '_output.txt"'] == ['/']
E             
E             Left contains one more item: '_output.txt"'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test-None-expected31]
Script = functools.partial(<...>, environment=<...>)
file = '/testbed/example.py'
code = 'from os.path import *\ndirname(__file__) + "/test', column = None
expected = ['/']

    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['/', '_output.txt"'] == ['/']
E             
E             Left contains one more item: '_output.txt"'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError
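
Here jedi has to infer dirname(__file__) from the script's own path before completing the trailing string. A sketch of the same call (the /testbed path comes from this report; substitute any existing file for a local replay):

    import jedi

    code = 'from os.path import *\ndirname(__file__) + "/test'
    # dirname(__file__) resolves to the directory containing the script, so
    # the open string is completed relative to that directory.
    comps = jedi.Script(code, path='/testbed/example.py').complete()
    print([c.complete for c in comps])  # the test expects ['/']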

example.py-from os.path import *\njoin("tes-9-expected42]

example.py-from os.path import *\njoin("tes-9-expected42]
Script = functools.partial(<...>, environment=<...>)
file = '/testbed/test/example.py', code = 'from os.path import *\njoin("tes'
column = 9, expected = ['t"']

    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['t"', 't_output.txt"'] == ['t"']
E             
E             Left contains one more item: 't_output.txt"'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError
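
The single-argument join cases exercise the opposite path: per the case list's own comment, join(...) does not get inferred, so the literal is completed like a plain string and the closing quote is appended. The same call outside pytest (relative path chosen for illustration):

    import jedi

    code = 'from os.path import *\njoin("tes'
    # Column 9 on line 2 sits right after "tes" inside the open literal.
    comps = jedi.Script(code, path='test/example.py').complete(line=2, column=9)
    print([c.complete for c in comps])
    # The test expects ['t"']; the failing run also saw 't_output.txt"'.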

example.py-from os.path import *\njoin('tes)-9-expected43]
Script = functools.partial(<...>, environment=<...>)
file = '/testbed/test/example.py', code = "from os.path import *\njoin('tes)"
column = 9, expected = ["t'"]

    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ["t'", "t_output.txt'"] == ["t'"]
E             
E             Left contains one more item: "t_output.txt'"
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

example.py-from os.path import *\njoin(r"tes"-10-expected44]

example.py-from os.path import *\njoin(r"tes"-10-expected44]
Script = functools.partial(<...>, environment=<...>)
file = '/testbed/test/example.py', code = 'from os.path import *\njoin(r"tes"'
column = 10, expected = ['t']

    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           AssertionError: assert ['t', 't_output.txt'] == ['t']
E             
E             Left contains one more item: 't_output.txt'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

example.py-from os.path import *\njoin("""tes""")-11-expected45]

example.py-from os.path import *\njoin("""tes""")-11-expected45]
Script = functools.partial(<...>, environment=<...>)
file = '/testbed/test/example.py'
code = 'from os.path import *\njoin("""tes""")', column = 11, expected = ['t']

    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           AssertionError: assert ['t', 't_output.txt'] == ['t']
E             
E             Left contains one more item: 't_output.txt'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError
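
Every failure above lands on the same assertion at test/test_api/test_completion.py:297, so the whole group can be re-run with a single node id, verbosely, as the report's "Use -v" hint suggests:

    import pytest

    # Re-runs just this test function with verbose diffs.
    pytest.main(['-v', 'test/test_api/test_completion.py::test_file_path_completions'])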

example.py-from os.path import *\njoin["tes-9-expected46]

example.py-from os.path import *\njoin["tes-9-expected46]
Script = functools.partial(<...>, environment=<...>)
file = '/testbed/test/example.py', code = 'from os.path import *\njoin["tes'
column = 9, expected = ['t/']

    def test_file_path_completions(Script, file, code, column, expected):
        line = None
        if isinstance(column, tuple):
            line, column = column
        comps = Script(code, path=file).complete(line=line, column=column)
        if expected is Ellipsis:
            assert len(comps) > 100  # This is basically global completions.
        else:
>           assert [c.complete for c in comps] == expected
E           assert ['t/', 't_output.txt"'] == ['t/']
E             
E             Left contains one more item: 't_output.txt"'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-from os.path import *\njoin["tes"-9-expected47]
Script = functools.partial(, environment=)
file = '/testbed/test/example.py', code = 'from os.path import *\njoin["tes"'
column = 9, expected = ['t/']

    [parametrize list and test source identical to the first test_file_path_completions failure above; only the parametrized inputs shown in the header differ]
>           assert [c.complete for c in comps] == expected
E           AssertionError: assert ['t/', 't_output.txt'] == ['t/']
E             
E             Left contains one more item: 't_output.txt'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-from os.path import *\njoin["tes"]-9-expected48]
Script = functools.partial(, environment=)
file = '/testbed/test/example.py', code = 'from os.path import *\njoin["tes"]'
column = 9, expected = ['t/']

    [parametrize list and test source identical to the first test_file_path_completions failure above; only the parametrized inputs shown in the header differ]
>           assert [c.complete for c in comps] == expected
E           AssertionError: assert ['t/', 't_output.txt'] == ['t/']
E             
E             Left contains one more item: 't_output.txt'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-from os.path import *\njoin(["tes-10-expected57]
Script = functools.partial(, environment=)
file = '/testbed/test/example.py', code = 'from os.path import *\njoin(["tes'
column = 10, expected = ['t/']

    [parametrize list and test source identical to the first test_file_path_completions failure above; only the parametrized inputs shown in the header differ]
>           assert [c.complete for c in comps] == expected
E           assert ['t/', 't_output.txt"'] == ['t/']
E             
E             Left contains one more item: 't_output.txt"'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-from os.path import *\njoin(["tes"]-10-expected58]
Script = functools.partial(, environment=)
file = '/testbed/test/example.py', code = 'from os.path import *\njoin(["tes"]'
column = 10, expected = ['t/']

    [parametrize list and test source identical to the first test_file_path_completions failure above; only the parametrized inputs shown in the header differ]
>           assert [c.complete for c in comps] == expected
E           AssertionError: assert ['t/', 't_output.txt'] == ['t/']
E             
E             Left contains one more item: 't_output.txt'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError

test_completion.py::test_file_path_completions[example.py-from os.path import *\njoin(["tes"])-10-expected59]
Script = functools.partial(, environment=)
file = '/testbed/test/example.py', code = 'from os.path import *\njoin(["tes"])'
column = 10, expected = ['t/']

    [parametrize list and test source identical to the first test_file_path_completions failure above; only the parametrized inputs shown in the header differ]
>           assert [c.complete for c in comps] == expected
E           AssertionError: assert ['t/', 't_output.txt'] == ['t/']
E             
E             Left contains one more item: 't_output.txt'
E             Use -v to get more diff

test/test_api/test_completion.py:297: AssertionError
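
Editor's note: the trailing '"' on 't_output.txt"' in some of these
failures is expected behaviour rather than further corruption:
Completion.complete is the text still left to type, so jedi appends the
closing quote only while the string literal is unterminated. A sketch
mirroring the failing parametrizations (same stray-file caveat as
above):

    import jedi

    for code in ('join(["tes', 'join(["tes"'):  # open vs. closed literal
        source = 'from os.path import *\n' + code
        comps = jedi.Script(source, path='example.py').complete(line=None, column=10)
        print(code, [c.complete for c in comps])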

test_environment.py::test_working_venv
tmpdir_factory = TempdirFactory(_tmppath_factory=TempPathFactory(_given_basetemp=None, _trace=, _basetemp=PosixPath('/tmp/pytest-of-root/pytest-0'), _retention_count=3, _retention_policy='all'))
environment = 

    @pytest.fixture(scope='session')
    def venv_path(tmpdir_factory, environment):
        if isinstance(environment, InterpreterEnvironment):
            # The environment can be a tox virtualenv environment which we don't
            # want, so use the system environment.
            environment = get_system_environment(
                '.'.join(str(x) for x in environment.version_info[:2])
            )

        tmpdir = tmpdir_factory.mktemp('venv_path')
        dirname = os.path.join(tmpdir.strpath, 'venv')

        # We cannot use the Python from tox because tox creates virtualenvs and
        # they have different site.py files that work differently than the default
        # ones. Instead, we find the real Python executable by printing the value
        # of sys.base_prefix or sys.real_prefix if we are in a virtualenv.
        output = subprocess.check_output([
            environment.executable, "-c",
            "import sys; "
            "print(sys.real_prefix if hasattr(sys, 'real_prefix') else sys.base_prefix)"
        ])
        prefix = output.rstrip().decode('utf8')
        if os.name == 'nt':
            executable_path = os.path.join(prefix, 'python')
        else:
            executable_name = os.path.basename(environment.executable)
            executable_path = os.path.join(prefix, 'bin', executable_name)

        return_code = subprocess.call([executable_path, '-m', 'venv', dirname])
>       assert return_code == 0, return_code
E       AssertionError: 1
E       assert 1 == 0

test/conftest.py:128: AssertionError
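
Editor's note: this failure and the four identical ones reported below
all come from the same session-scoped venv_path fixture; a return code
of 1 from python -m venv commonly means the interpreter lacks the
venv/ensurepip machinery (typical of minimal containers). A standalone
sketch of what the fixture attempts, assuming any CPython 3 interpreter
('/tmp/venv-check' is an arbitrary path):

    import subprocess
    import sys

    # Resolve the real installation prefix, as the fixture does, so that
    # a tox virtualenv does not shadow the system interpreter.
    prefix = getattr(sys, 'real_prefix', sys.base_prefix)
    print(prefix)

    rc = subprocess.call([sys.executable, '-m', 'venv', '/tmp/venv-check'])
    print('venv return code:', rc)  # the failures here report rc == 1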

test_environment.py::test_scanning_venvs

    [same venv_path fixture failure as test_working_venv above: python -m venv exits with return code 1 at test/conftest.py:128]

test_environment.py::test_create_environment_venv_path

    [same venv_path fixture failure as test_working_venv above: python -m venv exits with return code 1 at test/conftest.py:128]

test_environment.py::test_changing_venv

    [same venv_path fixture failure as test_working_venv above: python -m venv exits with return code 1 at test/conftest.py:128]

test_imports.py::test_flask_ext[import flask.ext.foo; flask.ext.foo.-Foo]
Script = functools.partial(, environment=)
code = 'import flask.ext.foo; flask.ext.foo.', name = 'Foo'

    @pytest.mark.parametrize("code,name", [
        ("from flask.ext import foo; foo.", "Foo"),  # flask_foo.py
        ("from flask.ext import bar; bar.", "Bar"),  # flaskext/bar.py
        ("from flask.ext import baz; baz.", "Baz"),  # flask_baz/__init__.py
        ("from flask.ext import moo; moo.", "Moo"),  # flaskext/moo/__init__.py
        ("from flask.ext.", "foo"),
        ("from flask.ext.", "bar"),
        ("from flask.ext.", "baz"),
        ("from flask.ext.", "moo"),
        pytest.param("import flask.ext.foo; flask.ext.foo.", "Foo", marks=pytest.mark.xfail),
        pytest.param("import flask.ext.bar; flask.ext.bar.", "Foo", marks=pytest.mark.xfail),
        pytest.param("import flask.ext.baz; flask.ext.baz.", "Foo", marks=pytest.mark.xfail),
        pytest.param("import flask.ext.moo; flask.ext.moo.", "Foo", marks=pytest.mark.xfail),
    ])
    def test_flask_ext(Script, code, name):
        """flask.ext.foo is really imported from flaskext.foo or flask_foo.
        """
        path = get_example_dir('flask-site-packages')
        completions = Script(code, project=Project('.', sys_path=[path])).complete()
>       assert name in [c.name for c in completions]
E       AssertionError: assert 'Foo' in []

test/test_inference/test_imports.py:169: AssertionError
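
Editor's note: a sketch of the failing scenario through jedi's public
Project API; the sys_path entry must point at the flask-site-packages
example directory shipped with the jedi test data (the relative path
below is an assumption):

    import jedi

    project = jedi.Project('.', sys_path=['test/examples/flask-site-packages'])
    script = jedi.Script('import flask.ext.foo; flask.ext.foo.', project=project)
    print([c.name for c in script.complete()])  # expected to contain 'Foo'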

test_imports.py::test_flask_ext[import flask.ext.bar; flask.ext.bar.-Foo]

    [same test_flask_ext failure as the flask.ext.foo case above: the completion list for 'import flask.ext.bar; flask.ext.bar.' is empty, so 'Foo' is not found (test/test_inference/test_imports.py:169)]

test_imports.py::test_flask_ext[import flask.ext.baz; flask.ext.baz.-Foo]

    [same test_flask_ext failure as the flask.ext.foo case above: the completion list for 'import flask.ext.baz; flask.ext.baz.' is empty, so 'Foo' is not found (test/test_inference/test_imports.py:169)]

test_imports.py::test_flask_ext[import flask.ext.moo; flask.ext.moo.-Foo]

    [same test_flask_ext failure as the flask.ext.foo case above: the completion list for 'import flask.ext.moo; flask.ext.moo.' is empty, so 'Foo' is not found (test/test_inference/test_imports.py:169)]

test_precedence.py::test_equals[... == ...]
Script = functools.partial(, environment=)
environment = , source = '... == ...'

    @pytest.mark.parametrize('source', [
        pytest.param('1 == 1'),
        pytest.param('1.0 == 1'),
        # Unfortunately for now not possible, because it's a typeshed object.
        pytest.param('... == ...', marks=pytest.mark.xfail),
    ])
    def test_equals(Script, environment, source):
        script = Script(source)
        node = script._module_node.children[0]
>       first, = script._get_module_context().infer_node(node)
E       ValueError: too many values to unpack (expected 1)

test/test_inference/test_precedence.py:15: ValueError
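
Editor's note: this test errors out with ValueError instead of failing
an assertion because of the single-element unpack on the inferred
values; first, = ... requires the inferred set to hold exactly one
value, and '... == ...' apparently infers more than one. A minimal
illustration (the set literal stands in for jedi's ValueSet):

    values = {1, 2}  # stand-in for a ValueSet holding two inferred values
    try:
        first, = values
    except ValueError as exc:
        print(exc)  # too many values to unpack (expected 1)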

test_sys_path.py::test_venv_and_pths

    [same venv_path fixture failure as test_working_venv above: python -m venv exits with return code 1 at test/conftest.py:128]

test_utils.py::TestSetupReadline::test_colorama
self = 

    def test_colorama(self):
        """
        Only test it if colorama library is available.

        This module is being tested because it uses ``setattr`` at some point,
        which Jedi doesn't understand, but it should still work in the REPL.
        """
        try:
            # if colorama is installed
            import colorama
        except ImportError:
            pass
        else:
            self.namespace.colorama = colorama
>           assert self.complete('colorama')
E           AssertionError: assert []
E            +  where [] = complete('colorama')
E            +    where complete = .complete

test/test_utils.py:113: AssertionError

test_utils.py::TestSetupReadline::test_modules
self = 

    def test_modules(self):
        import sys
        import os
        self.namespace.sys = sys
        self.namespace.os = os

        try:
>           assert self.complete('os.path.join') == ['os.path.join']
E           AssertionError: assert [] == ['os.path.join']
E             
E             Right contains one more item: 'os.path.join'
E             Use -v to get more diff

test/test_utils.py:54: AssertionError

test_utils.py::TestSetupReadline::test_preexisting_values
self = 

    def test_preexisting_values(self):
        self.namespace.a = range(10)
>       assert set(self.complete('a.')) == {'a.' + n for n in dir(range(1))}
E       AssertionError: assert set() == {'a.__bool__'...__doc__', ...}
E         
E         Extra items in the right set:
E         'a.step'
E         'a.__class__'
E         'a.__le__'
E         'a.count'
E         'a.__len__'...
E         
E         ...Full output truncated (30 lines hidden), use '-vv' to show

test/test_utils.py:96: AssertionError
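
Editor's note: the three test_utils failures above all go through
jedi.utils.setup_readline, which installs a jedi-backed readline
completer for a namespace module. A minimal sketch (the module name is
arbitrary; tab completion itself only takes effect in a
readline-capable REPL):

    import types

    import jedi.utils

    ns = types.ModuleType('repl_namespace')
    jedi.utils.setup_readline(ns)
    # Names assigned on ns become tab-completable, e.g. after
    # ns.os = __import__('os'), typing os.path.jo<TAB> should complete
    # to os.path.join, which is what test_modules asserts.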

Patch diff

diff --git a/jedi/_compatibility.py b/jedi/_compatibility.py
index 976a96ed..13a74b7b 100644
--- a/jedi/_compatibility.py
+++ b/jedi/_compatibility.py
@@ -5,3 +5,28 @@ different Python versions.
 import errno
 import sys
 import pickle
+
+
+def pickle_load(file):
+    try:
+        return pickle.load(file)
+    # Python on Windows don't throw EOF errors for pipes. So reraise them with
+    # the correct type, which is caught upwards.
+    except OSError:
+        if sys.platform == 'win32':
+            raise EOFError()
+        raise
+
+
+def pickle_dump(data, file, protocol):
+    try:
+        pickle.dump(data, file, protocol)
+        # On Python 3.3 flush throws sometimes an error even though the writing
+        # operation should be completed.
+        file.flush()
+    # Python on Windows don't throw EPIPE errors for pipes. So reraise them with
+    # the correct type and error number.
+    except OSError:
+        if sys.platform == 'win32':
+            raise IOError(errno.EPIPE, "Broken pipe")
+        raise
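
Editor's note: pickle_load and pickle_dump above normalize Windows pipe
errors so that callers can handle EOFError/EPIPE uniformly. A rough
usage sketch with ordinary file objects standing in for the subprocess
pipes jedi uses (the temporary path is an assumption):

    import pickle

    from jedi._compatibility import pickle_dump, pickle_load

    with open('/tmp/payload.pkl', 'wb') as f:
        pickle_dump({'answer': 42}, f, pickle.HIGHEST_PROTOCOL)

    with open('/tmp/payload.pkl', 'rb') as f:
        print(pickle_load(f))  # {'answer': 42}
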
diff --git a/jedi/api/classes.py b/jedi/api/classes.py
index 04b944e5..7054788e 100644
--- a/jedi/api/classes.py
+++ b/jedi/api/classes.py
@@ -16,7 +16,9 @@ the interesting information about all operations.
 import re
 from pathlib import Path
 from typing import Optional
+
 from parso.tree import search_ancestor
+
 from jedi import settings
 from jedi import debug
 from jedi.inference.utils import unite
@@ -31,6 +33,10 @@ from jedi.api import completion_cache
 from jedi.api.helpers import filter_follow_imports


+def _sort_names_by_start_pos(names):
+    return sorted(names, key=lambda s: s.start_pos or (0, 0))
+
+
 def defined_names(inference_state, value):
     """
     List sub-definitions (e.g., methods in class).
@@ -38,20 +44,41 @@ def defined_names(inference_state, value):
     :type scope: Scope
     :rtype: list of Name
     """
-    pass
+    try:
+        context = value.as_context()
+    except HasNoContext:
+        return []
+    filter = next(context.get_filters())
+    names = [name for name in filter.values()]
+    return [Name(inference_state, n) for n in _sort_names_by_start_pos(names)]
+
+
+def _values_to_definitions(values):
+    return [Name(c.inference_state, c.name) for c in values]


 class BaseName:
     """
     The base class for all definitions, completions and signatures.
     """
-    _mapping = {'posixpath': 'os.path', 'riscospath': 'os.path', 'ntpath':
-        'os.path', 'os2emxpath': 'os.path', 'macpath': 'os.path',
-        'genericpath': 'os.path', 'posix': 'os', '_io': 'io', '_functools':
-        'functools', '_collections': 'collections', '_socket': 'socket',
-        '_sqlite3': 'sqlite3'}
-    _tuple_mapping = dict((tuple(k.split('.')), v) for k, v in {
-        'argparse._ActionsContainer': 'argparse.ArgumentParser'}.items())
+    _mapping = {
+        'posixpath': 'os.path',
+        'riscospath': 'os.path',
+        'ntpath': 'os.path',
+        'os2emxpath': 'os.path',
+        'macpath': 'os.path',
+        'genericpath': 'os.path',
+        'posix': 'os',
+        '_io': 'io',
+        '_functools': 'functools',
+        '_collections': 'collections',
+        '_socket': 'socket',
+        '_sqlite3': 'sqlite3',
+    }
+
+    _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
+        'argparse._ActionsContainer': 'argparse.ArgumentParser',
+    }.items())

     def __init__(self, inference_state, name):
         self._inference_state = inference_state
@@ -61,12 +88,26 @@ class BaseName:
         """
         self.is_keyword = isinstance(self._name, KeywordName)

+    @memoize_method
+    def _get_module_context(self):
+        # This can take a while to complete, because in the worst case of
+        # imports (consider `import a` completions), we need to load all
+        # modules starting with a first.
+        return self._name.get_root_context()
+
     @property
-    def module_path(self) ->Optional[Path]:
+    def module_path(self) -> Optional[Path]:
         """
         Shows the file path of a module. e.g. ``/usr/lib/python3.9/os.py``
         """
-        pass
+        module = self._get_module_context()
+        if module.is_stub() or not module.is_compiled():
+            # Compiled modules should not return a module path even if they
+            # have one.
+            path: Optional[Path] = self._get_module_context().py__file__()
+            return path
+
+        return None

     @property
     def name(self):
@@ -77,7 +118,7 @@ class BaseName:

         :rtype: str or None
         """
-        pass
+        return self._name.get_public_name()

     @property
     def type(self):
@@ -136,7 +177,19 @@ class BaseName:
         ``param``, ``path``, ``keyword``, ``property`` and ``statement``.

         """
-        pass
+        tree_name = self._name.tree_name
+        resolve = False
+        if tree_name is not None:
+            # TODO move this to their respective names.
+            definition = tree_name.get_definition()
+            if definition is not None and definition.type == 'import_from' and \
+                    tree_name.is_definition():
+                resolve = True
+
+        if isinstance(self._name, SubModuleName) or resolve:
+            for value in self._name.infer():
+                return value.api_type
+        return self._name.api_type

     @property
     def module_name(self):
@@ -151,23 +204,32 @@ class BaseName:
         >>> print(d.module_name)  # doctest: +ELLIPSIS
         json
         """
-        pass
+        return self._get_module_context().py__name__()

     def in_builtin_module(self):
         """
         Returns True, if this is a builtin module.
         """
-        pass
+        value = self._get_module_context().get_value()
+        if isinstance(value, StubModuleValue):
+            return any(v.is_compiled() for v in value.non_stub_value_set)
+        return value.is_compiled()

     @property
     def line(self):
         """The line where the definition occurs (starting with 1)."""
-        pass
+        start_pos = self._name.start_pos
+        if start_pos is None:
+            return None
+        return start_pos[0]

     @property
     def column(self):
         """The column where the definition occurs (starting with 0)."""
-        pass
+        start_pos = self._name.start_pos
+        if start_pos is None:
+            return None
+        return start_pos[1]

     def get_definition_start_position(self):
         """
@@ -176,7 +238,12 @@ class BaseName:

         :rtype: Optional[Tuple[int, int]]
         """
-        pass
+        if self._name.tree_name is None:
+            return None
+        definition = self._name.tree_name.get_definition()
+        if definition is None:
+            return self._name.start_pos
+        return definition.start_pos

     def get_definition_end_position(self):
         """
@@ -185,16 +252,26 @@ class BaseName:

         :rtype: Optional[Tuple[int, int]]
         """
-        pass
+        if self._name.tree_name is None:
+            return None
+        definition = self._name.tree_name.get_definition()
+        if definition is None:
+            return self._name.tree_name.end_pos
+        if self.type in ("function", "class"):
+            last_leaf = definition.get_last_leaf()
+            if last_leaf.type == "newline":
+                return last_leaf.get_previous_leaf().end_pos
+            return last_leaf.end_pos
+        return definition.end_pos

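Both position helpers are public API. A quick illustration (exact positions may differ slightly between jedi versions):

    import jedi
    name, = jedi.Script('def foo(x):\n    return x\n').infer(1, 4)
    # For functions the reported span covers the whole funcdef, without the
    # trailing newline:
    print(name.get_definition_start_position())  # e.g. (1, 0)
    print(name.get_definition_end_position())    # e.g. (2, 12)
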
     def docstring(self, raw=False, fast=True):
-        """
+        r"""
         Return a document string for this completion object.

         Example:

         >>> from jedi import Script
-        >>> source = '''\\
+        >>> source = '''\
         ... def f(a, b=1):
         ...     "Document for function f."
         ... '''
@@ -218,7 +295,26 @@ class BaseName:
             the ``foo.docstring(fast=False)`` on every object, because it
             parses all libraries starting with ``a``.
         """
-        pass
+        if isinstance(self._name, ImportName) and fast:
+            return ''
+        doc = self._get_docstring()
+        if raw:
+            return doc
+
+        signature_text = self._get_docstring_signature()
+        if signature_text and doc:
+            return signature_text + '\n\n' + doc
+        else:
+            return signature_text + doc
+
+    def _get_docstring(self):
+        return self._name.py__doc__()
+
+    def _get_docstring_signature(self):
+        return '\n'.join(
+            signature.to_string()
+            for signature in self._get_signatures(for_docstring=True)
+        )

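The rendered docstring is therefore the signature text, a blank line, then the raw docstring. For example:

    import jedi
    src = 'def f(a, b=1):\n    "Document for function f."\n'
    d, = jedi.Script(src).infer(1, 4)
    print(repr(d.docstring()))           # 'f(a, b=1)\n\nDocument for function f.'
    print(repr(d.docstring(raw=True)))   # 'Document for function f.'
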
     @property
     def description(self):
@@ -249,7 +345,25 @@ class BaseName:
         'class C'

         """
-        pass
+        typ = self.type
+        tree_name = self._name.tree_name
+        if typ == 'param':
+            return typ + ' ' + self._name.to_string()
+        if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
+            if typ == 'function':
+                # For the description we want a short and a pythonic way.
+                typ = 'def'
+            return typ + ' ' + self._name.get_public_name()
+
+        definition = tree_name.get_definition(include_setitem=True) or tree_name
+        # Remove the prefix, because that's not what we want for get_code
+        # here.
+        txt = definition.get_code(include_prefix=False)
+        # Delete comments:
+        txt = re.sub(r'#[^\n]+\n', ' ', txt)
+        # Delete multi spaces/newlines
+        txt = re.sub(r'\s+', ' ', txt).strip()
+        return txt

     @property
     def full_name(self):
@@ -275,24 +389,44 @@ class BaseName:
         be ``<module 'posixpath' ...>```. However most users find the latter
         more practical.
         """
-        pass
+        if not self._name.is_value_name:
+            return None
+
+        names = self._name.get_qualified_names(include_module_names=True)
+        if names is None:
+            return None
+
+        names = list(names)
+        try:
+            names[0] = self._mapping[names[0]]
+        except KeyError:
+            pass
+
+        return '.'.join(names)

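The effect of the ``_mapping`` normalization is easiest to see end to end:

    import jedi
    d, = jedi.Script('from os.path import join\njoin').infer(2, 0)
    # Reported under the public module name rather than the implementation
    # module (posixpath on Unix):
    print(d.full_name)  # 'os.path.join'
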
     def is_stub(self):
         """
         Returns True if the current name is defined in a stub file.
         """
-        pass
+        if not self._name.is_value_name:
+            return False
+
+        return self._name.get_root_context().is_stub()

     def is_side_effect(self):
         """
         Checks if a name is defined as ``self.foo = 3``. In case of self, this
         function would return False, for foo it would return True.
         """
-        pass
+        tree_name = self._name.tree_name
+        if tree_name is None:
+            return False
+        return tree_name.is_definition() and tree_name.parent.type == 'trailer'

     @debug.increase_indent_cm('goto on name')
     def goto(self, *, follow_imports=False, follow_builtin_imports=False,
-        only_stubs=False, prefer_stubs=False):
+             only_stubs=False, prefer_stubs=False):
         """
         Like :meth:`.Script.goto` (also supports the same params), but does it
         for the current name. This is typically useful if you are using
@@ -305,7 +439,19 @@ class BaseName:
         :param prefer_stubs: Prefer stubs to Python objects for this goto call.
         :rtype: list of :class:`Name`
         """
-        pass
+        if not self._name.is_value_name:
+            return []
+
+        names = self._name.goto()
+        if follow_imports:
+            names = filter_follow_imports(names, follow_builtin_imports)
+        names = convert_names(
+            names,
+            only_stubs=only_stubs,
+            prefer_stubs=prefer_stubs,
+        )
+        return [self if n == self._name else Name(self._inference_state, n)
+                for n in names]

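A typical chain: ``Script.goto`` stops at the import, after which ``Name.goto`` can follow it to the actual definition (results depend on the environment, so treat the output as illustrative):

    import jedi
    src = 'from json import loads\nloads'
    imported, = jedi.Script(src).goto(2, 0)         # lands on the import line
    results = imported.goto(follow_imports=True)    # follows it onwards
    print([r.module_name for r in results])         # e.g. ['json']
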
     @debug.increase_indent_cm('infer on name')
     def infer(self, *, only_stubs=False, prefer_stubs=False):
@@ -325,7 +471,23 @@ class BaseName:
             inference call.
         :rtype: list of :class:`Name`
         """
-        pass
+        assert not (only_stubs and prefer_stubs)
+
+        if not self._name.is_value_name:
+            return []
+
+        # First we need to make sure that we have stub names (if possible) that
+        # we can follow. If we don't do that, we can end up with the inferred
+        # results of Python objects instead of stubs.
+        names = convert_names([self._name], prefer_stubs=True)
+        values = convert_values(
+            ValueSet.from_sets(n.infer() for n in names),
+            only_stubs=only_stubs,
+            prefer_stubs=prefer_stubs,
+        )
+        resulting_names = [c.name for c in values]
+        return [self if n == self._name else Name(self._inference_state, n)
+                for n in resulting_names]

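Since the implementation converts to stub names first, stub-backed results can also be requested explicitly. A short sketch (whether a stub is found depends on the installed typeshed data):

    import jedi
    d, = jedi.Script('import json\njson.loads').infer(2, 5, prefer_stubs=True)
    print(d.is_stub())  # typically True when typeshed stubs are available
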
     def parent(self):
         """
@@ -333,12 +495,40 @@ class BaseName:

         :rtype: Name
         """
-        pass
+        if not self._name.is_value_name:
+            return None
+
+        if self.type in ('function', 'class', 'param') and self._name.tree_name is not None:
+            # Since the parent_context doesn't really match what the user
+            # would consider the parent to be here, we handle these cases
+            # separately.
+            # The reason for this is the following:
+            # - class: Nested classes parent_context is always the
+            #   parent_context of the most outer one.
+            # - function: Functions in classes have the module as
+            #   parent_context.
+            # - param: The parent_context of a param is not its function but
+            #   e.g. the outer class or module.
+            cls_or_func_node = self._name.tree_name.get_definition()
+            parent = search_ancestor(cls_or_func_node, 'funcdef', 'classdef', 'file_input')
+            context = self._get_module_context().create_value(parent).as_context()
+        else:
+            context = self._name.parent_context
+
+        if context is None:
+            return None
+        while context.name is None:
+            # Happens for comprehension contexts
+            context = context.parent_context
+
+        return Name(self._inference_state, context.name)

     def __repr__(self):
-        return '<%s %sname=%r, description=%r>' % (self.__class__.__name__,
-            'full_' if self.full_name else '', self.full_name or self.name,
-            self.description)
+        return "<%s %sname=%r, description=%r>" % (
+            self.__class__.__name__,
+            'full_' if self.full_name else '',
+            self.full_name or self.name,
+            self.description,
+        )

     def get_line_code(self, before=0, after=0):
         """
@@ -350,7 +540,34 @@ class BaseName:
         :return str: Returns the line(s) of code or an empty string if it's a
                      builtin.
         """
-        pass
+        if not self._name.is_value_name:
+            return ''
+
+        lines = self._name.get_root_context().code_lines
+        if lines is None:
+            # Probably a builtin module, just ignore in that case.
+            return ''
+
+        index = self._name.start_pos[0] - 1
+        start_index = max(index - before, 0)
+        return ''.join(lines[start_index:index + after + 1])
+
+    def _get_signatures(self, for_docstring=False):
+        if self._name.api_type == 'property':
+            return []
+        if for_docstring and self._name.api_type == 'statement' and not self.is_stub():
+            # For docstrings we don't resolve signatures if they are simple
+            # statements and not stubs. This is a speed optimization.
+            return []
+
+        if isinstance(self._name, MixedName):
+            # While this would eventually happen anyway, it's basically just a
+            # shortcut to not infer anything tree related, because it's really
+            # not necessary.
+            return self._name.infer_compiled_value().get_signatures()
+
+        names = convert_names([self._name], prefer_stubs=True)
+        return [sig for name in names for sig in name.infer().get_signatures()]

     def get_signatures(self):
         """
@@ -359,7 +576,10 @@ class BaseName:

         :rtype: list of :class:`BaseSignature`
         """
-        pass
+        return [
+            BaseSignature(self._inference_state, s)
+            for s in self._get_signatures()
+        ]

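Retrieving signatures from a name rather than from a call position looks like this:

    import jedi
    d, = jedi.Script('def f(a, b=1): pass\nf').infer(2, 0)
    for sig in d.get_signatures():
        print(sig.to_string())               # e.g. 'f(a, b=1)'
        print([p.name for p in sig.params])  # ['a', 'b']
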
     def execute(self):
         """
@@ -368,7 +588,7 @@ class BaseName:

         :rtype: list of :class:`Name`
         """
-        pass
+        return _values_to_definitions(self._name.infer().execute_with_values())

     def get_type_hint(self):
         """
@@ -380,7 +600,7 @@ class BaseName:

         :rtype: str
         """
-        pass
+        return self._name.infer().get_type_hint()


 class Completion(BaseName):
@@ -388,16 +608,30 @@ class Completion(BaseName):
     ``Completion`` objects are returned from :meth:`.Script.complete`. They
     provide additional information about a completion.
     """
-
     def __init__(self, inference_state, name, stack, like_name_length,
-        is_fuzzy, cached_name=None):
+                 is_fuzzy, cached_name=None):
         super().__init__(inference_state, name)
+
         self._like_name_length = like_name_length
         self._stack = stack
         self._is_fuzzy = is_fuzzy
         self._cached_name = cached_name
+
+        # Completion objects that share the same name, i.e. duplicate
+        # entries in the completion list.
         self._same_name_completions = []

+    def _complete(self, like_name):
+        append = ''
+        if settings.add_bracket_after_function \
+                and self.type == 'function':
+            append = '('
+
+        name = self._name.get_public_name()
+        if like_name:
+            name = name[self._like_name_length:]
+        return name + append
+
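``_complete`` backs both public properties below; the only difference is whether the already-typed prefix is stripped:

    import jedi
    comps = jedi.Script('import collections\ncollections.named').complete(2, 17)
    c = next(c for c in comps if c.name == 'namedtuple')
    print(c.complete)            # 'tuple', only the part still missing
    print(c.name_with_symbols)   # 'namedtuple'
    # With settings.add_bracket_after_function = True, a '(' is appended.
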
     @property
     def complete(self):
         """
@@ -419,7 +653,9 @@ class Completion(BaseName):
         completing ``foo(par`` would give a ``Completion`` which ``complete``
         would be ``am=``.
         """
-        pass
+        if self._is_fuzzy:
+            return None
+        return self._complete(True)

     @property
     def name_with_symbols(self):
@@ -434,20 +670,58 @@ class Completion(BaseName):
         ``name_with_symbols`` would be "param=".

         """
-        pass
+        return self._complete(False)

     def docstring(self, raw=False, fast=True):
         """
         Documented under :meth:`BaseName.docstring`.
         """
-        pass
+        if self._like_name_length >= 3:
+            # In this case we can just resolve the like name, because we
+            # wouldn't load like > 100 Python modules anymore.
+            fast = False
+
+        return super().docstring(raw=raw, fast=fast)
+
+    def _get_docstring(self):
+        if self._cached_name is not None:
+            return completion_cache.get_docstring(
+                self._cached_name,
+                self._name.get_public_name(),
+                lambda: self._get_cache()
+            )
+        return super()._get_docstring()
+
+    def _get_docstring_signature(self):
+        if self._cached_name is not None:
+            return completion_cache.get_docstring_signature(
+                self._cached_name,
+                self._name.get_public_name(),
+                lambda: self._get_cache()
+            )
+        return super()._get_docstring_signature()
+
+    def _get_cache(self):
+        return (
+            super().type,
+            super()._get_docstring_signature(),
+            super()._get_docstring(),
+        )

     @property
     def type(self):
         """
         Documented under :meth:`BaseName.type`.
         """
-        pass
+        # Purely a speed optimization.
+        if self._cached_name is not None:
+            return completion_cache.get_type(
+                self._cached_name,
+                self._name.get_public_name(),
+                lambda: self._get_cache()
+            )
+
+        return super().type

     def get_completion_prefix_length(self):
         """
@@ -465,7 +739,7 @@ class Completion(BaseName):

         completing ``foo(par`` would return 3.
         """
-        pass
+        return self._like_name_length

     def __repr__(self):
         return '<%s: %s>' % (type(self).__name__, self._name.get_public_name())
@@ -476,7 +750,6 @@ class Name(BaseName):
     *Name* objects are returned from many different APIs including
     :meth:`.Script.goto` or :meth:`.Script.infer`.
     """
-
     def __init__(self, inference_state, definition):
         super().__init__(inference_state, definition)

@@ -487,26 +760,33 @@ class Name(BaseName):

         :rtype: list of :class:`Name`
         """
-        pass
+        defs = self._name.infer()
+        return sorted(
+            unite(defined_names(self._inference_state, d) for d in defs),
+            key=lambda s: s._name.start_pos or (0, 0)
+        )

     def is_definition(self):
         """
         Returns True, if defined as a name in a statement, function or class.
         Returns False, if it's a reference to such a definition.
         """
-        pass
+        if self._name.tree_name is None:
+            return True
+        else:
+            return self._name.tree_name.is_definition()

     def __eq__(self, other):
-        return (self._name.start_pos == other._name.start_pos and self.
-            module_path == other.module_path and self.name == other.name and
-            self._inference_state == other._inference_state)
+        return self._name.start_pos == other._name.start_pos \
+            and self.module_path == other.module_path \
+            and self.name == other.name \
+            and self._inference_state == other._inference_state

     def __ne__(self, other):
         return not self.__eq__(other)

     def __hash__(self):
-        return hash((self._name.start_pos, self.module_path, self.name,
-            self._inference_state))
+        return hash((self._name.start_pos, self.module_path, self.name, self._inference_state))


 class BaseSignature(Name):
@@ -514,7 +794,6 @@ class BaseSignature(Name):
     These signatures are returned by :meth:`BaseName.get_signatures`
     calls.
     """
-
     def __init__(self, inference_state, signature):
         super().__init__(inference_state, signature.name)
         self._signature = signature
@@ -527,7 +806,8 @@ class BaseSignature(Name):

         :rtype: list of :class:`.ParamName`
         """
-        pass
+        return [ParamName(self._inference_state, n)
+                for n in self._signature.get_param_names(resolve_stars=True)]

     def to_string(self):
         """
@@ -536,7 +816,7 @@ class BaseSignature(Name):

         :rtype: str
         """
-        pass
+        return self._signature.to_string()


 class Signature(BaseSignature):
@@ -544,7 +824,6 @@ class Signature(BaseSignature):
     A full signature object is the return value of
     :meth:`.Script.get_signatures`.
     """
-
     def __init__(self, inference_state, signature, call_details):
         super().__init__(inference_state, signature)
         self._call_details = call_details
@@ -558,7 +837,9 @@ class Signature(BaseSignature):

         :rtype: int
         """
-        pass
+        return self._call_details.calculate_index(
+            self._signature.get_param_names(resolve_stars=True)
+        )

     @property
     def bracket_start(self):
@@ -568,22 +849,24 @@ class Signature(BaseSignature):

         :rtype: int, int
         """
-        pass
+        return self._call_details.bracket_leaf.start_pos

     def __repr__(self):
-        return '<%s: index=%r %s>' % (type(self).__name__, self.index, self
-            ._signature.to_string())
+        return '<%s: index=%r %s>' % (
+            type(self).__name__,
+            self.index,
+            self._signature.to_string(),
+        )


 class ParamName(Name):
-
     def infer_default(self):
         """
         Returns default values like the ``1`` of ``def foo(x=1):``.

         :rtype: list of :class:`.Name`
         """
-        pass
+        return _values_to_definitions(self._name.infer_default())

     def infer_annotation(self, **kwargs):
         """
@@ -591,7 +874,7 @@ class ParamName(Name):
             executed and classes are returned instead of instances.
         :rtype: list of :class:`.Name`
         """
-        pass
+        return _values_to_definitions(self._name.infer_annotation(ignore_stars=True, **kwargs))

     def to_string(self):
         """
@@ -600,7 +883,7 @@ class ParamName(Name):

         :rtype: str
         """
-        pass
+        return self._name.to_string()

     @property
     def kind(self):
@@ -609,4 +892,4 @@ class ParamName(Name):

         :rtype: :py:attr:`inspect.Parameter.kind`
         """
-        pass
+        return self._name.get_kind()
diff --git a/jedi/api/completion.py b/jedi/api/completion.py
index 2aa05f9a..342f7506 100644
--- a/jedi/api/completion.py
+++ b/jedi/api/completion.py
@@ -1,10 +1,12 @@
 import re
 from textwrap import dedent
 from inspect import Parameter
+
 from parso.python.token import PythonTokenTypes
 from parso.python import tree
 from parso.tree import search_ancestor, Leaf
 from parso import split_lines
+
 from jedi import debug
 from jedi import settings
 from jedi.api import classes
@@ -25,30 +27,160 @@ from jedi.plugins import plugin_manager


 class ParamNameWithEquals(ParamNameWrapper):
-    pass
+    def get_public_name(self):
+        return self.string_name + '='
+
+
+def _get_signature_param_names(signatures, positional_count, used_kwargs):
+    # Add named params
+    for call_sig in signatures:
+        for i, p in enumerate(call_sig.params):
+            kind = p.kind
+            if i < positional_count and kind == Parameter.POSITIONAL_OR_KEYWORD:
+                continue
+            if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) \
+                    and p.name not in used_kwargs:
+                yield ParamNameWithEquals(p._name)
+
+
+def _must_be_kwarg(signatures, positional_count, used_kwargs):
+    if used_kwargs:
+        return True
+
+    must_be_kwarg = True
+    for signature in signatures:
+        for i, p in enumerate(signature.params):
+            kind = p.kind
+            if kind is Parameter.VAR_POSITIONAL:
+                # In case there were not already kwargs, the next param can
+                # always be a normal argument.
+                return False
+
+            if i >= positional_count and kind in (Parameter.POSITIONAL_OR_KEYWORD,
+                                                  Parameter.POSITIONAL_ONLY):
+                must_be_kwarg = False
+                break
+        if not must_be_kwarg:
+            break
+    return must_be_kwarg
+
+
+def filter_names(inference_state, completion_names, stack, like_name, fuzzy, cached_name):
+    comp_dct = set()
+    if settings.case_insensitive_completion:
+        like_name = like_name.lower()
+    for name in completion_names:
+        string = name.string_name
+        if settings.case_insensitive_completion:
+            string = string.lower()
+        if helpers.match(string, like_name, fuzzy=fuzzy):
+            new = classes.Completion(
+                inference_state,
+                name,
+                stack,
+                len(like_name),
+                is_fuzzy=fuzzy,
+                cached_name=cached_name,
+            )
+            k = (new.name, new.complete)  # key
+            if k not in comp_dct:
+                comp_dct.add(k)
+                tree_name = name.tree_name
+                if tree_name is not None:
+                    definition = tree_name.get_definition()
+                    if definition is not None and definition.type == 'del_stmt':
+                        continue
+                yield new
+
+
+def _remove_duplicates(completions, other_completions):
+    names = {d.name for d in other_completions}
+    return [c for c in completions if c.name not in names]


 def get_user_context(module_context, position):
     """
     Returns the scope in which the user resides. This includes flows.
     """
-    pass
+    leaf = module_context.tree_node.get_leaf_for_position(position, include_prefixes=True)
+    return module_context.create_context(leaf)


-class Completion:
+def get_flow_scope_node(module_node, position):
+    node = module_node.get_leaf_for_position(position, include_prefixes=True)
+    while not isinstance(node, (tree.Scope, tree.Flow)):
+        node = node.parent
+
+    return node
+

-    def __init__(self, inference_state, module_context, code_lines,
-        position, signatures_callback, fuzzy=False):
+@plugin_manager.decorate()
+def complete_param_names(context, function_name, decorator_nodes):
+    # Basically there's no way to do param completion. The plugins are
+    # responsible for this.
+    return []
+
+
+class Completion:
+    def __init__(self, inference_state, module_context, code_lines, position,
+                 signatures_callback, fuzzy=False):
         self._inference_state = inference_state
         self._module_context = module_context
         self._module_node = module_context.tree_node
         self._code_lines = code_lines
-        self._like_name = helpers.get_on_completion_name(self._module_node,
-            code_lines, position)
+
+        # The first step of completions is to get the name
+        self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position)
+        # The actual cursor position is not what we need to calculate
+        # everything. We want the start of the name we're on.
         self._original_position = position
         self._signatures_callback = signatures_callback
+
         self._fuzzy = fuzzy

+    def complete(self):
+        leaf = self._module_node.get_leaf_for_position(
+            self._original_position,
+            include_prefixes=True
+        )
+        string, start_leaf, quote = _extract_string_while_in_string(leaf, self._original_position)
+
+        prefixed_completions = complete_dict(
+            self._module_context,
+            self._code_lines,
+            start_leaf or leaf,
+            self._original_position,
+            None if string is None else quote + string,
+            fuzzy=self._fuzzy,
+        )
+
+        if string is not None and not prefixed_completions:
+            prefixed_completions = list(complete_file_name(
+                self._inference_state, self._module_context, start_leaf, quote, string,
+                self._like_name, self._signatures_callback,
+                self._code_lines, self._original_position,
+                self._fuzzy
+            ))
+        if string is not None:
+            if not prefixed_completions and '\n' in string:
+                # Complete only multi line strings
+                prefixed_completions = self._complete_in_string(start_leaf, string)
+            return prefixed_completions
+
+        cached_name, completion_names = self._complete_python(leaf)
+
+        completions = list(filter_names(self._inference_state, completion_names,
+                                        self.stack, self._like_name,
+                                        self._fuzzy, cached_name=cached_name))
+
+        return (
+            # Removing duplicates mostly to remove False/True/None duplicates.
+            _remove_duplicates(prefixed_completions, completions)
+            + sorted(completions, key=lambda x: (x.name.startswith('__'),
+                                                 x.name.startswith('_'),
+                                                 x.name.lower()))
+        )
+
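The sort key above pushes private and dunder names to the end while keeping each group case-insensitively alphabetical; as a standalone snippet:

    names = ['__repr__', 'value', '_cache', 'append']
    names.sort(key=lambda x: (x.startswith('__'), x.startswith('_'), x.lower()))
    assert names == ['append', 'value', '_cache', '__repr__']
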
     def _complete_python(self, leaf):
         """
         Analyzes the current context of a completion and decides what to
@@ -63,13 +195,242 @@ class Completion:
         - In args: */**: no completion
         - In params (also lambda): no completion before =
         """
-        pass
+        grammar = self._inference_state.grammar
+        self.stack = stack = None
+        self._position = (
+            self._original_position[0],
+            self._original_position[1] - len(self._like_name)
+        )
+        cached_name = None
+
+        try:
+            self.stack = stack = helpers.get_stack_at_position(
+                grammar, self._code_lines, leaf, self._position
+            )
+        except helpers.OnErrorLeaf as e:
+            value = e.error_leaf.value
+            if value == '.':
+                # After ErrorLeaf's that are dots, we will not do any
+                # completions since this probably just confuses the user.
+                return cached_name, []
+
+            # If we don't have a value, just use global completion.
+            return cached_name, self._complete_global_scope()
+
+        allowed_transitions = \
+            list(stack._allowed_transition_names_and_token_types())
+
+        if 'if' in allowed_transitions:
+            leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
+            previous_leaf = leaf.get_previous_leaf()
+
+            indent = self._position[1]
+            if not (leaf.start_pos <= self._position <= leaf.end_pos):
+                indent = leaf.start_pos[1]
+
+            if previous_leaf is not None:
+                stmt = previous_leaf
+                while True:
+                    stmt = search_ancestor(
+                        stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt',
+                        'error_node',
+                    )
+                    if stmt is None:
+                        break
+
+                    type_ = stmt.type
+                    if type_ == 'error_node':
+                        first = stmt.children[0]
+                        if isinstance(first, Leaf):
+                            type_ = first.value + '_stmt'
+                    # Compare indents
+                    if stmt.start_pos[1] == indent:
+                        if type_ == 'if_stmt':
+                            allowed_transitions += ['elif', 'else']
+                        elif type_ == 'try_stmt':
+                            allowed_transitions += ['except', 'finally', 'else']
+                        elif type_ == 'for_stmt':
+                            allowed_transitions.append('else')
+
+        completion_names = []
+
+        kwargs_only = False
+        if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
+                                                  PythonTokenTypes.INDENT)):
+            # This means that we actually have to do type inference.
+
+            nonterminals = [stack_node.nonterminal for stack_node in stack]
+
+            nodes = _gather_nodes(stack)
+            if nodes and nodes[-1] in ('as', 'def', 'class'):
+                # No completions for ``with x as foo`` and ``import x as foo``.
+                # Also true for defining names as a class or function.
+                return cached_name, list(self._complete_inherited(is_function=True))
+            elif "import_stmt" in nonterminals:
+                level, names = parse_dotted_names(nodes, "import_from" in nonterminals)
+
+                only_modules = not ("import_from" in nonterminals and 'import' in nodes)
+                completion_names += self._get_importer_names(
+                    names,
+                    level,
+                    only_modules=only_modules,
+                )
+            elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
+                dot = self._module_node.get_leaf_for_position(self._position)
+                if dot.type == "endmarker":
+                    # This is a bit of a weird edge case, maybe we can somehow
+                    # generalize this.
+                    dot = leaf.get_previous_leaf()
+                cached_name, n = self._complete_trailer(dot.get_previous_leaf())
+                completion_names += n
+            elif self._is_parameter_completion():
+                completion_names += self._complete_params(leaf)
+            else:
+                # Apparently this looks like it's good enough to filter most cases
+                # so that signature completions don't randomly appear.
+                # To understand why this works, three things are important:
+                # 1. trailer with a `,` in it is either a subscript or an arglist.
+                # 2. If there's no `,`, it's at the start and only signatures start
+                #    with `(`. Other trailers could start with `.` or `[`.
+                # 3. Decorators are very primitive and have an optional `(` with
+                #    optional arglist in them.
+                if nodes[-1] in ['(', ','] \
+                        and nonterminals[-1] in ('trailer', 'arglist', 'decorator'):
+                    signatures = self._signatures_callback(*self._position)
+                    if signatures:
+                        call_details = signatures[0]._call_details
+                        used_kwargs = list(call_details.iter_used_keyword_arguments())
+                        positional_count = call_details.count_positional_arguments()
+
+                        completion_names += _get_signature_param_names(
+                            signatures,
+                            positional_count,
+                            used_kwargs,
+                        )
+
+                        kwargs_only = _must_be_kwarg(signatures, positional_count, used_kwargs)
+
+                if not kwargs_only:
+                    completion_names += self._complete_global_scope()
+                    completion_names += self._complete_inherited(is_function=False)
+
+        if not kwargs_only:
+            current_line = self._code_lines[self._position[0] - 1][:self._position[1]]
+            completion_names += self._complete_keywords(
+                allowed_transitions,
+                only_values=not (not current_line or current_line[-1] in ' \t.;'
+                                 and current_line[-3:] != '...')
+            )
+
+        return cached_name, completion_names
+
+    def _is_parameter_completion(self):
+        tos = self.stack[-1]
+        if tos.nonterminal == 'lambdef' and len(tos.nodes) == 1:
+            # We are at the position `lambda `, where basically the next node
+            # is a param.
+            return True
+        if tos.nonterminal in 'parameters':
+            # Basically we are at the position `foo(`, there's nothing there
+            # yet, so we have no `typedargslist`.
+            return True
+        # var args is for lambdas and typed args for normal functions
+        return tos.nonterminal in ('typedargslist', 'varargslist') and tos.nodes[-1] == ','
+
+    def _complete_params(self, leaf):
+        stack_node = self.stack[-2]
+        if stack_node.nonterminal == 'parameters':
+            stack_node = self.stack[-3]
+        if stack_node.nonterminal == 'funcdef':
+            context = get_user_context(self._module_context, self._position)
+            node = search_ancestor(leaf, 'error_node', 'funcdef')
+            if node is not None:
+                if node.type == 'error_node':
+                    n = node.children[0]
+                    if n.type == 'decorators':
+                        decorators = n.children
+                    elif n.type == 'decorator':
+                        decorators = [n]
+                    else:
+                        decorators = []
+                else:
+                    decorators = node.get_decorators()
+                function_name = stack_node.nodes[1]
+
+                return complete_param_names(context, function_name.value, decorators)
+        return []
+
+    def _complete_keywords(self, allowed_transitions, only_values):
+        for k in allowed_transitions:
+            if isinstance(k, str) and k.isalpha():
+                if not only_values or k in ('True', 'False', 'None'):
+                    yield keywords.KeywordName(self._inference_state, k)
+
+    def _complete_global_scope(self):
+        context = get_user_context(self._module_context, self._position)
+        debug.dbg('global completion scope: %s', context)
+        flow_scope_node = get_flow_scope_node(self._module_node, self._position)
+        filters = get_global_filters(
+            context,
+            self._position,
+            flow_scope_node
+        )
+        completion_names = []
+        for filter in filters:
+            completion_names += filter.values()
+        return completion_names
+
+    def _complete_trailer(self, previous_leaf):
+        inferred_context = self._module_context.create_context(previous_leaf)
+        values = infer_call_of_leaf(inferred_context, previous_leaf)
+        debug.dbg('trailer completion values: %s', values, color='MAGENTA')
+
+        # The cached name exists purely to enable speed optimizations for
+        # certain modules.
+        cached_name = None
+        if len(values) == 1:
+            v, = values
+            if v.is_module():
+                if len(v.string_names) == 1:
+                    module_name = v.string_names[0]
+                    if module_name in ('numpy', 'tensorflow', 'matplotlib', 'pandas'):
+                        cached_name = module_name
+
+        return cached_name, self._complete_trailer_for_values(values)
+
+    def _complete_trailer_for_values(self, values):
+        user_context = get_user_context(self._module_context, self._position)
+
+        return complete_trailer(user_context, values)
+
+    def _get_importer_names(self, names, level=0, only_modules=True):
+        names = [n.value for n in names]
+        i = imports.Importer(self._inference_state, names, self._module_context, level)
+        return i.completion_names(self._inference_state, only_modules=only_modules)

     def _complete_inherited(self, is_function=True):
         """
         Autocomplete inherited methods when overriding in child class.
         """
-        pass
+        leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
+        cls = tree.search_ancestor(leaf, 'classdef')
+        if cls is None:
+            return
+
+        # Complete the methods that are defined in the super classes.
+        class_value = self._module_context.create_value(cls)
+
+        if cls.start_pos[1] >= leaf.start_pos[1]:
+            return
+
+        filters = class_value.get_filters(is_instance=True)
+        # The first dict is the dictionary of class itself.
+        next(filters)
+        for filter in filters:
+            for name in filter.values():
+                # TODO we should probably check here for properties
+                if (name.api_type == 'function') == is_function:
+                    yield name

     def _complete_in_string(self, start_leaf, string):
         """
@@ -81,10 +442,122 @@ class Completion:
         - Having some doctest code that starts with `>>>`
         - Having backticks that doesn't have whitespace inside it
         """
-        pass
+        def iter_relevant_lines(lines):
+            include_next_line = False
+            for l in lines:
+                if include_next_line or l.startswith('>>>') or l.startswith(' '):
+                    yield re.sub(r'^( *>>> ?| +)', '', l)
+                else:
+                    yield None
+
+                include_next_line = bool(re.match(' *>>>', l))
+
+        string = dedent(string)
+        code_lines = split_lines(string, keepends=True)
+        relevant_code_lines = list(iter_relevant_lines(code_lines))
+        if relevant_code_lines[-1] is not None:
+            # Some code lines might be None; replace them with plain newlines.
+            relevant_code_lines = ['\n' if c is None else c for c in relevant_code_lines]
+            return self._complete_code_lines(relevant_code_lines)
+        match = re.search(r'`([^`\s]+)', code_lines[-1])
+        if match:
+            return self._complete_code_lines([match.group(1)])
+        return []

+    def _complete_code_lines(self, code_lines):
+        module_node = self._inference_state.grammar.parse(''.join(code_lines))
+        module_value = DocstringModule(
+            in_module_context=self._module_context,
+            inference_state=self._inference_state,
+            module_node=module_node,
+            code_lines=code_lines,
+        )
+        return Completion(
+            self._inference_state,
+            module_value.as_context(),
+            code_lines=code_lines,
+            position=module_node.end_pos,
+            signatures_callback=lambda *args, **kwargs: [],
+            fuzzy=self._fuzzy
+        ).complete()

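Put together, this is what makes completions inside doctests work. A sketch (the exact completion list depends on the interpreter):

    import jedi
    src = 'def f():\n    """\n    >>> import col\n    """\n'
    names = [c.name for c in jedi.Script(src).complete(3, 18)]
    print('collections' in names)  # expected: True
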
-_string_start = re.compile('^\\w*(\\\'{3}|"{3}|\\\'|")')
+
+def _gather_nodes(stack):
+    nodes = []
+    for stack_node in stack:
+        if stack_node.dfa.from_rule == 'small_stmt':
+            nodes = []
+        else:
+            nodes += stack_node.nodes
+    return nodes
+
+
+_string_start = re.compile(r'^\w*(\'{3}|"{3}|\'|")')
+
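The pattern accepts an optional alphanumeric prefix (`r`, `b`, `f`, `u` and combinations) followed by any of the four quote forms; for example:

    print(_string_start.match('"abc').group(1))     # '"'
    print(_string_start.match("r'''abc").group(1))  # "'''"
    print(_string_start.match('rb"abc').group(1))   # '"'
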
+
+def _extract_string_while_in_string(leaf, position):
+    def return_part_of_leaf(leaf):
+        kwargs = {}
+        if leaf.line == position[0]:
+            kwargs['endpos'] = position[1] - leaf.column
+        match = _string_start.match(leaf.value, **kwargs)
+        if not match:
+            return None, None, None
+        start = match.group(0)
+        if leaf.line == position[0] and position[1] < leaf.column + match.end():
+            return None, None, None
+        return cut_value_at_position(leaf, position)[match.end():], leaf, start
+
+    if position < leaf.start_pos:
+        return None, None, None
+
+    if leaf.type == 'string':
+        return return_part_of_leaf(leaf)
+
+    leaves = []
+    while leaf is not None:
+        if leaf.type == 'error_leaf' and ('"' in leaf.value or "'" in leaf.value):
+            if len(leaf.value) > 1:
+                return return_part_of_leaf(leaf)
+            prefix_leaf = None
+            if not leaf.prefix:
+                prefix_leaf = leaf.get_previous_leaf()
+                if prefix_leaf is None or prefix_leaf.type != 'name' \
+                        or not all(c in 'rubf' for c in prefix_leaf.value.lower()):
+                    prefix_leaf = None
+
+            return (
+                ''.join(cut_value_at_position(l, position) for l in leaves),
+                prefix_leaf or leaf,
+                ('' if prefix_leaf is None else prefix_leaf.value)
+                + cut_value_at_position(leaf, position),
+            )
+        if leaf.line != position[0]:
+            # Multi line strings are always simple error leaves and contain the
+            # whole string, so only single line error leaves matter here; since
+            # the line is different, it's not really a single line string
+            # anymore.
+            break
+        leaves.insert(0, leaf)
+        leaf = leaf.get_previous_leaf()
+    return None, None, None
+
+
+def complete_trailer(user_context, values):
+    completion_names = []
+    for value in values:
+        for filter in value.get_filters(origin_scope=user_context.tree_node):
+            completion_names += filter.values()
+
+        if not value.is_stub() and isinstance(value, TreeInstance):
+            completion_names += _complete_getattr(user_context, value)
+
+    python_values = convert_values(values)
+    for c in python_values:
+        if c not in values:
+            for filter in c.get_filters(origin_scope=user_context.tree_node):
+                completion_names += filter.values()
+    return completion_names


 def _complete_getattr(user_context, instance):
@@ -107,4 +580,87 @@ def _complete_getattr(user_context, instance):
     will write it like this anyway and the other ones, well they are just
     out of luck I guess :) ~dave.
     """
-    pass
+    names = (instance.get_function_slot_names('__getattr__')
+             or instance.get_function_slot_names('__getattribute__'))
+    functions = ValueSet.from_sets(
+        name.infer()
+        for name in names
+    )
+    for func in functions:
+        tree_node = func.tree_node
+        if tree_node is None or tree_node.type != 'funcdef':
+            continue
+
+        for return_stmt in tree_node.iter_return_stmts():
+            # Basically until the next comment we just try to find out if a
+            # return statement looks exactly like `return getattr(x, name)`.
+            if return_stmt.type != 'return_stmt':
+                continue
+            atom_expr = return_stmt.children[1]
+            if atom_expr.type != 'atom_expr':
+                continue
+            atom = atom_expr.children[0]
+            trailer = atom_expr.children[1]
+            if len(atom_expr.children) != 2 or atom.type != 'name' \
+                    or atom.value != 'getattr':
+                continue
+            arglist = trailer.children[1]
+            if arglist.type != 'arglist' or len(arglist.children) < 3:
+                continue
+            context = func.as_context()
+            object_node = arglist.children[0]
+
+            # Make sure it's a param: foo in __getattr__(self, foo)
+            name_node = arglist.children[2]
+            name_list = context.goto(name_node, name_node.start_pos)
+            if not any(n.api_type == 'param' for n in name_list):
+                continue
+
+            # Now that we know that these are most probably completion
+            # objects, we just infer the object and return them as
+            # completions.
+            objects = context.infer_node(object_node)
+            return complete_trailer(user_context, objects)
+    return []
+
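For reference, a class shape this heuristic recognizes (``Wrapped`` is a hypothetical stand-in):

    class Wrapped:
        def prop(self):
            return 3

    class Wrapper:
        def __init__(self):
            self._obj = Wrapped()

        def __getattr__(self, name):
            # Exactly the `return getattr(x, name)` form matched above.
            return getattr(self._obj, name)

    # Completing after `Wrapper().` would then also offer `prop`.
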
+
+def search_in_module(inference_state, module_context, names, wanted_names,
+                     wanted_type, complete=False, fuzzy=False,
+                     ignore_imports=False, convert=False):
+    for s in wanted_names[:-1]:
+        new_names = []
+        for n in names:
+            if s == n.string_name:
+                if n.tree_name is not None and n.api_type in ('module', 'namespace') \
+                        and ignore_imports:
+                    continue
+                new_names += complete_trailer(
+                    module_context,
+                    n.infer()
+                )
+        debug.dbg('dot lookup on search %s from %s', new_names, names[:10])
+        names = new_names
+
+    last_name = wanted_names[-1].lower()
+    for n in names:
+        string = n.string_name.lower()
+        if complete and helpers.match(string, last_name, fuzzy=fuzzy) \
+                or not complete and string == last_name:
+            if isinstance(n, SubModuleName):
+                names = [v.name for v in n.infer()]
+            else:
+                names = [n]
+            if convert:
+                names = convert_names(names)
+            for n2 in names:
+                if complete:
+                    def_ = classes.Completion(
+                        inference_state, n2,
+                        stack=None,
+                        like_name_length=len(last_name),
+                        is_fuzzy=fuzzy,
+                    )
+                else:
+                    def_ = classes.Name(inference_state, n2)
+                if not wanted_type or wanted_type == def_.type:
+                    yield def_
diff --git a/jedi/api/completion_cache.py b/jedi/api/completion_cache.py
index 0fcf17c4..46e9bead 100644
--- a/jedi/api/completion_cache.py
+++ b/jedi/api/completion_cache.py
@@ -1,7 +1,31 @@
 from typing import Dict, Tuple, Callable
+
 CacheValues = Tuple[str, str, str]
 CacheValuesCallback = Callable[[], CacheValues]
+
+
 _cache: Dict[str, Dict[str, CacheValues]] = {}
+
+
+def save_entry(module_name: str, name: str, cache: CacheValues) -> None:
+    try:
+        module_cache = _cache[module_name]
+    except KeyError:
+        module_cache = _cache[module_name] = {}
+    module_cache[name] = cache
+
+
+def _create_get_from_cache(number: int) -> Callable[[str, str, CacheValuesCallback], str]:
+    def _get_from_cache(module_name: str, name: str, get_cache_values: CacheValuesCallback) -> str:
+        try:
+            return _cache[module_name][name][number]
+        except KeyError:
+            v = get_cache_values()
+            save_entry(module_name, name, v)
+            return v[number]
+    return _get_from_cache
+
+
 get_type = _create_get_from_cache(0)
 get_docstring_signature = _create_get_from_cache(1)
 get_docstring = _create_get_from_cache(2)
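
The indices 0, 1 and 2 map onto the ``(type, docstring signature, docstring)`` tuple built by ``Completion._get_cache()`` in classes.py. A direct usage sketch:

    from jedi.api import completion_cache
    completion_cache.save_entry('numpy', 'ndarray', ('class', 'ndarray(...)', 'docs'))
    # The callback only runs on a cache miss:
    print(completion_cache.get_type('numpy', 'ndarray', lambda: ('', '', '')))  # 'class'
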
diff --git a/jedi/api/environment.py b/jedi/api/environment.py
index ec0c00aa..771a9a83 100644
--- a/jedi/api/environment.py
+++ b/jedi/api/environment.py
@@ -8,10 +8,15 @@ import hashlib
 import filecmp
 from collections import namedtuple
 from shutil import which
+
 from jedi.cache import memoize_method, time_cache
-from jedi.inference.compiled.subprocess import CompiledSubprocess, InferenceStateSameProcess, InferenceStateSubprocess
+from jedi.inference.compiled.subprocess import CompiledSubprocess, \
+    InferenceStateSameProcess, InferenceStateSubprocess
+
 import parso
-_VersionInfo = namedtuple('VersionInfo', 'major minor micro')
+
+_VersionInfo = namedtuple('VersionInfo', 'major minor micro')  # type: ignore[name-match]
+
 _SUPPORTED_PYTHONS = ['3.12', '3.11', '3.10', '3.9', '3.8', '3.7', '3.6']
 _SAFE_PATHS = ['/usr/bin', '/usr/local/bin']
 _CONDA_VAR = 'CONDA_PREFIX'
@@ -26,7 +31,26 @@ class InvalidPythonEnvironment(Exception):


 class _BaseEnvironment:
-    pass
+    @memoize_method
+    def get_grammar(self):
+        version_string = '%s.%s' % (self.version_info.major, self.version_info.minor)
+        return parso.load_grammar(version=version_string)
+
+    @property
+    def _sha256(self):
+        try:
+            return self._hash
+        except AttributeError:
+            self._hash = _calculate_sha256_for_file(self.executable)
+            return self._hash
+
+
+def _get_info():
+    return (
+        sys.executable,
+        sys.prefix,
+        sys.version_info[:3],
+    )


 class Environment(_BaseEnvironment):
@@ -40,12 +64,47 @@ class Environment(_BaseEnvironment):
     def __init__(self, executable, env_vars=None):
         self._start_executable = executable
         self._env_vars = env_vars
+        # Initialize the environment
         self._get_subprocess()

+    def _get_subprocess(self):
+        if self._subprocess is not None and not self._subprocess.is_crashed:
+            return self._subprocess
+
+        try:
+            self._subprocess = CompiledSubprocess(self._start_executable,
+                                                  env_vars=self._env_vars)
+            info = self._subprocess._send(None, _get_info)
+        except Exception as exc:
+            raise InvalidPythonEnvironment(
+                "Could not get version information for %r: %r" % (
+                    self._start_executable,
+                    exc))
+
+        # Since it could change and might not be the same(?) as the one given,
+        # set it here.
+        self.executable = info[0]
+        """
+        The Python executable, matches ``sys.executable``.
+        """
+        self.path = info[1]
+        """
+        The path to an environment, matches ``sys.prefix``.
+        """
+        self.version_info = _VersionInfo(*info[2])
+        """
+        Like :data:`sys.version_info`: a tuple to show the current
+        Environment's Python version.
+        """
+        return self._subprocess
+
     def __repr__(self):
         version = '.'.join(str(i) for i in self.version_info)
         return '<%s: %s in %s>' % (self.__class__.__name__, version, self.path)

+    def get_inference_state_subprocess(self, inference_state):
+        return InferenceStateSubprocess(inference_state, self._get_subprocess())
+
     @memoize_method
     def get_sys_path(self):
         """
@@ -54,11 +113,15 @@ class Environment(_BaseEnvironment):

         :returns: list of str
         """
-        pass
+        # It's pretty much impossible to generate the sys path without actually
+        # executing Python. The sys path (when starting with -S) itself depends
+        # on how the Python version was compiled (ENV variables).
+        # If you omit -S when starting Python (normal case), additionally
+        # site.py gets executed.
+        return self._get_subprocess().get_sys_path()


 class _SameEnvironmentMixin:
-
     def __init__(self):
         self._start_executable = self.executable = sys.executable
         self.path = sys.prefix
@@ -71,7 +134,11 @@ class SameEnvironment(_SameEnvironmentMixin, Environment):


 class InterpreterEnvironment(_SameEnvironmentMixin, _BaseEnvironment):
-    pass
+    def get_inference_state_subprocess(self, inference_state):
+        return InferenceStateSameProcess(inference_state)
+
+    def get_sys_path(self):
+        return sys.path


 def _get_virtual_env_from_var(env_var='VIRTUAL_ENV'):
@@ -80,7 +147,27 @@ def _get_virtual_env_from_var(env_var='VIRTUAL_ENV'):
     It uses `safe=False` with ``create_environment``, because the environment
     variable is considered to be safe / controlled by the user solely.
     """
-    pass
+    var = os.environ.get(env_var)
+    if var:
+        # Under macOS in some cases - notably when using Pipenv - the
+        # sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
+        # /path/to/env so we need to fully resolve the paths in order to
+        # compare them.
+        if os.path.realpath(var) == os.path.realpath(sys.prefix):
+            return _try_get_same_env()
+
+        try:
+            return create_environment(var, safe=False)
+        except InvalidPythonEnvironment:
+            pass
+
+
+def _calculate_sha256_for_file(path):
+    sha256 = hashlib.sha256()
+    with open(path, 'rb') as f:
+        for block in iter(lambda: f.read(filecmp.BUFSIZE), b''):
+            sha256.update(block)
+    return sha256.hexdigest()


 def get_default_environment():
@@ -93,7 +180,81 @@ def get_default_environment():

     :returns: :class:`.Environment`
     """
-    pass
+    virtual_env = _get_virtual_env_from_var()
+    if virtual_env is not None:
+        return virtual_env
+
+    conda_env = _get_virtual_env_from_var(_CONDA_VAR)
+    if conda_env is not None:
+        return conda_env
+
+    return _try_get_same_env()
+
+
+def _try_get_same_env():
+    env = SameEnvironment()
+    if not os.path.basename(env.executable).lower().startswith('python'):
+        # This tries to counter issues with embedding. In some cases (e.g.
+        # VIM's Python on Mac/Windows), sys.executable is /foo/bar/vim. This
+        # happens, because for Mac a function called `_NSGetExecutablePath` is
+        # used and for Windows `GetModuleFileNameW`. These are both platform
+        # specific functions. For all other systems sys.executable should be
+        # alright. However here we try to generalize:
+        #
+        # 1. Check if the executable looks like python (heuristic)
+        # 2. In case it's not try to find the executable
+        # 3. In case we don't find it use an interpreter environment.
+        #
+        # The last option will always work, but leads to potential crashes of
+        # Jedi - which is ok, because it happens very rarely and even less,
+        # because the code below should work for most cases.
+        if os.name == 'nt':
+            # The first case would be a virtualenv and the second a normal
+            # Python installation.
+            checks = (r'Scripts\python.exe', 'python.exe')
+        else:
+            # For unix it looks like Python is always in a bin folder.
+            checks = (
+                'bin/python%s.%s' % (sys.version_info[0], sys.version_info[1]),
+                'bin/python%s' % (sys.version_info[0]),
+                'bin/python',
+            )
+        for check in checks:
+            guess = os.path.join(sys.exec_prefix, check)
+            if os.path.isfile(guess):
+                # Bingo - We think we have our Python.
+                return Environment(guess)
+        # It looks like there is no reasonable Python to be found.
+        return InterpreterEnvironment()
+    # If no virtualenv is found, use the environment we're already
+    # using.
+    return env
+
+
+def get_cached_default_environment():
+    var = os.environ.get('VIRTUAL_ENV') or os.environ.get(_CONDA_VAR)
+    environment = _get_cached_default_environment()
+
+    # Under macOS in some cases - notably when using Pipenv - the
+    # sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
+    # /path/to/env so we need to fully resolve the paths in order to
+    # compare them.
+    if var and os.path.realpath(var) != os.path.realpath(environment.path):
+        _get_cached_default_environment.clear_cache()
+        return _get_cached_default_environment()
+    return environment
+
+
+@time_cache(seconds=10 * 60)  # 10 Minutes
+def _get_cached_default_environment():
+    try:
+        return get_default_environment()
+    except InvalidPythonEnvironment:
+        # It's possible that `sys.executable` is wrong. Typically happens
+        # when Jedi is used in an executable that embeds Python. For further
+        # information, have a look at:
+        # https://github.com/davidhalter/jedi/issues/1531
+        return InterpreterEnvironment()


 def find_virtualenvs(paths=None, *, safe=True, use_environment_vars=True):
@@ -113,7 +274,41 @@ def find_virtualenvs(paths=None, *, safe=True, use_environment_vars=True):

     :yields: :class:`.Environment`
     """
-    pass
+    if paths is None:
+        paths = []
+
+    _used_paths = set()
+
+    if use_environment_vars:
+        # Using this variable should be safe, because attackers might be
+        # able to drop files (via git) but not environment variables.
+        virtual_env = _get_virtual_env_from_var()
+        if virtual_env is not None:
+            yield virtual_env
+            _used_paths.add(virtual_env.path)
+
+        conda_env = _get_virtual_env_from_var(_CONDA_VAR)
+        if conda_env is not None:
+            yield conda_env
+            _used_paths.add(conda_env.path)
+
+    for directory in paths:
+        if not os.path.isdir(directory):
+            continue
+
+        directory = os.path.abspath(directory)
+        for path in os.listdir(directory):
+            path = os.path.join(directory, path)
+            if path in _used_paths:
+                # A path shouldn't be inferred twice.
+                continue
+            _used_paths.add(path)
+
+            try:
+                executable = _get_executable_path(path, safe=safe)
+                yield Environment(executable)
+            except InvalidPythonEnvironment:
+                pass


 def find_system_environments(*, env_vars=None):
@@ -126,9 +321,15 @@ def find_system_environments(*, env_vars=None):

     :yields: :class:`.Environment`
     """
-    pass
+    for version_string in _SUPPORTED_PYTHONS:
+        try:
+            yield get_system_environment(version_string, env_vars=env_vars)
+        except InvalidPythonEnvironment:
+            pass


+# TODO: this function should probably return a list of environments since
+# multiple Python installations can be found on a system for the same version.
 def get_system_environment(version, *, env_vars=None):
     """
     Return the first Python environment found for a string of the form 'X.Y'
@@ -137,7 +338,19 @@ def get_system_environment(version, *, env_vars=None):
     :raises: :exc:`.InvalidPythonEnvironment`
     :returns: :class:`.Environment`
     """
-    pass
+    exe = which('python' + version)
+    if exe:
+        if exe == sys.executable:
+            return SameEnvironment()
+        return Environment(exe)
+
+    if os.name == 'nt':
+        for exe in _get_executables_from_windows_registry(version):
+            try:
+                return Environment(exe, env_vars=env_vars)
+            except InvalidPythonEnvironment:
+                pass
+    raise InvalidPythonEnvironment("Cannot find executable python%s." % version)
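
A hedged usage sketch (not part of the diff; the version string is just an
example):

    import jedi

    try:
        env = jedi.get_system_environment('3.8')
        print(env.executable)
    except jedi.InvalidPythonEnvironment:
        print('no python3.8 on PATH or in the Windows registry')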


 def create_environment(path, *, safe=True, env_vars=None):
@@ -148,11 +361,106 @@ def create_environment(path, *, safe=True, env_vars=None):
     :raises: :exc:`.InvalidPythonEnvironment`
     :returns: :class:`.Environment`
     """
-    pass
+    if os.path.isfile(path):
+        _assert_safe(path, safe)
+        return Environment(path, env_vars=env_vars)
+    return Environment(_get_executable_path(path, safe=safe), env_vars=env_vars)


 def _get_executable_path(path, safe=True):
     """
     Raises InvalidPythonEnvironment if it's not actually a virtual env.
     """
-    pass
+
+    if os.name == 'nt':
+        python = os.path.join(path, 'Scripts', 'python.exe')
+    else:
+        python = os.path.join(path, 'bin', 'python')
+    if not os.path.exists(python):
+        raise InvalidPythonEnvironment("%s seems to be missing." % python)
+
+    _assert_safe(python, safe)
+    return python
+
+
+def _get_executables_from_windows_registry(version):
+    import winreg
+
+    # TODO: support Python Anaconda.
+    sub_keys = [
+        r'SOFTWARE\Python\PythonCore\{version}\InstallPath',
+        r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}\InstallPath',
+        r'SOFTWARE\Python\PythonCore\{version}-32\InstallPath',
+        r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}-32\InstallPath'
+    ]
+    for root_key in [winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE]:
+        for sub_key in sub_keys:
+            sub_key = sub_key.format(version=version)
+            try:
+                with winreg.OpenKey(root_key, sub_key) as key:
+                    prefix = winreg.QueryValueEx(key, '')[0]
+                    exe = os.path.join(prefix, 'python.exe')
+                    if os.path.isfile(exe):
+                        yield exe
+            except WindowsError:
+                pass
+
+
+def _assert_safe(executable_path, safe):
+    if safe and not _is_safe(executable_path):
+        raise InvalidPythonEnvironment(
+            "The python binary is potentially unsafe.")
+
+
+def _is_safe(executable_path):
+    # Resolve sym links. A venv typically is a symlink to a known Python
+    # binary. Only virtualenvs copy symlinks around.
+    real_path = os.path.realpath(executable_path)
+
+    if _is_unix_safe_simple(real_path):
+        return True
+
+    # Just check the list of known Python versions. If it's not in there,
+    # it's likely an attacker or some Python that was not properly
+    # installed in the system.
+    for environment in find_system_environments():
+        if environment.executable == real_path:
+            return True
+
+        # If the versions don't match, just compare the binary files. If we
+        # don't do that, only venvs will be working and not virtualenvs.
+        # venvs are symlinks while virtualenvs are actual copies of the
+        # Python files.
+        # This still means that if the system Python is updated and the
+        # virtualenv's Python is not (which is probably never going to get
+        # upgraded), it will not work with Jedi. IMO that's fine, because
+        # people should just be using venv. ~ dave
+        if environment._sha256 == _calculate_sha256_for_file(real_path):
+            return True
+    return False
+
+
+def _is_unix_safe_simple(real_path):
+    if _is_unix_admin():
+        # In case we are root, just be conservative and
+        # only execute known paths.
+        return any(real_path.startswith(p) for p in _SAFE_PATHS)
+
+    uid = os.stat(real_path).st_uid
+    # The interpreter needs to be owned by root. This means that it wasn't
+    # written by a user and therefore attacking Jedi is not as simple.
+    # The attack could look like the following:
+    # 1. A user clones a repository.
+    # 2. The repository has an innocent looking folder called foobar. jedi
+    #    searches for the folder and executes foobar/bin/python --version if
+    #    there's also a foobar/bin/activate.
+    # 3. The attacker has gained code execution, since he controls
+    #    foobar/bin/python.
+    return uid == 0
+
+
+def _is_unix_admin():
+    try:
+        return os.getuid() == 0
+    except AttributeError:
+        return False  # Windows
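
A short sketch of how the safety checks surface to callers (illustrative;
the virtualenv folder below is hypothetical):

    import jedi

    # With safe=True (the default), each candidate executable must pass the
    # ownership/known-binary checks implemented above.
    for env in jedi.find_virtualenvs(paths=['/home/me/.virtualenvs']):
        print(env.path, '->', env.executable)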
diff --git a/jedi/api/errors.py b/jedi/api/errors.py
index 6f0d001a..10cb62af 100644
--- a/jedi/api/errors.py
+++ b/jedi/api/errors.py
@@ -4,34 +4,43 @@ Jedi.
 """


+def parso_to_jedi_errors(grammar, module_node):
+    return [SyntaxError(e) for e in grammar.iter_errors(module_node)]
+
+
 class SyntaxError:
     """
     Syntax errors are generated by :meth:`.Script.get_syntax_errors`.
     """
-
     def __init__(self, parso_error):
         self._parso_error = parso_error

     @property
     def line(self):
         """The line where the error starts (starting with 1)."""
-        pass
+        return self._parso_error.start_pos[0]

     @property
     def column(self):
         """The column where the error starts (starting with 0)."""
-        pass
+        return self._parso_error.start_pos[1]

     @property
     def until_line(self):
         """The line where the error ends (starting with 1)."""
-        pass
+        return self._parso_error.end_pos[0]

     @property
     def until_column(self):
         """The column where the error ends (starting with 0)."""
-        pass
+        return self._parso_error.end_pos[1]
+
+    def get_message(self):
+        return self._parso_error.message

     def __repr__(self):
-        return '<%s from=%s to=%s>' % (self.__class__.__name__, self.
-            _parso_error.start_pos, self._parso_error.end_pos)
+        return '<%s from=%s to=%s>' % (
+            self.__class__.__name__,
+            self._parso_error.start_pos,
+            self._parso_error.end_pos,
+        )
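
The properties above back Script.get_syntax_errors(); a minimal sketch:

    import jedi

    script = jedi.Script('def broken(:\n    pass\n')
    for error in script.get_syntax_errors():
        print(error.line, error.column, error.get_message())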
diff --git a/jedi/api/file_name.py b/jedi/api/file_name.py
index 23dce6e9..277f3220 100644
--- a/jedi/api/file_name.py
+++ b/jedi/api/file_name.py
@@ -1,4 +1,5 @@
 import os
+
 from jedi.api import classes
 from jedi.api.strings import StringName, get_quote_ending
 from jedi.api.helpers import match
@@ -7,3 +8,148 @@ from jedi.inference.helpers import get_str_or_none

 class PathName(StringName):
     api_type = 'path'
+
+
+def complete_file_name(inference_state, module_context, start_leaf, quote, string,
+                       like_name, signatures_callback, code_lines, position, fuzzy):
+    # First we want to find out what can actually be changed as a name.
+    like_name_length = len(os.path.basename(string))
+
+    addition = _get_string_additions(module_context, start_leaf)
+    if string.startswith('~'):
+        string = os.path.expanduser(string)
+    if addition is None:
+        return
+    string = addition + string
+
+    # Here we use basename again, because if strings are added like
+    # `'foo' + 'bar`, it should complete to `foobar/`.
+    must_start_with = os.path.basename(string)
+    string = os.path.dirname(string)
+
+    sigs = signatures_callback(*position)
+    is_in_os_path_join = sigs and all(s.full_name == 'os.path.join' for s in sigs)
+    if is_in_os_path_join:
+        to_be_added = _add_os_path_join(module_context, start_leaf, sigs[0].bracket_start)
+        if to_be_added is None:
+            is_in_os_path_join = False
+        else:
+            string = to_be_added + string
+    base_path = os.path.join(inference_state.project.path, string)
+    try:
+        listed = sorted(os.scandir(base_path), key=lambda e: e.name)
+        # OSError: [Errno 36] File name too long: '...'
+    except OSError:  # FileNotFoundError is a subclass of OSError.
+        return
+    quote_ending = get_quote_ending(quote, code_lines, position)
+    for entry in listed:
+        name = entry.name
+        if match(name, must_start_with, fuzzy=fuzzy):
+            if is_in_os_path_join or not entry.is_dir():
+                name += quote_ending
+            else:
+                name += os.path.sep
+
+            yield classes.Completion(
+                inference_state,
+                PathName(inference_state, name[len(must_start_with) - like_name_length:]),
+                stack=None,
+                like_name_length=like_name_length,
+                is_fuzzy=fuzzy,
+            )
+
+
+def _get_string_additions(module_context, start_leaf):
+    def iterate_nodes():
+        node = addition.parent
+        was_addition = True
+        for child_node in reversed(node.children[:node.children.index(addition)]):
+            if was_addition:
+                was_addition = False
+                yield child_node
+                continue
+
+            if child_node != '+':
+                break
+            was_addition = True
+
+    addition = start_leaf.get_previous_leaf()
+    if addition != '+':
+        return ''
+    context = module_context.create_context(start_leaf)
+    return _add_strings(context, reversed(list(iterate_nodes())))
+
+
+def _add_strings(context, nodes, add_slash=False):
+    string = ''
+    first = True
+    for child_node in nodes:
+        values = context.infer_node(child_node)
+        if len(values) != 1:
+            return None
+        c, = values
+        s = get_str_or_none(c)
+        if s is None:
+            return None
+        if not first and add_slash:
+            string += os.path.sep
+        string += s
+        first = False
+    return string
+
+
+def _add_os_path_join(module_context, start_leaf, bracket_start):
+    def check(maybe_bracket, nodes):
+        if maybe_bracket.start_pos != bracket_start:
+            return None
+
+        if not nodes:
+            return ''
+        context = module_context.create_context(nodes[0])
+        return _add_strings(context, nodes, add_slash=True) or ''
+
+    if start_leaf.type == 'error_leaf':
+        # Unfinished string literal, like `join('`
+        value_node = start_leaf.parent
+        index = value_node.children.index(start_leaf)
+        if index > 0:
+            error_node = value_node.children[index - 1]
+            if error_node.type == 'error_node' and len(error_node.children) >= 2:
+                index = -2
+                if error_node.children[-1].type == 'arglist':
+                    arglist_nodes = error_node.children[-1].children
+                    index -= 1
+                else:
+                    arglist_nodes = []
+
+                return check(error_node.children[index + 1], arglist_nodes[::2])
+        return None
+
+    # Maybe an arglist or some weird error case. Therefore checked below.
+    searched_node_child = start_leaf
+    while searched_node_child.parent is not None \
+            and searched_node_child.parent.type not in ('arglist', 'trailer', 'error_node'):
+        searched_node_child = searched_node_child.parent
+
+    if searched_node_child.get_first_leaf() is not start_leaf:
+        return None
+    searched_node = searched_node_child.parent
+    if searched_node is None:
+        return None
+
+    index = searched_node.children.index(searched_node_child)
+    arglist_nodes = searched_node.children[:index]
+    if searched_node.type == 'arglist':
+        trailer = searched_node.parent
+        if trailer.type == 'error_node':
+            trailer_index = trailer.children.index(searched_node)
+            assert trailer_index >= 2
+            assert trailer.children[trailer_index - 1] == '('
+            return check(trailer.children[trailer_index - 1], arglist_nodes[::2])
+        elif trailer.type == 'trailer':
+            return check(trailer.children[0], arglist_nodes[::2])
+    elif searched_node.type == 'trailer':
+        return check(searched_node.children[0], [])
+    elif searched_node.type == 'error_node':
+        # Stuff like `join(""`
+        return check(arglist_nodes[-1], [])
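
A sketch of the behavior this module implements (output depends on the
working directory and project):

    import jedi

    # Completion inside a string literal falls through to path completion;
    # '+'-concatenated parts are joined first, so '"./je" + "di/ap' is
    # completed as if the string were '"./jedi/ap'.
    code = '"./je" + "di/ap'
    script = jedi.Script(code, path='example.py')
    print([c.name for c in script.complete(1, len(code))])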
diff --git a/jedi/api/helpers.py b/jedi/api/helpers.py
index 18bd99de..111f75b4 100644
--- a/jedi/api/helpers.py
+++ b/jedi/api/helpers.py
@@ -7,48 +7,485 @@ from textwrap import dedent
 from itertools import chain
 from functools import wraps
 from inspect import Parameter
+
 from parso.python.parser import Parser
 from parso.python import tree
+
 from jedi.inference.base_value import NO_VALUES
 from jedi.inference.syntax_tree import infer_atom
 from jedi.inference.helpers import infer_call_of_leaf
 from jedi.inference.compiled import get_string_value_set
 from jedi.cache import signature_time_cache, memoize_method
 from jedi.parser_utils import get_parent_scope
+
+
 CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])


+def _start_match(string, like_name):
+    return string.startswith(like_name)
+
+
+def _fuzzy_match(string, like_name):
+    if len(like_name) <= 1:
+        return like_name in string
+    pos = string.find(like_name[0])
+    if pos >= 0:
+        return _fuzzy_match(string[pos + 1:], like_name[1:])
+    return False
+
+
+def match(string, like_name, fuzzy=False):
+    if fuzzy:
+        return _fuzzy_match(string, like_name)
+    else:
+        return _start_match(string, like_name)
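
The two strategies, illustrated (assuming the patched jedi.api.helpers):

    from jedi.api.helpers import match

    assert match('os_path_join', 'os_')               # plain prefix match
    assert match('os_path_join', 'opj', fuzzy=True)   # chars appear in order
    assert not match('os_path_join', 'jpo', fuzzy=True)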
+
+
+def sorted_definitions(defs):
+    # Note: the `or ''` / `or 0` fallbacks below are required because
+    # `module_path`, `line` and `column` can each be None.
+    return sorted(defs, key=lambda x: (str(x.module_path or ''),
+                                       x.line or 0,
+                                       x.column or 0,
+                                       x.name))
+
+
+def get_on_completion_name(module_node, lines, position):
+    leaf = module_node.get_leaf_for_position(position)
+    if leaf is None or leaf.type in ('string', 'error_leaf'):
+        # Completions inside strings are a bit special; we need to parse the
+        # string ourselves. The same is true for comments and error_leafs.
+        line = lines[position[0] - 1]
+        # The first step of completions is to get the name
+        return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0)
+    elif leaf.type not in ('name', 'keyword'):
+        return ''
+
+    return leaf.value[:position[1] - leaf.start_pos[1]]
+
+
+def _get_code(code_lines, start_pos, end_pos):
+    # Get relevant lines.
+    lines = code_lines[start_pos[0] - 1:end_pos[0]]
+    # Remove the parts at the end of the line.
+    lines[-1] = lines[-1][:end_pos[1]]
+    # Remove first line indentation.
+    lines[0] = lines[0][start_pos[1]:]
+    return ''.join(lines)
+
+
 class OnErrorLeaf(Exception):
-    pass
+    @property
+    def error_leaf(self):
+        return self.args[0]
+
+
+def _get_code_for_stack(code_lines, leaf, position):
+    # It might happen that we're on whitespace or on a comment. This means
+    # that we would not get the right leaf.
+    if leaf.start_pos >= position:
+        # If we're not on a comment simply get the previous leaf and proceed.
+        leaf = leaf.get_previous_leaf()
+        if leaf is None:
+            return ''  # At the beginning of the file.
+
+    is_after_newline = leaf.type == 'newline'
+    while leaf.type == 'newline':
+        leaf = leaf.get_previous_leaf()
+        if leaf is None:
+            return ''
+
+    if leaf.type == 'error_leaf' or leaf.type == 'string':
+        if leaf.start_pos[0] < position[0]:
+            # On a different line, we just begin anew.
+            return ''
+
+        # Error leafs cannot be parsed, completion in strings is also
+        # impossible.
+        raise OnErrorLeaf(leaf)
+    else:
+        user_stmt = leaf
+        while True:
+            if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'):
+                break
+            user_stmt = user_stmt.parent
+
+        if is_after_newline:
+            if user_stmt.start_pos[1] > position[1]:
+                # This means that it's actually a dedent and that means that we
+                # start without value (part of a suite).
+                return ''
+
+        # This is basically getting the relevant lines.
+        return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position)


 def get_stack_at_position(grammar, code_lines, leaf, pos):
     """
     Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
     """
-    pass
+    class EndMarkerReached(Exception):
+        pass

+    def tokenize_without_endmarker(code):
+        # TODO This is for now not an official parso API that exists purely
+        #   for Jedi.
+        tokens = grammar._tokenize(code)
+        for token in tokens:
+            if token.string == safeword:
+                raise EndMarkerReached()
+            elif token.prefix.endswith(safeword):
+                # This happens with comments.
+                raise EndMarkerReached()
+            elif token.string.endswith(safeword):
+                yield token  # Probably an f-string literal that was not finished.
+                raise EndMarkerReached()
+            else:
+                yield token

-class CallDetails:
+    # The code might be indented; just remove the indentation.
+    code = dedent(_get_code_for_stack(code_lines, leaf, pos))
+    # We use a word to tell Jedi when we have reached the start of the
+    # completion.
+    # Use Z as a prefix because it's not part of a number suffix.
+    safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
+    code = code + ' ' + safeword
+
+    p = Parser(grammar._pgen_grammar, error_recovery=True)
+    try:
+        p.parse(tokens=tokenize_without_endmarker(code))
+    except EndMarkerReached:
+        return p.stack
+    raise SystemError(
+        "This really shouldn't happen. There's a bug in Jedi:\n%s"
+        % list(tokenize_without_endmarker(code))
+    )
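
Conceptually (an illustration, not actual output): for `isinstance(foo, `
with the cursor at the end, the parser effectively consumes

    isinstance(foo, ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI

and tokenization stops at the safeword, leaving p.stack describing the open
trailer/arglist context at the cursor.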
+
+
+def infer(inference_state, context, leaf):
+    if leaf.type == 'name':
+        return inference_state.infer(context, leaf)

+    parent = leaf.parent
+    definitions = NO_VALUES
+    if parent.type == 'atom':
+        # e.g. `(a + b)`
+        definitions = context.infer_node(leaf.parent)
+    elif parent.type == 'trailer':
+        # e.g. `a()`
+        definitions = infer_call_of_leaf(context, leaf)
+    elif isinstance(leaf, tree.Literal):
+        # e.g. `"foo"` or `1.0`
+        return infer_atom(context, leaf)
+    elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
+        return get_string_value_set(inference_state)
+    return definitions
+
+
+def filter_follow_imports(names, follow_builtin_imports=False):
+    for name in names:
+        if name.is_import():
+            new_names = list(filter_follow_imports(
+                name.goto(),
+                follow_builtin_imports=follow_builtin_imports,
+            ))
+            found_builtin = False
+            if follow_builtin_imports:
+                for new_name in new_names:
+                    if new_name.start_pos is None:
+                        found_builtin = True
+
+            if found_builtin:
+                yield name
+            else:
+                yield from new_names
+        else:
+            yield name
+
+
+class CallDetails:
     def __init__(self, bracket_leaf, children, position):
         self.bracket_leaf = bracket_leaf
         self._children = children
         self._position = position

+    @property
+    def index(self):
+        return _get_index_and_key(self._children, self._position)[0]
+
+    @property
+    def keyword_name_str(self):
+        return _get_index_and_key(self._children, self._position)[1]
+
+    @memoize_method
+    def _list_arguments(self):
+        return list(_iter_arguments(self._children, self._position))
+
+    def calculate_index(self, param_names):
+        positional_count = 0
+        used_names = set()
+        star_count = -1
+        args = self._list_arguments()
+        if not args:
+            if param_names:
+                return 0
+            else:
+                return None
+
+        is_kwarg = False
+        for i, (star_count, key_start, had_equal) in enumerate(args):
+            is_kwarg |= had_equal | (star_count == 2)
+            if star_count:
+                pass  # For now do nothing, we don't know what's in there here.
+            else:
+                if i + 1 != len(args):  # Not last
+                    if had_equal:
+                        used_names.add(key_start)
+                    else:
+                        positional_count += 1
+
+        for i, param_name in enumerate(param_names):
+            kind = param_name.get_kind()
+
+            if not is_kwarg:
+                if kind == Parameter.VAR_POSITIONAL:
+                    return i
+                if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.POSITIONAL_ONLY):
+                    if i == positional_count:
+                        return i
+
+            if key_start is not None and not star_count == 1 or star_count == 2:
+                if param_name.string_name not in used_names \
+                        and (kind == Parameter.KEYWORD_ONLY
+                             or kind == Parameter.POSITIONAL_OR_KEYWORD
+                             and positional_count <= i):
+                    if star_count:
+                        return i
+                    if had_equal:
+                        if param_name.string_name == key_start:
+                            return i
+                    else:
+                        if param_name.string_name.startswith(key_start):
+                            return i
+
+                if kind == Parameter.VAR_KEYWORD:
+                    return i
+        return None
+
+    def iter_used_keyword_arguments(self):
+        for star_count, key_start, had_equal in list(self._list_arguments()):
+            if had_equal and key_start:
+                yield key_start
+
+    def count_positional_arguments(self):
+        count = 0
+        for star_count, key_start, had_equal in self._list_arguments()[:-1]:
+            if star_count or key_start:
+                break
+            count += 1
+        return count
+
+
+def _iter_arguments(nodes, position):
+    def remove_after_pos(name):
+        if name.type != 'name':
+            return None
+        return name.value[:position[1] - name.start_pos[1]]
+
+    # Returns Generator[Tuple[star_count, Optional[key_start: str], had_equal]]
+    nodes_before = [c for c in nodes if c.start_pos < position]
+    if nodes_before[-1].type == 'arglist':
+        yield from _iter_arguments(nodes_before[-1].children, position)
+        return
+
+    previous_node_yielded = False
+    stars_seen = 0
+    for i, node in enumerate(nodes_before):
+        if node.type == 'argument':
+            previous_node_yielded = True
+            first = node.children[0]
+            second = node.children[1]
+            if second == '=':
+                if second.start_pos < position and first.type == 'name':
+                    yield 0, first.value, True
+                else:
+                    yield 0, remove_after_pos(first), False
+            elif first in ('*', '**'):
+                yield len(first.value), remove_after_pos(second), False
+            else:
+                # Must be a Comprehension
+                first_leaf = node.get_first_leaf()
+                if first_leaf.type == 'name' and first_leaf.start_pos >= position:
+                    yield 0, remove_after_pos(first_leaf), False
+                else:
+                    yield 0, None, False
+            stars_seen = 0
+        elif node.type == 'testlist_star_expr':
+            for n in node.children[::2]:
+                if n.type == 'star_expr':
+                    stars_seen = 1
+                    n = n.children[1]
+                yield stars_seen, remove_after_pos(n), False
+                stars_seen = 0
+            # The count of children is even if there's a comma at the end.
+            previous_node_yielded = bool(len(node.children) % 2)
+        elif isinstance(node, tree.PythonLeaf) and node.value == ',':
+            if not previous_node_yielded:
+                yield stars_seen, '', False
+                stars_seen = 0
+            previous_node_yielded = False
+        elif isinstance(node, tree.PythonLeaf) and node.value in ('*', '**'):
+            stars_seen = len(node.value)
+        elif node == '=' and nodes_before[-1]:
+            previous_node_yielded = True
+            before = nodes_before[i - 1]
+            if before.type == 'name':
+                yield 0, before.value, True
+            else:
+                yield 0, None, False
+            # Just ignore the star that is probably a syntax error.
+            stars_seen = 0
+
+    if not previous_node_yielded:
+        if nodes_before[-1].type == 'name':
+            yield stars_seen, remove_after_pos(nodes_before[-1]), False
+        else:
+            yield stars_seen, '', False
+
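
A rough sketch of the yielded (star_count, key_start, had_equal) tuples for
`f(a, b=1, *rest` with the cursor at the end (for orientation, not verified
output):

    (0, '', False)      # the completed positional before the comma
    (0, 'b', True)      # keyword argument with '='
    (1, 'rest', False)  # star argument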

 def _get_index_and_key(nodes, position):
     """
     Returns the amount of commas and the keyword argument string.
     """
-    pass
+    nodes_before = [c for c in nodes if c.start_pos < position]
+    if nodes_before[-1].type == 'arglist':
+        return _get_index_and_key(nodes_before[-1].children, position)
+
+    key_str = None
+
+    last = nodes_before[-1]
+    if last.type == 'argument' and last.children[1] == '=' \
+            and last.children[1].end_pos <= position:
+        # The keyword argument name is complete (its '=' lies before the cursor).
+        key_str = last.children[0].value
+    elif last == '=':
+        key_str = nodes_before[-2].value
+
+    return nodes_before.count(','), key_str
+
+
+def _get_signature_details_from_error_node(node, additional_children, position):
+    for index, element in reversed(list(enumerate(node.children))):
+        # `index > 0` means that it's a trailer and not an atom.
+        if element == '(' and element.end_pos <= position and index > 0:
+            # It's an error node, we don't want to match too much, just
+            # until the parentheses is enough.
+            children = node.children[index:]
+            name = element.get_previous_leaf()
+            if name is None:
+                continue
+            if name.type == 'name' or name.parent.type in ('trailer', 'atom'):
+                return CallDetails(element, children + additional_children, position)
+

+def get_signature_details(module, position):
+    leaf = module.get_leaf_for_position(position, include_prefixes=True)
+    # It's easier to deal with the previous token than the next one in this
+    # case.
+    if leaf.start_pos >= position:
+        # Whitespace / comments after the leaf count towards the previous leaf.
+        leaf = leaf.get_previous_leaf()
+        if leaf is None:
+            return None

-@signature_time_cache('call_signatures_validity')
-def cache_signatures(inference_state, context, bracket_leaf, code_lines,
-    user_pos):
+    # Now that we know where we are in the syntax tree, we start to look at
+    # parents for possible function definitions.
+    node = leaf.parent
+    while node is not None:
+        if node.type in ('funcdef', 'classdef', 'decorated', 'async_stmt'):
+            # Don't show signatures if there's stuff before it that just
+            # makes it feel strange to have a signature.
+            return None
+
+        additional_children = []
+        for n in reversed(node.children):
+            if n.start_pos < position:
+                if n.type == 'error_node':
+                    result = _get_signature_details_from_error_node(
+                        n, additional_children, position
+                    )
+                    if result is not None:
+                        return result
+
+                    additional_children[0:0] = n.children
+                    continue
+                additional_children.insert(0, n)
+
+        # Find a valid trailer
+        if node.type == 'trailer' and node.children[0] == '(' \
+                or node.type == 'decorator' and node.children[2] == '(':
+            # Additionally we have to check that an ending parenthesis isn't
+            # interpreted wrong. There are two cases:
+            # 1. Cursor before paren -> The current signature is good
+            # 2. Cursor after paren -> We need to skip the current signature
+            if not (leaf is node.children[-1] and position >= leaf.end_pos):
+                leaf = node.get_previous_leaf()
+                if leaf is None:
+                    return None
+                return CallDetails(
+                    node.children[0] if node.type == 'trailer' else node.children[2],
+                    node.children,
+                    position
+                )
+
+        node = node.parent
+
+    return None
+
+
+@signature_time_cache("call_signatures_validity")
+def cache_signatures(inference_state, context, bracket_leaf, code_lines, user_pos):
     """This function calculates the cache key."""
-    pass
+    line_index = user_pos[0] - 1
+
+    before_cursor = code_lines[line_index][:user_pos[1]]
+    other_lines = code_lines[bracket_leaf.start_pos[0]:line_index]
+    whole = ''.join(other_lines + [before_cursor])
+    before_bracket = re.match(r'.*\(', whole, re.DOTALL)
+
+    module_path = context.get_root_context().py__file__()
+    if module_path is None:
+        yield None  # Don't cache!
+    else:
+        yield (module_path, before_bracket, bracket_leaf.start_pos)
+    yield infer(
+        inference_state,
+        context,
+        bracket_leaf.get_previous_leaf(),
+    )
+
+
+def validate_line_column(func):
+    @wraps(func)
+    def wrapper(self, line=None, column=None, *args, **kwargs):
+        line = max(len(self._code_lines), 1) if line is None else line
+        if not (0 < line <= len(self._code_lines)):
+            raise ValueError('`line` parameter is not in a valid range.')
+
+        line_string = self._code_lines[line - 1]
+        line_len = len(line_string)
+        if line_string.endswith('\r\n'):
+            line_len -= 2
+        elif line_string.endswith('\n'):
+            line_len -= 1
+
+        column = line_len if column is None else column
+        if not (0 <= column <= line_len):
+            raise ValueError('`column` parameter (%d) is not in a valid range '
+                             '(0-%d) for line %d (%r).' % (
+                                 column, line_len, line, line_string))
+        return func(self, line, column, *args, **kwargs)
+    return wrapper
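
A minimal sketch of the decorator's defaults, using a hypothetical stand-in
for Script (assuming the patched jedi.api.helpers):

    from jedi.api.helpers import validate_line_column

    class FakeScript:
        # The decorator only needs a _code_lines attribute.
        _code_lines = ['x = 1\n', 'print(x)\n']

        @validate_line_column
        def complete(self, line, column):
            return line, column

    print(FakeScript().complete())      # (2, 8): end of the last line
    print(FakeScript().complete(1, 3))  # (1, 3)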


 def get_module_names(module, all_scopes, definitions=True, references=False):
@@ -56,4 +493,30 @@ def get_module_names(module, all_scopes, definitions=True, references=False):
     Returns a dictionary with name parts as keys and their call paths as
     values.
     """
-    pass
+    def def_ref_filter(name):
+        is_def = name.is_definition()
+        return definitions and is_def or references and not is_def
+
+    names = list(chain.from_iterable(module.get_used_names().values()))
+    if not all_scopes:
+        # We have to filter all the names that don't have the module as a
+        # parent_scope. There's None as a parent, because nodes in the module
+        # node have the parent module and not suite as all the others.
+        # Therefore it's important to catch that case.
+
+        def is_module_scope_name(name):
+            parent_scope = get_parent_scope(name)
+            # async functions have an extra wrapper. Strip it.
+            if parent_scope and parent_scope.type == 'async_stmt':
+                parent_scope = parent_scope.parent
+            return parent_scope in (module, None)
+
+        names = [n for n in names if is_module_scope_name(n)]
+    return filter(def_ref_filter, names)
+
+
+def split_search_string(name):
+    type, _, dotted_names = name.rpartition(' ')
+    if type == 'def':
+        type = 'function'
+    return type, dotted_names.split('.')
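
How the search string is split (a sketch, assuming the patched helpers):

    from jedi.api.helpers import split_search_string

    assert split_search_string('def foo.bar') == ('function', ['foo', 'bar'])
    assert split_search_string('class Foo') == ('class', ['Foo'])
    assert split_search_string('foo') == ('', ['foo'])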
diff --git a/jedi/api/interpreter.py b/jedi/api/interpreter.py
index 7271aaec..befafe5a 100644
--- a/jedi/api/interpreter.py
+++ b/jedi/api/interpreter.py
@@ -1,6 +1,7 @@
 """
 TODO Some parts of this module are still not well documented.
 """
+
 from jedi.inference import compiled
 from jedi.inference.base_value import ValueSet
 from jedi.inference.filters import ParserTreeFilter, MergedFilter
@@ -10,21 +11,34 @@ from jedi.inference.compiled.access import create_access_path
 from jedi.inference.context import ModuleContext


-class NamespaceObject:
+def _create(inference_state, obj):
+    return compiled.create_from_access_path(
+        inference_state, create_access_path(inference_state, obj)
+    )
+

+class NamespaceObject:
     def __init__(self, dct):
         self.__dict__ = dct


 class MixedTreeName(TreeNameDefinition):
-
     def infer(self):
         """
         In IPython notebook it is typical that some parts of the code that is
         provided was already executed. In that case if something is not properly
         inferred, it should still infer from the variables it already knows.
         """
-        pass
+        inferred = super().infer()
+        if not inferred:
+            for compiled_value in self.parent_context.mixed_values:
+                for f in compiled_value.get_filters():
+                    values = ValueSet.from_sets(
+                        n.infer() for n in f.get(self.string_name)
+                    )
+                    if values:
+                        return values
+        return inferred


 class MixedParserTreeFilter(ParserTreeFilter):
@@ -32,8 +46,29 @@ class MixedParserTreeFilter(ParserTreeFilter):


 class MixedModuleContext(ModuleContext):
-
     def __init__(self, tree_module_value, namespaces):
         super().__init__(tree_module_value)
-        self.mixed_values = [self._get_mixed_object(_create(self.
-            inference_state, NamespaceObject(n))) for n in namespaces]
+        self.mixed_values = [
+            self._get_mixed_object(
+                _create(self.inference_state, NamespaceObject(n))
+            ) for n in namespaces
+        ]
+
+    def _get_mixed_object(self, compiled_value):
+        return mixed.MixedObject(
+            compiled_value=compiled_value,
+            tree_value=self._value
+        )
+
+    def get_filters(self, until_position=None, origin_scope=None):
+        yield MergedFilter(
+            MixedParserTreeFilter(
+                parent_context=self,
+                until_position=until_position,
+                origin_scope=origin_scope
+            ),
+            self.get_global_filter(),
+        )
+
+        for mixed_object in self.mixed_values:
+            yield from mixed_object.get_filters(until_position, origin_scope)
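
This context is what jedi.Interpreter builds on; a minimal sketch:

    import jedi

    namespace = {'config': {'timeout': 3}}
    interp = jedi.Interpreter('config.ge', [namespace])
    print([c.name for c in interp.complete()])  # dict attributes like 'get'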
diff --git a/jedi/api/keywords.py b/jedi/api/keywords.py
index 6a6e99e2..80ff13c3 100644
--- a/jedi/api/keywords.py
+++ b/jedi/api/keywords.py
@@ -1,21 +1,51 @@
 import pydoc
 from contextlib import suppress
 from typing import Dict, Optional
+
 from jedi.inference.names import AbstractArbitraryName
+
 try:
     from pydoc_data import topics
     pydoc_topics: Optional[Dict[str, str]] = topics.topics
 except ImportError:
+    # Python 3.6.8 embeddable does not have pydoc_data.
     pydoc_topics = None


 class KeywordName(AbstractArbitraryName):
     api_type = 'keyword'

+    def py__doc__(self):
+        return imitate_pydoc(self.string_name)
+

 def imitate_pydoc(string):
     """
     It's not possible to get pydoc's help texts without it starting the
     annoying pager, so imitate its topic lookup here instead.
     """
-    pass
+    if pydoc_topics is None:
+        return ''
+
+    h = pydoc.help
+    with suppress(KeyError):
+        # try to access symbols
+        string = h.symbols[string]
+        string, _, related = string.partition(' ')
+
+    def get_target(s):
+        return h.topics.get(s, h.keywords.get(s))
+
+    while isinstance(string, str):
+        string = get_target(string)
+
+    try:
+        # is a tuple now
+        label, related = string
+    except TypeError:
+        return ''
+
+    try:
+        return pydoc_topics[label].strip() if pydoc_topics else ''
+    except KeyError:
+        return ''
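
A sketch of how this surfaces in the public API (keyword help texts are
looked up through pydoc's topic tables):

    import jedi

    script = jedi.Script('raise ')
    for name in script.help(1, 3):
        print(name.docstring()[:60])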
diff --git a/jedi/api/project.py b/jedi/api/project.py
index d5ae11bb..8927e7ea 100644
--- a/jedi/api/project.py
+++ b/jedi/api/project.py
@@ -10,22 +10,52 @@ be used across repositories.
 import json
 from pathlib import Path
 from itertools import chain
+
 from jedi import debug
 from jedi.api.environment import get_cached_default_environment, create_environment
 from jedi.api.exceptions import WrongVersion
 from jedi.api.completion import search_in_module
 from jedi.api.helpers import split_search_string, get_module_names
-from jedi.inference.imports import load_module_from_path, load_namespace_from_path, iter_module_names
+from jedi.inference.imports import load_module_from_path, \
+    load_namespace_from_path, iter_module_names
 from jedi.inference.sys_path import discover_buildout_paths
 from jedi.inference.cache import inference_state_as_method_param_cache
 from jedi.inference.references import recurse_find_python_folders_and_files, search_in_file_ios
 from jedi.file_io import FolderIO
+
 _CONFIG_FOLDER = '.jedi'
-_CONTAINS_POTENTIAL_PROJECT = ('setup.py', '.git', '.hg',
-    'requirements.txt', 'MANIFEST.in', 'pyproject.toml')
+_CONTAINS_POTENTIAL_PROJECT = \
+    'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in', 'pyproject.toml'
+
 _SERIALIZER_VERSION = 1


+def _try_to_skip_duplicates(func):
+    def wrapper(*args, **kwargs):
+        found_tree_nodes = []
+        found_modules = []
+        for definition in func(*args, **kwargs):
+            tree_node = definition._name.tree_name
+            if tree_node is not None and tree_node in found_tree_nodes:
+                continue
+            if definition.type == 'module' and definition.module_path is not None:
+                if definition.module_path in found_modules:
+                    continue
+                found_modules.append(definition.module_path)
+            yield definition
+            found_tree_nodes.append(tree_node)
+    return wrapper
+
+
+def _remove_duplicates_from_path(path):
+    used = set()
+    for p in path:
+        if p in used:
+            continue
+        used.add(p)
+        yield p
+
+
 class Project:
     """
     Projects are a simple way to manage Python folders and define how Jedi does
@@ -34,6 +64,14 @@ class Project:
     """
     _environment = None

+    @staticmethod
+    def _get_config_folder_path(base_path):
+        return base_path.joinpath(_CONFIG_FOLDER)
+
+    @staticmethod
+    def _get_json_path(base_path):
+        return Project._get_config_folder_path(base_path).joinpath('project.json')
+
     @classmethod
     def load(cls, path):
         """
@@ -42,17 +80,42 @@ class Project:

         :param path: The path of the directory you want to use as a project.
         """
-        pass
+        if isinstance(path, str):
+            path = Path(path)
+        with open(cls._get_json_path(path)) as f:
+            version, data = json.load(f)
+
+        if version == 1:
+            return cls(**data)
+        else:
+            raise WrongVersion(
+                "The Jedi version of this project seems newer than what we can handle."
+            )

     def save(self):
         """
         Saves the project configuration in the project in ``.jedi/project.json``.
         """
-        pass
+        data = dict(self.__dict__)
+        data.pop('_environment', None)
+        data.pop('_django', None)  # TODO make django setting public?
+        data = {k.lstrip('_'): v for k, v in data.items()}
+        data['path'] = str(data['path'])

-    def __init__(self, path, *, environment_path=None,
-        load_unsafe_extensions=False, sys_path=None, added_sys_path=(),
-        smart_sys_path=True) ->None:
+        self._get_config_folder_path(self._path).mkdir(parents=True, exist_ok=True)
+        with open(self._get_json_path(self._path), 'w') as f:
+            return json.dump((_SERIALIZER_VERSION, data), f)
+
+    def __init__(
+        self,
+        path,
+        *,
+        environment_path=None,
+        load_unsafe_extensions=False,
+        sys_path=None,
+        added_sys_path=(),
+        smart_sys_path=True,
+    ) -> None:
         """
         :param path: The base path for this project.
         :param environment_path: The Python executable path, typically the path
@@ -71,16 +134,20 @@ class Project:
             local directories. Otherwise you will have to rely on your packages
             being properly configured on the ``sys.path``.
         """
+
         if isinstance(path, str):
             path = Path(path).absolute()
         self._path = path
+
         self._environment_path = environment_path
         if sys_path is not None:
+            # Remap potential pathlib.Path entries
             sys_path = list(map(str, sys_path))
         self._sys_path = sys_path
         self._smart_sys_path = smart_sys_path
         self._load_unsafe_extensions = load_unsafe_extensions
         self._django = False
+        # Remap potential pathlib.Path entries
         self.added_sys_path = list(map(str, added_sys_path))
         """The sys path that is going to be added at the end of the """

@@ -89,7 +156,7 @@ class Project:
         """
         The base path for this project.
         """
-        pass
+        return self._path

     @property
     def sys_path(self):
@@ -97,7 +164,7 @@ class Project:
         The sys path provided to this project. This can be None and in that
         case will be auto generated.
         """
-        pass
+        return self._sys_path

     @property
     def smart_sys_path(self):
@@ -105,23 +172,80 @@ class Project:
         If the sys path is going to be calculated in a smart way, where
         additional paths are added.
         """
-        pass
+        return self._smart_sys_path

     @property
     def load_unsafe_extensions(self):
         """
         Whether the project loads unsafe extensions.
         """
-        pass
+        return self._load_unsafe_extensions
+
+    @inference_state_as_method_param_cache()
+    def _get_base_sys_path(self, inference_state):
+        # The sys path has not been set explicitly.
+        sys_path = list(inference_state.environment.get_sys_path())
+        try:
+            sys_path.remove('')
+        except ValueError:
+            pass
+        return sys_path

     @inference_state_as_method_param_cache()
-    def _get_sys_path(self, inference_state, add_parent_paths=True,
-        add_init_paths=False):
+    def _get_sys_path(self, inference_state, add_parent_paths=True, add_init_paths=False):
         """
         Keep this method private for all users of jedi. However internally this
         one is used like a public method.
         """
-        pass
+        suffixed = list(self.added_sys_path)
+        prefixed = []
+
+        if self._sys_path is None:
+            sys_path = list(self._get_base_sys_path(inference_state))
+        else:
+            sys_path = list(self._sys_path)
+
+        if self._smart_sys_path:
+            prefixed.append(str(self._path))
+
+            if inference_state.script_path is not None:
+                suffixed += map(str, discover_buildout_paths(
+                    inference_state,
+                    inference_state.script_path
+                ))
+
+                if add_parent_paths:
+                    # Collect directories in upward search by:
+                    #   1. Skipping directories with __init__.py
+                    #   2. Stopping immediately when above self._path
+                    traversed = []
+                    for parent_path in inference_state.script_path.parents:
+                        if parent_path == self._path \
+                                or self._path not in parent_path.parents:
+                            break
+                        if not add_init_paths \
+                                and parent_path.joinpath("__init__.py").is_file():
+                            continue
+                        traversed.append(str(parent_path))
+
+                    # AFAIK some libraries have imports like `foo.foo.bar`, which
+                    # leads to the conclusion that, by default, longer paths
+                    # should be preferred over shorter ones.
+                    suffixed += reversed(traversed)
+
+        if self._django:
+            prefixed.append(str(self._path))
+
+        path = prefixed + sys_path + suffixed
+        return list(_remove_duplicates_from_path(path))
+
+    def get_environment(self):
+        if self._environment is None:
+            if self._environment_path is not None:
+                self._environment = create_environment(self._environment_path, safe=False)
+            else:
+                self._environment = get_cached_default_environment()
+        return self._environment

     def search(self, string, *, all_scopes=False):
         """
@@ -144,7 +268,7 @@ class Project:
             functions and classes.
         :yields: :class:`.Name`
         """
-        pass
+        return self._search_func(string, all_scopes=all_scopes)

     def complete_search(self, string, **kwargs):
         """
@@ -156,15 +280,113 @@ class Project:
             functions and classes.
         :yields: :class:`.Completion`
         """
-        pass
+        return self._search_func(string, complete=True, **kwargs)
+
+    @_try_to_skip_duplicates
+    def _search_func(self, string, complete=False, all_scopes=False):
+        # Using a Script is the easiest way to get an empty module context.
+        from jedi import Script
+        s = Script('', project=self)
+        inference_state = s._inference_state
+        empty_module_context = s._get_module_context()
+
+        debug.dbg('Search for string %s, complete=%s', string, complete)
+        wanted_type, wanted_names = split_search_string(string)
+        name = wanted_names[0]
+        stub_folder_name = name + '-stubs'
+
+        ios = recurse_find_python_folders_and_files(FolderIO(str(self._path)))
+        file_ios = []
+
+        # 1. Search for modules in the current project
+        for folder_io, file_io in ios:
+            if file_io is None:
+                file_name = folder_io.get_base_name()
+                if file_name == name or file_name == stub_folder_name:
+                    f = folder_io.get_file_io('__init__.py')
+                    try:
+                        m = load_module_from_path(inference_state, f).as_context()
+                    except FileNotFoundError:
+                        f = folder_io.get_file_io('__init__.pyi')
+                        try:
+                            m = load_module_from_path(inference_state, f).as_context()
+                        except FileNotFoundError:
+                            m = load_namespace_from_path(inference_state, folder_io).as_context()
+                else:
+                    continue
+            else:
+                file_ios.append(file_io)
+                if Path(file_io.path).name in (name + '.py', name + '.pyi'):
+                    m = load_module_from_path(inference_state, file_io).as_context()
+                else:
+                    continue
+
+            debug.dbg('Search of a specific module %s', m)
+            yield from search_in_module(
+                inference_state,
+                m,
+                names=[m.name],
+                wanted_type=wanted_type,
+                wanted_names=wanted_names,
+                complete=complete,
+                convert=True,
+                ignore_imports=True,
+            )
+
+        # 2. Search for identifiers in the project.
+        for module_context in search_in_file_ios(inference_state, file_ios,
+                                                 name, complete=complete):
+            names = get_module_names(module_context.tree_node, all_scopes=all_scopes)
+            names = [module_context.create_name(n) for n in names]
+            names = _remove_imports(names)
+            yield from search_in_module(
+                inference_state,
+                module_context,
+                names=names,
+                wanted_type=wanted_type,
+                wanted_names=wanted_names,
+                complete=complete,
+                ignore_imports=True,
+            )
+
+        # 3. Search for modules on sys.path
+        sys_path = [
+            p for p in self._get_sys_path(inference_state)
+            # Exclude the current folder which is handled by recursing the folders.
+            if p != self._path
+        ]
+        names = list(iter_module_names(inference_state, empty_module_context, sys_path))
+        yield from search_in_module(
+            inference_state,
+            empty_module_context,
+            names=names,
+            wanted_type=wanted_type,
+            wanted_names=wanted_names,
+            complete=complete,
+            convert=True,
+        )

     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self._path)


+def _is_potential_project(path):
+    for name in _CONTAINS_POTENTIAL_PROJECT:
+        try:
+            if path.joinpath(name).exists():
+                return True
+        except OSError:
+            continue
+    return False
+
+
 def _is_django_path(directory):
     """ Detects the path of the very well known Django library (if used) """
-    pass
+    try:
+        with open(directory.joinpath('manage.py'), 'rb') as f:
+            return b"DJANGO_SETTINGS_MODULE" in f.read()
+    except (FileNotFoundError, IsADirectoryError, PermissionError):
+        return False


 def get_default_project(path=None):
@@ -177,4 +399,50 @@ def get_default_project(path=None):
     2. One of the following files: ``setup.py``, ``.git``, ``.hg``,
        ``requirements.txt`` and ``MANIFEST.in``.
     """
-    pass
+    if path is None:
+        path = Path.cwd()
+    elif isinstance(path, str):
+        path = Path(path)
+
+    check = path.absolute()
+    probable_path = None
+    first_no_init_file = None
+    for dir in chain([check], check.parents):
+        try:
+            return Project.load(dir)
+        except (FileNotFoundError, IsADirectoryError, PermissionError):
+            pass
+        except NotADirectoryError:
+            continue
+
+        if first_no_init_file is None:
+            if dir.joinpath('__init__.py').exists():
+                # If an __init__.py exists, in 99% of cases it's just a
+                # Python package and the project sits at least one level above.
+                continue
+            elif not dir.is_file():
+                first_no_init_file = dir
+
+        if _is_django_path(dir):
+            project = Project(dir)
+            project._django = True
+            return project
+
+        if probable_path is None and _is_potential_project(dir):
+            probable_path = dir
+
+    if probable_path is not None:
+        return Project(probable_path)
+
+    if first_no_init_file is not None:
+        return Project(first_no_init_file)
+
+    curdir = path if path.is_dir() else path.parent
+    return Project(curdir)
+
+
+def _remove_imports(names):
+    return [
+        n for n in names
+        if n.tree_name is None or n.api_type not in ('module', 'namespace')
+    ]
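
A usage sketch (the paths below are hypothetical):

    import jedi

    project = jedi.get_default_project()  # walks upwards from the cwd
    print(project.path)

    project = jedi.Project('/path/to/repo', added_sys_path=('/path/to/repo/src',))
    project.save()  # writes /path/to/repo/.jedi/project.json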
diff --git a/jedi/api/refactoring/extract.py b/jedi/api/refactoring/extract.py
index 1cdeefac..6e7df7e1 100644
--- a/jedi/api/refactoring/extract.py
+++ b/jedi/api/refactoring/extract.py
@@ -1,21 +1,43 @@
 from textwrap import dedent
+
 from parso import split_lines
+
 from jedi import debug
 from jedi.api.exceptions import RefactoringError
 from jedi.api.refactoring import Refactoring, EXPRESSION_PARTS
 from jedi.common import indent_block
 from jedi.parser_utils import function_is_classmethod, function_is_staticmethod
-_DEFINITION_SCOPES = 'suite', 'file_input'
-_VARIABLE_EXCTRACTABLE = (EXPRESSION_PARTS +
-    'atom testlist_star_expr testlist test lambdef lambdef_nocond keyword name number string fstring'
-    .split())
+
+
+_DEFINITION_SCOPES = ('suite', 'file_input')
+_VARIABLE_EXCTRACTABLE = EXPRESSION_PARTS + \
+    ('atom testlist_star_expr testlist test lambdef lambdef_nocond '
+     'keyword name number string fstring').split()
+
+
+def extract_variable(inference_state, path, module_node, name, pos, until_pos):
+    nodes = _find_nodes(module_node, pos, until_pos)
+    debug.dbg('Extracting nodes: %s', nodes)
+
+    is_expression, message = _is_expression_with_error(nodes)
+    if not is_expression:
+        raise RefactoringError(message)
+
+    generated_code = name + ' = ' + _expression_nodes_to_string(nodes)
+    file_to_node_changes = {path: _replace(nodes, name, generated_code, pos)}
+    return Refactoring(inference_state, file_to_node_changes)
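
A sketch of the public entry point for this (expected output shown roughly):

    import jedi

    script = jedi.Script('x = 1 * 2 + 3\n', path='example.py')
    refactoring = script.extract_variable(
        1, 8, until_line=1, until_column=13, new_name='y')
    print(refactoring.get_diff())
    # y = 2 + 3
    # x = 1 * y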


 def _is_expression_with_error(nodes):
     """
     Returns a tuple (is_expression, error_string).
     """
-    pass
+    if any(node.type == 'name' and node.is_definition() for node in nodes):
+        return False, 'Cannot extract a name that defines something'
+
+    if nodes[0].type not in _VARIABLE_EXCTRACTABLE:
+        return False, 'Cannot extract a "%s"' % nodes[0].type
+    return True, ''


 def _find_nodes(module_node, pos, until_pos):
@@ -23,7 +45,98 @@ def _find_nodes(module_node, pos, until_pos):
     Looks up a module and tries to find the appropriate amount of nodes that
     are in there.
     """
-    pass
+    start_node = module_node.get_leaf_for_position(pos, include_prefixes=True)
+
+    if until_pos is None:
+        if start_node.type == 'operator':
+            next_leaf = start_node.get_next_leaf()
+            if next_leaf is not None and next_leaf.start_pos == pos:
+                start_node = next_leaf
+
+        if _is_not_extractable_syntax(start_node):
+            start_node = start_node.parent
+
+        if start_node.parent.type == 'trailer':
+            start_node = start_node.parent.parent
+        while start_node.parent.type in EXPRESSION_PARTS:
+            start_node = start_node.parent
+
+        nodes = [start_node]
+    else:
+        # Get the next leaf if we are at the end of a leaf
+        if start_node.end_pos == pos:
+            next_leaf = start_node.get_next_leaf()
+            if next_leaf is not None:
+                start_node = next_leaf
+
+        # Some syntax is not extractable on its own; just use its parent
+        if _is_not_extractable_syntax(start_node):
+            start_node = start_node.parent
+
+        # Find the end
+        end_leaf = module_node.get_leaf_for_position(until_pos, include_prefixes=True)
+        if end_leaf.start_pos > until_pos:
+            end_leaf = end_leaf.get_previous_leaf()
+            if end_leaf is None:
+                raise RefactoringError('Cannot extract anything from that')
+
+        parent_node = start_node
+        while parent_node.end_pos < end_leaf.end_pos:
+            parent_node = parent_node.parent
+
+        nodes = _remove_unwanted_expression_nodes(parent_node, pos, until_pos)
+
+    # If the user marks just a return statement, we return the expression
+    # instead of the whole statement, because the user obviously wants to
+    # extract that part.
+    if len(nodes) == 1 and start_node.type in ('return_stmt', 'yield_expr'):
+        return [nodes[0].children[1]]
+    return nodes
+
+
+def _replace(nodes, expression_replacement, extracted, pos,
+             insert_before_leaf=None, remaining_prefix=None):
+    # Now try to replace the nodes found with a variable and move the code
+    # before the current statement.
+    definition = _get_parent_definition(nodes[0])
+    if insert_before_leaf is None:
+        insert_before_leaf = definition.get_first_leaf()
+    first_node_leaf = nodes[0].get_first_leaf()
+
+    lines = split_lines(insert_before_leaf.prefix, keepends=True)
+    if first_node_leaf is insert_before_leaf:
+        if remaining_prefix is not None:
+            # The remaining prefix has already been calculated.
+            lines[:-1] = remaining_prefix
+    lines[-1:-1] = [indent_block(extracted, lines[-1]) + '\n']
+    extracted_prefix = ''.join(lines)
+
+    replacement_dct = {}
+    if first_node_leaf is insert_before_leaf:
+        replacement_dct[nodes[0]] = extracted_prefix + expression_replacement
+    else:
+        if remaining_prefix is None:
+            p = first_node_leaf.prefix
+        else:
+            p = remaining_prefix + _get_indentation(nodes[0])
+        replacement_dct[nodes[0]] = p + expression_replacement
+        replacement_dct[insert_before_leaf] = extracted_prefix + insert_before_leaf.value
+
+    for node in nodes[1:]:
+        replacement_dct[node] = ''
+    return replacement_dct
+
+
+def _expression_nodes_to_string(nodes):
+    return ''.join(n.get_code(include_prefix=i != 0) for i, n in enumerate(nodes))
+
+
+def _suite_nodes_to_string(nodes, pos):
+    n = nodes[0]
+    prefix, part_of_code = _split_prefix_at(n.get_first_leaf(), pos[0] - 1)
+    code = part_of_code + n.get_code(include_prefix=False) \
+        + ''.join(n.get_code() for n in nodes[1:])
+    return prefix, code


 def _split_prefix_at(leaf, until_line):
@@ -31,14 +144,25 @@ def _split_prefix_at(leaf, until_line):
     Returns a tuple of the leaf's prefix, split at the until_line
     position.
     """
-    pass
+    # "second" refers to the second part of the returned tuple
+    second_line_count = leaf.start_pos[0] - until_line
+    lines = split_lines(leaf.prefix, keepends=True)
+    return ''.join(lines[:-second_line_count]), ''.join(lines[-second_line_count:])
+
+
+def _get_indentation(node):
+    return split_lines(node.get_first_leaf().prefix)[-1]


 def _get_parent_definition(node):
     """
     Returns the statement where a node is defined.
     """
-    pass
+    while node is not None:
+        if node.parent.type in _DEFINITION_SCOPES:
+            return node
+        node = node.parent
+    raise NotImplementedError('We should never even get here')


 def _remove_unwanted_expression_nodes(parent_node, pos, until_pos):
@@ -46,13 +170,217 @@ def _remove_unwanted_expression_nodes(parent_node, pos, until_pos):
     This function makes it so for `1 * 2 + 3` you can extract `2 + 3`, even
     though it is not part of the expression.
     """
-    pass
+    typ = parent_node.type
+    is_suite_part = typ in ('suite', 'file_input')
+    if typ in EXPRESSION_PARTS or is_suite_part:
+        nodes = parent_node.children
+        for i, n in enumerate(nodes):
+            if n.end_pos > pos:
+                start_index = i
+                if n.type == 'operator':
+                    start_index -= 1
+                break
+        for i, n in reversed(list(enumerate(nodes))):
+            if n.start_pos < until_pos:
+                end_index = i
+                if n.type == 'operator':
+                    end_index += 1
+
+                # Something like `not foo or bar` should not be cut after `not`
+                for n2 in nodes[i:]:
+                    if _is_not_extractable_syntax(n2):
+                        end_index += 1
+                    else:
+                        break
+                break
+        nodes = nodes[start_index:end_index + 1]
+        if not is_suite_part:
+            nodes[0:1] = _remove_unwanted_expression_nodes(nodes[0], pos, until_pos)
+            nodes[-1:] = _remove_unwanted_expression_nodes(nodes[-1], pos, until_pos)
+        return nodes
+    return [parent_node]
+

+def _is_not_extractable_syntax(node):
+    return node.type == 'operator' \
+        or node.type == 'keyword' and node.value not in ('None', 'True', 'False')

-def _find_needed_output_variables(context, search_node, at_least_pos,
-    return_variables):
+
+def extract_function(inference_state, path, module_context, name, pos, until_pos):
+    nodes = _find_nodes(module_context.tree_node, pos, until_pos)
+    assert len(nodes)
+
+    is_expression, _ = _is_expression_with_error(nodes)
+    context = module_context.create_context(nodes[0])
+    is_bound_method = context.is_bound_method()
+    params, return_variables = list(_find_inputs_and_outputs(module_context, context, nodes))
+
+    # Find variables
+    # Is a class method / method
+    if context.is_module():
+        insert_before_leaf = None  # Leaf will be determined later
+    else:
+        node = _get_code_insertion_node(context.tree_node, is_bound_method)
+        insert_before_leaf = node.get_first_leaf()
+    if is_expression:
+        code_block = 'return ' + _expression_nodes_to_string(nodes) + '\n'
+        remaining_prefix = None
+        has_ending_return_stmt = False
+    else:
+        has_ending_return_stmt = _is_node_ending_return_stmt(nodes[-1])
+        if not has_ending_return_stmt:
+            # Find the actually used variables (of the defined ones). If none are
+            # used (e.g. if the range covers the whole function), return the last
+            # defined variable.
+            return_variables = list(_find_needed_output_variables(
+                context,
+                nodes[0].parent,
+                nodes[-1].end_pos,
+                return_variables
+            )) or [return_variables[-1]] if return_variables else []
+
+        remaining_prefix, code_block = _suite_nodes_to_string(nodes, pos)
+        after_leaf = nodes[-1].get_next_leaf()
+        first, second = _split_prefix_at(after_leaf, until_pos[0])
+        code_block += first
+
+        code_block = dedent(code_block)
+        if not has_ending_return_stmt:
+            output_var_str = ', '.join(return_variables)
+            code_block += 'return ' + output_var_str + '\n'
+
+    # Check if we have to raise RefactoringError
+    _check_for_non_extractables(nodes[:-1] if has_ending_return_stmt else nodes)
+
+    decorator = ''
+    self_param = None
+    if is_bound_method:
+        if not function_is_staticmethod(context.tree_node):
+            function_param_names = context.get_value().get_param_names()
+            if len(function_param_names):
+                self_param = function_param_names[0].string_name
+                params = [p for p in params if p != self_param]
+
+        if function_is_classmethod(context.tree_node):
+            decorator = '@classmethod\n'
+    else:
+        code_block += '\n'
+
+    function_code = '%sdef %s(%s):\n%s' % (
+        decorator,
+        name,
+        ', '.join(params if self_param is None else [self_param] + params),
+        indent_block(code_block)
+    )
+
+    function_call = '%s(%s)' % (
+        ('' if self_param is None else self_param + '.') + name,
+        ', '.join(params)
+    )
+    if is_expression:
+        replacement = function_call
+    else:
+        if has_ending_return_stmt:
+            replacement = 'return ' + function_call + '\n'
+        else:
+            replacement = output_var_str + ' = ' + function_call + '\n'
+
+    replacement_dct = _replace(nodes, replacement, function_code, pos,
+                               insert_before_leaf, remaining_prefix)
+    if not is_expression:
+        replacement_dct[after_leaf] = second + after_leaf.value
+    file_to_node_changes = {path: replacement_dct}
+    return Refactoring(inference_state, file_to_node_changes)
+
+
+def _check_for_non_extractables(nodes):
+    for n in nodes:
+        try:
+            children = n.children
+        except AttributeError:
+            if n.value == 'return':
+                raise RefactoringError(
+                    'Can only extract return statements if they are at the end.')
+            if n.value == 'yield':
+                raise RefactoringError('Cannot extract yield statements.')
+        else:
+            _check_for_non_extractables(children)
+
+
+def _is_name_input(module_context, names, first, last):
+    for name in names:
+        if name.api_type == 'param' or not name.parent_context.is_module():
+            if name.get_root_context() is not module_context:
+                return True
+            if name.start_pos is None or not (first <= name.start_pos < last):
+                return True
+    return False
+
+
+def _find_inputs_and_outputs(module_context, context, nodes):
+    first = nodes[0].start_pos
+    last = nodes[-1].end_pos
+
+    inputs = []
+    outputs = []
+    for name in _find_non_global_names(nodes):
+        if name.is_definition():
+            if name not in outputs:
+                outputs.append(name.value)
+        else:
+            if name.value not in inputs:
+                name_definitions = context.goto(name, name.start_pos)
+                if not name_definitions \
+                        or _is_name_input(module_context, name_definitions, first, last):
+                    inputs.append(name.value)
+
+    # Check if outputs are really needed:
+    return inputs, outputs
+
+
+def _find_non_global_names(nodes):
+    for node in nodes:
+        try:
+            children = node.children
+        except AttributeError:
+            if node.type == 'name':
+                yield node
+        else:
+            # We only want to check foo in foo.bar
+            if node.type == 'trailer' and node.children[0] == '.':
+                continue
+
+            yield from _find_non_global_names(children)
+
+
+def _get_code_insertion_node(node, is_bound_method):
+    if not is_bound_method or function_is_staticmethod(node):
+        while node.parent.type != 'file_input':
+            node = node.parent
+
+    while node.parent.type in ('async_funcdef', 'decorated', 'async_stmt'):
+        node = node.parent
+    return node
+
+
+def _find_needed_output_variables(context, search_node, at_least_pos, return_variables):
     """
     Searches everything after at_least_pos in a node and checks if any of the
     return_variables are used in there and returns those.
     """
-    pass
+    for node in search_node.children:
+        if node.start_pos < at_least_pos:
+            continue
+
+        return_variables = set(return_variables)
+        for name in _find_non_global_names([node]):
+            if not name.is_definition() and name.value in return_variables:
+                return_variables.remove(name.value)
+                yield name.value
+
+
+def _is_node_ending_return_stmt(node):
+    t = node.type
+    if t == 'simple_stmt':
+        return _is_node_ending_return_stmt(node.children[0])
+    return t == 'return_stmt'
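
The restored extract.py is driven through jedi's public refactoring API. A
minimal sketch (the file name, positions and new variable name are invented;
Script.extract_variable/Script.extract_function are the documented entry
points that call into this module):

    import jedi

    source = "def total(price):\n    return price * 1.2 + 5\n"
    script = jedi.Script(source, path='example.py')
    # Extract `price * 1.2` (line 2, columns 11-22) into a new variable.
    refactoring = script.extract_variable(
        line=2, column=11, until_line=2, until_column=22, new_name='taxed',
    )
    print(refactoring.get_diff())
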
diff --git a/jedi/api/replstartup.py b/jedi/api/replstartup.py
index 0de20f06..e0f23d19 100644
--- a/jedi/api/replstartup.py
+++ b/jedi/api/replstartup.py
@@ -19,6 +19,11 @@ Then you will be able to use Jedi completer in your Python interpreter::
 """
 import jedi.utils
 from jedi import __version__ as __jedi_version__
+
 print('REPL completion using Jedi %s' % __jedi_version__)
 jedi.utils.setup_readline(fuzzy=False)
+
 del jedi
+
+# Note: try not to do many things here, as it will contaminate the global
+# namespace of the interpreter.
diff --git a/jedi/api/strings.py b/jedi/api/strings.py
index 498532e1..7850142c 100644
--- a/jedi/api/strings.py
+++ b/jedi/api/strings.py
@@ -8,13 +8,104 @@ and other completions is mostly that this module doesn't return defined
 names in a module, but pretty much an arbitrary string.
 """
 import re
+
 from jedi.inference.names import AbstractArbitraryName
 from jedi.inference.helpers import infer_call_of_leaf
 from jedi.api.classes import Completion
 from jedi.parser_utils import cut_value_at_position
+
 _sentinel = object()


 class StringName(AbstractArbitraryName):
     api_type = 'string'
     is_value_name = False
+
+
+def complete_dict(module_context, code_lines, leaf, position, string, fuzzy):
+    bracket_leaf = leaf
+    if bracket_leaf != '[':
+        bracket_leaf = leaf.get_previous_leaf()
+
+    cut_end_quote = ''
+    if string:
+        cut_end_quote = get_quote_ending(string, code_lines, position, invert_result=True)
+
+    if bracket_leaf == '[':
+        if string is None and leaf is not bracket_leaf:
+            string = cut_value_at_position(leaf, position)
+
+        context = module_context.create_context(bracket_leaf)
+
+        before_node = before_bracket_leaf = bracket_leaf.get_previous_leaf()
+        if before_node in (')', ']', '}'):
+            before_node = before_node.parent
+        if before_node.type in ('atom', 'trailer', 'name'):
+            values = infer_call_of_leaf(context, before_bracket_leaf)
+            return list(_completions_for_dicts(
+                module_context.inference_state,
+                values,
+                '' if string is None else string,
+                cut_end_quote,
+                fuzzy=fuzzy,
+            ))
+    return []
+
+
+def _completions_for_dicts(inference_state, dicts, literal_string, cut_end_quote, fuzzy):
+    for dict_key in sorted(_get_python_keys(dicts), key=lambda x: repr(x)):
+        dict_key_str = _create_repr_string(literal_string, dict_key)
+        if dict_key_str.startswith(literal_string):
+            name = StringName(inference_state, dict_key_str[:-len(cut_end_quote) or None])
+            yield Completion(
+                inference_state,
+                name,
+                stack=None,
+                like_name_length=len(literal_string),
+                is_fuzzy=fuzzy
+            )
+
+
+def _create_repr_string(literal_string, dict_key):
+    if not isinstance(dict_key, (str, bytes)) or not literal_string:
+        return repr(dict_key)
+
+    r = repr(dict_key)
+    prefix, quote = _get_string_prefix_and_quote(literal_string)
+    if quote is None:
+        return r
+    if quote == r[0]:
+        return prefix + r
+    return prefix + quote + r[1:-1] + quote
+
+
+def _get_python_keys(dicts):
+    for dct in dicts:
+        if dct.array_type == 'dict':
+            for key in dct.get_key_values():
+                dict_key = key.get_safe_value(default=_sentinel)
+                if dict_key is not _sentinel:
+                    yield dict_key
+
+
+def _get_string_prefix_and_quote(string):
+    match = re.match(r'(\w*)("""|\'{3}|"|\')', string)
+    if match is None:
+        return None, None
+    return match.group(1), match.group(2)
+
+
+def _matches_quote_at_position(code_lines, quote, position):
+    string = code_lines[position[0] - 1][position[1]:position[1] + len(quote)]
+    return string == quote
+
+
+def get_quote_ending(string, code_lines, position, invert_result=False):
+    _, quote = _get_string_prefix_and_quote(string)
+    if quote is None:
+        return ''
+
+    # Add a quote only if it's not already there.
+    if _matches_quote_at_position(code_lines, quote, position) != invert_result:
+        return ''
+    return quote
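
complete_dict() above is what powers dictionary-key completion. A hedged
sketch through the public API (the dict literal and cursor position are
invented for the example):

    import jedi

    code = "opts = {'verbose': True, 'verify_ssl': False}\nopts['ver"
    for c in jedi.Script(code).complete(line=2, column=9):
        print(c.name)  # the quoted keys, e.g. 'verbose' and 'verify_ssl'
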
diff --git a/jedi/cache.py b/jedi/cache.py
index 1dc6e8ac..1ff45201 100644
--- a/jedi/cache.py
+++ b/jedi/cache.py
@@ -14,19 +14,34 @@ these variables are being cleaned after every API usage.
 import time
 from functools import wraps
 from typing import Any, Dict, Tuple
+
 from jedi import settings
 from parso.cache import parser_cache
+
 _time_caches: Dict[str, Dict[Any, Tuple[float, Any]]] = {}


-def clear_time_caches(delete_all: bool=False) ->None:
+def clear_time_caches(delete_all: bool = False) -> None:
     """ Jedi caches many things, that should be completed after each completion
     finishes.

     :param delete_all: Deletes also the cache that is normally not deleted,
         like parser cache, which is important for faster parsing.
     """
-    pass
+    global _time_caches
+
+    if delete_all:
+        for cache in _time_caches.values():
+            cache.clear()
+        parser_cache.clear()
+    else:
+        # normally just kill the expired entries, not all
+        for tc in _time_caches.values():
+            # check time_cache for expired entries
+            for key, (t, value) in list(tc.items()):
+                if t < time.time():
+                    # delete expired entries
+                    del tc[key]


 def signature_time_cache(time_add_setting):
@@ -38,9 +53,63 @@ def signature_time_cache(time_add_setting):

     If the given key is None, the function will not be cached.
     """
-    pass
+    def _temp(key_func):
+        dct = {}
+        _time_caches[time_add_setting] = dct
+
+        def wrapper(*args, **kwargs):
+            generator = key_func(*args, **kwargs)
+            key = next(generator)
+            try:
+                expiry, value = dct[key]
+                if expiry > time.time():
+                    return value
+            except KeyError:
+                pass
+
+            value = next(generator)
+            time_add = getattr(settings, time_add_setting)
+            if key is not None:
+                dct[key] = time.time() + time_add, value
+            return value
+        return wrapper
+    return _temp
+
+
+def time_cache(seconds):
+    def decorator(func):
+        cache = {}
+
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            key = (args, frozenset(kwargs.items()))
+            try:
+                created, result = cache[key]
+                if time.time() < created + seconds:
+                    return result
+            except KeyError:
+                pass
+            result = func(*args, **kwargs)
+            cache[key] = time.time(), result
+            return result
+
+        wrapper.clear_cache = lambda: cache.clear()
+        return wrapper
+
+    return decorator


 def memoize_method(method):
     """A normal memoize function."""
-    pass
+    @wraps(method)
+    def wrapper(self, *args, **kwargs):
+        cache_dict = self.__dict__.setdefault('_memoize_method_dct', {})
+        dct = cache_dict.setdefault(method, {})
+        key = (args, frozenset(kwargs.items()))
+        try:
+            return dct[key]
+        except KeyError:
+            result = method(self, *args, **kwargs)
+            dct[key] = result
+            return result
+    return wrapper
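
The time_cache decorator restored above keys the cache on the call arguments
and expires entries after the given number of seconds. A small usage sketch
(the function and timings are invented):

    import time
    from jedi.cache import time_cache

    @time_cache(seconds=0.5)
    def slow_double(x):
        time.sleep(0.1)
        return x * 2

    slow_double(3)              # computed, stored under ((3,), frozenset())
    slow_double(3)              # served from the cache while the entry is fresh
    time.sleep(0.6)
    slow_double(3)              # recomputed: the cached entry expired
    slow_double.clear_cache()   # manual invalidation
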
diff --git a/jedi/common.py b/jedi/common.py
index 6254a2da..eb4b4996 100644
--- a/jedi/common.py
+++ b/jedi/common.py
@@ -6,9 +6,19 @@ def monkeypatch(obj, attribute_name, new_value):
     """
     Like pytest's monkeypatch, but as a context manager.
     """
-    pass
+    old_value = getattr(obj, attribute_name)
+    try:
+        setattr(obj, attribute_name, new_value)
+        yield
+    finally:
+        setattr(obj, attribute_name, old_value)


 def indent_block(text, indention='    '):
     """This function indents a text block with a default of four spaces."""
-    pass
+    temp = ''
+    while text and text[-1] == '\n':
+        temp += text[-1]
+        text = text[:-1]
+    lines = text.split('\n')
+    return '\n'.join(map(lambda s: indention + s, lines)) + temp
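
monkeypatch() is used as a context manager (its @contextmanager decorator
sits just above this hunk) and indent_block() preserves trailing newlines. A
quick sketch; the patched setting is only an example attribute:

    from jedi import settings
    from jedi.common import indent_block, monkeypatch

    # Temporarily override an attribute; it is restored even if the body raises.
    with monkeypatch(settings, 'case_insensitive_completion', False):
        pass  # code that needs case-sensitive completion

    print(indent_block('first\nsecond\n'), end='')
    # Both lines come back indented by four spaces; the trailing newline is kept.
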
diff --git a/jedi/debug.py b/jedi/debug.py
index a97c4eef..99a90601 100644
--- a/jedi/debug.py
+++ b/jedi/debug.py
@@ -2,6 +2,7 @@ import os
 import time
 from contextlib import contextmanager
 from typing import Callable, Optional
+
 _inited = False


@@ -12,53 +13,112 @@ def _lazy_colorama_init():

     This version of the function does nothing.
     """
-    pass


 try:
     if os.name == 'nt':
+        # Does not work on Windows, as pyreadline and colorama interfere
         raise ImportError
     else:
-        from colorama import Fore, init
+        # Use colorama for nicer console output.
+        from colorama import Fore, init  # type: ignore[import]
         from colorama import initialise

-        def _lazy_colorama_init():
+        def _lazy_colorama_init():  # noqa: F811
             """
             Lazily init colorama if necessary, not to screw up stdout if
             debug is not enabled.

             This version of the function does init colorama.
             """
-            pass
-except ImportError:
+            global _inited
+            if not _inited:
+                # pytest resets the stream at the end, which causes trouble.
+                # Since the stream is reset automatically after every output,
+                # we don't need this.
+                initialise.atexit_done = True
+                try:
+                    init(strip=False)
+                except Exception:
+                    # Colorama fails when initializing under vim and is buggy
+                    # in version 0.3.6.
+                    pass
+            _inited = True

-
-    class Fore:
+except ImportError:
+    class Fore:  # type: ignore[no-redef]
         RED = ''
         GREEN = ''
         YELLOW = ''
         MAGENTA = ''
         RESET = ''
         BLUE = ''
+
 NOTICE = object()
 WARNING = object()
 SPEED = object()
+
 enable_speed = False
 enable_warning = False
 enable_notice = False
+
+# callback, interface: level, str
 debug_function: Optional[Callable[[str, str], None]] = None
 _debug_indent = 0
 _start_time = time.time()


+def reset_time():
+    global _start_time, _debug_indent
+    _start_time = time.time()
+    _debug_indent = 0
+
+
 def increase_indent(func):
     """Decorator for makin """
-    pass
+    def wrapper(*args, **kwargs):
+        with increase_indent_cm():
+            return func(*args, **kwargs)
+    return wrapper
+
+
+@contextmanager
+def increase_indent_cm(title=None, color='MAGENTA'):
+    global _debug_indent
+    if title:
+        dbg('Start: ' + title, color=color)
+    _debug_indent += 1
+    try:
+        yield
+    finally:
+        _debug_indent -= 1
+        if title:
+            dbg('End: ' + title, color=color)


 def dbg(message, *args, color='GREEN'):
     """ Looks at the stack, to see if a debug message should be printed. """
-    pass
+    assert color
+
+    if debug_function and enable_notice:
+        i = ' ' * _debug_indent
+        _lazy_colorama_init()
+        debug_function(color, i + 'dbg: ' + message % tuple(repr(a) for a in args))
+
+
+def warning(message, *args, format=True):
+    if debug_function and enable_warning:
+        i = ' ' * _debug_indent
+        if format:
+            message = message % tuple(repr(a) for a in args)
+        debug_function('RED', i + 'warning: ' + message)
+
+
+def speed(name):
+    if debug_function and enable_speed:
+        now = time.time()
+        i = ' ' * _debug_indent
+        debug_function('YELLOW', i + 'speed: ' + '%s %s' % (name, now - _start_time))


 def print_to_stdout(color, str_out):
@@ -67,4 +127,6 @@ def print_to_stdout(color, str_out):

     :param str color: A string that is an attribute of ``colorama.Fore``.
     """
-    pass
+    col = getattr(Fore, color)
+    _lazy_colorama_init()
+    print(col + str_out + Fore.RESET)
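
The debug hooks above are activated by assigning debug_function and toggling
the enable flags; the public jedi.set_debug_function() wrapper does the same
in one call. For example:

    from jedi import debug

    debug.debug_function = debug.print_to_stdout
    debug.enable_notice = True    # dbg() messages
    debug.enable_warning = True   # warning() messages

    debug.dbg('inferring %s', 'some_node')
    with debug.increase_indent_cm('outer step'):
        debug.dbg('nested message')  # printed one indentation level deeper
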
diff --git a/jedi/file_io.py b/jedi/file_io.py
index 7258df1e..ead17335 100644
--- a/jedi/file_io.py
+++ b/jedi/file_io.py
@@ -1,31 +1,79 @@
 import os
+
 from parso import file_io


 class AbstractFolderIO:
-
     def __init__(self, path):
         self.path = path

+    def get_base_name(self):
+        raise NotImplementedError
+
+    def list(self):
+        raise NotImplementedError
+
+    def get_file_io(self, name):
+        raise NotImplementedError
+
+    def get_parent_folder(self):
+        raise NotImplementedError
+
     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self.path)


 class FolderIO(AbstractFolderIO):
-    pass
+    def get_base_name(self):
+        return os.path.basename(self.path)
+
+    def list(self):
+        return os.listdir(self.path)
+
+    def get_file_io(self, name):
+        return FileIO(os.path.join(self.path, name))
+
+    def get_parent_folder(self):
+        return FolderIO(os.path.dirname(self.path))
+
+    def walk(self):
+        for root, dirs, files in os.walk(self.path):
+            root_folder_io = FolderIO(root)
+            original_folder_ios = [FolderIO(os.path.join(root, d)) for d in dirs]
+            modified_folder_ios = list(original_folder_ios)
+            yield (
+                root_folder_io,
+                modified_folder_ios,
+                [FileIO(os.path.join(root, f)) for f in files],
+            )
+            modified_iterator = iter(reversed(modified_folder_ios))
+            current = next(modified_iterator, None)
+            i = len(original_folder_ios)
+            for folder_io in reversed(original_folder_ios):
+                i -= 1   # Basically enumerate but reversed
+                if current is folder_io:
+                    current = next(modified_iterator, None)
+                else:
+                    del dirs[i]


 class FileIOFolderMixin:
-    pass
+    def get_parent_folder(self):
+        return FolderIO(os.path.dirname(self.path))


 class ZipFileIO(file_io.KnownContentFileIO, FileIOFolderMixin):
     """For .zip and .egg archives"""
-
     def __init__(self, path, code, zip_path):
         super().__init__(path, code)
         self._zip_path = zip_path

+    def get_last_modified(self):
+        try:
+            return os.path.getmtime(self._zip_path)
+        except (FileNotFoundError, PermissionError, NotADirectoryError):
+            return None
+

 class FileIO(file_io.FileIO, FileIOFolderMixin):
     pass
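
FolderIO.walk() mirrors os.walk(): removing entries from the yielded folder
list prunes those subtrees, which is what the identity bookkeeping after the
yield implements. A sketch (the skipped directory names are examples):

    from jedi.file_io import FolderIO

    for folder_io, folder_ios, file_ios in FolderIO('.').walk():
        # Keep the same FolderIO objects; walk() compares them by identity.
        folder_ios[:] = [f for f in folder_ios
                         if f.get_base_name() not in ('.git', '__pycache__')]
        for file_io in file_ios:
            print(file_io.path)
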
diff --git a/jedi/inference/analysis.py b/jedi/inference/analysis.py
index a45cec67..c272a9cb 100644
--- a/jedi/inference/analysis.py
+++ b/jedi/inference/analysis.py
@@ -2,26 +2,31 @@
 Module for static analysis.
 """
 from parso.python import tree
+
 from jedi import debug
 from jedi.inference.helpers import is_string
-CODES = {'attribute-error': (1, AttributeError, 'Potential AttributeError.'
-    ), 'name-error': (2, NameError, 'Potential NameError.'), 'import-error':
-    (3, ImportError, 'Potential ImportError.'),
+
+
+CODES = {
+    'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
+    'name-error': (2, NameError, 'Potential NameError.'),
+    'import-error': (3, ImportError, 'Potential ImportError.'),
     'type-error-too-many-arguments': (4, TypeError, None),
     'type-error-too-few-arguments': (5, TypeError, None),
     'type-error-keyword-argument': (6, TypeError, None),
     'type-error-multiple-values': (7, TypeError, None),
-    'type-error-star-star': (8, TypeError, None), 'type-error-star': (9,
-    TypeError, None), 'type-error-operation': (10, TypeError, None),
+    'type-error-star-star': (8, TypeError, None),
+    'type-error-star': (9, TypeError, None),
+    'type-error-operation': (10, TypeError, None),
     'type-error-not-iterable': (11, TypeError, None),
     'type-error-isinstance': (12, TypeError, None),
     'type-error-not-subscriptable': (13, TypeError, None),
     'value-error-too-many-values': (14, ValueError, None),
-    'value-error-too-few-values': (15, ValueError, None)}
+    'value-error-too-few-values': (15, ValueError, None),
+}


 class Error:
-
     def __init__(self, name, module_path, start_pos, message=None):
         self.path = module_path
         self._start_pos = start_pos
@@ -30,13 +35,27 @@ class Error:
             message = CODES[self.name][2]
         self.message = message

+    @property
+    def line(self):
+        return self._start_pos[0]
+
+    @property
+    def column(self):
+        return self._start_pos[1]
+
+    @property
+    def code(self):
+        # The first letter of the class name: 'E' for Error, 'W' for Warning.
+        first = self.__class__.__name__[0]
+        return first + str(CODES[self.name][0])
+
     def __str__(self):
-        return '%s:%s:%s: %s %s' % (self.path, self.line, self.column, self
-            .code, self.message)
+        return '%s:%s:%s: %s %s' % (self.path, self.line, self.column,
+                                    self.code, self.message)

     def __eq__(self, other):
-        return (self.path == other.path and self.name == other.name and 
-            self._start_pos == other._start_pos)
+        return (self.path == other.path and self.name == other.name
+                and self._start_pos == other._start_pos)

     def __ne__(self, other):
         return not self.__eq__(other)
@@ -45,23 +64,66 @@ class Error:
         return hash((self.path, self._start_pos, self.name))

     def __repr__(self):
-        return '<%s %s: %s@%s,%s>' % (self.__class__.__name__, self.name,
-            self.path, self._start_pos[0], self._start_pos[1])
+        return '<%s %s: %s@%s,%s>' % (self.__class__.__name__,
+                                      self.name, self.path,
+                                      self._start_pos[0], self._start_pos[1])


 class Warning(Error):
     pass


+def add(node_context, error_name, node, message=None, typ=Error, payload=None):
+    exception = CODES[error_name][1]
+    if _check_for_exception_catch(node_context, node, exception, payload):
+        return
+
+    # TODO this path is probably not right
+    module_context = node_context.get_root_context()
+    module_path = module_context.py__file__()
+    issue_instance = typ(error_name, module_path, node.start_pos, message)
+    debug.warning(str(issue_instance), format=False)
+    node_context.inference_state.analysis.append(issue_instance)
+    return issue_instance
+
+
 def _check_for_setattr(instance):
     """
     Check if there's any setattr method inside an instance. If so, return True.
     """
-    pass
+    module = instance.get_root_context()
+    node = module.tree_node
+    if node is None:
+        # If it's a compiled module or doesn't have a tree_node
+        return False
+
+    try:
+        stmt_names = node.get_used_names()['setattr']
+    except KeyError:
+        return False
+
+    return any(node.start_pos < n.start_pos < node.end_pos
+               # Check if it's a function called setattr.
+               and not (n.parent.type == 'funcdef' and n.parent.name == n)
+               for n in stmt_names)


-def _check_for_exception_catch(node_context, jedi_name, exception, payload=None
-    ):
+def add_attribute_error(name_context, lookup_value, name):
+    message = ('AttributeError: %s has no attribute %s.' % (lookup_value, name))
+    # Check for __getattr__/__getattribute__ existence and issue a warning
+    # instead of an error, if that happens.
+    typ = Error
+    if lookup_value.is_instance() and not lookup_value.is_compiled():
+        # TODO maybe make a warning for __getattr__/__getattribute__
+
+        if _check_for_setattr(lookup_value):
+            typ = Warning
+
+    payload = lookup_value, name
+    add(name_context, 'attribute-error', name, message, typ, payload)
+
+
+def _check_for_exception_catch(node_context, jedi_name, exception, payload=None):
     """
     Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
     doesn't count as an error (if equal to `exception`).
@@ -69,4 +131,83 @@ def _check_for_exception_catch(node_context, jedi_name, exception, payload=None
     it.
     Returns True if the exception was caught.
     """
-    pass
+    def check_match(cls, exception):
+        if not cls.is_class():
+            return False
+
+        for python_cls in exception.mro():
+            if cls.py__name__() == python_cls.__name__ \
+                    and cls.parent_context.is_builtins_module():
+                return True
+        return False
+
+    def check_try_for_except(obj, exception):
+        # Only nodes in try
+        iterator = iter(obj.children)
+        for branch_type in iterator:
+            next(iterator)  # The colon
+            suite = next(iterator)
+            if branch_type == 'try' \
+                    and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos):
+                return False
+
+        for node in obj.get_except_clause_tests():
+            if node is None:
+                return True  # An exception block that catches everything.
+            else:
+                except_classes = node_context.infer_node(node)
+                for cls in except_classes:
+                    from jedi.inference.value import iterable
+                    if isinstance(cls, iterable.Sequence) and \
+                            cls.array_type == 'tuple':
+                        # multiple exceptions
+                        for lazy_value in cls.py__iter__():
+                            for typ in lazy_value.infer():
+                                if check_match(typ, exception):
+                                    return True
+                    else:
+                        if check_match(cls, exception):
+                            return True
+
+    def check_hasattr(node, suite):
+        try:
+            assert suite.start_pos <= jedi_name.start_pos < suite.end_pos
+            assert node.type in ('power', 'atom_expr')
+            base = node.children[0]
+            assert base.type == 'name' and base.value == 'hasattr'
+            trailer = node.children[1]
+            assert trailer.type == 'trailer'
+            arglist = trailer.children[1]
+            assert arglist.type == 'arglist'
+            from jedi.inference.arguments import TreeArguments
+            args = TreeArguments(node_context.inference_state, node_context, arglist)
+            unpacked_args = list(args.unpack())
+            # Arguments should be very simple
+            assert len(unpacked_args) == 2
+
+            # Check name
+            key, lazy_value = unpacked_args[1]
+            names = list(lazy_value.infer())
+            assert len(names) == 1 and is_string(names[0])
+            assert names[0].get_safe_value() == payload[1].value
+
+            # Check objects
+            key, lazy_value = unpacked_args[0]
+            objects = lazy_value.infer()
+            return payload[0] in objects
+        except AssertionError:
+            return False
+
+    obj = jedi_name
+    while obj is not None and not isinstance(obj, (tree.Function, tree.Class)):
+        if isinstance(obj, tree.Flow):
+            # try/except catch check
+            if obj.type == 'try_stmt' and check_try_for_except(obj, exception):
+                return True
+            # hasattr check
+            if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'):
+                if check_hasattr(obj.children[1], obj.children[3]):
+                    return True
+        obj = obj.parent
+
+    return False
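
_check_for_exception_catch() is what keeps add() from reporting issues inside
guarded code. The two patterns it recognizes, sketched as the kind of source
being analyzed (the names are placeholders):

    obj = object()

    # 1. try/except around the access: the walker finds the enclosing
    #    try_stmt, infers AttributeError from the except clause and
    #    suppresses the potential attribute-error.
    try:
        value = obj.missing
    except AttributeError:
        value = None

    # 2. hasattr() guard in an if/while statement: check_hasattr() matches
    #    the call shape and compares the checked name with the attribute.
    if hasattr(obj, 'missing'):
        value = obj.missing
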
diff --git a/jedi/inference/arguments.py b/jedi/inference/arguments.py
index 95120843..8602f494 100644
--- a/jedi/inference/arguments.py
+++ b/jedi/inference/arguments.py
@@ -1,10 +1,13 @@
 import re
 from itertools import zip_longest
+
 from parso.python import tree
+
 from jedi import debug
 from jedi.inference.utils import PushBackIterator
 from jedi.inference import analysis
-from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, LazyTreeValue, get_merged_lazy_value
+from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \
+    LazyTreeValue, get_merged_lazy_value
 from jedi.inference.names import ParamName, TreeNameDefinition, AnonymousParamName
 from jedi.inference.base_value import NO_VALUES, ValueSet, ContextualizedNode
 from jedi.inference.value import iterable
@@ -13,7 +16,19 @@ from jedi.inference.cache import inference_state_as_method_param_cache

 def try_iter_content(types, depth=0):
     """Helper method for static analysis."""
-    pass
+    if depth > 10:
+        # It's possible that a loop has references on itself (especially with
+        # CompiledValue). Therefore don't loop infinitely.
+        return
+
+    for typ in types:
+        try:
+            f = typ.py__iter__
+        except AttributeError:
+            pass
+        else:
+            for lazy_value in f():
+                try_iter_content(lazy_value.infer(), depth + 1)


 class ParamIssue(Exception):
@@ -31,16 +46,90 @@ def repack_with_argument_clinic(clinic_string):
         str.split.__text_signature__
         # Results in: '($self, /, sep=None, maxsplit=-1)'
     """
-    pass
+    def decorator(func):
+        def wrapper(value, arguments):
+            try:
+                args = tuple(iterate_argument_clinic(
+                    value.inference_state,
+                    arguments,
+                    clinic_string,
+                ))
+            except ParamIssue:
+                return NO_VALUES
+            else:
+                return func(value, *args)
+
+        return wrapper
+    return decorator


 def iterate_argument_clinic(inference_state, arguments, clinic_string):
     """Uses a list with argument clinic information (see PEP 436)."""
-    pass
+    clinic_args = list(_parse_argument_clinic(clinic_string))
+
+    iterator = PushBackIterator(arguments.unpack())
+    for i, (name, optional, allow_kwargs, stars) in enumerate(clinic_args):
+        if stars == 1:
+            lazy_values = []
+            for key, argument in iterator:
+                if key is not None:
+                    iterator.push_back((key, argument))
+                    break
+
+                lazy_values.append(argument)
+            yield ValueSet([iterable.FakeTuple(inference_state, lazy_values)])
+            continue
+        elif stars == 2:
+            raise NotImplementedError()
+        key, argument = next(iterator, (None, None))
+        if key is not None:
+            debug.warning('Keyword arguments in argument clinic are currently not supported.')
+            raise ParamIssue
+        if argument is None and not optional:
+            debug.warning('TypeError: %s expected at least %s arguments, got %s',
+                          name, len(clinic_args), i)
+            raise ParamIssue
+
+        value_set = NO_VALUES if argument is None else argument.infer()
+
+        if not value_set and not optional:
+            # For the stdlib we always want values. If we don't get them,
+            # that's ok, maybe something is too hard to resolve, however,
+            # we will not proceed with the type inference of that function.
+            debug.warning('argument_clinic "%s" not resolvable.', name)
+            raise ParamIssue
+        yield value_set
+
+
+def _parse_argument_clinic(string):
+    allow_kwargs = False
+    optional = False
+    while string:
+        # Optional arguments have to begin with a bracket. And should always be
+        # at the end of the arguments. This is therefore not a proper argument
+        # clinic implementation. `range()` for example allows an optional start
+        # value at the beginning.
+        match = re.match(r'(?:(?:(\[),? ?|, ?|)(\**\w+)|, ?/)\]*', string)
+        string = string[len(match.group(0)):]
+        if not match.group(2):  # A slash -> allow named arguments
+            allow_kwargs = True
+            continue
+        optional = optional or bool(match.group(1))
+        word = match.group(2)
+        stars = word.count('*')
+        word = word[stars:]
+        yield (word, optional, allow_kwargs, stars)
+        if stars:
+            allow_kwargs = True


 class _AbstractArgumentsMixin:
-    pass
+    def unpack(self, funcdef=None):
+        raise NotImplementedError
+
+    def get_calling_nodes(self):
+        return []


 class AbstractArguments(_AbstractArgumentsMixin):
@@ -49,8 +138,32 @@ class AbstractArguments(_AbstractArgumentsMixin):
     trailer = None


-class TreeArguments(AbstractArguments):
+def unpack_arglist(arglist):
+    if arglist is None:
+        return

+    if arglist.type != 'arglist' and not (
+            arglist.type == 'argument' and arglist.children[0] in ('*', '**')):
+        yield 0, arglist
+        return
+
+    iterator = iter(arglist.children)
+    for child in iterator:
+        if child == ',':
+            continue
+        elif child in ('*', '**'):
+            c = next(iterator, None)
+            assert c is not None
+            yield len(child.value), c
+        elif child.type == 'argument' and \
+                child.children[0] in ('*', '**'):
+            assert len(child.children) == 2
+            yield len(child.children[0].value), child.children[1]
+        else:
+            yield 0, child
+
+
+class TreeArguments(AbstractArguments):
     def __init__(self, inference_state, context, argument_node, trailer=None):
         """
         :param argument_node: May be an argument_node or a list of nodes.
@@ -58,25 +171,165 @@ class TreeArguments(AbstractArguments):
         self.argument_node = argument_node
         self.context = context
         self._inference_state = inference_state
-        self.trailer = trailer
+        self.trailer = trailer  # Can be None, e.g. in a class definition.
+
+    @classmethod
+    @inference_state_as_method_param_cache()
+    def create_cached(cls, *args, **kwargs):
+        return cls(*args, **kwargs)
+
+    def unpack(self, funcdef=None):
+        named_args = []
+        for star_count, el in unpack_arglist(self.argument_node):
+            if star_count == 1:
+                arrays = self.context.infer_node(el)
+                iterators = [_iterate_star_args(self.context, a, el, funcdef)
+                             for a in arrays]
+                for values in list(zip_longest(*iterators)):
+                    yield None, get_merged_lazy_value(
+                        [v for v in values if v is not None]
+                    )
+            elif star_count == 2:
+                arrays = self.context.infer_node(el)
+                for dct in arrays:
+                    yield from _star_star_dict(self.context, dct, el, funcdef)
+            else:
+                if el.type == 'argument':
+                    c = el.children
+                    if len(c) == 3:  # Keyword argument.
+                        named_args.append((c[0].value, LazyTreeValue(self.context, c[2]),))
+                    else:  # Generator comprehension.
+                        # Include the brackets with the parent.
+                        sync_comp_for = el.children[1]
+                        if sync_comp_for.type == 'comp_for':
+                            sync_comp_for = sync_comp_for.children[1]
+                        comp = iterable.GeneratorComprehension(
+                            self._inference_state,
+                            defining_context=self.context,
+                            sync_comp_for_node=sync_comp_for,
+                            entry_node=el.children[0],
+                        )
+                        yield None, LazyKnownValue(comp)
+                else:
+                    yield None, LazyTreeValue(self.context, el)
+
+        # Reordering arguments is necessary, because star args sometimes appear
+        # after named arguments, but in the actual order they're prepended.
+        yield from named_args
+
+    def _as_tree_tuple_objects(self):
+        for star_count, argument in unpack_arglist(self.argument_node):
+            default = None
+            if argument.type == 'argument':
+                if len(argument.children) == 3:  # Keyword argument.
+                    argument, default = argument.children[::2]
+            yield argument, default, star_count
+
+    def iter_calling_names_with_star(self):
+        for name, default, star_count in self._as_tree_tuple_objects():
+            # TODO this function is a bit strange. probably refactor?
+            if not star_count or not isinstance(name, tree.Name):
+                continue
+
+            yield TreeNameDefinition(self.context, name)

     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self.argument_node)

+    def get_calling_nodes(self):
+        old_arguments_list = []
+        arguments = self

-class ValuesArguments(AbstractArguments):
+        while arguments not in old_arguments_list:
+            if not isinstance(arguments, TreeArguments):
+                break

+            old_arguments_list.append(arguments)
+            for calling_name in reversed(list(arguments.iter_calling_names_with_star())):
+                names = calling_name.goto()
+                if len(names) != 1:
+                    break
+                if isinstance(names[0], AnonymousParamName):
+                    # Dynamic parameters should not have calling nodes, because
+                    # they are dynamic and extremely random.
+                    return []
+                if not isinstance(names[0], ParamName):
+                    break
+                executed_param_name = names[0].get_executed_param_name()
+                arguments = executed_param_name.arguments
+                break
+
+        if arguments.argument_node is not None:
+            return [ContextualizedNode(arguments.context, arguments.argument_node)]
+        if arguments.trailer is not None:
+            return [ContextualizedNode(arguments.context, arguments.trailer)]
+        return []
+
+
+class ValuesArguments(AbstractArguments):
     def __init__(self, values_list):
         self._values_list = values_list

+    def unpack(self, funcdef=None):
+        for values in self._values_list:
+            yield None, LazyKnownValues(values)
+
     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self._values_list)


 class TreeArgumentsWrapper(_AbstractArgumentsMixin):
-
     def __init__(self, arguments):
         self._wrapped_arguments = arguments

+    @property
+    def context(self):
+        return self._wrapped_arguments.context
+
+    @property
+    def argument_node(self):
+        return self._wrapped_arguments.argument_node
+
+    @property
+    def trailer(self):
+        return self._wrapped_arguments.trailer
+
+    def unpack(self, func=None):
+        raise NotImplementedError
+
+    def get_calling_nodes(self):
+        return self._wrapped_arguments.get_calling_nodes()
+
     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self._wrapped_arguments)
+
+
+def _iterate_star_args(context, array, input_node, funcdef=None):
+    if not array.py__getattribute__('__iter__'):
+        if funcdef is not None:
+            # TODO this funcdef should not be needed.
+            m = "TypeError: %s() argument after * must be a sequence, not %s" \
+                % (funcdef.name.value, array)
+            analysis.add(context, 'type-error-star', input_node, message=m)
+    try:
+        iter_ = array.py__iter__
+    except AttributeError:
+        pass
+    else:
+        yield from iter_()
+
+
+def _star_star_dict(context, array, input_node, funcdef):
+    from jedi.inference.value.instance import CompiledInstance
+    if isinstance(array, CompiledInstance) and array.name.string_name == 'dict':
+        # For now ignore this case. In the future add proper iterators and just
+        # make one call without crazy isinstance checks.
+        return {}
+    elif isinstance(array, iterable.Sequence) and array.array_type == 'dict':
+        return array.exact_key_items()
+    else:
+        if funcdef is not None:
+            m = "TypeError: %s argument after ** must be a mapping, not %s" \
+                % (funcdef.name.value, array)
+            analysis.add(context, 'type-error-star-star', input_node, message=m)
+        return {}
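
_parse_argument_clinic() is an internal helper, but its output shape is easy
to pin down: each yielded tuple is (name, optional, allow_kwargs, stars). A
hedged illustration:

    from jedi.inference.arguments import _parse_argument_clinic

    print(list(_parse_argument_clinic('obj, type, /')))
    # [('obj', False, False, 0), ('type', False, False, 0)]
    print(list(_parse_argument_clinic('iterator[, default]')))
    # [('iterator', False, False, 0), ('default', True, False, 0)]
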
diff --git a/jedi/inference/base_value.py b/jedi/inference/base_value.py
index 3e35794a..9a789a4e 100644
--- a/jedi/inference/base_value.py
+++ b/jedi/inference/base_value.py
@@ -9,13 +9,16 @@ just one.
 from functools import reduce
 from operator import add
 from itertools import zip_longest
+
 from parso.python.tree import Name
+
 from jedi import debug
 from jedi.parser_utils import clean_scope_docstring
 from jedi.inference.helpers import SimpleGetItemNotFound
 from jedi.inference.utils import safe_property
 from jedi.inference.cache import inference_state_as_method_param_cache
 from jedi.cache import memoize_method
+
 sentinel = object()


@@ -24,13 +27,117 @@ class HasNoContext(Exception):


 class HelperValueMixin:
-
-    def py__getattribute__(self, name_or_str, name_context=None, position=
-        None, analysis_errors=True):
+    def get_root_context(self):
+        value = self
+        if value.parent_context is None:
+            return value.as_context()
+
+        while True:
+            if value.parent_context is None:
+                return value
+            value = value.parent_context
+
+    def execute(self, arguments):
+        return self.inference_state.execute(self, arguments=arguments)
+
+    def execute_with_values(self, *value_list):
+        from jedi.inference.arguments import ValuesArguments
+        arguments = ValuesArguments([ValueSet([value]) for value in value_list])
+        return self.inference_state.execute(self, arguments)
+
+    def execute_annotation(self):
+        return self.execute_with_values()
+
+    def gather_annotation_classes(self):
+        return ValueSet([self])
+
+    def merge_types_of_iterate(self, contextualized_node=None, is_async=False):
+        return ValueSet.from_sets(
+            lazy_value.infer()
+            for lazy_value in self.iterate(contextualized_node, is_async)
+        )
+
+    def _get_value_filters(self, name_or_str):
+        origin_scope = name_or_str if isinstance(name_or_str, Name) else None
+        yield from self.get_filters(origin_scope=origin_scope)
+        # This covers the case where stub files are incomplete.
+        if self.is_stub():
+            from jedi.inference.gradual.conversion import convert_values
+            for c in convert_values(ValueSet({self})):
+                yield from c.get_filters()
+
+    def goto(self, name_or_str, name_context=None, analysis_errors=True):
+        from jedi.inference import finder
+        filters = self._get_value_filters(name_or_str)
+        names = finder.filter_name(filters, name_or_str)
+        debug.dbg('context.goto %s in (%s): %s', name_or_str, self, names)
+        return names
+
+    def py__getattribute__(self, name_or_str, name_context=None, position=None,
+                           analysis_errors=True):
         """
         :param position: Position of the last statement -> tuple of line, column
         """
-        pass
+        if name_context is None:
+            name_context = self
+        names = self.goto(name_or_str, name_context, analysis_errors)
+        values = ValueSet.from_sets(name.infer() for name in names)
+        if not values:
+            n = name_or_str.value if isinstance(name_or_str, Name) else name_or_str
+            values = self.py__getattribute__alternatives(n)
+
+        if not names and not values and analysis_errors:
+            if isinstance(name_or_str, Name):
+                from jedi.inference import analysis
+                analysis.add_attribute_error(
+                    name_context, self, name_or_str)
+        debug.dbg('context.names_to_types: %s -> %s', names, values)
+        return values
+
+    def py__await__(self):
+        await_value_set = self.py__getattribute__("__await__")
+        if not await_value_set:
+            debug.warning('Tried to run __await__ on value %s', self)
+        return await_value_set.execute_with_values()
+
+    def py__name__(self):
+        return self.name.string_name
+
+    def iterate(self, contextualized_node=None, is_async=False):
+        debug.dbg('iterate %s', self)
+        if is_async:
+            from jedi.inference.lazy_value import LazyKnownValues
+            # TODO if no __aiter__ values are there, error should be:
+            # TypeError: 'async for' requires an object with __aiter__ method, got int
+            return iter([
+                LazyKnownValues(
+                    self.py__getattribute__('__aiter__').execute_with_values()
+                        .py__getattribute__('__anext__').execute_with_values()
+                        .py__getattribute__('__await__').execute_with_values()
+                        .py__stop_iteration_returns()
+                )  # noqa: E124
+            ])
+        return self.py__iter__(contextualized_node)
+
+    def is_sub_class_of(self, class_value):
+        with debug.increase_indent_cm('subclass matching of %s <=> %s' % (self, class_value),
+                                      color='BLUE'):
+            for cls in self.py__mro__():
+                if cls.is_same_class(class_value):
+                    debug.dbg('matched subclass True', color='BLUE')
+                    return True
+            debug.dbg('matched subclass False', color='BLUE')
+            return False
+
+    def is_same_class(self, class2):
+        # Class matching should prefer comparisons that are not this function.
+        if type(class2).is_same_class != HelperValueMixin.is_same_class:
+            return class2.is_same_class(self)
+        return self == class2
+
+    @memoize_method
+    def as_context(self, *args, **kwargs):
+        return self._as_context(*args, **kwargs)


 class Value(HelperValueMixin):
@@ -38,6 +145,8 @@ class Value(HelperValueMixin):
     To be implemented by subclasses.
     """
     tree_node = None
+    # Possible values: None, tuple, list, dict and set. Here to deal with these
+    # very important containers.
     array_type = None
     api_type = 'not_defined_please_report_bug'

@@ -45,18 +154,125 @@ class Value(HelperValueMixin):
         self.inference_state = inference_state
         self.parent_context = parent_context

+    def py__getitem__(self, index_value_set, contextualized_node):
+        from jedi.inference import analysis
+        # TODO this value is probably not right.
+        analysis.add(
+            contextualized_node.context,
+            'type-error-not-subscriptable',
+            contextualized_node.node,
+            message="TypeError: '%s' object is not subscriptable" % self
+        )
+        return NO_VALUES
+
+    def py__simple_getitem__(self, index):
+        raise SimpleGetItemNotFound
+
+    def py__iter__(self, contextualized_node=None):
+        if contextualized_node is not None:
+            from jedi.inference import analysis
+            analysis.add(
+                contextualized_node.context,
+                'type-error-not-iterable',
+                contextualized_node.node,
+                message="TypeError: '%s' object is not iterable" % self)
+        return iter([])
+
+    def py__next__(self, contextualized_node=None):
+        return self.py__iter__(contextualized_node)
+
+    def get_signatures(self):
+        return []
+
+    def is_class(self):
+        return False
+
+    def is_class_mixin(self):
+        return False
+
+    def is_instance(self):
+        return False
+
+    def is_function(self):
+        return False
+
+    def is_module(self):
+        return False
+
+    def is_namespace(self):
+        return False
+
+    def is_compiled(self):
+        return False
+
+    def is_bound_method(self):
+        return False
+
+    def is_builtins_module(self):
+        return False
+
     def py__bool__(self):
         """
         Since Wrapper is a super class for classes, functions and modules,
         the return value will always be true.
         """
-        pass
+        return True
+
+    def py__doc__(self):
+        try:
+            self.tree_node.get_doc_node
+        except AttributeError:
+            return ''
+        else:
+            return clean_scope_docstring(self.tree_node)
+
+    def get_safe_value(self, default=sentinel):
+        if default is sentinel:
+            raise ValueError("There exists no safe value for value %s" % self)
+        return default
+
+    def execute_operation(self, other, operator):
+        debug.warning("%s not possible between %s and %s", operator, self, other)
+        return NO_VALUES
+
+    def py__call__(self, arguments):
+        debug.warning("no execution possible %s", self)
+        return NO_VALUES
+
+    def py__stop_iteration_returns(self):
+        debug.warning("Not possible to return the stop iterations of %s", self)
+        return NO_VALUES

     def py__getattribute__alternatives(self, name_or_str):
         """
         For now a way to add values in cases like __getattr__.
         """
-        pass
+        return NO_VALUES
+
+    def py__get__(self, instance, class_value):
+        debug.warning("No __get__ defined on %s", self)
+        return ValueSet([self])
+
+    def py__get__on_class(self, calling_instance, instance, class_value):
+        return NotImplemented
+
+    def get_qualified_names(self):
+        # Returns Optional[Tuple[str, ...]]
+        return None
+
+    def is_stub(self):
+        # The root value knows if it's a stub or not.
+        return self.parent_context.is_stub()
+
+    def _as_context(self):
+        raise HasNoContext
+
+    @property
+    def name(self):
+        raise NotImplementedError
+
+    def get_type_hint(self, add_class_info=True):
+        return None

     def infer_type_vars(self, value_set):
         """
@@ -85,7 +301,7 @@ class Value(HelperValueMixin):
             above example this would first be the representation of the list
             `[1]` and then, when recursing, just of `1`.
         """
-        pass
+        return {}


 def iterate_values(values, contextualized_node=None, is_async=False):
@@ -93,10 +309,27 @@ def iterate_values(values, contextualized_node=None, is_async=False):
     Calls `iterate`, on all values but ignores the ordering and just returns
     all values that the iterate functions yield.
     """
-    pass
+    return ValueSet.from_sets(
+        lazy_value.infer()
+        for lazy_value in values.iterate(contextualized_node, is_async=is_async)
+    )


 class _ValueWrapperBase(HelperValueMixin):
+    @safe_property
+    def name(self):
+        from jedi.inference.names import ValueName
+        wrapped_name = self._wrapped_value.name
+        if wrapped_name.tree_name is not None:
+            return ValueName(self, wrapped_name.tree_name)
+        else:
+            from jedi.inference.compiled import CompiledValueName
+            return CompiledValueName(self, wrapped_name.string_name)
+
+    @classmethod
+    @inference_state_as_method_param_cache()
+    def create_cached(cls, inference_state, *args, **kwargs):
+        return cls(*args, **kwargs)

     def __getattr__(self, name):
         assert name != '_wrapped_value', 'Problem with _get_wrapped_value'
@@ -104,13 +337,20 @@ class _ValueWrapperBase(HelperValueMixin):


 class LazyValueWrapper(_ValueWrapperBase):
+    @safe_property
+    @memoize_method
+    def _wrapped_value(self):
+        with debug.increase_indent_cm('Resolve lazy value wrapper'):
+            return self._get_wrapped_value()

     def __repr__(self):
-        return '<%s>' % self.__class__.__name__
+        return '<%s>' % (self.__class__.__name__)

+    def _get_wrapped_value(self):
+        raise NotImplementedError

-class ValueWrapper(_ValueWrapperBase):

+class ValueWrapper(_ValueWrapperBase):
     def __init__(self, wrapped_value):
         self._wrapped_value = wrapped_value

@@ -119,7 +359,6 @@ class ValueWrapper(_ValueWrapperBase):


 class TreeValue(Value):
-
     def __init__(self, inference_state, parent_context, tree_node):
         super().__init__(inference_state, parent_context)
         self.tree_node = tree_node
@@ -129,29 +368,71 @@ class TreeValue(Value):


 class ContextualizedNode:
-
     def __init__(self, context, node):
         self.context = context
         self.node = node

+    def get_root_context(self):
+        return self.context.get_root_context()
+
+    def infer(self):
+        return self.context.infer_node(self.node)
+
     def __repr__(self):
-        return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self
-            .context)
+        return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context)
+
+
+def _getitem(value, index_values, contextualized_node):
+    # The actual getitem call.
+    result = NO_VALUES
+    unused_values = set()
+    for index_value in index_values:
+        index = index_value.get_safe_value(default=None)
+        if type(index) in (float, int, str, slice, bytes):
+            try:
+                result |= value.py__simple_getitem__(index)
+                continue
+            except SimpleGetItemNotFound:
+                pass
+
+        unused_values.add(index_value)
+
+    # The index was somehow not good enough or simply a wrong type.
+    # Therefore we now iterate through all the values and just take
+    # all results.
+    if unused_values or not index_values:
+        result |= value.py__getitem__(
+            ValueSet(unused_values),
+            contextualized_node
+        )
+    debug.dbg('py__getitem__ result: %s', result)
+    return result


 class ValueSet:
-
     def __init__(self, iterable):
         self._set = frozenset(iterable)
         for value in iterable:
             assert not isinstance(value, ValueSet)

+    @classmethod
+    def _from_frozen_set(cls, frozenset_):
+        self = cls.__new__(cls)
+        self._set = frozenset_
+        return self
+
     @classmethod
     def from_sets(cls, sets):
         """
         Used to work with an iterable of set.
         """
-        pass
+        aggregated = set()
+        for set_ in sets:
+            if isinstance(set_, ValueSet):
+                aggregated |= set_._set
+            else:
+                aggregated |= frozenset(set_)
+        return cls._from_frozen_set(frozenset(aggregated))

     def __or__(self, other):
         return self._from_frozen_set(self._set | other._set)
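
A minimal, runnable sketch (not part of the patch; names are illustrative stand-ins) of what `from_sets` above does: it flattens an iterable that may mix `ValueSet` instances and plain iterables into a single frozen set.

    class TinyValueSet:
        # Stand-in for jedi.inference.base_value.ValueSet, reduced to the
        # two methods shown in the hunk above.
        def __init__(self, iterable):
            self._set = frozenset(iterable)

        @classmethod
        def from_sets(cls, sets):
            aggregated = set()
            for set_ in sets:
                if isinstance(set_, TinyValueSet):
                    aggregated |= set_._set
                else:
                    aggregated |= frozenset(set_)
            return cls(aggregated)

        def __or__(self, other):
            return TinyValueSet(self._set | other._set)

    merged = TinyValueSet.from_sets([TinyValueSet({1, 2}), {2, 3}, ()])
    assert merged._set == frozenset({1, 2, 3})
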
@@ -169,13 +450,17 @@ class ValueSet:
         return len(self._set)

     def __repr__(self):
-        return 'S{%s}' % ', '.join(str(s) for s in self._set)
+        return 'S{%s}' % (', '.join(str(s) for s in self._set))

-    def __getattr__(self, name):
+    def filter(self, filter_func):
+        return self.__class__(filter(filter_func, self._set))

+    def __getattr__(self, name):
         def mapper(*args, **kwargs):
-            return self.from_sets(getattr(value, name)(*args, **kwargs) for
-                value in self._set)
+            return self.from_sets(
+                getattr(value, name)(*args, **kwargs)
+                for value in self._set
+            )
         return mapper

     def __eq__(self, other):
@@ -187,5 +472,87 @@ class ValueSet:
     def __hash__(self):
         return hash(self._set)

+    def py__class__(self):
+        return ValueSet(c.py__class__() for c in self._set)
+
+    def iterate(self, contextualized_node=None, is_async=False):
+        from jedi.inference.lazy_value import get_merged_lazy_value
+        type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set]
+        for lazy_values in zip_longest(*type_iters):
+            yield get_merged_lazy_value(
+                [l for l in lazy_values if l is not None]
+            )
+
+    def execute(self, arguments):
+        return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set)
+
+    def execute_with_values(self, *args, **kwargs):
+        return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set)
+
+    def goto(self, *args, **kwargs):
+        return reduce(add, [c.goto(*args, **kwargs) for c in self._set], [])
+
+    def py__getattribute__(self, *args, **kwargs):
+        return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set)
+
+    def get_item(self, *args, **kwargs):
+        return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set)
+
+    def try_merge(self, function_name):
+        value_set = self.__class__([])
+        for c in self._set:
+            try:
+                method = getattr(c, function_name)
+            except AttributeError:
+                pass
+            else:
+                value_set |= method()
+        return value_set
+
+    def gather_annotation_classes(self):
+        return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set])
+
+    def get_signatures(self):
+        return [sig for c in self._set for sig in c.get_signatures()]
+
+    def get_type_hint(self, add_class_info=True):
+        t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set]
+        type_hints = sorted(filter(None, t))
+        if len(type_hints) == 1:
+            return type_hints[0]
+
+        optional = 'None' in type_hints
+        if optional:
+            type_hints.remove('None')
+
+        if len(type_hints) == 0:
+            return None
+        elif len(type_hints) == 1:
+            s = type_hints[0]
+        else:
+            s = 'Union[%s]' % ', '.join(type_hints)
+        if optional:
+            s = 'Optional[%s]' % s
+        return s
+
+    def infer_type_vars(self, value_set):
+        # Circular
+        from jedi.inference.gradual.annotation import merge_type_var_dicts
+
+        type_var_dict = {}
+        for value in self._set:
+            merge_type_var_dicts(
+                type_var_dict,
+                value.infer_type_vars(value_set),
+            )
+        return type_var_dict
+

 NO_VALUES = ValueSet([])
+
+
+def iterator_to_value_set(func):
+    def wrapper(*args, **kwargs):
+        return ValueSet(func(*args, **kwargs))
+
+    return wrapper
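
The `_getitem` helper added above tries the cheap `py__simple_getitem__` path for every index whose concrete value is known (float, int, str, slice, bytes) and only hands the leftovers to the generic `py__getitem__`. A self-contained toy version of that fallback, with hypothetical stand-in classes rather than jedi's real ones:

    class SimpleGetItemNotFound(Exception):
        pass

    class FakeList:
        # Stand-in value exposing the two getitem entry points _getitem uses.
        def __init__(self, items):
            self._items = items

        def py__simple_getitem__(self, index):
            try:
                return {self._items[index]}
            except (IndexError, TypeError):
                raise SimpleGetItemNotFound

        def py__getitem__(self, unused_indices):
            # Fallback: the index was unknown or unusable, yield everything.
            return set(self._items)

    def getitem(value, indices):
        result, unused = set(), set()
        for index in indices:
            try:
                result |= value.py__simple_getitem__(index)
                continue
            except SimpleGetItemNotFound:
                pass
            unused.add(index)
        if unused or not indices:
            result |= value.py__getitem__(unused)
        return result

    fake = FakeList(['a', 'b'])
    assert getitem(fake, [0]) == {'a'}              # known index: fast path
    assert getitem(fake, [object()]) == {'a', 'b'}  # unknown index: fallback
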
diff --git a/jedi/inference/cache.py b/jedi/inference/cache.py
index e584319c..8360a42a 100644
--- a/jedi/inference/cache.py
+++ b/jedi/inference/cache.py
@@ -4,13 +4,15 @@
 - ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes.
 """
 from functools import wraps
+
 from jedi import debug
+
 _NO_DEFAULT = object()
 _RECURSION_SENTINEL = object()


-def _memoize_default(default=_NO_DEFAULT, inference_state_is_first_arg=
-    False, second_arg_is_inference_state=False):
+def _memoize_default(default=_NO_DEFAULT, inference_state_is_first_arg=False,
+                     second_arg_is_inference_state=False):
     """ This is a typical memoization decorator, BUT there is one difference:
     To prevent recursion it sets defaults.

@@ -18,7 +20,54 @@ def _memoize_default(default=_NO_DEFAULT, inference_state_is_first_arg=
     don't think that there is a big speed difference, but there are many cases
     where recursion could happen (think about a = b; b = a).
     """
-    pass
+    def func(function):
+        def wrapper(obj, *args, **kwargs):
+            # TODO These checks are kind of ugly and slow.
+            if inference_state_is_first_arg:
+                cache = obj.memoize_cache
+            elif second_arg_is_inference_state:
+                cache = args[0].memoize_cache  # needed for meta classes
+            else:
+                cache = obj.inference_state.memoize_cache
+
+            try:
+                memo = cache[function]
+            except KeyError:
+                cache[function] = memo = {}
+
+            key = (obj, args, frozenset(kwargs.items()))
+            if key in memo:
+                return memo[key]
+            else:
+                if default is not _NO_DEFAULT:
+                    memo[key] = default
+                rv = function(obj, *args, **kwargs)
+                memo[key] = rv
+                return rv
+        return wrapper
+
+    return func
+
+
+def inference_state_function_cache(default=_NO_DEFAULT):
+    def decorator(func):
+        return _memoize_default(default=default, inference_state_is_first_arg=True)(func)
+
+    return decorator
+
+
+def inference_state_method_cache(default=_NO_DEFAULT):
+    def decorator(func):
+        return _memoize_default(default=default)(func)
+
+    return decorator
+
+
+def inference_state_as_method_param_cache():
+    def decorator(call):
+        return _memoize_default(second_arg_is_inference_state=True)(call)
+
+    return decorator


 class CachedMetaClass(type):
@@ -27,7 +76,6 @@ class CachedMetaClass(type):
     class initializations. Either you do it this way or with decorators, but
     with decorators you lose class access (isinstance, etc).
     """
-
     @inference_state_as_method_param_cache()
     def __call__(self, *args, **kwargs):
         return super().__call__(*args, **kwargs)
@@ -38,4 +86,41 @@ def inference_state_method_generator_cache():
     This is a special memoizer. It memoizes generators and also checks for
     recursion errors and returns no further iterator elements in that case.
     """
-    pass
+    def func(function):
+        @wraps(function)
+        def wrapper(obj, *args, **kwargs):
+            cache = obj.inference_state.memoize_cache
+            try:
+                memo = cache[function]
+            except KeyError:
+                cache[function] = memo = {}
+
+            key = (obj, args, frozenset(kwargs.items()))
+
+            if key in memo:
+                actual_generator, cached_lst = memo[key]
+            else:
+                actual_generator = function(obj, *args, **kwargs)
+                cached_lst = []
+                memo[key] = actual_generator, cached_lst
+
+            i = 0
+            while True:
+                try:
+                    next_element = cached_lst[i]
+                    if next_element is _RECURSION_SENTINEL:
+                        debug.warning('Found a generator recursion for %s' % obj)
+                        # This means we have hit a recursion.
+                        return
+                except IndexError:
+                    cached_lst.append(_RECURSION_SENTINEL)
+                    next_element = next(actual_generator, None)
+                    if next_element is None:
+                        cached_lst.pop()
+                        return
+                    cached_lst[-1] = next_element
+                yield next_element
+                i += 1
+        return wrapper
+
+    return func
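
The recursion-breaking idea in `_memoize_default` is that the cache is seeded with the default *before* the function runs, so a recursive re-entry with the same key gets the default back instead of looping forever. A standalone sketch of just that trick (simplified keying, not jedi's real decorator):

    def memoize_default(default):
        def decorator(function):
            memo = {}
            def wrapper(*args):
                if args in memo:
                    return memo[args]
                memo[args] = default          # seed first: breaks cycles
                memo[args] = function(*args)  # then overwrite with the result
                return memo[args]
            return wrapper
        return decorator

    DEFINITIONS = {'a': 'b', 'b': 'a'}  # an 'a = b; b = a' style cycle

    @memoize_default(default=frozenset())
    def infer(name):
        target = DEFINITIONS[name]
        if target in DEFINITIONS:
            return infer(target)  # re-entry for 'a' hits the seeded default
        return frozenset([target])

    print(infer('a'))  # frozenset() -- no RecursionError
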
diff --git a/jedi/inference/compiled/access.py b/jedi/inference/compiled/access.py
index 2da304b3..09ed1b64 100644
--- a/jedi/inference/compiled/access.py
+++ b/jedi/inference/compiled/access.py
@@ -10,40 +10,161 @@ import builtins
 import typing
 from pathlib import Path
 from typing import Optional, Tuple
+
 from jedi.inference.compiled.getattr_static import getattr_static
-ALLOWED_GETITEM_TYPES = str, list, tuple, bytes, bytearray, dict
+
+ALLOWED_GETITEM_TYPES = (str, list, tuple, bytes, bytearray, dict)
+
 MethodDescriptorType = type(str.replace)
-NOT_CLASS_TYPES = (types.BuiltinFunctionType, types.CodeType, types.
-    FrameType, types.FunctionType, types.GeneratorType, types.
-    GetSetDescriptorType, types.LambdaType, types.MemberDescriptorType,
-    types.MethodType, types.ModuleType, types.TracebackType,
-    MethodDescriptorType, types.MappingProxyType, types.SimpleNamespace,
-    types.DynamicClassAttribute)
+# These are not considered classes and access is granted even though they have
+# a __class__ attribute.
+NOT_CLASS_TYPES = (
+    types.BuiltinFunctionType,
+    types.CodeType,
+    types.FrameType,
+    types.FunctionType,
+    types.GeneratorType,
+    types.GetSetDescriptorType,
+    types.LambdaType,
+    types.MemberDescriptorType,
+    types.MethodType,
+    types.ModuleType,
+    types.TracebackType,
+    MethodDescriptorType,
+    types.MappingProxyType,
+    types.SimpleNamespace,
+    types.DynamicClassAttribute,
+)
+
+# Those types don't exist in typing.
 MethodDescriptorType = type(str.replace)
 WrapperDescriptorType = type(set.__iter__)
-object_class_dict = type.__dict__['__dict__'].__get__(object)
+# `object.__subclasshook__` is an already executed descriptor.
+object_class_dict = type.__dict__["__dict__"].__get__(object)  # type: ignore[index]
 ClassMethodDescriptorType = type(object_class_dict['__subclasshook__'])
+
 _sentinel = object()
-COMPARISON_OPERATORS = {'==': op.eq, '!=': op.ne, 'is': op.is_, 'is not':
-    op.is_not, '<': op.lt, '<=': op.le, '>': op.gt, '>=': op.ge}
-_OPERATORS = {'+': op.add, '-': op.sub}
+
+# Maps Python syntax to the operator module.
+COMPARISON_OPERATORS = {
+    '==': op.eq,
+    '!=': op.ne,
+    'is': op.is_,
+    'is not': op.is_not,
+    '<': op.lt,
+    '<=': op.le,
+    '>': op.gt,
+    '>=': op.ge,
+}
+
+_OPERATORS = {
+    '+': op.add,
+    '-': op.sub,
+}
 _OPERATORS.update(COMPARISON_OPERATORS)
-ALLOWED_DESCRIPTOR_ACCESS = (types.FunctionType, types.GetSetDescriptorType,
-    types.MemberDescriptorType, MethodDescriptorType, WrapperDescriptorType,
-    ClassMethodDescriptorType, staticmethod, classmethod)
-SignatureParam = namedtuple('SignatureParam',
+
+ALLOWED_DESCRIPTOR_ACCESS = (
+    types.FunctionType,
+    types.GetSetDescriptorType,
+    types.MemberDescriptorType,
+    MethodDescriptorType,
+    WrapperDescriptorType,
+    ClassMethodDescriptorType,
+    staticmethod,
+    classmethod,
+)
+
+
+def safe_getattr(obj, name, default=_sentinel):
+    try:
+        attr, is_get_descriptor = getattr_static(obj, name)
+    except AttributeError:
+        if default is _sentinel:
+            raise
+        return default
+    else:
+        if isinstance(attr, ALLOWED_DESCRIPTOR_ACCESS):
+            # In case of descriptors that have get methods we cannot return
+            # its value, because that would mean code execution.
+            # Since it's an isinstance call, code execution is still possible,
+            # but this is not really a security feature, more of a safety
+            # measure. Code execution is basically always possible when
+            # a module is imported. This is here so people don't shoot
+            # themselves in the foot.
+            return getattr(obj, name)
+    return attr
+
+
+SignatureParam = namedtuple(
+    'SignatureParam',
     'name has_default default default_string has_annotation annotation annotation_string kind_name'
-    )
+)


-class AccessPath:
+def shorten_repr(func):
+    def wrapper(self):
+        r = func(self)
+        if len(r) > 50:
+            r = r[:50] + '..'
+        return r
+    return wrapper
+
+
+def create_access(inference_state, obj):
+    return inference_state.compiled_subprocess.get_or_create_access_handle(obj)
+

+def load_module(inference_state, dotted_name, sys_path):
+    temp, sys.path = sys.path, sys_path
+    try:
+        __import__(dotted_name)
+    except ImportError:
+        # If a module is "corrupt" or not really a Python module or whatever.
+        warnings.warn(
+            "Module %s not importable in path %s." % (dotted_name, sys_path),
+            UserWarning,
+            stacklevel=2,
+        )
+        return None
+    except Exception:
+        # Since __import__ pretty much makes code execution possible, just
+        # catch any error here and print it.
+        warnings.warn(
+            "Cannot import:\n%s" % traceback.format_exc(), UserWarning, stacklevel=2
+        )
+        return None
+    finally:
+        sys.path = temp
+
+    # Just access the cache after import, because of #59 as well as the very
+    # complicated import structure of Python.
+    module = sys.modules[dotted_name]
+    return create_access_path(inference_state, module)
+
+
+class AccessPath:
     def __init__(self, accesses):
         self.accesses = accesses


-class DirectObjectAccess:
+def create_access_path(inference_state, obj) -> AccessPath:
+    access = create_access(inference_state, obj)
+    return AccessPath(access.get_access_path_tuples())

+
+def get_api_type(obj):
+    if inspect.isclass(obj):
+        return 'class'
+    elif inspect.ismodule(obj):
+        return 'module'
+    elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \
+            or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj):
+        return 'function'
+    # Everything else...
+    return 'instance'
+
+
+class DirectObjectAccess:
     def __init__(self, inference_state, obj):
         self._inference_state = inference_state
         self._obj = obj
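
The operator tables defined earlier in this file let `execute_operation` (added further down) evaluate binary operations on the live objects through the `operator` module instead of `eval`. A trimmed sketch of that dispatch, outside the patch:

    import operator as op

    COMPARISON_OPERATORS = {'==': op.eq, '!=': op.ne, '<': op.lt}
    _OPERATORS = {'+': op.add, '-': op.sub, **COMPARISON_OPERATORS}

    def execute_operation(left, right, operator_string):
        # Look the syntax up in the table and apply it to the real objects.
        return _OPERATORS[operator_string](left, right)

    print(execute_operation(40, 2, '+'))  # 42
    print(execute_operation(1, 2, '<'))   # True
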
@@ -51,20 +172,391 @@ class DirectObjectAccess:
     def __repr__(self):
         return '%s(%s)' % (self.__class__.__name__, self.get_repr())

+    def _create_access(self, obj):
+        return create_access(self._inference_state, obj)
+
+    def _create_access_path(self, obj) -> AccessPath:
+        return create_access_path(self._inference_state, obj)
+
+    def py__bool__(self):
+        return bool(self._obj)
+
+    def py__file__(self) -> Optional[Path]:
+        try:
+            return Path(self._obj.__file__)
+        except AttributeError:
+            return None
+
+    def py__doc__(self):
+        return inspect.getdoc(self._obj) or ''
+
+    def py__name__(self):
+        if not _is_class_instance(self._obj) or \
+                inspect.ismethoddescriptor(self._obj):  # slots
+            cls = self._obj
+        else:
+            try:
+                cls = self._obj.__class__
+            except AttributeError:
+                # Happens with numpy.core.umath._UFUNC_API (you get it
+                # automatically by doing `import numpy`).
+                return None
+
+        try:
+            return cls.__name__
+        except AttributeError:
+            return None
+
+    def py__mro__accesses(self):
+        return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:])
+
+    def py__getitem__all_values(self):
+        if isinstance(self._obj, dict):
+            return [self._create_access_path(v) for v in self._obj.values()]
+        if isinstance(self._obj, (list, tuple)):
+            return [self._create_access_path(v) for v in self._obj]
+
+        if self.is_instance():
+            cls = DirectObjectAccess(self._inference_state, self._obj.__class__)
+            return cls.py__getitem__all_values()
+
+        try:
+            getitem = self._obj.__getitem__
+        except AttributeError:
+            pass
+        else:
+            annotation = DirectObjectAccess(self._inference_state, getitem).get_return_annotation()
+            if annotation is not None:
+                return [annotation]
+        return None
+
+    def py__simple_getitem__(self, index, *, safe=True):
+        if safe and type(self._obj) not in ALLOWED_GETITEM_TYPES:
+            # Get rid of side effects, we won't call custom `__getitem__`s.
+            return None
+
+        return self._create_access_path(self._obj[index])
+
+    def py__iter__list(self):
+        try:
+            iter_method = self._obj.__iter__
+        except AttributeError:
+            return None
+        else:
+            p = DirectObjectAccess(self._inference_state, iter_method).get_return_annotation()
+            if p is not None:
+                return [p]
+
+        if type(self._obj) not in ALLOWED_GETITEM_TYPES:
+            # Get rid of side effects, we won't call custom `__getitem__`s.
+            return []
+
+        lst = []
+        for i, part in enumerate(self._obj):
+            if i > 20:
+                # Should not go crazy with large iterators
+                break
+            lst.append(self._create_access_path(part))
+        return lst
+
+    def py__class__(self):
+        return self._create_access_path(self._obj.__class__)
+
+    def py__bases__(self):
+        return [self._create_access_path(base) for base in self._obj.__bases__]
+
+    def py__path__(self):
+        paths = getattr(self._obj, '__path__', None)
+        # Avoid some weird hacks that would just fail, because they cannot be
+        # used by pickle.
+        if not isinstance(paths, list) \
+                or not all(isinstance(p, str) for p in paths):
+            return None
+        return paths
+
+    @shorten_repr
+    def get_repr(self):
+        if inspect.ismodule(self._obj):
+            return repr(self._obj)
+        # Try to avoid execution of the property.
+        if safe_getattr(self._obj, '__module__', default='') == 'builtins':
+            return repr(self._obj)
+
+        type_ = type(self._obj)
+        if type_ == type:
+            return type.__repr__(self._obj)
+
+        if safe_getattr(type_, '__module__', default='') == 'builtins':
+            # Allow direct execution of repr for builtins.
+            return repr(self._obj)
+        return object.__repr__(self._obj)
+
+    def is_class(self):
+        return inspect.isclass(self._obj)
+
+    def is_function(self):
+        return inspect.isfunction(self._obj) or inspect.ismethod(self._obj)
+
+    def is_module(self):
+        return inspect.ismodule(self._obj)
+
+    def is_instance(self):
+        return _is_class_instance(self._obj)
+
+    def ismethoddescriptor(self):
+        return inspect.ismethoddescriptor(self._obj)
+
+    def get_qualified_names(self):
+        def try_to_get_name(obj):
+            return getattr(obj, '__qualname__', getattr(obj, '__name__', None))
+
+        if self.is_module():
+            return ()
+        name = try_to_get_name(self._obj)
+        if name is None:
+            name = try_to_get_name(type(self._obj))
+            if name is None:
+                return ()
+        return tuple(name.split('.'))
+
+    def dir(self):
+        return dir(self._obj)
+
+    def has_iter(self):
+        try:
+            iter(self._obj)
+            return True
+        except TypeError:
+            return False
+
+    def is_allowed_getattr(self, name, safe=True) -> Tuple[bool, bool, Optional[AccessPath]]:
+        # TODO this API is ugly.
+        try:
+            attr, is_get_descriptor = getattr_static(self._obj, name)
+        except AttributeError:
+            if not safe:
+                # Unsafe is mostly used to check for __getattr__/__getattribute__.
+                # getattr_static works for properties, but the underscore methods
+                # are just ignored (because it's safer and avoids more code
+                # execution). See also GH #1378.
+
+                # Avoid warnings, see comment in the next function.
+                with warnings.catch_warnings(record=True):
+                    warnings.simplefilter("always")
+                    try:
+                        return hasattr(self._obj, name), False, None
+                    except Exception:
+                        # Obviously has an attribute (probably a property) that
+                        # gets executed, so just avoid all exceptions here.
+                        pass
+            return False, False, None
+        else:
+            if is_get_descriptor and type(attr) not in ALLOWED_DESCRIPTOR_ACCESS:
+                if isinstance(attr, property):
+                    if hasattr(attr.fget, '__annotations__'):
+                        a = DirectObjectAccess(self._inference_state, attr.fget)
+                        return True, True, a.get_return_annotation()
+                # In case of descriptors that have get methods we cannot return
+                # its value, because that would mean code execution.
+                return True, True, None
+        return True, False, None
+
+    def getattr_paths(self, name, default=_sentinel):
+        try:
+            # Make sure no warnings are printed here, this is autocompletion,
+            # warnings should not be shown. See also GH #1383.
+            with warnings.catch_warnings(record=True):
+                warnings.simplefilter("always")
+                return_obj = getattr(self._obj, name)
+        except Exception as e:
+            if default is _sentinel:
+                if isinstance(e, AttributeError):
+                    # Happens e.g. in properties of
+                    # PyQt4.QtGui.QStyleOptionComboBox.currentText
+                    # -> just set it to None
+                    raise
+                # Just in case anything happens, return an AttributeError. It
+                # should not crash.
+                raise AttributeError
+            return_obj = default
+        access = self._create_access(return_obj)
+        if inspect.ismodule(return_obj):
+            return [access]
+
+        try:
+            module = return_obj.__module__
+        except AttributeError:
+            pass
+        else:
+            if module is not None and isinstance(module, str):
+                try:
+                    __import__(module)
+                    # For some modules like _sqlite3, the __module__ for classes is
+                    # different, in this case it's sqlite3. So we have to try to
+                    # load that "original" module, because it's not loaded yet. If
+                    # we don't do that, we don't really have a "parent" module and
+                    # we would fall back to builtins.
+                except ImportError:
+                    pass
+
+        module = inspect.getmodule(return_obj)
+        if module is None:
+            module = inspect.getmodule(type(return_obj))
+            if module is None:
+                module = builtins
+        return [self._create_access(module), access]
+
+    def get_safe_value(self):
+        if type(self._obj) in (bool, bytes, float, int, str, slice) or self._obj is None:
+            return self._obj
+        raise ValueError("Object is type %s and not simple" % type(self._obj))
+
+    def get_api_type(self):
+        return get_api_type(self._obj)
+
+    def get_array_type(self):
+        if isinstance(self._obj, dict):
+            return 'dict'
+        return None
+
+    def get_key_paths(self):
+        def iter_partial_keys():
+            # We could use list(keys()), but that might take a lot more memory.
+            for (i, k) in enumerate(self._obj.keys()):
+                # Limit key listing at some point. This is artificial, but this
+                # way we don't get stalled because of slow completions
+                if i > 50:
+                    break
+                yield k
+
+        return [self._create_access_path(k) for k in iter_partial_keys()]
+
+    def get_access_path_tuples(self):
+        accesses = [create_access(self._inference_state, o) for o in self._get_objects_path()]
+        return [(access.py__name__(), access) for access in accesses]
+
+    def _get_objects_path(self):
+        def get():
+            obj = self._obj
+            yield obj
+            try:
+                obj = obj.__objclass__
+            except AttributeError:
+                pass
+            else:
+                yield obj
+
+            try:
+                # Returns a dotted string path.
+                imp_plz = obj.__module__
+            except AttributeError:
+                # Unfortunately in some cases like `int` there's no __module__
+                if not inspect.ismodule(obj):
+                    yield builtins
+            else:
+                if imp_plz is None:
+                    # Happens for example in `(_ for _ in []).send.__module__`.
+                    yield builtins
+                else:
+                    try:
+                        yield sys.modules[imp_plz]
+                    except KeyError:
+                        # __module__ can be something arbitrary that doesn't exist.
+                        yield builtins
+
+        return list(reversed(list(get())))
+
+    def execute_operation(self, other_access_handle, operator):
+        other_access = other_access_handle.access
+        op = _OPERATORS[operator]
+        return self._create_access_path(op(self._obj, other_access._obj))
+
     def get_annotation_name_and_args(self):
         """
         Returns Tuple[Optional[str], Tuple[AccessPath, ...]]
         """
-        pass
+        name = None
+        args = ()
+        if safe_getattr(self._obj, '__module__', default='') == 'typing':
+            m = re.match(r'typing.(\w+)\[', repr(self._obj))
+            if m is not None:
+                name = m.group(1)
+
+                import typing
+                if sys.version_info >= (3, 8):
+                    args = typing.get_args(self._obj)
+                else:
+                    args = safe_getattr(self._obj, '__args__', default=None)
+        return name, tuple(self._create_access_path(arg) for arg in args)
+
+    def needs_type_completions(self):
+        return inspect.isclass(self._obj) and self._obj != type
+
+    def _annotation_to_str(self, annotation):
+        return inspect.formatannotation(annotation)
+
+    def get_signature_params(self):
+        return [
+            SignatureParam(
+                name=p.name,
+                has_default=p.default is not p.empty,
+                default=self._create_access_path(p.default),
+                default_string=repr(p.default),
+                has_annotation=p.annotation is not p.empty,
+                annotation=self._create_access_path(p.annotation),
+                annotation_string=self._annotation_to_str(p.annotation),
+                kind_name=str(p.kind)
+            ) for p in self._get_signature().parameters.values()
+        ]
+
+    def _get_signature(self):
+        obj = self._obj
+        try:
+            return inspect.signature(obj)
+        except (RuntimeError, TypeError):
+            # Reading the code of the function in Python 3.6 implies there are
+            # at least these errors that might occur if something is wrong with
+            # the signature. In that case we just want a simple escape for now.
+            raise ValueError
+
+    def get_return_annotation(self) -> Optional[AccessPath]:
+        try:
+            o = self._obj.__annotations__.get('return')
+        except AttributeError:
+            return None
+
+        if o is None:
+            return None
+
+        try:
+            o = typing.get_type_hints(self._obj).get('return')
+        except Exception:
+            pass
+
+        return self._create_access_path(o)
+
+    def negate(self):
+        return self._create_access_path(-self._obj)

     def get_dir_infos(self):
         """
         Used to return a couple of infos that are needed when accessing the sub
         objects of an objects
         """
-        pass
+        tuples = dict(
+            (name, self.is_allowed_getattr(name))
+            for name in self.dir()
+        )
+        return self.needs_type_completions(), tuples


 def _is_class_instance(obj):
     """Like inspect.* methods."""
-    pass
+    try:
+        cls = obj.__class__
+    except AttributeError:
+        return False
+    else:
+        # The isinstance check for cls is just there so issubclass doesn't
+        # raise an exception.
+        return cls != type and isinstance(cls, type) and not issubclass(cls, NOT_CLASS_TYPES)
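
The point of `safe_getattr` and `is_allowed_getattr` above is to avoid running arbitrary code during completion: plain `getattr()` executes property getters, while a static lookup retrieves the descriptor object itself. The stdlib's `inspect.getattr_static` (the model for the vendored variant in the next file, which additionally reports whether the attribute is a get-descriptor) demonstrates the difference:

    import inspect

    class Config:
        @property
        def token(self):
            print('side effect: hitting the network!')
            return 'secret'

    c = Config()
    static = inspect.getattr_static(c, 'token')  # no side effect
    print(type(static))                          # <class 'property'>
    value = getattr(c, 'token')                  # runs the getter (prints)
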
diff --git a/jedi/inference/compiled/getattr_static.py b/jedi/inference/compiled/getattr_static.py
index 2fd959ee..03c199ef 100644
--- a/jedi/inference/compiled/getattr_static.py
+++ b/jedi/inference/compiled/getattr_static.py
@@ -3,11 +3,75 @@ A static version of getattr.
 This is a backport of the Python 3 code with a little bit of additional
 information returned to enable Jedi to make decisions.
 """
+
 import types
+
 from jedi import debug
+
 _sentinel = object()


+def _check_instance(obj, attr):
+    instance_dict = {}
+    try:
+        instance_dict = object.__getattribute__(obj, "__dict__")
+    except AttributeError:
+        pass
+    return dict.get(instance_dict, attr, _sentinel)
+
+
+def _check_class(klass, attr):
+    for entry in _static_getmro(klass):
+        if _shadowed_dict(type(entry)) is _sentinel:
+            try:
+                return entry.__dict__[attr]
+            except KeyError:
+                pass
+    return _sentinel
+
+
+def _is_type(obj):
+    try:
+        _static_getmro(obj)
+    except TypeError:
+        return False
+    return True
+
+
+def _shadowed_dict(klass):
+    dict_attr = type.__dict__["__dict__"]
+    for entry in _static_getmro(klass):
+        try:
+            class_dict = dict_attr.__get__(entry)["__dict__"]
+        except KeyError:
+            pass
+        else:
+            if not (type(class_dict) is types.GetSetDescriptorType
+                    and class_dict.__name__ == "__dict__"
+                    and class_dict.__objclass__ is entry):
+                return class_dict
+    return _sentinel
+
+
+def _static_getmro(klass):
+    mro = type.__dict__['__mro__'].__get__(klass)
+    if not isinstance(mro, (tuple, list)):
+        # There are unfortunately no tests for this, I was not able to
+        # reproduce this in pure Python. However should still solve the issue
+        # raised in GH #1517.
+        debug.warning('mro of %s returned %s, should be a tuple' % (klass, mro))
+        return ()
+    return mro
+
+
+def _safe_hasattr(obj, name):
+    return _check_class(type(obj), name) is not _sentinel
+
+
+def _safe_is_data_descriptor(obj):
+    return _safe_hasattr(obj, '__set__') or _safe_hasattr(obj, '__delete__')
+
+
 def getattr_static(obj, attr, default=_sentinel):
     """Retrieve attributes without triggering dynamic lookup via the
        descriptor protocol,  __getattr__ or __getattribute__.
@@ -22,4 +86,36 @@ def getattr_static(obj, attr, default=_sentinel):
       Returns a tuple `(attr, is_get_descriptor)`. is_get_descriptor means that
        the attribute is a descriptor that has a `__get__` attribute.
     """
-    pass
+    instance_result = _sentinel
+    if not _is_type(obj):
+        klass = type(obj)
+        dict_attr = _shadowed_dict(klass)
+        if (dict_attr is _sentinel or type(dict_attr) is types.MemberDescriptorType):
+            instance_result = _check_instance(obj, attr)
+    else:
+        klass = obj
+
+    klass_result = _check_class(klass, attr)
+
+    if instance_result is not _sentinel and klass_result is not _sentinel:
+        if _safe_hasattr(klass_result, '__get__') \
+                and _safe_is_data_descriptor(klass_result):
+            # A get/set descriptor has priority over everything.
+            return klass_result, True
+
+    if instance_result is not _sentinel:
+        return instance_result, False
+    if klass_result is not _sentinel:
+        return klass_result, _safe_hasattr(klass_result, '__get__')
+
+    if obj is klass:
+        # for types we check the metaclass too
+        for entry in _static_getmro(type(klass)):
+            if _shadowed_dict(type(entry)) is _sentinel:
+                try:
+                    return entry.__dict__[attr], False
+                except KeyError:
+                    pass
+    if default is not _sentinel:
+        return default, False
+    raise AttributeError(attr)
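
The priority order implemented above mirrors Python's own attribute lookup: a data descriptor (one defining `__set__` or `__delete__`) found on the class wins over the instance `__dict__`, while everything else loses to it. A quick demonstration of that rule in plain Python:

    class DataDescriptor:
        def __get__(self, obj, objtype=None):
            return 'from descriptor'
        def __set__(self, obj, value):
            raise AttributeError('read-only')

    class A:
        x = DataDescriptor()
        y = 'class value'

    a = A()
    a.__dict__['x'] = 'instance value'
    a.__dict__['y'] = 'instance value'
    print(a.x)  # 'from descriptor' -- the data descriptor wins
    print(a.y)  # 'instance value'  -- a plain class attribute loses
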
diff --git a/jedi/inference/compiled/mixed.py b/jedi/inference/compiled/mixed.py
index 747c0bd5..1bfa3405 100644
--- a/jedi/inference/compiled/mixed.py
+++ b/jedi/inference/compiled/mixed.py
@@ -1,9 +1,12 @@
 """
 Used only for REPL Completion.
 """
+
 import inspect
 from pathlib import Path
+
 from jedi.parser_utils import get_cached_code_lines
+
 from jedi import settings
 from jedi.cache import memoize_method
 from jedi.inference import compiled
@@ -11,10 +14,13 @@ from jedi.file_io import FileIO
 from jedi.inference.names import NameWrapper
 from jedi.inference.base_value import ValueSet, ValueWrapper, NO_VALUES
 from jedi.inference.value import ModuleValue
-from jedi.inference.cache import inference_state_function_cache, inference_state_method_cache
+from jedi.inference.cache import inference_state_function_cache, \
+    inference_state_method_cache
 from jedi.inference.compiled.access import ALLOWED_GETITEM_TYPES, get_api_type
 from jedi.inference.gradual.conversion import to_stub
-from jedi.inference.context import CompiledContext, CompiledModuleContext, TreeContextMixin
+from jedi.inference.context import CompiledContext, CompiledModuleContext, \
+    TreeContextMixin
+
 _sentinel = object()


@@ -35,19 +41,67 @@ class MixedObject(ValueWrapper):
     fewer special cases, because in Python you don't have the same freedoms
     to modify the runtime.
     """
-
     def __init__(self, compiled_value, tree_value):
         super().__init__(tree_value)
         self.compiled_value = compiled_value
         self.access_handle = compiled_value.access_handle

+    def get_filters(self, *args, **kwargs):
+        yield MixedObjectFilter(
+            self.inference_state, self.compiled_value, self._wrapped_value)
+
+    def get_signatures(self):
+        # Prefer `inspect.signature` over somehow analyzing Python code. It
+        # should be very precise, especially for stuff like `partial`.
+        return self.compiled_value.get_signatures()
+
+    @inference_state_method_cache(default=NO_VALUES)
+    def py__call__(self, arguments):
+        # Fallback to the wrapped value if to_stub returns no values.
+        values = to_stub(self._wrapped_value)
+        if not values:
+            values = self._wrapped_value
+        return values.py__call__(arguments)
+
+    def get_safe_value(self, default=_sentinel):
+        if default is _sentinel:
+            return self.compiled_value.get_safe_value()
+        else:
+            return self.compiled_value.get_safe_value(default)
+
+    @property
+    def array_type(self):
+        return self.compiled_value.array_type
+
+    def get_key_values(self):
+        return self.compiled_value.get_key_values()
+
+    def py__simple_getitem__(self, index):
+        python_object = self.compiled_value.access_handle.access._obj
+        if type(python_object) in ALLOWED_GETITEM_TYPES:
+            return self.compiled_value.py__simple_getitem__(index)
+        return self._wrapped_value.py__simple_getitem__(index)
+
+    def negate(self):
+        return self.compiled_value.negate()
+
+    def _as_context(self):
+        if self.parent_context is None:
+            return MixedModuleContext(self)
+        return MixedContext(self)
+
     def __repr__(self):
-        return '<%s: %s; %s>' % (type(self).__name__, self.access_handle.
-            get_repr(), self._wrapped_value)
+        return '<%s: %s; %s>' % (
+            type(self).__name__,
+            self.access_handle.get_repr(),
+            self._wrapped_value,
+        )


 class MixedContext(CompiledContext, TreeContextMixin):
-    pass
+    @property
+    def compiled_value(self):
+        return self._value.compiled_value


 class MixedModuleContext(CompiledModuleContext, MixedContext):
@@ -58,19 +112,198 @@ class MixedName(NameWrapper):
     """
     The ``CompiledName._compiled_value`` is our MixedObject.
     """
-
     def __init__(self, wrapped_name, parent_tree_value):
         super().__init__(wrapped_name)
         self._parent_tree_value = parent_tree_value

+    @property
+    def start_pos(self):
+        values = list(self.infer())
+        if not values:
+            # This means a start_pos that doesn't exist (compiled objects).
+            return 0, 0
+        return values[0].name.start_pos
+
+    @memoize_method
+    def infer(self):
+        compiled_value = self._wrapped_name.infer_compiled_value()
+        tree_value = self._parent_tree_value
+        if tree_value.is_instance() or tree_value.is_class():
+            tree_values = tree_value.py__getattribute__(self.string_name)
+            if compiled_value.is_function():
+                return ValueSet({MixedObject(compiled_value, v) for v in tree_values})
+
+        module_context = tree_value.get_root_context()
+        return _create(self._inference_state, compiled_value, module_context)

-class MixedObjectFilter(compiled.CompiledValueFilter):

+class MixedObjectFilter(compiled.CompiledValueFilter):
     def __init__(self, inference_state, compiled_value, tree_value):
         super().__init__(inference_state, compiled_value)
         self._tree_value = tree_value

+    def _create_name(self, *args, **kwargs):
+        return MixedName(
+            super()._create_name(*args, **kwargs),
+            self._tree_value,
+        )
+
+
+@inference_state_function_cache()
+def _load_module(inference_state, path):
+    return inference_state.parse(
+        path=path,
+        cache=True,
+        diff_cache=settings.fast_parser,
+        cache_path=settings.cache_directory
+    ).get_root_node()
+

 def _get_object_to_check(python_object):
     """Check if inspect.getfile has a chance to find the source."""
-    pass
+    try:
+        python_object = inspect.unwrap(python_object)
+    except ValueError:
+        # Can return a ValueError when it wraps around
+        pass
+
+    if (inspect.ismodule(python_object)
+            or inspect.isclass(python_object)
+            or inspect.ismethod(python_object)
+            or inspect.isfunction(python_object)
+            or inspect.istraceback(python_object)
+            or inspect.isframe(python_object)
+            or inspect.iscode(python_object)):
+        return python_object
+
+    try:
+        return python_object.__class__
+    except AttributeError:
+        raise TypeError  # Prevents computation of `repr` within inspect.
+
+
+def _find_syntax_node_name(inference_state, python_object):
+    original_object = python_object
+    try:
+        python_object = _get_object_to_check(python_object)
+        path = inspect.getsourcefile(python_object)
+    except (OSError, TypeError):
+        # The type might not be known (e.g. class_with_dict.__weakref__)
+        return None
+    path = None if path is None else Path(path)
+    try:
+        if path is None or not path.exists():
+            # The path might not exist or be e.g. <stdin>.
+            return None
+    except OSError:
+        # Might raise an OSError on Windows:
+        #
+        #     [WinError 123] The filename, directory name, or volume label
+        #     syntax is incorrect: '<string>'
+        return None
+
+    file_io = FileIO(path)
+    module_node = _load_module(inference_state, path)
+
+    if inspect.ismodule(python_object):
+        # We don't need to check names for modules, because there's not really
+        # a way to write a module in a module in Python (and also __name__ can
+        # be something like ``email.utils``).
+        code_lines = get_cached_code_lines(inference_state.grammar, path)
+        return module_node, module_node, file_io, code_lines
+
+    try:
+        name_str = python_object.__name__
+    except AttributeError:
+        # Stuff like python_function.__code__.
+        return None
+
+    if name_str == '<lambda>':
+        return None  # It's too hard to find lambdas.
+
+    # Doesn't always work (e.g. os.stat_result)
+    names = module_node.get_used_names().get(name_str, [])
+    # Only functions and classes are relevant. If a name e.g. points to an
+    # import, it's probably a builtin (like collections.deque) and needs to be
+    # ignored.
+    names = [
+        n for n in names
+        if n.parent.type in ('funcdef', 'classdef') and n.parent.name == n
+    ]
+    if not names:
+        return None
+
+    try:
+        code = python_object.__code__
+        # By using the line number of a code object we make the lookup in a
+        # file pretty easy. There's still a possibility of people defining
+        # stuff like ``a = 3; foo(a); a = 4`` on the same line, but if people
+        # do so we just don't care.
+        line_nr = code.co_firstlineno
+    except AttributeError:
+        pass
+    else:
+        line_names = [name for name in names if name.start_pos[0] == line_nr]
+        # There's a chance that the object is not available anymore, because
+        # the code has changed in the background.
+        if line_names:
+            names = line_names
+
+    code_lines = get_cached_code_lines(inference_state.grammar, path)
+    # It's really hard to actually get the right definition, here as a last
+    # resort we just return the last one. This choice might lead to odd
+    # completions at some points but will lead to mostly correct type
+    # inference, because people tend to define a public name in a module only
+    # once.
+    tree_node = names[-1].parent
+    if tree_node.type == 'funcdef' and get_api_type(original_object) == 'instance':
+        # If an instance is given and we're landing on a function (e.g.
+        # partial in 3.5), something is completely wrong and we should not
+        # return that.
+        return None
+    return module_node, tree_node, file_io, code_lines
+
+
+@inference_state_function_cache()
+def _create(inference_state, compiled_value, module_context):
+    # TODO accessing this is bad, but it probably doesn't matter that much,
+    # because we're working with interpreters only here.
+    python_object = compiled_value.access_handle.access._obj
+    result = _find_syntax_node_name(inference_state, python_object)
+    if result is None:
+        # TODO Care about generics from stuff like `[1]` and don't return like this.
+        if type(python_object) in (dict, list, tuple):
+            return ValueSet({compiled_value})
+
+        tree_values = to_stub(compiled_value)
+        if not tree_values:
+            return ValueSet({compiled_value})
+    else:
+        module_node, tree_node, file_io, code_lines = result
+
+        if module_context is None or module_context.tree_node != module_node:
+            root_compiled_value = compiled_value.get_root_context().get_value()
+            # TODO this __name__ might be wrong.
+            name = root_compiled_value.py__name__()
+            string_names = tuple(name.split('.'))
+            module_value = ModuleValue(
+                inference_state, module_node,
+                file_io=file_io,
+                string_names=string_names,
+                code_lines=code_lines,
+                is_package=root_compiled_value.is_package(),
+            )
+            if name is not None:
+                inference_state.module_cache.add(string_names, ValueSet([module_value]))
+            module_context = module_value.as_context()
+
+        tree_values = ValueSet({module_context.create_value(tree_node)})
+        if tree_node.type == 'classdef':
+            if not compiled_value.is_class():
+                # Is an instance, not a class.
+                tree_values = tree_values.execute_with_values()
+
+    return ValueSet(
+        MixedObject(compiled_value, tree_value=tree_value)
+        for tree_value in tree_values
+    )
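
`_find_syntax_node_name` above maps a live runtime object back to a source file and a line number so the parsed tree node can be matched to it. The core of that lookup is plain stdlib `inspect`; a reduced sketch (the real code additionally parses the file and filters funcdef/classdef names on that line):

    import inspect

    def locate(python_object):
        try:
            obj = inspect.unwrap(python_object)
            path = inspect.getsourcefile(obj)  # may be None, e.g. <stdin>
        except (TypeError, ValueError):
            return None  # builtins and C extensions have no source file
        code = getattr(obj, '__code__', None)
        line = code.co_firstlineno if code is not None else None
        return path, line

    import json
    print(locate(json.loads))  # (path to json/__init__.py, its line number)
    print(locate(len))         # None: a builtin written in C
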
diff --git a/jedi/inference/compiled/subprocess/functions.py b/jedi/inference/compiled/subprocess/functions.py
index ac69d794..50c47b83 100644
--- a/jedi/inference/compiled/subprocess/functions.py
+++ b/jedi/inference/compiled/subprocess/functions.py
@@ -6,31 +6,66 @@ from pathlib import Path
 from zipfile import ZipFile
 from zipimport import zipimporter, ZipImportError
 from importlib.machinery import all_suffixes
+
 from jedi.inference.compiled import access
 from jedi import debug
 from jedi import parser_utils
 from jedi.file_io import KnownContentFileIO, ZipFileIO


+def get_sys_path():
+    return sys.path
+
+
+def load_module(inference_state, **kwargs):
+    return access.load_module(inference_state, **kwargs)
+
+
+def get_compiled_method_return(inference_state, id, attribute, *args, **kwargs):
+    handle = inference_state.compiled_subprocess.get_access_handle(id)
+    return getattr(handle.access, attribute)(*args, **kwargs)
+
+
+def create_simple_object(inference_state, obj):
+    return access.create_access_path(inference_state, obj)
+
+
 def get_module_info(inference_state, sys_path=None, full_name=None, **kwargs):
     """
     Returns Tuple[Union[NamespaceInfo, FileIO, None], Optional[bool]]
     """
-    pass
+    if sys_path is not None:
+        sys.path, temp = sys_path, sys.path
+    try:
+        return _find_module(full_name=full_name, **kwargs)
+    except ImportError:
+        return None, None
+    finally:
+        if sys_path is not None:
+            sys.path = temp
+
+
+def get_builtin_module_names(inference_state):
+    return sys.builtin_module_names


 def _test_raise_error(inference_state, exception_type):
     """
     Raise an error to simulate certain problems for unit tests.
     """
-    pass
+    raise exception_type


 def _test_print(inference_state, stderr=None, stdout=None):
     """
     Force some prints in the subprocesses. This exists for unit tests.
     """
-    pass
+    if stderr is not None:
+        print(stderr, file=sys.stderr)
+        sys.stderr.flush()
+    if stdout is not None:
+        print(stdout)
+        sys.stdout.flush()


 def _get_init_path(directory_path):
@@ -38,7 +73,54 @@ def _get_init_path(directory_path):
     The __init__ file can be searched in a directory. If found return it, else
     None.
     """
-    pass
+    for suffix in all_suffixes():
+        path = os.path.join(directory_path, '__init__' + suffix)
+        if os.path.exists(path):
+            return path
+    return None
+
+
+def safe_literal_eval(inference_state, value):
+    return parser_utils.safe_literal_eval(value)
+
+
+def iter_module_names(*args, **kwargs):
+    return list(_iter_module_names(*args, **kwargs))
+
+
+def _iter_module_names(inference_state, paths):
+    # Python modules/packages
+    for path in paths:
+        try:
+            dir_entries = ((entry.name, entry.is_dir()) for entry in os.scandir(path))
+        except OSError:
+            try:
+                zip_import_info = zipimporter(path)
+                # Unfortunately, there is no public way to access zipimporter's
+                # private _files member. We therefore have to use a
+                # custom function to iterate over the files.
+                dir_entries = _zip_list_subdirectory(
+                    zip_import_info.archive, zip_import_info.prefix)
+            except ZipImportError:
+                # The file might not exist or reading it might lead to an error.
+                debug.warning("Not possible to list directory: %s", path)
+                continue
+        for name, is_dir in dir_entries:
+            # First Namespaces then modules/stubs
+            if is_dir:
+                # pycache is obviously not an interesting namespace. Also the
+                # name must be a valid identifier.
+                if name != '__pycache__' and name.isidentifier():
+                    yield name
+            else:
+                if name.endswith('.pyi'):  # Stub files
+                    modname = name[:-4]
+                else:
+                    modname = inspect.getmodulename(name)
+
+                if modname and '.' not in modname:
+                    if modname != '__init__':
+                        yield modname


 def _find_module(string, path=None, full_name=None, is_global_search=True):
@@ -51,7 +133,97 @@ def _find_module(string, path=None, full_name=None, is_global_search=True):
     or the name of the module if it is a builtin one and a boolean indicating
     if the module is contained in a package.
     """
-    pass
+    spec = None
+    loader = None
+
+    for finder in sys.meta_path:
+        if is_global_search and finder != importlib.machinery.PathFinder:
+            p = None
+        else:
+            p = path
+        try:
+            find_spec = finder.find_spec
+        except AttributeError:
+            # These are old-school classes that still have a different API, just
+            # ignore those.
+            continue
+
+        spec = find_spec(string, p)
+        if spec is not None:
+            if spec.origin == "frozen":
+                continue
+
+            loader = spec.loader
+
+            if loader is None and not spec.has_location:
+                # This is a namespace package.
+                full_name = string if not path else full_name
+                implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path)
+                return implicit_ns_info, True
+            break
+
+    return _find_module_py33(string, path, loader)
+
+
+def _find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True):
+    if not loader:
+        spec = importlib.machinery.PathFinder.find_spec(string, path)
+        if spec is not None:
+            loader = spec.loader
+
+    if loader is None and path is None:  # Fallback to find builtins
+        try:
+            spec = importlib.util.find_spec(string)
+            if spec is not None:
+                loader = spec.loader
+        except ValueError as e:
+            # See #491. Importlib might raise a ValueError, to avoid this, we
+            # just raise an ImportError to fix the issue.
+            raise ImportError("Originally " + repr(e))
+
+    if loader is None:
+        raise ImportError("Couldn't find a loader for {}".format(string))
+
+    return _from_loader(loader, string)
+
+
+def _from_loader(loader, string):
+    try:
+        is_package_method = loader.is_package
+    except AttributeError:
+        is_package = False
+    else:
+        is_package = is_package_method(string)
+    try:
+        get_filename = loader.get_filename
+    except AttributeError:
+        return None, is_package
+    else:
+        module_path = get_filename(string)
+
+    # To read bytes instead of unicode, "overwrite" loader.get_source where
+    # possible.
+    try:
+        f = type(loader).get_source
+    except AttributeError:
+        raise ImportError("get_source was not defined on loader")
+
+    if f is not importlib.machinery.SourceFileLoader.get_source:
+        # Unfortunately we are reading unicode here, not bytes.
+        # It seems hard to get bytes, because the zip importer
+        # logic just unpacks the zip file and returns a file descriptor
+        # that we cannot as easily access. Therefore we just read it as
+        # a string in the cases where get_source was overwritten.
+        code = loader.get_source(string)
+    else:
+        code = _get_source(loader, string)
+
+    if code is None:
+        return None, is_package
+    if isinstance(loader, zipimporter):
+        return ZipFileIO(module_path, code, Path(loader.archive)), is_package
+
+    return KnownContentFileIO(module_path, code), is_package


 def _get_source(loader, fullname):
@@ -59,12 +231,27 @@ def _get_source(loader, fullname):
     This method is here as a replacement for SourceLoader.get_source. That
     method returns unicode, but we prefer bytes.
     """
-    pass
+    path = loader.get_filename(fullname)
+    try:
+        return loader.get_data(path)
+    except OSError:
+        raise ImportError('source not available through get_data()',
+                          name=fullname)
+
+
+def _zip_list_subdirectory(zip_path, zip_subdir_path):
+    zip_file = ZipFile(zip_path)
+    zip_subdir_path = Path(zip_subdir_path)
+    zip_content_file_paths = zip_file.namelist()
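+    # namelist() returns every member of the archive; keep only the direct
+    # children of zip_subdir_path. Directory members end with a slash.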
+    for raw_file_name in zip_content_file_paths:
+        file_path = Path(raw_file_name)
+        if file_path.parent == zip_subdir_path:
+            file_path = file_path.relative_to(zip_subdir_path)
+            yield file_path.name, raw_file_name.endswith("/")


 class ImplicitNSInfo:
     """Stores information returned from an implicit namespace spec"""
-
     def __init__(self, name, paths):
         self.name = name
         self.paths = paths
diff --git a/jedi/inference/compiled/value.py b/jedi/inference/compiled/value.py
index e49e6da0..b3a841b1 100644
--- a/jedi/inference/compiled/value.py
+++ b/jedi/inference/compiled/value.py
@@ -6,11 +6,13 @@ from functools import partial
 from inspect import Parameter
 from pathlib import Path
 from typing import Optional
+
 from jedi import debug
 from jedi.inference.utils import to_list
 from jedi.cache import memoize_method
 from jedi.inference.filters import AbstractFilter
-from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, ParamNameInterface
+from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, \
+    ParamNameInterface
 from jedi.inference.base_value import Value, ValueSet, NO_VALUES
 from jedi.inference.lazy_value import LazyKnownValue
 from jedi.inference.compiled.access import _sentinel
@@ -22,8 +24,8 @@ from jedi.inference.context import CompiledContext, CompiledModuleContext

 class CheckAttribute:
     """Raises :exc:`AttributeError` if the attribute X is not available."""
-
     def __init__(self, check_name=None):
+        # Remove the py in front of e.g. py__call__.
         self.check_name = check_name

     def __call__(self, func):
@@ -35,27 +37,282 @@ class CheckAttribute:
     def __get__(self, instance, owner):
         if instance is None:
             return self
+
+        # This might raise an AttributeError. That's wanted.
         instance.access_handle.getattr_paths(self.check_name)
         return partial(self.func, instance)


 class CompiledValue(Value):
-
     def __init__(self, inference_state, access_handle, parent_context=None):
         super().__init__(inference_state, parent_context)
         self.access_handle = access_handle

+    def py__call__(self, arguments):
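+        # A return annotation is the cheapest way to infer the result of a
+        # call, so try it before falling back to __call__.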
+        return_annotation = self.access_handle.get_return_annotation()
+        if return_annotation is not None:
+            return create_from_access_path(
+                self.inference_state,
+                return_annotation
+            ).execute_annotation()
+
+        try:
+            self.access_handle.getattr_paths('__call__')
+        except AttributeError:
+            return super().py__call__(arguments)
+        else:
+            if self.access_handle.is_class():
+                from jedi.inference.value import CompiledInstance
+                return ValueSet([
+                    CompiledInstance(self.inference_state, self.parent_context, self, arguments)
+                ])
+            else:
+                return ValueSet(self._execute_function(arguments))
+
+    @CheckAttribute()
+    def py__class__(self):
+        return create_from_access_path(self.inference_state, self.access_handle.py__class__())
+
+    @CheckAttribute()
+    def py__mro__(self):
+        return (self,) + tuple(
+            create_from_access_path(self.inference_state, access)
+            for access in self.access_handle.py__mro__accesses()
+        )
+
+    @CheckAttribute()
+    def py__bases__(self):
+        return tuple(
+            create_from_access_path(self.inference_state, access)
+            for access in self.access_handle.py__bases__()
+        )
+
+    def get_qualified_names(self):
+        return self.access_handle.get_qualified_names()
+
+    def py__bool__(self):
+        return self.access_handle.py__bool__()
+
+    def is_class(self):
+        return self.access_handle.is_class()
+
+    def is_function(self):
+        return self.access_handle.is_function()
+
+    def is_module(self):
+        return self.access_handle.is_module()
+
+    def is_compiled(self):
+        return True
+
+    def is_stub(self):
+        return False
+
+    def is_instance(self):
+        return self.access_handle.is_instance()
+
+    def py__doc__(self):
+        return self.access_handle.py__doc__()
+
+    @to_list
+    def get_param_names(self):
+        try:
+            signature_params = self.access_handle.get_signature_params()
+        except ValueError:  # Has no signature
+            params_str, ret = self._parse_function_doc()
+            if not params_str:
+                tokens = []
+            else:
+                tokens = params_str.split(',')
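+            # Method descriptors don't list ``self`` in their docstring
+            # signature, so insert it manually.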
+            if self.access_handle.ismethoddescriptor():
+                tokens.insert(0, 'self')
+            for p in tokens:
+                name, _, default = p.strip().partition('=')
+                yield UnresolvableParamName(self, name, default)
+        else:
+            for signature_param in signature_params:
+                yield SignatureParamName(self, signature_param)
+
+    def get_signatures(self):
+        _, return_string = self._parse_function_doc()
+        return [BuiltinSignature(self, return_string)]
+
     def __repr__(self):
-        return '<%s: %s>' % (self.__class__.__name__, self.access_handle.
-            get_repr())
+        return '<%s: %s>' % (self.__class__.__name__, self.access_handle.get_repr())
+
+    @memoize_method
+    def _parse_function_doc(self):
+        doc = self.py__doc__()
+        if doc is None:
+            return '', ''
+
+        return _parse_function_doc(doc)
+
+    @property
+    def api_type(self):
+        return self.access_handle.get_api_type()
+
+    def get_filters(self, is_instance=False, origin_scope=None):
+        yield self._ensure_one_filter(is_instance)
+
+    @memoize_method
+    def _ensure_one_filter(self, is_instance):
+        return CompiledValueFilter(self.inference_state, self, is_instance)
+
+    def py__simple_getitem__(self, index):
+        with reraise_getitem_errors(IndexError, KeyError, TypeError):
+            try:
+                access = self.access_handle.py__simple_getitem__(
+                    index,
+                    safe=not self.inference_state.allow_unsafe_executions
+                )
+            except AttributeError:
+                return super().py__simple_getitem__(index)
+        if access is None:
+            return super().py__simple_getitem__(index)
+
+        return ValueSet([create_from_access_path(self.inference_state, access)])
+
+    def py__getitem__(self, index_value_set, contextualized_node):
+        all_access_paths = self.access_handle.py__getitem__all_values()
+        if all_access_paths is None:
+            # This basically means that no __getitem__ has been defined on
+            # this object.
+            return super().py__getitem__(index_value_set, contextualized_node)
+        return ValueSet(
+            create_from_access_path(self.inference_state, access)
+            for access in all_access_paths
+        )
+
+    def py__iter__(self, contextualized_node=None):
+        if not self.access_handle.has_iter():
+            yield from super().py__iter__(contextualized_node)
+
+        access_path_list = self.access_handle.py__iter__list()
+        if access_path_list is None:
+            # There is no __iter__ method on this object.
+            return
+
+        for access in access_path_list:
+            yield LazyKnownValue(create_from_access_path(self.inference_state, access))
+
+    def py__name__(self):
+        return self.access_handle.py__name__()
+
+    @property
+    def name(self):
+        name = self.py__name__()
+        if name is None:
+            name = self.access_handle.get_repr()
+        return CompiledValueName(self, name)
+
+    def _execute_function(self, params):
+        from jedi.inference import docstrings
+        from jedi.inference.compiled import builtin_from_name
+        if self.api_type != 'function':
+            return
+
+        for name in self._parse_function_doc()[1].split():
+            try:
+                # TODO wtf is this? this is exactly the same as the thing
+                # below. It uses getattr as well.
+                self.inference_state.builtins_module.access_handle.getattr_paths(name)
+            except AttributeError:
+                continue
+            else:
+                bltn_obj = builtin_from_name(self.inference_state, name)
+                yield from self.inference_state.execute(bltn_obj, params)
+        yield from docstrings.infer_return_types(self)
+
+    def get_safe_value(self, default=_sentinel):
+        try:
+            return self.access_handle.get_safe_value()
+        except ValueError:
+            if default == _sentinel:
+                raise
+            return default
+
+    def execute_operation(self, other, operator):
+        try:
+            return ValueSet([create_from_access_path(
+                self.inference_state,
+                self.access_handle.execute_operation(other.access_handle, operator)
+            )])
+        except TypeError:
+            return NO_VALUES
+
+    def execute_annotation(self):
+        if self.access_handle.get_repr() == 'None':
+            # None as an annotation doesn't need to be executed.
+            return ValueSet([self])
+
+        name, args = self.access_handle.get_annotation_name_and_args()
+        arguments = [
+            ValueSet([create_from_access_path(self.inference_state, path)])
+            for path in args
+        ]
+        if name == 'Union':
+            return ValueSet.from_sets(arg.execute_annotation() for arg in arguments)
+        elif name:
+            # While with_generics only exists on very specific objects, we
+            # should probably be fine, because we control all the typing
+            # objects.
+            return ValueSet([
+                v.with_generics(arguments)
+                for v in self.inference_state.typing_module.py__getattribute__(name)
+            ]).execute_annotation()
+        return super().execute_annotation()
+
+    def negate(self):
+        return create_from_access_path(self.inference_state, self.access_handle.negate())
+
+    def get_metaclasses(self):
+        return NO_VALUES
+
+    def _as_context(self):
+        return CompiledContext(self)
+
+    @property
+    def array_type(self):
+        return self.access_handle.get_array_type()
+
+    def get_key_values(self):
+        return [
+            create_from_access_path(self.inference_state, k)
+            for k in self.access_handle.get_key_paths()
+        ]
+
+    def get_type_hint(self, add_class_info=True):
+        if self.access_handle.get_repr() in ('None', "<class 'NoneType'>"):
+            return 'None'
+        return None


 class CompiledModule(CompiledValue):
-    file_io = None
+    file_io = None  # For modules

+    def _as_context(self):
+        return CompiledModuleContext(self)

-class CompiledName(AbstractNameDefinition):
+    def py__path__(self):
+        return self.access_handle.py__path__()
+
+    def is_package(self):
+        return self.py__path__() is not None

+    @property
+    def string_names(self):
+        # For modules
+        name = self.py__name__()
+        if name is None:
+            return ()
+        return tuple(name.split('.'))
+
+    def py__file__(self) -> Optional[Path]:
+        return self.access_handle.py__file__()  # type: ignore[no-any-return]
+
+
+class CompiledName(AbstractNameDefinition):
     def __init__(self, inference_state, parent_value, name, is_descriptor):
         self._inference_state = inference_state
         self.parent_context = parent_value.as_context()
@@ -63,32 +320,98 @@ class CompiledName(AbstractNameDefinition):
         self.string_name = name
         self.is_descriptor = is_descriptor

+    def py__doc__(self):
+        return self.infer_compiled_value().py__doc__()
+
+    def _get_qualified_names(self):
+        parent_qualified_names = self.parent_context.get_qualified_names()
+        if parent_qualified_names is None:
+            return None
+        return parent_qualified_names + (self.string_name,)
+
+    def get_defining_qualified_value(self):
+        context = self.parent_context
+        if context.is_module() or context.is_class():
+            return self.parent_context.get_value()  # Might be None
+
+        return None
+
     def __repr__(self):
         try:
-            name = self.parent_context.name
+            name = self.parent_context.name  # __name__ is not defined all the time
         except AttributeError:
             name = None
-        return '<%s: (%s).%s>' % (self.__class__.__name__, name, self.
-            string_name)
+        return '<%s: (%s).%s>' % (self.__class__.__name__, name, self.string_name)

+    @property
+    def api_type(self):
+        if self.is_descriptor:
+            # In case of properties we want to avoid executions as much as
+            # possible. Since the api_type can be wrong for other reasons
+            # anyway, we just return instance here.
+            return "instance"
+        return self.infer_compiled_value().api_type

-class SignatureParamName(ParamNameInterface, AbstractNameDefinition):
+    def infer(self):
+        return ValueSet([self.infer_compiled_value()])

+    @memoize_method
+    def infer_compiled_value(self):
+        return create_from_name(self._inference_state, self._parent_value, self.string_name)
+
+
+class SignatureParamName(ParamNameInterface, AbstractNameDefinition):
     def __init__(self, compiled_value, signature_param):
         self.parent_context = compiled_value.parent_context
         self._signature_param = signature_param

+    @property
+    def string_name(self):
+        return self._signature_param.name
+
+    def to_string(self):
+        s = self._kind_string() + self.string_name
+        if self._signature_param.has_annotation:
+            s += ': ' + self._signature_param.annotation_string
+        if self._signature_param.has_default:
+            s += '=' + self._signature_param.default_string
+        return s
+
+    def get_kind(self):
+        return getattr(Parameter, self._signature_param.kind_name)
+
+    def infer(self):
+        p = self._signature_param
+        inference_state = self.parent_context.inference_state
+        values = NO_VALUES
+        if p.has_default:
+            values = ValueSet([create_from_access_path(inference_state, p.default)])
+        if p.has_annotation:
+            annotation = create_from_access_path(inference_state, p.annotation)
+            values |= annotation.execute_with_values()
+        return values

-class UnresolvableParamName(ParamNameInterface, AbstractNameDefinition):

+class UnresolvableParamName(ParamNameInterface, AbstractNameDefinition):
     def __init__(self, compiled_value, name, default):
         self.parent_context = compiled_value.parent_context
         self.string_name = name
         self._default = default

+    def get_kind(self):
+        return Parameter.POSITIONAL_ONLY
+
+    def to_string(self):
+        string = self.string_name
+        if self._default:
+            string += '=' + self._default
+        return string
+
+    def infer(self):
+        return NO_VALUES

-class CompiledValueName(ValueNameMixin, AbstractNameDefinition):

+class CompiledValueName(ValueNameMixin, AbstractNameDefinition):
     def __init__(self, value, name):
         self.string_name = name
         self._value = value
@@ -101,32 +424,104 @@ class EmptyCompiledName(AbstractNameDefinition):
     completions, just give Jedi the option to return this object. It infers to
     nothing.
     """
-
     def __init__(self, inference_state, name):
         self.parent_context = inference_state.builtins_module
         self.string_name = name

+    def infer(self):
+        return NO_VALUES

-class CompiledValueFilter(AbstractFilter):

+class CompiledValueFilter(AbstractFilter):
     def __init__(self, inference_state, compiled_value, is_instance=False):
         self._inference_state = inference_state
         self.compiled_value = compiled_value
         self.is_instance = is_instance

-    def _get(self, name, allowed_getattr_callback, in_dir_callback,
-        check_has_attribute=False):
+    def get(self, name):
+        access_handle = self.compiled_value.access_handle
+        safe = not self._inference_state.allow_unsafe_executions
+        return self._get(
+            name,
+            lambda name: access_handle.is_allowed_getattr(name, safe=safe),
+            lambda name: name in access_handle.dir(),
+            check_has_attribute=True
+        )
+
+    def _get(self, name, allowed_getattr_callback, in_dir_callback, check_has_attribute=False):
         """
         To remove quite a few access calls we introduced the callback here.
         """
-        pass
+        has_attribute, is_descriptor, property_return_annotation = allowed_getattr_callback(
+            name,
+        )
+        if property_return_annotation is not None:
+            values = create_from_access_path(
+                self._inference_state,
+                property_return_annotation
+            ).execute_annotation()
+            if values:
+                return [CompiledValueName(v, name) for v in values]
+
+        if check_has_attribute and not has_attribute:
+            return []
+
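+        # A descriptor would have to be executed to know its real type;
+        # return an empty name instead unless unsafe executions are allowed.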
+        if (is_descriptor or not has_attribute) \
+                and not self._inference_state.allow_unsafe_executions:
+            return [self._get_cached_name(name, is_empty=True)]
+
+        if self.is_instance and not in_dir_callback(name):
+            return []
+        return [self._get_cached_name(name, is_descriptor=is_descriptor)]
+
+    @memoize_method
+    def _get_cached_name(self, name, is_empty=False, *, is_descriptor=False):
+        if is_empty:
+            return EmptyCompiledName(self._inference_state, name)
+        else:
+            return self._create_name(name, is_descriptor=is_descriptor)
+
+    def values(self):
+        from jedi.inference.compiled import builtin_from_name
+        names = []
+        needs_type_completions, dir_infos = self.compiled_value.access_handle.get_dir_infos()
+        # We could use `safe=False` here as well, especially as a parameter to
+        # get_dir_infos. But this would lead to a lot of property executions
+        # that are probably not wanted. The drawback is that `get` and
+        # `values` then behave differently: for `get` we always execute.
+        for name in dir_infos:
+            names += self._get(
+                name,
+                lambda name: dir_infos[name],
+                lambda name: name in dir_infos,
+            )
+
+        # ``dir`` doesn't include the type names.
+        if not self.is_instance and needs_type_completions:
+            for filter in builtin_from_name(self._inference_state, 'type').get_filters():
+                names += filter.values()
+        return names
+
+    def _create_name(self, name, is_descriptor):
+        return CompiledName(
+            self._inference_state,
+            self.compiled_value,
+            name,
+            is_descriptor,
+        )

     def __repr__(self):
-        return '<%s: %s>' % (self.__class__.__name__, self.compiled_value)
+        return "<%s: %s>" % (self.__class__.__name__, self.compiled_value)


-docstr_defaults = {'floating point number': 'float', 'character': 'str',
-    'integer': 'int', 'dictionary': 'dict', 'string': 'str'}
+docstr_defaults = {
+    'floating point number': 'float',
+    'character': 'str',
+    'integer': 'int',
+    'dictionary': 'dict',
+    'string': 'str',
+}


 def _parse_function_doc(doc):
@@ -137,9 +532,95 @@ def _parse_function_doc(doc):
     TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
     TODO docstrings like 'tuple of integers'
     """
-    pass
+    # parse round parentheses: def func(a, (b,c))
+    try:
+        count = 0
+        start = doc.index('(')
+        for i, s in enumerate(doc[start:]):
+            if s == '(':
+                count += 1
+            elif s == ')':
+                count -= 1
+            if count == 0:
+                end = start + i
+                break
+        param_str = doc[start + 1:end]
+    except (ValueError, UnboundLocalError):
+        # ValueError for doc.index
+        # UnboundLocalError for undefined end in last line
+        debug.dbg('no brackets found - no param')
+        end = 0
+        param_str = ''
+    else:
+        # remove square brackets, that show an optional param ( = None)
+        def change_options(m):
+            args = m.group(1).split(',')
+            for i, a in enumerate(args):
+                if a and '=' not in a:
+                    args[i] += '=None'
+            return ','.join(args)
+
+        while True:
+            param_str, changes = re.subn(r' ?\[([^\[\]]+)\]',
+                                         change_options, param_str)
+            if changes == 0:
+                break
+    param_str = param_str.replace('-', '_')  # see: isinstance.__doc__
+
+    # parse return value
+    r = re.search('-[>-]* ', doc[end:end + 7])
+    if r is None:
+        ret = ''
+    else:
+        index = end + r.end()
+        # get result type, which can contain newlines
+        pattern = re.compile(r'(,\n|[^\n-])+')
+        ret_str = pattern.match(doc, index).group(0).strip()
+        # New object -> object()
+        ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)
+
+        ret = docstr_defaults.get(ret_str, ret_str)
+
+    return param_str, ret
+
+
+def create_from_name(inference_state, compiled_value, name):
+    access_paths = compiled_value.access_handle.getattr_paths(name, default=None)
+
+    value = None
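+    # getattr_paths returns a chain of accesses; create a value for each,
+    # using the previously created value as the parent context.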
+    for access_path in access_paths:
+        value = create_cached_compiled_value(
+            inference_state,
+            access_path,
+            parent_context=None if value is None else value.as_context(),
+        )
+    return value


 def _normalize_create_args(func):
     """The cache doesn't care about keyword vs. normal args."""
-    pass
+    def wrapper(inference_state, obj, parent_context=None):
+        return func(inference_state, obj, parent_context)
+    return wrapper
+
+
+def create_from_access_path(inference_state, access_path):
+    value = None
+    for name, access in access_path.accesses:
+        value = create_cached_compiled_value(
+            inference_state,
+            access,
+            parent_context=None if value is None else value.as_context()
+        )
+    return value
+
+
+@_normalize_create_args
+@inference_state_function_cache()
+def create_cached_compiled_value(inference_state, access_handle, parent_context):
+    assert not isinstance(parent_context, CompiledValue)
+    if parent_context is None:
+        cls = CompiledModule
+    else:
+        cls = CompiledValue
+    return cls(inference_state, access_handle, parent_context)
diff --git a/jedi/inference/context.py b/jedi/inference/context.py
index f08ebfd9..5bc6b994 100644
--- a/jedi/inference/context.py
+++ b/jedi/inference/context.py
@@ -2,9 +2,12 @@ from abc import abstractmethod
 from contextlib import contextmanager
 from pathlib import Path
 from typing import Optional
+
 from parso.tree import search_ancestor
 from parso.python.tree import Name
-from jedi.inference.filters import ParserTreeFilter, MergedFilter, GlobalNameFilter
+
+from jedi.inference.filters import ParserTreeFilter, MergedFilter, \
+    GlobalNameFilter
 from jedi.inference.names import AnonymousParamName, TreeNameDefinition
 from jedi.inference.base_value import NO_VALUES, ValueSet
 from jedi.parser_utils import get_parent_scope
@@ -13,41 +16,326 @@ from jedi import parser_utils


 class AbstractContext:
+    # Subclasses must define inference_state, tree_node and parent_context
+    # as attributes or properties.

     def __init__(self, inference_state):
         self.inference_state = inference_state
         self.predefined_names = {}

-    def py__getattribute__(self, name_or_str, name_context=None, position=
-        None, analysis_errors=True):
+    @abstractmethod
+    def get_filters(self, until_position=None, origin_scope=None):
+        raise NotImplementedError
+
+    def goto(self, name_or_str, position):
+        from jedi.inference import finder
+        filters = _get_global_filters_for_name(
+            self, name_or_str if isinstance(name_or_str, Name) else None, position,
+        )
+        names = finder.filter_name(filters, name_or_str)
+        debug.dbg('context.goto %s in (%s): %s', name_or_str, self, names)
+        return names
+
+    def py__getattribute__(self, name_or_str, name_context=None, position=None,
+                           analysis_errors=True):
         """
         :param position: Position of the last statement -> tuple of line, column
         """
-        pass
+        if name_context is None:
+            name_context = self
+        names = self.goto(name_or_str, position)
+
+        string_name = name_or_str.value if isinstance(name_or_str, Name) else name_or_str
+
+        # This paragraph is currently needed for proper branch type inference
+        # (static analysis).
+        found_predefined_types = None
+        if self.predefined_names and isinstance(name_or_str, Name):
+            node = name_or_str
+            while node is not None and not parser_utils.is_scope(node):
+                node = node.parent
+                if node.type in ("if_stmt", "for_stmt", "comp_for", 'sync_comp_for'):
+                    try:
+                        name_dict = self.predefined_names[node]
+                        types = name_dict[string_name]
+                    except KeyError:
+                        continue
+                    else:
+                        found_predefined_types = types
+                        break
+        if found_predefined_types is not None and names:
+            from jedi.inference import flow_analysis
+            check = flow_analysis.reachability_check(
+                context=self,
+                value_scope=self.tree_node,
+                node=name_or_str,
+            )
+            if check is flow_analysis.UNREACHABLE:
+                values = NO_VALUES
+            else:
+                values = found_predefined_types
+        else:
+            values = ValueSet.from_sets(name.infer() for name in names)
+
+        if not names and not values and analysis_errors:
+            if isinstance(name_or_str, Name):
+                from jedi.inference import analysis
+                message = ("NameError: name '%s' is not defined." % string_name)
+                analysis.add(name_context, 'name-error', name_or_str, message)
+
+        debug.dbg('context.names_to_types: %s -> %s', names, values)
+        if values:
+            return values
+        return self._check_for_additional_knowledge(name_or_str, name_context, position)
+
+    def _check_for_additional_knowledge(self, name_or_str, name_context, position):
+        name_context = name_context or self
+        # Add isinstance and other if/assert knowledge.
+        if isinstance(name_or_str, Name) and not name_context.is_instance():
+            flow_scope = name_or_str
+            base_nodes = [name_context.tree_node]
+
+            if any(b.type in ('comp_for', 'sync_comp_for') for b in base_nodes):
+                return NO_VALUES
+            from jedi.inference.finder import check_flow_information
+            while True:
+                flow_scope = get_parent_scope(flow_scope, include_flows=True)
+                n = check_flow_information(name_context, flow_scope,
+                                           name_or_str, position)
+                if n is not None:
+                    return n
+                if flow_scope in base_nodes:
+                    break
+        return NO_VALUES
+
+    def get_root_context(self):
+        parent_context = self.parent_context
+        if parent_context is None:
+            return self
+        return parent_context.get_root_context()
+
+    def is_module(self):
+        return False
+
+    def is_builtins_module(self):
+        return False
+
+    def is_class(self):
+        return False
+
+    def is_stub(self):
+        return False
+
+    def is_instance(self):
+        return False
+
+    def is_compiled(self):
+        return False
+
+    def is_bound_method(self):
+        return False
+
+    @abstractmethod
+    def py__name__(self):
+        raise NotImplementedError
+
+    def get_value(self):
+        raise NotImplementedError
+
+    @property
+    def name(self):
+        return None
+
+    def get_qualified_names(self):
+        return ()
+
+    def py__doc__(self):
+        return ''
+
+    @contextmanager
+    def predefine_names(self, flow_scope, dct):
+        predefined = self.predefined_names
+        predefined[flow_scope] = dct
+        try:
+            yield
+        finally:
+            del predefined[flow_scope]


 class ValueContext(AbstractContext):
     """
     Should be defined, otherwise the API returns empty types.
     """
-
     def __init__(self, value):
         super().__init__(value.inference_state)
         self._value = value

+    @property
+    def tree_node(self):
+        return self._value.tree_node
+
+    @property
+    def parent_context(self):
+        return self._value.parent_context
+
+    def is_module(self):
+        return self._value.is_module()
+
+    def is_builtins_module(self):
+        return self._value == self.inference_state.builtins_module
+
+    def is_class(self):
+        return self._value.is_class()
+
+    def is_stub(self):
+        return self._value.is_stub()
+
+    def is_instance(self):
+        return self._value.is_instance()
+
+    def is_compiled(self):
+        return self._value.is_compiled()
+
+    def is_bound_method(self):
+        return self._value.is_bound_method()
+
+    def py__name__(self):
+        return self._value.py__name__()
+
+    @property
+    def name(self):
+        return self._value.name
+
+    def get_qualified_names(self):
+        return self._value.get_qualified_names()
+
+    def py__doc__(self):
+        return self._value.py__doc__()
+
+    def get_value(self):
+        return self._value
+
     def __repr__(self):
         return '%s(%s)' % (self.__class__.__name__, self._value)


 class TreeContextMixin:
-    pass
+    def infer_node(self, node):
+        from jedi.inference.syntax_tree import infer_node
+        return infer_node(self, node)
+
+    def create_value(self, node):
+        from jedi.inference import value
+
+        if node == self.tree_node:
+            assert self.is_module()
+            return self.get_value()
+
+        parent_context = self.create_context(node)
+
+        if node.type in ('funcdef', 'lambdef'):
+            func = value.FunctionValue.from_context(parent_context, node)
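+            # A function defined in a class body is treated as a method bound
+            # to an anonymous instance of that class.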
+            if parent_context.is_class():
+                class_value = parent_context.parent_context.create_value(parent_context.tree_node)
+                instance = value.AnonymousInstance(
+                    self.inference_state, parent_context.parent_context, class_value)
+                func = value.BoundMethod(
+                    instance=instance,
+                    class_context=class_value.as_context(),
+                    function=func
+                )
+            return func
+        elif node.type == 'classdef':
+            return value.ClassValue(self.inference_state, parent_context, node)
+        else:
+            raise NotImplementedError("Probably shouldn't happen: %s" % node)
+
+    def create_context(self, node):
+        def from_scope_node(scope_node, is_nested=True):
+            if scope_node == self.tree_node:
+                return self
+
+            if scope_node.type in ('funcdef', 'lambdef', 'classdef'):
+                return self.create_value(scope_node).as_context()
+            elif scope_node.type in ('comp_for', 'sync_comp_for'):
+                parent_context = from_scope_node(parent_scope(scope_node.parent))
+                if node.start_pos >= scope_node.children[-1].start_pos:
+                    return parent_context
+                return CompForContext(parent_context, scope_node)
+            raise Exception("There's a scope that was not managed: %s" % scope_node)
+
+        def parent_scope(node):
+            while True:
+                node = node.parent
+
+                if parser_utils.is_scope(node):
+                    return node
+                elif node.type in ('argument', 'testlist_comp'):
+                    if node.children[1].type in ('comp_for', 'sync_comp_for'):
+                        return node.children[1]
+                elif node.type == 'dictorsetmaker':
+                    for n in node.children[1:4]:
+                        # In dictionaries it can be pretty much anything.
+                        if n.type in ('comp_for', 'sync_comp_for'):
+                            return n
+
+        scope_node = parent_scope(node)
+        if scope_node.type in ('funcdef', 'classdef'):
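+            # Default values, annotations and base classes appear before the
+            # colon and are evaluated in the enclosing scope, not inside the
+            # function or class itself.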
+            colon = scope_node.children[scope_node.children.index(':')]
+            if node.start_pos < colon.start_pos:
+                parent = node.parent
+                if not (parent.type == 'param' and parent.name == node):
+                    scope_node = parent_scope(scope_node)
+        return from_scope_node(scope_node, is_nested=True)
+
+    def create_name(self, tree_name):
+        definition = tree_name.get_definition()
+        if definition and definition.type == 'param' and definition.name == tree_name:
+            funcdef = search_ancestor(definition, 'funcdef', 'lambdef')
+            func = self.create_value(funcdef)
+            return AnonymousParamName(func, tree_name)
+        else:
+            context = self.create_context(tree_name)
+            return TreeNameDefinition(context, tree_name)


 class FunctionContext(TreeContextMixin, ValueContext):
-    pass
+    def get_filters(self, until_position=None, origin_scope=None):
+        yield ParserTreeFilter(
+            self.inference_state,
+            parent_context=self,
+            until_position=until_position,
+            origin_scope=origin_scope
+        )


 class ModuleContext(TreeContextMixin, ValueContext):
+    def py__file__(self) -> Optional[Path]:
+        return self._value.py__file__()  # type: ignore[no-any-return]
+
+    def get_filters(self, until_position=None, origin_scope=None):
+        filters = self._value.get_filters(origin_scope)
+        # Skip the first filter and replace it.
+        next(filters, None)
+        yield MergedFilter(
+            ParserTreeFilter(
+                parent_context=self,
+                until_position=until_position,
+                origin_scope=origin_scope
+            ),
+            self.get_global_filter(),
+        )
+        yield from filters
+
+    def get_global_filter(self):
+        return GlobalNameFilter(self)
+
+    @property
+    def string_names(self):
+        return self._value.string_names
+
+    @property
+    def code_lines(self):
+        return self._value.code_lines

     def get_value(self):
         """
@@ -55,35 +343,94 @@ class ModuleContext(TreeContextMixin, ValueContext):
         This is necessary for stub -> python conversion and vice versa. However
         this method shouldn't be moved to AbstractContext.
         """
-        pass
+        return self._value


 class NamespaceContext(TreeContextMixin, ValueContext):
-    pass
+    def get_filters(self, until_position=None, origin_scope=None):
+        return self._value.get_filters()
+
+    def get_value(self):
+        return self._value
+
+    @property
+    def string_names(self):
+        return self._value.string_names
+
+    def py__file__(self) -> Optional[Path]:
+        return self._value.py__file__()  # type: ignore[no-any-return]


 class ClassContext(TreeContextMixin, ValueContext):
-    pass
+    def get_filters(self, until_position=None, origin_scope=None):
+        yield self.get_global_filter(until_position, origin_scope)

+    def get_global_filter(self, until_position=None, origin_scope=None):
+        return ParserTreeFilter(
+            parent_context=self,
+            until_position=until_position,
+            origin_scope=origin_scope
+        )

-class CompForContext(TreeContextMixin, AbstractContext):

+class CompForContext(TreeContextMixin, AbstractContext):
     def __init__(self, parent_context, comp_for):
         super().__init__(parent_context.inference_state)
         self.tree_node = comp_for
         self.parent_context = parent_context

+    def get_filters(self, until_position=None, origin_scope=None):
+        yield ParserTreeFilter(self)
+
+    def get_value(self):
+        return None
+
+    def py__name__(self):
+        return '<comprehension context>'
+
     def __repr__(self):
         return '%s(%s)' % (self.__class__.__name__, self.tree_node)


 class CompiledContext(ValueContext):
-    pass
+    def get_filters(self, until_position=None, origin_scope=None):
+        return self._value.get_filters()


 class CompiledModuleContext(CompiledContext):
     code_lines = None

+    def get_value(self):
+        return self._value
+
+    @property
+    def string_names(self):
+        return self._value.string_names
+
+    def py__file__(self) -> Optional[Path]:
+        return self._value.py__file__()  # type: ignore[no-any-return]
+
+
+def _get_global_filters_for_name(context, name_or_none, position):
+    # For functions and classes the defaults don't belong to the
+    # function and get inferred in the value before the function. So
+    # make sure to exclude the function/class name.
+    if name_or_none is not None:
+        ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef', 'lambdef')
+        lambdef = None
+        if ancestor == 'lambdef':
+            # For lambdas it's even more complicated since parts will
+            # be inferred later.
+            lambdef = ancestor
+            ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef')
+        if ancestor is not None:
+            colon = ancestor.children[-2]
+            if position is not None and position < colon.start_pos:
+                if lambdef is None or position < lambdef.children[-2].start_pos:
+                    position = ancestor.start_pos
+
+    return get_global_filters(context, position, name_or_none)
+

 def get_global_filters(context, until_position, origin_scope):
     """
@@ -132,4 +479,21 @@ def get_global_filters(context, until_position, origin_scope):
     >>> list(filters[3].values())  # doctest: +ELLIPSIS
     [...]
     """
-    pass
+    base_context = context
+    from jedi.inference.value.function import BaseFunctionExecutionContext
+    while context is not None:
+        # Names in methods cannot be resolved within the class.
+        yield from context.get_filters(
+            until_position=until_position,
+            origin_scope=origin_scope
+        )
+        if isinstance(context, (BaseFunctionExecutionContext, ModuleContext)):
+            # The position should be reset if the current scope is a function.
+            until_position = None
+
+        context = context.parent_context
+
+    b = next(base_context.inference_state.builtins_module.get_filters(), None)
+    assert b is not None
+    # Add builtins to the global scope.
+    yield b
diff --git a/jedi/inference/docstring_utils.py b/jedi/inference/docstring_utils.py
index 307bddc3..bee0d75e 100644
--- a/jedi/inference/docstring_utils.py
+++ b/jedi/inference/docstring_utils.py
@@ -3,14 +3,19 @@ from jedi.inference.context import ModuleContext


 class DocstringModule(ModuleValue):
-
     def __init__(self, in_module_context, **kwargs):
         super().__init__(**kwargs)
         self._in_module_context = in_module_context

+    def _as_context(self):
+        return DocstringModuleContext(self, self._in_module_context)

-class DocstringModuleContext(ModuleContext):

+class DocstringModuleContext(ModuleContext):
     def __init__(self, module_value, in_module_context):
         super().__init__(module_value)
         self._in_module_context = in_module_context
+
+    def get_filters(self, origin_scope=None, until_position=None):
+        yield from super().get_filters(until_position=until_position)
+        yield from self._in_module_context.get_filters()
diff --git a/jedi/inference/docstrings.py b/jedi/inference/docstrings.py
index c054a3b5..809974f3 100644
--- a/jedi/inference/docstrings.py
+++ b/jedi/inference/docstrings.py
@@ -14,38 +14,119 @@ type of ``foo`` is ``str``.
 As an addition to parameter searching, this module also provides return
 annotations.
 """
+
 import re
 import warnings
+
 from parso import parse, ParserSyntaxError
+
 from jedi import debug
 from jedi.inference.cache import inference_state_method_cache
-from jedi.inference.base_value import iterator_to_value_set, ValueSet, NO_VALUES
+from jedi.inference.base_value import iterator_to_value_set, ValueSet, \
+    NO_VALUES
 from jedi.inference.lazy_value import LazyKnownValues
-DOCSTRING_PARAM_PATTERNS = ['\\s*:type\\s+%s:\\s*([^\\n]+)',
-    '\\s*:param\\s+(\\w+)\\s+%s:[^\\n]*', '\\s*@type\\s+%s:\\s*([^\\n]+)']
-DOCSTRING_RETURN_PATTERNS = [re.compile('\\s*:rtype:\\s*([^\\n]+)', re.M),
-    re.compile('\\s*@rtype:\\s*([^\\n]+)', re.M)]
-REST_ROLE_PATTERN = re.compile(':[^`]+:`([^`]+)`')
+
+
+DOCSTRING_PARAM_PATTERNS = [
+    r'\s*:type\s+%s:\s*([^\n]+)',  # Sphinx
+    r'\s*:param\s+(\w+)\s+%s:[^\n]*',  # Sphinx param with type
+    r'\s*@type\s+%s:\s*([^\n]+)',  # Epydoc
+]
+
+DOCSTRING_RETURN_PATTERNS = [
+    re.compile(r'\s*:rtype:\s*([^\n]+)', re.M),  # Sphinx
+    re.compile(r'\s*@rtype:\s*([^\n]+)', re.M),  # Epydoc
+]
+
+REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')
+
+
 _numpy_doc_string_cache = None


+def _get_numpy_doc_string_cls():
+    global _numpy_doc_string_cache
+    if isinstance(_numpy_doc_string_cache, (ImportError, SyntaxError)):
+        raise _numpy_doc_string_cache
+    from numpydoc.docscrape import NumpyDocString  # type: ignore[import]
+    _numpy_doc_string_cache = NumpyDocString
+    return _numpy_doc_string_cache
+
+
 def _search_param_in_numpydocstr(docstr, param_str):
     """Search `docstr` (in numpydoc format) for type(-s) of `param_str`."""
-    pass
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        try:
+            # This is a non-public API. If it ever changes we should be
+            # prepared and return gracefully.
+            params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters']
+        except Exception:
+            return []
+    for p_name, p_type, p_descr in params:
+        if p_name == param_str:
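+            # Strip a trailing ", optional" marker from the type string.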
+            m = re.match(r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type)
+            if m:
+                p_type = m.group(1)
+            return list(_expand_typestr(p_type))
+    return []


 def _search_return_in_numpydocstr(docstr):
     """
     Search `docstr` (in numpydoc format) for type(-s) of function returns.
     """
-    pass
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        try:
+            doc = _get_numpy_doc_string_cls()(docstr)
+        except Exception:
+            return
+    try:
+        # This is a non-public API. If it ever changes we should be
+        # prepared and return gracefully.
+        returns = doc._parsed_data['Returns']
+        returns += doc._parsed_data['Yields']
+    except Exception:
+        return
+    for r_name, r_type, r_descr in returns:
+        # Return names are optional; when omitted, the type is stored in the
+        # name field.
+        if not r_type:
+            r_type = r_name
+        yield from _expand_typestr(r_type)


 def _expand_typestr(type_str):
     """
     Attempts to interpret the possible types in `type_str`
     """
-    pass
+    # Check if alternative types are specified with 'or'
+    if re.search(r'\bor\b', type_str):
+        for t in type_str.split('or'):
+            yield t.split('of')[0].strip()
+    # Handle phrases like "list of `type`" and keep only the container type.
+    elif re.search(r'\bof\b', type_str):
+        yield type_str.split('of')[0]
+    # Check if the type is a set of valid literal values, e.g. {'C', 'F', 'A'}
+    elif type_str.startswith('{'):
+        node = parse(type_str, version='3.7').children[0]
+        if node.type == 'atom':
+            for leaf in getattr(node.children[1], "children", []):
+                if leaf.type == 'number':
+                    if '.' in leaf.value:
+                        yield 'float'
+                    else:
+                        yield 'int'
+                elif leaf.type == 'string':
+                    if 'b' in leaf.string_prefix.lower():
+                        yield 'bytes'
+                    else:
+                        yield 'str'
+                # Ignore everything else.
+
+    # Otherwise just work with what we have.
+    else:
+        yield type_str


 def _search_param_in_docstr(docstr, param_str):
@@ -65,7 +146,15 @@ def _search_param_in_docstr(docstr, param_str):
     ['int']

     """
-    pass
+    # look at #40 to see definitions of those params
+    patterns = [re.compile(p % re.escape(param_str))
+                for p in DOCSTRING_PARAM_PATTERNS]
+    for pattern in patterns:
+        match = pattern.search(docstr)
+        if match:
+            return [_strip_rst_role(match.group(1))]
+
+    return _search_param_in_numpydocstr(docstr, param_str)


 def _strip_rst_role(type_str):
@@ -83,7 +172,48 @@ def _strip_rst_role(type_str):
     http://sphinx-doc.org/domains.html#cross-referencing-python-objects

     """
-    pass
+    match = REST_ROLE_PATTERN.match(type_str)
+    if match:
+        return match.group(1)
+    else:
+        return type_str
+
+
+def _infer_for_statement_string(module_context, string):
+    if string is None:
+        return []
+
+    potential_imports = re.findall(r'((?:\w+\.)*\w+)\.', string)
+    # Try to import module part in dotted name.
+    # (e.g., 'threading' in 'threading.Thread').
+    imports = "\n".join(f"import {p}" for p in potential_imports)
+    string = f'{imports}\n{string}'
+
+    debug.dbg('Parse docstring code %s', string, color='BLUE')
+    grammar = module_context.inference_state.grammar
+    try:
+        module = grammar.parse(string, error_recovery=False)
+    except ParserSyntaxError:
+        return []
+    try:
+        # It's not the last item, because that's an end marker.
+        stmt = module.children[-2]
+    except (AttributeError, IndexError):
+        return []
+
+    if stmt.type not in ('name', 'atom', 'atom_expr'):
+        return []
+
+    # Here we basically use a fake module that also uses the filters in
+    # the actual module.
+    from jedi.inference.docstring_utils import DocstringModule
+    m = DocstringModule(
+        in_module_context=module_context,
+        inference_state=module_context.inference_state,
+        module_node=module,
+        code_lines=[],
+    )
+    return list(_execute_types_in_stmt(m.as_context(), stmt))


 def _execute_types_in_stmt(module_context, stmt):
@@ -92,7 +222,11 @@ def _execute_types_in_stmt(module_context, stmt):
     doesn't include tuple, list and dict literals, because the stuff they
     contain is executed. (Used as type information).
     """
-    pass
+    definitions = module_context.infer_node(stmt)
+    return ValueSet.from_sets(
+        _execute_array_values(module_context.inference_state, d)
+        for d in definitions
+    )


 def _execute_array_values(inference_state, array):
@@ -100,4 +234,53 @@ def _execute_array_values(inference_state, array):
     Tuples indicate that there's not just one return value, but the listed
     ones.  `(str, int)` means that it returns a tuple with both types.
     """
-    pass
+    from jedi.inference.value.iterable import SequenceLiteralValue, FakeTuple, FakeList
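+    # A tuple or list literal in a docstring describes its element types, so
+    # recurse into each element instead of treating the literal as the value.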
+    if isinstance(array, SequenceLiteralValue) and array.array_type in ('tuple', 'list'):
+        values = []
+        for lazy_value in array.py__iter__():
+            objects = ValueSet.from_sets(
+                _execute_array_values(inference_state, typ)
+                for typ in lazy_value.infer()
+            )
+            values.append(LazyKnownValues(objects))
+        cls = FakeTuple if array.array_type == 'tuple' else FakeList
+        return {cls(inference_state, values)}
+    else:
+        return array.execute_annotation()
+
+
+@inference_state_method_cache()
+def infer_param(function_value, param):
+    def infer_docstring(docstring):
+        return ValueSet(
+            p
+            for param_str in _search_param_in_docstr(docstring, param.name.value)
+            for p in _infer_for_statement_string(module_context, param_str)
+        )
+    module_context = function_value.get_root_context()
+    func = param.get_parent_function()
+    if func.type == 'lambdef':
+        return NO_VALUES
+
+    types = infer_docstring(function_value.py__doc__())
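+    # Parameter types for __init__ are often documented in the class
+    # docstring, so search there as well.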
+    if function_value.is_bound_method() \
+            and function_value.py__name__() == '__init__':
+        types |= infer_docstring(function_value.class_context.py__doc__())
+
+    debug.dbg('Found param types for docstring: %s', types, color='BLUE')
+    return types
+
+
+@inference_state_method_cache()
+@iterator_to_value_set
+def infer_return_types(function_value):
+    def search_return_in_docstr(code):
+        for p in DOCSTRING_RETURN_PATTERNS:
+            match = p.search(code)
+            if match:
+                yield _strip_rst_role(match.group(1))
+        # Check for numpy style return hint
+        yield from _search_return_in_numpydocstr(code)
+
+    for type_str in search_return_in_docstr(function_value.py__doc__()):
+        yield from _infer_for_statement_string(function_value.get_root_context(), type_str)
diff --git a/jedi/inference/dynamic_params.py b/jedi/inference/dynamic_params.py
index ef714079..e759111a 100644
--- a/jedi/inference/dynamic_params.py
+++ b/jedi/inference/dynamic_params.py
@@ -16,6 +16,7 @@ It works as follows:
 - search for function calls named ``foo``
 - execute these calls and check the input.
 """
+
 from jedi import settings
 from jedi import debug
 from jedi.parser_utils import get_parent_scope
@@ -28,9 +29,28 @@ from jedi.inference.value import instance
 from jedi.inference.base_value import ValueSet, NO_VALUES
 from jedi.inference.references import get_module_contexts_containing_name
 from jedi.inference import recursion
+
+
 MAX_PARAM_SEARCHES = 20


+def _avoid_recursions(func):
+    def wrapper(function_value, param_index):
+        inf = function_value.inference_state
+        with recursion.execution_allowed(inf, function_value.tree_node) as allowed:
+            # We need to catch recursions that may occur, because an
+            # anonymous function can create an anonymous parameter that is
+            # more or less self-referencing.
+            if allowed:
+                inf.dynamic_params_depth += 1
+                try:
+                    return func(function_value, param_index)
+                finally:
+                    inf.dynamic_params_depth -= 1
+            return NO_VALUES
+    return wrapper
+
+
 @debug.increase_indent
 @_avoid_recursions
 def dynamic_param_lookup(function_value, param_index):
@@ -46,7 +66,37 @@ def dynamic_param_lookup(function_value, param_index):
     have to look for all calls to ``func`` to find out what ``foo`` possibly
     is.
     """
-    pass
+    if not function_value.inference_state.do_dynamic_params_search:
+        return NO_VALUES
+
+    funcdef = function_value.tree_node
+
+    path = function_value.get_root_context().py__file__()
+    if path is not None and is_stdlib_path(path):
+        # We don't want to search for references in the stdlib. Usually people
+        # don't work with it (except if you are a core maintainer, sorry).
+        # This makes everything slower. Just disable it and run the tests,
+        # you will see the slowdown, especially in 3.6.
+        return NO_VALUES
+
+    if funcdef.type == 'lambdef':
+        string_name = _get_lambda_name(funcdef)
+        if string_name is None:
+            return NO_VALUES
+    else:
+        string_name = funcdef.name.value
+    debug.dbg('Dynamic param search in %s.', string_name, color='MAGENTA')
+
+    module_context = function_value.get_root_context()
+    arguments_list = _search_function_arguments(module_context, funcdef, string_name)
+    values = ValueSet.from_sets(
+        get_executed_param_names(
+            function_value, arguments
+        )[param_index].infer()
+        for arguments in arguments_list
+    )
+    debug.dbg('Dynamic param result finished', color='MAGENTA')
+    return values


 @inference_state_method_cache(default=None)
@@ -55,4 +105,120 @@ def _search_function_arguments(module_context, funcdef, string_name):
     """
     Returns a list of param names.
     """
-    pass
+    compare_node = funcdef
+    if string_name == '__init__':
+        cls = get_parent_scope(funcdef)
+        if cls.type == 'classdef':
+            string_name = cls.name.value
+            compare_node = cls
+
+    found_arguments = False
+    i = 0
+    inference_state = module_context.inference_state
+
+    if settings.dynamic_params_for_other_modules:
+        module_contexts = get_module_contexts_containing_name(
+            inference_state, [module_context], string_name,
+            # Limit the amounts of files to be opened massively.
+            limit_reduction=5,
+        )
+    else:
+        module_contexts = [module_context]
+
+    for for_mod_context in module_contexts:
+        for name, trailer in _get_potential_nodes(for_mod_context, string_name):
+            i += 1
+
+            # This is a simple way to stop Jedi's dynamic param recursion
+            # from going wild: the deeper Jedi is in the recursion, the less
+            # code should be inferred.
+            if i * inference_state.dynamic_params_depth > MAX_PARAM_SEARCHES:
+                return
+
+            random_context = for_mod_context.create_context(name)
+            for arguments in _check_name_for_execution(
+                    inference_state, random_context, compare_node, name, trailer):
+                found_arguments = True
+                yield arguments
+
+        # If a module yields results, we can probably stop searching further
+        # modules. This is a speed optimization.
+        if found_arguments:
+            return
+
+
+def _get_lambda_name(node):
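+    # A lambda has no name of its own; use the assignment target of a direct
+    # ``name = lambda: ...`` statement if there is one.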
+    stmt = node.parent
+    if stmt.type == 'expr_stmt':
+        first_operator = next(stmt.yield_operators(), None)
+        if first_operator == '=':
+            first = stmt.children[0]
+            if first.type == 'name':
+                return first.value
+
+    return None
+
+
+def _get_potential_nodes(module_value, func_string_name):
+    try:
+        names = module_value.tree_node.get_used_names()[func_string_name]
+    except KeyError:
+        return
+
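+    # Only names that are immediately followed by a call bracket are
+    # potential executions.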
+    for name in names:
+        bracket = name.get_next_leaf()
+        trailer = bracket.parent
+        if trailer.type == 'trailer' and bracket == '(':
+            yield name, trailer
+
+
+def _check_name_for_execution(inference_state, context, compare_node, name, trailer):
+    from jedi.inference.value.function import BaseFunctionExecutionContext
+
+    def create_args(value):
+        arglist = trailer.children[1]
+        if arglist == ')':
+            arglist = None
+        args = TreeArguments(inference_state, context, arglist, trailer)
+        from jedi.inference.value.instance import InstanceArguments
+        if value.tree_node.type == 'classdef':
+            created_instance = instance.TreeInstance(
+                inference_state,
+                value.parent_context,
+                value,
+                args
+            )
+            return InstanceArguments(created_instance, args)
+        else:
+            if value.is_bound_method():
+                args = InstanceArguments(value.instance, args)
+            return args
+
+    for value in inference_state.infer(context, name):
+        value_node = value.tree_node
+        if compare_node == value_node:
+            yield create_args(value)
+        elif isinstance(value.parent_context, BaseFunctionExecutionContext) \
+                and compare_node.type == 'funcdef':
+            # Here we're trying to find decorators by checking the first
+            # parameter. It's not very generic though. Should find a better
+            # solution that also applies to nested decorators.
+            param_names = value.parent_context.get_param_names()
+            if len(param_names) != 1:
+                continue
+            values = param_names[0].infer()
+            if [v.tree_node for v in values] == [compare_node]:
+                # Found a decorator.
+                module_context = context.get_root_context()
+                execution_context = value.as_context(create_args(value))
+                potential_nodes = _get_potential_nodes(module_context, param_names[0].string_name)
+                for name, trailer in potential_nodes:
+                    if value_node.start_pos < name.start_pos < value_node.end_pos:
+                        random_context = execution_context.create_context(name)
+                        yield from _check_name_for_execution(
+                            inference_state,
+                            random_context,
+                            compare_node,
+                            name,
+                            trailer
+                        )
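
A minimal sketch of what the dynamic-param search above enables, using jedi's
public API (jedi.Script and the default settings.dynamic_params are real; the
expected output is an assumption, not taken from this report):

    import textwrap
    import jedi

    source = textwrap.dedent('''
        def foo(a):
            a
        foo(42)
    ''')
    # With dynamic params enabled, the type of the unannotated parameter
    # `a` is inferred from the call site `foo(42)`.
    # Line 3, column 4 points at the `a` reference inside foo's body.
    print([d.name for d in jedi.Script(source).infer(line=3, column=4)])
    # assumed output: ['int']
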
diff --git a/jedi/inference/filters.py b/jedi/inference/filters.py
index 4dab38a3..62782334 100644
--- a/jedi/inference/filters.py
+++ b/jedi/inference/filters.py
@@ -5,13 +5,18 @@ are needed for name resolution.
 from abc import abstractmethod
 from typing import List, MutableMapping, Type
 import weakref
+
 from parso.tree import search_ancestor
 from parso.python.tree import Name, UsedNamesMapping
+
 from jedi.inference import flow_analysis
-from jedi.inference.base_value import ValueSet, ValueWrapper, LazyValueWrapper
+from jedi.inference.base_value import ValueSet, ValueWrapper, \
+    LazyValueWrapper
 from jedi.parser_utils import get_cached_parent_scope, get_parso_cache_node
 from jedi.inference.utils import to_list
-from jedi.inference.names import TreeNameDefinition, ParamName, AnonymousParamName, AbstractNameDefinition, NameWrapper
+from jedi.inference.names import TreeNameDefinition, ParamName, \
+    AnonymousParamName, AbstractNameDefinition, NameWrapper
+
 _definition_name_cache: MutableMapping[UsedNamesMapping, List[Name]]
 _definition_name_cache = weakref.WeakKeyDictionary()

@@ -19,6 +24,19 @@ _definition_name_cache = weakref.WeakKeyDictionary()
 class AbstractFilter:
     _until_position = None

+    def _filter(self, names):
+        if self._until_position is not None:
+            return [n for n in names if n.start_pos < self._until_position]
+        return names
+
+    @abstractmethod
+    def get(self, name):
+        raise NotImplementedError
+
+    @abstractmethod
+    def values(self):
+        raise NotImplementedError
+

 class FilterWrapper:
     name_wrapper_class: Type[NameWrapper]
@@ -26,6 +44,35 @@ class FilterWrapper:
     def __init__(self, wrapped_filter):
         self._wrapped_filter = wrapped_filter

+    def wrap_names(self, names):
+        return [self.name_wrapper_class(name) for name in names]
+
+    def get(self, name):
+        return self.wrap_names(self._wrapped_filter.get(name))
+
+    def values(self):
+        return self.wrap_names(self._wrapped_filter.values())
+
+
+def _get_definition_names(parso_cache_node, used_names, name_key):
+    if parso_cache_node is None:
+        names = used_names.get(name_key, ())
+        return tuple(name for name in names if name.is_definition(include_setitem=True))
+
+    try:
+        for_module = _definition_name_cache[parso_cache_node]
+    except KeyError:
+        for_module = _definition_name_cache[parso_cache_node] = {}
+
+    try:
+        return for_module[name_key]
+    except KeyError:
+        names = used_names.get(name_key, ())
+        result = for_module[name_key] = tuple(
+            name for name in names if name.is_definition(include_setitem=True)
+        )
+        return result
+

 class _AbstractUsedNamesFilter(AbstractFilter):
     name_class = TreeNameDefinition
@@ -36,24 +83,52 @@ class _AbstractUsedNamesFilter(AbstractFilter):
         self._node_context = node_context
         self._parser_scope = node_context.tree_node
         module_context = node_context.get_root_context()
+        # It is quite hacky that we have to do this. It is needed for
+        # caching certain things with a WeakKeyDictionary. However, parso
+        # intentionally uses slots (to save memory), so we end up having to
+        # keep a weak reference to the object that caches the tree.
+        #
+        # Previously we tried to solve this by using a weak reference onto
+        # used_names. However, that does not work either, because used_names
+        # is referenced from the module, which itself is referenced by every
+        # node through parents.
         path = module_context.py__file__()
         if path is None:
+            # If the path is None, there is no guarantee that parso caches it.
             self._parso_cache_node = None
         else:
-            self._parso_cache_node = get_parso_cache_node(module_context.
-                inference_state.latest_grammar if module_context.is_stub() else
-                module_context.inference_state.grammar, path)
+            self._parso_cache_node = get_parso_cache_node(
+                module_context.inference_state.latest_grammar
+                if module_context.is_stub() else module_context.inference_state.grammar,
+                path
+            )
         self._used_names = module_context.tree_node.get_used_names()
         self.parent_context = parent_context

+    def get(self, name):
+        return self._convert_names(self._filter(
+            _get_definition_names(self._parso_cache_node, self._used_names, name),
+        ))
+
+    def _convert_names(self, names):
+        return [self.name_class(self.parent_context, name) for name in names]
+
+    def values(self):
+        return self._convert_names(
+            name
+            for name_key in self._used_names
+            for name in self._filter(
+                _get_definition_names(self._parso_cache_node, self._used_names, name_key),
+            )
+        )
+
     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self.parent_context)


 class ParserTreeFilter(_AbstractUsedNamesFilter):
-
-    def __init__(self, parent_context, node_context=None, until_position=
-        None, origin_scope=None):
+    def __init__(self, parent_context, node_context=None, until_position=None,
+                 origin_scope=None):
         """
         node_context is an option to specify a second value for use cases
         like the class mro where the parent class of a new name would be the
@@ -64,49 +139,134 @@ class ParserTreeFilter(_AbstractUsedNamesFilter):
         self._origin_scope = origin_scope
         self._until_position = until_position

+    def _filter(self, names):
+        names = super()._filter(names)
+        names = [n for n in names if self._is_name_reachable(n)]
+        return list(self._check_flows(names))
+
+    def _is_name_reachable(self, name):
+        parent = name.parent
+        if parent.type == 'trailer':
+            return False
+        base_node = parent if parent.type in ('classdef', 'funcdef') else name
+        return get_cached_parent_scope(self._parso_cache_node, base_node) == self._parser_scope
+
+    def _check_flows(self, names):
+        for name in sorted(names, key=lambda name: name.start_pos, reverse=True):
+            check = flow_analysis.reachability_check(
+                context=self._node_context,
+                value_scope=self._parser_scope,
+                node=name,
+                origin_scope=self._origin_scope
+            )
+            if check is not flow_analysis.UNREACHABLE:
+                yield name
+
+            if check is flow_analysis.REACHABLE:
+                break

-class _FunctionExecutionFilter(ParserTreeFilter):

-    def __init__(self, parent_context, function_value, until_position,
-        origin_scope):
-        super().__init__(parent_context, until_position=until_position,
-            origin_scope=origin_scope)
+class _FunctionExecutionFilter(ParserTreeFilter):
+    def __init__(self, parent_context, function_value, until_position, origin_scope):
+        super().__init__(
+            parent_context,
+            until_position=until_position,
+            origin_scope=origin_scope,
+        )
         self._function_value = function_value

+    def _convert_param(self, param, name):
+        raise NotImplementedError
+
+    @to_list
+    def _convert_names(self, names):
+        for name in names:
+            param = search_ancestor(name, 'param')
+            # Here we don't need to check if the param is a default/annotation,
+            # because those are not definitions and never make it to this
+            # point.
+            if param:
+                yield self._convert_param(param, name)
+            else:
+                yield TreeNameDefinition(self.parent_context, name)

-class FunctionExecutionFilter(_FunctionExecutionFilter):

+class FunctionExecutionFilter(_FunctionExecutionFilter):
     def __init__(self, *args, arguments, **kwargs):
         super().__init__(*args, **kwargs)
         self._arguments = arguments

+    def _convert_param(self, param, name):
+        return ParamName(self._function_value, name, self._arguments)
+

 class AnonymousFunctionExecutionFilter(_FunctionExecutionFilter):
-    pass
+    def _convert_param(self, param, name):
+        return AnonymousParamName(self._function_value, name)


 class GlobalNameFilter(_AbstractUsedNamesFilter):
-    pass
+    def get(self, name):
+        try:
+            names = self._used_names[name]
+        except KeyError:
+            return []
+        return self._convert_names(self._filter(names))
+
+    @to_list
+    def _filter(self, names):
+        for name in names:
+            if name.parent.type == 'global_stmt':
+                yield name
+
+    def values(self):
+        return self._convert_names(
+            name for name_list in self._used_names.values()
+            for name in self._filter(name_list)
+        )


 class DictFilter(AbstractFilter):
-
     def __init__(self, dct):
         self._dct = dct

+    def get(self, name):
+        try:
+            value = self._convert(name, self._dct[name])
+        except KeyError:
+            return []
+        else:
+            return list(self._filter([value]))
+
+    def values(self):
+        def yielder():
+            for item in self._dct.items():
+                try:
+                    yield self._convert(*item)
+                except KeyError:
+                    pass
+        return self._filter(yielder())
+
+    def _convert(self, name, value):
+        return value
+
     def __repr__(self):
         keys = ', '.join(self._dct.keys())
         return '<%s: for {%s}>' % (self.__class__.__name__, keys)


 class MergedFilter:
-
     def __init__(self, *filters):
         self._filters = filters

+    def get(self, name):
+        return [n for filter in self._filters for n in filter.get(name)]
+
+    def values(self):
+        return [n for filter in self._filters for n in filter.values()]
+
     def __repr__(self):
-        return '%s(%s)' % (self.__class__.__name__, ', '.join(str(f) for f in
-            self._filters))
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(str(f) for f in self._filters))


 class _BuiltinMappedMethod(ValueWrapper):
@@ -118,24 +278,40 @@ class _BuiltinMappedMethod(ValueWrapper):
         self._value = value
         self._method = method

+    def py__call__(self, arguments):
+        # TODO add a TypeError if params are given or not correct.
+        return self._method(self._value, arguments)
+

 class SpecialMethodFilter(DictFilter):
     """
     A filter for methods that are defined in this module on the corresponding
     classes like Generator (for __next__, etc).
     """
-
-
     class SpecialMethodName(AbstractNameDefinition):
         api_type = 'function'

-        def __init__(self, parent_context, string_name, callable_,
-            builtin_value):
+        def __init__(self, parent_context, string_name, callable_, builtin_value):
             self.parent_context = parent_context
             self.string_name = string_name
             self._callable = callable_
             self._builtin_value = builtin_value

+        def infer(self):
+            for filter in self._builtin_value.get_filters():
+                # We can take the first index, because on builtin methods there's
+                # always only going to be one name. The same is true for the
+                # inferred values.
+                for name in filter.get(self.string_name):
+                    builtin_func = next(iter(name.infer()))
+                    break
+                else:
+                    continue
+                break
+            return ValueSet([
+                _BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)
+            ])
+
     def __init__(self, value, dct, builtin_value):
         super().__init__(dct)
         self.value = value
@@ -147,17 +323,21 @@ class SpecialMethodFilter(DictFilter):
         We distinguish, because we have to.
         """

+    def _convert(self, name, value):
+        return self.SpecialMethodName(self.value, name, value, self._builtin_value)

-class _OverwriteMeta(type):

+class _OverwriteMeta(type):
     def __init__(cls, name, bases, dct):
         super().__init__(name, bases, dct)
+
         base_dct = {}
         for base_cls in reversed(cls.__bases__):
             try:
                 base_dct.update(base_cls.overwritten_methods)
             except AttributeError:
                 pass
+
         for func in cls.__dict__.values():
             try:
                 base_dct.update(func.registered_overwritten_methods)
@@ -167,16 +347,25 @@ class _OverwriteMeta(type):


 class _AttributeOverwriteMixin:
-    pass
+    def get_filters(self, *args, **kwargs):
+        yield SpecialMethodFilter(self, self.overwritten_methods, self._wrapped_value)
+        yield from self._wrapped_value.get_filters(*args, **kwargs)


 class LazyAttributeOverwrite(_AttributeOverwriteMixin, LazyValueWrapper,
-    metaclass=_OverwriteMeta):
-
+                             metaclass=_OverwriteMeta):
     def __init__(self, inference_state):
         self.inference_state = inference_state


-class AttributeOverwrite(_AttributeOverwriteMixin, ValueWrapper, metaclass=
-    _OverwriteMeta):
+class AttributeOverwrite(_AttributeOverwriteMixin, ValueWrapper,
+                         metaclass=_OverwriteMeta):
     pass
+
+
+def publish_method(method_name):
+    def decorator(func):
+        dct = func.__dict__.setdefault('registered_overwritten_methods', {})
+        dct[method_name] = func
+        return func
+    return decorator
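
The filter contract added above (get()/values()) can be exercised directly.
A toy sketch, assuming jedi with this patch is importable; plain ints stand
in for the parso Name objects that real callers pass:

    from jedi.inference.filters import DictFilter, MergedFilter

    # DictFilter wraps a plain mapping; MergedFilter concatenates the
    # results of all of its filters, in order.
    f1 = DictFilter({'a': 1})
    f2 = DictFilter({'a': 2, 'b': 3})
    merged = MergedFilter(f1, f2)
    print(merged.get('a'))  # assumed: [1, 2] (f1's result first)
    print(merged.get('b'))  # assumed: [3]
    print(merged.get('c'))  # assumed: []

Note the contrast with filter_name in finder.py below, which stops at the
first filter that returns anything, rather than merging.
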
diff --git a/jedi/inference/finder.py b/jedi/inference/finder.py
index 48ed829c..aac58732 100644
--- a/jedi/inference/finder.py
+++ b/jedi/inference/finder.py
@@ -14,8 +14,10 @@ would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
 Unfortunately every other thing is being ignored (e.g. a == '' would be easy to
 check for -> a is a string). There's big potential in these checks.
 """
+
 from parso.tree import search_ancestor
 from parso.python.tree import Name
+
 from jedi import settings
 from jedi.inference.arguments import TreeArguments
 from jedi.inference.value import iterable
@@ -28,7 +30,24 @@ def filter_name(filters, name_or_str):
     Searches names that are defined in a scope (the different
     ``filters``), until a name fits.
     """
-    pass
+    string_name = name_or_str.value if isinstance(name_or_str, Name) else name_or_str
+    names = []
+    for filter in filters:
+        names = filter.get(string_name)
+        if names:
+            break
+
+    return list(_remove_del_stmt(names))
+
+
+def _remove_del_stmt(names):
+    # Catch del statements and remove them from results.
+    for name in names:
+        if name.tree_name is not None:
+            definition = name.tree_name.get_definition()
+            if definition is not None and definition.type == 'del_stmt':
+                continue
+        yield name


 def check_flow_information(value, flow, search_name, pos):
@@ -40,4 +59,88 @@ def check_flow_information(value, flow, search_name, pos):

     ensures that `k` is a string.
     """
-    pass
+    if not settings.dynamic_flow_information:
+        return None
+
+    result = None
+    if is_scope(flow):
+        # Check for asserts.
+        module_node = flow.get_root_node()
+        try:
+            names = module_node.get_used_names()[search_name.value]
+        except KeyError:
+            return None
+        names = reversed([
+            n for n in names
+            if flow.start_pos <= n.start_pos < (pos or flow.end_pos)
+        ])
+
+        for name in names:
+            ass = search_ancestor(name, 'assert_stmt')
+            if ass is not None:
+                result = _check_isinstance_type(value, ass.assertion, search_name)
+                if result is not None:
+                    return result
+
+    if flow.type in ('if_stmt', 'while_stmt'):
+        potential_ifs = [c for c in flow.children[1::4] if c != ':']
+        for if_test in reversed(potential_ifs):
+            if search_name.start_pos > if_test.end_pos:
+                return _check_isinstance_type(value, if_test, search_name)
+    return result
+
+
+def _get_isinstance_trailer_arglist(node):
+    if node.type in ('power', 'atom_expr') and len(node.children) == 2:
+        # This might be removed if we analyze and, etc
+        first, trailer = node.children
+        if first.type == 'name' and first.value == 'isinstance' \
+                and trailer.type == 'trailer' and trailer.children[0] == '(':
+            return trailer
+    return None
+
+
+def _check_isinstance_type(value, node, search_name):
+    lazy_cls = None
+    trailer = _get_isinstance_trailer_arglist(node)
+    if trailer is not None and len(trailer.children) == 3:
+        arglist = trailer.children[1]
+        args = TreeArguments(value.inference_state, value, arglist, trailer)
+        param_list = list(args.unpack())
+        # Disallow keyword arguments
+        if len(param_list) == 2 and len(arglist.children) == 3:
+            (key1, _), (key2, lazy_value_cls) = param_list
+            if key1 is None and key2 is None:
+                call = _get_call_string(search_name)
+                is_instance_call = _get_call_string(arglist.children[0])
+                # Do a simple get_code comparison of the strings. They should
+                # just have the same code, and everything will be all right.
+                # There are cases where this is not correct, e.g. if something
+                # is redefined in between. However, we don't care here, because
+                # it's a heuristic that works pretty well.
+                if call == is_instance_call:
+                    lazy_cls = lazy_value_cls
+    if lazy_cls is None:
+        return None
+
+    value_set = NO_VALUES
+    for cls_or_tup in lazy_cls.infer():
+        if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple':
+            for lazy_value in cls_or_tup.py__iter__():
+                value_set |= lazy_value.infer().execute_with_values()
+        else:
+            value_set |= cls_or_tup.execute_with_values()
+    return value_set
+
+
+def _get_call_string(node):
+    if node.parent.type == 'atom_expr':
+        return _get_call_string(node.parent)
+
+    code = ''
+    leaf = node.get_first_leaf()
+    end = node.get_last_leaf().end_pos
+    while leaf.start_pos < end:
+        code += leaf.value
+        leaf = leaf.get_next_leaf()
+    return code
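
The isinstance heuristic above is what makes flow-sensitive completion work.
A sketch against jedi's public API (the expected result is an assumption and
relies on settings.dynamic_flow_information, which is on by default):

    import textwrap
    import jedi

    source = textwrap.dedent('''
        def f(x):
            if isinstance(x, str):
                x.
    ''')
    # Completing after `x.` (line 4, column 10): the surrounding
    # `isinstance(x, str)` check narrows `x` to str, so str methods appear.
    completions = jedi.Script(source).complete(line=4, column=10)
    print('upper' in [c.name for c in completions])  # assumed: True
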
diff --git a/jedi/inference/flow_analysis.py b/jedi/inference/flow_analysis.py
index d2a4ad77..89bfe578 100644
--- a/jedi/inference/flow_analysis.py
+++ b/jedi/inference/flow_analysis.py
@@ -1,4 +1,5 @@
 from typing import Dict, Optional
+
 from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope
 from jedi.inference.recursion import execution_allowed
 from jedi.inference.helpers import is_big_annoying_library
@@ -7,11 +8,19 @@ from jedi.inference.helpers import is_big_annoying_library
 class Status:
     lookup_table: Dict[Optional[bool], 'Status'] = {}

-    def __init__(self, value: Optional[bool], name: str) ->None:
+    def __init__(self, value: Optional[bool], name: str) -> None:
         self._value = value
         self._name = name
         Status.lookup_table[value] = self

+    def invert(self):
+        if self is REACHABLE:
+            return UNREACHABLE
+        elif self is UNREACHABLE:
+            return REACHABLE
+        else:
+            return UNSURE
+
     def __and__(self, other):
         if UNSURE in (self, other):
             return UNSURE
@@ -25,3 +34,92 @@ class Status:
 REACHABLE = Status(True, 'reachable')
 UNREACHABLE = Status(False, 'unreachable')
 UNSURE = Status(None, 'unsure')
+
+
+def _get_flow_scopes(node):
+    while True:
+        node = get_parent_scope(node, include_flows=True)
+        if node is None or is_scope(node):
+            return
+        yield node
+
+
+def reachability_check(context, value_scope, node, origin_scope=None):
+    if is_big_annoying_library(context) \
+            or not context.inference_state.flow_analysis_enabled:
+        return UNSURE
+
+    first_flow_scope = get_parent_scope(node, include_flows=True)
+    if origin_scope is not None:
+        origin_flow_scopes = list(_get_flow_scopes(origin_scope))
+        node_flow_scopes = list(_get_flow_scopes(node))
+
+        branch_matches = True
+        for flow_scope in origin_flow_scopes:
+            if flow_scope in node_flow_scopes:
+                node_keyword = get_flow_branch_keyword(flow_scope, node)
+                origin_keyword = get_flow_branch_keyword(flow_scope, origin_scope)
+                branch_matches = node_keyword == origin_keyword
+                if flow_scope.type == 'if_stmt':
+                    if not branch_matches:
+                        return UNREACHABLE
+                elif flow_scope.type == 'try_stmt':
+                    if not branch_matches and origin_keyword == 'else' \
+                            and node_keyword == 'except':
+                        return UNREACHABLE
+                if branch_matches:
+                    break
+
+        # Direct parents get resolved; we filter scopes that are separate
+        # branches. This makes sense for autocompletion and static analysis.
+        # For actual Python it doesn't matter, because we're talking about
+        # potentially unreachable code.
+        # E.g. `if 0:` would make every name lookup within the flow
+        # inaccessible. This is not a "problem" in Python, because the code
+        # is never executed. In Jedi, though, we still want to infer types.
+        while origin_scope is not None:
+            if first_flow_scope == origin_scope and branch_matches:
+                return REACHABLE
+            origin_scope = origin_scope.parent
+
+    return _break_check(context, value_scope, first_flow_scope, node)
+
+
+def _break_check(context, value_scope, flow_scope, node):
+    reachable = REACHABLE
+    if flow_scope.type == 'if_stmt':
+        if flow_scope.is_node_after_else(node):
+            for check_node in flow_scope.get_test_nodes():
+                reachable = _check_if(context, check_node)
+                if reachable in (REACHABLE, UNSURE):
+                    break
+            reachable = reachable.invert()
+        else:
+            flow_node = flow_scope.get_corresponding_test_node(node)
+            if flow_node is not None:
+                reachable = _check_if(context, flow_node)
+    elif flow_scope.type in ('try_stmt', 'while_stmt'):
+        return UNSURE
+
+    # Only reachable branches need to be examined further.
+    if reachable in (UNREACHABLE, UNSURE):
+        return reachable
+
+    if value_scope != flow_scope and value_scope != flow_scope.parent:
+        flow_scope = get_parent_scope(flow_scope, include_flows=True)
+        return reachable & _break_check(context, value_scope, flow_scope, node)
+    else:
+        return reachable
+
+
+def _check_if(context, node):
+    with execution_allowed(context.inference_state, node) as allowed:
+        if not allowed:
+            return UNSURE
+
+        types = context.infer_node(node)
+        values = set(x.py__bool__() for x in types)
+        if len(values) == 1:
+            return Status.lookup_table[values.pop()]
+        else:
+            return UNSURE
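
The Status values above form a small three-valued logic. A quick sketch of
the semantics that are visible in the code above (only behaviour shown in
this patch is asserted; the instances are module-level singletons, so `is`
comparisons hold):

    from jedi.inference.flow_analysis import REACHABLE, UNREACHABLE, UNSURE

    # UNSURE is absorbing for `&` (see Status.__and__ above)...
    assert (UNSURE & REACHABLE) is UNSURE
    assert (REACHABLE & UNSURE) is UNSURE
    # ...and invert() swaps REACHABLE/UNREACHABLE, leaving UNSURE alone.
    assert REACHABLE.invert() is UNREACHABLE
    assert UNREACHABLE.invert() is REACHABLE
    assert UNSURE.invert() is UNSURE
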
diff --git a/jedi/inference/gradual/annotation.py b/jedi/inference/gradual/annotation.py
index e720fb88..57098276 100644
--- a/jedi/inference/gradual/annotation.py
+++ b/jedi/inference/gradual/annotation.py
@@ -4,9 +4,12 @@ through function annotations. There is a strong suggestion in this document
 that only the type of type hinting defined in PEP0484 should be allowed
 as annotations in future python versions.
 """
+
 import re
 from inspect import Parameter
+
 from parso import ParserSyntaxError, parse
+
 from jedi.inference.cache import inference_state_method_cache
 from jedi.inference.base_value import ValueSet, NO_VALUES
 from jedi.inference.gradual.base import DefineGenericBaseClass, GenericClass
@@ -28,7 +31,51 @@ def infer_annotation(context, annotation):

     Also checks for forward references (strings)
     """
-    pass
+    value_set = context.infer_node(annotation)
+    if len(value_set) != 1:
+        debug.warning("Inferred typing index %s should lead to 1 object, "
+                      " not %s" % (annotation, value_set))
+        return value_set
+
+    inferred_value = list(value_set)[0]
+    if is_string(inferred_value):
+        result = _get_forward_reference_node(context, inferred_value.get_safe_value())
+        if result is not None:
+            return context.infer_node(result)
+    return value_set
+
+
+def _infer_annotation_string(context, string, index=None):
+    node = _get_forward_reference_node(context, string)
+    if node is None:
+        return NO_VALUES
+
+    value_set = context.infer_node(node)
+    if index is not None:
+        value_set = value_set.filter(
+            lambda value: (
+                value.array_type == 'tuple'
+                and len(list(value.py__iter__())) >= index
+            )
+        ).py__simple_getitem__(index)
+    return value_set
+
+
+def _get_forward_reference_node(context, string):
+    try:
+        new_node = context.inference_state.grammar.parse(
+            string,
+            start_symbol='eval_input',
+            error_recovery=False
+        )
+    except ParserSyntaxError:
+        debug.warning('Annotation not parsed: %s' % string)
+        return None
+    else:
+        module = context.tree_node.get_root_node()
+        parser_utils.move(new_node, module.end_pos[0])
+        new_node.parent = context.tree_node
+        return new_node


 def _split_comment_param_declaration(decl_text):
@@ -40,14 +87,139 @@ def _split_comment_param_declaration(decl_text):
     ['foo', 'Bar[baz, biz]'].

     """
-    pass
+    try:
+        node = parse(decl_text, error_recovery=False).children[0]
+    except ParserSyntaxError:
+        debug.warning('Comment annotation is not valid Python: %s' % decl_text)
+        return []
+
+    if node.type in ['name', 'atom_expr', 'power']:
+        return [node.get_code().strip()]
+
+    params = []
+    try:
+        children = node.children
+    except AttributeError:
+        return []
+    else:
+        for child in children:
+            if child.type in ['name', 'atom_expr', 'power']:
+                params.append(child.get_code().strip())
+
+    return params
+
+
+@inference_state_method_cache()
+def infer_param(function_value, param, ignore_stars=False):
+    values = _infer_param(function_value, param)
+    if ignore_stars or not values:
+        return values
+    inference_state = function_value.inference_state
+    if param.star_count == 1:
+        tuple_ = builtin_from_name(inference_state, 'tuple')
+        return ValueSet([GenericClass(
+            tuple_,
+            TupleGenericManager((values,)),
+        )])
+    elif param.star_count == 2:
+        dct = builtin_from_name(inference_state, 'dict')
+        generics = (
+            ValueSet([builtin_from_name(inference_state, 'str')]),
+            values
+        )
+        return ValueSet([GenericClass(
+            dct,
+            TupleGenericManager(generics),
+        )])
+    return values


 def _infer_param(function_value, param):
     """
     Infers the type of a function parameter, using type annotations.
     """
-    pass
+    annotation = param.annotation
+    if annotation is None:
+        # If there is no Python 3-style annotation, look for a comment
+        # annotation. Identify the function's parameters in the same order
+        # as they would appear in a type comment.
+        all_params = [child for child in param.parent.children
+                      if child.type == 'param']
+
+        node = param.parent.parent
+        comment = parser_utils.get_following_comment_same_line(node)
+        if comment is None:
+            return NO_VALUES
+
+        match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment)
+        if not match:
+            return NO_VALUES
+        params_comments = _split_comment_param_declaration(match.group(1))
+
+        # Find the specific param being investigated
+        index = all_params.index(param)
+        # If the number of parameters doesn't match length of type comment,
+        # ignore first parameter (assume it's self).
+        if len(params_comments) != len(all_params):
+            debug.warning(
+                "Comments length != Params length %s %s",
+                params_comments, all_params
+            )
+        if function_value.is_bound_method():
+            if index == 0:
+                # Assume it's self, which is already handled
+                return NO_VALUES
+            index -= 1
+        if index >= len(params_comments):
+            return NO_VALUES
+
+        param_comment = params_comments[index]
+        return _infer_annotation_string(
+            function_value.get_default_param_context(),
+            param_comment
+        )
+    # Annotations are like default params and resolve in the same way.
+    context = function_value.get_default_param_context()
+    return infer_annotation(context, annotation)
+
+
+def py__annotations__(funcdef):
+    dct = {}
+    for function_param in funcdef.get_params():
+        param_annotation = function_param.annotation
+        if param_annotation is not None:
+            dct[function_param.name.value] = param_annotation
+
+    return_annotation = funcdef.annotation
+    if return_annotation:
+        dct['return'] = return_annotation
+    return dct
+
+
+def resolve_forward_references(context, all_annotations):
+    def resolve(node):
+        if node is None or node.type != 'string':
+            return node
+
+        node = _get_forward_reference_node(
+            context,
+            context.inference_state.compiled_subprocess.safe_literal_eval(
+                node.value,
+            ),
+        )
+
+        if node is None:
+            # There was a string, but it's not a valid annotation
+            return None
+
+        # The forward reference tree has an additional root node ('eval_input')
+        # that we don't want. Extract the node we do want, which is equivalent
+        # to the nodes returned by `py__annotations__` for a non-quoted node.
+        node = node.children[0]
+
+        return node
+
+    return {name: resolve(node) for name, node in all_annotations.items()}


 @inference_state_method_cache()
@@ -56,7 +228,41 @@ def infer_return_types(function, arguments):
     Infers the type of a function's return value,
     according to type annotations.
     """
-    pass
+    context = function.get_default_param_context()
+    all_annotations = resolve_forward_references(
+        context,
+        py__annotations__(function.tree_node),
+    )
+    annotation = all_annotations.get("return", None)
+    if annotation is None:
+        # If there is no Python 3-type annotation, look for an annotation
+        # comment.
+        node = function.tree_node
+        comment = parser_utils.get_following_comment_same_line(node)
+        if comment is None:
+            return NO_VALUES
+
+        match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
+        if not match:
+            return NO_VALUES
+
+        return _infer_annotation_string(
+            context,
+            match.group(1).strip()
+        ).execute_annotation()
+
+    unknown_type_vars = find_unknown_type_vars(context, annotation)
+    annotation_values = infer_annotation(context, annotation)
+    if not unknown_type_vars:
+        return annotation_values.execute_annotation()
+
+    type_var_dict = infer_type_vars_for_execution(function, arguments, all_annotations)
+
+    return ValueSet.from_sets(
+        ann.define_generics(type_var_dict)
+        if isinstance(ann, (DefineGenericBaseClass, TypeVar)) else ValueSet({ann})
+        for ann in annotation_values
+    ).execute_annotation()


 def infer_type_vars_for_execution(function, arguments, annotation_dict):
@@ -69,7 +275,47 @@ def infer_type_vars_for_execution(function, arguments, annotation_dict):
     2. Infer type vars with the execution state we have.
     3. Return the union of all type vars that have been found.
     """
-    pass
+    context = function.get_default_param_context()
+
+    annotation_variable_results = {}
+    executed_param_names = get_executed_param_names(function, arguments)
+    for executed_param_name in executed_param_names:
+        try:
+            annotation_node = annotation_dict[executed_param_name.string_name]
+        except KeyError:
+            continue
+
+        annotation_variables = find_unknown_type_vars(context, annotation_node)
+        if annotation_variables:
+            # Infer unknown type var
+            annotation_value_set = context.infer_node(annotation_node)
+            kind = executed_param_name.get_kind()
+            actual_value_set = executed_param_name.infer()
+            if kind is Parameter.VAR_POSITIONAL:
+                actual_value_set = actual_value_set.merge_types_of_iterate()
+            elif kind is Parameter.VAR_KEYWORD:
+                # TODO _dict_values is not public.
+                actual_value_set = actual_value_set.try_merge('_dict_values')
+            merge_type_var_dicts(
+                annotation_variable_results,
+                annotation_value_set.infer_type_vars(actual_value_set),
+            )
+    return annotation_variable_results
+
+
+def infer_return_for_callable(arguments, param_values, result_values):
+    all_type_vars = {}
+    for pv in param_values:
+        if pv.array_type == 'list':
+            type_var_dict = _infer_type_vars_for_callable(arguments, pv.py__iter__())
+            all_type_vars.update(type_var_dict)
+
+    return ValueSet.from_sets(
+        v.define_generics(all_type_vars)
+        if isinstance(v, (DefineGenericBaseClass, TypeVar))
+        else ValueSet({v})
+        for v in result_values
+    ).execute_annotation()


 def _infer_type_vars_for_callable(arguments, lazy_params):
@@ -78,7 +324,25 @@ def _infer_type_vars_for_callable(arguments, lazy_params):

         def x() -> Callable[[Callable[..., _T]], _T]: ...
     """
-    pass
+    annotation_variable_results = {}
+    for (_, lazy_value), lazy_callable_param in zip(arguments.unpack(), lazy_params):
+        callable_param_values = lazy_callable_param.infer()
+        # Infer unknown type var
+        actual_value_set = lazy_value.infer()
+        merge_type_var_dicts(
+            annotation_variable_results,
+            callable_param_values.infer_type_vars(actual_value_set),
+        )
+    return annotation_variable_results
+
+
+def merge_type_var_dicts(base_dict, new_dict):
+    for type_var_name, values in new_dict.items():
+        if values:
+            try:
+                base_dict[type_var_name] |= values
+            except KeyError:
+                base_dict[type_var_name] = values


 def merge_pairwise_generics(annotation_value, annotated_argument_class):
@@ -115,4 +379,96 @@ def merge_pairwise_generics(annotation_value, annotated_argument_class):
     `annotated_argument_class`: represents the annotated class of the
         argument being passed to the object annotated by `annotation_value`.
     """
-    pass
+
+    type_var_dict = {}
+
+    if not isinstance(annotated_argument_class, DefineGenericBaseClass):
+        return type_var_dict
+
+    annotation_generics = annotation_value.get_generics()
+    actual_generics = annotated_argument_class.get_generics()
+
+    for annotation_generics_set, actual_generic_set in zip(annotation_generics, actual_generics):
+        merge_type_var_dicts(
+            type_var_dict,
+            annotation_generics_set.infer_type_vars(actual_generic_set.execute_annotation()),
+        )
+
+    return type_var_dict
+
+
+def find_type_from_comment_hint_for(context, node, name):
+    return _find_type_from_comment_hint(context, node, node.children[1], name)
+
+
+def find_type_from_comment_hint_with(context, node, name):
+    if len(node.children) > 4:
+        # In case there are multiple with_items, we do not want a type hint for
+        # now.
+        return []
+    assert len(node.children[1].children) == 3, \
+        "Can only be here when children[1] is 'foo() as f'"
+    varlist = node.children[1].children[2]
+    return _find_type_from_comment_hint(context, node, varlist, name)
+
+
+def find_type_from_comment_hint_assign(context, node, name):
+    return _find_type_from_comment_hint(context, node, node.children[0], name)
+
+
+def _find_type_from_comment_hint(context, node, varlist, name):
+    index = None
+    if varlist.type in ("testlist_star_expr", "exprlist", "testlist"):
+        # something like "a, b = 1, 2"
+        index = 0
+        for child in varlist.children:
+            if child == name:
+                break
+            if child.type == "operator":
+                continue
+            index += 1
+        else:
+            return []
+
+    comment = parser_utils.get_following_comment_same_line(node)
+    if comment is None:
+        return []
+    match = re.match(r"^#\s*type:\s*([^#]*)", comment)
+    if match is None:
+        return []
+    return _infer_annotation_string(
+        context, match.group(1).strip(), index
+    ).execute_annotation()
+
+
+def find_unknown_type_vars(context, node):
+    def check_node(node):
+        if node.type in ('atom_expr', 'power'):
+            trailer = node.children[-1]
+            if trailer.type == 'trailer' and trailer.children[0] == '[':
+                for subscript_node in _unpack_subscriptlist(trailer.children[1]):
+                    check_node(subscript_node)
+        else:
+            found[:] = _filter_type_vars(context.infer_node(node), found)
+
+    found = []  # We're not using a set, because the order matters.
+    check_node(node)
+    return found
+
+
+def _filter_type_vars(value_set, found=()):
+    new_found = list(found)
+    for type_var in value_set:
+        if isinstance(type_var, TypeVar) and type_var not in found:
+            new_found.append(type_var)
+    return new_found
+
+
+def _unpack_subscriptlist(subscriptlist):
+    if subscriptlist.type == 'subscriptlist':
+        for subscript in subscriptlist.children[::2]:
+            if subscript.type != 'subscript':
+                yield subscript
+    else:
+        if subscriptlist.type != 'subscript':
+            yield subscriptlist
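
A sketch of the comment-annotation path above in action, via jedi's public
API (the inferred name is an assumption, not taken from this report):

    import textwrap
    import jedi

    source = textwrap.dedent('''
        def add(a, b):
            # type: (int, float) -> float
            return a + b

        result = add(1, 2.0)
        result
    ''')
    # _infer_param and infer_return_types both fall back to the
    # `# type: (...) -> ...` comment when no annotations are present.
    print([d.name for d in jedi.Script(source).infer(line=7, column=3)])
    # assumed output: ['float']
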
diff --git a/jedi/inference/gradual/base.py b/jedi/inference/gradual/base.py
index 6eb858d9..ce574297 100644
--- a/jedi/inference/gradual/base.py
+++ b/jedi/inference/gradual/base.py
@@ -1,5 +1,6 @@
 from jedi.inference.cache import inference_state_method_cache
-from jedi.inference.base_value import ValueSet, NO_VALUES, Value, iterator_to_value_set, LazyValueWrapper, ValueWrapper
+from jedi.inference.base_value import ValueSet, NO_VALUES, Value, \
+    iterator_to_value_set, LazyValueWrapper, ValueWrapper
 from jedi.inference.compiled import builtin_from_name
 from jedi.inference.value.klass import ClassFilter
 from jedi.inference.value.klass import ClassMixin
@@ -13,15 +14,27 @@ class _BoundTypeVarName(AbstractNameDefinition):
     """
     This type var was bound to a certain type, e.g. int.
     """
-
     def __init__(self, type_var, value_set):
         self._type_var = type_var
         self.parent_context = type_var.parent_context
         self._value_set = value_set

+    def infer(self):
+        def iter_():
+            for value in self._value_set:
+                # Replace any with the constraints if they are there.
+                from jedi.inference.gradual.typing import AnyClass
+                if isinstance(value, AnyClass):
+                    yield from self._type_var.constraints
+                else:
+                    yield value
+        return ValueSet(iter_())
+
+    def py__name__(self):
+        return self._type_var.py__name__()
+
     def __repr__(self):
-        return '<%s %s -> %s>' % (self.__class__.__name__, self.py__name__(
-            ), self._value_set)
+        return '<%s %s -> %s>' % (self.__class__.__name__, self.py__name__(), self._value_set)


 class _TypeVarFilter:
@@ -35,24 +48,113 @@ class _TypeVarFilter:

     In this example we would have two type vars given: A and B
     """
-
     def __init__(self, generics, type_vars):
         self._generics = generics
         self._type_vars = type_vars

+    def get(self, name):
+        for i, type_var in enumerate(self._type_vars):
+            if type_var.py__name__() == name:
+                try:
+                    return [_BoundTypeVarName(type_var, self._generics[i])]
+                except IndexError:
+                    return [type_var.name]
+        return []
+
+    def values(self):
+        # The values are not relevant here. If a name is not searched for
+        # exactly, the type vars are just globals and are looked up as such.
+        return []
+

 class _AnnotatedClassContext(ClassContext):
-    pass
+    def get_filters(self, *args, **kwargs):
+        filters = super().get_filters(
+            *args, **kwargs
+        )
+        yield from filters

+        # The type vars can only be looked up if it's a global search and
+        # not a direct lookup on the class.
+        yield self._value.get_type_var_filter()

-class DefineGenericBaseClass(LazyValueWrapper):

+class DefineGenericBaseClass(LazyValueWrapper):
     def __init__(self, generics_manager):
         self._generics_manager = generics_manager

+    def _create_instance_with_generics(self, generics_manager):
+        raise NotImplementedError
+
+    @inference_state_method_cache()
+    def get_generics(self):
+        return self._generics_manager.to_tuple()
+
+    def define_generics(self, type_var_dict):
+        from jedi.inference.gradual.type_var import TypeVar
+        changed = False
+        new_generics = []
+        for generic_set in self.get_generics():
+            values = NO_VALUES
+            for generic in generic_set:
+                if isinstance(generic, (DefineGenericBaseClass, TypeVar)):
+                    result = generic.define_generics(type_var_dict)
+                    values |= result
+                    if result != ValueSet({generic}):
+                        changed = True
+                else:
+                    values |= ValueSet([generic])
+            new_generics.append(values)
+
+        if not changed:
+            # There might not be any type vars that change. In that case just
+            # return itself, because it does not make sense to potentially lose
+            # cached results.
+            return ValueSet([self])
+
+        return ValueSet([self._create_instance_with_generics(
+            TupleGenericManager(tuple(new_generics))
+        )])
+
+    def is_same_class(self, other):
+        if not isinstance(other, DefineGenericBaseClass):
+            return False
+
+        if self.tree_node != other.tree_node:
+            # TODO not sure if this is nice.
+            return False
+        given_params1 = self.get_generics()
+        given_params2 = other.get_generics()
+
+        if len(given_params1) != len(given_params2):
+            # If the amount of type vars doesn't match, the class doesn't
+            # match.
+            return False
+
+        # Now compare generics
+        return all(
+            any(
+                # TODO why is this ordering the correct one?
+                cls2.is_same_class(cls1)
+                # TODO I'm still not sure gather_annotation_classes is a good
+                # idea. They are essentially here to avoid comparing Tuple <=>
+                # tuple and instead compare tuple <=> tuple, but at the moment
+                # the whole `is_same_class` and `is_sub_class` matching is just
+                # not in the best shape.
+                for cls1 in class_set1.gather_annotation_classes()
+                for cls2 in class_set2.gather_annotation_classes()
+            ) for class_set1, class_set2 in zip(given_params1, given_params2)
+        )
+
+    def get_signatures(self):
+        return []
+
     def __repr__(self):
-        return '<%s: %s%s>' % (self.__class__.__name__, self._wrapped_value,
-            list(self.get_generics()))
+        return '<%s: %s%s>' % (
+            self.__class__.__name__,
+            self._wrapped_value,
+            list(self.get_generics()),
+        )


 class GenericClass(DefineGenericBaseClass, ClassMixin):
@@ -62,25 +164,157 @@ class GenericClass(DefineGenericBaseClass, ClassMixin):
         class Foo(Generic[T]): ...
         my_foo_int_cls = Foo[int]
     """
-
     def __init__(self, class_value, generics_manager):
         super().__init__(generics_manager)
         self._class_value = class_value

+    def _get_wrapped_value(self):
+        return self._class_value
+
+    def get_type_hint(self, add_class_info=True):
+        n = self.py__name__()
+        # Not sure if this is the best way to do this, but all of these types
+        # are a bit special in that they have type aliases and other ways to
+        # become lower case. It's probably better to make them upper case,
+        # because that's what you can use in annotations.
+        n = dict(list="List", dict="Dict", set="Set", tuple="Tuple").get(n, n)
+        s = n + self._generics_manager.get_type_hint()
+        if add_class_info:
+            return 'Type[%s]' % s
+        return s
+
+    def get_type_var_filter(self):
+        return _TypeVarFilter(self.get_generics(), self.list_type_vars())
+
+    def py__call__(self, arguments):
+        instance, = super().py__call__(arguments)
+        return ValueSet([_GenericInstanceWrapper(instance)])
+
+    def _as_context(self):
+        return _AnnotatedClassContext(self)
+
+    @to_list
+    def py__bases__(self):
+        for base in self._wrapped_value.py__bases__():
+            yield _LazyGenericBaseClass(self, base, self._generics_manager)
+
+    def _create_instance_with_generics(self, generics_manager):
+        return GenericClass(self._class_value, generics_manager)
+
+    def is_sub_class_of(self, class_value):
+        if super().is_sub_class_of(class_value):
+            return True
+        return self._class_value.is_sub_class_of(class_value)
+
+    def with_generics(self, generics_tuple):
+        return self._class_value.with_generics(generics_tuple)
+
+    def infer_type_vars(self, value_set):
+        # Circular
+        from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
+
+        annotation_name = self.py__name__()
+        type_var_dict = {}
+        if annotation_name == 'Iterable':
+            annotation_generics = self.get_generics()
+            if annotation_generics:
+                return annotation_generics[0].infer_type_vars(
+                    value_set.merge_types_of_iterate(),
+                )
+        else:
+            # Note: we need to handle the MRO _in order_, so we need to extract
+            # the elements from the set first, then handle them, even if we put
+            # them back in a set afterwards.
+            for py_class in value_set:
+                if py_class.is_instance() and not py_class.is_compiled():
+                    py_class = py_class.get_annotated_class_object()
+                else:
+                    continue
+
+                if py_class.api_type != 'class':
+                    # Functions & modules don't have an MRO and we're not
+                    # expecting a Callable (those are handled separately within
+                    # TypingClassValueWithIndex).
+                    continue
+
+                for parent_class in py_class.py__mro__():
+                    class_name = parent_class.py__name__()
+                    if annotation_name == class_name:
+                        merge_type_var_dicts(
+                            type_var_dict,
+                            merge_pairwise_generics(self, parent_class),
+                        )
+                        break
+
+        return type_var_dict

-class _LazyGenericBaseClass:

+class _LazyGenericBaseClass:
     def __init__(self, class_value, lazy_base_class, generics_manager):
         self._class_value = class_value
         self._lazy_base_class = lazy_base_class
         self._generics_manager = generics_manager

+    @iterator_to_value_set
+    def infer(self):
+        for base in self._lazy_base_class.infer():
+            if isinstance(base, GenericClass):
+                # Here we have to recalculate the given types.
+                yield GenericClass.create_cached(
+                    base.inference_state,
+                    base._wrapped_value,
+                    TupleGenericManager(tuple(self._remap_type_vars(base))),
+                )
+            else:
+                if base.is_class_mixin():
+                    # This case basically allows classes like `class Foo(List)`
+                    # to be used like `Foo[int]`. The generics are not
+                    # necessary and can be used later.
+                    yield GenericClass.create_cached(
+                        base.inference_state,
+                        base,
+                        self._generics_manager,
+                    )
+                else:
+                    yield base
+
+    def _remap_type_vars(self, base):
+        from jedi.inference.gradual.type_var import TypeVar
+        filter = self._class_value.get_type_var_filter()
+        for type_var_set in base.get_generics():
+            new = NO_VALUES
+            for type_var in type_var_set:
+                if isinstance(type_var, TypeVar):
+                    names = filter.get(type_var.py__name__())
+                    new |= ValueSet.from_sets(
+                        name.infer() for name in names
+                    )
+                else:
+                    # Mostly these will be type vars, except when a concrete
+                    # type is already there; in that case just add it to the
+                    # value set.
+                    new |= ValueSet([type_var])
+            yield new
+
     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self._lazy_base_class)


 class _GenericInstanceWrapper(ValueWrapper):
-    pass
+    def py__stop_iteration_returns(self):
+        for cls in self._wrapped_value.class_value.py__mro__():
+            if cls.py__name__() == 'Generator':
+                generics = cls.get_generics()
+                try:
+                    return generics[2].execute_annotation()
+                except IndexError:
+                    pass
+            elif cls.py__name__() == 'Iterator':
+                return ValueSet([builtin_from_name(self.inference_state, 'None')])
+        return self._wrapped_value.py__stop_iteration_returns()
+
+    def get_type_hint(self, add_class_info=True):
+        return self._wrapped_value.class_value.get_type_hint(add_class_info=False)


 class _PseudoTreeNameClass(Value):
@@ -96,46 +330,105 @@ class _PseudoTreeNameClass(Value):
     api_type = 'class'

     def __init__(self, parent_context, tree_name):
-        super().__init__(parent_context.inference_state, parent_context)
+        super().__init__(
+            parent_context.inference_state,
+            parent_context
+        )
         self._tree_name = tree_name

+    @property
+    def tree_node(self):
+        return self._tree_name
+
+    def get_filters(self, *args, **kwargs):
+        # TODO this is obviously wrong. Is it though?
+        class EmptyFilter(ClassFilter):
+            def __init__(self):
+                pass
+
+            def get(self, name, **kwargs):
+                return []
+
+            def values(self, **kwargs):
+                return []
+
+        yield EmptyFilter()
+
+    def py__class__(self):
+        # This might not be 100% correct, but it is good enough. The details of
+        # the typing library are not really an issue for Jedi.
+        return builtin_from_name(self.inference_state, 'type')
+
+    @property
+    def name(self):
+        return ValueName(self, self._tree_name)
+
+    def get_qualified_names(self):
+        return (self._tree_name.value,)
+
     def __repr__(self):
         return '%s(%s)' % (self.__class__.__name__, self._tree_name.value)


 class BaseTypingValue(LazyValueWrapper):
-
     def __init__(self, parent_context, tree_name):
         self.inference_state = parent_context.inference_state
         self.parent_context = parent_context
         self._tree_name = tree_name

+    @property
+    def name(self):
+        return ValueName(self, self._tree_name)
+
+    def _get_wrapped_value(self):
+        return _PseudoTreeNameClass(self.parent_context, self._tree_name)
+
+    def get_signatures(self):
+        return self._wrapped_value.get_signatures()
+
     def __repr__(self):
         return '%s(%s)' % (self.__class__.__name__, self._tree_name.value)


 class BaseTypingClassWithGenerics(DefineGenericBaseClass):
-
     def __init__(self, parent_context, tree_name, generics_manager):
         super().__init__(generics_manager)
         self.inference_state = parent_context.inference_state
         self.parent_context = parent_context
         self._tree_name = tree_name

+    def _get_wrapped_value(self):
+        return _PseudoTreeNameClass(self.parent_context, self._tree_name)
+
     def __repr__(self):
         return '%s(%s%s)' % (self.__class__.__name__, self._tree_name.value,
-            self._generics_manager)
+                             self._generics_manager)


 class BaseTypingInstance(LazyValueWrapper):
-
-    def __init__(self, parent_context, class_value, tree_name, generics_manager
-        ):
+    def __init__(self, parent_context, class_value, tree_name, generics_manager):
         self.inference_state = class_value.inference_state
         self.parent_context = parent_context
         self._class_value = class_value
         self._tree_name = tree_name
         self._generics_manager = generics_manager

+    def py__class__(self):
+        return self._class_value
+
+    def get_annotated_class_object(self):
+        return self._class_value
+
+    def get_qualified_names(self):
+        return (self.py__name__(),)
+
+    @property
+    def name(self):
+        return ValueName(self, self._tree_name)
+
+    def _get_wrapped_value(self):
+        object_, = builtin_from_name(self.inference_state, 'object').execute_annotation()
+        return object_
+
     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self._generics_manager)
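
Reviewer note on the first hunk above: `typing.Generator` takes three parameters, `Generator[YieldType, SendType, ReturnType]`, and the value carried by `StopIteration` (what a `yield from` expression evaluates to) is the third one, which is why `get_generics()[2]` is executed. A minimal sketch in plain typing terms, independent of jedi internals:

    from typing import Generator

    def gen() -> Generator[int, None, str]:
        yield 1
        return "done"  # carried by StopIteration; its type is generics[2]

    def outer() -> Generator[int, None, str]:
        # `result` is what py__stop_iteration_returns() infers: str
        result = yield from gen()
        return result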
diff --git a/jedi/inference/gradual/conversion.py b/jedi/inference/gradual/conversion.py
index 7295c6d9..9b36a2e1 100644
--- a/jedi/inference/gradual/conversion.py
+++ b/jedi/inference/gradual/conversion.py
@@ -1,6 +1,209 @@
 from jedi import debug
-from jedi.inference.base_value import ValueSet, NO_VALUES
+from jedi.inference.base_value import ValueSet, \
+    NO_VALUES
 from jedi.inference.utils import to_list
 from jedi.inference.gradual.stub_value import StubModuleValue
 from jedi.inference.gradual.typeshed import try_to_load_stub_cached
 from jedi.inference.value.decorator import Decoratee
+
+
+def _stub_to_python_value_set(stub_value, ignore_compiled=False):
+    stub_module_context = stub_value.get_root_context()
+    if not stub_module_context.is_stub():
+        return ValueSet([stub_value])
+
+    decorates = None
+    if isinstance(stub_value, Decoratee):
+        decorates = stub_value._original_value
+
+    was_instance = stub_value.is_instance()
+    if was_instance:
+        arguments = getattr(stub_value, '_arguments', None)
+        stub_value = stub_value.py__class__()
+
+    qualified_names = stub_value.get_qualified_names()
+    if qualified_names is None:
+        return NO_VALUES
+
+    was_bound_method = stub_value.is_bound_method()
+    if was_bound_method:
+        # Infer the object first. We can infer the method later.
+        method_name = qualified_names[-1]
+        qualified_names = qualified_names[:-1]
+        was_instance = True
+        arguments = None
+
+    values = _infer_from_stub(stub_module_context, qualified_names, ignore_compiled)
+    if was_instance:
+        values = ValueSet.from_sets(
+            c.execute_with_values() if arguments is None else c.execute(arguments)
+            for c in values
+            if c.is_class()
+        )
+    if was_bound_method:
+        # Now that the instance has been properly created, we can simply get
+        # the method.
+        values = values.py__getattribute__(method_name)
+    if decorates is not None:
+        values = ValueSet(Decoratee(v, decorates) for v in values)
+    return values
+
+
+def _infer_from_stub(stub_module_context, qualified_names, ignore_compiled):
+    from jedi.inference.compiled.mixed import MixedObject
+    stub_module = stub_module_context.get_value()
+    assert isinstance(stub_module, (StubModuleValue, MixedObject)), stub_module_context
+    non_stubs = stub_module.non_stub_value_set
+    if ignore_compiled:
+        non_stubs = non_stubs.filter(lambda c: not c.is_compiled())
+    for name in qualified_names:
+        non_stubs = non_stubs.py__getattribute__(name)
+    return non_stubs
+
+
+@to_list
+def _try_stub_to_python_names(names, prefer_stub_to_compiled=False):
+    for name in names:
+        module_context = name.get_root_context()
+        if not module_context.is_stub():
+            yield name
+            continue
+
+        if name.api_type == 'module':
+            values = convert_values(name.infer(), ignore_compiled=prefer_stub_to_compiled)
+            if values:
+                for v in values:
+                    yield v.name
+                continue
+        else:
+            v = name.get_defining_qualified_value()
+            if v is not None:
+                converted = _stub_to_python_value_set(v, ignore_compiled=prefer_stub_to_compiled)
+                if converted:
+                    converted_names = converted.goto(name.get_public_name())
+                    if converted_names:
+                        for n in converted_names:
+                            if n.get_root_context().is_stub():
+                                # If it's a stub again, it means we're going in
+                                # a circle. Probably some imports make it a
+                                # stub again.
+                                yield name
+                            else:
+                                yield n
+                        continue
+        yield name
+
+
+def _load_stub_module(module):
+    if module.is_stub():
+        return module
+    return try_to_load_stub_cached(
+        module.inference_state,
+        import_names=module.string_names,
+        python_value_set=ValueSet([module]),
+        parent_module_value=None,
+        sys_path=module.inference_state.get_sys_path(),
+    )
+
+
+@to_list
+def _python_to_stub_names(names, fallback_to_python=False):
+    for name in names:
+        module_context = name.get_root_context()
+        if module_context.is_stub():
+            yield name
+            continue
+
+        if name.api_type == 'module':
+            found_name = False
+            for n in name.goto():
+                if n.api_type == 'module':
+                    values = convert_values(n.infer(), only_stubs=True)
+                    for v in values:
+                        yield v.name
+                        found_name = True
+                else:
+                    for x in _python_to_stub_names([n], fallback_to_python=fallback_to_python):
+                        yield x
+                        found_name = True
+            if found_name:
+                continue
+        else:
+            v = name.get_defining_qualified_value()
+            if v is not None:
+                converted = to_stub(v)
+                if converted:
+                    converted_names = converted.goto(name.get_public_name())
+                    if converted_names:
+                        yield from converted_names
+                        continue
+        if fallback_to_python:
+            # If we haven't found a stub equivalent, fall back to the
+            # original (non-stub) name.
+            yield name
+
+
+def convert_names(names, only_stubs=False, prefer_stubs=False, prefer_stub_to_compiled=True):
+    if only_stubs and prefer_stubs:
+        raise ValueError("You cannot use both of only_stubs and prefer_stubs.")
+
+    with debug.increase_indent_cm('convert names'):
+        if only_stubs or prefer_stubs:
+            return _python_to_stub_names(names, fallback_to_python=prefer_stubs)
+        else:
+            return _try_stub_to_python_names(
+                names, prefer_stub_to_compiled=prefer_stub_to_compiled)
+
+
+def convert_values(values, only_stubs=False, prefer_stubs=False, ignore_compiled=True):
+    assert not (only_stubs and prefer_stubs)
+    with debug.increase_indent_cm('convert values'):
+        if only_stubs or prefer_stubs:
+            return ValueSet.from_sets(
+                to_stub(value)
+                or (ValueSet({value}) if prefer_stubs else NO_VALUES)
+                for value in values
+            )
+        else:
+            return ValueSet.from_sets(
+                _stub_to_python_value_set(stub_value, ignore_compiled=ignore_compiled)
+                or ValueSet({stub_value})
+                for stub_value in values
+            )
+
+
+def to_stub(value):
+    if value.is_stub():
+        return ValueSet([value])
+
+    was_instance = value.is_instance()
+    if was_instance:
+        value = value.py__class__()
+
+    qualified_names = value.get_qualified_names()
+    stub_module = _load_stub_module(value.get_root_context().get_value())
+    if stub_module is None or qualified_names is None:
+        return NO_VALUES
+
+    was_bound_method = value.is_bound_method()
+    if was_bound_method:
+        # Infer the object first. We can infer the method later.
+        method_name = qualified_names[-1]
+        qualified_names = qualified_names[:-1]
+        was_instance = True
+
+    stub_values = ValueSet([stub_module])
+    for name in qualified_names:
+        stub_values = stub_values.py__getattribute__(name)
+
+    if was_instance:
+        stub_values = ValueSet.from_sets(
+            c.execute_with_values()
+            for c in stub_values
+            if c.is_class()
+        )
+    if was_bound_method:
+        # Now that the instance has been properly created, we can simply get
+        # the method.
+        stub_values = stub_values.py__getattribute__(method_name)
+    return stub_values
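
`convert_names()` and `convert_values()` are what back the public `only_stubs`/`prefer_stubs` flags on `Script.goto()`. A rough usage sketch, assuming a standard jedi install with its bundled typeshed:

    import jedi

    script = jedi.Script("import json\njson.loads\n", path="example.py")
    # Jump to the typeshed stub (json/__init__.pyi):
    stub_defs = script.goto(2, 6, only_stubs=True)
    # Jump to the runtime module instead:
    py_defs = script.goto(2, 6, prefer_stubs=False)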
diff --git a/jedi/inference/gradual/generics.py b/jedi/inference/gradual/generics.py
index b7bca99a..f4a5ae9c 100644
--- a/jedi/inference/gradual/generics.py
+++ b/jedi/inference/gradual/generics.py
@@ -2,6 +2,7 @@
 This module is about generics, like the `int` in `List[int]`. It's not about
 the Generic class.
 """
+
 from jedi import debug
 from jedi.cache import memoize_method
 from jedi.inference.utils import to_tuple
@@ -10,12 +11,31 @@ from jedi.inference.value.iterable import SequenceLiteralValue
 from jedi.inference.helpers import is_string


+def _resolve_forward_references(context, value_set):
+    for value in value_set:
+        if is_string(value):
+            from jedi.inference.gradual.annotation import _get_forward_reference_node
+            node = _get_forward_reference_node(context, value.get_safe_value())
+            if node is not None:
+                for c in context.infer_node(node):
+                    yield c
+        else:
+            yield value
+
+
 class _AbstractGenericManager:
-    pass
+    def get_index_and_execute(self, index):
+        try:
+            return self[index].execute_annotation()
+        except IndexError:
+            debug.warning('No param #%s found for annotation %s', index, self)
+            return NO_VALUES

+    def get_type_hint(self):
+        return '[%s]' % ', '.join(t.get_type_hint(add_class_info=False) for t in self.to_tuple())

-class LazyGenericManager(_AbstractGenericManager):

+class LazyGenericManager(_AbstractGenericManager):
     def __init__(self, context_of_index, index_value):
         self._context_of_index = context_of_index
         self._index_value = index_value
@@ -27,12 +47,41 @@ class LazyGenericManager(_AbstractGenericManager):
     def __len__(self):
         return len(self._tuple())

+    @memoize_method
+    @to_tuple
+    def _tuple(self):
+        def lambda_scoping_in_for_loop_sucks(lazy_value):
+            return lambda: ValueSet(_resolve_forward_references(
+                self._context_of_index,
+                lazy_value.infer()
+            ))
+
+        if isinstance(self._index_value, SequenceLiteralValue):
+            for lazy_value in self._index_value.py__iter__(contextualized_node=None):
+                yield lambda_scoping_in_for_loop_sucks(lazy_value)
+        else:
+            yield lambda: ValueSet(_resolve_forward_references(
+                self._context_of_index,
+                ValueSet([self._index_value])
+            ))
+
+    @to_tuple
+    def to_tuple(self):
+        for callable_ in self._tuple():
+            yield callable_()
+
+    def is_homogenous_tuple(self):
+        if isinstance(self._index_value, SequenceLiteralValue):
+            entries = self._index_value.get_tree_entries()
+            if len(entries) == 2 and entries[1] == '...':
+                return True
+        return False
+
     def __repr__(self):
-        return '<LazyG>[%s]' % ', '.join(repr(x) for x in self.to_tuple())
+        return '<LazyG>[%s]' % (', '.join(repr(x) for x in self.to_tuple()))


 class TupleGenericManager(_AbstractGenericManager):
-
     def __init__(self, tup):
         self._tuple = tup

@@ -42,5 +91,11 @@ class TupleGenericManager(_AbstractGenericManager):
     def __len__(self):
         return len(self._tuple)

+    def to_tuple(self):
+        return self._tuple
+
+    def is_homogenous_tuple(self):
+        return False
+
     def __repr__(self):
-        return '<TupG>[%s]' % ', '.join(repr(x) for x in self.to_tuple())
+        return '<TupG>[%s]' % (', '.join(repr(x) for x in self.to_tuple()))
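
The new `is_homogenous_tuple()` only recognizes the two-entry `T, ...` form, which is the PEP 484 spelling for a variable-length tuple; every index of such a tuple then resolves to the single generic. A small illustration of the annotation semantics involved:

    from typing import Tuple

    def f(pair: Tuple[int, str], stream: Tuple[int, ...]) -> None:
        pair[0]    # int   (positional lookup, TupleGenericManager-style)
        pair[1]    # str
        stream[9]  # int   (homogeneous: any index yields generics[0])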
diff --git a/jedi/inference/gradual/stub_value.py b/jedi/inference/gradual/stub_value.py
index 17b168a1..22f08a10 100644
--- a/jedi/inference/gradual/stub_value.py
+++ b/jedi/inference/gradual/stub_value.py
@@ -13,30 +13,90 @@ class StubModuleValue(ModuleValue):
         super().__init__(*args, **kwargs)
         self.non_stub_value_set = non_stub_value_set

+    def is_stub(self):
+        return True
+
     def sub_modules_dict(self):
         """
         We have to overwrite this, because it's possible to have stubs that
         don't have code for all the child modules. At the time of writing this
         there are for example no stubs for `json.tool`.
         """
-        pass
+        names = {}
+        for value in self.non_stub_value_set:
+            try:
+                method = value.sub_modules_dict
+            except AttributeError:
+                pass
+            else:
+                names.update(method())
+        names.update(super().sub_modules_dict())
+        return names
+
+    def _get_stub_filters(self, origin_scope):
+        return [StubFilter(
+            parent_context=self.as_context(),
+            origin_scope=origin_scope
+        )] + list(self.iter_star_filters())
+
+    def get_filters(self, origin_scope=None):
+        filters = super().get_filters(origin_scope)
+        next(filters, None)  # Ignore the first filter and replace it with our own
+        stub_filters = self._get_stub_filters(origin_scope=origin_scope)
+        yield from stub_filters
+        yield from filters
+
+    def _as_context(self):
+        return StubModuleContext(self)


 class StubModuleContext(ModuleContext):
-    pass
+    def get_filters(self, until_position=None, origin_scope=None):
+        # Make sure to ignore the position, because positions are not relevant
+        # for stubs.
+        return super().get_filters(origin_scope=origin_scope)


 class TypingModuleWrapper(StubModuleValue):
-    pass
+    def get_filters(self, *args, **kwargs):
+        filters = super().get_filters(*args, **kwargs)
+        f = next(filters, None)
+        assert f is not None
+        yield TypingModuleFilterWrapper(f)
+        yield from filters
+
+    def _as_context(self):
+        return TypingModuleContext(self)


 class TypingModuleContext(ModuleContext):
-    pass
+    def get_filters(self, *args, **kwargs):
+        filters = super().get_filters(*args, **kwargs)
+        yield TypingModuleFilterWrapper(next(filters, None))
+        yield from filters


 class StubFilter(ParserTreeFilter):
     name_class = StubName

+    def _is_name_reachable(self, name):
+        if not super()._is_name_reachable(name):
+            return False
+
+        # Imports in stub files are only public if they have an "as"
+        # export.
+        definition = name.get_definition()
+        if definition is None:
+            return False
+        if definition.type in ('import_from', 'import_name'):
+            if name.parent.type not in ('import_as_name', 'dotted_as_name'):
+                return False
+        n = name.value
+        # TODO rewrite direct return
+        if n.startswith('_') and not (n.startswith('__') and n.endswith('__')):
+            return False
+        return True
+

 class VersionInfo(ValueWrapper):
     pass
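
The `StubFilter._is_name_reachable()` rules mirror the PEP 484 stub conventions: plain imports are private to the stub, and only redundant `as` aliases re-export a name. A hedged example of what that means for a hypothetical `foo.pyi`:

    # foo.pyi (hypothetical)
    from bar import baz            # filtered out: no "as" re-export
    from bar import baz as baz     # kept: explicit re-export
    _helper: int                   # filtered out: single leading underscore
    __version__: str               # kept: dunder names pass the check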
diff --git a/jedi/inference/gradual/type_var.py b/jedi/inference/gradual/type_var.py
index 207439f1..c09773f1 100644
--- a/jedi/inference/gradual/type_var.py
+++ b/jedi/inference/gradual/type_var.py
@@ -4,14 +4,51 @@ from jedi.inference.gradual.base import BaseTypingValue


 class TypeVarClass(ValueWrapper):
-    pass
+    def py__call__(self, arguments):
+        unpacked = arguments.unpack()

+        key, lazy_value = next(unpacked, (None, None))
+        var_name = self._find_string_name(lazy_value)
+        # The name must be given, otherwise it's useless.
+        if var_name is None or key is not None:
+            debug.warning('Found a variable without a name %s', arguments)
+            return NO_VALUES
+
+        return ValueSet([TypeVar.create_cached(
+            self.inference_state,
+            self.parent_context,
+            tree_name=self.tree_node.name,
+            var_name=var_name,
+            unpacked_args=unpacked,
+        )])
+
+    def _find_string_name(self, lazy_value):
+        if lazy_value is None:
+            return None
+
+        value_set = lazy_value.infer()
+        if not value_set:
+            return None
+        if len(value_set) > 1:
+            debug.warning('Found multiple values for a type variable: %s', value_set)
+
+        name_value = next(iter(value_set))
+        try:
+            method = name_value.get_safe_value
+        except AttributeError:
+            return None
+        else:
+            safe_value = method(default=None)
+            if isinstance(safe_value, str):
+                return safe_value
+            return None

-class TypeVar(BaseTypingValue):

+class TypeVar(BaseTypingValue):
     def __init__(self, parent_context, tree_name, var_name, unpacked_args):
         super().__init__(parent_context, tree_name)
         self._var_name = var_name
+
         self._constraints_lazy_values = []
         self._bound_lazy_value = None
         self._covariant_lazy_value = None
@@ -19,21 +56,72 @@ class TypeVar(BaseTypingValue):
         for key, lazy_value in unpacked_args:
             if key is None:
                 self._constraints_lazy_values.append(lazy_value)
-            elif key == 'bound':
-                self._bound_lazy_value = lazy_value
-            elif key == 'covariant':
-                self._covariant_lazy_value = lazy_value
-            elif key == 'contravariant':
-                self._contra_variant_lazy_value = lazy_value
             else:
-                debug.warning('Invalid TypeVar param name %s', key)
+                if key == 'bound':
+                    self._bound_lazy_value = lazy_value
+                elif key == 'covariant':
+                    self._covariant_lazy_value = lazy_value
+                elif key == 'contravariant':
+                    self._contra_variant_lazy_value = lazy_value
+                else:
+                    debug.warning('Invalid TypeVar param name %s', key)
+
+    def py__name__(self):
+        return self._var_name
+
+    def get_filters(self, *args, **kwargs):
+        return iter([])
+
+    def _get_classes(self):
+        if self._bound_lazy_value is not None:
+            return self._bound_lazy_value.infer()
+        if self._constraints_lazy_values:
+            return self.constraints
+        debug.warning('Tried to infer the TypeVar %s without a given type', self._var_name)
+        return NO_VALUES
+
+    def is_same_class(self, other):
+        # Everything can match an undefined type var.
+        return True
+
+    @property
+    def constraints(self):
+        return ValueSet.from_sets(
+            lazy.infer() for lazy in self._constraints_lazy_values
+        )
+
+    def define_generics(self, type_var_dict):
+        try:
+            found = type_var_dict[self.py__name__()]
+        except KeyError:
+            pass
+        else:
+            if found:
+                return found
+        return ValueSet({self})
+
+    def execute_annotation(self):
+        return self._get_classes().execute_annotation()
+
+    def infer_type_vars(self, value_set):
+        def iterate():
+            for v in value_set:
+                cls = v.py__class__()
+                if v.is_function() or v.is_class():
+                    cls = TypeWrapper(cls, v)
+                yield cls
+
+        annotation_name = self.py__name__()
+        return {annotation_name: ValueSet(iterate())}

     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self.py__name__())


 class TypeWrapper(ValueWrapper):
-
     def __init__(self, wrapped_value, original_value):
         super().__init__(wrapped_value)
         self._original_value = original_value
+
+    def execute_annotation(self):
+        return ValueSet({self._original_value})
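
`TypeVarClass.py__call__()` requires the name to be the first positional argument and to infer to a string, matching how `typing.TypeVar` is meant to be called; unrecognized keyword arguments are merely warned about. For reference:

    from typing import TypeVar

    T = TypeVar('T')              # _find_string_name() -> 'T'
    S = TypeVar('S', int, str)    # positional constraints
    B = TypeVar('B', bound=str)   # stored in _bound_lazy_value
    # TypeVar(name='U') would hit the "variable without a name" warning,
    # because the first unpacked argument arrives with a keyword.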
diff --git a/jedi/inference/gradual/typeshed.py b/jedi/inference/gradual/typeshed.py
index 002795c5..50217cd3 100644
--- a/jedi/inference/gradual/typeshed.py
+++ b/jedi/inference/gradual/typeshed.py
@@ -4,25 +4,73 @@ from functools import wraps
 from collections import namedtuple
 from typing import Dict, Mapping, Tuple
 from pathlib import Path
+
 from jedi import settings
 from jedi.file_io import FileIO
 from jedi.parser_utils import get_cached_code_lines
 from jedi.inference.base_value import ValueSet, NO_VALUES
 from jedi.inference.gradual.stub_value import TypingModuleWrapper, StubModuleValue
 from jedi.inference.value import ModuleValue
+
 _jedi_path = Path(__file__).parent.parent.parent
 TYPESHED_PATH = _jedi_path.joinpath('third_party', 'typeshed')
 DJANGO_INIT_PATH = _jedi_path.joinpath('third_party', 'django-stubs',
-    'django-stubs', '__init__.pyi')
-_IMPORT_MAP = dict(_collections='collections', _socket='socket')
+                                       'django-stubs', '__init__.pyi')
+
+_IMPORT_MAP = dict(
+    _collections='collections',
+    _socket='socket',
+)
+
 PathInfo = namedtuple('PathInfo', 'path is_third_party')


+def _merge_create_stub_map(path_infos):
+    map_ = {}
+    for directory_path_info in path_infos:
+        map_.update(_create_stub_map(directory_path_info))
+    return map_
+
+
 def _create_stub_map(directory_path_info):
     """
     Create a mapping of an importable name in Python to a stub file.
     """
-    pass
+    def generate():
+        try:
+            listed = os.listdir(directory_path_info.path)
+        except (FileNotFoundError, NotADirectoryError):
+            return
+
+        for entry in listed:
+            path = os.path.join(directory_path_info.path, entry)
+            if os.path.isdir(path):
+                init = os.path.join(path, '__init__.pyi')
+                if os.path.isfile(init):
+                    yield entry, PathInfo(init, directory_path_info.is_third_party)
+            elif entry.endswith('.pyi') and os.path.isfile(path):
+                name = entry[:-4]
+                if name != '__init__':
+                    yield name, PathInfo(path, directory_path_info.is_third_party)
+
+    # Create a dictionary from the tuple generator.
+    return dict(generate())
+
+
+def _get_typeshed_directories(version_info):
+    check_version_list = ['2and3', '3']
+    for base in ['stdlib', 'third_party']:
+        base_path = TYPESHED_PATH.joinpath(base)
+        base_list = os.listdir(base_path)
+        for base_list_entry in base_list:
+            match = re.match(r'(\d+)\.(\d+)$', base_list_entry)
+            if match is not None:
+                if match.group(1) == '3' and int(match.group(2)) <= version_info.minor:
+                    check_version_list.append(base_list_entry)
+
+        for check_version in check_version_list:
+            is_third_party = base != 'stdlib'
+            yield PathInfo(str(base_path.joinpath(check_version)), is_third_party)


 _version_cache: Dict[Tuple[int, int], Mapping[str, PathInfo]] = {}
@@ -32,15 +80,231 @@ def _cache_stub_file_map(version_info):
     """
     Returns a map of an importable name in Python to a stub file.
     """
-    pass
+    # TODO this caches the stub files indefinitely, maybe use a time cache
+    # for that?
+    version = version_info[:2]
+    try:
+        return _version_cache[version]
+    except KeyError:
+        pass
+
+    _version_cache[version] = file_set = \
+        _merge_create_stub_map(_get_typeshed_directories(version_info))
+    return file_set
+
+
+def import_module_decorator(func):
+    @wraps(func)
+    def wrapper(inference_state, import_names, parent_module_value, sys_path, prefer_stubs):
+        python_value_set = inference_state.module_cache.get(import_names)
+        if python_value_set is None:
+            if parent_module_value is not None and parent_module_value.is_stub():
+                parent_module_values = parent_module_value.non_stub_value_set
+            else:
+                parent_module_values = [parent_module_value]
+            if import_names == ('os', 'path'):
+                # This is a huge exception, we follow a nested import
+                # ``os.path``, because it's a very important one in Python
+                # that is being achieved by messing with ``sys.modules`` in
+                # ``os``.
+                python_value_set = ValueSet.from_sets(
+                    func(inference_state, (n,), None, sys_path,)
+                    for n in ['posixpath', 'ntpath', 'macpath', 'os2emxpath']
+                )
+            else:
+                python_value_set = ValueSet.from_sets(
+                    func(inference_state, import_names, p, sys_path,)
+                    for p in parent_module_values
+                )
+            inference_state.module_cache.add(import_names, python_value_set)
+
+        if not prefer_stubs or import_names[0] in settings.auto_import_modules:
+            return python_value_set
+
+        stub = try_to_load_stub_cached(inference_state, import_names, python_value_set,
+                                       parent_module_value, sys_path)
+        if stub is not None:
+            return ValueSet([stub])
+        return python_value_set
+
+    return wrapper
+
+
+def try_to_load_stub_cached(inference_state, import_names, *args, **kwargs):
+    if import_names is None:
+        return None
+
+    try:
+        return inference_state.stub_module_cache[import_names]
+    except KeyError:
+        pass
+
+    # TODO is this needed? where are the exceptions coming from that make this
+    # necessary? Just remove this line.
+    inference_state.stub_module_cache[import_names] = None
+    inference_state.stub_module_cache[import_names] = result = \
+        _try_to_load_stub(inference_state, import_names, *args, **kwargs)
+    return result


 def _try_to_load_stub(inference_state, import_names, python_value_set,
-    parent_module_value, sys_path):
+                      parent_module_value, sys_path):
     """
     Trying to load a stub for a set of import_names.

     This is modelled to work like "PEP 561 -- Distributing and Packaging Type
     Information", see https://www.python.org/dev/peps/pep-0561.
     """
-    pass
+    if parent_module_value is None and len(import_names) > 1:
+        try:
+            parent_module_value = try_to_load_stub_cached(
+                inference_state, import_names[:-1], NO_VALUES,
+                parent_module_value=None, sys_path=sys_path)
+        except KeyError:
+            pass
+
+    # 1. Try to load foo-stubs folders on path for import name foo.
+    if len(import_names) == 1:
+        # foo-stubs
+        for p in sys_path:
+            init = os.path.join(p, *import_names) + '-stubs' + os.path.sep + '__init__.pyi'
+            m = _try_to_load_stub_from_file(
+                inference_state,
+                python_value_set,
+                file_io=FileIO(init),
+                import_names=import_names,
+            )
+            if m is not None:
+                return m
+        if import_names[0] == 'django' and python_value_set:
+            return _try_to_load_stub_from_file(
+                inference_state,
+                python_value_set,
+                file_io=FileIO(str(DJANGO_INIT_PATH)),
+                import_names=import_names,
+            )
+
+    # 2. Try to load pyi files next to py files.
+    for c in python_value_set:
+        try:
+            method = c.py__file__
+        except AttributeError:
+            pass
+        else:
+            file_path = method()
+            file_paths = []
+            if c.is_namespace():
+                file_paths = [os.path.join(p, '__init__.pyi') for p in c.py__path__()]
+            elif file_path is not None and file_path.suffix == '.py':
+                file_paths = [str(file_path) + 'i']
+
+            for file_path in file_paths:
+                m = _try_to_load_stub_from_file(
+                    inference_state,
+                    python_value_set,
+                    # The file path should end with .pyi
+                    file_io=FileIO(file_path),
+                    import_names=import_names,
+                )
+                if m is not None:
+                    return m
+
+    # 3. Try to load typeshed
+    m = _load_from_typeshed(inference_state, python_value_set, parent_module_value, import_names)
+    if m is not None:
+        return m
+
+    # 4. Try to load pyi file somewhere if python_value_set was not defined.
+    if not python_value_set:
+        if parent_module_value is not None:
+            check_path = parent_module_value.py__path__() or []
+            # Only the last part of import_names needs resolving here,
+            # relative to the parent module's path.
+            names_for_path = (import_names[-1],)
+        else:
+            check_path = sys_path
+            names_for_path = import_names
+
+        for p in check_path:
+            m = _try_to_load_stub_from_file(
+                inference_state,
+                python_value_set,
+                file_io=FileIO(os.path.join(p, *names_for_path) + '.pyi'),
+                import_names=import_names,
+            )
+            if m is not None:
+                return m
+
+    # If no stub is found, that's fine, the calling function has to deal with
+    # it.
+    return None
+
+
+def _load_from_typeshed(inference_state, python_value_set, parent_module_value, import_names):
+    import_name = import_names[-1]
+    map_ = None
+    if len(import_names) == 1:
+        map_ = _cache_stub_file_map(inference_state.grammar.version_info)
+        import_name = _IMPORT_MAP.get(import_name, import_name)
+    elif isinstance(parent_module_value, ModuleValue):
+        if not parent_module_value.is_package():
+            # Only if it's a package (= a folder) something can be
+            # imported.
+            return None
+        paths = parent_module_value.py__path__()
+        # Once the initial package has been loaded, the sub packages will
+        # always be loaded, regardless if they are there or not. This makes
+        # sense, IMO, because stubs take preference, even if the original
+        # library doesn't provide a module (it could be dynamic). ~dave
+        map_ = _merge_create_stub_map([PathInfo(p, is_third_party=False) for p in paths])
+
+    if map_ is not None:
+        path_info = map_.get(import_name)
+        if path_info is not None and (not path_info.is_third_party or python_value_set):
+            return _try_to_load_stub_from_file(
+                inference_state,
+                python_value_set,
+                file_io=FileIO(path_info.path),
+                import_names=import_names,
+            )
+
+
+def _try_to_load_stub_from_file(inference_state, python_value_set, file_io, import_names):
+    try:
+        stub_module_node = parse_stub_module(inference_state, file_io)
+    except OSError:
+        # The file that you're looking for doesn't exist (anymore).
+        return None
+    else:
+        return create_stub_module(
+            inference_state, inference_state.latest_grammar, python_value_set,
+            stub_module_node, file_io, import_names
+        )
+
+
+def parse_stub_module(inference_state, file_io):
+    return inference_state.parse(
+        file_io=file_io,
+        cache=True,
+        diff_cache=settings.fast_parser,
+        cache_path=settings.cache_directory,
+        use_latest_grammar=True
+    )
+
+
+def create_stub_module(inference_state, grammar, python_value_set,
+                       stub_module_node, file_io, import_names):
+    if import_names == ('typing',):
+        module_cls = TypingModuleWrapper
+    else:
+        module_cls = StubModuleValue
+    file_name = os.path.basename(file_io.path)
+    stub_module_value = module_cls(
+        python_value_set, inference_state, stub_module_node,
+        file_io=file_io,
+        string_names=import_names,
+        # The code was loaded with latest_grammar, so use
+        # that.
+        code_lines=get_cached_code_lines(grammar, file_io.path),
+        is_package=file_name == '__init__.pyi',
+    )
+    return stub_module_value
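
`_create_stub_map()` turns one typeshed directory into a `name -> PathInfo` dict (packages via `__init__.pyi`, single modules via `name.pyi`), and `_merge_create_stub_map()` layers the per-version directories on top of each other. A sketch using the private helpers added above; the exact paths depend on the bundled typeshed checkout:

    from jedi.inference.gradual.typeshed import (
        PathInfo, TYPESHED_PATH, _create_stub_map)

    stdlib = TYPESHED_PATH.joinpath('stdlib', '2and3')
    stub_map = _create_stub_map(PathInfo(str(stdlib), is_third_party=False))
    # e.g. {'json': PathInfo('.../stdlib/2and3/json/__init__.pyi', False),
    #       'abc':  PathInfo('.../stdlib/2and3/abc.pyi', False), ...}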
diff --git a/jedi/inference/gradual/typing.py b/jedi/inference/gradual/typing.py
index c13d5322..4466cbf4 100644
--- a/jedi/inference/gradual/typing.py
+++ b/jedi/inference/gradual/typing.py
@@ -6,27 +6,91 @@ values.
 This file deals with all the typing.py cases.
 """
 import itertools
+
 from jedi import debug
 from jedi.inference.compiled import builtin_from_name, create_simple_object
-from jedi.inference.base_value import ValueSet, NO_VALUES, Value, LazyValueWrapper, ValueWrapper
+from jedi.inference.base_value import ValueSet, NO_VALUES, Value, \
+    LazyValueWrapper, ValueWrapper
 from jedi.inference.lazy_value import LazyKnownValues
 from jedi.inference.arguments import repack_with_argument_clinic
 from jedi.inference.filters import FilterWrapper
 from jedi.inference.names import NameWrapper, ValueName
 from jedi.inference.value.klass import ClassMixin
-from jedi.inference.gradual.base import BaseTypingValue, BaseTypingClassWithGenerics, BaseTypingInstance
+from jedi.inference.gradual.base import BaseTypingValue, \
+    BaseTypingClassWithGenerics, BaseTypingInstance
 from jedi.inference.gradual.type_var import TypeVarClass
 from jedi.inference.gradual.generics import LazyGenericManager, TupleGenericManager
+
 _PROXY_CLASS_TYPES = 'Tuple Generic Protocol Callable Type'.split()
-_TYPE_ALIAS_TYPES = {'List': 'builtins.list', 'Dict': 'builtins.dict',
-    'Set': 'builtins.set', 'FrozenSet': 'builtins.frozenset', 'ChainMap':
-    'collections.ChainMap', 'Counter': 'collections.Counter', 'DefaultDict':
-    'collections.defaultdict', 'Deque': 'collections.deque'}
+_TYPE_ALIAS_TYPES = {
+    'List': 'builtins.list',
+    'Dict': 'builtins.dict',
+    'Set': 'builtins.set',
+    'FrozenSet': 'builtins.frozenset',
+    'ChainMap': 'collections.ChainMap',
+    'Counter': 'collections.Counter',
+    'DefaultDict': 'collections.defaultdict',
+    'Deque': 'collections.deque',
+}
 _PROXY_TYPES = 'Optional Union ClassVar Annotated'.split()


 class TypingModuleName(NameWrapper):
-    pass
+    def infer(self):
+        return ValueSet(self._remap())
+
+    def _remap(self):
+        name = self.string_name
+        inference_state = self.parent_context.inference_state
+        try:
+            actual = _TYPE_ALIAS_TYPES[name]
+        except KeyError:
+            pass
+        else:
+            yield TypeAlias.create_cached(
+                inference_state, self.parent_context, self.tree_name, actual)
+            return
+
+        if name in _PROXY_CLASS_TYPES:
+            yield ProxyTypingClassValue.create_cached(
+                inference_state, self.parent_context, self.tree_name)
+        elif name in _PROXY_TYPES:
+            yield ProxyTypingValue.create_cached(
+                inference_state, self.parent_context, self.tree_name)
+        elif name == 'runtime':
+            # We don't want anything here, not sure what this function is
+            # supposed to do, since it just appears in the stubs and shouldn't
+            # have any effects there (because it's never executed).
+            return
+        elif name == 'TypeVar':
+            cls, = self._wrapped_name.infer()
+            yield TypeVarClass.create_cached(inference_state, cls)
+        elif name == 'Any':
+            yield AnyClass.create_cached(
+                inference_state, self.parent_context, self.tree_name)
+        elif name == 'TYPE_CHECKING':
+            # This is needed for e.g. imports that are only available for type
+            # checking or are in cycles. The user can then check this variable.
+            yield builtin_from_name(inference_state, 'True')
+        elif name == 'overload':
+            yield OverloadFunction.create_cached(
+                inference_state, self.parent_context, self.tree_name)
+        elif name == 'NewType':
+            v, = self._wrapped_name.infer()
+            yield NewTypeFunction.create_cached(inference_state, v)
+        elif name == 'cast':
+            cast_fn, = self._wrapped_name.infer()
+            yield CastFunction.create_cached(inference_state, cast_fn)
+        elif name == 'TypedDict':
+            # TODO doesn't even exist in typeshed/typing.py, yet. But will be
+            # added soon.
+            yield TypedDictClass.create_cached(
+                inference_state, self.parent_context, self.tree_name)
+        else:
+            # Not necessary, as long as we are not doing type checking:
+            # no_type_check & no_type_check_decorator
+            # Everything else shouldn't be relevant...
+            yield from self._wrapped_name.infer()


 class TypingModuleFilterWrapper(FilterWrapper):
@@ -34,19 +98,142 @@ class TypingModuleFilterWrapper(FilterWrapper):


 class ProxyWithGenerics(BaseTypingClassWithGenerics):
-    pass
+    def execute_annotation(self):
+        string_name = self._tree_name.value
+
+        if string_name == 'Union':
+            # This is kind of a special case, because we have Unions (in Jedi
+            # ValueSets).
+            return self.gather_annotation_classes().execute_annotation()
+        elif string_name == 'Optional':
+            # Optional is basically just saying it's either None or the actual
+            # type.
+            return self.gather_annotation_classes().execute_annotation() \
+                | ValueSet([builtin_from_name(self.inference_state, 'None')])
+        elif string_name == 'Type':
+            # The type is actually already given in the index_value
+            return self._generics_manager[0]
+        elif string_name in ['ClassVar', 'Annotated']:
+            # For now don't do anything here, ClassVars are always used.
+            return self._generics_manager[0].execute_annotation()
+
+        mapped = {
+            'Tuple': Tuple,
+            'Generic': Generic,
+            'Protocol': Protocol,
+            'Callable': Callable,
+        }
+        cls = mapped[string_name]
+        return ValueSet([cls(
+            self.parent_context,
+            self,
+            self._tree_name,
+            generics_manager=self._generics_manager,
+        )])
+
+    def gather_annotation_classes(self):
+        return ValueSet.from_sets(self._generics_manager.to_tuple())
+
+    def _create_instance_with_generics(self, generics_manager):
+        return ProxyWithGenerics(
+            self.parent_context,
+            self._tree_name,
+            generics_manager
+        )
+
+    def infer_type_vars(self, value_set):
+        annotation_generics = self.get_generics()
+
+        if not annotation_generics:
+            return {}
+
+        annotation_name = self.py__name__()
+        if annotation_name == 'Optional':
+            # Optional[T] is equivalent to Union[T, None]. In Jedi unions
+            # are represented by members within a ValueSet, so we extract
+            # the T from the Optional[T] by removing the None value.
+            none = builtin_from_name(self.inference_state, 'None')
+            return annotation_generics[0].infer_type_vars(
+                value_set.filter(lambda x: x != none),
+            )
+
+        return {}


 class ProxyTypingValue(BaseTypingValue):
     index_class = ProxyWithGenerics

+    def with_generics(self, generics_tuple):
+        return self.index_class.create_cached(
+            self.inference_state,
+            self.parent_context,
+            self._tree_name,
+            generics_manager=TupleGenericManager(generics_tuple)
+        )
+
+    def py__getitem__(self, index_value_set, contextualized_node):
+        return ValueSet(
+            self.index_class.create_cached(
+                self.inference_state,
+                self.parent_context,
+                self._tree_name,
+                generics_manager=LazyGenericManager(
+                    context_of_index=contextualized_node.context,
+                    index_value=index_value,
+                )
+            ) for index_value in index_value_set
+        )
+

 class _TypingClassMixin(ClassMixin):
-    pass
+    def py__bases__(self):
+        return [LazyKnownValues(
+            self.inference_state.builtins_module.py__getattribute__('object')
+        )]
+
+    def get_metaclasses(self):
+        return []
+
+    @property
+    def name(self):
+        return ValueName(self, self._tree_name)


 class TypingClassWithGenerics(ProxyWithGenerics, _TypingClassMixin):
-    pass
+    def infer_type_vars(self, value_set):
+        type_var_dict = {}
+        annotation_generics = self.get_generics()
+
+        if not annotation_generics:
+            return type_var_dict
+
+        annotation_name = self.py__name__()
+        if annotation_name == 'Type':
+            return annotation_generics[0].infer_type_vars(
+                # This is basically a trick to avoid extra code: We execute the
+                # incoming classes to be able to use the normal code for type
+                # var inference.
+                value_set.execute_annotation(),
+            )
+
+        elif annotation_name == 'Callable':
+            if len(annotation_generics) == 2:
+                return annotation_generics[1].infer_type_vars(
+                    value_set.execute_annotation(),
+                )
+
+        elif annotation_name == 'Tuple':
+            tuple_annotation, = self.execute_annotation()
+            return tuple_annotation.infer_type_vars(value_set)
+
+        return type_var_dict
+
+    def _create_instance_with_generics(self, generics_manager):
+        return TypingClassWithGenerics(
+            self.parent_context,
+            self._tree_name,
+            generics_manager
+        )


 class ProxyTypingClassValue(ProxyTypingValue, _TypingClassMixin):
@@ -54,28 +241,142 @@ class ProxyTypingClassValue(ProxyTypingValue, _TypingClassMixin):


 class TypeAlias(LazyValueWrapper):
-
     def __init__(self, parent_context, origin_tree_name, actual):
         self.inference_state = parent_context.inference_state
         self.parent_context = parent_context
         self._origin_tree_name = origin_tree_name
-        self._actual = actual
+        self._actual = actual  # e.g. builtins.list
+
+    @property
+    def name(self):
+        return ValueName(self, self._origin_tree_name)
+
+    def py__name__(self):
+        return self.name.string_name

     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self._actual)

+    def _get_wrapped_value(self):
+        module_name, class_name = self._actual.split('.')
+
+        # TODO use inference_state.import_module?
+        from jedi.inference.imports import Importer
+        module, = Importer(
+            self.inference_state, [module_name], self.inference_state.builtins_module
+        ).follow()
+        classes = module.py__getattribute__(class_name)
+        # There should only be one, because it's code that we control.
+        assert len(classes) == 1, classes
+        cls = next(iter(classes))
+        return cls
+
+    def gather_annotation_classes(self):
+        return ValueSet([self._get_wrapped_value()])
+
+    def get_signatures(self):
+        return []

-class Callable(BaseTypingInstance):

+class Callable(BaseTypingInstance):
     def py__call__(self, arguments):
         """
             def x() -> Callable[[Callable[..., _T]], _T]: ...
         """
-        pass
+        # Index 0 holds the parameter types, index 1 the return type.
+        try:
+            param_values = self._generics_manager[0]
+            result_values = self._generics_manager[1]
+        except IndexError:
+            debug.warning('Callable[...] defined without two arguments')
+            return NO_VALUES
+        else:
+            from jedi.inference.gradual.annotation import infer_return_for_callable
+            return infer_return_for_callable(arguments, param_values, result_values)
+
+    def py__get__(self, instance, class_value):
+        return ValueSet([self])


 class Tuple(BaseTypingInstance):
-    pass
+    def _is_homogenous(self):
+        # To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
+        # is used.
+        return self._generics_manager.is_homogenous_tuple()
+
+    def py__simple_getitem__(self, index):
+        if self._is_homogenous():
+            return self._generics_manager.get_index_and_execute(0)
+        else:
+            if isinstance(index, int):
+                return self._generics_manager.get_index_and_execute(index)
+
+            debug.dbg('The getitem type on Tuple was %s' % index)
+            return NO_VALUES
+
+    def py__iter__(self, contextualized_node=None):
+        if self._is_homogenous():
+            yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
+        else:
+            for v in self._generics_manager.to_tuple():
+                yield LazyKnownValues(v.execute_annotation())
+
+    def py__getitem__(self, index_value_set, contextualized_node):
+        if self._is_homogenous():
+            return self._generics_manager.get_index_and_execute(0)
+
+        return ValueSet.from_sets(
+            self._generics_manager.to_tuple()
+        ).execute_annotation()
+
+    def _get_wrapped_value(self):
+        tuple_, = self.inference_state.builtins_module \
+            .py__getattribute__('tuple').execute_annotation()
+        return tuple_
+
+    @property
+    def name(self):
+        return self._wrapped_value.name
+
+    def infer_type_vars(self, value_set):
+        # Circular
+        from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
+
+        value_set = value_set.filter(
+            lambda x: x.py__name__().lower() == 'tuple',
+        )
+
+        if self._is_homogenous():
+            # The parameter annotation is of the form `Tuple[T, ...]`,
+            # so we treat the incoming tuple like an iterable sequence
+            # rather than a positional container of elements.
+            return self._class_value.get_generics()[0].infer_type_vars(
+                value_set.merge_types_of_iterate(),
+            )
+
+        else:
+            # The parameter annotation has only explicit type parameters
+            # (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
+            # treat the incoming values as needing to match the annotation
+            # exactly, just as we would for non-tuple annotations.
+
+            type_var_dict = {}
+            for element in value_set:
+                try:
+                    method = element.get_annotated_class_object
+                except AttributeError:
+                    # This might still happen, because the tuple name matching
+                    # above is not 100% correct, so just catch the remaining
+                    # cases here.
+                    continue
+
+                py_class = method()
+                merge_type_var_dicts(
+                    type_var_dict,
+                    merge_pairwise_generics(self._class_value, py_class),
+                )
+
+            return type_var_dict


 class Generic(BaseTypingInstance):
@@ -87,31 +388,60 @@ class Protocol(BaseTypingInstance):


 class AnyClass(BaseTypingValue):
-    pass
+    def execute_annotation(self):
+        debug.warning('Used Any - returned no results')
+        return NO_VALUES


 class OverloadFunction(BaseTypingValue):
-    pass
+    @repack_with_argument_clinic('func, /')
+    def py__call__(self, func_value_set):
+        # Just pass arguments through.
+        return func_value_set


 class NewTypeFunction(ValueWrapper):
-    pass
+    def py__call__(self, arguments):
+        ordered_args = arguments.unpack()
+        next(ordered_args, (None, None))
+        _, second_arg = next(ordered_args, (None, None))
+        if second_arg is None:
+            return NO_VALUES
+        return ValueSet(
+            NewType(
+                self.inference_state,
+                contextualized_node.context,
+                contextualized_node.node,
+                second_arg.infer(),
+            ) for contextualized_node in arguments.get_calling_nodes())


 class NewType(Value):
-
-    def __init__(self, inference_state, parent_context, tree_node,
-        type_value_set):
+    def __init__(self, inference_state, parent_context, tree_node, type_value_set):
         super().__init__(inference_state, parent_context)
         self._type_value_set = type_value_set
         self.tree_node = tree_node

-    def __repr__(self) ->str:
+    def py__class__(self):
+        c, = self._type_value_set.py__class__()
+        return c
+
+    def py__call__(self, arguments):
+        return self._type_value_set.execute_annotation()
+
+    @property
+    def name(self):
+        from jedi.inference.compiled.value import CompiledValueName
+        return CompiledValueName(self, 'NewType')
+
+    def __repr__(self) -> str:
         return '<NewType: %s>%s' % (self.tree_node, self._type_value_set)


 class CastFunction(ValueWrapper):
-    pass
+    @repack_with_argument_clinic('type, object, /')
+    def py__call__(self, type_value_set, object_value_set):
+        return type_value_set.execute_annotation()


 class TypedDictClass(BaseTypingValue):
@@ -123,9 +453,36 @@ class TypedDictClass(BaseTypingValue):

 class TypedDict(LazyValueWrapper):
     """Represents the instance version of ``TypedDictClass``."""
-
     def __init__(self, definition_class):
         self.inference_state = definition_class.inference_state
         self.parent_context = definition_class.parent_context
         self.tree_node = definition_class.tree_node
         self._definition_class = definition_class
+
+    @property
+    def name(self):
+        return ValueName(self, self.tree_node.name)
+
+    def py__simple_getitem__(self, index):
+        if isinstance(index, str):
+            return ValueSet.from_sets(
+                name.infer()
+                for filter in self._definition_class.get_filters(is_instance=True)
+                for name in filter.get(index)
+            )
+        return NO_VALUES
+
+    def get_key_values(self):
+        filtered_values = itertools.chain.from_iterable((
+            f.values()
+            for f in self._definition_class.get_filters(is_instance=True)
+        ))
+        return ValueSet({
+            create_simple_object(self.inference_state, v.string_name)
+            for v in filtered_values
+        })
+
+    def _get_wrapped_value(self):
+        d, = self.inference_state.builtins_module.py__getattribute__('dict')
+        result, = d.execute_with_values()
+        return result
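
Most of the typing.py machinery boils down to mapping annotations onto ordinary value sets: `Union[...]` becomes the union of its members, and `Optional[T]` is `Union[T, None]`, which is why `execute_annotation()` for `Optional` adds the builtin `None` to the gathered classes. In annotation terms:

    from typing import Optional, Union

    def f(a: Union[int, str], b: Optional[int]) -> None:
        a  # inferred as {int, str}
        b  # inferred as {int, None} -- identical to Union[int, None]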
diff --git a/jedi/inference/gradual/utils.py b/jedi/inference/gradual/utils.py
index 70d131f0..af3703c7 100644
--- a/jedi/inference/gradual/utils.py
+++ b/jedi/inference/gradual/utils.py
@@ -1,11 +1,35 @@
 from pathlib import Path
+
 from jedi.inference.gradual.typeshed import TYPESHED_PATH, create_stub_module


-def load_proper_stub_module(inference_state, grammar, file_io, import_names,
-    module_node):
+def load_proper_stub_module(inference_state, grammar, file_io, import_names, module_node):
     """
     This function is given a random .pyi file and should return the proper
     module.
     """
-    pass
+    path = file_io.path
+    path = Path(path)
+    assert path.suffix == '.pyi'
+    try:
+        relative_path = path.relative_to(TYPESHED_PATH)
+    except ValueError:
+        pass
+    else:
+        # /[...]/stdlib/3/os/__init__.pyi -> stdlib/3/os/__init__
+        rest = relative_path.with_suffix('')
+        # Remove the stdlib/3 or third_party/3.6 part
+        import_names = rest.parts[2:]
+        if rest.name == '__init__':
+            import_names = import_names[:-1]
+
+    if import_names is not None:
+        actual_value_set = inference_state.import_module(import_names, prefer_stubs=False)
+
+        stub = create_stub_module(
+            inference_state, grammar, actual_value_set,
+            module_node, file_io, import_names
+        )
+        inference_state.stub_module_cache[import_names] = stub
+        return stub
+    return None
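
The typeshed-relative branch strips the suffix, drops the leading `stdlib/3`-style parts, and discards a trailing `__init__`. The same path arithmetic in isolation:

    from pathlib import Path

    rest = Path('stdlib/3/os/__init__.pyi').with_suffix('')
    import_names = rest.parts[2:]         # ('os', '__init__')
    if rest.name == '__init__':
        import_names = import_names[:-1]  # ('os',)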
diff --git a/jedi/inference/helpers.py b/jedi/inference/helpers.py
index 528831da..0e344c24 100644
--- a/jedi/inference/helpers.py
+++ b/jedi/inference/helpers.py
@@ -4,14 +4,42 @@ import re
 import os
 from itertools import chain
 from contextlib import contextmanager
+
 from parso.python import tree


+def is_stdlib_path(path):
+    # Python standard library paths look like this:
+    # /usr/lib/python3.9/...
+    # TODO The implementation below is probably incorrect and not complete.
+    parts = path.parts
+    if 'dist-packages' in parts or 'site-packages' in parts:
+        return False
+
+    base_path = os.path.join(sys.prefix, 'lib', 'python')
+    return bool(re.match(re.escape(base_path) + r'\d.\d', str(path)))
+
+
 def deep_ast_copy(obj):
     """
     Much, much faster than copy.deepcopy, but just for parser tree nodes.
     """
-    pass
+    # Shallow-copy the node itself; its children are rebuilt below.
+    new_obj = copy.copy(obj)
+
+    # Copy children
+    new_children = []
+    for child in obj.children:
+        if isinstance(child, tree.Leaf):
+            new_child = copy.copy(child)
+            new_child.parent = new_obj
+        else:
+            new_child = deep_ast_copy(child)
+            new_child.parent = new_obj
+        new_children.append(new_child)
+    new_obj.children = new_children
+
+    return new_obj


 def infer_call_of_leaf(context, leaf, cut_own_trailer=False):
@@ -33,8 +61,142 @@ def infer_call_of_leaf(context, leaf, cut_own_trailer=False):
       - infer the type of ``bar`` to be able to jump to the definition of foo
     The option ``cut_own_trailer`` must be set to true for the second purpose.
     """
-    pass
+    trailer = leaf.parent
+    if trailer.type == 'fstring':
+        from jedi.inference import compiled
+        return compiled.get_string_value_set(context.inference_state)
+
+    # The leaf may not be the last or first child, because there exist three
+    # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
+    # we should not match anything more than x.
+    if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]):
+        if leaf == ':':
+            # Basically happens with foo[:] when the cursor is on the colon
+            from jedi.inference.base_value import NO_VALUES
+            return NO_VALUES
+        if trailer.type == 'atom':
+            return context.infer_node(trailer)
+        return context.infer_node(leaf)
+
+    power = trailer.parent
+    index = power.children.index(trailer)
+    if cut_own_trailer:
+        cut = index
+    else:
+        cut = index + 1
+
+    if power.type == 'error_node':
+        start = index
+        while True:
+            start -= 1
+            base = power.children[start]
+            if base.type != 'trailer':
+                break
+        trailers = power.children[start + 1:cut]
+    else:
+        base = power.children[0]
+        trailers = power.children[1:cut]
+
+    if base == 'await':
+        base = trailers[0]
+        trailers = trailers[1:]
+
+    values = context.infer_node(base)
+    from jedi.inference.syntax_tree import infer_trailer
+    for trailer in trailers:
+        values = infer_trailer(context, values, trailer)
+    return values
+
+
+def get_names_of_node(node):
+    try:
+        children = node.children
+    except AttributeError:
+        if node.type == 'name':
+            return [node]
+        else:
+            return []
+    else:
+        return list(chain.from_iterable(get_names_of_node(c) for c in children))
+
+
+def is_string(value):
+    return value.is_compiled() and isinstance(value.get_safe_value(default=None), str)
+
+
+def is_literal(value):
+    return is_number(value) or is_string(value)
+
+
+def _get_safe_value_or_none(value, accept):
+    value = value.get_safe_value(default=None)
+    if isinstance(value, accept):
+        return value
+
+
+def get_int_or_none(value):
+    return _get_safe_value_or_none(value, int)
+
+
+def get_str_or_none(value):
+    return _get_safe_value_or_none(value, str)
+
+
+def is_number(value):
+    return _get_safe_value_or_none(value, (int, float)) is not None


 class SimpleGetItemNotFound(Exception):
     pass
+
+
+@contextmanager
+def reraise_getitem_errors(*exception_classes):
+    try:
+        yield
+    except exception_classes as e:
+        raise SimpleGetItemNotFound(e)
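
A minimal sketch of how reraise_getitem_errors is meant to wrap a raw subscript access; the simple_getitem helper here is hypothetical:

    from jedi.inference.helpers import (SimpleGetItemNotFound,
                                        reraise_getitem_errors)

    def simple_getitem(dct, index):
        # Convert raw KeyError/TypeError into the sentinel exception that
        # callers of getitem implementations catch.
        with reraise_getitem_errors(KeyError, TypeError):
            return dct[index]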
+
+
+def parse_dotted_names(nodes, is_import_from, until_node=None):
+    level = 0
+    names = []
+    for node in nodes[1:]:
+        if node in ('.', '...'):
+            if not names:
+                level += len(node.value)
+        elif node.type == 'dotted_name':
+            for n in node.children[::2]:
+                names.append(n)
+                if n is until_node:
+                    break
+            else:
+                continue
+            break
+        elif node.type == 'name':
+            names.append(node)
+            if node is until_node:
+                break
+        elif node == ',':
+            if not is_import_from:
+                names = []
+        else:
+            # If the keyword `import` comes along, it stops checking
+            # for names.
+            break
+    return level, names
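
A usage sketch for parse_dotted_names, assuming parso; leading dots are counted into level, and the keyword `import` ends the name collection:

    import parso
    from jedi.inference.helpers import parse_dotted_names

    tree = parso.parse('from foo.bar import baz\n')
    import_node = next(tree.iter_imports())
    level, names = parse_dotted_names(import_node.children, is_import_from=True)
    print(level, [n.value for n in names])  # 0 ['foo', 'bar']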
+
+
+def values_from_qualified_names(inference_state, *names):
+    return inference_state.import_module(names[:-1]).py__getattribute__(names[-1])
+
+
+def is_big_annoying_library(context):
+    string_names = context.get_root_context().string_names
+    if string_names is None:
+        return False
+
+    # Especially pandas and tensorflow are huge complicated Python libraries
+    # that get even slower than they already are when Jedi tries to undrstand
+    # dynamic features like decorators, ifs and other stuff.
+    return string_names[0] in ('pandas', 'numpy', 'tensorflow', 'matplotlib')
diff --git a/jedi/inference/imports.py b/jedi/inference/imports.py
index 960fd535..c1a4953f 100644
--- a/jedi/inference/imports.py
+++ b/jedi/inference/imports.py
@@ -10,8 +10,10 @@ statements like ``from datetim`` (cursor at the end would return ``datetime``).
 """
 import os
 from pathlib import Path
+
 from parso.python import tree
 from parso.tree import search_ancestor
+
 from jedi import debug
 from jedi import settings
 from jedi.file_io import FolderIO
@@ -24,16 +26,102 @@ from jedi.inference.utils import unite
 from jedi.inference.cache import inference_state_method_cache
 from jedi.inference.names import ImportName, SubModuleName
 from jedi.inference.base_value import ValueSet, NO_VALUES
-from jedi.inference.gradual.typeshed import import_module_decorator, create_stub_module, parse_stub_module
+from jedi.inference.gradual.typeshed import import_module_decorator, \
+    create_stub_module, parse_stub_module
 from jedi.inference.compiled.subprocess.functions import ImplicitNSInfo
 from jedi.plugins import plugin_manager


 class ModuleCache:
-
     def __init__(self):
         self._name_cache = {}

+    def add(self, string_names, value_set):
+        if string_names is not None:
+            self._name_cache[string_names] = value_set
+
+    def get(self, string_names):
+        return self._name_cache.get(string_names)
+
+
+# This memoization is needed, because otherwise we will infinitely loop on
+# certain imports.
+@inference_state_method_cache(default=NO_VALUES)
+def infer_import(context, tree_name):
+    module_context = context.get_root_context()
+    from_import_name, import_path, level, values = \
+        _prepare_infer_import(module_context, tree_name)
+    if values:
+        if from_import_name is not None:
+            values = values.py__getattribute__(
+                from_import_name,
+                name_context=context,
+                analysis_errors=False
+            )
+
+            if not values:
+                path = import_path + (from_import_name,)
+                importer = Importer(context.inference_state, path, module_context, level)
+                values = importer.follow()
+    debug.dbg('after import: %s', values)
+    return values
+
+
+@inference_state_method_cache(default=[])
+def goto_import(context, tree_name):
+    module_context = context.get_root_context()
+    from_import_name, import_path, level, values = \
+        _prepare_infer_import(module_context, tree_name)
+    if not values:
+        return []
+
+    if from_import_name is not None:
+        names = unite([
+            c.goto(
+                from_import_name,
+                name_context=context,
+                analysis_errors=False
+            ) for c in values
+        ])
+        # Avoid recursion on the same names.
+        if names and not any(n.tree_name is tree_name for n in names):
+            return names
+
+        path = import_path + (from_import_name,)
+        importer = Importer(context.inference_state, path, module_context, level)
+        values = importer.follow()
+    return set(s.name for s in values)
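
Both helpers above ultimately back the public API; a hedged end-to-end example (the exact output depends on the environment):

    import jedi

    source = 'from os import path\npath'
    script = jedi.Script(source)
    print(script.infer(2, 0))  # module definition(s) for os.path
    print(script.goto(1, 16))  # resolves the imported name `path`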
+
+
+def _prepare_infer_import(module_context, tree_name):
+    import_node = search_ancestor(tree_name, 'import_name', 'import_from')
+    import_path = import_node.get_path_for_name(tree_name)
+    from_import_name = None
+    try:
+        from_names = import_node.get_from_names()
+    except AttributeError:
+        # Is an import_name
+        pass
+    else:
+        if len(from_names) + 1 == len(import_path):
+            # We have to fetch the from_names part first and then check
+            # if from_names exists in the modules.
+            from_import_name = import_path[-1]
+            import_path = from_names
+
+    importer = Importer(module_context.inference_state, tuple(import_path),
+                        module_context, import_node.level)
+
+    return from_import_name, tuple(import_path), import_node.level, importer.follow()
+
+
+def _add_error(value, name, message):
+    if hasattr(name, 'parent') and value is not None:
+        analysis.add(value, 'import-error', name, message)
+    else:
+        debug.warning('ImportError without origin: ' + message)
+

 def _level_to_base_import_path(project_path, directory, level):
     """
@@ -41,11 +129,28 @@ def _level_to_base_import_path(project_path, directory, level):
     import .....foo), we can still try our best to help the user for
     completions.
     """
-    pass
+    for i in range(level - 1):
+        old = directory
+        directory = os.path.dirname(directory)
+        if old == directory:
+            return None, None

+    d = directory
+    level_import_paths = []
+    # Now that we are on the level that the user wants to be, calculate the
+    # import path for it.
+    while True:
+        if d == project_path:
+            return level_import_paths, d
+        dir_name = os.path.basename(d)
+        if dir_name:
+            level_import_paths.insert(0, dir_name)
+            d = os.path.dirname(d)
+        else:
+            return None, directory
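
A worked sketch of the path arithmetic, assuming a POSIX layout with the project at /repo and the edited file in /repo/pkg/sub (level=2, i.e. `from ..x import y`); the helper is private and imported here only for illustration:

    from jedi.inference.imports import _level_to_base_import_path

    paths, directory = _level_to_base_import_path('/repo', '/repo/pkg/sub', 2)
    print(paths, directory)  # ['pkg'] /repo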

-class Importer:

+class Importer:
     def __init__(self, inference_state, import_path, module_context, level=0):
         """
         An implementation similar to ``__import__``. Use `follow`
@@ -63,11 +168,22 @@ class Importer:
         self._inference_state = inference_state
         self.level = level
         self._module_context = module_context
+
         self._fixed_sys_path = None
         self._infer_possible = True
         if level:
             base = module_context.get_value().py__package__()
+            # We need to care for two cases, the first one is if it's a valid
+            # Python import. This import has a properly defined module name
+            # chain like `foo.bar.baz` and an import in baz is made for
+            # `..lala`. It can then resolve to `foo.bar.lala`.
+            # The else here is a heuristic for all other cases: if you search
+            # for `...bar` in `foo`, for example, it's obviously out of scope.
+            # However, since Jedi just tries to do its best, we help the user
+            # here, because they might have specified something wrong in
+            # their project.
             if level <= len(base):
+                # Here we basically rewrite the level to 0.
                 base = tuple(base)
                 if level > 1:
                     base = base[:-level + 1]
@@ -77,20 +193,30 @@ class Importer:
                 project_path = self._inference_state.project.path
                 import_path = list(import_path)
                 if path is None:
+                    # If no path is defined, our best guess is that the current
+                    # file is edited by a user in the current working
+                    # directory. We need to add an initial path, because it
+                    # will get removed as the name of the current file.
                     directory = project_path
                 else:
                     directory = os.path.dirname(path)
+
                 base_import_path, base_directory = _level_to_base_import_path(
-                    project_path, directory, level)
+                    project_path, directory, level,
+                )
                 if base_directory is None:
+                    # Everything is lost; the relative import points
+                    # somewhere outside of the filesystem.
                     self._infer_possible = False
                 else:
                     self._fixed_sys_path = [base_directory]
+
                 if base_import_path is None:
                     if import_path:
-                        _add_error(module_context, import_path[0], message=
-                            'Attempted relative import beyond top-level package.'
-                            )
+                        _add_error(
+                            module_context, import_path[0],
+                            message='Attempted relative import beyond top-level package.'
+                        )
                 else:
                     import_path = base_import_path + import_path
         self.import_path = import_path
@@ -98,47 +224,369 @@ class Importer:
     @property
     def _str_import_path(self):
         """Returns the import path as pure strings instead of `Name`."""
-        pass
+        return tuple(
+            name.value if isinstance(name, tree.Name) else name
+            for name in self.import_path
+        )
+
+    def _sys_path_with_modifications(self, is_completion):
+        if self._fixed_sys_path is not None:
+            return self._fixed_sys_path
+
+        return (
+            # For import completions we don't want to see init paths, but for
+            # inference we want to show the user as much as possible.
+            # See GH #1446.
+            self._inference_state.get_sys_path(add_init_paths=not is_completion)
+            + [
+                str(p) for p
+                in sys_path.check_sys_path_modifications(self._module_context)
+            ]
+        )
+
+    def follow(self):
+        if not self.import_path:
+            if self._fixed_sys_path:
+                # This is a bit of a special case, that maybe should be
+                # revisited. If the project path is wrong or the user uses
+                # relative imports the wrong way, we might end up here, where
+                # the `fixed_sys_path == project.path` in that case we kind of
+                # use the project.path.parent directory as our path. This is
+                # usually not a problem, except if imports in other places are
+                # using the same names. Example:
+                #
+                # foo/                       < #1
+                #   - setup.py
+                #   - foo/                   < #2
+                #     - __init__.py
+                #     - foo.py               < #3
+                #
+                # If the top foo is our project folder and somebody uses
+                # `from . import foo` in `setup.py`, it will resolve to foo #2,
+                # which means that the import for foo.foo is cached as
+                # `__init__.py` (#2) and not as `foo.py` (#3). This is usually
+                # not an issue, because this case is probably pretty rare, but
+                # might be an issue for some people.
+                #
+                # However, for most normal cases where we work with different
+                # file names, this code path is hit, and it basically changes
+                # the project path to an ancestor of the project path.
+                from jedi.inference.value.namespace import ImplicitNamespaceValue
+                import_path = (os.path.basename(self._fixed_sys_path[0]),)
+                ns = ImplicitNamespaceValue(
+                    self._inference_state,
+                    string_names=import_path,
+                    paths=self._fixed_sys_path,
+                )
+                return ValueSet({ns})
+            return NO_VALUES
+        if not self._infer_possible:
+            return NO_VALUES
+
+        # Check caches first
+        from_cache = self._inference_state.stub_module_cache.get(self._str_import_path)
+        if from_cache is not None:
+            return ValueSet({from_cache})
+        from_cache = self._inference_state.module_cache.get(self._str_import_path)
+        if from_cache is not None:
+            return from_cache
+
+        sys_path = self._sys_path_with_modifications(is_completion=False)
+
+        return import_module_by_names(
+            self._inference_state, self.import_path, sys_path, self._module_context
+        )

     def _get_module_names(self, search_path=None, in_module=None):
         """
         Get the names of all modules in the search_path. This means file names
         and not names defined in the files.
         """
-        pass
+        if search_path is None:
+            sys_path = self._sys_path_with_modifications(is_completion=True)
+        else:
+            sys_path = search_path
+        return list(iter_module_names(
+            self._inference_state, self._module_context, sys_path,
+            module_cls=ImportName if in_module is None else SubModuleName,
+            add_builtin_modules=search_path is None and in_module is None,
+        ))

     def completion_names(self, inference_state, only_modules=False):
         """
         :param only_modules: Indicates whether it's possible to import a
             definition that is not defined in a module.
         """
-        pass
+        if not self._infer_possible:
+            return []
+
+        names = []
+        if self.import_path:
+            # flask
+            if self._str_import_path == ('flask', 'ext'):
+                # List Flask extensions like ``flask_foo``
+                for mod in self._get_module_names():
+                    modname = mod.string_name
+                    if modname.startswith('flask_'):
+                        extname = modname[len('flask_'):]
+                        names.append(ImportName(self._module_context, extname))
+                # Now the old style: ``flaskext.foo``
+                for dir in self._sys_path_with_modifications(is_completion=True):
+                    flaskext = os.path.join(dir, 'flaskext')
+                    if os.path.isdir(flaskext):
+                        names += self._get_module_names([flaskext])
+
+            values = self.follow()
+            for value in values:
+                # Non-modules are not completable.
+                if value.api_type not in ('module', 'namespace'):
+                    continue
+                if not value.is_compiled():
+                    # sub_modules_dict is not implemented for compiled modules.
+                    names += value.sub_modules_dict().values()
+
+            if not only_modules:
+                from jedi.inference.gradual.conversion import convert_values
+
+                both_values = values | convert_values(values)
+                for c in both_values:
+                    for filter in c.get_filters():
+                        names += filter.values()
+        else:
+            if self.level:
+                # We only get here if the level cannot be properly calculated.
+                names += self._get_module_names(self._fixed_sys_path)
+            else:
+                # This is just the list of global imports.
+                names += self._get_module_names()
+        return names
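
completion_names() is what feeds import completions in the public API; a small hedged example:

    import jedi

    # Completing `import js` should offer `json` among other modules.
    print(jedi.Script('import js').complete(1, 9))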
+
+
+def import_module_by_names(inference_state, import_names, sys_path=None,
+                           module_context=None, prefer_stubs=True):
+    if sys_path is None:
+        sys_path = inference_state.get_sys_path()
+
+    str_import_names = tuple(
+        i.value if isinstance(i, tree.Name) else i
+        for i in import_names
+    )
+    value_set = [None]
+    for i, name in enumerate(import_names):
+        value_set = ValueSet.from_sets([
+            import_module(
+                inference_state,
+                str_import_names[:i+1],
+                parent_module_value,
+                sys_path,
+                prefer_stubs=prefer_stubs,
+            ) for parent_module_value in value_set
+        ])
+        if not value_set:
+            message = 'No module named ' + '.'.join(str_import_names)
+            if module_context is not None:
+                _add_error(module_context, name, message)
+            else:
+                debug.warning(message)
+            return NO_VALUES
+    return value_set


 @plugin_manager.decorate()
 @import_module_decorator
-def import_module(inference_state, import_names, parent_module_value, sys_path
-    ):
+def import_module(inference_state, import_names, parent_module_value, sys_path):
     """
     This method is very similar to importlib's `_gcd_import`.
     """
-    pass
+    if import_names[0] in settings.auto_import_modules:
+        module = _load_builtin_module(inference_state, import_names, sys_path)
+        if module is None:
+            return NO_VALUES
+        return ValueSet([module])
+
+    module_name = '.'.join(import_names)
+    if parent_module_value is None:
+        # Override the sys.path. It only works well that way.
+        # Injecting the path directly into `find_module` did not work.
+        file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
+            string=import_names[-1],
+            full_name=module_name,
+            sys_path=sys_path,
+            is_global_search=True,
+        )
+        if is_pkg is None:
+            return NO_VALUES
+    else:
+        paths = parent_module_value.py__path__()
+        if paths is None:
+            # The module might not be a package.
+            return NO_VALUES
+
+        file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
+            string=import_names[-1],
+            path=paths,
+            full_name=module_name,
+            is_global_search=False,
+        )
+        if is_pkg is None:
+            return NO_VALUES
+
+    if isinstance(file_io_or_ns, ImplicitNSInfo):
+        from jedi.inference.value.namespace import ImplicitNamespaceValue
+        module = ImplicitNamespaceValue(
+            inference_state,
+            string_names=tuple(file_io_or_ns.name.split('.')),
+            paths=file_io_or_ns.paths,
+        )
+    elif file_io_or_ns is None:
+        module = _load_builtin_module(inference_state, import_names, sys_path)
+        if module is None:
+            return NO_VALUES
+    else:
+        module = _load_python_module(
+            inference_state, file_io_or_ns,
+            import_names=import_names,
+            is_package=is_pkg,
+        )
+
+    if parent_module_value is None:
+        debug.dbg('global search_module %s: %s', import_names[-1], module)
+    else:
+        debug.dbg('search_module %s in paths %s: %s', module_name, paths, module)
+    return ValueSet([module])


-def load_module_from_path(inference_state, file_io, import_names=None,
-    is_package=None):
+def _load_python_module(inference_state, file_io,
+                        import_names=None, is_package=False):
+    module_node = inference_state.parse(
+        file_io=file_io,
+        cache=True,
+        diff_cache=settings.fast_parser,
+        cache_path=settings.cache_directory,
+    )
+
+    from jedi.inference.value import ModuleValue
+    return ModuleValue(
+        inference_state, module_node,
+        file_io=file_io,
+        string_names=import_names,
+        code_lines=get_cached_code_lines(inference_state.grammar, file_io.path),
+        is_package=is_package,
+    )
+
+
+def _load_builtin_module(inference_state, import_names=None, sys_path=None):
+    project = inference_state.project
+    if sys_path is None:
+        sys_path = inference_state.get_sys_path()
+    if not project._load_unsafe_extensions:
+        safe_paths = project._get_base_sys_path(inference_state)
+        sys_path = [p for p in sys_path if p in safe_paths]
+
+    dotted_name = '.'.join(import_names)
+    assert dotted_name is not None
+    module = compiled.load_module(inference_state, dotted_name=dotted_name, sys_path=sys_path)
+    if module is None:
+        # The module might, for example, raise an ImportError and therefore
+        # not be importable.
+        return None
+    return module
+
+
+def load_module_from_path(inference_state, file_io, import_names=None, is_package=None):
     """
     This should pretty much only be used for get_modules_containing_name. It's
     here to ensure that a random path is still properly loaded into the Jedi
     module structure.
     """
-    pass
+    path = Path(file_io.path)
+    if import_names is None:
+        e_sys_path = inference_state.get_sys_path()
+        import_names, is_package = sys_path.transform_path_to_dotted(e_sys_path, path)
+    else:
+        assert isinstance(is_package, bool)
+
+    is_stub = path.suffix == '.pyi'
+    if is_stub:
+        folder_io = file_io.get_parent_folder()
+        if folder_io.path.endswith('-stubs'):
+            folder_io = FolderIO(folder_io.path[:-6])
+        if path.name == '__init__.pyi':
+            python_file_io = folder_io.get_file_io('__init__.py')
+        else:
+            python_file_io = folder_io.get_file_io(import_names[-1] + '.py')
+
+        try:
+            v = load_module_from_path(
+                inference_state, python_file_io,
+                import_names, is_package=is_package
+            )
+            values = ValueSet([v])
+        except FileNotFoundError:
+            values = NO_VALUES
+
+        return create_stub_module(
+            inference_state, inference_state.latest_grammar, values,
+            parse_stub_module(inference_state, file_io), file_io, import_names
+        )
+    else:
+        module = _load_python_module(
+            inference_state, file_io,
+            import_names=import_names,
+            is_package=is_package,
+        )
+        inference_state.module_cache.add(import_names, ValueSet([module]))
+        return module
+
+
+def load_namespace_from_path(inference_state, folder_io):
+    import_names, is_package = sys_path.transform_path_to_dotted(
+        inference_state.get_sys_path(),
+        Path(folder_io.path)
+    )
+    from jedi.inference.value.namespace import ImplicitNamespaceValue
+    return ImplicitNamespaceValue(inference_state, import_names, [folder_io.path])
+
+
+def follow_error_node_imports_if_possible(context, name):
+    error_node = tree.search_ancestor(name, 'error_node')
+    if error_node is not None:
+        # Get the first command start of a started simple_stmt. The error
+        # node is sometimes a small_stmt and sometimes a simple_stmt. Check
+        # for `;` leaves that start new statements.
+        start_index = 0
+        for index, n in enumerate(error_node.children):
+            if n.start_pos > name.start_pos:
+                break
+            if n == ';':
+                start_index = index + 1
+        nodes = error_node.children[start_index:]
+        first_name = nodes[0].get_first_leaf().value
+
+        # Make it possible to infer stuff like `import foo.` or
+        # `from foo.bar`.
+        if first_name in ('from', 'import'):
+            is_import_from = first_name == 'from'
+            level, names = helpers.parse_dotted_names(
+                nodes,
+                is_import_from=is_import_from,
+                until_node=name,
+            )
+            return Importer(
+                context.inference_state, names, context.get_root_context(), level).follow()
+    return None


 def iter_module_names(inference_state, module_context, search_path,
-    module_cls=ImportName, add_builtin_modules=True):
+                      module_cls=ImportName, add_builtin_modules=True):
     """
     Get the names of all modules in the search_path. This means file names
     and not names defined in the files.
     """
-    pass
+    # add builtin module names
+    if add_builtin_modules:
+        for name in inference_state.compiled_subprocess.get_builtin_module_names():
+            yield module_cls(module_context, name)
+
+    for name in inference_state.compiled_subprocess.iter_module_names(search_path):
+        yield module_cls(module_context, name)
diff --git a/jedi/inference/lazy_value.py b/jedi/inference/lazy_value.py
index 81192b15..b149f21e 100644
--- a/jedi/inference/lazy_value.py
+++ b/jedi/inference/lazy_value.py
@@ -3,7 +3,6 @@ from jedi.common import monkeypatch


 class AbstractLazyValue:
-
     def __init__(self, data, min=1, max=1):
         self.data = data
         self.min = min
@@ -12,28 +11,51 @@ class AbstractLazyValue:
     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self.data)

+    def infer(self):
+        raise NotImplementedError
+

 class LazyKnownValue(AbstractLazyValue):
     """data is a Value."""
+    def infer(self):
+        return ValueSet([self.data])


 class LazyKnownValues(AbstractLazyValue):
     """data is a ValueSet."""
+    def infer(self):
+        return self.data


 class LazyUnknownValue(AbstractLazyValue):
-
     def __init__(self, min=1, max=1):
         super().__init__(None, min, max)

+    def infer(self):
+        return NO_VALUES

-class LazyTreeValue(AbstractLazyValue):

+class LazyTreeValue(AbstractLazyValue):
     def __init__(self, context, node, min=1, max=1):
         super().__init__(node, min, max)
         self.context = context
+        # We need to save the predefined names. It's an unfortunate side effect
+        # that needs to be tracked; otherwise results will be wrong.
         self._predefined_names = dict(context.predefined_names)

+    def infer(self):
+        with monkeypatch(self.context, 'predefined_names', self._predefined_names):
+            return self.context.infer_node(self.data)
+
+
+def get_merged_lazy_value(lazy_values):
+    if len(lazy_values) > 1:
+        return MergedLazyValues(lazy_values)
+    else:
+        return lazy_values[0]
+

 class MergedLazyValues(AbstractLazyValue):
     """data is a list of lazy values."""
+    def infer(self):
+        return ValueSet.from_sets(l.infer() for l in self.data)
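
A minimal sketch of merging lazy values; NO_VALUES is used just to have cheap inferable stand-ins:

    from jedi.inference.base_value import NO_VALUES
    from jedi.inference.lazy_value import LazyKnownValues, get_merged_lazy_value

    merged = get_merged_lazy_value([LazyKnownValues(NO_VALUES),
                                    LazyKnownValues(NO_VALUES)])
    print(merged.infer())  # the union of both value sets (empty here)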
diff --git a/jedi/inference/names.py b/jedi/inference/names.py
index 3c2fd660..c3896923 100644
--- a/jedi/inference/names.py
+++ b/jedi/inference/names.py
@@ -1,7 +1,9 @@
 from abc import abstractmethod
 from inspect import Parameter
 from typing import Optional, Tuple
+
 from parso.tree import search_ancestor
+
 from jedi.parser_utils import find_statement_documentation, clean_scope_docstring
 from jedi.inference.utils import unite
 from jedi.inference.base_value import ValueSet, NO_VALUES
@@ -12,6 +14,17 @@ from jedi.inference.helpers import deep_ast_copy, infer_call_of_leaf
 from jedi.plugins import plugin_manager


+def _merge_name_docs(names):
+    doc = ''
+    for name in names:
+        if doc:
+            # In case we have multiple values, just return all of them
+            # separated by a few dashes.
+            doc += '\n' + '-' * 30 + '\n'
+        doc += name.py__doc__()
+    return doc
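
A hedged illustration of the separator behaviour using a throwaway stand-in (the _FakeName class is invented for this example):

    from jedi.inference.names import _merge_name_docs

    class _FakeName:
        def __init__(self, doc):
            self._doc = doc

        def py__doc__(self):
            return self._doc

    print(_merge_name_docs([_FakeName('first'), _FakeName('second')]))
    # first
    # ------------------------------
    # second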
+
+
 class AbstractNameDefinition:
     start_pos: Optional[Tuple[int, int]] = None
     string_name: str
@@ -22,19 +35,58 @@ class AbstractNameDefinition:
     Used for the Jedi API to know if it's a keyword or an actual name.
     """

+    @abstractmethod
+    def infer(self):
+        raise NotImplementedError
+
+    @abstractmethod
+    def goto(self):
+        # Typically names are already definitions and therefore a goto on that
+        # name will always result in the name itself.
+        return {self}
+
+    def get_qualified_names(self, include_module_names=False):
+        qualified_names = self._get_qualified_names()
+        if qualified_names is None or not include_module_names:
+            return qualified_names
+
+        module_names = self.get_root_context().string_names
+        if module_names is None:
+            return None
+        return module_names + qualified_names
+
+    def _get_qualified_names(self):
+        # By default, a name has no qualified names.
+        return None
+
+    def get_root_context(self):
+        return self.parent_context.get_root_context()
+
+    def get_public_name(self):
+        return self.string_name
+
     def __repr__(self):
         if self.start_pos is None:
-            return '<%s: string_name=%s>' % (self.__class__.__name__, self.
-                string_name)
-        return '<%s: string_name=%s start_pos=%s>' % (self.__class__.
-            __name__, self.string_name, self.start_pos)
+            return '<%s: string_name=%s>' % (self.__class__.__name__, self.string_name)
+        return '<%s: string_name=%s start_pos=%s>' % (self.__class__.__name__,
+                                                      self.string_name, self.start_pos)
+
+    def is_import(self):
+        return False
+
+    def py__doc__(self):
+        return ''
+
+    @property
+    def api_type(self):
+        return self.parent_context.api_type

     def get_defining_qualified_value(self):
         """
         Returns either None or the value that is public and qualified. Won't
         return a function, because a name in a function is never public.
         """
-        pass
+        return None


 class AbstractArbitraryName(AbstractNameDefinition):
@@ -50,28 +102,194 @@ class AbstractArbitraryName(AbstractNameDefinition):
         self.string_name = string
         self.parent_context = inference_state.builtins_module

+    def infer(self):
+        return NO_VALUES

-class AbstractTreeName(AbstractNameDefinition):

+class AbstractTreeName(AbstractNameDefinition):
     def __init__(self, parent_context, tree_name):
         self.parent_context = parent_context
         self.tree_name = tree_name

+    def get_qualified_names(self, include_module_names=False):
+        import_node = search_ancestor(self.tree_name, 'import_name', 'import_from')
+        # For import nodes we cannot just have names, because it's very unclear
+        # what they would look like. For now we just ignore them in most cases.
+        # In case of level == 1 it always works, because it's like a submodule
+        # lookup.
+        if import_node is not None and not (import_node.level == 1
+                                            and self.get_root_context().get_value().is_package()):
+            # TODO improve the situation for when level is present.
+            if include_module_names and not import_node.level:
+                return tuple(n.value for n in import_node.get_path_for_name(self.tree_name))
+            else:
+                return None
+
+        return super().get_qualified_names(include_module_names)
+
+    def _get_qualified_names(self):
+        parent_names = self.parent_context.get_qualified_names()
+        if parent_names is None:
+            return None
+        return parent_names + (self.tree_name.value,)
+
+    def get_defining_qualified_value(self):
+        if self.is_import():
+            raise NotImplementedError("Shouldn't really happen, please report")
+        elif self.parent_context:
+            return self.parent_context.get_value()  # Might be None
+        return None
+
+    def goto(self):
+        context = self.parent_context
+        name = self.tree_name
+        definition = name.get_definition(import_name_always=True)
+        if definition is not None:
+            type_ = definition.type
+            if type_ == 'expr_stmt':
+                # Only take the parent, because if it's more complicated than just
+                # a name it's something you can "goto" again.
+                is_simple_name = name.parent.type not in ('power', 'trailer')
+                if is_simple_name:
+                    return [self]
+            elif type_ in ('import_from', 'import_name'):
+                from jedi.inference.imports import goto_import
+                module_names = goto_import(context, name)
+                return module_names
+            else:
+                return [self]
+        else:
+            from jedi.inference.imports import follow_error_node_imports_if_possible
+            values = follow_error_node_imports_if_possible(context, name)
+            if values is not None:
+                return [value.name for value in values]
+
+        par = name.parent
+        node_type = par.type
+        if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name:
+            # Named param goto.
+            trailer = par.parent
+            if trailer.type == 'arglist':
+                trailer = trailer.parent
+            if trailer.type != 'classdef':
+                if trailer.type == 'decorator':
+                    value_set = context.infer_node(trailer.children[1])
+                else:
+                    i = trailer.parent.children.index(trailer)
+                    to_infer = trailer.parent.children[:i]
+                    if to_infer[0] == 'await':
+                        to_infer.pop(0)
+                    value_set = context.infer_node(to_infer[0])
+                    from jedi.inference.syntax_tree import infer_trailer
+                    for trailer in to_infer[1:]:
+                        value_set = infer_trailer(context, value_set, trailer)
+                param_names = []
+                for value in value_set:
+                    for signature in value.get_signatures():
+                        for param_name in signature.get_param_names():
+                            if param_name.string_name == name.value:
+                                param_names.append(param_name)
+                return param_names
+        elif node_type == 'dotted_name':  # Is a decorator.
+            index = par.children.index(name)
+            if index > 0:
+                new_dotted = deep_ast_copy(par)
+                new_dotted.children[index - 1:] = []
+                values = context.infer_node(new_dotted)
+                return unite(
+                    value.goto(name, name_context=context)
+                    for value in values
+                )
+
+        if node_type == 'trailer' and par.children[0] == '.':
+            values = infer_call_of_leaf(context, name, cut_own_trailer=True)
+            return values.goto(name, name_context=context)
+        else:
+            stmt = search_ancestor(
+                name, 'expr_stmt', 'lambdef'
+            ) or name
+            if stmt.type == 'lambdef':
+                stmt = name
+            return context.goto(name, position=stmt.start_pos)
+
+    def is_import(self):
+        imp = search_ancestor(self.tree_name, 'import_from', 'import_name')
+        return imp is not None
+
+    @property
+    def string_name(self):
+        return self.tree_name.value
+
+    @property
+    def start_pos(self):
+        return self.tree_name.start_pos
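
The goto() logic above is the engine behind the public Script.goto; a hedged example:

    import jedi

    source = 'def f(x):\n    return x\n'
    print(jedi.Script(source).goto(2, 11))  # jumps to the parameter `x`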
+

 class ValueNameMixin:
-    pass
+    def infer(self):
+        return ValueSet([self._value])
+
+    def py__doc__(self):
+        doc = self._value.py__doc__()
+        if not doc and self._value.is_stub():
+            from jedi.inference.gradual.conversion import convert_names
+            names = convert_names([self], prefer_stub_to_compiled=False)
+            if self not in names:
+                return _merge_name_docs(names)
+        return doc
+
+    def _get_qualified_names(self):
+        return self._value.get_qualified_names()
+
+    def get_root_context(self):
+        if self.parent_context is None:  # A module
+            return self._value.as_context()
+        return super().get_root_context()

+    def get_defining_qualified_value(self):
+        context = self.parent_context
+        if context is not None and (context.is_module() or context.is_class()):
+            return self.parent_context.get_value()  # Might be None
+        return None

-class ValueName(ValueNameMixin, AbstractTreeName):
+    @property
+    def api_type(self):
+        return self._value.api_type

+
+class ValueName(ValueNameMixin, AbstractTreeName):
     def __init__(self, value, tree_name):
         super().__init__(value.parent_context, tree_name)
         self._value = value

+    def goto(self):
+        return ValueSet([self._value.name])
+

 class TreeNameDefinition(AbstractTreeName):
-    _API_TYPES = dict(import_name='module', import_from='module', funcdef=
-        'function', param='param', classdef='class')
+    _API_TYPES = dict(
+        import_name='module',
+        import_from='module',
+        funcdef='function',
+        param='param',
+        classdef='class',
+    )
+
+    def infer(self):
+        # Refactor this, should probably be here.
+        from jedi.inference.syntax_tree import tree_name_to_values
+        return tree_name_to_values(
+            self.parent_context.inference_state,
+            self.parent_context,
+            self.tree_name
+        )
+
+    @property
+    def api_type(self):
+        definition = self.tree_name.get_definition(import_name_always=True)
+        if definition is None:
+            return 'statement'
+        return self._API_TYPES.get(definition.type, 'statement')

     def assignment_indexes(self):
         """
@@ -88,16 +306,94 @@ class TreeNameDefinition(AbstractTreeName):

             [(slice(1, -1), abc_node)]
         """
-        pass
+        indexes = []
+        is_star_expr = False
+        node = self.tree_name.parent
+        compare = self.tree_name
+        while node is not None:
+            if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'):
+                for i, child in enumerate(node.children):
+                    if child == compare:
+                        index = int(i / 2)
+                        if is_star_expr:
+                            from_end = int((len(node.children) - i) / 2)
+                            index = slice(index, -from_end)
+                        indexes.insert(0, (index, node))
+                        break
+                else:
+                    raise LookupError("Couldn't find the assignment.")
+                is_star_expr = False
+            elif node.type == 'star_expr':
+                is_star_expr = True
+            elif node.type in ('expr_stmt', 'sync_comp_for'):
+                break
+
+            compare = node
+            node = node.parent
+        return indexes
+
+    @property
+    def inference_state(self):
+        # Used by the cache function below
+        return self.parent_context.inference_state
+
+    @inference_state_method_cache(default='')
+    def py__doc__(self):
+        api_type = self.api_type
+        if api_type in ('function', 'class', 'property'):
+            if self.parent_context.get_root_context().is_stub():
+                from jedi.inference.gradual.conversion import convert_names
+                names = convert_names([self], prefer_stub_to_compiled=False)
+                if self not in names:
+                    return _merge_name_docs(names)
+
+            # Make sure the names are not TreeNameDefinitions anymore.
+            return clean_scope_docstring(self.tree_name.get_definition())
+
+        if api_type == 'module':
+            names = self.goto()
+            if self not in names:
+                return _merge_name_docs(names)
+
+        if api_type == 'statement' and self.tree_name.is_definition():
+            return find_statement_documentation(self.tree_name.get_definition())
+        return ''


 class _ParamMixin:
-    pass
+    def maybe_positional_argument(self, include_star=True):
+        options = [Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD]
+        if include_star:
+            options.append(Parameter.VAR_POSITIONAL)
+        return self.get_kind() in options
+
+    def maybe_keyword_argument(self, include_stars=True):
+        options = [Parameter.KEYWORD_ONLY, Parameter.POSITIONAL_OR_KEYWORD]
+        if include_stars:
+            options.append(Parameter.VAR_KEYWORD)
+        return self.get_kind() in options
+
+    def _kind_string(self):
+        kind = self.get_kind()
+        if kind == Parameter.VAR_POSITIONAL:  # *args
+            return '*'
+        if kind == Parameter.VAR_KEYWORD:  # **kwargs
+            return '**'
+        return ''
+
+    def get_qualified_names(self, include_module_names=False):
+        return None
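
The parameter-kind handling above surfaces in rendered call signatures; a hedged example via the public API (the printed form may vary slightly between versions):

    import jedi

    source = 'def f(a, *args, **kwargs): ...\nf('
    signature = jedi.Script(source).get_signatures(2, 2)[0]
    print(signature.to_string())  # f(a, *args, **kwargs)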


 class ParamNameInterface(_ParamMixin):
     api_type = 'param'

+    def get_kind(self):
+        raise NotImplementedError
+
+    def to_string(self):
+        raise NotImplementedError
+
     def get_executed_param_name(self):
         """
         For dealing with type inference and working around the graph, we
@@ -107,34 +403,164 @@ class ParamNameInterface(_ParamMixin):
         For now, however, it exists to avoid inferring params when we don't
         really need them (e.g. when we can just use annotations instead).
         """
-        pass
+        return None
+
+    @property
+    def star_count(self):
+        kind = self.get_kind()
+        if kind == Parameter.VAR_POSITIONAL:
+            return 1
+        if kind == Parameter.VAR_KEYWORD:
+            return 2
+        return 0
+
+    def infer_default(self):
+        return NO_VALUES


 class BaseTreeParamName(ParamNameInterface, AbstractTreeName):
     annotation_node = None
     default_node = None

+    def to_string(self):
+        output = self._kind_string() + self.get_public_name()
+        annotation = self.annotation_node
+        default = self.default_node
+        if annotation is not None:
+            output += ': ' + annotation.get_code(include_prefix=False)
+        if default is not None:
+            output += '=' + default.get_code(include_prefix=False)
+        return output

-class _ActualTreeParamName(BaseTreeParamName):
+    def get_public_name(self):
+        name = self.string_name
+        if name.startswith('__'):
+            # Params starting with __ are equivalent to positional-only
+            # parameters in typeshed.
+            name = name[2:]
+        return name
+
+    def goto(self, **kwargs):
+        return [self]

+
+class _ActualTreeParamName(BaseTreeParamName):
     def __init__(self, function_value, tree_name):
-        super().__init__(function_value.get_default_param_context(), tree_name)
+        super().__init__(
+            function_value.get_default_param_context(), tree_name)
         self.function_value = function_value

+    def _get_param_node(self):
+        return search_ancestor(self.tree_name, 'param')
+
+    @property
+    def annotation_node(self):
+        return self._get_param_node().annotation
+
+    def infer_annotation(self, execute_annotation=True, ignore_stars=False):
+        from jedi.inference.gradual.annotation import infer_param
+        values = infer_param(
+            self.function_value, self._get_param_node(),
+            ignore_stars=ignore_stars)
+        if execute_annotation:
+            values = values.execute_annotation()
+        return values
+
+    def infer_default(self):
+        node = self.default_node
+        if node is None:
+            return NO_VALUES
+        return self.parent_context.infer_node(node)
+
+    @property
+    def default_node(self):
+        return self._get_param_node().default
+
+    def get_kind(self):
+        tree_param = self._get_param_node()
+        if tree_param.star_count == 1:  # *args
+            return Parameter.VAR_POSITIONAL
+        if tree_param.star_count == 2:  # **kwargs
+            return Parameter.VAR_KEYWORD
+
+        # Params starting with __ are equivalent to positional-only
+        # parameters in typeshed.
+        if tree_param.name.value.startswith('__'):
+            return Parameter.POSITIONAL_ONLY
+
+        parent = tree_param.parent
+        param_appeared = False
+        for p in parent.children:
+            if param_appeared:
+                if p == '/':
+                    return Parameter.POSITIONAL_ONLY
+            else:
+                if p == '*':
+                    return Parameter.KEYWORD_ONLY
+                if p.type == 'param':
+                    if p.star_count:
+                        return Parameter.KEYWORD_ONLY
+                    if p == tree_param:
+                        param_appeared = True
+        return Parameter.POSITIONAL_OR_KEYWORD
+
+    def infer(self):
+        values = self.infer_annotation()
+        if values:
+            return values
+
+        doc_params = docstrings.infer_param(self.function_value, self._get_param_node())
+        return doc_params
+

 class AnonymousParamName(_ActualTreeParamName):
-    pass
+    @plugin_manager.decorate(name='goto_anonymous_param')
+    def goto(self):
+        return super().goto()
+
+    @plugin_manager.decorate(name='infer_anonymous_param')
+    def infer(self):
+        values = super().infer()
+        if values:
+            return values
+        from jedi.inference.dynamic_params import dynamic_param_lookup
+        param = self._get_param_node()
+        values = dynamic_param_lookup(self.function_value, param.position_index)
+        if values:
+            return values
+
+        if param.star_count == 1:
+            from jedi.inference.value.iterable import FakeTuple
+            value = FakeTuple(self.function_value.inference_state, [])
+        elif param.star_count == 2:
+            from jedi.inference.value.iterable import FakeDict
+            value = FakeDict(self.function_value.inference_state, {})
+        elif param.default is None:
+            return NO_VALUES
+        else:
+            return self.function_value.parent_context.infer_node(param.default)
+        return ValueSet({value})


 class ParamName(_ActualTreeParamName):
-
     def __init__(self, function_value, tree_name, arguments):
         super().__init__(function_value, tree_name)
         self.arguments = arguments

+    def infer(self):
+        values = super().infer()
+        if values:
+            return values

-class ParamNameWrapper(_ParamMixin):
+        return self.get_executed_param_name().infer()

+    def get_executed_param_name(self):
+        from jedi.inference.param import get_executed_param_names
+        params_names = get_executed_param_names(self.function_value, self.arguments)
+        return params_names[self._get_param_node().position_index]
+
+
+class ParamNameWrapper(_ParamMixin):
     def __init__(self, param_name):
         self._wrapped_param_name = param_name

@@ -146,20 +572,56 @@ class ParamNameWrapper(_ParamMixin):


 class ImportName(AbstractNameDefinition):
-    start_pos = 1, 0
+    start_pos = (1, 0)
     _level = 0

     def __init__(self, parent_context, string_name):
         self._from_module_context = parent_context
         self.string_name = string_name

+    def get_qualified_names(self, include_module_names=False):
+        if include_module_names:
+            if self._level:
+                assert self._level == 1, "Everything else is not supported for now"
+                module_names = self._from_module_context.string_names
+                if module_names is None:
+                    return module_names
+                return module_names + (self.string_name,)
+            return (self.string_name,)
+        return ()
+
+    @property
+    def parent_context(self):
+        m = self._from_module_context
+        import_values = self.infer()
+        if not import_values:
+            return m
+        # An import is either found or not found. Importing pretty much
+        # always returns exactly one value.
+        return next(iter(import_values)).as_context()
+
+    @memoize_method
+    def infer(self):
+        from jedi.inference.imports import Importer
+        m = self._from_module_context
+        return Importer(m.inference_state, [self.string_name], m, level=self._level).follow()
+
+    def goto(self):
+        return [m.name for m in self.infer()]
+
+    @property
+    def api_type(self):
+        return 'module'
+
+    def py__doc__(self):
+        return _merge_name_docs(self.goto())
+

 class SubModuleName(ImportName):
     _level = 1


 class NameWrapper:
-
     def __init__(self, wrapped_name):
         self._wrapped_name = wrapped_name

@@ -171,11 +633,32 @@ class NameWrapper:


 class StubNameMixin:
-    pass
-
-
+    def py__doc__(self):
+        from jedi.inference.gradual.conversion import convert_names
+        # Stubs are not complicated and we can just follow simple statements
+        # that have an equals in them, because they typically make something
+        # else public. See e.g. stubs for `requests`.
+        names = [self]
+        if self.api_type == 'statement' and '=' in self.tree_name.get_definition().children:
+            names = [v.name for v in self.infer()]
+
+        names = convert_names(names, prefer_stub_to_compiled=False)
+        if self in names:
+            return super().py__doc__()
+        else:
+            # We have signatures ourselves in stubs, so don't use signatures
+            # from the implementation.
+            return _merge_name_docs(names)
+
+
+# The subclass below special-cases `sys.version_info`, so that stubs can
+# evaluate version checks precisely.
 class StubName(StubNameMixin, TreeNameDefinition):
-    pass
+    def infer(self):
+        inferred = super().infer()
+        if self.string_name == 'version_info' and self.get_root_context().py__name__() == 'sys':
+            from jedi.inference.gradual.stub_value import VersionInfo
+            return ValueSet(VersionInfo(c) for c in inferred)
+        return inferred


 class ModuleName(ValueNameMixin, AbstractNameDefinition):
@@ -185,6 +668,10 @@ class ModuleName(ValueNameMixin, AbstractNameDefinition):
         self._value = value
         self._name = name

+    @property
+    def string_name(self):
+        return self._name
+

 class StubModuleName(StubNameMixin, ModuleName):
     pass
diff --git a/jedi/inference/param.py b/jedi/inference/param.py
index 3880af71..1f296215 100644
--- a/jedi/inference/param.py
+++ b/jedi/inference/param.py
@@ -1,21 +1,50 @@
 from collections import defaultdict
 from inspect import Parameter
+
 from jedi import debug
 from jedi.inference.utils import PushBackIterator
 from jedi.inference import analysis
-from jedi.inference.lazy_value import LazyKnownValue, LazyTreeValue, LazyUnknownValue
+from jedi.inference.lazy_value import LazyKnownValue, \
+    LazyTreeValue, LazyUnknownValue
 from jedi.inference.value import iterable
 from jedi.inference.names import ParamName


-class ExecutedParamName(ParamName):
+def _add_argument_issue(error_name, lazy_value, message):
+    if isinstance(lazy_value, LazyTreeValue):
+        node = lazy_value.data
+        if node.parent.type == 'argument':
+            node = node.parent
+        return analysis.add(lazy_value.context, error_name, node, message)
+

-    def __init__(self, function_value, arguments, param_node, lazy_value,
-        is_default=False):
+class ExecutedParamName(ParamName):
+    def __init__(self, function_value, arguments, param_node, lazy_value, is_default=False):
         super().__init__(function_value, param_node.name, arguments=arguments)
         self._lazy_value = lazy_value
         self._is_default = is_default

+    def infer(self):
+        return self._lazy_value.infer()
+
+    def matches_signature(self):
+        if self._is_default:
+            return True
+        argument_values = self.infer().py__class__()
+        if self.get_kind() in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
+            return True
+        annotations = self.infer_annotation(execute_annotation=False)
+        if not annotations:
+            # If we cannot infer annotations - or there aren't any - pretend
+            # that the signature matches.
+            return True
+        matches = any(c1.is_sub_class_of(c2)
+                      for c1 in argument_values
+                      for c2 in annotations.gather_annotation_classes())
+        debug.dbg("param compare %s: %s <=> %s",
+                  matches, argument_values, annotations, color='BLUE')
+        return matches
+
     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self.string_name)

@@ -40,7 +69,159 @@ def get_executed_param_names_and_issues(function_value, arguments):
         c, & d will have their values (42, 'c' and 'd' respectively) included.
       - a list with a single entry about the lack of a value for `b`
     """
-    pass
+    def too_many_args(argument):
+        m = _error_argument_count(funcdef, len(unpacked_va))
+        # Just report an error for the first param that is not needed (like
+        # CPython).
+        if arguments.get_calling_nodes():
+            # There might not be a valid calling node so check for that first.
+            issues.append(
+                _add_argument_issue(
+                    'type-error-too-many-arguments',
+                    argument,
+                    message=m
+                )
+            )
+        else:
+            issues.append(None)
+            debug.warning('non-public warning: %s', m)
+
+    issues = []  # List[Optional[analysis issue]]
+    result_params = []
+    param_dict = {}
+    funcdef = function_value.tree_node
+    # Default params are part of the value where the function was defined.
+    # This means that they might have access on class variables that the
+    # function itself doesn't have.
+    default_param_context = function_value.get_default_param_context()
+
+    for param in funcdef.get_params():
+        param_dict[param.name.value] = param
+    unpacked_va = list(arguments.unpack(funcdef))
+    var_arg_iterator = PushBackIterator(iter(unpacked_va))
+
+    non_matching_keys = defaultdict(lambda: [])
+    keys_used = {}
+    keys_only = False
+    had_multiple_value_error = False
+    for param in funcdef.get_params():
+        # The value and key can both be None. In that case, the defaults
+        # apply. args / kwargs will just be empty tuples / dicts, respectively.
+        # A wrong value count is just ignored. If you try to test cases that
+        # are not allowed in Python, Jedi may not show any completions.
+        is_default = False
+        key, argument = next(var_arg_iterator, (None, None))
+        while key is not None:
+            keys_only = True
+            try:
+                key_param = param_dict[key]
+            except KeyError:
+                non_matching_keys[key] = argument
+            else:
+                if key in keys_used:
+                    had_multiple_value_error = True
+                    m = ("TypeError: %s() got multiple values for keyword argument '%s'."
+                         % (funcdef.name, key))
+                    for contextualized_node in arguments.get_calling_nodes():
+                        issues.append(
+                            analysis.add(contextualized_node.context,
+                                         'type-error-multiple-values',
+                                         contextualized_node.node, message=m)
+                        )
+                else:
+                    keys_used[key] = ExecutedParamName(
+                        function_value, arguments, key_param, argument)
+            key, argument = next(var_arg_iterator, (None, None))
+
+        try:
+            result_params.append(keys_used[param.name.value])
+            continue
+        except KeyError:
+            pass
+
+        if param.star_count == 1:
+            # *args param
+            lazy_value_list = []
+            if argument is not None:
+                lazy_value_list.append(argument)
+                for key, argument in var_arg_iterator:
+                    # Iterate until a key argument is found.
+                    if key:
+                        var_arg_iterator.push_back((key, argument))
+                        break
+                    lazy_value_list.append(argument)
+            seq = iterable.FakeTuple(function_value.inference_state, lazy_value_list)
+            result_arg = LazyKnownValue(seq)
+        elif param.star_count == 2:
+            if argument is not None:
+                too_many_args(argument)
+            # **kwargs param
+            dct = iterable.FakeDict(function_value.inference_state, dict(non_matching_keys))
+            result_arg = LazyKnownValue(dct)
+            non_matching_keys = {}
+        else:
+            # normal param
+            if argument is None:
+                # No value: Return an empty container
+                if param.default is None:
+                    result_arg = LazyUnknownValue()
+                    if not keys_only:
+                        for contextualized_node in arguments.get_calling_nodes():
+                            m = _error_argument_count(funcdef, len(unpacked_va))
+                            issues.append(
+                                analysis.add(
+                                    contextualized_node.context,
+                                    'type-error-too-few-arguments',
+                                    contextualized_node.node,
+                                    message=m,
+                                )
+                            )
+                else:
+                    result_arg = LazyTreeValue(default_param_context, param.default)
+                    is_default = True
+            else:
+                result_arg = argument
+
+        result_params.append(ExecutedParamName(
+            function_value, arguments, param, result_arg, is_default=is_default
+        ))
+        if not isinstance(result_arg, LazyUnknownValue):
+            keys_used[param.name.value] = result_params[-1]
+
+    if keys_only:
+        # All arguments should be handed over to the next function. It's not
+        # about the values inside, it's about the names. Jedi needs to know that
+        # there's nothing to find for certain names.
+        for k in set(param_dict) - set(keys_used):
+            param = param_dict[k]
+
+            if not (non_matching_keys or had_multiple_value_error
+                    or param.star_count or param.default):
+                # add a warning only if there's not another one.
+                for contextualized_node in arguments.get_calling_nodes():
+                    m = _error_argument_count(funcdef, len(unpacked_va))
+                    issues.append(
+                        analysis.add(contextualized_node.context,
+                                     'type-error-too-few-arguments',
+                                     contextualized_node.node, message=m)
+                    )
+
+    for key, lazy_value in non_matching_keys.items():
+        m = "TypeError: %s() got an unexpected keyword argument '%s'." \
+            % (funcdef.name, key)
+        issues.append(
+            _add_argument_issue(
+                'type-error-keyword-argument',
+                lazy_value,
+                message=m
+            )
+        )
+
+    remaining_arguments = list(var_arg_iterator)
+    if remaining_arguments:
+        first_key, lazy_value = remaining_arguments[0]
+        too_many_args(lazy_value)
+    return result_params, issues


 def get_executed_param_names(function_value, arguments):
@@ -61,4 +242,16 @@ def get_executed_param_names(function_value, arguments):
     for each parameter a, b, c & d, the entries for a, c, & d will have their
     values (42, 'c' and 'd' respectively) included.
     """
-    pass
+    return get_executed_param_names_and_issues(function_value, arguments)[0]
+
+
+def _error_argument_count(funcdef, actual_count):
+    params = funcdef.get_params()
+    default_arguments = sum(1 for p in params if p.default or p.star_count)
+
+    if default_arguments == 0:
+        before = 'exactly '
+    else:
+        before = 'from %s to ' % (len(params) - default_arguments)
+    return ('TypeError: %s() takes %s%s arguments (%s given).'
+            % (funcdef.name, before, len(params), actual_count))
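
The parameter-matching loop above is easiest to read against the docstring
example of `get_executed_param_names` shown in this hunk. A plain-Python
walk-through (names purely illustrative) of what
`get_executed_param_names_and_issues` pairs up:

    def foo(a, b, c=None, d='d'):
        pass

    # foo(42, c='c') is matched as:
    #   a -> 42          (positional argument)
    #   b -> <unknown>   (missing; reported as 'type-error-too-few-arguments')
    #   c -> 'c'         (keyword argument, recorded in keys_used)
    #   d -> 'd'         (default value, is_default=True)
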
diff --git a/jedi/inference/parser_cache.py b/jedi/inference/parser_cache.py
index 83db87aa..c9b9b2bd 100644
--- a/jedi/inference/parser_cache.py
+++ b/jedi/inference/parser_cache.py
@@ -1 +1,6 @@
 from jedi.inference.cache import inference_state_function_cache
+
+
+@inference_state_function_cache()
+def get_yield_exprs(inference_state, funcdef):
+    return list(funcdef.iter_yield_exprs())
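
`get_yield_exprs` is just a per-inference-state cache around parso's
`Function.iter_yield_exprs()`. A minimal sketch of what the wrapped call
returns, using parso directly:

    import parso

    module = parso.parse("def gen():\n    yield 1\n    yield 2\n")
    funcdef = next(module.iter_funcdefs())
    print([y.get_code(include_prefix=False)
           for y in funcdef.iter_yield_exprs()])
    # ['yield 1', 'yield 2']
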
diff --git a/jedi/inference/recursion.py b/jedi/inference/recursion.py
index 69b2848b..cc241873 100644
--- a/jedi/inference/recursion.py
+++ b/jedi/inference/recursion.py
@@ -25,9 +25,13 @@ therefore the quality might not always be maximal.
 .. autodata:: per_function_execution_limit
 .. autodata:: per_function_recursion_limit
 """
+
 from contextlib import contextmanager
+
 from jedi import debug
 from jedi.inference.base_value import NO_VALUES
+
+
 recursion_limit = 15
 """
 Like :func:`sys.getrecursionlimit()`, just for |jedi|.
@@ -47,7 +51,6 @@ A function may not be executed more than this number of times recursively.


 class RecursionDetector:
-
     def __init__(self):
         self.pushed_nodes = []

@@ -58,17 +61,93 @@ def execution_allowed(inference_state, node):
     A decorator to detect recursions in statements. In a recursion a statement
     at the same place, in the same module may not be executed two times.
     """
-    pass
+    pushed_nodes = inference_state.recursion_detector.pushed_nodes
+
+    if node in pushed_nodes:
+        debug.warning('caught stmt recursion: %s @%s', node,
+                      getattr(node, 'start_pos', None))
+        yield False
+    else:
+        try:
+            pushed_nodes.append(node)
+            yield True
+        finally:
+            pushed_nodes.pop()
+
+
+def execution_recursion_decorator(default=NO_VALUES):
+    def decorator(func):
+        def wrapper(self, **kwargs):
+            detector = self.inference_state.execution_recursion_detector
+            limit_reached = detector.push_execution(self)
+            try:
+                if limit_reached:
+                    result = default
+                else:
+                    result = func(self, **kwargs)
+            finally:
+                detector.pop_execution()
+            return result
+        return wrapper
+    return decorator


 class ExecutionRecursionDetector:
     """
     Catches recursions of executions.
     """
-
     def __init__(self, inference_state):
         self._inference_state = inference_state
+
         self._recursion_level = 0
         self._parent_execution_funcs = []
         self._funcdef_execution_counts = {}
         self._execution_count = 0
+
+    def pop_execution(self):
+        self._parent_execution_funcs.pop()
+        self._recursion_level -= 1
+
+    def push_execution(self, execution):
+        funcdef = execution.tree_node
+
+        # These two will be undone in pop_execution.
+        self._recursion_level += 1
+        self._parent_execution_funcs.append(funcdef)
+
+        module_context = execution.get_root_context()
+
+        if module_context.is_builtins_module():
+            # We have control over builtins so we know they are not recursing
+            # like crazy. Therefore we just always let them execute, because
+            # they usually help a lot with getting good results.
+            return False
+
+        if self._recursion_level > recursion_limit:
+            debug.warning('Recursion limit (%s) reached', recursion_limit)
+            return True
+
+        if self._execution_count >= total_function_execution_limit:
+            debug.warning('Function execution limit (%s) reached', total_function_execution_limit)
+            return True
+        self._execution_count += 1
+
+        if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit:
+            if module_context.py__name__() == 'typing':
+                return False
+            debug.warning(
+                'Per function execution limit (%s) reached: %s',
+                per_function_execution_limit,
+                funcdef
+            )
+            return True
+        self._funcdef_execution_counts[funcdef] += 1
+
+        if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit:
+            debug.warning(
+                'Per function recursion limit (%s) reached: %s',
+                per_function_recursion_limit,
+                funcdef
+            )
+            return True
+        return False
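
The two detectors differ in scope: `RecursionDetector` guards individual
statements via `execution_allowed`, while `ExecutionRecursionDetector` counts
function executions against the module-level limits above. A distilled,
standalone sketch of the `execution_allowed` pattern (not jedi's actual
objects):

    from contextlib import contextmanager

    pushed_nodes = []

    @contextmanager
    def execution_allowed(node):
        # The same node may not be executed again while it is still on the
        # stack; nested callers receive False instead of recursing forever.
        if node in pushed_nodes:
            yield False
        else:
            pushed_nodes.append(node)
            try:
                yield True
            finally:
                pushed_nodes.pop()

    with execution_allowed('stmt') as allowed:
        with execution_allowed('stmt') as nested:
            print(allowed, nested)  # True False
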
diff --git a/jedi/inference/references.py b/jedi/inference/references.py
index d77bb44e..d01890bd 100644
--- a/jedi/inference/references.py
+++ b/jedi/inference/references.py
@@ -1,13 +1,17 @@
 import os
 import re
+
 from parso import python_bytes_to_unicode
+
 from jedi.debug import dbg
 from jedi.file_io import KnownContentFileIO, FolderIO
 from jedi.inference.names import SubModuleName
 from jedi.inference.imports import load_module_from_path
 from jedi.inference.filters import ParserTreeFilter
 from jedi.inference.gradual.conversion import convert_names
-_IGNORE_FOLDERS = '.tox', '.venv', '.mypy_cache', 'venv', '__pycache__'
+
+_IGNORE_FOLDERS = ('.tox', '.venv', '.mypy_cache', 'venv', '__pycache__')
+
 _OPENED_FILE_LIMIT = 2000
 """
 Stats from a 2016 Lenovo Notebook running Linux:
@@ -22,12 +26,294 @@ easily 100ms for bigger files.
 """


-def get_module_contexts_containing_name(inference_state, module_contexts,
-    name, limit_reduction=1):
+def _resolve_names(definition_names, avoid_names=()):
+    for name in definition_names:
+        if name in avoid_names:
+            # Avoiding recursions here, because goto on a module name lands
+            # on the same module.
+            continue
+
+        if not isinstance(name, SubModuleName):
+            # SubModuleNames are not actually existing names but created
+            # names when importing something like `import foo.bar.baz`.
+            yield name
+
+        if name.api_type == 'module':
+            yield from _resolve_names(name.goto(), definition_names)
+
+
+def _dictionarize(names):
+    return dict(
+        (n if n.tree_name is None else n.tree_name, n)
+        for n in names
+    )
+
+
+def _find_defining_names(module_context, tree_name):
+    found_names = _find_names(module_context, tree_name)
+
+    for name in list(found_names):
+        # Convert from/to stubs, because those might also be usages.
+        found_names |= set(convert_names(
+            [name],
+            only_stubs=not name.get_root_context().is_stub(),
+            prefer_stub_to_compiled=False
+        ))
+
+    found_names |= set(_find_global_variables(found_names, tree_name.value))
+    for name in list(found_names):
+        if name.api_type == 'param' or name.tree_name is None \
+                or name.tree_name.parent.type == 'trailer':
+            continue
+        found_names |= set(_add_names_in_same_context(name.parent_context, name.string_name))
+    return set(_resolve_names(found_names))
+
+
+def _find_names(module_context, tree_name):
+    name = module_context.create_name(tree_name)
+    found_names = set(name.goto())
+    found_names.add(name)
+
+    return set(_resolve_names(found_names))
+
+
+def _add_names_in_same_context(context, string_name):
+    if context.tree_node is None:
+        return
+
+    until_position = None
+    while True:
+        filter_ = ParserTreeFilter(
+            parent_context=context,
+            until_position=until_position,
+        )
+        names = set(filter_.get(string_name))
+        if not names:
+            break
+        yield from names
+        ordered = sorted(names, key=lambda x: x.start_pos)
+        until_position = ordered[0].start_pos
+
+
+def _find_global_variables(names, search_name):
+    for name in names:
+        if name.tree_name is None:
+            continue
+        module_context = name.get_root_context()
+        try:
+            method = module_context.get_global_filter
+        except AttributeError:
+            continue
+        else:
+            for global_name in method().get(search_name):
+                yield global_name
+                c = module_context.create_context(global_name.tree_name)
+                yield from _add_names_in_same_context(c, global_name.string_name)
+
+
+def find_references(module_context, tree_name, only_in_module=False):
+    inf = module_context.inference_state
+    search_name = tree_name.value
+
+    # We disable flow analysis, because if we have ifs that are only true in
+    # certain cases, we want both sides.
+    try:
+        inf.flow_analysis_enabled = False
+        found_names = _find_defining_names(module_context, tree_name)
+    finally:
+        inf.flow_analysis_enabled = True
+
+    found_names_dct = _dictionarize(found_names)
+
+    module_contexts = [module_context]
+    if not only_in_module:
+        for m in set(d.get_root_context() for d in found_names):
+            if m != module_context and m.tree_node is not None \
+                    and inf.project.path in m.py__file__().parents:
+                module_contexts.append(m)
+    # For param no search for other modules is necessary.
+    if only_in_module or any(n.api_type == 'param' for n in found_names):
+        potential_modules = module_contexts
+    else:
+        potential_modules = get_module_contexts_containing_name(
+            inf,
+            module_contexts,
+            search_name,
+        )
+
+    non_matching_reference_maps = {}
+    for module_context in potential_modules:
+        for name_leaf in module_context.tree_node.get_used_names().get(search_name, []):
+            new = _dictionarize(_find_names(module_context, name_leaf))
+            if any(tree_name in found_names_dct for tree_name in new):
+                found_names_dct.update(new)
+                for tree_name in new:
+                    for dct in non_matching_reference_maps.get(tree_name, []):
+                        # A reference that was previously searched for matches
+                        # with a now found name. Merge.
+                        found_names_dct.update(dct)
+                    try:
+                        del non_matching_reference_maps[tree_name]
+                    except KeyError:
+                        pass
+            else:
+                for name in new:
+                    non_matching_reference_maps.setdefault(name, []).append(new)
+    result = found_names_dct.values()
+    if only_in_module:
+        return [n for n in result if n.get_root_context() == module_context]
+    return result
+
+
+def _check_fs(inference_state, file_io, regex):
+    try:
+        code = file_io.read()
+    except FileNotFoundError:
+        return None
+    code = python_bytes_to_unicode(code, errors='replace')
+    if not regex.search(code):
+        return None
+    new_file_io = KnownContentFileIO(file_io.path, code)
+    m = load_module_from_path(inference_state, new_file_io)
+    if m.is_compiled():
+        return None
+    return m.as_context()
+
+
+def gitignored_paths(folder_io, file_io):
+    ignored_paths_abs = set()
+    ignored_paths_rel = set()
+
+    for l in file_io.read().splitlines():
+        if not l or l.startswith(b'#') or l.startswith(b'!') or b'*' in l:
+            continue
+
+        p = l.decode('utf-8', 'ignore').rstrip('/')
+        if '/' in p:
+            name = p.lstrip('/')
+            ignored_paths_abs.add(os.path.join(folder_io.path, name))
+        else:
+            name = p
+            ignored_paths_rel.add((folder_io.path, name))
+
+    return ignored_paths_abs, ignored_paths_rel
+
+
+def expand_relative_ignore_paths(folder_io, relative_paths):
+    curr_path = folder_io.path
+    return {os.path.join(curr_path, p[1]) for p in relative_paths if curr_path.startswith(p[0])}
+
+
+def recurse_find_python_folders_and_files(folder_io, except_paths=()):
+    except_paths = set(except_paths)
+    except_paths_relative = set()
+
+    for root_folder_io, folder_ios, file_ios in folder_io.walk():
+        # Delete folders that we don't want to iterate over.
+        for file_io in file_ios:
+            path = file_io.path
+            if path.suffix in ('.py', '.pyi'):
+                if path not in except_paths:
+                    yield None, file_io
+
+            if path.name == '.gitignore':
+                ignored_paths_abs, ignored_paths_rel = gitignored_paths(
+                    root_folder_io, file_io
+                )
+                except_paths |= ignored_paths_abs
+                except_paths_relative |= ignored_paths_rel
+
+        except_paths_relative_expanded = expand_relative_ignore_paths(
+            root_folder_io, except_paths_relative
+        )
+
+        folder_ios[:] = [
+            folder_io
+            for folder_io in folder_ios
+            if folder_io.path not in except_paths
+            and folder_io.path not in except_paths_relative_expanded
+            and folder_io.get_base_name() not in _IGNORE_FOLDERS
+        ]
+        for folder_io in folder_ios:
+            yield folder_io, None
+
+
+def recurse_find_python_files(folder_io, except_paths=()):
+    for folder_io, file_io in recurse_find_python_folders_and_files(folder_io, except_paths):
+        if file_io is not None:
+            yield file_io
+
+
+def _find_python_files_in_sys_path(inference_state, module_contexts):
+    sys_path = inference_state.get_sys_path()
+    except_paths = set()
+    yielded_paths = [m.py__file__() for m in module_contexts]
+    for module_context in module_contexts:
+        file_io = module_context.get_value().file_io
+        if file_io is None:
+            continue
+
+        folder_io = file_io.get_parent_folder()
+        while True:
+            path = folder_io.path
+            if not any(path.startswith(p) for p in sys_path) or path in except_paths:
+                break
+            for file_io in recurse_find_python_files(folder_io, except_paths):
+                if file_io.path not in yielded_paths:
+                    yield file_io
+            except_paths.add(path)
+            folder_io = folder_io.get_parent_folder()
+
+
+def _find_project_modules(inference_state, module_contexts):
+    except_ = [m.py__file__() for m in module_contexts]
+    yield from recurse_find_python_files(FolderIO(inference_state.project.path), except_)
+
+
+def get_module_contexts_containing_name(inference_state, module_contexts, name,
+                                        limit_reduction=1):
     """
     Search a name in the directories of modules.

     :param limit_reduction: Divides the limits on opening/parsing files by this
         factor.
     """
-    pass
+    # Skip non python modules
+    for module_context in module_contexts:
+        if module_context.is_compiled():
+            continue
+        yield module_context
+
+    # Very short names are not searched in other modules for now to avoid lots
+    # of file lookups.
+    if len(name) <= 2:
+        return
+
+    # Currently not used, because there's only `scope=project` and `scope=file`.
+    # At the moment there is no such thing as `scope=sys.path`.
+    # file_io_iterator = _find_python_files_in_sys_path(inference_state, module_contexts)
+    file_io_iterator = _find_project_modules(inference_state, module_contexts)
+    yield from search_in_file_ios(inference_state, file_io_iterator, name,
+                                  limit_reduction=limit_reduction)
+
+
+def search_in_file_ios(inference_state, file_io_iterator, name,
+                       limit_reduction=1, complete=False):
+    parse_limit = _PARSED_FILE_LIMIT / limit_reduction
+    open_limit = _OPENED_FILE_LIMIT / limit_reduction
+    file_io_count = 0
+    parsed_file_count = 0
+    regex = re.compile(r'\b' + re.escape(name) + (r'' if complete else r'\b'))
+    for file_io in file_io_iterator:
+        file_io_count += 1
+        m = _check_fs(inference_state, file_io, regex)
+        if m is not None:
+            parsed_file_count += 1
+            yield m
+            if parsed_file_count >= parse_limit:
+                dbg('Hit limit of parsed files: %s', parse_limit)
+                break
+
+        if file_io_count >= open_limit:
+            dbg('Hit limit of opened files: %s', open_limit)
+            break
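
`search_in_file_ios` keeps parsing cheap by grepping each file's raw text with
a word-boundary regex before handing it to the parser. A small standalone
illustration of that pre-filter (with `complete=True` the trailing `\b` is
dropped, so a partially typed name still matches):

    import re

    name = "foo"
    regex = re.compile(r'\b' + re.escape(name) + r'\b')
    print(bool(regex.search("foobar = 1")))  # False: no standalone `foo`
    print(bool(regex.search("foo = 1")))     # True
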
diff --git a/jedi/inference/signature.py b/jedi/inference/signature.py
index eb8704ba..565a269b 100644
--- a/jedi/inference/signature.py
+++ b/jedi/inference/signature.py
@@ -1,44 +1,150 @@
 from inspect import Parameter
+
 from jedi.cache import memoize_method
 from jedi import debug
 from jedi import parser_utils


 class _SignatureMixin:
-    pass
+    def to_string(self):
+        def param_strings():
+            is_positional = False
+            is_kw_only = False
+            for n in self.get_param_names(resolve_stars=True):
+                kind = n.get_kind()
+                is_positional |= kind == Parameter.POSITIONAL_ONLY
+                if is_positional and kind != Parameter.POSITIONAL_ONLY:
+                    yield '/'
+                    is_positional = False
+
+                if kind == Parameter.VAR_POSITIONAL:
+                    is_kw_only = True
+                elif kind == Parameter.KEYWORD_ONLY and not is_kw_only:
+                    yield '*'
+                    is_kw_only = True
+
+                yield n.to_string()
+
+            if is_positional:
+                yield '/'
+
+        s = self.name.string_name + '(' + ', '.join(param_strings()) + ')'
+        annotation = self.annotation_string
+        if annotation:
+            s += ' -> ' + annotation
+        return s


 class AbstractSignature(_SignatureMixin):
-
     def __init__(self, value, is_bound=False):
         self.value = value
         self.is_bound = is_bound

+    @property
+    def name(self):
+        return self.value.name
+
+    @property
+    def annotation_string(self):
+        return ''
+
+    def get_param_names(self, resolve_stars=False):
+        param_names = self._function_value.get_param_names()
+        if self.is_bound:
+            return param_names[1:]
+        return param_names
+
+    def bind(self, value):
+        raise NotImplementedError
+
+    def matches_signature(self, arguments):
+        return True
+
     def __repr__(self):
         if self.value is self._function_value:
             return '<%s: %s>' % (self.__class__.__name__, self.value)
-        return '<%s: %s, %s>' % (self.__class__.__name__, self.value, self.
-            _function_value)
+        return '<%s: %s, %s>' % (self.__class__.__name__, self.value, self._function_value)


 class TreeSignature(AbstractSignature):
-
     def __init__(self, value, function_value=None, is_bound=False):
         super().__init__(value, is_bound)
         self._function_value = function_value or value

+    def bind(self, value):
+        return TreeSignature(value, self._function_value, is_bound=True)
+
+    @property
+    def _annotation(self):
+        # Classes don't need annotations, even if __init__ has one. They always
+        # return themselves.
+        if self.value.is_class():
+            return None
+        return self._function_value.tree_node.annotation
+
+    @property
+    def annotation_string(self):
+        a = self._annotation
+        if a is None:
+            return ''
+        return a.get_code(include_prefix=False)
+
+    @memoize_method
+    def get_param_names(self, resolve_stars=False):
+        params = self._function_value.get_param_names()
+        if resolve_stars:
+            from jedi.inference.star_args import process_params
+            params = process_params(params)
+        if self.is_bound:
+            return params[1:]
+        return params
+
+    def matches_signature(self, arguments):
+        from jedi.inference.param import get_executed_param_names_and_issues
+        executed_param_names, issues = \
+            get_executed_param_names_and_issues(self._function_value, arguments)
+        if issues:
+            return False
+
+        matches = all(executed_param_name.matches_signature()
+                      for executed_param_name in executed_param_names)
+        if debug.enable_notice:
+            tree_node = self._function_value.tree_node
+            signature = parser_utils.get_signature(tree_node)
+            if matches:
+                debug.dbg("Overloading match: %s@%s (%s)",
+                          signature, tree_node.start_pos[0], arguments, color='BLUE')
+            else:
+                debug.dbg("Overloading no match: %s@%s (%s)",
+                          signature, tree_node.start_pos[0], arguments, color='BLUE')
+        return matches

-class BuiltinSignature(AbstractSignature):

-    def __init__(self, value, return_string, function_value=None, is_bound=
-        False):
+class BuiltinSignature(AbstractSignature):
+    def __init__(self, value, return_string, function_value=None, is_bound=False):
         super().__init__(value, is_bound)
         self._return_string = return_string
         self.__function_value = function_value

+    @property
+    def annotation_string(self):
+        return self._return_string

-class SignatureWrapper(_SignatureMixin):
+    @property
+    def _function_value(self):
+        if self.__function_value is None:
+            return self.value
+        return self.__function_value

+    def bind(self, value):
+        return BuiltinSignature(
+            value, self._return_string,
+            function_value=self.value,
+            is_bound=True
+        )
+
+
+class SignatureWrapper(_SignatureMixin):
     def __init__(self, wrapped_signature):
         self._wrapped_signature = wrapped_signature
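
`_SignatureMixin.to_string` follows the same marker conventions that
`inspect.Signature` uses when rendering: a `/` after the last positional-only
parameter and a `*` before the first keyword-only one (unless `*args` already
separates them). For comparison, the stdlib renders these markers identically:

    from inspect import Parameter, Signature

    sig = Signature([
        Parameter('a', Parameter.POSITIONAL_ONLY),
        Parameter('b', Parameter.POSITIONAL_OR_KEYWORD),
        Parameter('c', Parameter.KEYWORD_ONLY),
    ])
    print(sig)  # (a, /, b, *, c)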

diff --git a/jedi/inference/star_args.py b/jedi/inference/star_args.py
index 4000bc5a..71ea7093 100644
--- a/jedi/inference/star_args.py
+++ b/jedi/inference/star_args.py
@@ -11,14 +11,210 @@ This means for example in this case::
 The signature here for bar should be `bar(b, c)` instead of `bar(*args)`.
 """
 from inspect import Parameter
+
 from parso import tree
+
 from jedi.inference.utils import to_list
 from jedi.inference.names import ParamNameWrapper
 from jedi.inference.helpers import is_big_annoying_library


-class ParamNameFixedKind(ParamNameWrapper):
+def _iter_nodes_for_param(param_name):
+    from parso.python.tree import search_ancestor
+    from jedi.inference.arguments import TreeArguments
+
+    execution_context = param_name.parent_context
+    # Walk up the parso tree to get the FunctionNode we want. We use the parso
+    # tree rather than going via the execution context so that we're agnostic of
+    # the specific scope we're evaluating within (i.e. module or function,
+    # etc.).
+    function_node = tree.search_ancestor(param_name.tree_name, 'funcdef', 'lambdef')
+    module_node = function_node.get_root_node()
+    start = function_node.children[-1].start_pos
+    end = function_node.children[-1].end_pos
+    for name in module_node.get_used_names().get(param_name.string_name):
+        if start <= name.start_pos < end:
+            # Is used in the function
+            argument = name.parent
+            if argument.type == 'argument' \
+                    and argument.children[0] == '*' * param_name.star_count:
+                trailer = search_ancestor(argument, 'trailer')
+                if trailer is not None:  # Make sure we're in a function
+                    context = execution_context.create_context(trailer)
+                    if _goes_to_param_name(param_name, context, name):
+                        values = _to_callables(context, trailer)
+
+                        args = TreeArguments.create_cached(
+                            execution_context.inference_state,
+                            context=context,
+                            argument_node=trailer.children[1],
+                            trailer=trailer,
+                        )
+                        for c in values:
+                            yield c, args
+
+
+def _goes_to_param_name(param_name, context, potential_name):
+    if potential_name.type != 'name':
+        return False
+    from jedi.inference.names import TreeNameDefinition
+    found = TreeNameDefinition(context, potential_name).goto()
+    return any(param_name.parent_context == p.parent_context
+               and param_name.start_pos == p.start_pos
+               for p in found)
+
+
+def _to_callables(context, trailer):
+    from jedi.inference.syntax_tree import infer_trailer
+
+    atom_expr = trailer.parent
+    index = atom_expr.children[0] == 'await'
+    # Infer atom first
+    values = context.infer_node(atom_expr.children[index])
+    for trailer2 in atom_expr.children[index + 1:]:
+        if trailer == trailer2:
+            break
+        values = infer_trailer(context, values, trailer2)
+    return values
+
+
+def _remove_given_params(arguments, param_names):
+    count = 0
+    used_keys = set()
+    for key, _ in arguments.unpack():
+        if key is None:
+            count += 1
+        else:
+            used_keys.add(key)
+
+    for p in param_names:
+        if count and p.maybe_positional_argument():
+            count -= 1
+            continue
+        if p.string_name in used_keys and p.maybe_keyword_argument():
+            continue
+        yield p
+

+@to_list
+def process_params(param_names, star_count=3):  # default means both * and **
+    if param_names:
+        if is_big_annoying_library(param_names[0].parent_context):
+            # At first this feature can look innocent, but it does a lot of
+            # type inference in some cases, so we just ditch it.
+            yield from param_names
+            return
+
+    used_names = set()
+    arg_callables = []
+    kwarg_callables = []
+
+    kw_only_names = []
+    kwarg_names = []
+    arg_names = []
+    original_arg_name = None
+    original_kwarg_name = None
+    for p in param_names:
+        kind = p.get_kind()
+        if kind == Parameter.VAR_POSITIONAL:
+            if star_count & 1:
+                arg_callables = _iter_nodes_for_param(p)
+                original_arg_name = p
+        elif kind == Parameter.VAR_KEYWORD:
+            if star_count & 2:
+                kwarg_callables = list(_iter_nodes_for_param(p))
+                original_kwarg_name = p
+        elif kind == Parameter.KEYWORD_ONLY:
+            if star_count & 2:
+                kw_only_names.append(p)
+        elif kind == Parameter.POSITIONAL_ONLY:
+            if star_count & 1:
+                yield p
+        else:
+            if star_count == 1:
+                yield ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY)
+            elif star_count == 2:
+                kw_only_names.append(ParamNameFixedKind(p, Parameter.KEYWORD_ONLY))
+            else:
+                used_names.add(p.string_name)
+                yield p
+
+    # First process *args
+    longest_param_names = ()
+    found_arg_signature = False
+    found_kwarg_signature = False
+    for func_and_argument in arg_callables:
+        func, arguments = func_and_argument
+        new_star_count = star_count
+        if func_and_argument in kwarg_callables:
+            kwarg_callables.remove(func_and_argument)
+        else:
+            new_star_count = 1
+
+        for signature in func.get_signatures():
+            found_arg_signature = True
+            if new_star_count == 3:
+                found_kwarg_signature = True
+            args_for_this_func = []
+            for p in process_params(
+                    list(_remove_given_params(
+                        arguments,
+                        signature.get_param_names(resolve_stars=False)
+                    )), new_star_count):
+                if p.get_kind() == Parameter.VAR_KEYWORD:
+                    kwarg_names.append(p)
+                elif p.get_kind() == Parameter.VAR_POSITIONAL:
+                    arg_names.append(p)
+                elif p.get_kind() == Parameter.KEYWORD_ONLY:
+                    kw_only_names.append(p)
+                else:
+                    args_for_this_func.append(p)
+            if len(args_for_this_func) > len(longest_param_names):
+                longest_param_names = args_for_this_func
+
+    for p in longest_param_names:
+        if star_count == 1 and p.get_kind() != Parameter.VAR_POSITIONAL:
+            yield ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY)
+        else:
+            if p.get_kind() == Parameter.POSITIONAL_OR_KEYWORD:
+                used_names.add(p.string_name)
+            yield p
+
+    if not found_arg_signature and original_arg_name is not None:
+        yield original_arg_name
+    elif arg_names:
+        yield arg_names[0]
+
+    # Then process **kwargs
+    for func, arguments in kwarg_callables:
+        for signature in func.get_signatures():
+            found_kwarg_signature = True
+            for p in process_params(
+                    list(_remove_given_params(
+                        arguments,
+                        signature.get_param_names(resolve_stars=False)
+                    )), star_count=2):
+                if p.get_kind() == Parameter.VAR_KEYWORD:
+                    kwarg_names.append(p)
+                elif p.get_kind() == Parameter.KEYWORD_ONLY:
+                    kw_only_names.append(p)
+
+    for p in kw_only_names:
+        if p.string_name in used_names:
+            continue
+        yield p
+        used_names.add(p.string_name)
+
+    if not found_kwarg_signature and original_kwarg_name is not None:
+        yield original_kwarg_name
+    elif kwarg_names:
+        yield kwarg_names[0]
+
+
+class ParamNameFixedKind(ParamNameWrapper):
     def __init__(self, param_name, new_kind):
         super().__init__(param_name)
         self._new_kind = new_kind
+
+    def get_kind(self):
+        return self._new_kind
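
The effect of `process_params` shows up through jedi's public API. A hedged
sketch of the scenario from the module docstring (the exact rendering may vary
between jedi versions):

    import textwrap
    import jedi

    source = textwrap.dedent('''\
        def foo(a, b, c):
            pass

        def bar(*args):
            return foo(1, *args)

        bar(''')
    sig, = jedi.Script(source).get_signatures(7, 4)
    print(sig.to_string())  # bar(b, c) rather than bar(*args)
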
diff --git a/jedi/inference/syntax_tree.py b/jedi/inference/syntax_tree.py
index 6868286f..2cd2a140 100644
--- a/jedi/inference/syntax_tree.py
+++ b/jedi/inference/syntax_tree.py
@@ -3,10 +3,13 @@ Functions inferring the syntax tree.
 """
 import copy
 import itertools
+
 from parso.python import tree
+
 from jedi import debug
 from jedi import parser_utils
-from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, iterator_to_value_set, iterate_values
+from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \
+    iterator_to_value_set, iterate_values
 from jedi.inference.lazy_value import LazyTreeValue
 from jedi.inference import compiled
 from jedi.inference import recursion
@@ -17,7 +20,8 @@ from jedi.inference.value import ClassValue, FunctionValue
 from jedi.inference.value import iterable
 from jedi.inference.value.dynamic_arrays import ListModification, DictModification
 from jedi.inference.value import TreeInstance
-from jedi.inference.helpers import is_string, is_literal, is_number, get_names_of_node, is_big_annoying_library
+from jedi.inference.helpers import is_string, is_literal, is_number, \
+    get_names_of_node, is_big_annoying_library
 from jedi.inference.compiled.access import COMPARISON_OPERATORS
 from jedi.inference.cache import inference_state_method_cache
 from jedi.inference.gradual.stub_value import VersionInfo
@@ -26,12 +30,26 @@ from jedi.inference.names import TreeNameDefinition
 from jedi.inference.context import CompForContext
 from jedi.inference.value.decorator import Decoratee
 from jedi.plugins import plugin_manager
-operator_to_magic_method = {'+': '__add__', '-': '__sub__', '*': '__mul__',
-    '@': '__matmul__', '/': '__truediv__', '//': '__floordiv__', '%':
-    '__mod__', '**': '__pow__', '<<': '__lshift__', '>>': '__rshift__', '&':
-    '__and__', '|': '__or__', '^': '__xor__'}
-reverse_operator_to_magic_method = {k: ('__r' + v[2:]) for k, v in
-    operator_to_magic_method.items()}
+
+operator_to_magic_method = {
+    '+': '__add__',
+    '-': '__sub__',
+    '*': '__mul__',
+    '@': '__matmul__',
+    '/': '__truediv__',
+    '//': '__floordiv__',
+    '%': '__mod__',
+    '**': '__pow__',
+    '<<': '__lshift__',
+    '>>': '__rshift__',
+    '&': '__and__',
+    '|': '__or__',
+    '^': '__xor__',
+}
+
+reverse_operator_to_magic_method = {
+    k: '__r' + v[2:] for k, v in operator_to_magic_method.items()
+}


 def _limit_value_infers(func):
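
The reverse table simply rewrites `__xxx__` into `__rxxx__`, mirroring how
Python falls back to the right operand's method when the left one returns
NotImplemented. A self-contained check of the rewrite:

    operator_to_magic_method = {'+': '__add__', '-': '__sub__'}
    reverse_operator_to_magic_method = {
        k: '__r' + v[2:] for k, v in operator_to_magic_method.items()
    }
    assert reverse_operator_to_magic_method == {'+': '__radd__',
                                                '-': '__rsub__'}
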
@@ -43,14 +61,222 @@ def _limit_value_infers(func):
     I'm still not sure this is the way to go, but it looks okay for now and we
     can still go another way in the future. Tests are there. ~ dave
     """
-    pass
+    def wrapper(context, *args, **kwargs):
+        n = context.tree_node
+        inference_state = context.inference_state
+        try:
+            inference_state.inferred_element_counts[n] += 1
+            maximum = 300
+            if context.parent_context is None \
+                    and context.get_value() is inference_state.builtins_module:
+                # Builtins should have a more generous inference limit.
+                # It is important that builtins can be executed, otherwise some
+                # functions that depend on certain builtins features would be
+                # broken, see e.g. GH #1432
+                maximum *= 100
+
+            if inference_state.inferred_element_counts[n] > maximum:
+                debug.warning('In value %s there were too many inferences.', n)
+                return NO_VALUES
+        except KeyError:
+            inference_state.inferred_element_counts[n] = 1
+        return func(context, *args, **kwargs)
+
+    return wrapper
+
+
+def infer_node(context, element):
+    if isinstance(context, CompForContext):
+        return _infer_node(context, element)
+
+    if_stmt = element
+    while if_stmt is not None:
+        if_stmt = if_stmt.parent
+        if if_stmt.type in ('if_stmt', 'for_stmt'):
+            break
+        if parser_utils.is_scope(if_stmt):
+            if_stmt = None
+            break
+    predefined_if_name_dict = context.predefined_names.get(if_stmt)
+    # TODO there are a lot of issues with this one. We actually should do
+    # this in a different way. Caching should only be active in certain
+    # cases and this all sucks.
+    if predefined_if_name_dict is None and if_stmt \
+            and if_stmt.type == 'if_stmt' and context.inference_state.is_analysis:
+        if_stmt_test = if_stmt.children[1]
+        name_dicts = [{}]
+        # If we already did a check, we don't want to do it again -> If
+        # value.predefined_names is filled, we stop.
+        # We don't want to check the if stmt itself, it's just about
+        # the content.
+        if element.start_pos > if_stmt_test.end_pos:
+            # Now we need to check if the names in the if_stmt match the
+            # names in the suite.
+            if_names = get_names_of_node(if_stmt_test)
+            element_names = get_names_of_node(element)
+            str_element_names = [e.value for e in element_names]
+            if any(i.value in str_element_names for i in if_names):
+                for if_name in if_names:
+                    definitions = context.inference_state.infer(context, if_name)
+                    # Every name that has multiple different definitions
+                    # causes the complexity to rise. The complexity should
+                    # never fall below 1.
+                    if len(definitions) > 1:
+                        if len(name_dicts) * len(definitions) > 16:
+                            debug.dbg('Too many options for if branch inference %s.', if_stmt)
+                            # There's only a certain number of branches
+                            # Jedi can infer, otherwise it will take too
+                            # long.
+                            name_dicts = [{}]
+                            break
+
+                        original_name_dicts = list(name_dicts)
+                        name_dicts = []
+                        for definition in definitions:
+                            new_name_dicts = list(original_name_dicts)
+                            for i, name_dict in enumerate(new_name_dicts):
+                                new_name_dicts[i] = name_dict.copy()
+                                new_name_dicts[i][if_name.value] = ValueSet([definition])
+
+                            name_dicts += new_name_dicts
+                    else:
+                        for name_dict in name_dicts:
+                            name_dict[if_name.value] = definitions
+        if len(name_dicts) > 1:
+            result = NO_VALUES
+            for name_dict in name_dicts:
+                with context.predefine_names(if_stmt, name_dict):
+                    result |= _infer_node(context, element)
+            return result
+        else:
+            return _infer_node_if_inferred(context, element)
+    else:
+        if predefined_if_name_dict:
+            return _infer_node(context, element)
+        else:
+            return _infer_node_if_inferred(context, element)


 def _infer_node_if_inferred(context, element):
     """
     TODO This function is temporary: Merge with infer_node.
     """
-    pass
+    parent = element
+    while parent is not None:
+        parent = parent.parent
+        predefined_if_name_dict = context.predefined_names.get(parent)
+        if predefined_if_name_dict is not None:
+            return _infer_node(context, element)
+    return _infer_node_cached(context, element)
+
+
+@inference_state_method_cache(default=NO_VALUES)
+def _infer_node_cached(context, element):
+    return _infer_node(context, element)
+
+
+@debug.increase_indent
+@_limit_value_infers
+def _infer_node(context, element):
+    debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
+    inference_state = context.inference_state
+    typ = element.type
+    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
+        return infer_atom(context, element)
+    elif typ == 'lambdef':
+        return ValueSet([FunctionValue.from_context(context, element)])
+    elif typ == 'expr_stmt':
+        return infer_expr_stmt(context, element)
+    elif typ in ('power', 'atom_expr'):
+        first_child = element.children[0]
+        children = element.children[1:]
+        had_await = False
+        if first_child.type == 'keyword' and first_child.value == 'await':
+            had_await = True
+            first_child = children.pop(0)
+
+        value_set = context.infer_node(first_child)
+        for (i, trailer) in enumerate(children):
+            if trailer == '**':  # has a power operation.
+                right = context.infer_node(children[i + 1])
+                value_set = _infer_comparison(
+                    context,
+                    value_set,
+                    trailer,
+                    right
+                )
+                break
+            value_set = infer_trailer(context, value_set, trailer)
+
+        if had_await:
+            return value_set.py__await__().py__stop_iteration_returns()
+        return value_set
+    elif typ in ('testlist_star_expr', 'testlist',):
+        # The implicit tuple in statements.
+        return ValueSet([iterable.SequenceLiteralValue(inference_state, context, element)])
+    elif typ in ('not_test', 'factor'):
+        value_set = context.infer_node(element.children[-1])
+        for operator in element.children[:-1]:
+            value_set = infer_factor(value_set, operator)
+        return value_set
+    elif typ == 'test':
+        # `x if foo else y` case.
+        return (context.infer_node(element.children[0])
+                | context.infer_node(element.children[-1]))
+    elif typ == 'operator':
+        # Must be an ellipsis, other operators are not inferred.
+        if element.value != '...':
+            origin = element.parent
+            raise AssertionError("unhandled operator %s in %s" % (repr(element.value), origin))
+        return ValueSet([compiled.builtin_from_name(inference_state, 'Ellipsis')])
+    elif typ == 'dotted_name':
+        value_set = infer_atom(context, element.children[0])
+        for next_name in element.children[2::2]:
+            value_set = value_set.py__getattribute__(next_name, name_context=context)
+        return value_set
+    elif typ == 'eval_input':
+        return context.infer_node(element.children[0])
+    elif typ == 'annassign':
+        return annotation.infer_annotation(context, element.children[1]) \
+            .execute_annotation()
+    elif typ == 'yield_expr':
+        if len(element.children) > 1 and element.children[1].type == 'yield_arg':
+            # Implies that it's a yield from.
+            element = element.children[1].children[1]
+            generators = context.infer_node(element) \
+                .py__getattribute__('__iter__').execute_with_values()
+            return generators.py__stop_iteration_returns()
+
+        # Generator.send() is not implemented.
+        return NO_VALUES
+    elif typ == 'namedexpr_test':
+        return context.infer_node(element.children[2])
+    else:
+        return infer_or_test(context, element)
+
+
+def infer_trailer(context, atom_values, trailer):
+    trailer_op, node = trailer.children[:2]
+    if node == ')':  # `arglist` is optional.
+        node = None
+
+    if trailer_op == '[':
+        trailer_op, node, _ = trailer.children
+        return atom_values.get_item(
+            _infer_subscript_list(context, node),
+            ContextualizedNode(context, trailer)
+        )
+    else:
+        debug.dbg('infer_trailer: %s in %s', trailer, atom_values)
+        if trailer_op == '.':
+            return atom_values.py__getattribute__(
+                name_context=context,
+                name_or_str=node
+            )
+        else:
+            assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
+            args = arguments.TreeArguments(context.inference_state, context, node, trailer)
+            return atom_values.execute(args)


 def infer_atom(context, atom):
@@ -59,7 +285,97 @@ def infer_atom(context, atom):
     generate the node (because it has just one child). In that case an atom
     might be a name or a literal as well.
     """
-    pass
+    state = context.inference_state
+    if atom.type == 'name':
+        # This is the first global lookup.
+        stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef', 'if_stmt') or atom
+        if stmt.type == 'if_stmt':
+            if not any(n.start_pos <= atom.start_pos < n.end_pos for n in stmt.get_test_nodes()):
+                stmt = atom
+        elif stmt.type == 'lambdef':
+            stmt = atom
+        position = stmt.start_pos
+        if _is_annotation_name(atom):
+            # Since Python 3.7 (with from __future__ import annotations),
+            # annotations are essentially strings and can reference objects
+            # that are defined further down in code. Therefore just set the
+            # position to None, so the finder will not try to stop at a certain
+            # position in the module.
+            position = None
+        return context.py__getattribute__(atom, position=position)
+    elif atom.type == 'keyword':
+        # For False/True/None
+        if atom.value in ('False', 'True', 'None'):
+            return ValueSet([compiled.builtin_from_name(state, atom.value)])
+        elif atom.value == 'yield':
+            # Contrary to yield from, yield can just appear alone to return a
+            # value when used with `.send()`.
+            return NO_VALUES
+        assert False, 'Cannot infer the keyword %s' % atom
+
+    elif isinstance(atom, tree.Literal):
+        string = state.compiled_subprocess.safe_literal_eval(atom.value)
+        return ValueSet([compiled.create_simple_object(state, string)])
+    elif atom.type == 'strings':
+        # Will be multiple strings.
+        value_set = infer_atom(context, atom.children[0])
+        for string in atom.children[1:]:
+            right = infer_atom(context, string)
+            value_set = _infer_comparison(context, value_set, '+', right)
+        return value_set
+    elif atom.type == 'fstring':
+        return compiled.get_string_value_set(state)
+    else:
+        c = atom.children
+        # Parentheses without commas are not tuples.
+        if c[0] == '(' and not len(c) == 2 \
+                and not (c[1].type == 'testlist_comp'
+                         and len(c[1].children) > 1):
+            return context.infer_node(c[1])
+
+        try:
+            comp_for = c[1].children[1]
+        except (IndexError, AttributeError):
+            pass
+        else:
+            if comp_for == ':':
+                # Dict comprehensions have a colon at the 3rd index.
+                try:
+                    comp_for = c[1].children[3]
+                except IndexError:
+                    pass
+
+            if comp_for.type in ('comp_for', 'sync_comp_for'):
+                return ValueSet([iterable.comprehension_from_atom(
+                    state, context, atom
+                )])
+
+        # It's a dict/list/tuple literal.
+        array_node = c[1]
+        try:
+            array_node_c = array_node.children
+        except AttributeError:
+            array_node_c = []
+        if c[0] == '{' and (array_node == '}' or ':' in array_node_c
+                            or '**' in array_node_c):
+            new_value = iterable.DictLiteralValue(state, context, atom)
+        else:
+            new_value = iterable.SequenceLiteralValue(state, context, atom)
+        return ValueSet([new_value])
+
+
+@_limit_value_infers
+def infer_expr_stmt(context, stmt, seek_name=None):
+    with recursion.execution_allowed(context.inference_state, stmt) as allowed:
+        if allowed:
+            if seek_name is not None:
+                pep0484_values = \
+                    annotation.find_type_from_comment_hint_assign(context, stmt, seek_name)
+                if pep0484_values:
+                    return pep0484_values
+
+            return _infer_expr_stmt(context, stmt, seek_name)
+    return NO_VALUES


 @debug.increase_indent
@@ -78,7 +394,91 @@ def _infer_expr_stmt(context, stmt, seek_name=None):

     :param stmt: A `tree.ExprStmt`.
     """
-    pass
+    def check_setitem(stmt):
+        atom_expr = stmt.children[0]
+        if atom_expr.type not in ('atom_expr', 'power'):
+            return False, None
+        name = atom_expr.children[0]
+        if name.type != 'name' or len(atom_expr.children) != 2:
+            return False, None
+        trailer = atom_expr.children[-1]
+        return trailer.children[0] == '[', trailer.children[1]
+
+    debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
+    rhs = stmt.get_rhs()
+
+    value_set = context.infer_node(rhs)
+
+    if seek_name:
+        n = TreeNameDefinition(context, seek_name)
+        value_set = check_tuple_assignments(n, value_set)
+
+    first_operator = next(stmt.yield_operators(), None)
+    is_setitem, subscriptlist = check_setitem(stmt)
+    is_annassign = first_operator not in ('=', None) and first_operator.type == 'operator'
+    if is_annassign or is_setitem:
+        # `=` is always the last character in aug assignments -> -1
+        name = stmt.get_defined_names(include_setitem=True)[0].value
+        left_values = context.py__getattribute__(name, position=stmt.start_pos)
+
+        if is_setitem:
+            def to_mod(v):
+                c = ContextualizedSubscriptListNode(context, subscriptlist)
+                if v.array_type == 'dict':
+                    return DictModification(v, value_set, c)
+                elif v.array_type == 'list':
+                    return ListModification(v, value_set, c)
+                return v
+
+            value_set = ValueSet(to_mod(v) for v in left_values)
+        else:
+            operator = copy.copy(first_operator)
+            operator.value = operator.value[:-1]
+            for_stmt = tree.search_ancestor(stmt, 'for_stmt')
+            if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
+                    and parser_utils.for_stmt_defines_one_name(for_stmt):
+                # Iterate through result and add the values, that's possible
+                # only in for loops without clutter, because they are
+                # predictable. Also only do it, if the variable is not a tuple.
+                node = for_stmt.get_testlist()
+                cn = ContextualizedNode(context, node)
+                ordered = list(cn.infer().iterate(cn))
+
+                for lazy_value in ordered:
+                    dct = {for_stmt.children[1].value: lazy_value.infer()}
+                    with context.predefine_names(for_stmt, dct):
+                        t = context.infer_node(rhs)
+                        left_values = _infer_comparison(context, left_values, operator, t)
+                value_set = left_values
+            else:
+                value_set = _infer_comparison(context, left_values, operator, value_set)
+    debug.dbg('infer_expr_stmt result %s', value_set)
+    return value_set
+
+
+def infer_or_test(context, or_test):
+    iterator = iter(or_test.children)
+    types = context.infer_node(next(iterator))
+    for operator in iterator:
+        right = next(iterator)
+        if operator.type == 'comp_op':  # not in / is not
+            operator = ' '.join(c.value for c in operator.children)
+
+        # handle type inference of and/or here.
+        if operator in ('and', 'or'):
+            left_bools = set(left.py__bool__() for left in types)
+            if left_bools == {True}:
+                if operator == 'and':
+                    types = context.infer_node(right)
+            elif left_bools == {False}:
+                if operator != 'and':
+                    types = context.infer_node(right)
+            # Otherwise continue, because of uncertainty.
+        else:
+            types = _infer_comparison(context, types, operator,
+                                      context.infer_node(right))
+    debug.dbg('infer_or_test types %s', types)
+    return types


 @iterator_to_value_set
@@ -86,31 +486,409 @@ def infer_factor(value_set, operator):
     """
     Calculates `+`, `-`, `~` and `not` prefixes.
     """
-    pass
+    for value in value_set:
+        if operator == '-':
+            if is_number(value):
+                yield value.negate()
+        elif operator == 'not':
+            b = value.py__bool__()
+            if b is None:  # Uncertainty.
+                return
+            yield compiled.create_simple_object(value.inference_state, not b)
+        else:
+            yield value
+

+def _literals_to_types(inference_state, result):
+    # Changes literals ('a', 1, 1.0, etc) to its type instances (str(),
+    # int(), float(), etc).
+    new_result = NO_VALUES
+    for typ in result:
+        if is_literal(typ):
+            # Literals are only valid as long as the operations are
+            # correct. Otherwise add a value-free instance.
+            cls = compiled.builtin_from_name(inference_state, typ.name.string_name)
+            new_result |= cls.execute_with_values()
+        else:
+            new_result |= ValueSet([typ])
+    return new_result

+
+def _infer_comparison(context, left_values, operator, right_values):
+    state = context.inference_state
+    if isinstance(operator, str):
+        operator_str = operator
+    else:
+        operator_str = str(operator.value)
+    if not left_values or not right_values:
+        # illegal slices e.g. cause left/right_result to be None
+        result = (left_values or NO_VALUES) | (right_values or NO_VALUES)
+        return _literals_to_types(state, result)
+    elif operator_str == "|" and all(
+        value.is_class() or value.is_compiled()
+        for value in itertools.chain(left_values, right_values)
+    ):
+        # ^^^ A naive hack for PEP 604
+        return ValueSet.from_sets((left_values, right_values))
+    else:
+        # I don't think there's a reasonable chance that a string
+        # operation is still correct, once we pass something like six
+        # objects.
+        if len(left_values) * len(right_values) > 6:
+            return _literals_to_types(state, left_values | right_values)
+        else:
+            return ValueSet.from_sets(
+                _infer_comparison_part(state, context, left, operator, right)
+                for left in left_values
+                for right in right_values
+            )
+
+
+def _is_annotation_name(name):
+    ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt')
+    if ancestor is None:
+        return False
+
+    if ancestor.type in ('param', 'funcdef'):
+        ann = ancestor.annotation
+        if ann is not None:
+            return ann.start_pos <= name.start_pos < ann.end_pos
+    elif ancestor.type == 'expr_stmt':
+        c = ancestor.children
+        if len(c) > 1 and c[1].type == 'annassign':
+            return c[1].start_pos <= name.start_pos < c[1].end_pos
+    return False
+
+
+def _is_list(value):
+    return value.array_type == 'list'
+
+
+def _is_tuple(value):
+    return value.array_type == 'tuple'
+
+
+def _bool_to_value(inference_state, bool_):
+    return compiled.builtin_from_name(inference_state, str(bool_))
+
+
+def _get_tuple_ints(value):
+    if not isinstance(value, iterable.SequenceLiteralValue):
+        return None
+    numbers = []
+    for lazy_value in value.py__iter__():
+        if not isinstance(lazy_value, LazyTreeValue):
+            return None
+        node = lazy_value.data
+        if node.type != 'number':
+            return None
+        try:
+            numbers.append(int(node.value))
+        except ValueError:
+            return None
+    return numbers
+
+
+def _infer_comparison_part(inference_state, context, left, operator, right):
+    l_is_num = is_number(left)
+    r_is_num = is_number(right)
+    if isinstance(operator, str):
+        str_operator = operator
+    else:
+        str_operator = str(operator.value)
+
+    if str_operator == '*':
+        # for iterables, ignore * operations
+        if isinstance(left, iterable.Sequence) or is_string(left):
+            return ValueSet([left])
+        elif isinstance(right, iterable.Sequence) or is_string(right):
+            return ValueSet([right])
+    elif str_operator == '+':
+        if l_is_num and r_is_num or is_string(left) and is_string(right):
+            return left.execute_operation(right, str_operator)
+        elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right):
+            return ValueSet([iterable.MergedArray(inference_state, (left, right))])
+    elif str_operator == '-':
+        if l_is_num and r_is_num:
+            return left.execute_operation(right, str_operator)
+    elif str_operator == '%':
+        # With strings and numbers the left type typically remains. Except for
+        # `int() % float()`.
+        return ValueSet([left])
+    elif str_operator in COMPARISON_OPERATORS:
+        if left.is_compiled() and right.is_compiled():
+            # Possible because both sides are compiled; just execute the comparison.
+            result = left.execute_operation(right, str_operator)
+            if result:
+                return result
+        else:
+            if str_operator in ('is', '!=', '==', 'is not'):
+                operation = COMPARISON_OPERATORS[str_operator]
+                bool_ = operation(left, right)
+                # We can only continue if == returns True or != returns False;
+                # otherwise there is no guarantee that the values differ. This
+                # helps in some cases, but does not cover everything.
+                if (str_operator in ('is', '==')) == bool_:
+                    return ValueSet([_bool_to_value(inference_state, bool_)])
+
+            if isinstance(left, VersionInfo):
+                version_info = _get_tuple_ints(right)
+                if version_info is not None:
+                    bool_result = compiled.access.COMPARISON_OPERATORS[operator](
+                        inference_state.environment.version_info,
+                        tuple(version_info)
+                    )
+                    return ValueSet([_bool_to_value(inference_state, bool_result)])
+
+        return ValueSet([
+            _bool_to_value(inference_state, True),
+            _bool_to_value(inference_state, False)
+        ])
+    elif str_operator in ('in', 'not in'):
+        return NO_VALUES
+
+    def check(obj):
+        """Checks if a Jedi object is either a float or an int."""
+        return isinstance(obj, TreeInstance) and \
+            obj.name.string_name in ('int', 'float')
+
+    # Static analysis, one is a number, the other one is not.
+    if str_operator in ('+', '-') and l_is_num != r_is_num \
+            and not (check(left) or check(right)):
+        message = "TypeError: unsupported operand type(s) for +: %s and %s"
+        analysis.add(context, 'type-error-operation', operator,
+                     message % (left, right))
+
+    if left.is_class() or right.is_class():
+        return NO_VALUES
+
+    method_name = operator_to_magic_method[str_operator]
+    magic_methods = left.py__getattribute__(method_name)
+    if magic_methods:
+        result = magic_methods.execute_with_values(right)
+        if result:
+            return result
+
+    if not magic_methods:
+        reverse_method_name = reverse_operator_to_magic_method[str_operator]
+        magic_methods = right.py__getattribute__(reverse_method_name)
+
+        result = magic_methods.execute_with_values(left)
+        if result:
+            return result
+
+    result = ValueSet([left, right])
+    debug.dbg('Used operator %s resulting in %s', operator, result)
+    return result
+
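The magic-method fallback at the end of _infer_comparison_part follows CPython's binary-operator protocol: if the left operand's method is missing or yields nothing, the right operand's reflected method is tried. A self-contained illustration:

    class Meters:
        def __init__(self, n):
            self.n = n
        def __radd__(self, other):
            # Called because int.__add__ cannot handle Meters.
            return Meters(other + self.n)

    assert (3 + Meters(4)).n == 7
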
+
+@plugin_manager.decorate()
+def tree_name_to_values(inference_state, context, tree_name):
+    value_set = NO_VALUES
+    module_node = context.get_root_context().tree_node
+    # First check for annotations, like: `foo: int = 3`
+    if module_node is not None:
+        names = module_node.get_used_names().get(tree_name.value, [])
+        found_annotation = False
+        for name in names:
+            expr_stmt = name.parent
+
+            if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
+                correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
+                if correct_scope:
+                    found_annotation = True
+                    value_set |= annotation.infer_annotation(
+                        context, expr_stmt.children[1].children[1]
+                    ).execute_annotation()
+        if found_annotation:
+            return value_set
+
+    types = []
+    node = tree_name.get_definition(import_name_always=True, include_setitem=True)
+    if node is None:
+        node = tree_name.parent
+        if node.type == 'global_stmt':
+            c = context.create_context(tree_name)
+            if c.is_module():
+                # In case we are already part of the module, there is no point
+                # in looking up the global statement anymore, because it's not
+                # valid at that point anyway.
+                return NO_VALUES
+            # For global_stmt lookups, we only need the first possible scope,
+            # which means the function itself.
+            filter = next(c.get_filters())
+            names = filter.get(tree_name.value)
+            return ValueSet.from_sets(name.infer() for name in names)
+        elif node.type not in ('import_from', 'import_name'):
+            c = context.create_context(tree_name)
+            return infer_atom(c, tree_name)
+
+    typ = node.type
+    if typ == 'for_stmt':
+        types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
+        if types:
+            return types
+    if typ == 'with_stmt':
+        types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
+        if types:
+            return types
+
+    if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
+        try:
+            types = context.predefined_names[node][tree_name.value]
+        except KeyError:
+            cn = ContextualizedNode(context, node.children[3])
+            for_types = iterate_values(
+                cn.infer(),
+                contextualized_node=cn,
+                is_async=node.parent.type == 'async_stmt',
+            )
+            n = TreeNameDefinition(context, tree_name)
+            types = check_tuple_assignments(n, for_types)
+    elif typ == 'expr_stmt':
+        types = infer_expr_stmt(context, node, tree_name)
+    elif typ == 'with_stmt':
+        value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
+        if node.parent.type == 'async_stmt':
+            # In the case of `async with` statements, we need to
+            # first get the coroutine from the `__aenter__` method,
+            # then "unwrap" via the `__await__` method
+            enter_methods = value_managers.py__getattribute__('__aenter__')
+            coro = enter_methods.execute_with_values()
+            return coro.py__await__().py__stop_iteration_returns()
+        enter_methods = value_managers.py__getattribute__('__enter__')
+        return enter_methods.execute_with_values()
+    elif typ in ('import_from', 'import_name'):
+        types = imports.infer_import(context, tree_name)
+    elif typ in ('funcdef', 'classdef'):
+        types = _apply_decorators(context, node)
+    elif typ == 'try_stmt':
+        # TODO an exception can also be a tuple. Check for those.
+        # TODO check for types that are not classes and add them to
+        # the static analysis report.
+        exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling())
+        types = exceptions.execute_with_values()
+    elif typ == 'param':
+        types = NO_VALUES
+    elif typ == 'del_stmt':
+        types = NO_VALUES
+    elif typ == 'namedexpr_test':
+        types = infer_node(context, node)
+    else:
+        raise ValueError("Should not happen. type: %s" % typ)
+    return types
+
+
+# We don't want to have functions/classes that are created by the same
+# tree_node.
 @inference_state_method_cache()
 def _apply_decorators(context, node):
     """
     Returns the function that should be executed in the end.
     This is also the place where the decorators are processed.
     """
-    pass
+    if node.type == 'classdef':
+        decoratee_value = ClassValue(
+            context.inference_state,
+            parent_context=context,
+            tree_node=node
+        )
+    else:
+        decoratee_value = FunctionValue.from_context(context, node)
+    initial = values = ValueSet([decoratee_value])
+
+    if is_big_annoying_library(context):
+        return values
+
+    for dec in reversed(node.get_decorators()):
+        debug.dbg('decorator: %s %s', dec, values, color="MAGENTA")
+        with debug.increase_indent_cm():
+            dec_values = context.infer_node(dec.children[1])
+            trailer_nodes = dec.children[2:-1]
+            if trailer_nodes:
+                # Create a trailer and infer it.
+                trailer = tree.PythonNode('trailer', trailer_nodes)
+                trailer.parent = dec
+                dec_values = infer_trailer(context, dec_values, trailer)
+
+            if not len(dec_values):
+                code = dec.get_code(include_prefix=False)
+                # For now, we don't want to hear about the runtime decorator
+                # in typing that was intentionally omitted. This is not
+                # "correct", but helps with debugging.
+                if code != '@runtime\n':
+                    debug.warning('decorator not found: %s on %s', dec, node)
+                return initial
+
+            values = dec_values.execute(arguments.ValuesArguments([values]))
+            if not len(values):
+                debug.warning('not possible to resolve wrappers found %s', node)
+                return initial
+
+        debug.dbg('decorator end %s', values, color="MAGENTA")
+    if values != initial:
+        return ValueSet([Decoratee(c, decoratee_value) for c in values])
+    return values

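The reversed() in _apply_decorators matches how CPython applies stacked decorators bottom-up. A quick standalone check:

    def twice(f):
        return lambda x: f(f(x))

    def inc(f):
        return lambda x: f(x) + 1

    @twice
    @inc
    def ident(x):
        return x

    # Equivalent to twice(inc(ident)): the innermost decorator runs first.
    assert ident(0) == 2
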

 def check_tuple_assignments(name, value_set):
     """
     Checks if tuples are assigned.
     """
-    pass
+    lazy_value = None
+    for index, node in name.assignment_indexes():
+        cn = ContextualizedNode(name.parent_context, node)
+        iterated = value_set.iterate(cn)
+        if isinstance(index, slice):
+            # Star unpacking is not supported here, so give up.
+            return NO_VALUES
+        i = 0
+        while i <= index:
+            try:
+                lazy_value = next(iterated)
+            except StopIteration:
+                # We could pass a default to next(), but then a huge index
+                # would keep this loop running for a very long time.
+                # Therefore, bail out as soon as the iterator is exhausted.
+                return NO_VALUES
+            else:
+                i += lazy_value.max
+        value_set = lazy_value.infer()
+    return value_set

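check_tuple_assignments effectively asks "which element of the iterated right-hand side binds to this name?". A plain-Python sketch of that lookup (element_at is a hypothetical helper, written here only for illustration):

    def element_at(iterable, index):
        it = iter(iterable)
        i = 0
        value = None
        while i <= index:
            try:
                value = next(it)
            except StopIteration:
                return None  # same bail-out as the NO_VALUES return above
            i += 1  # the lazy values above may advance by more via .max
        return value

    assert element_at("xyz", 1) == "y"  # b in `a, b, c = "xyz"`
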

 class ContextualizedSubscriptListNode(ContextualizedNode):
-    pass
+    def infer(self):
+        return _infer_subscript_list(self.context, self.node)


 def _infer_subscript_list(context, index):
     """
     Handles slices in subscript nodes.
     """
-    pass
+    if index == ':':
+        # Like array[:]
+        return ValueSet([iterable.Slice(context, None, None, None)])
+
+    elif index.type == 'subscript' and not index.children[0] == '.':
+        # subscript basically implies a slice operation
+        # e.g. array[:3]
+        result = []
+        for el in index.children:
+            if el == ':':
+                if not result:
+                    result.append(None)
+            elif el.type == 'sliceop':
+                if len(el.children) == 2:
+                    result.append(el.children[1])
+            else:
+                result.append(el)
+        result += [None] * (3 - len(result))
+
+        return ValueSet([iterable.Slice(context, *result)])
+    elif index.type == 'subscriptlist':
+        return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)])
+
+    # No slices
+    return context.infer_node(index)
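
The three node shapes handled by _infer_subscript_list correspond directly to what CPython passes to __getitem__; a probe class makes the mapping concrete:

    class Probe:
        def __getitem__(self, item):
            return item

    p = Probe()
    assert p[:] == slice(None, None, None)       # bare ':'
    assert p[:3] == slice(None, 3, None)         # 'subscript' node
    assert p[1:2, 3] == (slice(1, 2, None), 3)   # 'subscriptlist' node
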
diff --git a/jedi/inference/sys_path.py b/jedi/inference/sys_path.py
index a8e35f04..062a0aa3 100644
--- a/jedi/inference/sys_path.py
+++ b/jedi/inference/sys_path.py
@@ -2,6 +2,7 @@ import os
 import re
 from pathlib import Path
 from importlib.machinery import all_suffixes
+
 from jedi.inference.cache import inference_state_method_cache
 from jedi.inference.base_value import ContextualizedNode
 from jedi.inference.helpers import is_string, get_str_or_none
@@ -9,9 +10,25 @@ from jedi.parser_utils import get_cached_code_lines
 from jedi.file_io import FileIO
 from jedi import settings
 from jedi import debug
+
 _BUILDOUT_PATH_INSERTION_LIMIT = 10


+def _abs_path(module_context, str_path: str):
+    path = Path(str_path)
+    if path.is_absolute():
+        return path
+
+    module_path = module_context.py__file__()
+    if module_path is None:
+        # In this case we have no idea where we actually are in the file
+        # system.
+        return None
+
+    base_dir = module_path.parent
+    return base_dir.joinpath(path).absolute()
+
+
 def _paths_from_assignment(module_context, expr_stmt):
     """
     Extracts the assigned strings from an assignment that looks as follows::
@@ -23,12 +40,61 @@ def _paths_from_assignment(module_context, expr_stmt):
     because it will only affect Jedi in very random situations and by adding
     more paths than necessary, it usually benefits the general user.
     """
-    pass
+    for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]):
+        try:
+            assert operator in ['=', '+=']
+            assert assignee.type in ('power', 'atom_expr') and \
+                len(assignee.children) > 1
+            c = assignee.children
+            assert c[0].type == 'name' and c[0].value == 'sys'
+            trailer = c[1]
+            assert trailer.children[0] == '.' and trailer.children[1].value == 'path'
+            # TODO Essentially we're not checking details on sys.path
+            # manipulation. Both assignment of sys.path and changing/adding
+            # parts of sys.path are treated the same: the values get added to
+            # the end of the current sys.path.
+            """
+            execution = c[2]
+            assert execution.children[0] == '['
+            subscript = execution.children[1]
+            assert subscript.type == 'subscript'
+            assert ':' in subscript.children
+            """
+        except AssertionError:
+            continue
+
+        cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt)
+        for lazy_value in cn.infer().iterate(cn):
+            for value in lazy_value.infer():
+                if is_string(value):
+                    abs_path = _abs_path(module_context, value.get_safe_value())
+                    if abs_path is not None:
+                        yield abs_path


 def _paths_from_list_modifications(module_context, trailer1, trailer2):
     """ extract the path from either "sys.path.append" or "sys.path.insert" """
-    pass
+    # Guarantee that both are trailers, the first one a name and the second one
+    # a function execution with at least one param.
+    if not (trailer1.type == 'trailer' and trailer1.children[0] == '.'
+            and trailer2.type == 'trailer' and trailer2.children[0] == '('
+            and len(trailer2.children) == 3):
+        return
+
+    name = trailer1.children[1].value
+    if name not in ['insert', 'append']:
+        return
+    arg = trailer2.children[1]
+    if name == 'insert' and len(arg.children) in (3, 4):  # Possible trailing comma.
+        arg = arg.children[2]
+
+    for value in module_context.create_context(arg).infer_node(arg):
+        p = get_str_or_none(value)
+        if p is None:
+            continue
+        abs_path = _abs_path(module_context, p)
+        if abs_path is not None:
+            yield abs_path


 @inference_state_method_cache(default=[])
@@ -36,7 +102,81 @@ def check_sys_path_modifications(module_context):
     """
     Detect sys.path modifications within module.
     """
-    pass
+    def get_sys_path_powers(names):
+        for name in names:
+            power = name.parent.parent
+            if power is not None and power.type in ('power', 'atom_expr'):
+                c = power.children
+                if c[0].type == 'name' and c[0].value == 'sys' \
+                        and c[1].type == 'trailer':
+                    n = c[1].children[1]
+                    if n.type == 'name' and n.value == 'path':
+                        yield name, power
+
+    if module_context.tree_node is None:
+        return []
+
+    added = []
+    try:
+        possible_names = module_context.tree_node.get_used_names()['path']
+    except KeyError:
+        pass
+    else:
+        for name, power in get_sys_path_powers(possible_names):
+            expr_stmt = power.parent
+            if len(power.children) >= 4:
+                added.extend(
+                    _paths_from_list_modifications(
+                        module_context, *power.children[2:4]
+                    )
+                )
+            elif expr_stmt is not None and expr_stmt.type == 'expr_stmt':
+                added.extend(_paths_from_assignment(module_context, expr_stmt))
+    return added
+
+
+def discover_buildout_paths(inference_state, script_path):
+    buildout_script_paths = set()
+
+    for buildout_script_path in _get_buildout_script_paths(script_path):
+        for path in _get_paths_from_buildout_script(inference_state, buildout_script_path):
+            buildout_script_paths.add(path)
+            if len(buildout_script_paths) >= _BUILDOUT_PATH_INSERTION_LIMIT:
+                break
+
+    return buildout_script_paths
+
+
+def _get_paths_from_buildout_script(inference_state, buildout_script_path):
+    file_io = FileIO(str(buildout_script_path))
+    try:
+        module_node = inference_state.parse(
+            file_io=file_io,
+            cache=True,
+            cache_path=settings.cache_directory
+        )
+    except IOError:
+        debug.warning('Error trying to read buildout_script: %s', buildout_script_path)
+        return
+
+    from jedi.inference.value import ModuleValue
+    module_context = ModuleValue(
+        inference_state, module_node,
+        file_io=file_io,
+        string_names=None,
+        code_lines=get_cached_code_lines(inference_state.grammar, buildout_script_path),
+    ).as_context()
+    yield from check_sys_path_modifications(module_context)
+
+
+def _get_parent_dir_with_file(path: Path, filename):
+    for parent in path.parents:
+        try:
+            if parent.joinpath(filename).is_file():
+                return parent
+        except OSError:
+            continue
+    return None


 def _get_buildout_script_paths(search_path: Path):
@@ -47,7 +187,33 @@ def _get_buildout_script_paths(search_path: Path):

     :param search_path: absolute path to the module.
     """
-    pass
+    project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg')
+    if not project_root:
+        return
+    bin_path = project_root.joinpath('bin')
+    if not bin_path.exists():
+        return
+
+    for filename in os.listdir(bin_path):
+        try:
+            filepath = bin_path.joinpath(filename)
+            with open(filepath, 'r') as f:
+                firstline = f.readline()
+                if firstline.startswith('#!') and 'python' in firstline:
+                    yield filepath
+        except (UnicodeDecodeError, IOError) as e:
+            # Probably a binary file, a permission error, or a race condition
+            # because the file got deleted. Ignore it.
+            continue
+
+
+def remove_python_path_suffix(path):
+    for suffix in all_suffixes() + ['.pyi']:
+        if path.suffix == suffix:
+            path = path.with_name(path.stem)
+            break
+    return path


 def transform_path_to_dotted(sys_path, module_path):
@@ -60,4 +226,47 @@ def transform_path_to_dotted(sys_path, module_path):
     Returns (None, False) if the path doesn't really resolve to anything.
     The second element of the returned tuple indicates whether it is a package.
     """
-    pass
+    # First remove the suffix.
+    module_path = remove_python_path_suffix(module_path)
+    if module_path.name.startswith('.'):
+        return None, False
+
+    # Once the suffix was removed we are using the files as we know them. This
+    # means that if someone uses an ending like .vim for a Python file, .vim
+    # will be part of the returned dotted part.
+
+    is_package = module_path.name == '__init__'
+    if is_package:
+        module_path = module_path.parent
+
+    def iter_potential_solutions():
+        for p in sys_path:
+            if str(module_path).startswith(p):
+                # Strip the trailing slash/backslash
+                rest = str(module_path)[len(p):]
+                # On Windows a path can also use a slash.
+                if rest.startswith(os.path.sep) or rest.startswith('/'):
+                    # Remove a slash in cases it's still there.
+                    rest = rest[1:]
+
+                if rest:
+                    split = rest.split(os.path.sep)
+                    if not all(split):
+                        # This means that part of the file path was empty; this
+                        # is very strange and probably means the file is
+                        # literally called `.py`.
+                        return
+                    # Stub folders for foo can end with foo-stubs. Just remove
+                    # it.
+                    yield tuple(re.sub(r'-stubs$', '', s) for s in split)
+
+    potential_solutions = tuple(iter_potential_solutions())
+    if not potential_solutions:
+        return None, False
+    # Try to find the shortest path; this usually makes more sense, because
+    # the user typically has venvs somewhere. A path like
+    # .tox/py37/lib/python3.7/os.py can be normal for a file, and in that
+    # case we definitely want to return ['os'] as a path and not a crazy
+    # ['.tox', 'py37', 'lib', 'python3.7', 'os']. Keep in mind that this is a
+    # heuristic and there's no way to "always" get it right.
+    return sorted(potential_solutions, key=lambda p: len(p))[0], is_package
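
To make the shortest-match heuristic concrete, here is a simplified standalone sketch (to_dotted is a hypothetical helper, not jedi's API; the real function also strips suffixes and handles __init__):

    import os
    import re

    def to_dotted(sys_path, module_path):
        candidates = []
        for p in sys_path:
            if module_path.startswith(p):
                rest = module_path[len(p):].lstrip('/' + os.sep)
                if rest:
                    parts = rest.replace(os.sep, '/').split('/')
                    candidates.append(
                        tuple(re.sub(r'-stubs$', '', s) for s in parts))
        return min(candidates, key=len, default=None)

    assert to_dotted(['/venv/lib', '/venv'], '/venv/lib/os') == ('os',)
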
diff --git a/jedi/inference/utils.py b/jedi/inference/utils.py
index 9e8c4321..ab10bcd9 100644
--- a/jedi/inference/utils.py
+++ b/jedi/inference/utils.py
@@ -2,17 +2,30 @@
 import functools
 import re
 import os
+
+
 _sep = os.path.sep
 if os.path.altsep is not None:
     _sep += os.path.altsep
-_path_re = re.compile('(?:\\.[^{0}]+|[{0}]__init__\\.py)$'.format(re.escape
-    (_sep)))
+_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
 del _sep


+def to_list(func):
+    def wrapper(*args, **kwargs):
+        return list(func(*args, **kwargs))
+    return wrapper
+
+
+def to_tuple(func):
+    def wrapper(*args, **kwargs):
+        return tuple(func(*args, **kwargs))
+    return wrapper
+
+
 def unite(iterable):
     """Turns a two dimensional array into a one dimensional."""
-    pass
+    return set(typ for types in iterable for typ in types)


 class UncaughtAttributeError(Exception):
@@ -27,6 +40,10 @@ class UncaughtAttributeError(Exception):
     """


+def safe_property(func):
+    return property(reraise_uncaught(func))
+
+
 def reraise_uncaught(func):
     """
     Re-throw uncaught `AttributeError`.
@@ -44,16 +61,24 @@ def reraise_uncaught(func):
     `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch.
     This helps us noticing bugs earlier and facilitates debugging.
     """
-    pass
+    @functools.wraps(func)
+    def wrapper(*args, **kwds):
+        try:
+            return func(*args, **kwds)
+        except AttributeError as e:
+            raise UncaughtAttributeError(e) from e
+    return wrapper


 class PushBackIterator:
-
     def __init__(self, iterator):
         self.pushes = []
         self.iterator = iterator
         self.current = None

+    def push_back(self, value):
+        self.pushes.append(value)
+
     def __iter__(self):
         return self

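push_back makes one-element lookahead trivial; a self-contained version of the pattern (mirroring the class above, whose unchanged __next__ pops pushed-back values before consuming the iterator):

    class PushBack:
        def __init__(self, iterator):
            self.iterator = iterator
            self.pushes = []

        def push_back(self, value):
            self.pushes.append(value)

        def __iter__(self):
            return self

        def __next__(self):
            if self.pushes:
                return self.pushes.pop()
            return next(self.iterator)

    pb = PushBack(iter([1, 2]))
    peeked = next(pb)     # look at the first element...
    pb.push_back(peeked)  # ...and put it back
    assert list(pb) == [1, 2]
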
diff --git a/jedi/inference/value/decorator.py b/jedi/inference/value/decorator.py
index be2e0d4c..69c4cb6a 100644
--- a/jedi/inference/value/decorator.py
+++ b/jedi/inference/value/decorator.py
@@ -1,12 +1,34 @@
-"""
+'''
 Decorators are not really values, however we need some wrappers to improve
 docstrings and other things around decorators.
-"""
+'''
+
 from jedi.inference.base_value import ValueWrapper, ValueSet


 class Decoratee(ValueWrapper):
-
     def __init__(self, wrapped_value, original_value):
         super().__init__(wrapped_value)
         self._original_value = original_value
+
+    def py__doc__(self):
+        return self._original_value.py__doc__()
+
+    def py__get__(self, instance, class_value):
+        return ValueSet(
+            Decoratee(v, self._original_value)
+            for v in self._wrapped_value.py__get__(instance, class_value)
+        )
+
+    def get_signatures(self):
+        signatures = self._wrapped_value.get_signatures()
+        if signatures:
+            return signatures
+        # Fallback to signatures of the original function/class if the
+        # decorator has no signature or it is not inferrable.
+        #
+        # __get__ means that it's a descriptor. In that case we don't return
+        # signatures, because they are usually properties.
+        if not self._wrapped_value.py__getattribute__('__get__'):
+            return self._original_value.get_signatures()
+        return []
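
The fallback in get_signatures addresses a familiar runtime problem: decorators that don't use functools.wraps hide the original signature. A plain-Python demonstration of the effect being worked around:

    import inspect

    def plain_decorator(f):
        def wrapper(*args, **kwargs):  # no functools.wraps(f)
            return f(*args, **kwargs)
        return wrapper

    @plain_decorator
    def add(a, b):
        return a + b

    # The wrapper only exposes (*args, **kwargs); recovering (a, b)
    # requires falling back to the original function.
    assert str(inspect.signature(add)) == '(*args, **kwargs)'
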
diff --git a/jedi/inference/value/dynamic_arrays.py b/jedi/inference/value/dynamic_arrays.py
index 419cd8a5..c451612d 100644
--- a/jedi/inference/value/dynamic_arrays.py
+++ b/jedi/inference/value/dynamic_arrays.py
@@ -22,16 +22,22 @@ It is important to note that:
 from jedi import debug
 from jedi import settings
 from jedi.inference import recursion
-from jedi.inference.base_value import ValueSet, NO_VALUES, HelperValueMixin, ValueWrapper
+from jedi.inference.base_value import ValueSet, NO_VALUES, HelperValueMixin, \
+    ValueWrapper
 from jedi.inference.lazy_value import LazyKnownValues
 from jedi.inference.helpers import infer_call_of_leaf
 from jedi.inference.cache import inference_state_method_cache
+
 _sentinel = object()


 def check_array_additions(context, sequence):
     """ Just a mapper function for the internal _internal_check_array_additions """
-    pass
+    if sequence.array_type not in ('list', 'set'):
+        # TODO also check for dict updates
+        return NO_VALUES
+
+    return _internal_check_array_additions(context, sequence)


 @inference_state_method_cache(default=NO_VALUES)
@@ -43,12 +49,85 @@ def _internal_check_array_additions(context, sequence):
     >>> a = [""]
     >>> a.append(1)
     """
-    pass
+    from jedi.inference import arguments
+
+    debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA')
+    module_context = context.get_root_context()
+    if not settings.dynamic_array_additions or module_context.is_compiled():
+        debug.dbg('Dynamic array search aborted.', color='MAGENTA')
+        return NO_VALUES
+
+    def find_additions(context, arglist, add_name):
+        params = list(arguments.TreeArguments(context.inference_state, context, arglist).unpack())
+        result = set()
+        if add_name in ['insert']:
+            params = params[1:]
+        if add_name in ['append', 'add', 'insert']:
+            for key, lazy_value in params:
+                result.add(lazy_value)
+        elif add_name in ['extend', 'update']:
+            for key, lazy_value in params:
+                result |= set(lazy_value.infer().iterate())
+        return result
+
+    temp_param_add, settings.dynamic_params_for_other_modules = \
+        settings.dynamic_params_for_other_modules, False
+
+    is_list = sequence.name.string_name == 'list'
+    search_names = (['append', 'extend', 'insert'] if is_list else ['add', 'update'])
+
+    added_types = set()
+    for add_name in search_names:
+        try:
+            possible_names = module_context.tree_node.get_used_names()[add_name]
+        except KeyError:
+            continue
+        else:
+            for name in possible_names:
+                value_node = context.tree_node
+                if not (value_node.start_pos < name.start_pos < value_node.end_pos):
+                    continue
+                trailer = name.parent
+                power = trailer.parent
+                trailer_pos = power.children.index(trailer)
+                try:
+                    execution_trailer = power.children[trailer_pos + 1]
+                except IndexError:
+                    continue
+                else:
+                    if execution_trailer.type != 'trailer' \
+                            or execution_trailer.children[0] != '(' \
+                            or execution_trailer.children[1] == ')':
+                        continue
+
+                random_context = context.create_context(name)
+
+                with recursion.execution_allowed(context.inference_state, power) as allowed:
+                    if allowed:
+                        found = infer_call_of_leaf(
+                            random_context,
+                            name,
+                            cut_own_trailer=True
+                        )
+                        if sequence in found:
+                            # The arrays match. Now add the results
+                            added_types |= find_additions(
+                                random_context,
+                                execution_trailer.children[1],
+                                add_name
+                            )
+
+    # reset settings
+    settings.dynamic_params_for_other_modules = temp_param_add
+    debug.dbg('Dynamic array result %s', added_types, color='MAGENTA')
+    return added_types


 def get_dynamic_array_instance(instance, arguments):
     """Used for set() and list() instances."""
-    pass
+    ai = _DynamicArrayAdditions(instance, arguments)
+    from jedi.inference import arguments
+    return arguments.ValuesArguments([ValueSet([ai])])


 class _DynamicArrayAdditions(HelperValueMixin):
@@ -61,23 +140,61 @@ class _DynamicArrayAdditions(HelperValueMixin):
     in the wild, it's just used within typeshed as an argument to `__init__`
     for set/list and never used in any other place.
     """
-
     def __init__(self, instance, arguments):
         self._instance = instance
         self._arguments = arguments

+    def py__class__(self):
+        tuple_, = self._instance.inference_state.builtins_module.py__getattribute__('tuple')
+        return tuple_

-class _Modification(ValueWrapper):
+    def py__iter__(self, contextualized_node=None):
+        arguments = self._arguments
+        try:
+            _, lazy_value = next(arguments.unpack())
+        except StopIteration:
+            pass
+        else:
+            yield from lazy_value.infer().iterate()
+
+        from jedi.inference.arguments import TreeArguments
+        if isinstance(arguments, TreeArguments):
+            additions = _internal_check_array_additions(arguments.context, self._instance)
+            yield from additions
+
+    def iterate(self, contextualized_node=None, is_async=False):
+        return self.py__iter__(contextualized_node)

+
+class _Modification(ValueWrapper):
     def __init__(self, wrapped_value, assigned_values, contextualized_key):
         super().__init__(wrapped_value)
         self._assigned_values = assigned_values
         self._contextualized_key = contextualized_key

+    def py__getitem__(self, *args, **kwargs):
+        return self._wrapped_value.py__getitem__(*args, **kwargs) | self._assigned_values
+
+    def py__simple_getitem__(self, index):
+        actual = [
+            v.get_safe_value(_sentinel)
+            for v in self._contextualized_key.infer()
+        ]
+        if index in actual:
+            return self._assigned_values
+        return self._wrapped_value.py__simple_getitem__(index)
+

 class DictModification(_Modification):
-    pass
+    def py__iter__(self, contextualized_node=None):
+        yield from self._wrapped_value.py__iter__(contextualized_node)
+        yield self._contextualized_key
+
+    def get_key_values(self):
+        return self._wrapped_value.get_key_values() | self._contextualized_key.infer()


 class ListModification(_Modification):
-    pass
+    def py__iter__(self, contextualized_node=None):
+        yield from self._wrapped_value.py__iter__(contextualized_node)
+        yield LazyKnownValues(self._assigned_values)
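
The runtime behaviour this module chases is simple to state in plain Python: mutations after the literal change the element types, so inspecting the literal alone is not enough.

    a = ['']       # from the literal alone: a list of str
    a.append(1)    # after the call-site search: str or int elements
    assert {type(x) for x in a} == {str, int}
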
diff --git a/jedi/inference/value/function.py b/jedi/inference/value/function.py
index 7ee96aa8..a89e9c88 100644
--- a/jedi/inference/value/function.py
+++ b/jedi/inference/value/function.py
@@ -1,4 +1,5 @@
 from parso.python import tree
+
 from jedi import debug
 from jedi.inference.cache import inference_state_method_cache, CachedMetaClass
 from jedi.inference import compiled
@@ -6,10 +7,14 @@ from jedi.inference import recursion
 from jedi.inference import docstrings
 from jedi.inference import flow_analysis
 from jedi.inference.signature import TreeSignature
-from jedi.inference.filters import ParserTreeFilter, FunctionExecutionFilter, AnonymousFunctionExecutionFilter
-from jedi.inference.names import ValueName, AbstractNameDefinition, AnonymousParamName, ParamName, NameWrapper
-from jedi.inference.base_value import ContextualizedNode, NO_VALUES, ValueSet, TreeValue, ValueWrapper
-from jedi.inference.lazy_value import LazyKnownValues, LazyKnownValue, LazyTreeValue
+from jedi.inference.filters import ParserTreeFilter, FunctionExecutionFilter, \
+    AnonymousFunctionExecutionFilter
+from jedi.inference.names import ValueName, AbstractNameDefinition, \
+    AnonymousParamName, ParamName, NameWrapper
+from jedi.inference.base_value import ContextualizedNode, NO_VALUES, \
+    ValueSet, TreeValue, ValueWrapper
+from jedi.inference.lazy_value import LazyKnownValues, LazyKnownValue, \
+    LazyTreeValue
 from jedi.inference.context import ValueContext, TreeContextMixin
 from jedi.inference.value import iterable
 from jedi import parser_utils
@@ -26,56 +31,431 @@ class LambdaName(AbstractNameDefinition):
         self._lambda_value = lambda_value
         self.parent_context = lambda_value.parent_context

+    @property
+    def start_pos(self):
+        return self._lambda_value.tree_node.start_pos
+
+    def infer(self):
+        return ValueSet([self._lambda_value])
+

 class FunctionAndClassBase(TreeValue):
-    pass
+    def get_qualified_names(self):
+        if self.parent_context.is_class():
+            n = self.parent_context.get_qualified_names()
+            if n is None:
+                # This means that the parent class lives within a function.
+                return None
+            return n + (self.py__name__(),)
+        elif self.parent_context.is_module():
+            return (self.py__name__(),)
+        else:
+            return None


 class FunctionMixin:
     api_type = 'function'

+    def get_filters(self, origin_scope=None):
+        cls = self.py__class__()
+        for instance in cls.execute_with_values():
+            yield from instance.get_filters(origin_scope=origin_scope)

-class FunctionValue(FunctionMixin, FunctionAndClassBase, metaclass=
-    CachedMetaClass):
-    pass
+    def py__get__(self, instance, class_value):
+        from jedi.inference.value.instance import BoundMethod
+        if instance is None:
+            # Calling the Foo.bar results in the original bar function.
+            return ValueSet([self])
+        return ValueSet([BoundMethod(instance, class_value.as_context(), self)])

+    def get_param_names(self):
+        return [AnonymousParamName(self, param.name)
+                for param in self.tree_node.get_params()]

-class FunctionNameInClass(NameWrapper):
+    @property
+    def name(self):
+        if self.tree_node.type == 'lambdef':
+            return LambdaName(self)
+        return ValueName(self, self.tree_node.name)
+
+    def is_function(self):
+        return True
+
+    def py__name__(self):
+        return self.name.string_name
+
+    def get_type_hint(self, add_class_info=True):
+        return_annotation = self.tree_node.annotation
+        if return_annotation is None:
+            def param_name_to_str(n):
+                s = n.string_name
+                annotation = n.infer().get_type_hint()
+                if annotation is not None:
+                    s += ': ' + annotation
+                if n.default_node is not None:
+                    s += '=' + n.default_node.get_code(include_prefix=False)
+                return s
+
+            function_execution = self.as_context()
+            result = function_execution.infer()
+            return_hint = result.get_type_hint()
+            body = self.py__name__() + '(%s)' % ', '.join([
+                param_name_to_str(n)
+                for n in function_execution.get_param_names()
+            ])
+            if return_hint is None:
+                return body
+        else:
+            return_hint = return_annotation.get_code(include_prefix=False)
+            body = self.py__name__() + self.tree_node.children[2].get_code(include_prefix=False)
+
+        return body + ' -> ' + return_hint
+
+    def py__call__(self, arguments):
+        function_execution = self.as_context(arguments)
+        return function_execution.infer()
+
+    def _as_context(self, arguments=None):
+        if arguments is None:
+            return AnonymousFunctionExecution(self)
+        return FunctionExecutionContext(self, arguments)
+
+    def get_signatures(self):
+        return [TreeSignature(f) for f in self.get_signature_functions()]
+
+
+class FunctionValue(FunctionMixin, FunctionAndClassBase, metaclass=CachedMetaClass):
+    @classmethod
+    def from_context(cls, context, tree_node):
+        def create(tree_node):
+            if context.is_class():
+                return MethodValue(
+                    context.inference_state,
+                    context,
+                    parent_context=parent_context,
+                    tree_node=tree_node
+                )
+            else:
+                return cls(
+                    context.inference_state,
+                    parent_context=parent_context,
+                    tree_node=tree_node
+                )

+        overloaded_funcs = list(_find_overload_functions(context, tree_node))
+
+        parent_context = context
+        while parent_context.is_class() or parent_context.is_instance():
+            parent_context = parent_context.parent_context
+
+        function = create(tree_node)
+
+        if overloaded_funcs:
+            return OverloadedFunctionValue(
+                function,
+                # Get them into the correct order: lower line first.
+                list(reversed([create(f) for f in overloaded_funcs]))
+            )
+        return function
+
+    def py__class__(self):
+        c, = values_from_qualified_names(self.inference_state, 'types', 'FunctionType')
+        return c
+
+    def get_default_param_context(self):
+        return self.parent_context
+
+    def get_signature_functions(self):
+        return [self]
+
+
+class FunctionNameInClass(NameWrapper):
     def __init__(self, class_context, name):
         super().__init__(name)
         self._class_context = class_context

+    def get_defining_qualified_value(self):
+        return self._class_context.get_value()  # Might be None.

-class MethodValue(FunctionValue):

+class MethodValue(FunctionValue):
     def __init__(self, inference_state, class_context, *args, **kwargs):
         super().__init__(inference_state, *args, **kwargs)
         self.class_context = class_context

+    def get_default_param_context(self):
+        return self.class_context
+
+    def get_qualified_names(self):
+        # Need to implement this, because the parent value of a method
+        # value is not the class value but the module.
+        names = self.class_context.get_qualified_names()
+        if names is None:
+            return None
+        return names + (self.py__name__(),)
+
+    @property
+    def name(self):
+        return FunctionNameInClass(self.class_context, super().name)
+

 class BaseFunctionExecutionContext(ValueContext, TreeContextMixin):
+    def infer_annotations(self):
+        raise NotImplementedError
+
+    @inference_state_method_cache(default=NO_VALUES)
+    @recursion.execution_recursion_decorator()
+    def get_return_values(self, check_yields=False):
+        funcdef = self.tree_node
+        if funcdef.type == 'lambdef':
+            return self.infer_node(funcdef.children[-1])
+
+        if check_yields:
+            value_set = NO_VALUES
+            returns = get_yield_exprs(self.inference_state, funcdef)
+        else:
+            value_set = self.infer_annotations()
+            if value_set:
+                # If there are annotations, prefer them over anything else.
+                # This will make it faster.
+                return value_set
+            value_set |= docstrings.infer_return_types(self._value)
+            returns = funcdef.iter_return_stmts()
+
+        for r in returns:
+            if check_yields:
+                value_set |= ValueSet.from_sets(
+                    lazy_value.infer()
+                    for lazy_value in self._get_yield_lazy_value(r)
+                )
+            else:
+                check = flow_analysis.reachability_check(self, funcdef, r)
+                if check is flow_analysis.UNREACHABLE:
+                    debug.dbg('Return unreachable: %s', r)
+                else:
+                    try:
+                        children = r.children
+                    except AttributeError:
+                        ctx = compiled.builtin_from_name(self.inference_state, 'None')
+                        value_set |= ValueSet([ctx])
+                    else:
+                        value_set |= self.infer_node(children[1])
+                if check is flow_analysis.REACHABLE:
+                    debug.dbg('Return reachable: %s', r)
+                    break
+        return value_set
+
+    def _get_yield_lazy_value(self, yield_expr):
+        if yield_expr.type == 'keyword':
+            # `yield` just yields None.
+            ctx = compiled.builtin_from_name(self.inference_state, 'None')
+            yield LazyKnownValue(ctx)
+            return
+
+        node = yield_expr.children[1]
+        if node.type == 'yield_arg':  # It must be a yield from.
+            cn = ContextualizedNode(self, node.children[1])
+            yield from cn.infer().iterate(cn)
+        else:
+            yield LazyTreeValue(self, node)
+
+    @recursion.execution_recursion_decorator(default=iter([]))
+    def get_yield_lazy_values(self, is_async=False):
+        # TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend
+        for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef',
+                                                'while_stmt', 'if_stmt'))
+                       for y in get_yield_exprs(self.inference_state, self.tree_node)]
+
+        # Calculate if the yields are placed within the same for loop.
+        yields_order = []
+        last_for_stmt = None
+        for yield_, for_stmt in for_parents:
+            # For really simple for loops we can predict the order. Otherwise
+            # we just ignore it.
+            parent = for_stmt.parent
+            if parent.type == 'suite':
+                parent = parent.parent
+            if for_stmt.type == 'for_stmt' and parent == self.tree_node \
+                    and parser_utils.for_stmt_defines_one_name(for_stmt):  # Simplicity for now.
+                if for_stmt == last_for_stmt:
+                    yields_order[-1][1].append(yield_)
+                else:
+                    yields_order.append((for_stmt, [yield_]))
+            elif for_stmt == self.tree_node:
+                yields_order.append((None, [yield_]))
+            else:
+                types = self.get_return_values(check_yields=True)
+                if types:
+                    yield LazyKnownValues(types, min=0, max=float('inf'))
+                return
+            last_for_stmt = for_stmt
+
+        for for_stmt, yields in yields_order:
+            if for_stmt is None:
+                # No for_stmt, just normal yields.
+                for yield_ in yields:
+                    yield from self._get_yield_lazy_value(yield_)
+            else:
+                input_node = for_stmt.get_testlist()
+                cn = ContextualizedNode(self, input_node)
+                ordered = cn.infer().iterate(cn)
+                ordered = list(ordered)
+                for lazy_value in ordered:
+                    dct = {str(for_stmt.children[1].value): lazy_value.infer()}
+                    with self.predefine_names(for_stmt, dct):
+                        for yield_in_same_for_stmt in yields:
+                            yield from self._get_yield_lazy_value(yield_in_same_for_stmt)
+
+    def merge_yield_values(self, is_async=False):
+        return ValueSet.from_sets(
+            lazy_value.infer()
+            for lazy_value in self.get_yield_lazy_values()
+        )
+
+    def is_generator(self):
+        return bool(get_yield_exprs(self.inference_state, self.tree_node))

     def infer(self):
         """
         Created to be used by inheritance.
         """
-        pass
+        inference_state = self.inference_state
+        is_coroutine = self.tree_node.parent.type in ('async_stmt', 'async_funcdef')
+        from jedi.inference.gradual.base import GenericClass

+        if is_coroutine:
+            if self.is_generator():
+                async_generator_classes = inference_state.typing_module \
+                    .py__getattribute__('AsyncGenerator')

-class FunctionExecutionContext(BaseFunctionExecutionContext):
+                yield_values = self.merge_yield_values(is_async=True)
+                # The contravariant doesn't seem to be defined.
+                generics = (yield_values.py__class__(), NO_VALUES)
+                return ValueSet(
+                    GenericClass(c, TupleGenericManager(generics))
+                    for c in async_generator_classes
+                ).execute_annotation()
+            else:
+                async_classes = inference_state.typing_module.py__getattribute__('Coroutine')
+                return_values = self.get_return_values()
+                # Only the first generic is relevant.
+                generics = (return_values.py__class__(), NO_VALUES, NO_VALUES)
+                return ValueSet(
+                    GenericClass(c, TupleGenericManager(generics)) for c in async_classes
+                ).execute_annotation()
+        else:
+            # If there are annotations, prefer them over anything else.
+            if self.is_generator() and not self.infer_annotations():
+                return ValueSet([iterable.Generator(inference_state, self)])
+            else:
+                return self.get_return_values()

+
+class FunctionExecutionContext(BaseFunctionExecutionContext):
     def __init__(self, function_value, arguments):
         super().__init__(function_value)
         self._arguments = arguments

+    def get_filters(self, until_position=None, origin_scope=None):
+        yield FunctionExecutionFilter(
+            self, self._value,
+            until_position=until_position,
+            origin_scope=origin_scope,
+            arguments=self._arguments
+        )
+
+    def infer_annotations(self):
+        from jedi.inference.gradual.annotation import infer_return_types
+        return infer_return_types(self._value, self._arguments)
+
+    def get_param_names(self):
+        return [
+            ParamName(self._value, param.name, self._arguments)
+            for param in self._value.tree_node.get_params()
+        ]
+

 class AnonymousFunctionExecution(BaseFunctionExecutionContext):
-    pass
+    def infer_annotations(self):
+        # I don't think inferring anonymous executions is a big thing.
+        # Anonymous contexts are mostly there for the user to work in. ~ dave
+        return NO_VALUES

+    def get_filters(self, until_position=None, origin_scope=None):
+        yield AnonymousFunctionExecutionFilter(
+            self, self._value,
+            until_position=until_position,
+            origin_scope=origin_scope,
+        )
+
+    def get_param_names(self):
+        return self._value.get_param_names()

-class OverloadedFunctionValue(FunctionMixin, ValueWrapper):

+class OverloadedFunctionValue(FunctionMixin, ValueWrapper):
     def __init__(self, function, overloaded_functions):
         super().__init__(function)
         self._overloaded_functions = overloaded_functions
+
+    def py__call__(self, arguments):
+        debug.dbg("Execute overloaded function %s", self._wrapped_value, color='BLUE')
+        function_executions = []
+        for signature in self.get_signatures():
+            function_execution = signature.value.as_context(arguments)
+            function_executions.append(function_execution)
+            if signature.matches_signature(arguments):
+                return function_execution.infer()
+
+        if self.inference_state.is_analysis:
+            # In this case we want precision.
+            return NO_VALUES
+        return ValueSet.from_sets(fe.infer() for fe in function_executions)
+
+    def get_signature_functions(self):
+        return self._overloaded_functions
+
+    def get_type_hint(self, add_class_info=True):
+        return 'Union[%s]' % ', '.join(f.get_type_hint() for f in self._overloaded_functions)
+
+
+def _find_overload_functions(context, tree_node):
+    def _is_overload_decorated(funcdef):
+        if funcdef.parent.type == 'decorated':
+            decorators = funcdef.parent.children[0]
+            if decorators.type == 'decorator':
+                decorators = [decorators]
+            else:
+                decorators = decorators.children
+            for decorator in decorators:
+                dotted_name = decorator.children[1]
+                if dotted_name.type == 'name' and dotted_name.value == 'overload':
+                    # TODO check with values if it's the right overload
+                    return True
+        return False
+
+    if tree_node.type == 'lambdef':
+        return
+
+    if _is_overload_decorated(tree_node):
+        yield tree_node
+
+    while True:
+        filter = ParserTreeFilter(
+            context,
+            until_position=tree_node.start_pos
+        )
+        names = filter.get(tree_node.name.value)
+        assert isinstance(names, list)
+        if not names:
+            break
+
+        found = False
+        for name in names:
+            funcdef = name.tree_name.parent
+            if funcdef.type == 'funcdef' and _is_overload_decorated(funcdef):
+                tree_node = funcdef
+                found = True
+                yield funcdef
+
+        if not found:
+            break
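
_find_overload_functions walks backwards through earlier same-named definitions, which is exactly how typing.overload stubs are laid out in source. A runnable reference for that layout:

    from typing import overload

    @overload
    def double(x: int) -> int: ...
    @overload
    def double(x: str) -> str: ...
    def double(x):
        return x * 2

    # At runtime only the last definition exists; the decorated stubs above
    # are what the overload search collects for signature matching.
    assert double(2) == 4
    assert double('ab') == 'abab'
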
diff --git a/jedi/inference/value/instance.py b/jedi/inference/value/instance.py
index f61454ad..63f220e0 100644
--- a/jedi/inference/value/instance.py
+++ b/jedi/inference/value/instance.py
@@ -1,45 +1,87 @@
 from abc import abstractproperty
+
 from parso.tree import search_ancestor
+
 from jedi import debug
 from jedi import settings
 from jedi.inference import compiled
 from jedi.inference.compiled.value import CompiledValueFilter
 from jedi.inference.helpers import values_from_qualified_names, is_big_annoying_library
 from jedi.inference.filters import AbstractFilter, AnonymousFunctionExecutionFilter
-from jedi.inference.names import ValueName, TreeNameDefinition, ParamName, NameWrapper
-from jedi.inference.base_value import Value, NO_VALUES, ValueSet, iterator_to_value_set, ValueWrapper
+from jedi.inference.names import ValueName, TreeNameDefinition, ParamName, \
+    NameWrapper
+from jedi.inference.base_value import Value, NO_VALUES, ValueSet, \
+    iterator_to_value_set, ValueWrapper
 from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues
 from jedi.inference.cache import inference_state_method_cache
 from jedi.inference.arguments import ValuesArguments, TreeArgumentsWrapper
-from jedi.inference.value.function import FunctionValue, FunctionMixin, OverloadedFunctionValue, BaseFunctionExecutionContext, FunctionExecutionContext, FunctionNameInClass
+from jedi.inference.value.function import \
+    FunctionValue, FunctionMixin, OverloadedFunctionValue, \
+    BaseFunctionExecutionContext, FunctionExecutionContext, FunctionNameInClass
 from jedi.inference.value.klass import ClassFilter
 from jedi.inference.value.dynamic_arrays import get_dynamic_array_instance
 from jedi.parser_utils import function_is_staticmethod, function_is_classmethod


 class InstanceExecutedParamName(ParamName):
-
     def __init__(self, instance, function_value, tree_name):
-        super().__init__(function_value, tree_name, arguments=None)
+        super().__init__(
+            function_value, tree_name, arguments=None)
         self._instance = instance

+    def infer(self):
+        return ValueSet([self._instance])

-class AnonymousMethodExecutionFilter(AnonymousFunctionExecutionFilter):
+    def matches_signature(self):
+        return True

+
+class AnonymousMethodExecutionFilter(AnonymousFunctionExecutionFilter):
     def __init__(self, instance, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self._instance = instance

+    def _convert_param(self, param, name):
+        if param.position_index == 0:
+            if function_is_classmethod(self._function_value.tree_node):
+                return InstanceExecutedParamName(
+                    self._instance.py__class__(),
+                    self._function_value,
+                    name
+                )
+            elif not function_is_staticmethod(self._function_value.tree_node):
+                return InstanceExecutedParamName(
+                    self._instance,
+                    self._function_value,
+                    name
+                )
+        return super()._convert_param(param, name)

-class AnonymousMethodExecutionContext(BaseFunctionExecutionContext):

+class AnonymousMethodExecutionContext(BaseFunctionExecutionContext):
     def __init__(self, instance, value):
         super().__init__(value)
         self.instance = instance

+    def get_filters(self, until_position=None, origin_scope=None):
+        yield AnonymousMethodExecutionFilter(
+            self.instance, self, self._value,
+            until_position=until_position,
+            origin_scope=origin_scope,
+        )

-class MethodExecutionContext(FunctionExecutionContext):
+    def get_param_names(self):
+        param_names = list(self._value.get_param_names())
+        # set the self name
+        param_names[0] = InstanceExecutedParamName(
+            self.instance,
+            self._value,
+            param_names[0].tree_name
+        )
+        return param_names

+
+class MethodExecutionContext(FunctionExecutionContext):
     def __init__(self, instance, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.instance = instance
@@ -50,52 +92,305 @@ class AbstractInstanceValue(Value):

     def __init__(self, inference_state, parent_context, class_value):
         super().__init__(inference_state, parent_context)
+        # Generated instances are instances that are just generated by
+        # using `self` (no arguments).
         self.class_value = class_value

+    def is_instance(self):
+        return True
+
+    def get_qualified_names(self):
+        return self.class_value.get_qualified_names()
+
+    def get_annotated_class_object(self):
+        return self.class_value  # This is the default.
+
+    def py__class__(self):
+        return self.class_value
+
+    def py__bool__(self):
+        # Signal that we don't know about the bool type.
+        return None
+
+    @abstractproperty
+    def name(self):
+        raise NotImplementedError
+
+    def get_signatures(self):
+        call_funcs = self.py__getattribute__('__call__').py__get__(self, self.class_value)
+        return [s.bind(self) for s in call_funcs.get_signatures()]
+
+    def get_function_slot_names(self, name):
+        # Python classes don't look at the dictionary of the instance when
+        # looking up `__call__`. This is something that has to do with Python's
+        # internal slot system (note: not __slots__, but C slots).
+        for filter in self.get_filters(include_self_names=False):
+            names = filter.get(name)
+            if names:
+                return names
+        return []
+
+    def execute_function_slots(self, names, *inferred_args):
+        return ValueSet.from_sets(
+            name.infer().execute_with_values(*inferred_args)
+            for name in names
+        )
+
+    def get_type_hint(self, add_class_info=True):
+        return self.py__name__()
+
+    def py__getitem__(self, index_value_set, contextualized_node):
+        names = self.get_function_slot_names('__getitem__')
+        if not names:
+            return super().py__getitem__(
+                index_value_set,
+                contextualized_node,
+            )
+
+        args = ValuesArguments([index_value_set])
+        return ValueSet.from_sets(name.infer().execute(args) for name in names)
+
+    def py__iter__(self, contextualized_node=None):
+        iter_slot_names = self.get_function_slot_names('__iter__')
+        if not iter_slot_names:
+            return super().py__iter__(contextualized_node)
+
+        def iterate():
+            for generator in self.execute_function_slots(iter_slot_names):
+                yield from generator.py__next__(contextualized_node)
+        return iterate()
+
     def __repr__(self):
-        return '<%s of %s>' % (self.__class__.__name__, self.class_value)
+        return "<%s of %s>" % (self.__class__.__name__, self.class_value)


 class CompiledInstance(AbstractInstanceValue):
-
-    def __init__(self, inference_state, parent_context, class_value, arguments
-        ):
+    # This is not really a compiled class; it's just an instance of a
+    # compiled class.
+    def __init__(self, inference_state, parent_context, class_value, arguments):
         super().__init__(inference_state, parent_context, class_value)
         self._arguments = arguments

+    def get_filters(self, origin_scope=None, include_self_names=True):
+        class_value = self.get_annotated_class_object()
+        class_filters = class_value.get_filters(
+            origin_scope=origin_scope,
+            is_instance=True,
+        )
+        for f in class_filters:
+            yield CompiledInstanceClassFilter(self, f)
+
+    @property
+    def name(self):
+        return compiled.CompiledValueName(self, self.class_value.name.string_name)
+
+    def is_stub(self):
+        return False
+

 class _BaseTreeInstance(AbstractInstanceValue):
+    @property
+    def array_type(self):
+        name = self.class_value.py__name__()
+        if name in ['list', 'set', 'dict'] \
+                and self.parent_context.get_root_context().is_builtins_module():
+            return name
+        return None
+
+    @property
+    def name(self):
+        return ValueName(self, self.class_value.name.tree_name)
+
+    def get_filters(self, origin_scope=None, include_self_names=True):
+        class_value = self.get_annotated_class_object()
+        if include_self_names:
+            for cls in class_value.py__mro__():
+                if not cls.is_compiled():
+                    # In this case we're excluding compiled objects that are
+                    # not fake objects. It doesn't make sense for normal
+                    # compiled objects to search for self variables.
+                    yield SelfAttributeFilter(self, class_value, cls.as_context(), origin_scope)
+
+        class_filters = class_value.get_filters(
+            origin_scope=origin_scope,
+            is_instance=True,
+        )
+        for f in class_filters:
+            if isinstance(f, ClassFilter):
+                yield InstanceClassFilter(self, f)
+            elif isinstance(f, CompiledValueFilter):
+                yield CompiledInstanceClassFilter(self, f)
+            else:
+                # Probably from the metaclass.
+                yield f
+
+    @inference_state_method_cache()
+    def create_instance_context(self, class_context, node):
+        new = node
+        while True:
+            func_node = new
+            new = search_ancestor(new, 'funcdef', 'classdef')
+            if class_context.tree_node is new:
+                func = FunctionValue.from_context(class_context, func_node)
+                bound_method = BoundMethod(self, class_context, func)
+                if func_node.name.value == '__init__':
+                    context = bound_method.as_context(self._arguments)
+                else:
+                    context = bound_method.as_context()
+                break
+        return context.create_context(node)

     def py__getattribute__alternatives(self, string_name):
-        """
+        '''
         Since nothing was inferred, now check the __getattr__ and
         __getattribute__ methods. Stubs don't need to be checked, because
         they don't contain any logic.
-        """
-        pass
+        '''
+        if self.is_stub():
+            return NO_VALUES
+
+        name = compiled.create_simple_object(self.inference_state, string_name)
+
+        # This is a little bit special. In Python, `__getattribute__` is
+        # executed before `__getattr__`, but I know of no use case where
+        # this would matter and where Jedi would return wrong types. If
+        # you ever find one, let me know!
+        # We invert the order, because a hand-crafted `__getattribute__`
+        # could still call another hand-crafted `__getattr__`, but not the
+        # other way around.
+        if is_big_annoying_library(self.parent_context):
+            return NO_VALUES
+        names = (self.get_function_slot_names('__getattr__')
+                 or self.get_function_slot_names('__getattribute__'))
+        return self.execute_function_slots(names, name)
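
A sketch of the runtime ordering discussed in the comment above, with a toy
`Fallback` class:

    class Fallback:
        def __getattr__(self, name):     # called only for missing attributes
            return '<missing %s>' % name

    f = Fallback()
    f.x = 1
    assert f.x == 1                      # found via __getattribute__
    assert f.y == '<missing y>'          # falls back to __getattr__
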
+
+    def py__next__(self, contextualized_node=None):
+        name = '__next__'
+        next_slot_names = self.get_function_slot_names(name)
+        if next_slot_names:
+            yield LazyKnownValues(
+                self.execute_function_slots(next_slot_names)
+            )
+        else:
+            debug.warning('Instance has no __next__ function in %s.', self)
+
+    def py__call__(self, arguments):
+        names = self.get_function_slot_names('__call__')
+        if not names:
+            # Means the Instance is not callable.
+            return super().py__call__(arguments)
+
+        return ValueSet.from_sets(name.infer().execute(arguments) for name in names)

     def py__get__(self, instance, class_value):
         """
         obj may be None.
         """
-        pass
+        # Arguments in __get__ descriptors are obj, class.
+        # `method` is the new parent of the array, don't know if that's good.
+        for cls in self.class_value.py__mro__():
+            result = cls.py__get__on_class(self, instance, class_value)
+            if result is not NotImplemented:
+                return result
+
+        names = self.get_function_slot_names('__get__')
+        if names:
+            if instance is None:
+                instance = compiled.builtin_from_name(self.inference_state, 'None')
+            return self.execute_function_slots(names, instance, class_value)
+        else:
+            return ValueSet([self])
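
The descriptor protocol that `py__get__` mirrors, shown with toy classes;
note that `obj` is None when the attribute is accessed on the class itself:

    class Ten:
        def __get__(self, obj, objtype=None):
            return 10

    class Holder:
        x = Ten()

    assert Holder().x == 10   # obj is the instance
    assert Holder.x == 10     # obj is None
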


 class TreeInstance(_BaseTreeInstance):
-
-    def __init__(self, inference_state, parent_context, class_value, arguments
-        ):
-        if class_value.py__name__() in ['list', 'set'
-            ] and parent_context.get_root_context().is_builtins_module():
+    def __init__(self, inference_state, parent_context, class_value, arguments):
+        # I don't think that dynamic append lookups should happen here. That
+        # sounds more like something that should go to py__iter__.
+        if class_value.py__name__() in ['list', 'set'] \
+                and parent_context.get_root_context().is_builtins_module():
+            # compare the module path with the builtin name.
             if settings.dynamic_array_additions:
                 arguments = get_dynamic_array_instance(self, arguments)
+
         super().__init__(inference_state, parent_context, class_value)
         self._arguments = arguments
         self.tree_node = class_value.tree_node

+    # This can recurse if the initialization of the class includes a
+    # reference to itself.
+    @inference_state_method_cache(default=None)
+    def _get_annotated_class_object(self):
+        from jedi.inference.gradual.annotation import py__annotations__, \
+            infer_type_vars_for_execution
+
+        args = InstanceArguments(self, self._arguments)
+        for signature in self.class_value.py__getattribute__('__init__').get_signatures():
+            # Just take the first result, it should always be one, because we
+            # control the typeshed code.
+            funcdef = signature.value.tree_node
+            if funcdef is None or funcdef.type != 'funcdef' \
+                    or not signature.matches_signature(args):
+                # First check if the signature even matches, if not we don't
+                # need to infer anything.
+                continue
+            bound_method = BoundMethod(self, self.class_value.as_context(), signature.value)
+            all_annotations = py__annotations__(funcdef)
+            type_var_dict = infer_type_vars_for_execution(bound_method, args, all_annotations)
+            if type_var_dict:
+                defined, = self.class_value.define_generics(
+                    infer_type_vars_for_execution(signature.value, args, all_annotations),
+                )
+                debug.dbg('Inferred instance value as %s', defined, color='BLUE')
+                return defined
+        return None
+
+    def get_annotated_class_object(self):
+        return self._get_annotated_class_object() or self.class_value
+
+    def get_key_values(self):
+        values = NO_VALUES
+        if self.array_type == 'dict':
+            for i, (key, instance) in enumerate(self._arguments.unpack()):
+                if key is None and i == 0:
+                    values |= ValueSet.from_sets(
+                        v.get_key_values()
+                        for v in instance.infer()
+                        if v.array_type == 'dict'
+                    )
+                if key:
+                    values |= ValueSet([compiled.create_simple_object(
+                        self.inference_state,
+                        key,
+                    )])
+
+        return values
+
+    def py__simple_getitem__(self, index):
+        if self.array_type == 'dict':
+            # Logic for dict({'foo': bar}) and dict(foo=bar)
+            # reversed, because:
+            # >>> dict({'a': 1}, a=3)
+            # {'a': 3}
+            # TODO tuple initializations
+            # >>> dict([('a', 4)])
+            # {'a': 4}
+            for key, lazy_context in reversed(list(self._arguments.unpack())):
+                if key is None:
+                    values = ValueSet.from_sets(
+                        dct_value.py__simple_getitem__(index)
+                        for dct_value in lazy_context.infer()
+                        if dct_value.array_type == 'dict'
+                    )
+                    if values:
+                        return values
+                else:
+                    if key == index:
+                        return lazy_context.infer()
+        return super().py__simple_getitem__(index)
+
     def __repr__(self):
-        return '<%s of %s(%s)>' % (self.__class__.__name__, self.
-            class_value, self._arguments)
+        return "<%s of %s(%s)>" % (self.__class__.__name__, self.class_value,
+                                   self._arguments)
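
A sketch, in terms of standard typing semantics rather than jedi internals,
of the `__init__` type-var binding that `_get_annotated_class_object`
performs (hypothetical `Box` class):

    from typing import Generic, TypeVar

    T = TypeVar('T')

    class Box(Generic[T]):
        def __init__(self, item: T) -> None:
            self.item = item

    b = Box(42)   # T is bound to int, so the instance is inferred as Box[int]
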


 class AnonymousInstance(_BaseTreeInstance):
@@ -103,48 +398,134 @@ class AnonymousInstance(_BaseTreeInstance):


 class CompiledInstanceName(NameWrapper):
-    pass
+    @iterator_to_value_set
+    def infer(self):
+        for result_value in self._wrapped_name.infer():
+            if result_value.api_type == 'function':
+                yield CompiledBoundMethod(result_value)
+            else:
+                yield result_value


 class CompiledInstanceClassFilter(AbstractFilter):
-
     def __init__(self, instance, f):
         self._instance = instance
         self._class_filter = f

+    def get(self, name):
+        return self._convert(self._class_filter.get(name))

-class BoundMethod(FunctionMixin, ValueWrapper):
+    def values(self):
+        return self._convert(self._class_filter.values())
+
+    def _convert(self, names):
+        return [CompiledInstanceName(n) for n in names]

+
+class BoundMethod(FunctionMixin, ValueWrapper):
     def __init__(self, instance, class_context, function):
         super().__init__(function)
         self.instance = instance
         self._class_context = class_context

+    def is_bound_method(self):
+        return True
+
+    @property
+    def name(self):
+        return FunctionNameInClass(
+            self._class_context,
+            super().name
+        )
+
+    def py__class__(self):
+        c, = values_from_qualified_names(self.inference_state, 'types', 'MethodType')
+        return c
+
+    def _get_arguments(self, arguments):
+        assert arguments is not None
+        return InstanceArguments(self.instance, arguments)
+
+    def _as_context(self, arguments=None):
+        if arguments is None:
+            return AnonymousMethodExecutionContext(self.instance, self)
+
+        arguments = self._get_arguments(arguments)
+        return MethodExecutionContext(self.instance, self, arguments)
+
+    def py__call__(self, arguments):
+        if isinstance(self._wrapped_value, OverloadedFunctionValue):
+            return self._wrapped_value.py__call__(self._get_arguments(arguments))
+
+        function_execution = self.as_context(arguments)
+        return function_execution.infer()
+
+    def get_signature_functions(self):
+        return [
+            BoundMethod(self.instance, self._class_context, f)
+            for f in self._wrapped_value.get_signature_functions()
+        ]
+
+    def get_signatures(self):
+        return [sig.bind(self) for sig in super().get_signatures()]
+
     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self._wrapped_value)
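
What `BoundMethod` models, in plain Python with a toy class: binding prepends
the instance, and the bound object's class is `types.MethodType` (compare
`py__class__` above):

    import types

    class A:
        def f(self, x):
            return (self, x)

    a = A()
    assert isinstance(a.f, types.MethodType)
    assert a.f(1) == A.f(a, 1)   # the instance becomes the first argument
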


 class CompiledBoundMethod(ValueWrapper):
-    pass
+    def is_bound_method(self):
+        return True
+
+    def get_signatures(self):
+        return [sig.bind(self) for sig in self._wrapped_value.get_signatures()]


 class SelfName(TreeNameDefinition):
     """
     This name calculates the parent_context lazily.
     """
-
     def __init__(self, instance, class_context, tree_name):
         self._instance = instance
         self.class_context = class_context
         self.tree_name = tree_name

+    @property
+    def parent_context(self):
+        return self._instance.create_instance_context(self.class_context, self.tree_name)

-class LazyInstanceClassName(NameWrapper):
+    def get_defining_qualified_value(self):
+        return self._instance
+
+    def infer(self):
+        stmt = search_ancestor(self.tree_name, 'expr_stmt')
+        if stmt is not None:
+            if stmt.children[1].type == "annassign":
+                from jedi.inference.gradual.annotation import infer_annotation
+                values = infer_annotation(
+                    self.parent_context, stmt.children[1].children[1]
+                ).execute_annotation()
+                if values:
+                    return values
+        return super().infer()
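
The pattern the `annassign` branch above targets, as plain Python (toy
`Point` class):

    class Point:
        def __init__(self, raw):
            self.x: int = raw   # the annotation wins over inferring `raw`
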

+
+class LazyInstanceClassName(NameWrapper):
     def __init__(self, instance, class_member_name):
         super().__init__(class_member_name)
         self._instance = instance

+    @iterator_to_value_set
+    def infer(self):
+        for result_value in self._wrapped_name.infer():
+            yield from result_value.py__get__(self._instance, self._instance.py__class__())
+
+    def get_signatures(self):
+        return self.infer().get_signatures()
+
+    def get_defining_qualified_value(self):
+        return self._instance
+

 class InstanceClassFilter(AbstractFilter):
     """
@@ -152,11 +533,22 @@ class InstanceClassFilter(AbstractFilter):
     resulting names in LazyInstanceClassName. The idea is that the class name
     filtering can be very flexible and always be reflected in instances.
     """
-
     def __init__(self, instance, class_filter):
         self._instance = instance
         self._class_filter = class_filter

+    def get(self, name):
+        return self._convert(self._class_filter.get(name))
+
+    def values(self):
+        return self._convert(self._class_filter.values())
+
+    def _convert(self, names):
+        return [
+            LazyInstanceClassName(self._instance, n)
+            for n in names
+        ]
+
     def __repr__(self):
         return '<%s for %s>' % (self.__class__.__name__, self._class_filter)

@@ -165,15 +557,54 @@ class SelfAttributeFilter(ClassFilter):
     """
     This class basically filters all the use cases where `self.*` was assigned.
     """
-
     def __init__(self, instance, instance_class, node_context, origin_scope):
-        super().__init__(class_value=instance_class, node_context=
-            node_context, origin_scope=origin_scope, is_instance=True)
+        super().__init__(
+            class_value=instance_class,
+            node_context=node_context,
+            origin_scope=origin_scope,
+            is_instance=True,
+        )
         self._instance = instance

+    def _filter(self, names):
+        start, end = self._parser_scope.start_pos, self._parser_scope.end_pos
+        names = [n for n in names if start < n.start_pos < end]
+        return self._filter_self_names(names)
+
+    def _filter_self_names(self, names):
+        for name in names:
+            trailer = name.parent
+            if trailer.type == 'trailer' \
+                    and len(trailer.parent.children) == 2 \
+                    and trailer.children[0] == '.':
+                if name.is_definition() and self._access_possible(name):
+                    # TODO filter non-self assignments instead of this bad
+                    #      filter.
+                    if self._is_in_right_scope(trailer.parent.children[0], name):
+                        yield name
+
+    def _is_in_right_scope(self, self_name, name):
+        self_context = self._node_context.create_context(self_name)
+        names = self_context.goto(self_name, position=self_name.start_pos)
+        return any(
+            n.api_type == 'param'
+            and n.tree_name.get_definition().position_index == 0
+            and n.parent_context.tree_node is self._parser_scope
+            for n in names
+        )
+
+    def _convert_names(self, names):
+        return [SelfName(self._instance, self._node_context, name) for name in names]
+
+    def _check_flows(self, names):
+        return names

-class InstanceArguments(TreeArgumentsWrapper):

+class InstanceArguments(TreeArgumentsWrapper):
     def __init__(self, instance, arguments):
         super().__init__(arguments)
         self.instance = instance
+
+    def unpack(self, func=None):
+        yield None, LazyKnownValue(self.instance)
+        yield from self._wrapped_arguments.unpack(func)
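
The equivalence `InstanceArguments.unpack` models, with a toy `Greeter`
class: the instance is yielded first, then the wrapped arguments.

    class Greeter:
        def greet(self, name):
            return 'hi ' + name

    g = Greeter()
    assert g.greet('x') == Greeter.greet(g, 'x')
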
diff --git a/jedi/inference/value/iterable.py b/jedi/inference/value/iterable.py
index 6b4bb3e8..7cc37173 100644
--- a/jedi/inference/value/iterable.py
+++ b/jedi/inference/value/iterable.py
@@ -4,55 +4,221 @@ iterators in general.
 """
 from jedi.inference import compiled
 from jedi.inference import analysis
-from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, LazyTreeValue
-from jedi.inference.helpers import get_int_or_none, is_string, reraise_getitem_errors, SimpleGetItemNotFound
+from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \
+    LazyTreeValue
+from jedi.inference.helpers import get_int_or_none, is_string, \
+    reraise_getitem_errors, SimpleGetItemNotFound
 from jedi.inference.utils import safe_property, to_list
 from jedi.inference.cache import inference_state_method_cache
 from jedi.inference.filters import LazyAttributeOverwrite, publish_method
-from jedi.inference.base_value import ValueSet, Value, NO_VALUES, ContextualizedNode, iterate_values, sentinel, LazyValueWrapper
+from jedi.inference.base_value import ValueSet, Value, NO_VALUES, \
+    ContextualizedNode, iterate_values, sentinel, \
+    LazyValueWrapper
 from jedi.parser_utils import get_sync_comp_fors
 from jedi.inference.context import CompForContext
 from jedi.inference.value.dynamic_arrays import check_array_additions


 class IterableMixin:
+    def py__next__(self, contextualized_node=None):
+        return self.py__iter__(contextualized_node)
+
+    def py__stop_iteration_returns(self):
+        return ValueSet([compiled.builtin_from_name(self.inference_state, 'None')])
+
+    # At the moment, safe values are simple values like "foo" or 1, not
+    # lists/dicts. Therefore, as a small speed optimization, we can just use
+    # the default instead of resolving the lazy wrapped values, which would
+    # end up doing the same thing anyway.
+    # This mostly speeds up patterns like `sys.version_info >= (3, 0)` in
+    # typeshed.
     get_safe_value = Value.get_safe_value


 class GeneratorBase(LazyAttributeOverwrite, IterableMixin):
     array_type = None

+    def _get_wrapped_value(self):
+        instance, = self._get_cls().execute_annotation()
+        return instance
+
+    def _get_cls(self):
+        generator, = self.inference_state.typing_module.py__getattribute__('Generator')
+        return generator
+
+    def py__bool__(self):
+        return True
+
+    @publish_method('__iter__')
+    def _iter(self, arguments):
+        return ValueSet([self])
+
+    @publish_method('send')
+    @publish_method('__next__')
+    def _next(self, arguments):
+        return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__())
+
+    def py__stop_iteration_returns(self):
+        return ValueSet([compiled.builtin_from_name(self.inference_state, 'None')])
+
+    @property
+    def name(self):
+        return compiled.CompiledValueName(self, 'Generator')
+
+    def get_annotated_class_object(self):
+        from jedi.inference.gradual.generics import TupleGenericManager
+        gen_values = self.merge_types_of_iterate().py__class__()
+        gm = TupleGenericManager((gen_values, NO_VALUES, NO_VALUES))
+        return self._get_cls().with_generics(gm)
+

 class Generator(GeneratorBase):
     """Handling of `yield` functions."""
-
     def __init__(self, inference_state, func_execution_context):
         super().__init__(inference_state)
         self._func_execution_context = func_execution_context

+    def py__iter__(self, contextualized_node=None):
+        iterators = self._func_execution_context.infer_annotations()
+        if iterators:
+            return iterators.iterate(contextualized_node)
+        return self._func_execution_context.get_yield_lazy_values()
+
+    def py__stop_iteration_returns(self):
+        return self._func_execution_context.get_return_values()
+
     def __repr__(self):
-        return '<%s of %s>' % (type(self).__name__, self.
-            _func_execution_context)
+        return "<%s of %s>" % (type(self).__name__, self._func_execution_context)
+
+
+def comprehension_from_atom(inference_state, value, atom):
+    bracket = atom.children[0]
+    test_list_comp = atom.children[1]
+
+    if bracket == '{':
+        if atom.children[1].children[1] == ':':
+            sync_comp_for = test_list_comp.children[3]
+            if sync_comp_for.type == 'comp_for':
+                sync_comp_for = sync_comp_for.children[1]
+
+            return DictComprehension(
+                inference_state,
+                value,
+                sync_comp_for_node=sync_comp_for,
+                key_node=test_list_comp.children[0],
+                value_node=test_list_comp.children[2],
+            )
+        else:
+            cls = SetComprehension
+    elif bracket == '(':
+        cls = GeneratorComprehension
+    elif bracket == '[':
+        cls = ListComprehension
+
+    sync_comp_for = test_list_comp.children[1]
+    if sync_comp_for.type == 'comp_for':
+        sync_comp_for = sync_comp_for.children[1]
+
+    return cls(
+        inference_state,
+        defining_context=value,
+        sync_comp_for_node=sync_comp_for,
+        entry_node=test_list_comp.children[0],
+    )
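
The bracket/colon dispatch above, summarized (class names are the ones
defined in this file):

    dispatch = {
        '[x for x in y]': 'ListComprehension',
        '{x for x in y}': 'SetComprehension',
        '{k: v for k, v in y}': 'DictComprehension',
        '(x for x in y)': 'GeneratorComprehension',
    }
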


 class ComprehensionMixin:
+    @inference_state_method_cache()
+    def _get_comp_for_context(self, parent_context, comp_for):
+        return CompForContext(parent_context, comp_for)
+
+    def _nested(self, comp_fors, parent_context=None):
+        comp_for = comp_fors[0]
+
+        is_async = comp_for.parent.type == 'comp_for'
+
+        input_node = comp_for.children[3]
+        parent_context = parent_context or self._defining_context
+        input_types = parent_context.infer_node(input_node)
+
+        cn = ContextualizedNode(parent_context, input_node)
+        iterated = input_types.iterate(cn, is_async=is_async)
+        exprlist = comp_for.children[1]
+        for i, lazy_value in enumerate(iterated):
+            types = lazy_value.infer()
+            dct = unpack_tuple_to_dict(parent_context, types, exprlist)
+            context = self._get_comp_for_context(
+                parent_context,
+                comp_for,
+            )
+            with context.predefine_names(comp_for, dct):
+                try:
+                    yield from self._nested(comp_fors[1:], context)
+                except IndexError:
+                    iterated = context.infer_node(self._entry_node)
+                    if self.array_type == 'dict':
+                        yield iterated, context.infer_node(self._value_node)
+                    else:
+                        yield iterated
+
+    @inference_state_method_cache(default=[])
+    @to_list
+    def _iterate(self):
+        comp_fors = tuple(get_sync_comp_fors(self._sync_comp_for_node))
+        yield from self._nested(comp_fors)
+
+    def py__iter__(self, contextualized_node=None):
+        for set_ in self._iterate():
+            yield LazyKnownValues(set_)

     def __repr__(self):
-        return '<%s of %s>' % (type(self).__name__, self._sync_comp_for_node)
+        return "<%s of %s>" % (type(self).__name__, self._sync_comp_for_node)


 class _DictMixin:
-    pass
+    def _get_generics(self):
+        return tuple(c_set.py__class__() for c_set in self.get_mapping_item_values())


 class Sequence(LazyAttributeOverwrite, IterableMixin):
     api_type = 'instance'

+    @property
+    def name(self):
+        return compiled.CompiledValueName(self, self.array_type)

-class _BaseComprehension(ComprehensionMixin):
+    def _get_generics(self):
+        return (self.merge_types_of_iterate().py__class__(),)
+
+    @inference_state_method_cache(default=())
+    def _cached_generics(self):
+        return self._get_generics()
+
+    def _get_wrapped_value(self):
+        from jedi.inference.gradual.base import GenericClass
+        from jedi.inference.gradual.generics import TupleGenericManager
+        klass = compiled.builtin_from_name(self.inference_state, self.array_type)
+        c, = GenericClass(
+            klass,
+            TupleGenericManager(self._cached_generics())
+        ).execute_annotation()
+        return c
+
+    def py__bool__(self):
+        return None  # We don't know the length, because of appends.
+
+    @safe_property
+    def parent(self):
+        return self.inference_state.builtins_module

-    def __init__(self, inference_state, defining_context,
-        sync_comp_for_node, entry_node):
+    def py__getitem__(self, index_value_set, contextualized_node):
+        if self.array_type == 'dict':
+            return self._dict_values()
+        return iterate_values(ValueSet([self]))
+
+
+class _BaseComprehension(ComprehensionMixin):
+    def __init__(self, inference_state, defining_context, sync_comp_for_node, entry_node):
         assert sync_comp_for_node.type == 'sync_comp_for'
         super().__init__(inference_state)
         self._defining_context = defining_context
@@ -63,6 +229,15 @@ class _BaseComprehension(ComprehensionMixin):
 class ListComprehension(_BaseComprehension, Sequence):
     array_type = 'list'

+    def py__simple_getitem__(self, index):
+        if isinstance(index, slice):
+            return ValueSet([self])
+
+        all_types = list(self.py__iter__())
+        with reraise_getitem_errors(IndexError, TypeError):
+            lazy_value = all_types[index]
+        return lazy_value.infer()
+

 class SetComprehension(_BaseComprehension, Sequence):
     array_type = 'set'
@@ -73,14 +248,19 @@ class GeneratorComprehension(_BaseComprehension, GeneratorBase):


 class _DictKeyMixin:
-    pass
+    # TODO merge with _DictMixin?
+    def get_mapping_item_values(self):
+        return self._dict_keys(), self._dict_values()
+
+    def get_key_values(self):
+        # TODO merge with _dict_keys?
+        return self._dict_keys()


 class DictComprehension(ComprehensionMixin, Sequence, _DictKeyMixin):
     array_type = 'dict'

-    def __init__(self, inference_state, defining_context,
-        sync_comp_for_node, key_node, value_node):
+    def __init__(self, inference_state, defining_context, sync_comp_for_node, key_node, value_node):
         assert sync_comp_for_node.type == 'sync_comp_for'
         super().__init__(inference_state)
         self._defining_context = defining_context
@@ -88,65 +268,226 @@ class DictComprehension(ComprehensionMixin, Sequence, _DictKeyMixin):
         self._entry_node = key_node
         self._value_node = value_node

+    def py__iter__(self, contextualized_node=None):
+        for keys, values in self._iterate():
+            yield LazyKnownValues(keys)
+
+    def py__simple_getitem__(self, index):
+        for keys, values in self._iterate():
+            for k in keys:
+                # Be careful if refactoring in the future: index could be a
+                # slice object.
+                if k.get_safe_value(default=object()) == index:
+                    return values
+        raise SimpleGetItemNotFound()
+
+    def _dict_keys(self):
+        return ValueSet.from_sets(keys for keys, values in self._iterate())
+
+    def _dict_values(self):
+        return ValueSet.from_sets(values for keys, values in self._iterate())
+
+    @publish_method('values')
+    def _imitate_values(self, arguments):
+        lazy_value = LazyKnownValues(self._dict_values())
+        return ValueSet([FakeList(self.inference_state, [lazy_value])])
+
+    @publish_method('items')
+    def _imitate_items(self, arguments):
+        lazy_values = [
+            LazyKnownValue(
+                FakeTuple(
+                    self.inference_state,
+                    [LazyKnownValues(key),
+                     LazyKnownValues(value)]
+                )
+            )
+            for key, value in self._iterate()
+        ]
+
+        return ValueSet([FakeList(self.inference_state, lazy_values)])
+
+    def exact_key_items(self):
+        # NOTE: Something smarter can probably be done here to achieve better
+        # completions, but at least like this jedi doesn't crash.
+        return []
+

 class SequenceLiteralValue(Sequence):
     _TUPLE_LIKE = 'testlist_star_expr', 'testlist', 'subscriptlist'
-    mapping = {'(': 'tuple', '[': 'list', '{': 'set'}
+    mapping = {'(': 'tuple',
+               '[': 'list',
+               '{': 'set'}

     def __init__(self, inference_state, defining_context, atom):
         super().__init__(inference_state)
         self.atom = atom
         self._defining_context = defining_context
+
         if self.atom.type in self._TUPLE_LIKE:
             self.array_type = 'tuple'
         else:
             self.array_type = SequenceLiteralValue.mapping[atom.children[0]]
             """The builtin name of the array (list, set, tuple or dict)."""

+    def _get_generics(self):
+        if self.array_type == 'tuple':
+            return tuple(x.infer().py__class__() for x in self.py__iter__())
+        return super()._get_generics()
+
     def py__simple_getitem__(self, index):
         """Here the index is an int/str. Raises IndexError/KeyError."""
-        pass
+        if isinstance(index, slice):
+            return ValueSet([self])
+        else:
+            with reraise_getitem_errors(TypeError, KeyError, IndexError):
+                node = self.get_tree_entries()[index]
+            if node == ':' or node.type == 'subscript':
+                return NO_VALUES
+            return self._defining_context.infer_node(node)

     def py__iter__(self, contextualized_node=None):
         """
         While values returns the possible values for any array field, this
         function returns the value for a certain index.
         """
-        pass
+        for node in self.get_tree_entries():
+            if node == ':' or node.type == 'subscript':
+                # TODO this should probably use at least part of the code
+                #      of infer_subscript_list.
+                yield LazyKnownValue(Slice(self._defining_context, None, None, None))
+            else:
+                yield LazyTreeValue(self._defining_context, node)
+        yield from check_array_additions(self._defining_context, self)
+
+    def py__len__(self):
+        # This function is not really used often. It's more of an experiment.
+        return len(self.get_tree_entries())
+
+    def get_tree_entries(self):
+        c = self.atom.children
+
+        if self.atom.type in self._TUPLE_LIKE:
+            return c[::2]
+
+        array_node = c[1]
+        if array_node in (']', '}', ')'):
+            return []  # Direct closing bracket, doesn't contain items.
+
+        if array_node.type == 'testlist_comp':
+            # Filter out (for now) PEP 448 single-star unpacking.
+            return [value for value in array_node.children[::2]
+                    if value.type != "star_expr"]
+        elif array_node.type == 'dictorsetmaker':
+            kv = []
+            iterator = iter(array_node.children)
+            for key in iterator:
+                if key == "**":
+                    # dict with PEP 448 double-star unpacking;
+                    # for now ignoring the values imported by **
+                    next(iterator)
+                    next(iterator, None)  # Possible comma.
+                else:
+                    op = next(iterator, None)
+                    if op is None or op == ',':
+                        if key.type == "star_expr":
+                            # PEP 448 single-star unpacking;
+                            # for now ignoring values imported by *
+                            pass
+                        else:
+                            kv.append(key)  # A set.
+                    else:
+                        assert op == ':'  # A dict.
+                        kv.append((key, next(iterator)))
+                        next(iterator, None)  # Possible comma.
+            return kv
+        else:
+            if array_node.type == "star_expr":
+                # PEP 448 single-star unpacking;
+                # for now ignoring values imported by *
+                return []
+            else:
+                return [array_node]

     def __repr__(self):
-        return '<%s of %s>' % (self.__class__.__name__, self.atom)
+        return "<%s of %s>" % (self.__class__.__name__, self.atom)


 class DictLiteralValue(_DictMixin, SequenceLiteralValue, _DictKeyMixin):
     array_type = 'dict'

     def __init__(self, inference_state, defining_context, atom):
+        # Intentionally don't call the super class. This is definitely a sign
+        # that the architecture is bad and we should refactor.
         Sequence.__init__(self, inference_state)
         self._defining_context = defining_context
         self.atom = atom

     def py__simple_getitem__(self, index):
         """Here the index is an int/str. Raises IndexError/KeyError."""
-        pass
+        compiled_value_index = compiled.create_simple_object(self.inference_state, index)
+        for key, value in self.get_tree_entries():
+            for k in self._defining_context.infer_node(key):
+                for key_v in k.execute_operation(compiled_value_index, '=='):
+                    if key_v.get_safe_value():
+                        return self._defining_context.infer_node(value)
+        raise SimpleGetItemNotFound('No key found in dictionary %s.' % self)

     def py__iter__(self, contextualized_node=None):
         """
         While values returns the possible values for any array field, this
         function returns the value for a certain index.
         """
-        pass
+        # Get keys.
+        types = NO_VALUES
+        for k, _ in self.get_tree_entries():
+            types |= self._defining_context.infer_node(k)
+        # We don't know which dict index comes first, therefore always
+        # yield all the types.
+        for _ in types:
+            yield LazyKnownValues(types)
+
+    @publish_method('values')
+    def _imitate_values(self, arguments):
+        lazy_value = LazyKnownValues(self._dict_values())
+        return ValueSet([FakeList(self.inference_state, [lazy_value])])
+
+    @publish_method('items')
+    def _imitate_items(self, arguments):
+        lazy_values = [
+            LazyKnownValue(FakeTuple(
+                self.inference_state,
+                (LazyTreeValue(self._defining_context, key_node),
+                 LazyTreeValue(self._defining_context, value_node))
+            )) for key_node, value_node in self.get_tree_entries()
+        ]
+
+        return ValueSet([FakeList(self.inference_state, lazy_values)])

     def exact_key_items(self):
         """
         Returns a generator of tuples like dict.items(), where the key is
         resolved (as a string) and the values are still lazy values.
         """
-        pass
+        for key_node, value in self.get_tree_entries():
+            for key in self._defining_context.infer_node(key_node):
+                if is_string(key):
+                    yield key.get_safe_value(), LazyTreeValue(self._defining_context, value)

+    def _dict_values(self):
+        return ValueSet.from_sets(
+            self._defining_context.infer_node(v)
+            for k, v in self.get_tree_entries()
+        )

-class _FakeSequence(Sequence):
+    def _dict_keys(self):
+        return ValueSet.from_sets(
+            self._defining_context.infer_node(k)
+            for k, v in self.get_tree_entries()
+        )
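
The `==` comparison that `py__simple_getitem__` executes on the inferred
keys, reproduced in plain Python:

    d = {1: 'one', 'a': 'letter'}
    value = None
    for key in d:
        if key == 1:
            value = d[key]
    assert value == 'one'
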

+
+class _FakeSequence(Sequence):
     def __init__(self, inference_state, lazy_value_list):
         """
         type should be one of "tuple", "list"
@@ -154,8 +495,22 @@ class _FakeSequence(Sequence):
         super().__init__(inference_state)
         self._lazy_value_list = lazy_value_list

+    def py__simple_getitem__(self, index):
+        if isinstance(index, slice):
+            return ValueSet([self])
+
+        with reraise_getitem_errors(IndexError, TypeError):
+            lazy_value = self._lazy_value_list[index]
+        return lazy_value.infer()
+
+    def py__iter__(self, contextualized_node=None):
+        return self._lazy_value_list
+
+    def py__bool__(self):
+        return bool(len(self._lazy_value_list))
+
     def __repr__(self):
-        return '<%s of %s>' % (type(self).__name__, self._lazy_value_list)
+        return "<%s of %s>" % (type(self).__name__, self._lazy_value_list)


 class FakeTuple(_FakeSequence):
@@ -173,37 +528,120 @@ class FakeDict(_DictMixin, Sequence, _DictKeyMixin):
         super().__init__(inference_state)
         self._dct = dct

+    def py__iter__(self, contextualized_node=None):
+        for key in self._dct:
+            yield LazyKnownValue(compiled.create_simple_object(self.inference_state, key))
+
+    def py__simple_getitem__(self, index):
+        with reraise_getitem_errors(KeyError, TypeError):
+            lazy_value = self._dct[index]
+        return lazy_value.infer()
+
+    @publish_method('values')
+    def _values(self, arguments):
+        return ValueSet([FakeTuple(
+            self.inference_state,
+            [LazyKnownValues(self._dict_values())]
+        )])
+
+    def _dict_values(self):
+        return ValueSet.from_sets(lazy_value.infer() for lazy_value in self._dct.values())
+
+    def _dict_keys(self):
+        return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__())
+
+    def exact_key_items(self):
+        return self._dct.items()
+
     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self._dct)


 class MergedArray(Sequence):
-
     def __init__(self, inference_state, arrays):
         super().__init__(inference_state)
         self.array_type = arrays[-1].array_type
         self._arrays = arrays

+    def py__iter__(self, contextualized_node=None):
+        for array in self._arrays:
+            yield from array.py__iter__()
+
+    def py__simple_getitem__(self, index):
+        return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__())
+

 def unpack_tuple_to_dict(context, types, exprlist):
     """
     Unpacking tuple assignments in for statements and expr_stmts.
     """
-    pass
+    if exprlist.type == 'name':
+        return {exprlist.value: types}
+    elif exprlist.type == 'atom' and exprlist.children[0] in ('(', '['):
+        return unpack_tuple_to_dict(context, types, exprlist.children[1])
+    elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
+                           'testlist_star_expr'):
+        dct = {}
+        parts = iter(exprlist.children[::2])
+        n = 0
+        for lazy_value in types.iterate(ContextualizedNode(context, exprlist)):
+            n += 1
+            try:
+                part = next(parts)
+            except StopIteration:
+                analysis.add(context, 'value-error-too-many-values', part,
+                             message="ValueError: too many values to unpack (expected %s)" % n)
+            else:
+                dct.update(unpack_tuple_to_dict(context, lazy_value.infer(), part))
+        has_parts = next(parts, None)
+        if types and has_parts is not None:
+            analysis.add(context, 'value-error-too-few-values', has_parts,
+                         message="ValueError: need more than %s values to unpack" % n)
+        return dct
+    elif exprlist.type == 'power' or exprlist.type == 'atom_expr':
+        # Something like ``arr[x], var = ...``.
+        # This is something that is not yet supported, would also be difficult
+        # to write into a dict.
+        return {}
+    elif exprlist.type == 'star_expr':  # `a, *b, c = x` type unpackings
+        # Currently we're not supporting them.
+        return {}
+    raise NotImplementedError
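
The two diagnostics added above, as runtime behaviour:

    try:
        a, b = 1, 2, 3
    except ValueError as e:
        assert 'too many values to unpack' in str(e)

    try:
        a, b, c = 1, 2
    except ValueError as e:
        assert 'values to unpack' in str(e)
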


 class Slice(LazyValueWrapper):
-
     def __init__(self, python_context, start, stop, step):
         self.inference_state = python_context.inference_state
         self._context = python_context
+        # All of them are either a Precedence or None.
         self._start = start
         self._stop = stop
         self._step = step

+    def _get_wrapped_value(self):
+        value = compiled.builtin_from_name(self._context.inference_state, 'slice')
+        slice_value, = value.execute_with_values()
+        return slice_value
+
     def get_safe_value(self, default=sentinel):
         """
         Imitate CompiledValue.obj behavior and return a ``builtin.slice()``
         object.
         """
-        pass
+        def get(element):
+            if element is None:
+                return None
+
+            result = self._context.infer_node(element)
+            if len(result) != 1:
+                # For simplicity, we want slices to be clearly defined with
+                # just one type. Otherwise we will return an empty slice object.
+                raise IndexError
+
+            value, = result
+            return get_int_or_none(value)
+
+        try:
+            return slice(get(self._start), get(self._stop), get(self._step))
+        except IndexError:
+            return slice(None, None, None)
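
The builtin object that `get_safe_value` imitates, including the empty
fallback:

    s = slice(1, 10, 2)
    assert [0, 1, 2, 3, 4, 5][s] == [1, 3, 5]
    assert [0, 1, 2][slice(None, None, None)] == [0, 1, 2]
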
diff --git a/jedi/inference/value/klass.py b/jedi/inference/value/klass.py
index b88b0996..d4074f36 100644
--- a/jedi/inference/value/klass.py
+++ b/jedi/inference/value/klass.py
@@ -37,14 +37,17 @@ py__doc__()                            Returns the docstring for a value.

 """
 from jedi import debug
-from jedi.parser_utils import get_cached_parent_scope, expr_is_dotted, function_is_property
-from jedi.inference.cache import inference_state_method_cache, CachedMetaClass, inference_state_method_generator_cache
+from jedi.parser_utils import get_cached_parent_scope, expr_is_dotted, \
+    function_is_property
+from jedi.inference.cache import inference_state_method_cache, CachedMetaClass, \
+    inference_state_method_generator_cache
 from jedi.inference import compiled
 from jedi.inference.lazy_value import LazyKnownValues, LazyTreeValue
 from jedi.inference.filters import ParserTreeFilter
 from jedi.inference.names import TreeNameDefinition, ValueName
 from jedi.inference.arguments import unpack_arglist, ValuesArguments
-from jedi.inference.base_value import ValueSet, iterator_to_value_set, NO_VALUES
+from jedi.inference.base_value import ValueSet, iterator_to_value_set, \
+    NO_VALUES
 from jedi.inference.context import ClassContext
 from jedi.inference.value.function import FunctionAndClassBase
 from jedi.inference.gradual.generics import LazyGenericManager, TupleGenericManager
@@ -52,26 +55,336 @@ from jedi.plugins import plugin_manager


 class ClassName(TreeNameDefinition):
-
     def __init__(self, class_value, tree_name, name_context, apply_decorators):
         super().__init__(name_context, tree_name)
         self._apply_decorators = apply_decorators
         self._class_value = class_value

+    @iterator_to_value_set
+    def infer(self):
+        # We're using a different value to infer, so we cannot call super().
+        from jedi.inference.syntax_tree import tree_name_to_values
+        inferred = tree_name_to_values(
+            self.parent_context.inference_state, self.parent_context, self.tree_name)

-class ClassFilter(ParserTreeFilter):
+        for result_value in inferred:
+            if self._apply_decorators:
+                yield from result_value.py__get__(instance=None, class_value=self._class_value)
+            else:
+                yield result_value
+
+    @property
+    def api_type(self):
+        type_ = super().api_type
+        if type_ == 'function':
+            definition = self.tree_name.get_definition()
+            if definition is None:
+                return type_
+            if function_is_property(definition):
+                # This essentially checks if there is an @property before
+                # the function. @property could be something different, but
+                # any programmer that redefines property as something that
+                # is not really a property anymore, should be shot. (i.e.
+                # this is a heuristic).
+                return 'property'
+        return type_

+
+class ClassFilter(ParserTreeFilter):
     def __init__(self, class_value, node_context=None, until_position=None,
-        origin_scope=None, is_instance=False):
-        super().__init__(class_value.as_context(), node_context,
-            until_position=until_position, origin_scope=origin_scope)
+                 origin_scope=None, is_instance=False):
+        super().__init__(
+            class_value.as_context(), node_context,
+            until_position=until_position,
+            origin_scope=origin_scope,
+        )
         self._class_value = class_value
         self._is_instance = is_instance

+    def _convert_names(self, names):
+        return [
+            ClassName(
+                class_value=self._class_value,
+                tree_name=name,
+                name_context=self._node_context,
+                apply_decorators=not self._is_instance,
+            ) for name in names
+        ]
+
+    def _equals_origin_scope(self):
+        node = self._origin_scope
+        while node is not None:
+            if node == self._parser_scope or node == self.parent_context:
+                return True
+            node = get_cached_parent_scope(self._parso_cache_node, node)
+        return False
+
+    def _access_possible(self, name):
+        # Filter for name mangling of private variables like __foo
+        return not name.value.startswith('__') or name.value.endswith('__') \
+            or self._equals_origin_scope()
+
+    def _filter(self, names):
+        names = super()._filter(names)
+        return [name for name in names if self._access_possible(name)]
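
The name-mangling rule that `_access_possible` models, with a toy class:
`__foo` is mangled and therefore private, `__foo__` is not.

    class C:
        __private = 1
        __dunder__ = 2

    assert hasattr(C, '_C__private')   # mangled outside the class
    assert hasattr(C, '__dunder__')    # dunders stay untouched
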
+

 class ClassMixin:
-    pass
+    def is_class(self):
+        return True
+
+    def is_class_mixin(self):
+        return True
+
+    def py__call__(self, arguments):
+        from jedi.inference.value import TreeInstance
+
+        from jedi.inference.gradual.typing import TypedDict
+        if self.is_typeddict():
+            return ValueSet([TypedDict(self)])
+        return ValueSet([TreeInstance(self.inference_state, self.parent_context, self, arguments)])
+
+    def py__class__(self):
+        return compiled.builtin_from_name(self.inference_state, 'type')
+
+    @property
+    def name(self):
+        return ValueName(self, self.tree_node.name)
+
+    def py__name__(self):
+        return self.name.string_name
+
+    @inference_state_method_generator_cache()
+    def py__mro__(self):
+        mro = [self]
+        yield self
+        # TODO Do a proper mro resolution. Currently we are just listing
+        # classes. However, it's a complicated algorithm.
+        for lazy_cls in self.py__bases__():
+            # TODO there are multiple different MRO paths possible if this
+            # yields multiple possibilities. Could be changed to be more correct.
+            for cls in lazy_cls.infer():
+                # TODO detect for TypeError: duplicate base class str,
+                # e.g.  `class X(str, str): pass`
+                try:
+                    mro_method = cls.py__mro__
+                except AttributeError:
+                    # TODO add a TypeError like:
+                    """
+                    >>> class Y(lambda: test): pass
+                    Traceback (most recent call last):
+                      File "<stdin>", line 1, in <module>
+                    TypeError: function() argument 1 must be code, not str
+                    >>> class Y(1): pass
+                    Traceback (most recent call last):
+                      File "<stdin>", line 1, in <module>
+                    TypeError: int() takes at most 2 arguments (3 given)
+                    """
+                    debug.warning('Super class of %s is not a class: %s', self, cls)
+                else:
+                    for cls_new in mro_method():
+                        if cls_new not in mro:
+                            mro.append(cls_new)
+                            yield cls_new
+
+    def get_filters(self, origin_scope=None, is_instance=False,
+                    include_metaclasses=True, include_type_when_class=True):
+        if include_metaclasses:
+            metaclasses = self.get_metaclasses()
+            if metaclasses:
+                yield from self.get_metaclass_filters(metaclasses, is_instance)
+
+        for cls in self.py__mro__():
+            if cls.is_compiled():
+                yield from cls.get_filters(is_instance=is_instance)
+            else:
+                yield ClassFilter(
+                    self, node_context=cls.as_context(),
+                    origin_scope=origin_scope,
+                    is_instance=is_instance
+                )
+        if not is_instance and include_type_when_class:
+            from jedi.inference.compiled import builtin_from_name
+            type_ = builtin_from_name(self.inference_state, 'type')
+            assert isinstance(type_, ClassValue)
+            if type_ != self:
+                # We are not using execute_with_values here, because the
+                # plugin function for type would get executed instead of an
+                # instance creation.
+                args = ValuesArguments([])
+                for instance in type_.py__call__(args):
+                    instance_filters = instance.get_filters()
+                    # Skip the first two filters (the self filters).
+                    next(instance_filters, None)
+                    next(instance_filters, None)
+                    x = next(instance_filters, None)
+                    assert x is not None
+                    yield x
+
+    def get_signatures(self):
+        # Since calling staticmethod without a function is illegal, the Jedi
+        # plugin doesn't return anything. Therefore call directly and get what
+        # we want: an instance of staticmethod.
+        metaclasses = self.get_metaclasses()
+        if metaclasses:
+            sigs = self.get_metaclass_signatures(metaclasses)
+            if sigs:
+                return sigs
+        args = ValuesArguments([])
+        init_funcs = self.py__call__(args).py__getattribute__('__init__')
+        return [sig.bind(self) for sig in init_funcs.get_signatures()]
+
+    def _as_context(self):
+        return ClassContext(self)
+
+    def get_type_hint(self, add_class_info=True):
+        if add_class_info:
+            return 'Type[%s]' % self.py__name__()
+        return self.py__name__()
+
+    @inference_state_method_cache(default=False)
+    def is_typeddict(self):
+        # TODO Do a proper mro resolution. Currently we are just listing
+        # classes. However, it's a complicated algorithm.
+        from jedi.inference.gradual.typing import TypedDictClass
+        for lazy_cls in self.py__bases__():
+            if not isinstance(lazy_cls, LazyTreeValue):
+                return False
+            tree_node = lazy_cls.data
+            # Only resolve simple classes; stuff like Iterable[str] is more
+            # expensive to resolve, and if generics are involved, we know
+            # it's not a TypedDict.
+            if not expr_is_dotted(tree_node):
+                return False
+
+            for cls in lazy_cls.infer():
+                if isinstance(cls, TypedDictClass):
+                    return True
+                try:
+                    method = cls.is_typeddict
+                except AttributeError:
+                    # We're only dealing with simple classes, so just returning
+                    # here should be fine. This only happens with e.g. compiled
+                    # classes.
+                    return False
+                else:
+                    if method():
+                        return True
+        return False
+
+    def py__getitem__(self, index_value_set, contextualized_node):
+        from jedi.inference.gradual.base import GenericClass
+        if not index_value_set:
+            debug.warning('Class indexes inferred to nothing. Returning class instead')
+            return ValueSet([self])
+        return ValueSet(
+            GenericClass(
+                self,
+                LazyGenericManager(
+                    context_of_index=contextualized_node.context,
+                    index_value=index_value,
+                )
+            )
+            for index_value in index_value_set
+        )
+
+    def with_generics(self, generics_tuple):
+        from jedi.inference.gradual.base import GenericClass
+        return GenericClass(
+            self,
+            TupleGenericManager(generics_tuple)
+        )
+
+    def define_generics(self, type_var_dict):
+        from jedi.inference.gradual.base import GenericClass
+
+        def remap_type_vars():
+            """
+            The TypeVars in the resulting classes have sometimes different names
+            and we need to check for that, e.g. a signature can be:
+
+            def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...
+
+            However, the iterator is defined as Iterator[_T_co], which means it has
+            a different type var name.
+            """
+            for type_var in self.list_type_vars():
+                yield type_var_dict.get(type_var.py__name__(), NO_VALUES)
+
+        if type_var_dict:
+            return ValueSet([GenericClass(
+                self,
+                TupleGenericManager(tuple(remap_type_vars()))
+            )])
+        return ValueSet({self})


 class ClassValue(ClassMixin, FunctionAndClassBase, metaclass=CachedMetaClass):
     api_type = 'class'
+
+    @inference_state_method_cache()
+    def list_type_vars(self):
+        found = []
+        arglist = self.tree_node.get_super_arglist()
+        if arglist is None:
+            return []
+
+        for stars, node in unpack_arglist(arglist):
+            if stars:
+                continue  # These are not relevant for this search.
+
+            from jedi.inference.gradual.annotation import find_unknown_type_vars
+            for type_var in find_unknown_type_vars(self.parent_context, node):
+                if type_var not in found:
+                    # The order matters and it's therefore a list.
+                    found.append(type_var)
+        return found
+
+    def _get_bases_arguments(self):
+        arglist = self.tree_node.get_super_arglist()
+        if arglist:
+            from jedi.inference import arguments
+            return arguments.TreeArguments(self.inference_state, self.parent_context, arglist)
+        return None
+
+    @inference_state_method_cache(default=())
+    def py__bases__(self):
+        args = self._get_bases_arguments()
+        if args is not None:
+            lst = [value for key, value in args.unpack() if key is None]
+            if lst:
+                return lst
+
+        if self.py__name__() == 'object' \
+                and self.parent_context.is_builtins_module():
+            return []
+        return [LazyKnownValues(
+            self.inference_state.builtins_module.py__getattribute__('object')
+        )]
+
+    @plugin_manager.decorate()
+    def get_metaclass_filters(self, metaclasses, is_instance):
+        debug.warning('Unprocessed metaclass %s', metaclasses)
+        return []
+
+    @inference_state_method_cache(default=NO_VALUES)
+    def get_metaclasses(self):
+        args = self._get_bases_arguments()
+        if args is not None:
+            m = [value for key, value in args.unpack() if key == 'metaclass']
+            metaclasses = ValueSet.from_sets(lazy_value.infer() for lazy_value in m)
+            metaclasses = ValueSet(m for m in metaclasses if m.is_class())
+            if metaclasses:
+                return metaclasses
+
+        for lazy_base in self.py__bases__():
+            for value in lazy_base.infer():
+                if value.is_class():
+                    values = value.get_metaclasses()
+                    if values:
+                        return values
+        return NO_VALUES
+
+    @plugin_manager.decorate()
+    def get_metaclass_signatures(self, metaclasses):
+        return []
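
Note: the py__getitem__/with_generics pair above is what keeps type
arguments attached when a class gets subscripted. A minimal sketch of the
observable behaviour through jedi's public API (the source string,
position and expected result here are illustrative, not taken from the
diff):

    import jedi

    source = "from typing import List\nx: List[int] = []\ny = x[0]\n"
    # Inferring `y` goes through py__getitem__ on the annotated List
    # value; the generic stored via LazyGenericManager is what comes back.
    print([d.name for d in jedi.Script(source).infer(3, 0)])  # ['int']
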
diff --git a/jedi/inference/value/module.py b/jedi/inference/value/module.py
index c79b2dda..6461cb4b 100644
--- a/jedi/inference/value/module.py
+++ b/jedi/inference/value/module.py
@@ -1,6 +1,7 @@
 import os
 from pathlib import Path
 from typing import Optional
+
 from jedi.inference.cache import inference_state_method_cache
 from jedi.inference.names import AbstractNameDefinition, ModuleName
 from jedi.inference.filters import GlobalNameFilter, ParserTreeFilter, DictFilter, MergedFilter
@@ -24,51 +25,166 @@ class _ModuleAttributeName(AbstractNameDefinition):
         self.string_name = string_name
         self._string_value = string_value

+    def infer(self):
+        if self._string_value is not None:
+            s = self._string_value
+            return ValueSet([
+                create_simple_object(self.parent_context.inference_state, s)
+            ])
+        return compiled.get_string_value_set(self.parent_context.inference_state)

-class SubModuleDictMixin:

+class SubModuleDictMixin:
     @inference_state_method_cache()
     def sub_modules_dict(self):
         """
         Lists modules in the directory of this module (if this module is a
         package).
         """
-        pass
+        names = {}
+        if self.is_package():
+            mods = self.inference_state.compiled_subprocess.iter_module_names(
+                self.py__path__()
+            )
+            for name in mods:
+                # It's obviously a relative import to the current module.
+                names[name] = SubModuleName(self.as_context(), name)
+
+        # In the case of an import like `from x.` we don't need to
+        # add all the variables; this is only about submodules.
+        return names


 class ModuleMixin(SubModuleDictMixin):
     _module_name_class = ModuleName

+    def get_filters(self, origin_scope=None):
+        yield MergedFilter(
+            ParserTreeFilter(
+                parent_context=self.as_context(),
+                origin_scope=origin_scope
+            ),
+            GlobalNameFilter(self.as_context()),
+        )
+        yield DictFilter(self.sub_modules_dict())
+        yield DictFilter(self._module_attributes_dict())
+        yield from self.iter_star_filters()
+
+    def py__class__(self):
+        c, = values_from_qualified_names(self.inference_state, 'types', 'ModuleType')
+        return c
+
+    def is_module(self):
+        return True
+
+    def is_stub(self):
+        return False
+
+    @property  # type: ignore[misc]
+    @inference_state_method_cache()
+    def name(self):
+        return self._module_name_class(self, self.string_names[-1])
+
+    @inference_state_method_cache()
+    def _module_attributes_dict(self):
+        names = ['__package__', '__doc__', '__name__']
+        # All the additional module attributes are strings.
+        dct = dict((n, _ModuleAttributeName(self, n)) for n in names)
+        path = self.py__file__()
+        if path is not None:
+            dct['__file__'] = _ModuleAttributeName(self, '__file__', str(path))
+        return dct
+
+    def iter_star_filters(self):
+        for star_module in self.star_imports():
+            f = next(star_module.get_filters(), None)
+            assert f is not None
+            yield f
+
+    # I'm not sure if the star import cache is really that effective anymore
+    # with all the other really fast import caches. Recheck. Also we would need
+    # to push the star imports into InferenceState.module_cache, if we reenable this.
+    @inference_state_method_cache([])
+    def star_imports(self):
+        from jedi.inference.imports import Importer
+
+        modules = []
+        module_context = self.as_context()
+        for i in self.tree_node.iter_imports():
+            if i.is_star_import():
+                new = Importer(
+                    self.inference_state,
+                    import_path=i.get_paths()[-1],
+                    module_context=module_context,
+                    level=i.level
+                ).follow()
+
+                for module in new:
+                    if isinstance(module, ModuleValue):
+                        modules += module.star_imports()
+                modules += new
+        return modules
+
     def get_qualified_names(self):
         """
         A module doesn't have a qualified name, but it's important to note that
         it's reachable and not `None`. With this information we can add
         qualified names on top for all value children.
         """
-        pass
+        return ()


 class ModuleValue(ModuleMixin, TreeValue):
     api_type = 'module'

-    def __init__(self, inference_state, module_node, code_lines, file_io=
-        None, string_names=None, is_package=False):
-        super().__init__(inference_state, parent_context=None, tree_node=
-            module_node)
+    def __init__(self, inference_state, module_node, code_lines, file_io=None,
+                 string_names=None, is_package=False):
+        super().__init__(
+            inference_state,
+            parent_context=None,
+            tree_node=module_node
+        )
         self.file_io = file_io
         if file_io is None:
             self._path: Optional[Path] = None
         else:
             self._path = file_io.path
-        self.string_names = string_names
+        self.string_names = string_names  # Optional[Tuple[str, ...]]
         self.code_lines = code_lines
         self._is_package = is_package

-    def py__file__(self) ->Optional[Path]:
+    def is_stub(self):
+        if self._path is not None and self._path.suffix == '.pyi':
+            # Currently this is the way how we identify stubs when e.g. goto is
+            # used in them. This could be changed if stubs would be identified
+            # sooner and used as StubModuleValue.
+            return True
+        return super().is_stub()
+
+    def py__name__(self):
+        if self.string_names is None:
+            return None
+        return '.'.join(self.string_names)
+
+    def py__file__(self) -> Optional[Path]:
         """
         In contrast to Python's __file__, this can be None.
         """
-        pass
+        if self._path is None:
+            return None
+
+        return self._path.absolute()
+
+    def is_package(self):
+        return self._is_package
+
+    def py__package__(self):
+        if self.string_names is None:
+            return []
+
+        if self._is_package:
+            return self.string_names
+        return self.string_names[:-1]

     def py__path__(self):
         """
@@ -76,9 +192,39 @@ class ModuleValue(ModuleMixin, TreeValue):
         is a list of paths (strings).
         Returns None if the module is not a package.
         """
-        pass
+        if not self._is_package:
+            return None
+
+        # A namespace package __init__ is typically auto-generated and
+        # ~10 lines long.
+        first_few_lines = ''.join(self.code_lines[:50])
+        # These strings mark old-style namespace packages; the first one
+        # is used by ``pkgutil``, the second by ``pkg_resources``.
+        options = ('declare_namespace(__name__)', 'extend_path(__path__')
+        if options[0] in first_few_lines or options[1] in first_few_lines:
+            # It is a namespace, now try to find the rest of the
+            # modules on sys_path or whatever the search_path is.
+            paths = set()
+            for s in self.inference_state.get_sys_path():
+                other = os.path.join(s, self.name.string_name)
+                if os.path.isdir(other):
+                    paths.add(other)
+            if paths:
+                return list(paths)
+            # Nested namespace packages will not be supported. Nobody ever
+            # asked for it and in Python 3 they are there without using all the
+            # crap above.
+
+        # Default to the directory of this file.
+        file = self.py__file__()
+        assert file is not None  # Shouldn't be a package in the first place.
+        return [os.path.dirname(file)]
+
+    def _as_context(self):
+        return ModuleContext(self)

     def __repr__(self):
-        return '<%s: %s@%s-%s is_stub=%s>' % (self.__class__.__name__, self
-            .py__name__(), self.tree_node.start_pos[0], self.tree_node.
-            end_pos[0], self.is_stub())
+        return "<%s: %s@%s-%s is_stub=%s>" % (
+            self.__class__.__name__, self.py__name__(),
+            self.tree_node.start_pos[0], self.tree_node.end_pos[0],
+            self.is_stub()
+        )
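
Note: the namespace-package check in py__path__ is pure string matching.
A standalone sketch of the same heuristic (the function name is made up):

    def looks_like_legacy_namespace(code_lines):
        # Old-style namespace __init__ files are short and call either
        # pkgutil.extend_path or pkg_resources.declare_namespace.
        first_few_lines = ''.join(code_lines[:50])
        return ('declare_namespace(__name__)' in first_few_lines
                or 'extend_path(__path__' in first_few_lines)

    pkgutil_style = ["from pkgutil import extend_path\n",
                     "__path__ = extend_path(__path__, __name__)\n"]
    assert looks_like_legacy_namespace(pkgutil_style)
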
diff --git a/jedi/inference/value/namespace.py b/jedi/inference/value/namespace.py
index 33eb07fc..11737cc9 100644
--- a/jedi/inference/value/namespace.py
+++ b/jedi/inference/value/namespace.py
@@ -1,5 +1,6 @@
 from pathlib import Path
 from typing import Optional
+
 from jedi.inference.cache import inference_state_method_cache
 from jedi.inference.filters import DictFilter
 from jedi.inference.names import ValueNameMixin, AbstractNameDefinition
@@ -13,7 +14,6 @@ class ImplicitNSName(ValueNameMixin, AbstractNameDefinition):
     Accessing names for implicit namespace packages should infer to nothing.
     This object will prevent Jedi from raising exceptions
     """
-
     def __init__(self, implicit_ns_value, string_name):
         self._value = implicit_ns_value
         self.string_name = string_name
@@ -32,10 +32,43 @@ class ImplicitNamespaceValue(Value, SubModuleDictMixin):
         self.string_names = string_names
         self._paths = paths

+    def get_filters(self, origin_scope=None):
+        yield DictFilter(self.sub_modules_dict())
+
+    def get_qualified_names(self):
+        return ()
+
+    @property  # type: ignore[misc]
+    @inference_state_method_cache()
+    def name(self):
+        string_name = self.py__package__()[-1]
+        return ImplicitNSName(self, string_name)
+
+    def py__file__(self) -> Optional[Path]:
+        return None
+
     def py__package__(self):
         """Return the fullname
         """
-        pass
+        return self.string_names
+
+    def py__path__(self):
+        return self._paths
+
+    def py__name__(self):
+        return '.'.join(self.string_names)
+
+    def is_namespace(self):
+        return True
+
+    def is_stub(self):
+        return False
+
+    def is_package(self):
+        return True
+
+    def as_context(self):
+        return NamespaceContext(self)

     def __repr__(self):
         return '<%s: %s>' % (self.__class__.__name__, self.py__name__())
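
Note: the semantics mirrored by ImplicitNamespaceValue can be checked
against a real PEP 420 package: it exposes __path__ but no usable
__file__. 'google' is only an example distribution and may not be
installed, hence the guard:

    import importlib

    try:
        ns = importlib.import_module('google')  # any namespace package
    except ModuleNotFoundError:
        ns = None
    if ns is not None:
        print(getattr(ns, '__file__', None))  # None, like py__file__()
        print(list(ns.__path__))              # directories, like py__path__()
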
diff --git a/jedi/parser_utils.py b/jedi/parser_utils.py
index 75857eda..3c7fa151 100644
--- a/jedi/parser_utils.py
+++ b/jedi/parser_utils.py
@@ -3,22 +3,69 @@ import textwrap
 from ast import literal_eval
 from inspect import cleandoc
 from weakref import WeakKeyDictionary
+
 from parso.python import tree
 from parso.cache import parser_cache
 from parso import split_lines
-_EXECUTE_NODES = {'funcdef', 'classdef', 'import_from', 'import_name',
-    'test', 'or_test', 'and_test', 'not_test', 'comparison', 'expr',
-    'xor_expr', 'and_expr', 'shift_expr', 'arith_expr', 'atom_expr', 'term',
-    'factor', 'power', 'atom'}
-_FLOW_KEYWORDS = ('try', 'except', 'finally', 'else', 'if', 'elif', 'with',
-    'for', 'while')
+
+_EXECUTE_NODES = {'funcdef', 'classdef', 'import_from', 'import_name', 'test',
+                  'or_test', 'and_test', 'not_test', 'comparison', 'expr',
+                  'xor_expr', 'and_expr', 'shift_expr', 'arith_expr',
+                  'atom_expr', 'term', 'factor', 'power', 'atom'}
+
+_FLOW_KEYWORDS = (
+    'try', 'except', 'finally', 'else', 'if', 'elif', 'with', 'for', 'while'
+)


 def get_executable_nodes(node, last_added=False):
     """
     For static analysis.
     """
-    pass
+    result = []
+    typ = node.type
+    if typ == 'name':
+        next_leaf = node.get_next_leaf()
+        if last_added is False and node.parent.type != 'param' and next_leaf != '=':
+            result.append(node)
+    elif typ == 'expr_stmt':
+        # I think inferring the statement (and possibly returned arrays),
+        # should be enough for static analysis.
+        result.append(node)
+        for child in node.children:
+            result += get_executable_nodes(child, last_added=True)
+    elif typ == 'decorator':
+        # decorator
+        if node.children[-2] == ')':
+            node = node.children[-3]
+            if node != '(':
+                result += get_executable_nodes(node)
+    else:
+        try:
+            children = node.children
+        except AttributeError:
+            pass
+        else:
+            if node.type in _EXECUTE_NODES and not last_added:
+                result.append(node)
+
+            for child in children:
+                result += get_executable_nodes(child, last_added)
+
+    return result
+
+
+def get_sync_comp_fors(comp_for):
+    yield comp_for
+    last = comp_for.children[-1]
+    while True:
+        if last.type == 'comp_for':
+            yield last.children[1]  # Ignore the async.
+        elif last.type == 'sync_comp_for':
+            yield last
+        elif not last.type == 'comp_if':
+            break
+        last = last.children[-1]


 def for_stmt_defines_one_name(for_stmt):
@@ -28,16 +75,60 @@ def for_stmt_defines_one_name(for_stmt):

     :returns: bool
     """
-    pass
+    return for_stmt.children[1].type == 'name'
+
+
+def get_flow_branch_keyword(flow_node, node):
+    start_pos = node.start_pos
+    if not (flow_node.start_pos < start_pos <= flow_node.end_pos):
+        raise ValueError('The node is not part of the flow.')
+
+    keyword = None
+    for i, child in enumerate(flow_node.children):
+        if start_pos < child.start_pos:
+            return keyword
+        first_leaf = child.get_first_leaf()
+        if first_leaf in _FLOW_KEYWORDS:
+            keyword = first_leaf
+    return None


 def clean_scope_docstring(scope_node):
     """ Returns a cleaned version of the docstring token. """
-    pass
+    node = scope_node.get_doc_node()
+    if node is not None:
+        # TODO We have to check next leaves until there are no new
+        # leaves anymore that might be part of the docstring. A
+        # docstring can also look like this: ``'foo' 'bar'``.
+        # Returns a literal cleaned version of the ``Token``.
+        return cleandoc(safe_literal_eval(node.value))
+    return ''
+
+
+def find_statement_documentation(tree_node):
+    if tree_node.type == 'expr_stmt':
+        tree_node = tree_node.parent  # simple_stmt
+        maybe_string = tree_node.get_next_sibling()
+        if maybe_string is not None:
+            if maybe_string.type == 'simple_stmt':
+                maybe_string = maybe_string.children[0]
+                if maybe_string.type == 'string':
+                    return cleandoc(safe_literal_eval(maybe_string.value))
+    return ''
+
+
+def safe_literal_eval(value):
+    first_two = value[:2].lower()
+    if first_two[0] == 'f' or first_two in ('fr', 'rf'):
+        # literal_eval is not able to resolve f-string literals. We would
+        # have to do that manually, but that is currently not implemented.
+        return ''

+    return literal_eval(value)

-def get_signature(funcdef, width=72, call_string=None, omit_first_param=
-    False, omit_return_annotation=False):
+
+def get_signature(funcdef, width=72, call_string=None,
+                  omit_first_param=False, omit_return_annotation=False):
     """
     Generate a string signature of a function.

@@ -48,14 +139,38 @@ def get_signature(funcdef, width=72, call_string=None, omit_first_param=

     :rtype: str
     """
-    pass
+    # Lambdas have no name.
+    if call_string is None:
+        if funcdef.type == 'lambdef':
+            call_string = '<lambda>'
+        else:
+            call_string = funcdef.name.value
+    params = funcdef.get_params()
+    if omit_first_param:
+        params = params[1:]
+    p = '(' + ''.join(param.get_code() for param in params).strip() + ')'
+    # TODO this is pretty bad, we should probably just normalize.
+    p = re.sub(r'\s+', ' ', p)
+    if funcdef.annotation and not omit_return_annotation:
+        rtype = " ->" + funcdef.annotation.get_code()
+    else:
+        rtype = ""
+    code = call_string + p + rtype
+
+    return '\n'.join(textwrap.wrap(code, width))


 def move(node, line_offset):
     """
     Move the `Node` start_pos.
     """
-    pass
+    try:
+        children = node.children
+    except AttributeError:
+        node.line += line_offset
+    else:
+        for c in children:
+            move(c, line_offset)


 def get_following_comment_same_line(node):
@@ -63,14 +178,91 @@ def get_following_comment_same_line(node):
     returns (as string) any comment that appears on the same line,
     after the node, including the #
     """
-    pass
+    try:
+        if node.type == 'for_stmt':
+            whitespace = node.children[5].get_first_leaf().prefix
+        elif node.type == 'with_stmt':
+            whitespace = node.children[3].get_first_leaf().prefix
+        elif node.type == 'funcdef':
+            # actually on the next line
+            whitespace = node.children[4].get_first_leaf().get_next_leaf().prefix
+        else:
+            whitespace = node.get_last_leaf().get_next_leaf().prefix
+    except AttributeError:
+        return None
+    except ValueError:
+        # TODO in some particular cases, the tree doesn't seem to be linked
+        # correctly
+        return None
+    if "#" not in whitespace:
+        return None
+    comment = whitespace[whitespace.index("#"):]
+    if "\r" in comment:
+        comment = comment[:comment.index("\r")]
+    if "\n" in comment:
+        comment = comment[:comment.index("\n")]
+    return comment
+
+
+def is_scope(node):
+    t = node.type
+    if t == 'comp_for':
+        # Starting with Python 3.8, async is outside of the statement.
+        return node.children[1].type != 'sync_comp_for'
+
+    return t in ('file_input', 'classdef', 'funcdef', 'lambdef', 'sync_comp_for')
+
+
+def _get_parent_scope_cache(func):
+    cache = WeakKeyDictionary()
+
+    def wrapper(parso_cache_node, node, include_flows=False):
+        if parso_cache_node is None:
+            return func(node, include_flows)
+
+        try:
+            for_module = cache[parso_cache_node]
+        except KeyError:
+            for_module = cache[parso_cache_node] = {}
+
+        try:
+            return for_module[node]
+        except KeyError:
+            result = for_module[node] = func(node, include_flows)
+            return result
+    return wrapper


 def get_parent_scope(node, include_flows=False):
     """
     Returns the underlying scope.
     """
-    pass
+    scope = node.parent
+    if scope is None:
+        return None  # It's a module already.
+
+    while True:
+        if is_scope(scope):
+            if scope.type in ('classdef', 'funcdef', 'lambdef'):
+                index = scope.children.index(':')
+                if scope.children[index].start_pos >= node.start_pos:
+                    if node.parent.type == 'param' and node.parent.name == node:
+                        pass
+                    elif node.parent.type == 'tfpdef' and node.parent.children[0] == node:
+                        pass
+                    else:
+                        scope = scope.parent
+                        continue
+            return scope
+        elif include_flows and isinstance(scope, tree.Flow):
+            # The cursor might be on `if foo`, so the parent scope will not be
+            # the if, but the parent of the if.
+            if not (scope.type == 'if_stmt'
+                    and any(n.start_pos <= node.start_pos < n.end_pos
+                            for n in scope.get_test_nodes())):
+                return scope
+
+        scope = scope.parent


 get_cached_parent_scope = _get_parent_scope_cache(get_parent_scope)
@@ -81,7 +273,7 @@ def get_cached_code_lines(grammar, path):
     Basically access the cached code lines in parso. This is not the nicest way
     to do this, but we avoid splitting all the lines again.
     """
-    pass
+    return get_parso_cache_node(grammar, path).lines


 def get_parso_cache_node(grammar, path):
@@ -92,21 +284,56 @@ def get_parso_cache_node(grammar, path):
     The reason for this is mostly caching. This is obviously also a sign of a
     broken caching architecture.
     """
-    pass
+    return parser_cache[grammar._hashed][path]


 def cut_value_at_position(leaf, position):
     """
     Cuts off the value of the leaf at the given position.
     """
-    pass
+    lines = split_lines(leaf.value, keepends=True)[:position[0] - leaf.line + 1]
+    column = position[1]
+    if leaf.line == position[0]:
+        column -= leaf.column
+    if not lines:
+        return ''
+    lines[-1] = lines[-1][:column]
+    return ''.join(lines)


 def expr_is_dotted(node):
     """
     Checks if a path looks like `name` or `name.foo.bar` and not `name()`.
     """
-    pass
+    if node.type == 'atom':
+        if len(node.children) == 3 and node.children[0] == '(':
+            return expr_is_dotted(node.children[1])
+        return False
+    if node.type == 'atom_expr':
+        children = node.children
+        if children[0] == 'await':
+            return False
+        if not expr_is_dotted(children[0]):
+            return False
+        # Check trailers
+        return all(c.children[0] == '.' for c in children[1:])
+    return node.type == 'name'
+
+
+def _function_is_x_method(*method_names):
+    def wrapper(function_node):
+        """
+        This is a heuristic. It will not hold ALL the times, but it will be
+        correct pretty much for anyone that doesn't try to beat it.
+        staticmethod/classmethod are builtins and unless overwritten, this will
+        be correct.
+        """
+        for decorator in function_node.get_decorators():
+            dotted_name = decorator.children[1]
+            if dotted_name.get_code() in method_names:
+                return True
+        return False
+    return wrapper


 function_is_staticmethod = _function_is_x_method('staticmethod')
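
Note: a quick sketch of get_parent_scope on a parso tree (the position
used here is illustrative):

    import parso
    from jedi.parser_utils import get_parent_scope

    module = parso.parse('def f(x):\n    return x\n')
    leaf = module.get_leaf_for_position((2, 11))  # the `x` in `return x`
    print(get_parent_scope(leaf).type)            # 'funcdef'
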
diff --git a/jedi/plugins/django.py b/jedi/plugins/django.py
index b9ceb2ec..cd443bbd 100644
--- a/jedi/plugins/django.py
+++ b/jedi/plugins/django.py
@@ -2,6 +2,7 @@
 Module is used to infer Django model fields.
 """
 from inspect import Parameter
+
 from jedi import debug
 from jedi.inference.cache import inference_state_function_cache
 from jedi.inference.base_value import ValueSet, iterator_to_value_set, ValueWrapper
@@ -13,67 +14,283 @@ from jedi.inference.value.klass import ClassMixin
 from jedi.inference.gradual.base import GenericClass
 from jedi.inference.gradual.generics import TupleGenericManager
 from jedi.inference.signature import AbstractSignature
-mapping = {'IntegerField': (None, 'int'), 'BigIntegerField': (None, 'int'),
-    'PositiveIntegerField': (None, 'int'), 'SmallIntegerField': (None,
-    'int'), 'CharField': (None, 'str'), 'TextField': (None, 'str'),
-    'EmailField': (None, 'str'), 'GenericIPAddressField': (None, 'str'),
-    'URLField': (None, 'str'), 'FloatField': (None, 'float'), 'BinaryField':
-    (None, 'bytes'), 'BooleanField': (None, 'bool'), 'DecimalField': (
-    'decimal', 'Decimal'), 'TimeField': ('datetime', 'time'),
-    'DurationField': ('datetime', 'timedelta'), 'DateField': ('datetime',
-    'date'), 'DateTimeField': ('datetime', 'datetime'), 'UUIDField': (
-    'uuid', 'UUID')}
+
+
+mapping = {
+    'IntegerField': (None, 'int'),
+    'BigIntegerField': (None, 'int'),
+    'PositiveIntegerField': (None, 'int'),
+    'SmallIntegerField': (None, 'int'),
+    'CharField': (None, 'str'),
+    'TextField': (None, 'str'),
+    'EmailField': (None, 'str'),
+    'GenericIPAddressField': (None, 'str'),
+    'URLField': (None, 'str'),
+    'FloatField': (None, 'float'),
+    'BinaryField': (None, 'bytes'),
+    'BooleanField': (None, 'bool'),
+    'DecimalField': ('decimal', 'Decimal'),
+    'TimeField': ('datetime', 'time'),
+    'DurationField': ('datetime', 'timedelta'),
+    'DateField': ('datetime', 'date'),
+    'DateTimeField': ('datetime', 'datetime'),
+    'UUIDField': ('uuid', 'UUID'),
+}
+
 _FILTER_LIKE_METHODS = ('create', 'filter', 'exclude', 'update', 'get',
-    'get_or_create', 'update_or_create')
+                        'get_or_create', 'update_or_create')


-class DjangoModelName(NameWrapper):
+@inference_state_function_cache()
+def _get_deferred_attributes(inference_state):
+    return inference_state.import_module(
+        ('django', 'db', 'models', 'query_utils')
+    ).py__getattribute__('DeferredAttribute').execute_annotation()
+
+
+def _infer_scalar_field(inference_state, field_name, field_tree_instance, is_instance):
+    try:
+        module_name, attribute_name = mapping[field_tree_instance.py__name__()]
+    except KeyError:
+        return None
+
+    if not is_instance:
+        return _get_deferred_attributes(inference_state)
+
+    if module_name is None:
+        module = inference_state.builtins_module
+    else:
+        module = inference_state.import_module((module_name,))
+
+    for attribute in module.py__getattribute__(attribute_name):
+        return attribute.execute_with_values()
+
+
+@iterator_to_value_set
+def _get_foreign_key_values(cls, field_tree_instance):
+    if isinstance(field_tree_instance, TreeInstance):
+        # TODO private access..
+        argument_iterator = field_tree_instance._arguments.unpack()
+        key, lazy_values = next(argument_iterator, (None, None))
+        if key is None and lazy_values is not None:
+            for value in lazy_values.infer():
+                if value.py__name__() == 'str':
+                    foreign_key_class_name = value.get_safe_value()
+                    module = cls.get_root_context()
+                    for v in module.py__getattribute__(foreign_key_class_name):
+                        if v.is_class():
+                            yield v
+                elif value.is_class():
+                    yield value
+

+def _infer_field(cls, field_name, is_instance):
+    inference_state = cls.inference_state
+    result = field_name.infer()
+    for field_tree_instance in result:
+        scalar_field = _infer_scalar_field(
+            inference_state, field_name, field_tree_instance, is_instance)
+        if scalar_field is not None:
+            return scalar_field
+
+        name = field_tree_instance.py__name__()
+        is_many_to_many = name == 'ManyToManyField'
+        if name in ('ForeignKey', 'OneToOneField') or is_many_to_many:
+            if not is_instance:
+                return _get_deferred_attributes(inference_state)
+
+            values = _get_foreign_key_values(cls, field_tree_instance)
+            if is_many_to_many:
+                return ValueSet(filter(None, [
+                    _create_manager_for(v, 'RelatedManager') for v in values
+                ]))
+            else:
+                return values.execute_with_values()
+
+    debug.dbg('django plugin: failed to infer `%s` from class `%s`',
+              field_name.string_name, cls.py__name__())
+    return result
+
+
+class DjangoModelName(NameWrapper):
     def __init__(self, cls, name, is_instance):
         super().__init__(name)
         self._cls = cls
         self._is_instance = is_instance

+    def infer(self):
+        return _infer_field(self._cls, self._wrapped_name, self._is_instance)
+
+
+def _create_manager_for(cls, manager_cls='BaseManager'):
+    managers = cls.inference_state.import_module(
+        ('django', 'db', 'models', 'manager')
+    ).py__getattribute__(manager_cls)
+    for m in managers:
+        if m.is_class_mixin():
+            generics_manager = TupleGenericManager((ValueSet([cls]),))
+            for c in GenericClass(m, generics_manager).execute_annotation():
+                return c
+    return None
+
+
+def _new_dict_filter(cls, is_instance):
+    filters = list(cls.get_filters(
+        is_instance=is_instance,
+        include_metaclasses=False,
+        include_type_when_class=False)
+    )
+    dct = {
+        name.string_name: DjangoModelName(cls, name, is_instance)
+        for filter_ in reversed(filters)
+        for name in filter_.values()
+    }
+    if is_instance:
+        # Replace the objects with a name that amounts to nothing when accessed
+        # in an instance. This is not perfect and still completes "objects" in
+        # that case, but at least it does not infer stuff like `.objects.filter`.
+        # It would be nicer to handle this so that it also doesn't show up in
+        # completions, but that's probably not worth the extra work.
+        dct['objects'] = EmptyCompiledName(cls.inference_state, 'objects')
+
+    return DictFilter(dct)
+
+
+def is_django_model_base(value):
+    return value.py__name__() == 'ModelBase' \
+        and value.get_root_context().py__name__() == 'django.db.models.base'
+
+
+def get_metaclass_filters(func):
+    def wrapper(cls, metaclasses, is_instance):
+        for metaclass in metaclasses:
+            if is_django_model_base(metaclass):
+                return [_new_dict_filter(cls, is_instance)]
+
+        return func(cls, metaclasses, is_instance)
+    return wrapper
+
+
+def tree_name_to_values(func):
+    def wrapper(inference_state, context, tree_name):
+        result = func(inference_state, context, tree_name)
+        if tree_name.value in _FILTER_LIKE_METHODS:
+            # Here we try to overwrite stuff like User.objects.filter. We need
+            # this to make sure that keyword param completion works on these
+            # kinds of methods.
+            for v in result:
+                if v.get_qualified_names() == ('_BaseQuerySet', tree_name.value) \
+                        and v.parent_context.is_module() \
+                        and v.parent_context.py__name__() == 'django.db.models.query':
+                    qs = context.get_value()
+                    generics = qs.get_generics()
+                    if len(generics) >= 1:
+                        return ValueSet(QuerySetMethodWrapper(v, model)
+                                        for model in generics[0])
+
+        elif tree_name.value == 'BaseManager' and context.is_module() \
+                and context.py__name__() == 'django.db.models.manager':
+            return ValueSet(ManagerWrapper(r) for r in result)
+
+        elif tree_name.value == 'Field' and context.is_module() \
+                and context.py__name__() == 'django.db.models.fields':
+            return ValueSet(FieldWrapper(r) for r in result)
+        return result
+    return wrapper
+
+
+def _find_fields(cls):
+    for name in _new_dict_filter(cls, is_instance=False).values():
+        for value in name.infer():
+            if value.name.get_qualified_names(include_module_names=True) \
+                    == ('django', 'db', 'models', 'query_utils', 'DeferredAttribute'):
+                yield name
+
+
+def _get_signatures(cls):
+    return [DjangoModelSignature(cls, field_names=list(_find_fields(cls)))]
+
+
+def get_metaclass_signatures(func):
+    def wrapper(cls, metaclasses):
+        for metaclass in metaclasses:
+            if is_django_model_base(metaclass):
+                return _get_signatures(cls)
+        # The wrapper receives `metaclasses`; `metaclass` is only the loop
+        # variable and may be unbound here.
+        return func(cls, metaclasses)
+    return wrapper
+

 class ManagerWrapper(ValueWrapper):
-    pass
+    def py__getitem__(self, index_value_set, contextualized_node):
+        return ValueSet(
+            GenericManagerWrapper(generic)
+            for generic in self._wrapped_value.py__getitem__(
+                index_value_set, contextualized_node)
+        )


 class GenericManagerWrapper(AttributeOverwrite, ClassMixin):
-    pass
+    def py__get__on_class(self, calling_instance, instance, class_value):
+        return calling_instance.class_value.with_generics(
+            (ValueSet({class_value}),)
+        ).py__call__(calling_instance._arguments)
+
+    def with_generics(self, generics_tuple):
+        return self._wrapped_value.with_generics(generics_tuple)


 class FieldWrapper(ValueWrapper):
-    pass
+    def py__getitem__(self, index_value_set, contextualized_node):
+        return ValueSet(
+            GenericFieldWrapper(generic)
+            for generic in self._wrapped_value.py__getitem__(
+                index_value_set, contextualized_node)
+        )


 class GenericFieldWrapper(AttributeOverwrite, ClassMixin):
-    pass
+    def py__get__on_class(self, calling_instance, instance, class_value):
+        # This is mostly an optimization to avoid Jedi aborting inference,
+        # because of too many function executions of Field.__get__.
+        return ValueSet({calling_instance})


 class DjangoModelSignature(AbstractSignature):
-
     def __init__(self, value, field_names):
         super().__init__(value)
         self._field_names = field_names

+    def get_param_names(self, resolve_stars=False):
+        return [DjangoParamName(name) for name in self._field_names]

-class DjangoParamName(BaseTreeParamName):

+class DjangoParamName(BaseTreeParamName):
     def __init__(self, field_name):
         super().__init__(field_name.parent_context, field_name.tree_name)
         self._field_name = field_name

+    def get_kind(self):
+        return Parameter.KEYWORD_ONLY
+
+    def infer(self):
+        return self._field_name.infer()

-class QuerySetMethodWrapper(ValueWrapper):

+class QuerySetMethodWrapper(ValueWrapper):
     def __init__(self, method, model_cls):
         super().__init__(method)
         self._model_cls = model_cls

+    def py__get__(self, instance, class_value):
+        return ValueSet({QuerySetBoundMethodWrapper(v, self._model_cls)
+                         for v in self._wrapped_value.py__get__(instance, class_value)})

-class QuerySetBoundMethodWrapper(ValueWrapper):

+class QuerySetBoundMethodWrapper(ValueWrapper):
     def __init__(self, method, model_cls):
         super().__init__(method)
         self._model_cls = model_cls
+
+    def get_signatures(self):
+        return _get_signatures(self._model_cls)
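
Note: the scalar-field rule in _infer_scalar_field boils down to a lookup
in the `mapping` table. A self-contained toy version (the helper name and
the table subset are for illustration only):

    # Subset of the mapping table above.
    mapping = {
        'CharField': (None, 'str'),
        'DecimalField': ('decimal', 'Decimal'),
    }

    def scalar_python_type(field_class_name):
        try:
            module_name, attribute_name = mapping[field_class_name]
        except KeyError:
            return None  # relational fields are handled separately
        if module_name is None:
            return attribute_name  # builtin type
        return module_name + '.' + attribute_name

    assert scalar_python_type('CharField') == 'str'
    assert scalar_python_type('DecimalField') == 'decimal.Decimal'
    assert scalar_python_type('ForeignKey') is None
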
diff --git a/jedi/plugins/flask.py b/jedi/plugins/flask.py
index d3c813ea..8d67b839 100644
--- a/jedi/plugins/flask.py
+++ b/jedi/plugins/flask.py
@@ -3,4 +3,19 @@ def import_module(callback):
     Handle "magic" Flask extension imports:
     ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
     """
-    pass
+    def wrapper(inference_state, import_names, module_context, *args, **kwargs):
+        if len(import_names) == 3 and import_names[:2] == ('flask', 'ext'):
+            # New style.
+            ipath = ('flask_' + import_names[2]),
+            value_set = callback(inference_state, ipath, None, *args, **kwargs)
+            if value_set:
+                return value_set
+            value_set = callback(inference_state, ('flaskext',), None, *args, **kwargs)
+            return callback(
+                inference_state,
+                ('flaskext', import_names[2]),
+                next(iter(value_set)),
+                *args, **kwargs
+            )
+        return callback(inference_state, import_names, module_context, *args, **kwargs)
+    return wrapper
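
Note: the flask.ext aliasing is pure name rewriting; a toy version with
no inference machinery (the function name is made up):

    def flask_ext_candidates(import_names):
        # ('flask', 'ext', 'foo') -> try flask_foo, then flaskext.foo
        if len(import_names) == 3 and import_names[:2] == ('flask', 'ext'):
            return [('flask_' + import_names[2],),
                    ('flaskext', import_names[2])]
        return [import_names]

    print(flask_ext_candidates(('flask', 'ext', 'sqlalchemy')))
    # [('flask_sqlalchemy',), ('flaskext', 'sqlalchemy')]
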
diff --git a/jedi/plugins/pytest.py b/jedi/plugins/pytest.py
index 5295fd78..06adda44 100644
--- a/jedi/plugins/pytest.py
+++ b/jedi/plugins/pytest.py
@@ -1,15 +1,116 @@
 import sys
 from typing import List
 from pathlib import Path
+
 from parso.tree import search_ancestor
 from jedi.inference.cache import inference_state_method_cache
 from jedi.inference.imports import goto_import, load_module_from_path
 from jedi.inference.filters import ParserTreeFilter
 from jedi.inference.base_value import NO_VALUES, ValueSet
 from jedi.inference.helpers import infer_call_of_leaf
-_PYTEST_FIXTURE_MODULES = [('_pytest', 'monkeypatch'), ('_pytest',
-    'capture'), ('_pytest', 'logging'), ('_pytest', 'tmpdir'), ('_pytest',
-    'pytester')]
+
+_PYTEST_FIXTURE_MODULES = [
+    ('_pytest', 'monkeypatch'),
+    ('_pytest', 'capture'),
+    ('_pytest', 'logging'),
+    ('_pytest', 'tmpdir'),
+    ('_pytest', 'pytester'),
+]
+
+
+def execute(callback):
+    def wrapper(value, arguments):
+        # This might not be necessary anymore in pytest 4/5, definitely needed
+        # for pytest 3.
+        if value.py__name__() == 'fixture' \
+                and value.parent_context.py__name__() == '_pytest.fixtures':
+            return NO_VALUES
+
+        return callback(value, arguments)
+    return wrapper
+
+
+def infer_anonymous_param(func):
+    def get_returns(value):
+        if value.tree_node.annotation is not None:
+            result = value.execute_with_values()
+            if any(v.name.get_qualified_names(include_module_names=True)
+                   == ('typing', 'Generator')
+                   for v in result):
+                return ValueSet.from_sets(
+                    v.py__getattribute__('__next__').execute_annotation()
+                    for v in result
+                )
+            return result
+
+        # In pytest we need to differentiate between generators and normal
+        # returns.
+        # Parameters still need to be anonymous, .as_context() ensures that.
+        function_context = value.as_context()
+        if function_context.is_generator():
+            return function_context.merge_yield_values()
+        else:
+            return function_context.get_return_values()
+
+    def wrapper(param_name):
+        # parameters with an annotation do not need special handling
+        if param_name.annotation_node:
+            return func(param_name)
+        is_pytest_param, param_name_is_function_name = \
+            _is_a_pytest_param_and_inherited(param_name)
+        if is_pytest_param:
+            module = param_name.get_root_context()
+            fixtures = _goto_pytest_fixture(
+                module,
+                param_name.string_name,
+                # This skips the current module, because we are basically
+                # inheriting a fixture from somewhere else.
+                skip_own_module=param_name_is_function_name,
+            )
+            if fixtures:
+                return ValueSet.from_sets(
+                    get_returns(value)
+                    for fixture in fixtures
+                    for value in fixture.infer()
+                )
+        return func(param_name)
+    return wrapper
+
+
+def goto_anonymous_param(func):
+    def wrapper(param_name):
+        is_pytest_param, param_name_is_function_name = \
+            _is_a_pytest_param_and_inherited(param_name)
+        if is_pytest_param:
+            names = _goto_pytest_fixture(
+                param_name.get_root_context(),
+                param_name.string_name,
+                skip_own_module=param_name_is_function_name,
+            )
+            if names:
+                return names
+        return func(param_name)
+    return wrapper
+
+
+def complete_param_names(func):
+    def wrapper(context, func_name, decorator_nodes):
+        module_context = context.get_root_context()
+        if _is_pytest_func(func_name, decorator_nodes):
+            names = []
+            for module_context in _iter_pytest_modules(module_context):
+                names += FixtureFilter(module_context).values()
+            if names:
+                return names
+        return func(context, func_name, decorator_nodes)
+    return wrapper
+
+
+def _goto_pytest_fixture(module_context, name, skip_own_module):
+    for module_context in _iter_pytest_modules(module_context, skip_own_module=skip_own_module):
+        names = FixtureFilter(module_context).get(name)
+        if names:
+            return names


 def _is_a_pytest_param_and_inherited(param_name):
@@ -19,17 +120,131 @@ def _is_a_pytest_param_and_inherited(param_name):

     This is a heuristic and will work in most cases.
     """
-    pass
+    funcdef = search_ancestor(param_name.tree_name, 'funcdef')
+    if funcdef is None:  # A lambda
+        return False, False
+    decorators = funcdef.get_decorators()
+    return _is_pytest_func(funcdef.name.value, decorators), \
+        funcdef.name.value == param_name.string_name


-def _find_pytest_plugin_modules() ->List[List[str]]:
+def _is_pytest_func(func_name, decorator_nodes):
+    return func_name.startswith('test') \
+        or any('fixture' in n.get_code() for n in decorator_nodes)
+
+
+def _find_pytest_plugin_modules() -> List[List[str]]:
     """
     Finds pytest plugin modules hooked by setuptools entry points

     See https://docs.pytest.org/en/stable/how-to/writing_plugins.html#setuptools-entry-points
     """
-    pass
+    if sys.version_info >= (3, 8):
+        from importlib.metadata import entry_points
+
+        if sys.version_info >= (3, 10):
+            pytest_entry_points = entry_points(group="pytest11")
+        else:
+            pytest_entry_points = entry_points().get("pytest11", ())
+
+        if sys.version_info >= (3, 9):
+            return [ep.module.split(".") for ep in pytest_entry_points]
+        else:
+            # Python 3.8 doesn't have `EntryPoint.module`. Implement equivalent
+            # to what Python 3.9 does (with additional None check to placate `mypy`)
+            matches = [
+                ep.pattern.match(ep.value)
+                for ep in pytest_entry_points
+            ]
+            return [x.group('module').split(".") for x in matches if x]
+
+    else:
+        from pkg_resources import iter_entry_points
+        return [ep.module_name.split(".") for ep in iter_entry_points(group="pytest11")]
+
+
+@inference_state_method_cache()
+def _iter_pytest_modules(module_context, skip_own_module=False):
+    if not skip_own_module:
+        yield module_context
+
+    file_io = module_context.get_value().file_io
+    if file_io is not None:
+        folder = file_io.get_parent_folder()
+        sys_path = module_context.inference_state.get_sys_path()
+
+        # prevent an infinite loop when reaching the root of the current drive
+        last_folder = None
+
+        while any(folder.path.startswith(p) for p in sys_path):
+            file_io = folder.get_file_io('conftest.py')
+            if Path(file_io.path) != module_context.py__file__():
+                try:
+                    m = load_module_from_path(module_context.inference_state, file_io)
+                    yield m.as_context()
+                except FileNotFoundError:
+                    pass
+            folder = folder.get_parent_folder()
+
+            # prevent an infinite loop if the same parent folder is returned twice
+            if last_folder is not None and folder.path == last_folder.path:
+                break
+            last_folder = folder  # keep track of the last visited parent folder
+
+    for names in _PYTEST_FIXTURE_MODULES + _find_pytest_plugin_modules():
+        for module_value in module_context.inference_state.import_module(names):
+            yield module_value.as_context()


 class FixtureFilter(ParserTreeFilter):
-    pass
+    def _filter(self, names):
+        for name in super()._filter(names):
+            # look for fixture definitions of imported names
+            if name.parent.type == "import_from":
+                imported_names = goto_import(self.parent_context, name)
+                if any(
+                    self._is_fixture(iname.parent_context, iname.tree_name)
+                    for iname in imported_names
+                    # discard imports of whole modules, that have no tree_name
+                    if iname.tree_name
+                ):
+                    yield name
+
+            elif self._is_fixture(self.parent_context, name):
+                yield name
+
+    def _is_fixture(self, context, name):
+        funcdef = name.parent
+        # Class fixtures are not supported
+        if funcdef.type != "funcdef":
+            return False
+        decorated = funcdef.parent
+        if decorated.type != "decorated":
+            return False
+        decorators = decorated.children[0]
+        if decorators.type == 'decorators':
+            decorators = decorators.children
+        else:
+            decorators = [decorators]
+        for decorator in decorators:
+            dotted_name = decorator.children[1]
+            # A heuristic, this makes it faster.
+            if 'fixture' in dotted_name.get_code():
+                if dotted_name.type == 'atom_expr':
+                    # Since Python 3.9 a decorator is no longer restricted to
+                    # dotted names.
+                    last_trailer = dotted_name.children[-1]
+                    last_leaf = last_trailer.get_last_leaf()
+                    if last_leaf == ')':
+                        values = infer_call_of_leaf(
+                            context, last_leaf, cut_own_trailer=True
+                        )
+                    else:
+                        values = context.infer_node(dotted_name)
+                else:
+                    values = context.infer_node(dotted_name)
+                for value in values:
+                    if value.name.get_qualified_names(include_module_names=True) \
+                            == ('_pytest', 'fixtures', 'fixture'):
+                        return True
+        return False
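
Note: the conftest.py discovery in _iter_pytest_modules is an upward
directory walk bounded by sys.path. A simplified, filesystem-only sketch
(names are made up):

    from pathlib import Path

    def iter_conftest_paths(start_dir: Path, sys_path_roots):
        folder, last = start_dir, None
        while any(str(folder).startswith(root) for root in sys_path_roots):
            candidate = folder / 'conftest.py'
            if candidate.is_file():
                yield candidate
            folder = folder.parent
            if last is not None and folder == last:
                break  # reached the filesystem root
            last = folder
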
diff --git a/jedi/plugins/registry.py b/jedi/plugins/registry.py
index 0d06a5e6..c1a0b749 100644
--- a/jedi/plugins/registry.py
+++ b/jedi/plugins/registry.py
@@ -1,9 +1,12 @@
 """
 This is not a plugin; it is just the place where plugins are registered.
 """
+
 from jedi.plugins import stdlib
 from jedi.plugins import flask
 from jedi.plugins import pytest
 from jedi.plugins import django
 from jedi.plugins import plugin_manager
+
+
 plugin_manager.register(stdlib, flask, pytest, django)
diff --git a/jedi/plugins/stdlib.py b/jedi/plugins/stdlib.py
index 3c1eb074..e1004ec8 100644
--- a/jedi/plugins/stdlib.py
+++ b/jedi/plugins/stdlib.py
@@ -12,23 +12,33 @@ compiled module that returns the types for C-builtins.
 import parso
 import os
 from inspect import Parameter
+
 from jedi import debug
 from jedi.inference.utils import safe_property
 from jedi.inference.helpers import get_str_or_none
-from jedi.inference.arguments import iterate_argument_clinic, ParamIssue, repack_with_argument_clinic, AbstractArguments, TreeArgumentsWrapper
+from jedi.inference.arguments import iterate_argument_clinic, ParamIssue, \
+    repack_with_argument_clinic, AbstractArguments, TreeArgumentsWrapper
 from jedi.inference import analysis
 from jedi.inference import compiled
-from jedi.inference.value.instance import AnonymousMethodExecutionContext, MethodExecutionContext
-from jedi.inference.base_value import ContextualizedNode, NO_VALUES, ValueSet, ValueWrapper, LazyValueWrapper
+from jedi.inference.value.instance import \
+    AnonymousMethodExecutionContext, MethodExecutionContext
+from jedi.inference.base_value import ContextualizedNode, \
+    NO_VALUES, ValueSet, ValueWrapper, LazyValueWrapper
 from jedi.inference.value import ClassValue, ModuleValue
 from jedi.inference.value.klass import ClassMixin
 from jedi.inference.value.function import FunctionMixin
 from jedi.inference.value import iterable
-from jedi.inference.lazy_value import LazyTreeValue, LazyKnownValue, LazyKnownValues
+from jedi.inference.lazy_value import LazyTreeValue, LazyKnownValue, \
+    LazyKnownValues
 from jedi.inference.names import ValueName, BaseTreeParamName
-from jedi.inference.filters import AttributeOverwrite, publish_method, ParserTreeFilter, DictFilter
+from jedi.inference.filters import AttributeOverwrite, publish_method, \
+    ParserTreeFilter, DictFilter
 from jedi.inference.signature import AbstractSignature, SignatureWrapper
-_NAMEDTUPLE_CLASS_TEMPLATE = """_property = property
+
+
+# Copied from Python 3.6's stdlib.
+_NAMEDTUPLE_CLASS_TEMPLATE = """\
+_property = property
 _tuple = tuple
 from operator import itemgetter as _itemgetter
 from collections import OrderedDict
@@ -85,58 +95,293 @@ class {typename}(tuple):

 {field_defs}
 """
-_NAMEDTUPLE_FIELD_TEMPLATE = """    {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
-"""
+
+_NAMEDTUPLE_FIELD_TEMPLATE = '''\
+    {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
+'''
+
+
+def execute(callback):
+    def wrapper(value, arguments):
+        def call():
+            return callback(value, arguments=arguments)
+
+        try:
+            obj_name = value.name.string_name
+        except AttributeError:
+            pass
+        else:
+            p = value.parent_context
+            if p is not None and p.is_builtins_module():
+                module_name = 'builtins'
+            elif p is not None and p.is_module():
+                module_name = p.py__name__()
+            else:
+                return call()
+
+            if value.is_bound_method() or value.is_instance():
+                # value can be an instance for example if it is a partial
+                # object.
+                return call()
+
+            # for now we just support builtin functions.
+            try:
+                func = _implemented[module_name][obj_name]
+            except KeyError:
+                pass
+            else:
+                return func(value, arguments=arguments, callback=call)
+        return call()
+
+    return wrapper
+
+
+def _follow_param(inference_state, arguments, index):
+    try:
+        key, lazy_value = list(arguments.unpack())[index]
+    except IndexError:
+        return NO_VALUES
+    else:
+        return lazy_value.infer()


 def argument_clinic(clinic_string, want_value=False, want_context=False,
-    want_arguments=False, want_inference_state=False, want_callback=False):
+                    want_arguments=False, want_inference_state=False,
+                    want_callback=False):
     """
     Works like Argument Clinic (PEP 436), to validate function params.
     """
-    pass
+
+    def f(func):
+        def wrapper(value, arguments, callback):
+            try:
+                args = tuple(iterate_argument_clinic(
+                    value.inference_state, arguments, clinic_string))
+            except ParamIssue:
+                return NO_VALUES
+
+            debug.dbg('builtin start %s' % value, color='MAGENTA')
+            kwargs = {}
+            if want_context:
+                kwargs['context'] = arguments.context
+            if want_value:
+                kwargs['value'] = value
+            if want_inference_state:
+                kwargs['inference_state'] = value.inference_state
+            if want_arguments:
+                kwargs['arguments'] = arguments
+            if want_callback:
+                kwargs['callback'] = callback
+            result = func(*args, **kwargs)
+            debug.dbg('builtin end: %s', result, color='MAGENTA')
+            return result
+
+        return wrapper
+    return f
+
+
+@argument_clinic('iterator[, default], /', want_inference_state=True)
+def builtins_next(iterators, defaults, inference_state):
+    # TODO theoretically we have to check here if something is an iterator.
+    # That is probably done by checking if it's not a class.
+    return defaults | iterators.py__getattribute__('__next__').execute_with_values()
+
+
+@argument_clinic('iterator[, default], /')
+def builtins_iter(iterators_or_callables, defaults):
+    # TODO implement this if it's a callable.
+    return iterators_or_callables.py__getattribute__('__iter__').execute_with_values()
+
+
+@argument_clinic('object, name[, default], /')
+def builtins_getattr(objects, names, defaults=None):
+    # follow the first param
+    for value in objects:
+        for name in names:
+            string = get_str_or_none(name)
+            if string is None:
+                debug.warning('getattr called without str')
+                continue
+            else:
+                return value.py__getattribute__(string)
+    return NO_VALUES
+
+
+@argument_clinic('object[, bases, dict], /')
+def builtins_type(objects, bases, dicts):
+    if bases or dicts:
+        # It's a type creation... maybe someday...
+        return NO_VALUES
+    else:
+        return objects.py__class__()


 class SuperInstance(LazyValueWrapper):
     """To be used like the object ``super`` returns."""
-
     def __init__(self, inference_state, instance):
         self.inference_state = inference_state
-        self._instance = instance
+        self._instance = instance  # Corresponds to super().__self__
+
+    def _get_bases(self):
+        return self._instance.py__class__().py__bases__()
+
+    def _get_wrapped_value(self):
+        objs = self._get_bases()[0].infer().execute_with_values()
+        if not objs:
+            # This is just a fallback and will only be used if it's not
+            # possible to find a class.
+            return self._instance
+        return next(iter(objs))
+
+    def get_filters(self, origin_scope=None):
+        for b in self._get_bases():
+            for value in b.infer().execute_with_values():
+                for f in value.get_filters():
+                    yield f
+
+
+@argument_clinic('[type[, value]], /', want_context=True)
+def builtins_super(types, objects, context):
+    instance = None
+    if isinstance(context, AnonymousMethodExecutionContext):
+        instance = context.instance
+    elif isinstance(context, MethodExecutionContext):
+        instance = context.instance
+    if instance is None:
+        return NO_VALUES
+    return ValueSet({SuperInstance(instance.inference_state, instance)})


 class ReversedObject(AttributeOverwrite):
-
     def __init__(self, reversed_obj, iter_list):
         super().__init__(reversed_obj)
         self._iter_list = iter_list

+    def py__iter__(self, contextualized_node=None):
+        return self._iter_list
+
+    @publish_method('__next__')
+    def _next(self, arguments):
+        return ValueSet.from_sets(
+            lazy_value.infer() for lazy_value in self._iter_list
+        )
+
+
+@argument_clinic('sequence, /', want_value=True, want_arguments=True)
+def builtins_reversed(sequences, value, arguments):
+    # While we could do without this variable (just by using sequences), we
+    # want static analysis to work well. Therefore we need to generate the
+    # values again.
+    key, lazy_value = next(arguments.unpack())
+    cn = None
+    if isinstance(lazy_value, LazyTreeValue):
+        cn = ContextualizedNode(lazy_value.context, lazy_value.data)
+    ordered = list(sequences.iterate(cn))
+
+    # Repack iterator values and then run it the normal way. This is
+    # necessary, because `reversed` is a function and autocompletion
+    # would fail in certain cases like `reversed(x).__iter__` if we
+    # just returned the result directly.
+    seq, = value.inference_state.typing_module.py__getattribute__('Iterator').execute_with_values()
+    return ValueSet([ReversedObject(seq, list(reversed(ordered)))])
+
+
+@argument_clinic('value, type, /', want_arguments=True, want_inference_state=True)
+def builtins_isinstance(objects, types, arguments, inference_state):
+    bool_results = set()
+    for o in objects:
+        cls = o.py__class__()
+        try:
+            cls.py__bases__
+        except AttributeError:
+            # In theory everything in Python has a class attribute, but some
+            # objects (e.g. from numpy) might not, so fall back to the full
+            # {True, False} set.
+            bool_results = set([True, False])
+            break
+
+        mro = list(cls.py__mro__())
+
+        for cls_or_tup in types:
+            if cls_or_tup.is_class():
+                bool_results.add(cls_or_tup in mro)
+            elif cls_or_tup.name.string_name == 'tuple' \
+                    and cls_or_tup.get_root_context().is_builtins_module():
+                # Check for tuples.
+                classes = ValueSet.from_sets(
+                    lazy_value.infer()
+                    for lazy_value in cls_or_tup.iterate()
+                )
+                bool_results.add(any(cls in mro for cls in classes))
+            else:
+                _, lazy_value = list(arguments.unpack())[1]
+                if isinstance(lazy_value, LazyTreeValue):
+                    node = lazy_value.data
+                    message = 'TypeError: isinstance() arg 2 must be a ' \
+                              'class, type, or tuple of classes and types, ' \
+                              'not %s.' % cls_or_tup
+                    analysis.add(lazy_value.context, 'type-error-isinstance', node, message)
+
+    return ValueSet(
+        compiled.builtin_from_name(inference_state, str(b))
+        for b in bool_results
+    )
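+
+# Sketch of the error branch above: for a call like `isinstance(x, 3)` the
+# second argument is neither a class nor a builtin tuple, so jedi attaches a
+# 'type-error-isinstance' issue to that argument's tree node.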
+

 class StaticMethodObject(ValueWrapper):
-    pass
+    def py__get__(self, instance, class_value):
+        return ValueSet([self._wrapped_value])


-class ClassMethodObject(ValueWrapper):
+@argument_clinic('sequence, /')
+def builtins_staticmethod(functions):
+    return ValueSet(StaticMethodObject(f) for f in functions)

+
+class ClassMethodObject(ValueWrapper):
     def __init__(self, class_method_obj, function):
         super().__init__(class_method_obj)
         self._function = function

+    def py__get__(self, instance, class_value):
+        return ValueSet([
+            ClassMethodGet(__get__, class_value, self._function)
+            for __get__ in self._wrapped_value.py__getattribute__('__get__')
+        ])

-class ClassMethodGet(ValueWrapper):

+class ClassMethodGet(ValueWrapper):
     def __init__(self, get_method, klass, function):
         super().__init__(get_method)
         self._class = klass
         self._function = function

+    def get_signatures(self):
+        return [sig.bind(self._function) for sig in self._function.get_signatures()]

-class ClassMethodArguments(TreeArgumentsWrapper):
+    def py__call__(self, arguments):
+        return self._function.execute(ClassMethodArguments(self._class, arguments))

+
+class ClassMethodArguments(TreeArgumentsWrapper):
     def __init__(self, klass, arguments):
         super().__init__(arguments)
         self._class = klass

+    def unpack(self, func=None):
+        yield None, LazyKnownValue(self._class)
+        for values in self._wrapped_arguments.unpack(func):
+            yield values
+
+
+@argument_clinic('sequence, /', want_value=True, want_arguments=True)
+def builtins_classmethod(functions, value, arguments):
+    return ValueSet(
+        ClassMethodObject(class_method_object, function)
+        for class_method_object in value.py__call__(arguments=arguments)
+        for function in functions
+    )
+

 class PropertyObject(AttributeOverwrite, ValueWrapper):
     api_type = 'property'
@@ -145,6 +390,26 @@ class PropertyObject(AttributeOverwrite, ValueWrapper):
         super().__init__(property_obj)
         self._function = function

+    def py__get__(self, instance, class_value):
+        if instance is None:
+            return ValueSet([self])
+        return self._function.execute_with_values(instance)
+
+    @publish_method('deleter')
+    @publish_method('getter')
+    @publish_method('setter')
+    def _return_self(self, arguments):
+        return ValueSet({self})
+
+
+@argument_clinic('func, /', want_callback=True)
+def builtins_property(functions, callback):
+    return ValueSet(
+        PropertyObject(property_value, function)
+        for property_value in callback()
+        for function in functions
+    )
+

 def collections_namedtuple(value, arguments, callback):
     """
@@ -154,109 +419,461 @@ def collections_namedtuple(value, arguments, callback):
     inferring the result.

     """
-    pass
+    inference_state = value.inference_state
+
+    # Process arguments
+    name = 'jedi_unknown_namedtuple'
+    for c in _follow_param(inference_state, arguments, 0):
+        x = get_str_or_none(c)
+        if x is not None:
+            name = x
+            break
+
+    # TODO here we only use one of the types, we should use all.
+    param_values = _follow_param(inference_state, arguments, 1)
+    if not param_values:
+        return NO_VALUES
+    _fields = list(param_values)[0]
+    string = get_str_or_none(_fields)
+    if string is not None:
+        fields = string.replace(',', ' ').split()
+    elif isinstance(_fields, iterable.Sequence):
+        fields = [
+            get_str_or_none(v)
+            for lazy_value in _fields.py__iter__()
+            for v in lazy_value.infer()
+        ]
+        fields = [f for f in fields if f is not None]
+    else:
+        return NO_VALUES
+
+    # Build source code
+    code = _NAMEDTUPLE_CLASS_TEMPLATE.format(
+        typename=name,
+        field_names=tuple(fields),
+        num_fields=len(fields),
+        arg_list=repr(tuple(fields)).replace("'", "")[1:-1],
+        repr_fmt='',
+        field_defs='\n'.join(_NAMEDTUPLE_FIELD_TEMPLATE.format(index=index, name=name)
+                             for index, name in enumerate(fields))
+    )
+
+    # Parse source code
+    module = inference_state.grammar.parse(code)
+    generated_class = next(module.iter_classdefs())
+    parent_context = ModuleValue(
+        inference_state, module,
+        code_lines=parso.split_lines(code, keepends=True),
+    ).as_context()
+
+    return ValueSet([ClassValue(inference_state, parent_context, generated_class)])
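+
+# Rough shape of the synthesis above, assuming the usual CPython-style class
+# template: `Point = namedtuple('Point', 'x y')` generates source along the
+# lines of `class Point(tuple):` with one property per field, which is then
+# parsed so that completion on Point instances sees `x` and `y`.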


 class PartialObject(ValueWrapper):
-
     def __init__(self, actual_value, arguments, instance=None):
         super().__init__(actual_value)
         self._arguments = arguments
         self._instance = instance

+    def _get_functions(self, unpacked_arguments):
+        key, lazy_value = next(unpacked_arguments, (None, None))
+        if key is not None or lazy_value is None:
+            debug.warning("Partial should have a proper function %s", self._arguments)
+            return None
+        return lazy_value.infer()
+
+    def get_signatures(self):
+        unpacked_arguments = self._arguments.unpack()
+        funcs = self._get_functions(unpacked_arguments)
+        if funcs is None:
+            return []
+
+        arg_count = 0
+        if self._instance is not None:
+            arg_count = 1
+        keys = set()
+        for key, _ in unpacked_arguments:
+            if key is None:
+                arg_count += 1
+            else:
+                keys.add(key)
+        return [PartialSignature(s, arg_count, keys) for s in funcs.get_signatures()]
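+
+    # Example of the bookkeeping above: for `g = functools.partial(f, 1, x=2)`
+    # the stored arguments unpack to (f, 1, x=2); the leading f is consumed,
+    # arg_count becomes 1 and keys == {'x'}, so g's signature is f's with one
+    # positional slot and the keyword `x` removed.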
+
+    def py__call__(self, arguments):
+        funcs = self._get_functions(self._arguments.unpack())
+        if funcs is None:
+            return NO_VALUES
+
+        return funcs.execute(
+            MergedPartialArguments(self._arguments, arguments, self._instance)
+        )
+
     def py__doc__(self):
         """
         In CPython partial does not replace the docstring. However we are still
         imitating it here, because we want this docstring to be worth something
         for the user.
         """
-        pass
+        callables = self._get_functions(self._arguments.unpack())
+        if callables is None:
+            return ''
+        for callable_ in callables:
+            return callable_.py__doc__()
+        return ''
+
+    def py__get__(self, instance, class_value):
+        return ValueSet([self])


 class PartialMethodObject(PartialObject):
-    pass
+    def py__get__(self, instance, class_value):
+        if instance is None:
+            return ValueSet([self])
+        return ValueSet([PartialObject(self._wrapped_value, self._arguments, instance)])


 class PartialSignature(SignatureWrapper):
-
     def __init__(self, wrapped_signature, skipped_arg_count, skipped_arg_set):
         super().__init__(wrapped_signature)
         self._skipped_arg_count = skipped_arg_count
         self._skipped_arg_set = skipped_arg_set

+    def get_param_names(self, resolve_stars=False):
+        names = self._wrapped_signature.get_param_names()[self._skipped_arg_count:]
+        return [n for n in names if n.string_name not in self._skipped_arg_set]

-class MergedPartialArguments(AbstractArguments):

+class MergedPartialArguments(AbstractArguments):
     def __init__(self, partial_arguments, call_arguments, instance=None):
         self._partial_arguments = partial_arguments
         self._call_arguments = call_arguments
         self._instance = instance

+    def unpack(self, funcdef=None):
+        unpacked = self._partial_arguments.unpack(funcdef)
+        # Skip the first value; it's the function itself, and its presence
+        # was already checked.
+        next(unpacked, None)
+        if self._instance is not None:
+            yield None, LazyKnownValue(self._instance)
+        for key_lazy_value in unpacked:
+            yield key_lazy_value
+        for key_lazy_value in self._call_arguments.unpack(funcdef):
+            yield key_lazy_value
+
+
+def functools_partial(value, arguments, callback):
+    return ValueSet(
+        PartialObject(instance, arguments)
+        for instance in value.py__call__(arguments)
+    )
+
+
+def functools_partialmethod(value, arguments, callback):
+    return ValueSet(
+        PartialMethodObject(instance, arguments)
+        for instance in value.py__call__(arguments)
+    )
+
+
+@argument_clinic('first, /')
+def _return_first_param(firsts):
+    return firsts
+
+
+@argument_clinic('seq')
+def _random_choice(sequences):
+    return ValueSet.from_sets(
+        lazy_value.infer()
+        for sequence in sequences
+        for lazy_value in sequence.py__iter__()
+    )
+
+
+def _dataclass(value, arguments, callback):
+    for c in _follow_param(value.inference_state, arguments, 0):
+        if c.is_class():
+            return ValueSet([DataclassWrapper(c)])
+        else:
+            return ValueSet([value])
+    return NO_VALUES
+

 class DataclassWrapper(ValueWrapper, ClassMixin):
-    pass
+    def get_signatures(self):
+        param_names = []
+        for cls in reversed(list(self.py__mro__())):
+            if isinstance(cls, DataclassWrapper):
+                filter_ = cls.as_context().get_global_filter()
+                # .values ordering is not guaranteed, at least not in
+                # Python < 3.6, when dicts were not ordered, which is an
+                # implementation detail anyway.
+                for name in sorted(filter_.values(), key=lambda name: name.start_pos):
+                    d = name.tree_name.get_definition()
+                    annassign = d.children[1]
+                    if d.type == 'expr_stmt' and annassign.type == 'annassign':
+                        if len(annassign.children) < 4:
+                            default = None
+                        else:
+                            default = annassign.children[3]
+                        param_names.append(DataclassParamName(
+                            parent_context=cls.parent_context,
+                            tree_name=name.tree_name,
+                            annotation_node=annassign.children[1],
+                            default_node=default,
+                        ))
+        return [DataclassSignature(cls, param_names)]
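+
+# E.g. for a `@dataclass`-decorated `class A: x: int = 0`, every annotated
+# assignment in the class body becomes a DataclassParamName, so calling
+# A(...) offers the signature A(x: int = 0), assembled over the MRO in
+# reverse so that base-class fields come first.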


 class DataclassSignature(AbstractSignature):
-
     def __init__(self, value, param_names):
         super().__init__(value)
         self._param_names = param_names

+    def get_param_names(self, resolve_stars=False):
+        return self._param_names

-class DataclassParamName(BaseTreeParamName):

-    def __init__(self, parent_context, tree_name, annotation_node, default_node
-        ):
+class DataclassParamName(BaseTreeParamName):
+    def __init__(self, parent_context, tree_name, annotation_node, default_node):
         super().__init__(parent_context, tree_name)
         self.annotation_node = annotation_node
         self.default_node = default_node

+    def get_kind(self):
+        return Parameter.POSITIONAL_OR_KEYWORD
+
+    def infer(self):
+        if self.annotation_node is None:
+            return NO_VALUES
+        else:
+            return self.parent_context.infer_node(self.annotation_node)

-class ItemGetterCallable(ValueWrapper):

+class ItemGetterCallable(ValueWrapper):
     def __init__(self, instance, args_value_set):
         super().__init__(instance)
         self._args_value_set = args_value_set

+    @repack_with_argument_clinic('item, /')
+    def py__call__(self, item_value_set):
+        value_set = NO_VALUES
+        for args_value in self._args_value_set:
+            lazy_values = list(args_value.py__iter__())
+            if len(lazy_values) == 1:
+                # TODO we need to add the contextualized value.
+                value_set |= item_value_set.get_item(lazy_values[0].infer(), None)
+            else:
+                value_set |= ValueSet([iterable.FakeList(
+                    self._wrapped_value.inference_state,
+                    [
+                        LazyKnownValues(item_value_set.get_item(lazy_value.infer(), None))
+                        for lazy_value in lazy_values
+                    ],
+                )])
+        return value_set
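+
+    # Sketch: `itemgetter(1)(lst)` infers directly to lst's item values,
+    # while `itemgetter(0, 2)(lst)` wraps the lookups in a FakeList, which
+    # mirrors operator.itemgetter returning a tuple for multiple keys.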
+
+
+@argument_clinic('func, /')
+def _functools_wraps(funcs):
+    return ValueSet(WrapsCallable(func) for func in funcs)
+

 class WrapsCallable(ValueWrapper):
-    pass
+    # XXX this is not the correct wrapped value, it should be a weird
+    #     partials object, but it doesn't matter, because it's always used as a
+    #     decorator anyway.
+    @repack_with_argument_clinic('func, /')
+    def py__call__(self, funcs):
+        return ValueSet({Wrapped(func, self._wrapped_value) for func in funcs})


 class Wrapped(ValueWrapper, FunctionMixin):
-
     def __init__(self, func, original_function):
         super().__init__(func)
         self._original_function = original_function

-
-_implemented = {'builtins': {'getattr': builtins_getattr, 'type':
-    builtins_type, 'super': builtins_super, 'reversed': builtins_reversed,
-    'isinstance': builtins_isinstance, 'next': builtins_next, 'iter':
-    builtins_iter, 'staticmethod': builtins_staticmethod, 'classmethod':
-    builtins_classmethod, 'property': builtins_property}, 'copy': {'copy':
-    _return_first_param, 'deepcopy': _return_first_param}, 'json': {'load':
-    lambda value, arguments, callback: NO_VALUES, 'loads': lambda value,
-    arguments, callback: NO_VALUES}, 'collections': {'namedtuple':
-    collections_namedtuple}, 'functools': {'partial': functools_partial,
-    'partialmethod': functools_partialmethod, 'wraps': _functools_wraps},
-    '_weakref': {'proxy': _return_first_param}, 'random': {'choice':
-    _random_choice}, 'operator': {'itemgetter': _operator_itemgetter},
-    'abc': {'abstractmethod': _return_first_param}, 'typing': {'_alias': lambda
-    value, arguments, callback: NO_VALUES, 'runtime_checkable': lambda
-    value, arguments, callback: NO_VALUES}, 'dataclasses': {'dataclass':
-    _dataclass}, 'attr': {'define': _dataclass, 'frozen': _dataclass},
-    'attrs': {'define': _dataclass, 'frozen': _dataclass}, 'os.path': {
-    'dirname': _create_string_input_function(os.path.dirname), 'abspath':
-    _create_string_input_function(os.path.abspath), 'relpath':
-    _create_string_input_function(os.path.relpath), 'join': _os_path_join}}
+    @property
+    def name(self):
+        return self._original_function.name
+
+    def get_signature_functions(self):
+        return [self]
+
+
+@argument_clinic('*args, /', want_value=True, want_arguments=True)
+def _operator_itemgetter(args_value_set, value, arguments):
+    return ValueSet([
+        ItemGetterCallable(instance, args_value_set)
+        for instance in value.py__call__(arguments)
+    ])
+
+
+def _create_string_input_function(func):
+    @argument_clinic('string, /', want_value=True, want_arguments=True)
+    def wrapper(strings, value, arguments):
+        def iterate():
+            for value in strings:
+                s = get_str_or_none(value)
+                if s is not None:
+                    s = func(s)
+                    yield compiled.create_simple_object(value.inference_state, s)
+        values = ValueSet(iterate())
+        if values:
+            return values
+        return value.py__call__(arguments)
+    return wrapper
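+
+# E.g. the os.path.dirname entry registered below: with a statically known
+# argument, dirname("a/b.py") folds to the compiled string "a"; anything jedi
+# cannot resolve to a string falls back to executing the stub.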
+
+
+@argument_clinic('*args, /', want_callback=True)
+def _os_path_join(args_set, callback):
+    if len(args_set) == 1:
+        string = ''
+        sequence, = args_set
+        is_first = True
+        for lazy_value in sequence.py__iter__():
+            string_values = lazy_value.infer()
+            if len(string_values) != 1:
+                break
+            s = get_str_or_none(next(iter(string_values)))
+            if s is None:
+                break
+            if not is_first:
+                string += os.path.sep
+            string += s
+            is_first = False
+        else:
+            return ValueSet([compiled.create_simple_object(sequence.inference_state, string)])
+    return callback()
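+
+# The for/else above only folds the call when every part is a single known
+# string: os.path.join("test", "completion") becomes one compiled string with
+# the parts joined by os.path.sep; any dynamic part falls back to callback().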
+
+
+_implemented = {
+    'builtins': {
+        'getattr': builtins_getattr,
+        'type': builtins_type,
+        'super': builtins_super,
+        'reversed': builtins_reversed,
+        'isinstance': builtins_isinstance,
+        'next': builtins_next,
+        'iter': builtins_iter,
+        'staticmethod': builtins_staticmethod,
+        'classmethod': builtins_classmethod,
+        'property': builtins_property,
+    },
+    'copy': {
+        'copy': _return_first_param,
+        'deepcopy': _return_first_param,
+    },
+    'json': {
+        'load': lambda value, arguments, callback: NO_VALUES,
+        'loads': lambda value, arguments, callback: NO_VALUES,
+    },
+    'collections': {
+        'namedtuple': collections_namedtuple,
+    },
+    'functools': {
+        'partial': functools_partial,
+        'partialmethod': functools_partialmethod,
+        'wraps': _functools_wraps,
+    },
+    '_weakref': {
+        'proxy': _return_first_param,
+    },
+    'random': {
+        'choice': _random_choice,
+    },
+    'operator': {
+        'itemgetter': _operator_itemgetter,
+    },
+    'abc': {
+        # Not sure if this is necessary, but it's used a lot in typeshed and
+        # for now it's easier to just pass the function through.
+        'abstractmethod': _return_first_param,
+    },
+    'typing': {
+        # The _alias function just leads to some annoying type inference.
+        # Therefore, just make it return nothing, which leads to the stubs
+        # being used instead. This only matters for 3.7+.
+        '_alias': lambda value, arguments, callback: NO_VALUES,
+        # runtime_checkable doesn't really change anything and is just
+        # adding logs for inferring stuff, so we can safely ignore it.
+        'runtime_checkable': lambda value, arguments, callback: NO_VALUES,
+    },
+    'dataclasses': {
+        # For now this works at least better than Jedi trying to understand it.
+        'dataclass': _dataclass
+    },
+    # attrs exposes declaration interface roughly compatible with dataclasses
+    # via attrs.define, attrs.frozen and attrs.mutable
+    # https://www.attrs.org/en/stable/names.html
+    'attr': {
+        'define': _dataclass,
+        'frozen': _dataclass,
+    },
+    'attrs': {
+        'define': _dataclass,
+        'frozen': _dataclass,
+    },
+    'os.path': {
+        'dirname': _create_string_input_function(os.path.dirname),
+        'abspath': _create_string_input_function(os.path.abspath),
+        'relpath': _create_string_input_function(os.path.relpath),
+        'join': _os_path_join,
+    }
+}
+
+
+def get_metaclass_filters(func):
+    def wrapper(cls, metaclasses, is_instance):
+        for metaclass in metaclasses:
+            if metaclass.py__name__() == 'EnumMeta' \
+                    and metaclass.get_root_context().py__name__() == 'enum':
+                filter_ = ParserTreeFilter(parent_context=cls.as_context())
+                return [DictFilter({
+                    name.string_name: EnumInstance(cls, name).name
+                    for name in filter_.values()
+                })]
+        return func(cls, metaclasses, is_instance)
+    return wrapper
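+
+# With this hook, a `class Color(enum.Enum): RED = 1` is filtered through
+# EnumInstance below, so completing `Color.` offers RED as an enum member
+# rather than as a plain attribute.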


 class EnumInstance(LazyValueWrapper):
-
     def __init__(self, cls, name):
         self.inference_state = cls.inference_state
-        self._cls = cls
+        self._cls = cls  # The enum class this member belongs to
         self._name = name
         self.tree_node = self._name.tree_name
+
+    @safe_property
+    def name(self):
+        return ValueName(self, self._name.tree_name)
+
+    def _get_wrapped_value(self):
+        n = self._name.string_name
+        if (n.startswith('__') and n.endswith('__')) or self._name.api_type == 'function':
+            inferred = self._name.infer()
+            if inferred:
+                return next(iter(inferred))
+            o, = self.inference_state.builtins_module.py__getattribute__('object')
+            return o
+
+        value, = self._cls.execute_with_values()
+        return value
+
+    def get_filters(self, origin_scope=None):
+        yield DictFilter(dict(
+            name=compiled.create_simple_object(self.inference_state, self._name.string_name).name,
+            value=self._name,
+        ))
+        for f in self._get_wrapped_value().get_filters():
+            yield f
+
+
+def tree_name_to_values(func):
+    def wrapper(inference_state, context, tree_name):
+        if tree_name.value == 'sep' and context.is_module() and context.py__name__() == 'os.path':
+            return ValueSet({
+                compiled.create_simple_object(inference_state, os.path.sep),
+            })
+        return func(inference_state, context, tree_name)
+    return wrapper
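+
+# This makes the bare name `sep` inside the os.path stub infer to the literal
+# os.path.sep of the running platform, which in turn lets string
+# concatenations involving it be folded during file-path completion.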
diff --git a/jedi/settings.py b/jedi/settings.py
index 3f9febf2..3a333e01 100644
--- a/jedi/settings.py
+++ b/jedi/settings.py
@@ -49,22 +49,36 @@ Caching
 """
 import os
 import platform
+
+# ----------------
+# Completion Output Settings
+# ----------------
+
 case_insensitive_completion = True
 """
 Completions are by default case insensitive.
 """
+
 add_bracket_after_function = False
 """
 Adds an opening bracket after a function for completions.
 """
+
+# ----------------
+# Filesystem Cache
+# ----------------
+
 if platform.system().lower() == 'windows':
-    _cache_directory = os.path.join(os.getenv('LOCALAPPDATA') or os.path.
-        expanduser('~'), 'Jedi', 'Jedi')
+    _cache_directory = os.path.join(
+        os.getenv('LOCALAPPDATA') or os.path.expanduser('~'),
+        'Jedi',
+        'Jedi',
+    )
 elif platform.system().lower() == 'darwin':
     _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi')
 else:
-    _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or
-        '~/.cache', 'jedi')
+    _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache',
+                                    'jedi')
 cache_directory = os.path.expanduser(_cache_directory)
 """
 The path where the cache is stored.
@@ -74,6 +88,11 @@ On Linux, this defaults to ``~/.cache/jedi/``, on OS X to
 On Linux, if the environment variable ``$XDG_CACHE_HOME`` is set,
 ``$XDG_CACHE_HOME/jedi`` is used instead of the default one.
 """
+
+# ----------------
+# Parser
+# ----------------
+
 fast_parser = True
 """
 Uses Parso's diff parser. If it is enabled, this might cause issues, please
@@ -81,36 +100,49 @@ read the warning on :class:`.Script`. This feature makes it possible to only
 parse the parts again that have changed, while reusing the rest of the syntax
 tree.
 """
-_cropped_file_size = int(10000000.0)
+
+_cropped_file_size = int(10e6)  # 10 megabytes
 """
 Jedi gets extremely slow if a file exceeds a few thousand lines.
 To avoid getting stuck completely Jedi crops the file at some point.

 One megabyte of typical Python code equals about 20'000 lines of code.
 """
+
+# ----------------
+# Dynamic Stuff
+# ----------------
+
 dynamic_array_additions = True
 """
 Check for `append`, etc. on arrays: [], {}, () as well as list/set calls.
 """
+
 dynamic_params = True
 """
 A dynamic param completion, finds the callees of the function, which define
 the params of a function.
 """
+
 dynamic_params_for_other_modules = True
 """
 Do the same for other modules.
 """
+
 dynamic_flow_information = True
 """
 Check for `isinstance` and other information to infer a type.
 """
-auto_import_modules = ['gi']
+
+auto_import_modules = [
+    'gi',  # This third-party package (GTK bindings) doesn't really work with jedi
+]
 """
 Modules that will not be analyzed but imported, if they contain Python code.
 This improves autocompletion for libraries that use ``setattr`` or
 ``globals()`` modifications a lot.
 """
+
 allow_unsafe_interpreter_executions = True
 """
 Controls whether descriptors are evaluated when using an Interpreter. This is
@@ -119,6 +151,11 @@ something you might want to control when using Jedi from a Repl (e.g. IPython)
 Generally this setting allows Jedi to execute __getitem__ and descriptors like
 `property`.
 """
+
+# ----------------
+# Caching Validity
+# ----------------
+
 call_signatures_validity = 3.0
 """
 Finding function calls might be slow (0.1-0.5s). This is not acceptable for
diff --git a/jedi/utils.py b/jedi/utils.py
index 0dc90e28..dd529be7 100644
--- a/jedi/utils.py
+++ b/jedi/utils.py
@@ -1,6 +1,7 @@
 """
 Utilities for end-users.
 """
+
 import __main__
 from collections import namedtuple
 import logging
@@ -8,7 +9,10 @@ import traceback
 import re
 import os
 import sys
+
 from jedi import Interpreter
+
+
 READLINE_DEBUG = False


@@ -54,7 +58,69 @@ def setup_readline(namespace_module=__main__, fuzzy=False):
     your shell profile (usually ``.bash_profile`` or ``.profile`` if you use
     bash).
     """
-    pass
+    if READLINE_DEBUG:
+        logging.basicConfig(
+            filename='/tmp/jedi.log',
+            filemode='a',
+            level=logging.DEBUG
+        )
+
+    class JediRL:
+        def complete(self, text, state):
+            """
+            This completion interface is pretty weird; a generator would
+            make a lot more sense, but this is probably still how it works
+            for backwards compatibility reasons.
+
+            The only important part is the ``state == 0`` branch; everything
+            else has been copied from the ``rlcompleter`` standard library
+            module.
+            """
+            if state == 0:
+                # Calling python doesn't have a path, so add the cwd to
+                # sys.path.
+                sys.path.insert(0, os.getcwd())
+                try:
+                    logging.debug("Start REPL completion: " + repr(text))
+                    interpreter = Interpreter(text, [namespace_module.__dict__])
+
+                    completions = interpreter.complete(fuzzy=fuzzy)
+                    logging.debug("REPL completions: %s", completions)
+
+                    self.matches = [
+                        text[:len(text) - c._like_name_length] + c.name_with_symbols
+                        for c in completions
+                    ]
+                except:
+                    logging.error("REPL Completion error:\n" + traceback.format_exc())
+                    raise
+                finally:
+                    sys.path.pop(0)
+            try:
+                return self.matches[state]
+            except IndexError:
+                return None
+
+    try:
+        # Need to import this one as well to make sure it's executed before
+        # this code. This didn't use to be an issue until 3.3. Starting with
+        # 3.4 this is different, it always overwrites the completer if it's not
+        # already imported here.
+        import rlcompleter  # noqa: F401
+        import readline
+    except ImportError:
+        print("Jedi: Module readline not available.")
+    else:
+        readline.set_completer(JediRL().complete)
+        readline.parse_and_bind("tab: complete")
+        # jedi itself does the case matching
+        readline.parse_and_bind("set completion-ignore-case on")
+        # because it's easier to hit the tab just once
+        readline.parse_and_bind("set show-all-if-unmodified")
+        readline.parse_and_bind("set show-all-if-ambiguous on")
+        # don't repeat all the things written in the readline all the time
+        readline.parse_and_bind("set completion-prefix-display-length 2")
+        # No delimiters, Jedi handles that.
+        readline.set_completer_delims('')


 def version_info():
@@ -62,4 +128,7 @@ def version_info():
     Returns a namedtuple of Jedi's version, similar to Python's
     ``sys.version_info``.
     """
-    pass
+    Version = namedtuple('Version', 'major, minor, micro')
+    from jedi import __version__
+    tupl = re.findall(r'[a-z]+|\d+', __version__)
+    return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)])
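+
+# For example, with __version__ == '0.19.1' the regex yields ['0', '19', '1']
+# and the result is Version(major=0, minor=19, micro=1), analogous to
+# sys.version_info.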